Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 31 additions & 6 deletions .werft/build.ts
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ Tracing.initialize()
werft = new Werft("build")
})
.then(() => build(context, version))
.then(() => VM.stopKubectlPortForwards())
.then(() => werft.endAllSpans())
.catch((err) => {
werft.rootSpan.setStatus({
Expand All @@ -52,6 +53,8 @@ Tracing.initialize()
// Explicitly not using process.exit as we need to flush tracing, see tracing.js
process.exitCode = 1
}

VM.stopKubectlPortForwards()
})

// Werft phases
Expand All @@ -77,7 +80,11 @@ const installerSlices = {
}

const vmSlices = {
BOOT_VM: 'Booting VM'
BOOT_VM: 'Booting VM',
START_KUBECTL_PORT_FORWARDS: 'Start kubectl port forwards',
COPY_CERT_MANAGER_RESOURCES: 'Copy CertManager resources from core-dev',
INSTALL_LETS_ENCRYPT_ISSUER: 'Install Lets Encrypt issuer',
KUBECONFIG: 'Getting kubeconfig'
}

export function parseVersion(context) {
Expand Down Expand Up @@ -272,7 +279,7 @@ export async function build(context, version) {

const destname = version.split(".")[0];
const namespace = `staging-${destname}`;
const domain = `${destname}.staging.gitpod-dev.com`;
const domain = withVM ? `${destname}.preview.gitpod-dev.com` : `${destname}.staging.gitpod-dev.com`;
const monitoringDomain = `${destname}.preview.gitpod-dev.com`;
const url = `https://${domain}`;
const deploymentConfig: DeploymentConfig = {
Expand All @@ -293,18 +300,36 @@ export async function build(context, version) {
if (withVM) {
werft.phase(phases.VM, "Start VM");

if (!VM.vmExists({ name: destname })) {
werft.log(vmSlices.COPY_CERT_MANAGER_RESOURCES, 'Copy over CertManager resources from core-dev')
exec(`kubectl get secret clouddns-dns01-solver-svc-acct -n certmanager -o yaml | sed 's/namespace: certmanager/namespace: cert-manager/g' > clouddns-dns01-solver-svc-acct.yaml`, { slice: vmSlices.COPY_CERT_MANAGER_RESOURCES })
exec(`kubectl get clusterissuer letsencrypt-issuer-gitpod-core-dev -o yaml | sed 's/letsencrypt-issuer-gitpod-core-dev/letsencrypt-issuer/g' > letsencrypt-issuer.yaml`, { slice: vmSlices.COPY_CERT_MANAGER_RESOURCES })

const existingVM = VM.vmExists({ name: destname })
if (!existingVM) {
werft.log(vmSlices.BOOT_VM, 'Starting VM')
VM.startVM({ name: destname })
} else {
werft.log(vmSlices.BOOT_VM, 'VM already exists')
}

werft.log(vmSlices.BOOT_VM, 'Waiting for VM to be ready')
VM.waitForVM({ name: destname, timeoutMS: 1000 * 60 * 3 })
VM.waitForVM({ name: destname, timeoutMS: 1000 * 60 * 3, slice: vmSlices.BOOT_VM })

werft.done(phases.VM)
return
werft.log(vmSlices.START_KUBECTL_PORT_FORWARDS, 'Starting SSH port forwarding')
VM.startSSHProxy({ name: destname, slice: vmSlices.START_KUBECTL_PORT_FORWARDS })

werft.log(vmSlices.START_KUBECTL_PORT_FORWARDS, 'Starting Kube API port forwarding')
VM.startKubeAPIProxy({ name: destname, slice: vmSlices.START_KUBECTL_PORT_FORWARDS })

werft.log(vmSlices.KUBECONFIG, 'Copying k3s kubeconfig')
VM.copyk3sKubeconfig({ path: 'k3s.yml', timeoutMS: 1000 * 60 * 3, slice: vmSlices.KUBECONFIG })
// NOTE: This was a quick hack to override the existing kubeconfig so all future kubectl commands use the k3s cluster.
// We might want to keep both kubeconfigs around and be explicit about which one we're using.
exec(`mv k3s.yml /home/gitpod/.kube/config`)

if (!existingVM) {
exec(`kubectl apply -f clouddns-dns01-solver-svc-acct.yaml -f letsencrypt-issuer.yaml`, { slice: vmSlices.INSTALL_LETS_ENCRYPT_ISSUER, dontCheckRc: true })
}
}

werft.phase(phases.PREDEPLOY, "Checking for existing installations...");
Expand Down
11 changes: 11 additions & 0 deletions .werft/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,9 @@ pod:
- name: harvester-kubeconfig
secret:
secretName: harvester-kubeconfig
- name: harvester-vm-ssh-keys
secret:
secretName: harvester-vm-ssh-keys
# - name: deploy-key
# secret:
# secretName: deploy-key
Expand Down Expand Up @@ -82,6 +85,8 @@ pod:
readOnly: false
- name: harvester-kubeconfig
mountPath: /mnt/secrets/harvester-kubeconfig
- name: harvester-vm-ssh-keys
mountPath: /mnt/secrets/harvester-vm-ssh-keys
# - name: deploy-key
# mountPath: /mnt/secrets/deploy-key
# readOnly: true
Expand Down Expand Up @@ -163,6 +168,12 @@ pod:
export DOCKER_HOST=tcp://$NODENAME:2475
sudo chown -R gitpod:gitpod /workspace

mkdir /workspace/.ssh
cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa /workspace/.ssh/id_rsa_harvester_vm
cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa.pub /workspace/.ssh/id_rsa_harvester_vm.pub
sudo chmod 600 /workspace/.ssh/id_rsa_harvester_vm
sudo chmod 644 /workspace/.ssh/id_rsa_harvester_vm.pub

(cd .werft && yarn install && mv node_modules ..) | werft log slice prep
printf '{{ toJson . }}' > context.json

Expand Down
50 changes: 21 additions & 29 deletions .werft/vm/manifests.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,9 @@ type VirtualMachineManifestArguments = {
vmName: string
namespace: string
claimName: string,
userDataSecretName: string
}

export function VirtualMachineManifest({ vmName, namespace, claimName, userDataSecretName }: VirtualMachineManifestArguments) {
export function VirtualMachineManifest({ vmName, namespace, claimName }: VirtualMachineManifestArguments) {
return `
apiVersion: kubevirt.io/v1
type: kubevirt.io.virtualmachine
Expand Down Expand Up @@ -46,7 +45,7 @@ spec:
machine:
type: q35
cpu:
cores: 1
cores: 4
sockets: 1
threads: 1
devices:
Expand All @@ -64,8 +63,8 @@ spec:
bus: virtio
resources:
limits:
memory: 2Gi
cpu: 1
memory: 8Gi
cpu: 4
evictionStrategy: LiveMigrate
networks:
- pod: {}
Expand All @@ -76,10 +75,23 @@ spec:
claimName: ${claimName}
- name: cloudinitdisk
cloudInitNoCloud:
networkDataSecretRef:
name: ${userDataSecretName}
secretRef:
name: ${userDataSecretName}
userData: |-
#cloud-config
users:
- name: ubuntu
sudo: "ALL=(ALL) NOPASSWD: ALL"
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC/aB/HYsb56V0NBOEab6j33v3LIxRiGqG4fmidAryAXevLyTANJPF8m44KSzSQg7AI7PMy6egxQp/JqH2b+3z1cItWuHZSU+klsKNuf5HxK7AOrND3ahbejZfyYewtKFQ3X9rv5Sk8TAR5gw5oPbkTR61jiLa58Sw7UkhLm2EDguGASb6mBal8iboiF8Wpl8QIvPmJaGIOY2YwXLepwFA3S3kVqW88eh2WFmjTMre5ASLguYNkHXjyb/TuhVFzAvphzpl84RAaEyjKYnk45fh4xRXx+oKqlfKRJJ/Owxa7SmGO+/4rWb3chdnpodHeu7XjERmjYLY+r46sf6n6ySgEht1xAWjMb1uqZqkDx+fDDsjFSeaN3ncX6HSoDOrphFmXYSwaMpZ8v67A791fuUPrMLC+YMckhTuX2g4i3XUdumIWvhaMvKhy/JRRMsfUH0h+KAkBLI6tn5ozoXiQhgM4SAE5HsMr6CydSIzab0yY3sq0avmZgeoc78+8PKPkZG1zRMEspV/hKKBC8hq7nm0bu4IgzuEIYHowOD8svqA0ufhDWxTt6A4Jo0xDzhFyKme7KfmW7SIhpejf3T1Wlf+QINs1hURr8LSOZEyY2SzYmAoQ49N0SSPb5xyG44cptpKcj0WCAJjBJoZqz0F5x9TjJ8XToB5obyJfRHD1JjxoMQ== [email protected]
chpasswd:
list: |
ubuntu:ubuntu
expire: False
runcmd:
- curl -sfL https://get.k3s.io | sh -
- sleep 10
- kubectl label nodes ${vmName} gitpod.io/workload_meta=true gitpod.io/workload_ide=true gitpod.io/workload_workspace_services=true gitpod.io/workload_workspace_regular=true gitpod.io/workload_workspace_headless=true gitpod.io/workspace_0=true gitpod.io/workspace_1=true gitpod.io/workspace_2=true
- kubectl create ns certs
- kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.6.1/cert-manager.yaml
`
}

Expand Down Expand Up @@ -123,23 +135,3 @@ type UserDataSecretManifestOptions = {
namespace: string,
secretName: string
}

/**
 * Renders a Kubernetes Secret manifest carrying the cloud-init userdata for a
 * Harvester VM: an `ubuntu` user with passwordless sudo and a pre-hashed password.
 *
 * NOTE(review): the YAML inside these template literals is whitespace-sensitive
 * and the original indentation may have been mangled in transit — confirm the
 * rendered manifest against a known-good deployment before relying on it.
 */
export function UserDataSecretManifest({ namespace, secretName }: UserDataSecretManifestOptions) {
// Base64-encode the cloud-config payload so it can be embedded in the Secret's `data` field.
const userdata = Buffer.from(`#cloud-config
users:
- name: ubuntu
lock_passwd: false
sudo: "ALL=(ALL) NOPASSWD: ALL"
passwd: "$6$exDY1mhS4KUYCE/2$zmn9ToZwTKLhCw.b4/b.ZRTIZM30JZ4QrOQ2aOXJ8yk96xpcCof0kxKwuX1kqLG/ygbJ1f8wxED22bTL4F46P0"`).toString("base64")
// NOTE(review): `type: secret` is unusual for a core/v1 Secret (typically `Opaque`) — left as-is.
return `
apiVersion: v1
type: secret
kind: Secret
data:
networkdata: ""
userdata: ${userdata}
metadata:
name: ${secretName}
namespace: ${namespace}
`
}
68 changes: 54 additions & 14 deletions .werft/vm/vm.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import { exec } from '../util/shell';
import { getGlobalWerftInstance } from '../util/werft';

import * as Manifests from './manifests'

Expand All @@ -21,27 +22,18 @@ EOF
*/
export function startVM(options: { name: string }) {
const namespace = `preview-${options.name}`
const userDataSecretName = `userdata-${options.name}`

kubectlApplyManifest(
Manifests.NamespaceManifest({
namespace
})
)

kubectlApplyManifest(
Manifests.UserDataSecretManifest({
namespace,
secretName: userDataSecretName,
})
)

kubectlApplyManifest(
Manifests.VirtualMachineManifest({
namespace,
vmName: options.name,
claimName: `${options.name}-${Date.now()}`,
userDataSecretName
claimName: `${options.name}-${Date.now()}`
}),
{ validate: false }
)
Expand All @@ -68,12 +60,13 @@ export function vmExists(options: { name: string }) {
* Wait until the VM Instance reaches the Running status.
* If the VM Instance doesn't reach Running before the timeoutMS it will throw an Error.
*/
export function waitForVM(options: { name: string, timeoutMS: number }) {
export function waitForVM(options: { name: string, timeoutMS: number, slice: string }) {
const werft = getGlobalWerftInstance()
const namespace = `preview-${options.name}`
const startTime = Date.now()
while (true) {

const status = exec(`kubectl --kubeconfig ${KUBECONFIG_PATH} -n ${namespace} get vmi ${options.name} -o jsonpath="{.status.phase}"`, { silent: true }).stdout.trim()
const status = exec(`kubectl --kubeconfig ${KUBECONFIG_PATH} -n ${namespace} get vmi ${options.name} -o jsonpath="{.status.phase}"`, { silent: true, slice: options.slice }).stdout.trim()

if (status == "Running") {
return
Expand All @@ -84,8 +77,55 @@ export function waitForVM(options: { name: string, timeoutMS: number }) {
throw new Error("VM didn reach Running status before the timeout")
}

console.log(`VM is not yet running. Current status is ${status}. Sleeping 5 seconds`)
exec('sleep 5', { silent: true })
werft.log(options.slice, `VM is not yet running. Current status is ${status}. Sleeping 5 seconds`)
exec('sleep 5', { silent: true, slice: options.slice })
}
}

/**
* Copies the k3s kubeconfig out of the VM and places it at `path`
* If it doesn't manage to do so before the timeout it will throw an Error
*/
/**
 * Copies the k3s kubeconfig out of the VM and writes it to `options.path`.
 *
 * Retries every 5 seconds until the ssh copy succeeds; throws an Error with the
 * last exit code, stderr and stdout if it does not succeed within `options.timeoutMS`.
 *
 * NOTE(review): assumes the SSH port-forward to 127.0.0.1:22 is already running
 * (see startSSHProxy) and that the harvester VM key was provisioned by the build
 * pod — confirm callers always establish the proxy first.
 */
export function copyk3sKubeconfig(options: { path: string, timeoutMS: number, slice: string }) {
    const werft = getGlobalWerftInstance()
    const startTime = Date.now()
    // Hoisted: the command is loop-invariant. OpenSSH accepts -o options after the
    // destination, but conventionally they belong before it — kept byte-identical here.
    const copyCommand = `ssh -i /workspace/.ssh/id_rsa_harvester_vm [email protected] -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no 'sudo cat /etc/rancher/k3s/k3s.yaml' > ${options.path}`
    while (true) {
        // dontCheckRc: a non-zero exit is expected while the VM is still booting.
        const result = exec(copyCommand, { silent: true, dontCheckRc: true, slice: options.slice })

        // Exit code 0 means the kubeconfig was written to options.path.
        if (result.code === 0) {
            return
        }

        const elapsedTimeMs = Date.now() - startTime
        if (elapsedTimeMs > options.timeoutMS) {
            throw new Error(`Wasn't able to copy out the kubeconfig before the timeout. Exit code ${result.code}. Stderr: ${result.stderr}. Stdout: ${result.stdout}`)
        }

        werft.log(options.slice, `Wasn't able to copy out kubeconfig yet. Sleeping 5 seconds`)
        exec('sleep 5', { silent: true, slice: options.slice })
    }
}

/**
* Proxy 127.0.0.1:22 to :22 in the VM through the k8s service
*/
/**
 * Forwards 127.0.0.1:22 to port 22 of the VM via the preview namespace's
 * `proxy` service, so the VM can be reached over plain ssh on localhost.
 */
export function startSSHProxy(options: { name: string, slice: string }) {
    const ns = `preview-${options.name}`
    const portForward = `sudo kubectl --kubeconfig=${KUBECONFIG_PATH} -n ${ns} port-forward service/proxy 22:22`
    // async: the port-forward must stay alive in the background for the rest of the build.
    exec(portForward, { async: true, silent: true, slice: options.slice })
}

/**
* Proxy 127.0.0.1:6443 to :6443 in the VM through the k8s service
*/
/**
 * Forwards 127.0.0.1:6443 to port 6443 of the VM via the preview namespace's
 * `proxy` service, exposing the in-VM Kubernetes API server on localhost.
 */
export function startKubeAPIProxy(options: { name: string, slice: string }) {
    const ns = `preview-${options.name}`
    const portForward = `sudo kubectl --kubeconfig=${KUBECONFIG_PATH} -n ${ns} port-forward service/proxy 6443:6443`
    // async: the port-forward must stay alive in the background for the rest of the build.
    exec(portForward, { async: true, silent: true, slice: options.slice })
}

/**
* Terminates all running kubectl proxies
*/
/**
 * Terminates all running kubectl port-forwards by killing every kubectl
 * process on the node; `|| true` keeps this a no-op when none are running.
 */
export function stopKubectlPortForwards() {
    const killCommand = `sudo killall kubectl || true`
    exec(killCommand)
}