
Commit ad09dbd

Add opt-in to use Harvester k3s cluster for development

1 parent d4e1f3f

File tree: 4 files changed, +85 -44 lines

  .werft/build.ts
  .werft/build.yaml
  .werft/vm/manifests.ts
  .werft/vm/vm.ts

.werft/build.ts

Lines changed: 37 additions & 5 deletions
@@ -52,6 +52,9 @@ Tracing.initialize()
         // Explicitly not using process.exit as we need to flush tracing, see tracing.js
         process.exitCode = 1
     }
+
+    // TODO: Find a better place for this. This terminates background proxies.
+    exec(`sudo killall kubectl || true`)
 })

 // Werft phases
@@ -77,7 +80,10 @@ const installerSlices = {
 }

 const vmSlices = {
-    BOOT_VM: 'Booting VM'
+    BOOT_VM: 'Booting VM',
+    SSH_PROXY: 'SSH Proxy',
+    COPY_CERT_MANAGER_RESOURCES: 'Copy CertManager resources from core-dev',
+    KUBECONFIG: 'Getting kubeconfig'
 }

 export function parseVersion(context) {
@@ -272,7 +278,7 @@ export async function build(context, version) {

     const destname = version.split(".")[0];
     const namespace = `staging-${destname}`;
-    const domain = `${destname}.staging.gitpod-dev.com`;
+    const domain = withVM ? `${destname}.preview.gitpod-dev.com` : `${destname}.staging.gitpod-dev.com`;
     const monitoringDomain = `${destname}.preview.gitpod-dev.com`;
     const url = `https://${domain}`;
     const deploymentConfig: DeploymentConfig = {
@@ -293,7 +299,12 @@ export async function build(context, version) {
     if (withVM) {
         werft.phase(phases.VM, "Start VM");

-        if (!VM.vmExists({ name: destname })) {
+        werft.log(vmSlices.COPY_CERT_MANAGER_RESOURCES, 'Copy over CertManager resources from core-dev')
+        exec(`kubectl get secret clouddns-dns01-solver-svc-acct -n certmanager -o yaml | sed 's/namespace: certmanager/namespace: cert-manager/g' > clouddns-dns01-solver-svc-acct.yaml`)
+        exec(`kubectl get clusterissuer letsencrypt-issuer-gitpod-core-dev -o yaml | sed 's/letsencrypt-issuer-gitpod-core-dev/letsencrypt-issuer/g' > letsencrypt-issuer.yaml`)
+
+        const existingVM = VM.vmExists({ name: destname })
+        if (!existingVM) {
             werft.log(vmSlices.BOOT_VM, 'Starting VM')
             VM.startVM({ name: destname })
         } else {
@@ -303,8 +314,29 @@ export async function build(context, version) {
         werft.log(vmSlices.BOOT_VM, 'Waiting for VM to be ready')
         VM.waitForVM({ name: destname, timeoutMS: 1000 * 60 * 3 })

-        werft.done(phases.VM)
-        return
+        // TODO: We're hoping to have the proxy in the Harvester cluster be able to forward
+        // both SSH and the Kube API so we don't need these two. Currently this will cause
+        // the Werft job to never exit, as these two proxies keep the job from
+        // terminating.
+        werft.log(vmSlices.SSH_PROXY, 'Starting SSH proxy')
+        VM.startSSHProxy({ name: destname })
+
+        werft.log(vmSlices.SSH_PROXY, 'Starting k8s API proxy')
+        VM.startKubeAPIProxy({ name: destname })
+
+        werft.log(vmSlices.KUBECONFIG, 'Copying k3s kubeconfig')
+        // TODO: This is currently flaky and sometimes fails if the VM isn't ready in time.
+        // Instead of just sleeping we should periodically check if SSH access is ready.
+        exec(`sleep 10`)
+        exec(`ssh -i /workspace/.ssh/id_rsa_harvester_vm [email protected] -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no 'sudo cat /etc/rancher/k3s/k3s.yaml' > k3s.yml`)
+
+        // TODO: This was a quick hack to override the existing kubeconfig so all future kubectl commands use the k3s cluster.
+        // We might want to keep both kubeconfigs around and be explicit about which one we're using.
+        exec(`mv k3s.yml /home/gitpod/.kube/config`)
+
+        if (!existingVM) {
+            exec(`kubectl apply -f clouddns-dns01-solver-svc-acct.yaml -f letsencrypt-issuer.yaml`)
+        }
     }

     werft.phase(phases.PREDEPLOY, "Checking for existing installations...");
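The fixed `exec(`sleep 10`)` above is the flakiness the TODO describes: nothing guarantees the VM's sshd is accepting connections after ten seconds. The periodic check the comment asks for could poll with a no-op `ssh ... 'exit'` in the same style as `waitForVM`'s sleep loop. A minimal sketch, assuming a shelljs-style `exec` like the one already used here; `waitForSSH` is hypothetical and not part of this commit:

    // Hypothetical helper, not part of this commit: poll until the VM accepts
    // SSH instead of sleeping a fixed 10 seconds, mirroring waitForVM's loop.
    export function waitForSSH(options: { timeoutMS: number }) {
        const startTime = Date.now()
        while (true) {
            // A no-op remote command; exit code 0 means sshd accepted the key.
            const result = exec(
                `ssh -i /workspace/.ssh/id_rsa_harvester_vm [email protected] ` +
                `-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=5 'exit'`,
                { silent: true }
            )
            if (result.code == 0) {
                return
            }
            if (Date.now() - startTime > options.timeoutMS) {
                throw new Error(`SSH still not ready after ${options.timeoutMS}ms`)
            }
            exec('sleep 5', { silent: true })
        }
    }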

.werft/build.yaml

Lines changed: 11 additions & 0 deletions
@@ -36,6 +36,9 @@ pod:
   - name: harvester-kubeconfig
     secret:
       secretName: harvester-kubeconfig
+  - name: harvester-vm-ssh-keys
+    secret:
+      secretName: harvester-vm-ssh-keys
   # - name: deploy-key
   #   secret:
   #     secretName: deploy-key
@@ -82,6 +85,8 @@ pod:
       readOnly: false
     - name: harvester-kubeconfig
       mountPath: /mnt/secrets/harvester-kubeconfig
+    - name: harvester-vm-ssh-keys
+      mountPath: /mnt/secrets/harvester-vm-ssh-keys
     # - name: deploy-key
     #   mountPath: /mnt/secrets/deploy-key
     #   readOnly: true
@@ -163,6 +168,12 @@ pod:
       export DOCKER_HOST=tcp://$NODENAME:2475
       sudo chown -R gitpod:gitpod /workspace

+      mkdir /workspace/.ssh
+      cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa /workspace/.ssh/id_rsa_harvester_vm
+      cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa.pub /workspace/.ssh/id_rsa_harvester_vm.pub
+      sudo chmod 600 /workspace/.ssh/id_rsa_harvester_vm
+      sudo chmod 644 /workspace/.ssh/id_rsa_harvester_vm.pub
+
       (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
       printf '{{ toJson . }}' > context.json

.werft/vm/manifests.ts

Lines changed: 21 additions & 29 deletions
@@ -15,10 +15,9 @@ type VirtualMachineManifestArguments = {
     vmName: string
     namespace: string
     claimName: string,
-    userDataSecretName: string
 }

-export function VirtualMachineManifest({ vmName, namespace, claimName, userDataSecretName }: VirtualMachineManifestArguments) {
+export function VirtualMachineManifest({ vmName, namespace, claimName }: VirtualMachineManifestArguments) {
     return `
 apiVersion: kubevirt.io/v1
 type: kubevirt.io.virtualmachine
@@ -46,7 +45,7 @@ spec:
       machine:
         type: q35
       cpu:
-        cores: 1
+        cores: 4
         sockets: 1
         threads: 1
       devices:
@@ -64,8 +63,8 @@ spec:
             bus: virtio
       resources:
         limits:
-          memory: 2Gi
-          cpu: 1
+          memory: 8Gi
+          cpu: 4
       evictionStrategy: LiveMigrate
       networks:
         - pod: {}
@@ -76,10 +75,23 @@ spec:
         claimName: ${claimName}
     - name: cloudinitdisk
       cloudInitNoCloud:
-        networkDataSecretRef:
-          name: ${userDataSecretName}
-        secretRef:
-          name: ${userDataSecretName}
+        userData: |-
+          #cloud-config
+          users:
+            - name: ubuntu
+              sudo: "ALL=(ALL) NOPASSWD: ALL"
+              ssh_authorized_keys:
+                - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC/aB/HYsb56V0NBOEab6j33v3LIxRiGqG4fmidAryAXevLyTANJPF8m44KSzSQg7AI7PMy6egxQp/JqH2b+3z1cItWuHZSU+klsKNuf5HxK7AOrND3ahbejZfyYewtKFQ3X9rv5Sk8TAR5gw5oPbkTR61jiLa58Sw7UkhLm2EDguGASb6mBal8iboiF8Wpl8QIvPmJaGIOY2YwXLepwFA3S3kVqW88eh2WFmjTMre5ASLguYNkHXjyb/TuhVFzAvphzpl84RAaEyjKYnk45fh4xRXx+oKqlfKRJJ/Owxa7SmGO+/4rWb3chdnpodHeu7XjERmjYLY+r46sf6n6ySgEht1xAWjMb1uqZqkDx+fDDsjFSeaN3ncX6HSoDOrphFmXYSwaMpZ8v67A791fuUPrMLC+YMckhTuX2g4i3XUdumIWvhaMvKhy/JRRMsfUH0h+KAkBLI6tn5ozoXiQhgM4SAE5HsMr6CydSIzab0yY3sq0avmZgeoc78+8PKPkZG1zRMEspV/hKKBC8hq7nm0bu4IgzuEIYHowOD8svqA0ufhDWxTt6A4Jo0xDzhFyKme7KfmW7SIhpejf3T1Wlf+QINs1hURr8LSOZEyY2SzYmAoQ49N0SSPb5xyG44cptpKcj0WCAJjBJoZqz0F5x9TjJ8XToB5obyJfRHD1JjxoMQ== [email protected]
+          chpasswd:
+            list: |
+              ubuntu:ubuntu
+            expire: False
+          runcmd:
+            - curl -sfL https://get.k3s.io | sh -
+            - sleep 10
+            - kubectl label nodes ${vmName} gitpod.io/workload_meta=true gitpod.io/workload_ide=true gitpod.io/workload_workspace_services=true gitpod.io/workload_workspace_regular=true gitpod.io/workload_workspace_headless=true gitpod.io/workspace_0=true gitpod.io/workspace_1=true gitpod.io/workspace_2=true
+            - kubectl create ns certs
+            - kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.6.1/cert-manager.yaml
 `
 }

@@ -123,23 +135,3 @@ type UserDataSecretManifestOptions = {
     namespace: string,
     secretName: string
 }
-
-export function UserDataSecretManifest({ namespace, secretName }: UserDataSecretManifestOptions) {
-    const userdata = Buffer.from(`#cloud-config
-users:
-  - name: ubuntu
-    lock_passwd: false
-    sudo: "ALL=(ALL) NOPASSWD: ALL"
-    passwd: "$6$exDY1mhS4KUYCE/2$zmn9ToZwTKLhCw.b4/b.ZRTIZM30JZ4QrOQ2aOXJ8yk96xpcCof0kxKwuX1kqLG/ygbJ1f8wxED22bTL4F46P0"`).toString("base64")
-    return `
-apiVersion: v1
-type: secret
-kind: Secret
-data:
-  networkdata: ""
-  userdata: ${userdata}
-metadata:
-  name: ${secretName}
-  namespace: ${namespace}
-`
-}
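Inlining the cloud-init userData into the template string (rather than referencing it via the removed Secret) means a stray indentation change can produce invalid YAML, and `startVM` in `vm.ts` below applies this manifest with `{ validate: false }`. A possible pre-apply sanity check, sketched under the assumption that the `js-yaml` package were added as a dependency (it is not one in this commit):

    import * as yaml from 'js-yaml'
    import { VirtualMachineManifest } from './manifests'

    // Render the manifest and let a YAML parser catch indentation mistakes in
    // the inlined cloud-init block before kubectl ever sees them.
    const rendered = VirtualMachineManifest({
        vmName: 'check',
        namespace: 'preview-check',
        claimName: `check-${Date.now()}`,
    })
    yaml.load(rendered) // throws YAMLException if the template produced invalid YAML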

.werft/vm/vm.ts

Lines changed: 16 additions & 10 deletions
@@ -21,27 +21,18 @@ EOF
 */
 export function startVM(options: { name: string }) {
     const namespace = `preview-${options.name}`
-    const userDataSecretName = `userdata-${options.name}`

     kubectlApplyManifest(
         Manifests.NamespaceManifest({
             namespace
         })
     )

-    kubectlApplyManifest(
-        Manifests.UserDataSecretManifest({
-            namespace,
-            secretName: userDataSecretName,
-        })
-    )
-
     kubectlApplyManifest(
         Manifests.VirtualMachineManifest({
             namespace,
             vmName: options.name,
-            claimName: `${options.name}-${Date.now()}`,
-            userDataSecretName
+            claimName: `${options.name}-${Date.now()}`
         }),
         { validate: false }
     )
@@ -87,5 +78,20 @@ export function waitForVM(options: { name: string, timeoutMS: number }) {
         console.log(`VM is not yet running. Current status is ${status}. Sleeping 5 seconds`)
         exec('sleep 5', { silent: true })
     }
+}
+
+/**
+ * Proxy 127.0.0.1:22 to :22 in the VM through the k8s service
+ */
+export function startSSHProxy(options: { name: string }) {
+    const namespace = `preview-${options.name}`
+    exec(`sudo kubectl --kubeconfig=${KUBECONFIG_PATH} -n ${namespace} port-forward service/proxy 22:22`, { async: true, silent: true })
+}

+/**
+ * Proxy 127.0.0.1:6443 to :6443 in the VM through the k8s service
+ */
+export function startKubeAPIProxy(options: { name: string }) {
+    const namespace = `preview-${options.name}`
+    exec(`sudo kubectl --kubeconfig=${KUBECONFIG_PATH} -n ${namespace} port-forward service/proxy 6443:6443`, { async: true, silent: true })
 }
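Because both port-forwards run with `{ async: true }`, they are what keeps the Werft job alive, and the blunt `sudo killall kubectl` added at the top of `build.ts` is the current cleanup. If `exec` here is shelljs' `exec`, it returns the underlying `ChildProcess` in async mode, so a more targeted cleanup is possible. A sketch, where the `proxies` registry and `stopProxies` are hypothetical additions, not part of this commit:

    import { ChildProcess } from 'child_process'

    // Hypothetical registry of the background proxies started by this module.
    const proxies: ChildProcess[] = []

    function startProxy(namespace: string, port: number) {
        // In async mode shelljs hands back the ChildProcess, so we can record
        // exactly which processes we started. KUBECONFIG_PATH is the constant
        // this module already uses.
        const child = exec(
            `sudo kubectl --kubeconfig=${KUBECONFIG_PATH} -n ${namespace} port-forward service/proxy ${port}:${port}`,
            { async: true, silent: true }
        )
        proxies.push(child)
    }

    export function stopProxies() {
        // The children run under sudo (root), so a plain process.kill from the
        // unprivileged job would get EPERM; signal via sudo instead, but only
        // the PIDs we actually started rather than every kubectl on the node.
        proxies.forEach(p => exec(`sudo kill ${p.pid} || true`))
    }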
