diff --git a/.werft/build.ts b/.werft/build.ts
index bfd7cbe38f9089..ce1b6286410c18 100644
--- a/.werft/build.ts
+++ b/.werft/build.ts
@@ -14,6 +14,7 @@ import { createHash } from "crypto";
 import { InstallMonitoringSatelliteParams, installMonitoringSatellite, observabilityStaticChecks } from './observability/monitoring-satellite';
 import { SpanStatusCode } from '@opentelemetry/api';
 import * as Tracing from './observability/tracing'
+import * as VM from './vm/vm'
 
 // Will be set once tracing has been initialized
 let werft: Werft
@@ -58,6 +59,7 @@ const phases = {
     PREDEPLOY: 'predeploy',
     DEPLOY: 'deploy',
     TRIGGER_INTEGRATION_TESTS: 'trigger integration tests',
+    VM: 'vm'
 }
 
 // Werft slices for deploy phase via installer
@@ -74,6 +76,10 @@ const installerSlices = {
     DEPLOYMENT_WAITING: "monitor server deployment"
 }
 
+const vmSlices = {
+    BOOT_VM: 'Booting VM'
+}
+
 export function parseVersion(context) {
     let buildConfig = context.Annotations || {};
     const explicitVersion = buildConfig.version;
@@ -142,6 +148,7 @@ export async function build(context, version) {
     const withPayment= "with-payment" in buildConfig;
     const withObservability = "with-observability" in buildConfig;
     const withHelm = "with-helm" in buildConfig;
+    const withVM = "with-vm" in buildConfig;
 
     const jobConfig = {
         buildConfig,
@@ -283,6 +290,23 @@ export async function build(context, version) {
         withObservability,
     };
 
+    if (withVM) {
+        werft.phase(phases.VM, "Start VM");
+
+        if (!VM.vmExists({ name: destname })) {
+            werft.log(vmSlices.BOOT_VM, 'Starting VM')
+            VM.startVM({ name: destname })
+        } else {
+            werft.log(vmSlices.BOOT_VM, 'VM already exists')
+        }
+
+        werft.log(vmSlices.BOOT_VM, 'Waiting for VM to be ready')
+        VM.waitForVM({ name: destname, timeoutMS: 1000 * 60 * 3 })
+
+        werft.done(phases.VM)
+        return
+    }
+
     werft.phase(phases.PREDEPLOY, "Checking for existing installations...");
     // the context namespace is not set at this point
     const hasGitpodHelmInstall = exec(`helm status ${helmInstallName} -n ${deploymentConfig.namespace}`, {slice: "check for Helm install", dontCheckRc: true}).code === 0;
diff --git a/.werft/build.yaml b/.werft/build.yaml
index e89f828c607768..4e651b94fd45d5 100644
--- a/.werft/build.yaml
+++ b/.werft/build.yaml
@@ -33,6 +33,9 @@ pod:
     hostPath:
       path: /mnt/disks/ssd0/go-build-cache
       type: DirectoryOrCreate
+  - name: harvester-kubeconfig
+    secret:
+      secretName: harvester-kubeconfig
   # - name: deploy-key
   #   secret:
   #     secretName: deploy-key
@@ -77,6 +80,8 @@ pod:
     - name: go-build-cache
       mountPath: /go-build-cache
       readOnly: false
+    - name: harvester-kubeconfig
+      mountPath: /mnt/secrets/harvester-kubeconfig
     # - name: deploy-key
     #   mountPath: /mnt/secrets/deploy-key
     #   readOnly: true
diff --git a/.werft/vm/manifests.ts b/.werft/vm/manifests.ts
new file mode 100644
index 00000000000000..649203a7c5474c
--- /dev/null
+++ b/.werft/vm/manifests.ts
@@ -0,0 +1,145 @@
+type NamespaceManifestOptions = {
+    namespace: string
+}
+
+export function NamespaceManifest({ namespace }: NamespaceManifestOptions) {
+    return `
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ${namespace}
+`
+}
+
+type VirtualMachineManifestArguments = {
+    vmName: string
+    namespace: string
+    claimName: string,
+    userDataSecretName: string
+}
+
+export function VirtualMachineManifest({ vmName, namespace, claimName, userDataSecretName }: VirtualMachineManifestArguments) {
+    return `
+apiVersion: kubevirt.io/v1
+type: kubevirt.io.virtualmachine
+kind: VirtualMachine
+metadata:
+  namespace: ${namespace}
+  annotations:
+    harvesterhci.io/volumeClaimTemplates: '[{"metadata":{"name":"${claimName}","annotations":{"harvesterhci.io/imageId":"default/image-cjlm2"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"10Gi"}},"volumeMode":"Block","storageClassName":"longhorn-image-cjlm2"}}]'
'[{"metadata":{"name":"${claimName}","annotations":{"harvesterhci.io/imageId":"default/image-cjlm2"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"10Gi"}},"volumeMode":"Block","storageClassName":"longhorn-image-cjlm2"}}]' + network.harvesterhci.io/ips: "[]" + labels: + harvesterhci.io/creator: harvester + harvesterhci.io/os: ubuntu + name: ${vmName} +spec: + running: true + template: + metadata: + annotations: + harvesterhci.io/sshNames: "[]" + labels: + harvesterhci.io/vmName: ${vmName} + spec: + domain: + hostname: ${vmName} + machine: + type: q35 + cpu: + cores: 1 + sockets: 1 + threads: 1 + devices: + interfaces: + - masquerade: {} + model: virtio + name: default + disks: + - name: system + bootOrder: 1 + disk: + bus: virtio + - name: cloudinitdisk + disk: + bus: virtio + resources: + limits: + memory: 2Gi + cpu: 1 + evictionStrategy: LiveMigrate + networks: + - pod: {} + name: default + volumes: + - name: system + persistentVolumeClaim: + claimName: ${claimName} + - name: cloudinitdisk + cloudInitNoCloud: + networkDataSecretRef: + name: ${userDataSecretName} + secretRef: + name: ${userDataSecretName} +` +} + +type ServiceManifestOptions = { + vmName: string + namespace: string +} + +export function ServiceManifest({ vmName, namespace }: ServiceManifestOptions) { + return ` +apiVersion: v1 +kind: Service +metadata: + name: proxy + namespace: ${namespace} +spec: + ports: + - name: ssh + protocol: TCP + port: 22 + targetPort: 22 + - name: http + protocol: TCP + port: 80 + targetPort: 80 + - name: https + protocol: TCP + port: 443 + targetPort: 443 + - name: kube-api + protocol: TCP + port: 6443 + targetPort: 6443 + selector: + harvesterhci.io/vmName: ${vmName} + type: ClusterIP +` +} + +type UserDataSecretManifestOptions = { + namespace: string, + secretName: string +} + +export function UserDataSecretManifest({ namespace, secretName }: UserDataSecretManifestOptions) { + const userdata = Buffer.from(`#cloud-config +users: + - name: ubuntu + lock_passwd: false + sudo: "ALL=(ALL) NOPASSWD: ALL" + passwd: "$6$exDY1mhS4KUYCE/2$zmn9ToZwTKLhCw.b4/b.ZRTIZM30JZ4QrOQ2aOXJ8yk96xpcCof0kxKwuX1kqLG/ygbJ1f8wxED22bTL4F46P0"`).toString("base64") + return ` +apiVersion: v1 +type: secret +kind: Secret +data: + networkdata: "" + userdata: ${userdata} +metadata: + name: ${secretName} + namespace: ${namespace} +` +} diff --git a/.werft/vm/vm.ts b/.werft/vm/vm.ts new file mode 100644 index 00000000000000..df66a2d56c7927 --- /dev/null +++ b/.werft/vm/vm.ts @@ -0,0 +1,91 @@ +import { exec } from '../util/shell'; + +import * as Manifests from './manifests' + +const KUBECONFIG_PATH = '/mnt/secrets/harvester-kubeconfig/harvester-kubeconfig.yml' + +/** + * Convenience function to kubectl apply a manifest from stdin. + */ +function kubectlApplyManifest(manifest: string, options?: { validate?: boolean }) { + exec(` + cat < options.timeoutMS) { + throw new Error("VM didn reach Running status before the timeout") + } + + console.log(`VM is not yet running. Current status is ${status}. Sleeping 5 seconds`) + exec('sleep 5', { silent: true }) + } + +}