diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ed81c89..c4c48d21 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,15 +15,18 @@ All notable changes to this project will be documented in this file. ### Changed +- Split operator deployment into Deployment and DaemonSet ([#645]). + - Introduce two different modes: `csi-node-service` and `controller`. + - The CSI server is deployed via a DaemonSet to be available on every node. + - The controller is deployed via a Deployment with a single replica. - Version CRD structs and enums as v1alpha1 ([#636]). -- BREAKING: Rearrange values to be somewhat consistent with the listener-operator value changes ([#641]). - - `image.repository` has been moved to `secretOperator.image.repository`. - - `image.tag` has been moved to `secretOperator.image.tag`. - - `image.pullPolicy` has been moved to `secretOperator.image.pullPolicy`. - - `csiProvisioner` values have been moved to `externalProvisioner`. - - `csiNodeDriverRegistrar` values have been moved to `nodeDriverRegistrar`. - - `node.driver` values have been moved to `secretOperator`. - - `securityContext` values have been moved to `secretOperator.securityContext`. +- BREAKING: Rearrange values to be somewhat consistent with the listener-operator value changes ([#641], [#645]). + - `csiProvisioner` values have been moved to `csiNodeDriver.externalProvisioner`. + - `csiNodeDriverRegistrar` values have been moved to `csiNodeDriver.nodeDriverRegistrar`. + - `node.driver.resources` values have been split into `controllerService.resources` and `csiNodeDriver.nodeService.resources`. + - `securityContext` values have been split into `controllerService.securityContext` and `csiNodeDriver.nodeService.securityContext`. + - `podAnnotations`, `podSecurityContext`, `nodeSelector`, `tolerations`, and `affinity` have been split into `controllerService` and `csiNodeDriver`. + - `kubeletDir` has been moved to `csiNodeDriver.kubeletDir`. - Bump csi-node-driver-registrar to `v2.15.0` ([#642]). - Bump csi-provisioner to `v5.3.0` ([#643]). @@ -33,6 +36,7 @@ All notable changes to this project will be documented in this file. [#642]: https://github.com/stackabletech/secret-operator/pull/642 [#643]: https://github.com/stackabletech/secret-operator/pull/643 [#644]: https://github.com/stackabletech/secret-operator/pull/644 +[#645]: https://github.com/stackabletech/secret-operator/pull/645 ## [25.7.0] - 2025-07-23 diff --git a/Tiltfile b/Tiltfile index fb84bcc0..d2295bde 100644 --- a/Tiltfile +++ b/Tiltfile @@ -26,6 +26,7 @@ if os.path.exists('result'): # oci.stackable.tech/sandbox/opa-operator:7y19m3d8clwxlv34v5q2x4p7v536s00g instead of # oci.stackable.tech/sandbox/opa-operator:0.0.0-dev (which does not exist) k8s_kind('Deployment', image_json_path='{.spec.template.metadata.annotations.internal\\.stackable\\.tech/image}') +k8s_kind('DaemonSet', image_json_path='{.spec.template.metadata.annotations.internal\\.stackable\\.tech/image}') # Exclude stale CRDs from Helm chart, and apply the rest helm_crds, helm_non_crds = filter_yaml( diff --git a/deploy/helm/secret-operator/templates/controller-deployment.yaml b/deploy/helm/secret-operator/templates/controller-deployment.yaml new file mode 100644 index 00000000..dca71496 --- /dev/null +++ b/deploy/helm/secret-operator/templates/controller-deployment.yaml @@ -0,0 +1,97 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "operator.fullname" . }} + labels: + {{- include "operator.labels" .
| nindent 4 }} spec: selector: matchLabels: {{- include "operator.selectorLabels" . | nindent 6 }} template: metadata: annotations: internal.stackable.tech/image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" {{- with .Values.controllerService.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} labels: {{- include "operator.selectorLabels" . | nindent 8 }} spec: {{- with .Values.image.pullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} # NOTE (@Techassi): Does it maybe make sense to have two different service accounts? serviceAccountName: {{ include "operator.fullname" . }}-serviceaccount securityContext: {{- toYaml .Values.controllerService.podSecurityContext | nindent 8 }} containers: - name: {{ include "operator.appname" . }} securityContext: {{- toYaml .Values.controllerService.securityContext | nindent 12 }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} resources: {{ .Values.controllerService.resources | toYaml | nindent 12 }} # The arguments passed to the command being run in the container. The final command will # look like `secret-operator run controller [OPTIONS]`. The controller only needs to run # once in a Kubernetes cluster and as such is deployed as a Deployment with a single # replica. args: - run - controller env: # The following env vars are passed as clap (think CLI) arguments to the operator. # They are picked up by clap using the structs defined in the operator. # (which in turn pulls in https://github.com/stackabletech/operator-rs/blob/main/crates/stackable-operator/src/cli.rs) # You can read there about the expected values and purposes. # Sometimes products need to know the operator image, e.g. the opa-bundle-builder OPA # sidecar uses the operator image. - name: OPERATOR_IMAGE # Tilt can use annotations as image paths, but not env variables valueFrom: fieldRef: fieldPath: metadata.annotations['internal.stackable.tech/image'] # Namespace the operator Pod is running in, e.g. used to construct the conversion # webhook endpoint. - name: OPERATOR_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace # The name of the Kubernetes Service that points to the operator Pod, e.g. used to # construct the conversion webhook endpoint. - name: OPERATOR_SERVICE_NAME value: {{ include "operator.fullname" . }} # Operators need to know the node name they are running on, to e.g. discover the # Kubernetes domain name from the kubelet API. - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName {{- if .Values.kubernetesClusterDomain }} - name: KUBERNETES_CLUSTER_DOMAIN value: {{ .Values.kubernetesClusterDomain | quote }} {{- end }} {{- include "telemetry.envVars" . | nindent 12 }} {{- with .Values.controllerService.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.controllerService.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.controllerService.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.controllerService.priorityClassName }} priorityClassName: {{ .
}} {{- end }} diff --git a/deploy/helm/secret-operator/templates/daemonset.yaml b/deploy/helm/secret-operator/templates/csi-node-driver-daemonset.yaml similarity index 60% rename from deploy/helm/secret-operator/templates/daemonset.yaml rename to deploy/helm/secret-operator/templates/csi-node-driver-daemonset.yaml index 9da10540..bfbd42f2 100644 --- a/deploy/helm/secret-operator/templates/daemonset.yaml +++ b/deploy/helm/secret-operator/templates/csi-node-driver-daemonset.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{ include "operator.fullname" . }}-daemonset + name: {{ include "operator.fullname" . }}-csi-node-driver labels: {{- include "operator.labels" . | nindent 4 }} spec: @@ -11,10 +11,11 @@ spec: {{- include "operator.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.podAnnotations }} annotations: - {{- toYaml . | nindent 8 }} - {{- end }} + internal.stackable.tech/image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- with .Values.csiNodeDriver.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} labels: {{- include "operator.selectorLabels" . | nindent 8 }} spec: @@ -22,17 +23,24 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} + # NOTE (@Techassi): Does it maybe make sense to have two different service accounts? serviceAccountName: {{ include "operator.fullname" . }}-serviceaccount securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- toYaml .Values.csiNodeDriver.podSecurityContext | nindent 8 }} containers: - - name: {{ include "operator.appname" . }} + - name: csi-node-service securityContext: - {{- toYaml .Values.secretOperator.securityContext | nindent 12 }} - image: "{{ .Values.secretOperator.image.repository }}:{{ .Values.secretOperator.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.secretOperator.image.pullPolicy }} + {{- toYaml .Values.csiNodeDriver.nodeService.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} resources: - {{ .Values.secretOperator.resources | toYaml | nindent 12 }} + {{ .Values.csiNodeDriver.nodeService.resources | toYaml | nindent 12 }} + # The arguments passed to the command being run in the container. The final command will + # look like `secret-operator run csi-node-service [OPTIONS]`. The CSI server needs to run on + # every Kubernetes cluster node and as such is deployed as a DaemonSet. + args: + - run + - csi-node-service env: # The following env vars are passed as clap (think CLI) arguments to the operator. # They are picked up by clap using the structs defied in the operator. @@ -42,7 +50,7 @@ - name: CSI_ENDPOINT value: /csi/csi.sock - name: PRIVILEGED - value: {{ .Values.secretOperator.securityContext.privileged | quote }} + value: {{ .Values.csiNodeDriver.nodeService.securityContext.privileged | quote }} # Sometimes products need to know the operator image, e.g. the opa-bundle-builder OPA # sidecar uses the operator image.
@@ -80,17 +88,18 @@ spec: - name: csi mountPath: /csi - name: mountpoint - mountPath: {{ .Values.kubeletDir }}/pods - {{- if .Values.secretOperator.securityContext.privileged }} + mountPath: {{ .Values.csiNodeDriver.kubeletDir }}/pods + {{- if .Values.csiNodeDriver.nodeService.securityContext.privileged }} mountPropagation: Bidirectional {{- end }} - name: tmp mountPath: /tmp + - name: external-provisioner - image: "{{ .Values.externalProvisioner.image.repository }}:{{ .Values.externalProvisioner.image.tag }}" - imagePullPolicy: {{ .Values.externalProvisioner.image.pullPolicy }} + image: "{{ .Values.csiNodeDriver.externalProvisioner.image.repository }}:{{ .Values.csiNodeDriver.externalProvisioner.image.tag }}" + imagePullPolicy: {{ .Values.csiNodeDriver.externalProvisioner.image.pullPolicy }} resources: - {{ .Values.externalProvisioner.resources | toYaml | nindent 12 }} + {{ .Values.csiNodeDriver.externalProvisioner.resources | toYaml | nindent 12 }} args: - --csi-address=/csi/csi.sock - --feature-gates=Topology=true @@ -98,14 +107,15 @@ spec: volumeMounts: - name: csi mountPath: /csi + - name: node-driver-registrar - image: "{{ .Values.nodeDriverRegistrar.image.repository }}:{{ .Values.nodeDriverRegistrar.image.tag }}" - imagePullPolicy: {{ .Values.nodeDriverRegistrar.image.pullPolicy }} + image: "{{ .Values.csiNodeDriver.nodeDriverRegistrar.image.repository }}:{{ .Values.csiNodeDriver.nodeDriverRegistrar.image.tag }}" + imagePullPolicy: {{ .Values.csiNodeDriver.nodeDriverRegistrar.image.pullPolicy }} resources: - {{ .Values.nodeDriverRegistrar.resources | toYaml | nindent 12 }} + {{ .Values.csiNodeDriver.nodeDriverRegistrar.resources | toYaml | nindent 12 }} args: - --csi-address=/csi/csi.sock - - --kubelet-registration-path={{ .Values.kubeletDir }}/plugins/secrets.stackable.tech/csi.sock + - --kubelet-registration-path={{ .Values.csiNodeDriver.kubeletDir }}/plugins/secrets.stackable.tech/csi.sock volumeMounts: - name: registration-sock mountPath: /registration @@ -116,27 +126,27 @@ spec: hostPath: # node-driver-registrar appends a driver-unique filename to this path to avoid conflicts # see https://github.com/stackabletech/secret-operator/issues/229 for why this path should not be too long - path: {{ .Values.kubeletDir }}/plugins_registry + path: {{ .Values.csiNodeDriver.kubeletDir }}/plugins_registry - name: csi hostPath: - path: {{ .Values.kubeletDir }}/plugins/secrets.stackable.tech/ + path: {{ .Values.csiNodeDriver.kubeletDir }}/plugins/secrets.stackable.tech/ - name: mountpoint hostPath: - path: {{ .Values.kubeletDir }}/pods/ + path: {{ .Values.csiNodeDriver.kubeletDir }}/pods/ - name: tmp emptyDir: {} - {{- with .Values.nodeSelector }} + {{- with .Values.csiNodeDriver.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.affinity }} + {{- with .Values.csiNodeDriver.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.tolerations }} + {{- with .Values.csiNodeDriver.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.priorityClassName }} + {{- with .Values.csiNodeDriver.priorityClassName }} priorityClassName: {{ . }} {{- end }} diff --git a/deploy/helm/secret-operator/values.yaml b/deploy/helm/secret-operator/values.yaml index d90e7371..fbe32663 100644 --- a/deploy/helm/secret-operator/values.yaml +++ b/deploy/helm/secret-operator/values.yaml @@ -1,39 +1,20 @@ # Default values for secret-operator. 
--- +# Used by both the Controller Service and Node Service containers image: + repository: oci.stackable.tech/sdp/secret-operator + # tag: 0.0.0-dev + pullPolicy: IfNotPresent pullSecrets: [] -externalProvisioner: - image: - repository: oci.stackable.tech/sdp/sig-storage/csi-provisioner - tag: v5.3.0 - pullPolicy: IfNotPresent - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 100m - memory: 128Mi -nodeDriverRegistrar: - image: - repository: oci.stackable.tech/sdp/sig-storage/csi-node-driver-registrar - tag: v2.15.0 - pullPolicy: IfNotPresent - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 100m - memory: 128Mi +controllerService: + podAnnotations: {} + podSecurityContext: {} + # fsGroup: 2000 + nodeSelector: {} + tolerations: [] + affinity: {} -secretOperator: - image: - repository: oci.stackable.tech/sdp/secret-operator - # tag: 0.0.0-dev - pullPolicy: IfNotPresent - # Resources of the secret-operator container itself resources: limits: cpu: 100m @@ -45,10 +26,6 @@ secretOperator: securityContext: # secret-operator requires root permissions runAsUser: 0 - # It is strongly recommended to run secret-operator as a privileged container, since - # it enables additional protections for the secret contents. - # Unprivileged mode is EXPERIMENTAL and requires manual migration for an existing cluster. - privileged: true # capabilities: # drop: # - ALL @@ -56,6 +33,69 @@ secretOperator: # runAsNonRoot: true # runAsUser: 1000 +csiNodeDriver: + # Kubelet dir may vary in environments such as microk8s. + # See https://github.com/stackabletech/secret-operator/issues/229 + kubeletDir: /var/lib/kubelet + + podAnnotations: {} + podSecurityContext: {} + # fsGroup: 2000 + nodeSelector: {} + tolerations: [] + affinity: {} + + nodeService: + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + + securityContext: + # secret-operator requires root permissions + runAsUser: 0 + # It is strongly recommended to run secret-operator as a privileged container, since + # it enables additional protections for the secret contents. + # Unprivileged mode is EXPERIMENTAL and requires manual migration for an existing cluster. + privileged: true + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + externalProvisioner: + image: + repository: oci.stackable.tech/sdp/sig-storage/csi-provisioner + tag: v5.3.0 + pullPolicy: IfNotPresent + # NOTE (@Techassi): Support setting pullSecrets + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 100m + memory: 128Mi + + nodeDriverRegistrar: + image: + repository: oci.stackable.tech/sdp/sig-storage/csi-node-driver-registrar + tag: v2.15.0 + pullPolicy: IfNotPresent + # NOTE (@Techassi): Support setting pullSecrets + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 100m + memory: 128Mi + nameOverride: "" fullnameOverride: "" @@ -68,30 +108,11 @@ serviceAccount: # If not set and create is true, a name is generated using the fullname template name: "" -podAnnotations: {} - # Provide additional labels which get attached to all deployed resources labels: stackable.tech/vendor: Stackable -podSecurityContext: {} - # fsGroup: 2000 - -nodeSelector: {} - -tolerations: [] - -affinity: {} - -# priorityClassName: ... - -# When running on a non-default Kubernetes cluster domain, the cluster domain can be configured here. -# See the https://docs.stackable.tech/home/stable/guides/kubernetes-cluster-domain guide for details.
-# kubernetesClusterDomain: my-cluster.local - -# Kubelet dir may vary in environments such as microk8s, see https://github.com/stackabletech/secret-operator/issues/229 -kubeletDir: /var/lib/kubelet - +# Customize default custom resources deployed by the operator secretClasses: tls: # The namespace that the TLS Certificate Authority is installed into. diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index a31884b5..1ec67d61 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -4,18 +4,21 @@ use std::{os::unix::prelude::FileTypeExt, path::PathBuf}; -use anyhow::Context; +use anyhow::{Context, Ok, anyhow}; use clap::Parser; use csi_server::{ controller::SecretProvisionerController, identity::SecretProvisionerIdentity, node::SecretProvisionerNode, }; -use futures::{FutureExt, TryStreamExt}; +use futures::{FutureExt, TryFutureExt, TryStreamExt, try_join}; use grpc::csi::v1::{ controller_server::ControllerServer, identity_server::IdentityServer, node_server::NodeServer, }; use stackable_operator::{ - YamlSchema, cli::RunArguments, eos::EndOfSupportChecker, shared::yaml::SerializeOptions, + YamlSchema, + cli::{Command, CommonOptions, RunArguments}, + eos::EndOfSupportChecker, + shared::yaml::SerializeOptions, telemetry::Tracing, }; use tokio::signal::unix::{SignalKind, signal}; @@ -38,14 +41,33 @@ pub const OPERATOR_NAME: &str = "secrets.stackable.tech"; #[derive(clap::Parser)] #[clap(author, version)] -struct Opts { +struct Cli { #[clap(subcommand)] - cmd: stackable_operator::cli::Command, + cmd: Command, } #[derive(clap::Parser)] struct SecretOperatorRun { - #[clap(long, env)] + /// The run mode in which this operator should run in. + #[command(subcommand)] + mode: RunMode, + + #[clap(flatten)] + common: RunArguments, +} + +#[derive(Debug, clap::Subcommand)] +enum RunMode { + /// Run the CSI server, one per Kubernetes cluster node. + CsiNodeService(CsiNodeServiceArguments), + + /// Run the controller, one per Kubernetes cluster. + Controller, +} + +#[derive(Debug, clap::Args)] +struct CsiNodeServiceArguments { + #[arg(long, env)] csi_endpoint: PathBuf, /// Unprivileged mode disables any features that require running secret-operator in a privileged container. @@ -54,11 +76,8 @@ struct SecretOperatorRun { /// - Secret volumes will be stored on disk, rather than in a ramdisk /// /// Unprivileged mode is EXPERIMENTAL and heavily discouraged, since it increases the risk of leaking secrets. - #[clap(long, env)] + #[arg(long, env)] privileged: bool, - - #[clap(flatten)] - common: RunArguments, } mod built_info { @@ -67,32 +86,34 @@ mod built_info { #[tokio::main] async fn main() -> anyhow::Result<()> { - let opts = Opts::parse(); - match opts.cmd { - stackable_operator::cli::Command::Crd => { + let cli = Cli::parse(); + + match cli.cmd { + Command::Crd => { SecretClass::merged_crd(crd::SecretClassVersion::V1Alpha1)? .print_yaml_schema(built_info::PKG_VERSION, SerializeOptions::default())?; TrustStore::merged_crd(crd::TrustStoreVersion::V1Alpha1)? 
.print_yaml_schema(built_info::PKG_VERSION, SerializeOptions::default())?; } - stackable_operator::cli::Command::Run(SecretOperatorRun { - csi_endpoint, - privileged, - common: - RunArguments { - operator_environment: _, - product_config: _, - watch_namespace, - maintenance, - common, - }, - }) => { + Command::Run(SecretOperatorRun { common, mode }) => { + let RunArguments { + operator_environment: _, + product_config: _, + watch_namespace, + maintenance, + common, + } = common; + + let CommonOptions { + telemetry, + cluster_info, + } = common; + // NOTE (@NickLarsenNZ): Before stackable-telemetry was used: // - The console log level was set by `SECRET_PROVISIONER_LOG`, and is now `CONSOLE_LOG` (when using Tracing::pre_configured). // - The file log level was set by `SECRET_PROVISIONER_LOG`, and is now set via `FILE_LOG` (when using Tracing::pre_configured). // - The file log directory was set by `SECRET_PROVISIONER_LOG_DIRECTORY`, and is now set by `ROLLING_LOGS_DIR` (or via `--rolling-logs `). - let _tracing_guard = - Tracing::pre_configured(built_info::PKG_NAME, common.telemetry).init()?; + let _tracing_guard = Tracing::pre_configured(built_info::PKG_NAME, telemetry).init()?; tracing::info!( built_info.pkg_version = built_info::PKG_VERSION, @@ -111,48 +132,63 @@ async fn main() -> anyhow::Result<()> { let client = stackable_operator::client::initialize_operator( Some(OPERATOR_NAME.to_string()), - &common.cluster_info, + &cluster_info, ) .await?; - if csi_endpoint - .symlink_metadata() - .is_ok_and(|meta| meta.file_type().is_socket()) - { - let _ = std::fs::remove_file(&csi_endpoint); - } - - let mut sigterm = signal(SignalKind::terminate())?; - - let csi_server = Server::builder() - .add_service( - tonic_reflection::server::Builder::configure() - .include_reflection_service(true) - .register_encoded_file_descriptor_set(grpc::FILE_DESCRIPTOR_SET_BYTES) - .build_v1()?, - ) - .add_service(IdentityServer::new(SecretProvisionerIdentity)) - .add_service(ControllerServer::new(SecretProvisionerController { - client: client.clone(), - })) - .add_service(NodeServer::new(SecretProvisionerNode { - client: client.clone(), - node_name: common.cluster_info.kubernetes_node_name.to_owned(), + match mode { + RunMode::CsiNodeService(CsiNodeServiceArguments { + csi_endpoint, privileged, - })) - .serve_with_incoming_shutdown( - UnixListenerStream::new( - uds_bind_private(csi_endpoint).context("failed to bind CSI listener")?, - ) - .map_ok(TonicUnixStream), - sigterm.recv().map(|_| ()), - ); - - let truststore_controller = - truststore_controller::start(&client, &watch_namespace).map(Ok); - - futures::try_join!(csi_server, truststore_controller, eos_checker)?; + }) => { + if csi_endpoint + .symlink_metadata() + .is_ok_and(|meta| meta.file_type().is_socket()) + { + let _ = std::fs::remove_file(&csi_endpoint); + } + + let mut sigterm = signal(SignalKind::terminate())?; + + let csi_server = Server::builder() + .add_service( + tonic_reflection::server::Builder::configure() + .include_reflection_service(true) + .register_encoded_file_descriptor_set( + grpc::FILE_DESCRIPTOR_SET_BYTES, + ) + .build_v1()?, + ) + .add_service(IdentityServer::new(SecretProvisionerIdentity)) + .add_service(ControllerServer::new(SecretProvisionerController { + client: client.clone(), + })) + .add_service(NodeServer::new(SecretProvisionerNode { + node_name: cluster_info.kubernetes_node_name, + privileged, + client, + })) + .serve_with_incoming_shutdown( + UnixListenerStream::new( + uds_bind_private(csi_endpoint) + .context("failed to 
bind CSI listener")?, + ) + .map_ok(TonicUnixStream), + sigterm.recv().map(|_| ()), + ) + .map_err(|err| anyhow!(err).context("failed to run CSI server")); + + try_join!(csi_server, eos_checker)?; + } + RunMode::Controller => { + let truststore_controller = + truststore_controller::start(client, &watch_namespace).map(anyhow::Ok); + + try_join!(truststore_controller, eos_checker)?; + } + } } } + Ok(()) } diff --git a/rust/operator-binary/src/truststore_controller.rs b/rust/operator-binary/src/truststore_controller.rs index 28bec96d..3c9f599c 100644 --- a/rust/operator-binary/src/truststore_controller.rs +++ b/rust/operator-binary/src/truststore_controller.rs @@ -43,10 +43,10 @@ use crate::{ const CONTROLLER_NAME: &str = "truststore"; const FULL_CONTROLLER_NAME: &str = concatcp!(CONTROLLER_NAME, ".", OPERATOR_NAME); -pub async fn start(client: &stackable_operator::client::Client, watch_namespace: &WatchNamespace) { +pub async fn start(client: stackable_operator::client::Client, watch_namespace: &WatchNamespace) { let (secretclasses, secretclasses_writer) = reflector::store(); let controller = Controller::new( - watch_namespace.get_api::>(client), + watch_namespace.get_api::>(&client), watcher::Config::default(), ); let truststores = controller.store(); @@ -82,16 +82,16 @@ pub async fn start(client: &stackable_operator::client::Client, watch_namespace: ) // TODO: merge this into the other ConfigMap watch .owns( - watch_namespace.get_api::>(client), + watch_namespace.get_api::>(&client), watcher::Config::default(), ) // TODO: merge this into the other Secret watch .owns( - watch_namespace.get_api::>(client), + watch_namespace.get_api::>(&client), watcher::Config::default(), ) .watches( - watch_namespace.get_api::>(client), + watch_namespace.get_api::>(&client), watcher::Config::default(), secretclass_dependency_watch_mapper( truststores.clone(), @@ -100,7 +100,7 @@ pub async fn start(client: &stackable_operator::client::Client, watch_namespace: ), ) .watches( - watch_namespace.get_api::>(client), + watch_namespace.get_api::>(&client), watcher::Config::default(), secretclass_dependency_watch_mapper( truststores, @@ -108,13 +108,7 @@ pub async fn start(client: &stackable_operator::client::Client, watch_namespace: |secretclass, secret| secretclass.spec.backend.refers_to_secret(secret), ), ) - .run( - reconcile, - error_policy, - Arc::new(Ctx { - client: client.clone(), - }), - ) + .run(reconcile, error_policy, Arc::new(Ctx { client })) .for_each_concurrent(16, move |res| { let event_recorder = event_recorder.clone(); async move {
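
The user-facing effect of the values restructuring above is easiest to see as a migration sketch. Below is a minimal, hypothetical override file written against the new layout: the key paths follow the restructured values.yaml in this change, while the concrete values (node selector, toleration, resource numbers, image tag) are placeholders for illustration only.

# Hypothetical user overrides for the new values layout (placeholder values, not recommendations)
image:
  repository: oci.stackable.tech/sdp/secret-operator   # shared by the controller and the CSI node driver
  pullPolicy: IfNotPresent

controllerService:
  # was top-level podAnnotations, podSecurityContext, nodeSelector, tolerations and affinity
  nodeSelector:
    kubernetes.io/os: linux
  resources:
    limits:
      cpu: 100m
      memory: 128Mi

csiNodeDriver:
  kubeletDir: /var/lib/kubelet        # was top-level kubeletDir
  tolerations:
    - operator: Exists
  nodeService:
    securityContext:
      privileged: true                # was secretOperator.securityContext.privileged
  externalProvisioner:                # was top-level externalProvisioner
    image:
      tag: v5.3.0

In the same way, the binary now starts in one of two modes: the single-replica Deployment runs `secret-operator run controller`, while the per-node DaemonSet runs `secret-operator run csi-node-service`, with the CSI socket path taken from the `CSI_ENDPOINT` environment variable set by the chart and the `PRIVILEGED` environment variable feeding the `privileged` argument.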