diff --git a/Makefile b/Makefile index 21ecf8872..c0a737df9 100644 --- a/Makefile +++ b/Makefile @@ -223,21 +223,9 @@ image-registry: ## Build the testdata catalog used for e2e tests and push it to test-e2e: KIND_CLUSTER_NAME := operator-controller-e2e test-e2e: KUSTOMIZE_BUILD_DIR := config/overlays/e2e test-e2e: GO_BUILD_FLAGS := -cover +test-e2e: CATALOGD_KUSTOMIZE_BUILD_DIR := catalogd/config/overlays/e2e test-e2e: run image-registry e2e e2e-coverage kind-clean #HELP Run e2e test suite on local kind cluster -# Catalogd e2e tests -FOCUS := $(if $(TEST),-v -focus "$(TEST)") -ifeq ($(origin E2E_FLAGS), undefined) -E2E_FLAGS := -endif -test-catalogd-e2e: ## Run the e2e tests on existing cluster - $(GINKGO) $(E2E_FLAGS) -trace -vv $(FOCUS) test/catalogd-e2e - -catalogd-e2e: KIND_CLUSTER_NAME := catalogd-e2e -catalogd-e2e: ISSUER_KIND := Issuer -catalogd-e2e: ISSUER_NAME := selfsigned-issuer -catalogd-e2e: CATALOGD_KUSTOMIZE_BUILD_DIR := catalogd/config/overlays/e2e -catalogd-e2e: run catalogd-image-registry test-catalogd-e2e ## kind-clean Run e2e test suite on local kind cluster ## image-registry target has to come after run-latest-release, ## because the image-registry depends on the olm-ca issuer. diff --git a/Tiltfile b/Tiltfile index 7aa07e811..5682e106c 100644 --- a/Tiltfile +++ b/Tiltfile @@ -2,7 +2,7 @@ load('.tilt-support', 'deploy_repo') operator_controller = { 'image': 'quay.io/operator-framework/operator-controller', - 'yaml': 'config/overlays/cert-manager', + 'yaml': 'config/overlays/tilt-local-dev', 'binaries': { './cmd/operator-controller': 'operator-controller-controller-manager', }, diff --git a/catalogd/cmd/catalogd/main.go b/catalogd/cmd/catalogd/main.go index e8b3ecf66..cd81b3668 100644 --- a/catalogd/cmd/catalogd/main.go +++ b/catalogd/cmd/catalogd/main.go @@ -30,7 +30,7 @@ import ( "github.com/containers/image/v5/types" "github.com/sirupsen/logrus" - "github.com/spf13/pflag" + "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/fields" k8slabels "k8s.io/apimachinery/pkg/labels" @@ -71,6 +71,7 @@ import ( var ( scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") + cfg = &config{} ) const ( @@ -78,107 +79,133 @@ const ( authFilePrefix = "catalogd-global-pull-secret" ) +type config struct { + metricsAddr string + enableLeaderElection bool + probeAddr string + pprofAddr string + systemNamespace string + catalogServerAddr string + externalAddr string + cacheDir string + gcInterval time.Duration + certFile string + keyFile string + webhookPort int + pullCasDir string + globalPullSecret string + // Generated config + globalPullSecretKey *k8stypes.NamespacedName +} + +var catalogdCmd = &cobra.Command{ + Use: "catalogd", + Short: "Catalogd is a Kubernetes operator for managing operator catalogs", + RunE: func(cmd *cobra.Command, args []string) error { + if err := validateConfig(cfg); err != nil { + return err + } + cmd.SilenceUsage = true + return run(ctrl.SetupSignalHandler()) + }, +} + +var versionCommand = &cobra.Command{ + Use: "version", + Short: "Print the version information", + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("%#v\n", version.String()) + }, +} + func init() { - utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + // create flagset, the collection of flags for this command + flags := catalogdCmd.Flags() + flags.StringVar(&cfg.metricsAddr, "metrics-bind-address", "", "The address for the metrics endpoint. Requires tls-cert and tls-key. 
(Default: ':7443')") + flags.StringVar(&cfg.probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flags.StringVar(&cfg.pprofAddr, "pprof-bind-address", "0", "The address the pprof endpoint binds to. an empty string or 0 disables pprof") + flags.BoolVar(&cfg.enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager") + flags.StringVar(&cfg.systemNamespace, "system-namespace", "", "The namespace catalogd uses for internal state") + flags.StringVar(&cfg.catalogServerAddr, "catalogs-server-addr", ":8443", "The address where catalogs' content will be accessible") + flags.StringVar(&cfg.externalAddr, "external-address", "catalogd-service.olmv1-system.svc", "External address for http(s) server") + flags.StringVar(&cfg.cacheDir, "cache-dir", "/var/cache/", "Directory for file based caching") + flags.DurationVar(&cfg.gcInterval, "gc-interval", 12*time.Hour, "Garbage collection interval") + flags.StringVar(&cfg.certFile, "tls-cert", "", "Certificate file for TLS") + flags.StringVar(&cfg.keyFile, "tls-key", "", "Key file for TLS") + flags.IntVar(&cfg.webhookPort, "webhook-server-port", 9443, "Webhook server port") + flag.StringVar(&cfg.pullCasDir, "pull-cas-dir", "", "The directory of TLS certificate authoritiess to use for verifying HTTPS copullCasDirnnections to image registries.") + flags.StringVar(&cfg.globalPullSecret, "global-pull-secret", "", "Global pull secret (/)") + + // adds version subcommand + catalogdCmd.AddCommand(versionCommand) + + // Add other flags + klog.InitFlags(flag.CommandLine) + flags.AddGoFlagSet(flag.CommandLine) + features.CatalogdFeatureGate.AddFlag(flags) + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(catalogdv1.AddToScheme(scheme)) - //+kubebuilder:scaffold:scheme + ctrl.SetLogger(textlogger.NewLogger(textlogger.NewConfig())) } func main() { - var ( - metricsAddr string - enableLeaderElection bool - probeAddr string - pprofAddr string - catalogdVersion bool - systemNamespace string - catalogServerAddr string - externalAddr string - cacheDir string - gcInterval time.Duration - certFile string - keyFile string - webhookPort int - pullCasDir string - globalPullSecret string - ) - flag.StringVar(&metricsAddr, "metrics-bind-address", "", "The address for the metrics endpoint. Requires tls-cert and tls-key. (Default: ':7443')") - flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") - flag.StringVar(&pprofAddr, "pprof-bind-address", "0", "The address the pprof endpoint binds to. an empty string or 0 disables pprof") - flag.BoolVar(&enableLeaderElection, "leader-elect", false, - "Enable leader election for controller manager. 
"+ - "Enabling this will ensure there is only one active controller manager.") - flag.StringVar(&systemNamespace, "system-namespace", "", "The namespace catalogd uses for internal state, configuration, and workloads") - flag.StringVar(&catalogServerAddr, "catalogs-server-addr", ":8443", "The address where the unpacked catalogs' content will be accessible") - flag.StringVar(&externalAddr, "external-address", "catalogd-service.olmv1-system.svc", "The external address at which the http(s) server is reachable.") - flag.StringVar(&cacheDir, "cache-dir", "/var/cache/", "The directory in the filesystem that catalogd will use for file based caching") - flag.BoolVar(&catalogdVersion, "version", false, "print the catalogd version and exit") - flag.DurationVar(&gcInterval, "gc-interval", 12*time.Hour, "interval in which garbage collection should be run against the catalog content cache") - flag.StringVar(&certFile, "tls-cert", "", "The certificate file used for serving catalog and metrics. Required to enable the metrics server. Requires tls-key.") - flag.StringVar(&keyFile, "tls-key", "", "The key file used for serving catalog contents and metrics. Required to enable the metrics server. Requires tls-cert.") - flag.IntVar(&webhookPort, "webhook-server-port", 9443, "The port that the mutating webhook server serves at.") - flag.StringVar(&pullCasDir, "pull-cas-dir", "", "The directory of TLS certificate authoritiess to use for verifying HTTPS connections to image registries.") - flag.StringVar(&globalPullSecret, "global-pull-secret", "", "The / of the global pull secret that is going to be used to pull bundle images.") - - klog.InitFlags(flag.CommandLine) - if klog.V(4).Enabled() { - logrus.SetLevel(logrus.DebugLevel) + if err := catalogdCmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) } +} - // Combine both flagsets and parse them - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - features.CatalogdFeatureGate.AddFlag(pflag.CommandLine) - pflag.Parse() +func validateConfig(cfg *config) error { + if (cfg.certFile != "" && cfg.keyFile == "") || (cfg.certFile == "" && cfg.keyFile != "") { + err := fmt.Errorf("tls-cert and tls-key flags must be used together") + setupLog.Error(err, "missing TLS configuration", + "certFile", cfg.certFile, "keyFile", cfg.keyFile) + return err + } - if catalogdVersion { - fmt.Printf("%#v\n", version.String()) - os.Exit(0) + if cfg.metricsAddr != "" && cfg.certFile == "" && cfg.keyFile == "" { + err := fmt.Errorf("metrics-bind-address requires tls-cert and tls-key flags") + setupLog.Error(err, "invalid metrics configuration", + "metricsAddr", cfg.metricsAddr, "certFile", cfg.certFile, "keyFile", cfg.keyFile) + return err } - ctrl.SetLogger(textlogger.NewLogger(textlogger.NewConfig())) + if cfg.certFile != "" && cfg.keyFile != "" && cfg.metricsAddr == "" { + cfg.metricsAddr = ":7443" + } - authFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("%s-%s.json", authFilePrefix, apimachineryrand.String(8))) - var globalPullSecretKey *k8stypes.NamespacedName - if globalPullSecret != "" { - secretParts := strings.Split(globalPullSecret, "/") + if cfg.globalPullSecret != "" { + secretParts := strings.Split(cfg.globalPullSecret, "/") if len(secretParts) != 2 { - setupLog.Error(fmt.Errorf("incorrect number of components"), "value of global-pull-secret should be of the format /") - os.Exit(1) + err := errors.New("value of global-pull-secret should be of the format /") + setupLog.Error(err, "incorrect number of components", + "globalPullSecret", 
cfg.globalPullSecret) + return err } - globalPullSecretKey = &k8stypes.NamespacedName{Name: secretParts[1], Namespace: secretParts[0]} + cfg.globalPullSecretKey = &k8stypes.NamespacedName{Name: secretParts[1], Namespace: secretParts[0]} } - if (certFile != "" && keyFile == "") || (certFile == "" && keyFile != "") { - setupLog.Error(errors.New("missing TLS configuration"), - "tls-cert and tls-key flags must be used together", - "certFile", certFile, "keyFile", keyFile) - os.Exit(1) - } + return nil +} - if metricsAddr != "" && certFile == "" && keyFile == "" { - setupLog.Error(errors.New("invalid metrics configuration"), - "metrics-bind-address requires tls-cert and tls-key flags to be set", - "metricsAddr", metricsAddr, "certFile", certFile, "keyFile", keyFile) - os.Exit(1) +func run(ctx context.Context) error { + if klog.V(4).Enabled() { + logrus.SetLevel(logrus.DebugLevel) } - if certFile != "" && keyFile != "" && metricsAddr == "" { - metricsAddr = ":7443" - } + authFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("%s-%s.json", authFilePrefix, apimachineryrand.String(8))) protocol := "http://" - if certFile != "" && keyFile != "" { + if cfg.certFile != "" && cfg.keyFile != "" { protocol = "https://" } - externalAddr = protocol + externalAddr - - cfg := ctrl.GetConfigOrDie() + cfg.externalAddr = protocol + cfg.externalAddr - cw, err := certwatcher.New(certFile, keyFile) + cw, err := certwatcher.New(cfg.certFile, cfg.keyFile) if err != nil { setupLog.Error(err, "failed to initialize certificate watcher") - os.Exit(1) + return err } tlsOpts := func(config *tls.Config) { @@ -194,17 +221,17 @@ func main() { // Create webhook server and configure TLS webhookServer := crwebhook.NewServer(crwebhook.Options{ - Port: webhookPort, + Port: cfg.webhookPort, TLSOpts: []func(*tls.Config){ tlsOpts, }, }) metricsServerOptions := metricsserver.Options{} - if len(certFile) > 0 && len(keyFile) > 0 { - setupLog.Info("Starting metrics server with TLS enabled", "addr", metricsAddr, "tls-cert", certFile, "tls-key", keyFile) + if len(cfg.certFile) > 0 && len(cfg.keyFile) > 0 { + setupLog.Info("Starting metrics server with TLS enabled", "addr", cfg.metricsAddr, "tls-cert", cfg.certFile, "tls-key", cfg.keyFile) - metricsServerOptions.BindAddress = metricsAddr + metricsServerOptions.BindAddress = cfg.metricsAddr metricsServerOptions.SecureServing = true metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization @@ -222,13 +249,13 @@ func main() { cacheOptions := crcache.Options{ ByObject: map[client.Object]crcache.ByObject{}, } - if globalPullSecretKey != nil { + if cfg.globalPullSecretKey != nil { cacheOptions.ByObject[&corev1.Secret{}] = crcache.ByObject{ Namespaces: map[string]crcache.Config{ - globalPullSecretKey.Namespace: { + cfg.globalPullSecretKey.Namespace: { LabelSelector: k8slabels.Everything(), FieldSelector: fields.SelectorFromSet(map[string]string{ - "metadata.name": globalPullSecretKey.Name, + "metadata.name": cfg.globalPullSecretKey.Name, }), }, }, @@ -236,12 +263,12 @@ func main() { } // Create manager - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, Metrics: metricsServerOptions, - PprofBindAddress: pprofAddr, - HealthProbeBindAddress: probeAddr, - LeaderElection: enableLeaderElection, + PprofBindAddress: cfg.pprofAddr, + HealthProbeBindAddress: cfg.probeAddr, + LeaderElection: cfg.enableLeaderElection, LeaderElectionID: "catalogd-operator-lock", LeaderElectionReleaseOnCancel: true, // 
Recommended Leader Election values @@ -255,29 +282,29 @@ func main() { }) if err != nil { setupLog.Error(err, "unable to create manager") - os.Exit(1) + return err } // Add the certificate watcher to the manager err = mgr.Add(cw) if err != nil { setupLog.Error(err, "unable to add certificate watcher to manager") - os.Exit(1) + return err } - if systemNamespace == "" { - systemNamespace = podNamespace() + if cfg.systemNamespace == "" { + cfg.systemNamespace = podNamespace() } - if err := fsutil.EnsureEmptyDirectory(cacheDir, 0700); err != nil { + if err := fsutil.EnsureEmptyDirectory(cfg.cacheDir, 0700); err != nil { setupLog.Error(err, "unable to ensure empty cache directory") - os.Exit(1) + return err } - unpackCacheBasePath := filepath.Join(cacheDir, "unpack") + unpackCacheBasePath := filepath.Join(cfg.cacheDir, "unpack") if err := os.MkdirAll(unpackCacheBasePath, 0770); err != nil { setupLog.Error(err, "unable to create cache directory for unpacking") - os.Exit(1) + return err } imageCache := imageutil.CatalogCache(unpackCacheBasePath) @@ -285,10 +312,10 @@ func main() { SourceCtxFunc: func(ctx context.Context) (*types.SystemContext, error) { logger := log.FromContext(ctx) srcContext := &types.SystemContext{ - DockerCertPath: pullCasDir, - OCICertPath: pullCasDir, + DockerCertPath: cfg.pullCasDir, + OCICertPath: cfg.pullCasDir, } - if _, err := os.Stat(authFilePath); err == nil && globalPullSecretKey != nil { + if _, err := os.Stat(authFilePath); err == nil && cfg.globalPullSecretKey != nil { logger.Info("using available authentication information for pulling image") srcContext.AuthFilePath = authFilePath } else if os.IsNotExist(err) { @@ -303,16 +330,16 @@ func main() { var localStorage storage.Instance metrics.Registry.MustRegister(catalogdmetrics.RequestDurationMetric) - storeDir := filepath.Join(cacheDir, storageDir) + storeDir := filepath.Join(cfg.cacheDir, storageDir) if err := os.MkdirAll(storeDir, 0700); err != nil { setupLog.Error(err, "unable to create storage directory for catalogs") - os.Exit(1) + return err } - baseStorageURL, err := url.Parse(fmt.Sprintf("%s/catalogs/", externalAddr)) + baseStorageURL, err := url.Parse(fmt.Sprintf("%s/catalogs/", cfg.externalAddr)) if err != nil { setupLog.Error(err, "unable to create base storage URL") - os.Exit(1) + return err } localStorage = &storage.LocalDirV1{ @@ -323,17 +350,17 @@ func main() { // Config for the catalogd web server catalogServerConfig := serverutil.CatalogServerConfig{ - ExternalAddr: externalAddr, - CatalogAddr: catalogServerAddr, - CertFile: certFile, - KeyFile: keyFile, + ExternalAddr: cfg.externalAddr, + CatalogAddr: cfg.catalogServerAddr, + CertFile: cfg.certFile, + KeyFile: cfg.keyFile, LocalStorage: localStorage, } err = serverutil.AddCatalogServerToManager(mgr, catalogServerConfig, cw) if err != nil { setupLog.Error(err, "unable to configure catalog server") - os.Exit(1) + return err } if err = (&corecontrollers.ClusterCatalogReconciler{ @@ -343,65 +370,65 @@ func main() { Storage: localStorage, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ClusterCatalog") - os.Exit(1) + return err } - if globalPullSecretKey != nil { - setupLog.Info("creating SecretSyncer controller for watching secret", "Secret", globalPullSecret) + if cfg.globalPullSecretKey != nil { + setupLog.Info("creating SecretSyncer controller for watching secret", "Secret", cfg.globalPullSecret) err := (&corecontrollers.PullSecretReconciler{ Client: mgr.GetClient(), AuthFilePath: 
authFilePath, - SecretKey: *globalPullSecretKey, + SecretKey: *cfg.globalPullSecretKey, }).SetupWithManager(mgr) if err != nil { setupLog.Error(err, "unable to create controller", "controller", "SecretSyncer") - os.Exit(1) + return err } } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up health check") - os.Exit(1) + return err } if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up ready check") - os.Exit(1) + return err } - metaClient, err := metadata.NewForConfig(cfg) + metaClient, err := metadata.NewForConfig(mgr.GetConfig()) if err != nil { setupLog.Error(err, "unable to setup client for garbage collection") - os.Exit(1) + return err } - ctx := ctrl.SetupSignalHandler() gc := &garbagecollection.GarbageCollector{ CachePath: unpackCacheBasePath, Logger: ctrl.Log.WithName("garbage-collector"), MetadataClient: metaClient, - Interval: gcInterval, + Interval: cfg.gcInterval, } if err := mgr.Add(gc); err != nil { setupLog.Error(err, "unable to add garbage collector to manager") - os.Exit(1) + return err } // mutating webhook that labels ClusterCatalogs with name label if err = (&webhook.ClusterCatalog{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "ClusterCatalog") - os.Exit(1) + return err } setupLog.Info("starting mutating webhook manager") if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") - os.Exit(1) + return err } if err := os.Remove(authFilePath); err != nil { setupLog.Error(err, "failed to cleanup temporary auth file") - os.Exit(1) + return err } + return nil } func podNamespace() string { diff --git a/catalogd/config/components/registries-conf/registries_conf_configmap.yaml b/catalogd/config/components/registries-conf/registries_conf_configmap.yaml index 3561bbe59..2604c78f5 100644 --- a/catalogd/config/components/registries-conf/registries_conf_configmap.yaml +++ b/catalogd/config/components/registries-conf/registries_conf_configmap.yaml @@ -6,6 +6,5 @@ metadata: data: registries.conf: | [[registry]] - prefix = "docker-registry.catalogd-e2e.svc:5000" - insecure = true - location = "docker-registry.catalogd-e2e.svc:5000" + prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" + location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" diff --git a/cmd/operator-controller/main.go b/cmd/operator-controller/main.go index 51db2fe14..2a46afc6d 100644 --- a/cmd/operator-controller/main.go +++ b/cmd/operator-controller/main.go @@ -30,7 +30,7 @@ import ( "github.com/containers/image/v5/types" "github.com/sirupsen/logrus" - "github.com/spf13/pflag" + "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" apiextensionsv1client "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" "k8s.io/apimachinery/pkg/fields" @@ -78,8 +78,22 @@ var ( setupLog = ctrl.Log.WithName("setup") defaultSystemNamespace = "olmv1-system" certWatcher *certwatcher.CertWatcher + cfg = &config{} ) +type config struct { + metricsAddr string + certFile string + keyFile string + enableLeaderElection bool + probeAddr string + cachePath string + systemNamespace string + catalogdCasDir string + pullCasDir string + globalPullSecret string +} + const authFilePrefix = "operator-controller-global-pull-secrets" // podNamespace checks whether the controller is running in a Pod vs. 
@@ -94,83 +108,95 @@ func podNamespace() string { return string(namespace) } -func main() { - var ( - metricsAddr string - certFile string - keyFile string - enableLeaderElection bool - probeAddr string - cachePath string - operatorControllerVersion bool - systemNamespace string - catalogdCasDir string - pullCasDir string - globalPullSecret string - ) - flag.StringVar(&metricsAddr, "metrics-bind-address", "", "The address for the metrics endpoint. Requires tls-cert and tls-key. (Default: ':8443')") - flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") - flag.StringVar(&catalogdCasDir, "catalogd-cas-dir", "", "The directory of TLS certificate authorities to use for verifying HTTPS connections to the Catalogd web service.") - flag.StringVar(&pullCasDir, "pull-cas-dir", "", "The directory of TLS certificate authorities to use for verifying HTTPS connections to image registries.") - flag.StringVar(&certFile, "tls-cert", "", "The certificate file used for the metrics server. Required to enable the metrics server. Requires tls-key.") - flag.StringVar(&keyFile, "tls-key", "", "The key file used for the metrics server. Required to enable the metrics server. Requires tls-cert") - flag.BoolVar(&enableLeaderElection, "leader-elect", false, +var operatorControllerCmd = &cobra.Command{ + Use: "operator-controller", + Short: "operator-controller is the central component of Operator Lifecycle Manager (OLM) v1", + RunE: func(cmd *cobra.Command, args []string) error { + if err := validateMetricsFlags(); err != nil { + return err + } + return run() + }, +} + +var versionCommand = &cobra.Command{ + Use: "version", + Short: "Prints operator-controller version information", + Run: func(cmd *cobra.Command, args []string) { + fmt.Println(version.String()) + }, +} + +func init() { + //create flagset, the collection of flags for this command + flags := operatorControllerCmd.Flags() + flags.StringVar(&cfg.metricsAddr, "metrics-bind-address", "", "The address for the metrics endpoint. Requires tls-cert and tls-key. (Default: ':8443')") + flags.StringVar(&cfg.probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flags.StringVar(&cfg.catalogdCasDir, "catalogd-cas-dir", "", "The directory of TLS certificate authorities to use for verifying HTTPS connections to the Catalogd web service.") + flags.StringVar(&cfg.pullCasDir, "pull-cas-dir", "", "The directory of TLS certificate authorities to use for verifying HTTPS connections to image registries.") + flags.StringVar(&cfg.certFile, "tls-cert", "", "The certificate file used for the metrics server. Required to enable the metrics server. Requires tls-key.") + flags.StringVar(&cfg.keyFile, "tls-key", "", "The key file used for the metrics server. Required to enable the metrics server. Requires tls-cert") + flags.BoolVar(&cfg.enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") - flag.StringVar(&cachePath, "cache-path", "/var/cache", "The local directory path used for filesystem based caching") - flag.BoolVar(&operatorControllerVersion, "version", false, "Prints operator-controller version information") - flag.StringVar(&systemNamespace, "system-namespace", "", "Configures the namespace that gets used to deploy system resources.") - flag.StringVar(&globalPullSecret, "global-pull-secret", "", "The / of the global pull secret that is going to be used to pull bundle images.") + flags.StringVar(&cfg.cachePath, "cache-path", "/var/cache", "The local directory path used for filesystem based caching") + flags.StringVar(&cfg.systemNamespace, "system-namespace", "", "Configures the namespace that gets used to deploy system resources.") + flags.StringVar(&cfg.globalPullSecret, "global-pull-secret", "", "The / of the global pull secret that is going to be used to pull bundle images.") + + //adds version sub command + operatorControllerCmd.AddCommand(versionCommand) klog.InitFlags(flag.CommandLine) - if klog.V(4).Enabled() { - logrus.SetLevel(logrus.DebugLevel) - } - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - features.OperatorControllerFeatureGate.AddFlag(pflag.CommandLine) - pflag.Parse() + //add klog flags to flagset + flags.AddGoFlagSet(flag.CommandLine) - if operatorControllerVersion { - fmt.Println(version.String()) - os.Exit(0) - } + //add feature gate flags to flagset + features.OperatorControllerFeatureGate.AddFlag(flags) - if (certFile != "" && keyFile == "") || (certFile == "" && keyFile != "") { + ctrl.SetLogger(textlogger.NewLogger(textlogger.NewConfig())) +} +func validateMetricsFlags() error { + if (cfg.certFile != "" && cfg.keyFile == "") || (cfg.certFile == "" && cfg.keyFile != "") { setupLog.Error(errors.New("missing TLS configuration"), "tls-cert and tls-key flags must be used together", - "certFile", certFile, "keyFile", keyFile) - os.Exit(1) + "certFile", cfg.certFile, "keyFile", cfg.keyFile) + return fmt.Errorf("unable to configure TLS certificates: tls-cert and tls-key flags must be used together") } - if metricsAddr != "" && certFile == "" && keyFile == "" { + if cfg.metricsAddr != "" && cfg.certFile == "" && cfg.keyFile == "" { setupLog.Error(errors.New("invalid metrics configuration"), "metrics-bind-address requires tls-cert and tls-key flags to be set", - "metricsAddr", metricsAddr, "certFile", certFile, "keyFile", keyFile) - os.Exit(1) + "metricsAddr", cfg.metricsAddr, "certFile", cfg.certFile, "keyFile", cfg.keyFile) + return fmt.Errorf("metrics-bind-address requires tls-cert and tls-key flags to be set") } - if certFile != "" && keyFile != "" && metricsAddr == "" { - metricsAddr = ":8443" + if cfg.certFile != "" && cfg.keyFile != "" && cfg.metricsAddr == "" { + cfg.metricsAddr = ":8443" + } + return nil +} +func run() error { + if klog.V(4).Enabled() { + logrus.SetLevel(logrus.DebugLevel) } - - ctrl.SetLogger(textlogger.NewLogger(textlogger.NewConfig())) setupLog.Info("starting up the controller", "version info", version.String()) authFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("%s-%s.json", authFilePrefix, apimachineryrand.String(8))) var globalPullSecretKey *k8stypes.NamespacedName - if globalPullSecret != "" { - secretParts := strings.Split(globalPullSecret, "/") + if cfg.globalPullSecret != "" { + secretParts := strings.Split(cfg.globalPullSecret, "/") if len(secretParts) != 2 { - setupLog.Error(fmt.Errorf("incorrect number of components"), "value of 
global-pull-secret should be of the format <namespace>/<name>")
-			os.Exit(1)
+			err := fmt.Errorf("incorrect number of components")
+			setupLog.Error(err, "value of global-pull-secret should be of the format <namespace>/<name>")
+			return err
 		}
 		globalPullSecretKey = &k8stypes.NamespacedName{Name: secretParts[1], Namespace: secretParts[0]}
 	}
 
-	if systemNamespace == "" {
-		systemNamespace = podNamespace()
+	if cfg.systemNamespace == "" {
+		cfg.systemNamespace = podNamespace()
 	}
 
 	setupLog.Info("set up manager")
@@ -180,7 +206,7 @@ func main() {
 			&catalogd.ClusterCatalog{}: {Label: k8slabels.Everything()},
 		},
 		DefaultNamespaces: map[string]crcache.Config{
-			systemNamespace: {LabelSelector: k8slabels.Everything()},
+			cfg.systemNamespace: {LabelSelector: k8slabels.Everything()},
 		},
 		DefaultLabelSelector: k8slabels.Nothing(),
 	}
@@ -198,19 +224,19 @@ func main() {
 	}
 
 	metricsServerOptions := server.Options{}
-	if len(certFile) > 0 && len(keyFile) > 0 {
-		setupLog.Info("Starting metrics server with TLS enabled", "addr", metricsAddr, "tls-cert", certFile, "tls-key", keyFile)
+	if len(cfg.certFile) > 0 && len(cfg.keyFile) > 0 {
+		setupLog.Info("Starting metrics server with TLS enabled", "addr", cfg.metricsAddr, "tls-cert", cfg.certFile, "tls-key", cfg.keyFile)
 
-		metricsServerOptions.BindAddress = metricsAddr
+		metricsServerOptions.BindAddress = cfg.metricsAddr
 		metricsServerOptions.SecureServing = true
 		metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
 
 		// If the certificate files change, the watcher will reload them.
 		var err error
-		certWatcher, err = certwatcher.New(certFile, keyFile)
+		certWatcher, err = certwatcher.New(cfg.certFile, cfg.keyFile)
 		if err != nil {
 			setupLog.Error(err, "Failed to initialize certificate watcher")
-			os.Exit(1)
+			return err
 		}
 
 		metricsServerOptions.TLSOpts = append(metricsServerOptions.TLSOpts, func(config *tls.Config) {
@@ -239,8 +265,8 @@ func main() {
 	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
 		Scheme:                 scheme.Scheme,
 		Metrics:                metricsServerOptions,
-		HealthProbeBindAddress: probeAddr,
-		LeaderElection:         enableLeaderElection,
+		HealthProbeBindAddress: cfg.probeAddr,
+		LeaderElection:         cfg.enableLeaderElection,
 		LeaderElectionID:              "9c4404e7.operatorframework.io",
 		LeaderElectionReleaseOnCancel: true,
 		// Recommended Leader Election values
@@ -264,19 +290,19 @@ func main() {
 	})
 	if err != nil {
 		setupLog.Error(err, "unable to start manager")
-		os.Exit(1)
+		return err
 	}
 
 	coreClient, err := corev1client.NewForConfig(mgr.GetConfig())
 	if err != nil {
 		setupLog.Error(err, "unable to create core client")
-		os.Exit(1)
+		return err
 	}
 
 	tokenGetter := authentication.NewTokenGetter(coreClient, authentication.WithExpirationDuration(1*time.Hour))
 	clientRestConfigMapper := action.ServiceAccountRestConfigMapper(tokenGetter)
 
 	cfgGetter, err := helmclient.NewActionConfigGetter(mgr.GetConfig(), mgr.GetRESTMapper(),
-		helmclient.StorageDriverMapper(action.ChunkedStorageDriverMapper(coreClient, mgr.GetAPIReader(), systemNamespace)),
+		helmclient.StorageDriverMapper(action.ChunkedStorageDriverMapper(coreClient, mgr.GetAPIReader(), cfg.systemNamespace)),
 		helmclient.ClientNamespaceMapper(func(obj client.Object) (string, error) {
 			ext := obj.(*ocv1.ClusterExtension)
 			return ext.Spec.Namespace, nil
@@ -285,7 +311,7 @@ func main() {
 	)
 	if err != nil {
 		setupLog.Error(err, "unable to config for creating helm client")
-		os.Exit(1)
+		return err
 	}
 
 	acg, err := action.NewWrappedActionClientGetter(cfgGetter,
@@ -293,34 +319,34 @@ func main() {
 	)
 	if err != nil {
 		setupLog.Error(err, "unable to create helm client")
-
os.Exit(1) + return err } - certPoolWatcher, err := httputil.NewCertPoolWatcher(catalogdCasDir, ctrl.Log.WithName("cert-pool")) + certPoolWatcher, err := httputil.NewCertPoolWatcher(cfg.catalogdCasDir, ctrl.Log.WithName("cert-pool")) if err != nil { setupLog.Error(err, "unable to create CA certificate pool") - os.Exit(1) + return err } if certWatcher != nil { setupLog.Info("Adding certificate watcher to manager") if err := mgr.Add(certWatcher); err != nil { setupLog.Error(err, "unable to add certificate watcher to manager") - os.Exit(1) + return err } } - if err := fsutil.EnsureEmptyDirectory(cachePath, 0700); err != nil { + if err := fsutil.EnsureEmptyDirectory(cfg.cachePath, 0700); err != nil { setupLog.Error(err, "unable to ensure empty cache directory") - os.Exit(1) + return err } - imageCache := imageutil.BundleCache(filepath.Join(cachePath, "unpack")) + imageCache := imageutil.BundleCache(filepath.Join(cfg.cachePath, "unpack")) imagePuller := &imageutil.ContainersImagePuller{ SourceCtxFunc: func(ctx context.Context) (*types.SystemContext, error) { srcContext := &types.SystemContext{ - DockerCertPath: pullCasDir, - OCICertPath: pullCasDir, + DockerCertPath: cfg.pullCasDir, + OCICertPath: cfg.pullCasDir, } logger := log.FromContext(ctx) if _, err := os.Stat(authFilePath); err == nil && globalPullSecretKey != nil { @@ -340,15 +366,15 @@ func main() { return crfinalizer.Result{}, imageCache.Delete(ctx, obj.GetName()) })); err != nil { setupLog.Error(err, "unable to register finalizer", "finalizerKey", controllers.ClusterExtensionCleanupUnpackCacheFinalizer) - os.Exit(1) + return err } cl := mgr.GetClient() - catalogsCachePath := filepath.Join(cachePath, "catalogs") + catalogsCachePath := filepath.Join(cfg.cachePath, "catalogs") if err := os.MkdirAll(catalogsCachePath, 0700); err != nil { setupLog.Error(err, "unable to create catalogs cache directory") - os.Exit(1) + return err } catalogClientBackend := cache.NewFilesystemCache(catalogsCachePath) catalogClient := catalogclient.New(catalogClientBackend, func() (*http.Client, error) { @@ -374,7 +400,7 @@ func main() { aeClient, err := apiextensionsv1client.NewForConfig(mgr.GetConfig()) if err != nil { setupLog.Error(err, "unable to create apiextensions client") - os.Exit(1) + return err } preflights := []applier.Preflight{ @@ -394,7 +420,7 @@ func main() { })) if err != nil { setupLog.Error(err, "unable to register content manager cleanup finalizer") - os.Exit(1) + return err } if err = (&controllers.ClusterExtensionReconciler{ @@ -408,7 +434,7 @@ func main() { Manager: cm, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ClusterExtension") - os.Exit(1) + return err } if err = (&controllers.ClusterCatalogReconciler{ @@ -417,11 +443,11 @@ func main() { CatalogCachePopulator: catalogClient, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ClusterCatalog") - os.Exit(1) + return err } if globalPullSecretKey != nil { - setupLog.Info("creating SecretSyncer controller for watching secret", "Secret", globalPullSecret) + setupLog.Info("creating SecretSyncer controller for watching secret", "Secret", cfg.globalPullSecret) err := (&controllers.PullSecretReconciler{ Client: mgr.GetClient(), AuthFilePath: authFilePath, @@ -429,7 +455,7 @@ func main() { }).SetupWithManager(mgr) if err != nil { setupLog.Error(err, "unable to create controller", "controller", "SecretSyncer") - os.Exit(1) + return err } } @@ -437,21 +463,29 @@ func main() { if err 
:= mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up health check") - os.Exit(1) + return err } if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up ready check") - os.Exit(1) + return err } setupLog.Info("starting manager") ctx := ctrl.SetupSignalHandler() if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") - os.Exit(1) + return err } if err := os.Remove(authFilePath); err != nil { setupLog.Error(err, "failed to cleanup temporary auth file") + return err + } + return nil +} + +func main() { + if err := operatorControllerCmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) os.Exit(1) } } diff --git a/commitchecker.yaml b/commitchecker.yaml index 8e320b7c8..45e7fd682 100644 --- a/commitchecker.yaml +++ b/commitchecker.yaml @@ -1,4 +1,4 @@ -expectedMergeBase: ee8d8210ebea9586f637122acdba729ca5385e89 +expectedMergeBase: 7f00b13e85d1cb0d0092a29d5b9ca8424573e9e1 upstreamBranch: main upstreamOrg: operator-framework upstreamRepo: operator-controller diff --git a/config/overlays/tilt-local-dev/kustomization.yaml b/config/overlays/tilt-local-dev/kustomization.yaml new file mode 100644 index 000000000..81bc3ffdc --- /dev/null +++ b/config/overlays/tilt-local-dev/kustomization.yaml @@ -0,0 +1,16 @@ +# kustomization file for secure operator-controller +# DO NOT ADD A NAMESPACE HERE +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../../base +components: +- ../../components/tls +# ca must be last or tls will overwrite the namespaces +- ../../components/ca + +patches: + - target: + kind: Deployment + name: controller-manager + path: patches/dev-deployment.yaml diff --git a/config/overlays/tilt-local-dev/patches/dev-deployment.yaml b/config/overlays/tilt-local-dev/patches/dev-deployment.yaml new file mode 100644 index 000000000..2d7cb9467 --- /dev/null +++ b/config/overlays/tilt-local-dev/patches/dev-deployment.yaml @@ -0,0 +1,13 @@ +# remove livenessProbe and readinessProbe so container doesn't restart during breakpoints +- op: replace + path: /spec/template/spec/containers/0/livenessProbe + value: null +- op: replace + path: /spec/template/spec/containers/0/readinessProbe + value: null +- op: remove + # remove --leader-elect so container doesn't restart during breakpoints + path: /spec/template/spec/containers/0/args/2 +- op: add + path: /spec/template/spec/containers/0/args/- + value: --feature-gates=PreflightPermissions=true diff --git a/dev/local-debugging-with-tilt-and-vscode.md b/dev/local-debugging-with-tilt-and-vscode.md new file mode 100644 index 000000000..b74678b5b --- /dev/null +++ b/dev/local-debugging-with-tilt-and-vscode.md @@ -0,0 +1,48 @@ +# Local Debugging in VSCode with Tilt + +This tutorial will show you how to connect the go debugger in VSCode to your running +kind cluster with Tilt for live debugging. + +* Follow the instructions in [this document](podman/setup-local-env-podman.md) to set up your local kind cluster and image registry. +* Next, execute `tilt up` to start the Tilt service (if using podman, you might need to run `DOCKER_BUILDKIT=0 tilt up`). + +Press space to open the web UI where you can monitor the current status of operator-controller and catalogd inside Tilt. + +Create a `launch.json` file in your operator-controller repository if you do not already have one. 
+Add the following configurations:
+
+```json
+{
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Debug operator-controller via Tilt",
+            "type": "go",
+            "request": "attach",
+            "mode": "remote",
+            "port": 30000,
+            "host": "localhost",
+            "cwd": "${workspaceFolder}",
+            "trace": "verbose"
+        },
+        {
+            "name": "Debug catalogd via Tilt",
+            "type": "go",
+            "request": "attach",
+            "mode": "remote",
+            "port": 20000,
+            "host": "localhost",
+            "cwd": "${workspaceFolder}",
+            "trace": "verbose"
+        }
+    ]
+}
+```
+
+This creates two "Run and debug" entries in the Debug panel of VSCode.
+
+Now you can start either debug configuration depending on which component you want to debug.
+VSCode will connect the debugger to the port exposed by Tilt.
+
+Breakpoints should now be fully functional. The debugger can even maintain its
+connection through live code updates.
\ No newline at end of file
diff --git a/dev/podman/setup-local-env-podman.md b/dev/podman/setup-local-env-podman.md
index 3328caac0..5cbb16837 100644
--- a/dev/podman/setup-local-env-podman.md
+++ b/dev/podman/setup-local-env-podman.md
@@ -1,32 +1,36 @@
-## The following are Podman specific steps used to set up on a MacBook (Intel or Apple Silicon)
+# Configuring Podman for Tilt
 
-### Verify installed tools (install if needed)
+The following tutorial explains how to set up a local development environment using Podman and Tilt on a Linux host.
+A few notes on achieving the same result for MacOS are included at the end, but you will likely need to do some
+tinkering on your own.
+
+## Verify installed tools (install if needed)
+
+Ensure you have installed [Podman](https://podman.io/), [Kind](https://github.com/kubernetes-sigs/kind/), and [Tilt](https://tilt.dev/).
 
 ```sh
 $ podman --version
 podman version 5.0.1
 $ kind version
-kind v0.23.0 go1.22.3 darwin/arm64
-
-(optional)
+kind v0.26.0 go1.23.4 linux/amd64
 $ tilt version
-v0.33.12, built 2024-03-28
+v0.33.15, built 2024-05-31
 ```
 
-### Start Kind with a local registry
-Use this [helper script](./kind-with-registry-podman.sh) to create a local single-node Kind cluster with an attached local image registry.
+## Start Kind with a local registry
 
-#### Disable secure access on the local kind registry:
+Use this [helper script](./kind-with-registry-podman.sh) to create a local single-node Kind cluster with an attached local image registry.
 
-`podman inspect kind-registry --format '{{.NetworkSettings.Ports}}'`
-With the port you find for 127.0.0.1 edit the Podman machine's config file:
+## Disable secure access on the local kind registry
 
-`podman machine ssh`
+Verify the port used by the image registry:
 
-`sudo vi /etc/containers/registries.conf.d/100-kind.conf`
+```sh
+podman inspect kind-registry --format '{{.NetworkSettings.Ports}}'
+```
 
-Should look like:
+Edit `/etc/containers/registries.conf.d/100-kind.conf` so it contains the following, substituting 5001 if your registry is using a different port:
 
 ```ini
 [[registry]]
@@ -34,21 +38,66 @@ location = "localhost:5001"
 insecure = true
 ```
 
-### export DOCKER_HOST
+## Configure the Podman socket
 
-`export DOCKER_HOST=unix:///var/run/docker.sock`
+Tilt needs to connect to the Podman socket to initiate image builds. The socket address can differ
+depending on your host OS and whether you want to use rootful or rootless Podman. If you're not sure,
+you should use rootless.
+You can start the rootless Podman socket by running `systemctl --user start podman.socket`.
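+
+As a quick sanity check (a minimal sketch, assuming the default rootless socket path under `$XDG_RUNTIME_DIR`),
+you can ping the Docker-compatible API served on the socket; a healthy service should answer with `OK`:
+
+```sh
+# Ping the rootless Podman API socket; prints "OK" if the service is up.
+curl -s --unix-socket "$XDG_RUNTIME_DIR/podman/podman.sock" http://localhost/_ping
+```
+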
+If you would like to automatically start the socket in your user session, you can run +`systemctl --user enable --now podman.socket`. -### Optional - Start tilt with the tilt file in the parent directory +Find the location of your user socket with `systemctl --user status podman.socket`: -`DOCKER_BUILDKIT=0 tilt up` +```sh +● podman.socket - Podman API Socket + Loaded: loaded (/usr/lib/systemd/user/podman.socket; enabled; preset: disabled) + Active: active (listening) since Tue 2025-01-28 11:40:50 CST; 7s ago + Invocation: d9604e587f2a4581bc79cbe4efe9c7e7 + Triggers: ● podman.service + Docs: man:podman-system-service(1) + Listen: /run/user/1000/podman/podman.sock (Stream) + CGroup: /user.slice/user-1000.slice/user@1000.service/app.slice/podman.socket +``` -### Optional troubleshooting +The location of the socket is shown in the `Listen` section, which in the example above +is `/run/user/1000/podman/podman.sock`. -In some cases it may be needed to do +Set `DOCKER_HOST` to a unix address at the socket location: + +```sh +export DOCKER_HOST=unix:///run/user/1000/podman/podman.sock ``` -sudo podman-mac-helper install + +Some systems might symlink the Podman socket to a docker socket, in which case +you might need to try something like: + +```sh +export DOCKER_HOST=unix:///var/run/docker.sock ``` + +## Start Tilt + +Running Tilt with a container engine other than Docker requires setting `DOCKER_BUILDKIT=0`. +You can export this, or just run: + +```sh +DOCKER_BUILDKIT=0 tilt up ``` + +## MacOS Troubleshooting + +The instructions above are written for use on a Linux system. You should be able to create +the same or a similar configuration on MacOS, but specific steps will differ. + +In some cases you might need to run: + +```sh +sudo podman-mac-helper install + podman machine stop/start ``` + +When disabling secure access to the registry, you will need to first enter the Podman virtual machine: +`podman machine ssh` diff --git a/go.mod b/go.mod index 5eb57937e..5ca0635d8 100644 --- a/go.mod +++ b/go.mod @@ -24,20 +24,20 @@ require ( github.com/operator-framework/operator-registry v1.50.0 github.com/prometheus/client_golang v1.20.5 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/pflag v1.0.6 + github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.10.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c golang.org/x/sync v0.11.0 golang.org/x/tools v0.30.0 gopkg.in/yaml.v2 v2.4.0 - helm.sh/helm/v3 v3.17.0 - k8s.io/api v0.32.0 - k8s.io/apiextensions-apiserver v0.32.0 - k8s.io/apimachinery v0.32.0 - k8s.io/apiserver v0.32.0 - k8s.io/cli-runtime v0.32.0 - k8s.io/client-go v0.32.0 - k8s.io/component-base v0.32.0 + helm.sh/helm/v3 v3.17.1 + k8s.io/api v0.32.1 + k8s.io/apiextensions-apiserver v0.32.1 + k8s.io/apimachinery v0.32.1 + k8s.io/apiserver v0.32.1 + k8s.io/cli-runtime v0.32.1 + k8s.io/client-go v0.32.1 + k8s.io/component-base v0.32.1 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241210054802-24370beab758 sigs.k8s.io/controller-runtime v0.19.4 @@ -198,7 +198,7 @@ require ( github.com/sigstore/rekor v1.3.6 // indirect github.com/sigstore/sigstore v1.8.9 // indirect github.com/spf13/cast v1.7.0 // indirect - github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/stretchr/objx v0.5.2 // indirect @@ -245,7 +245,7 @@ require ( gopkg.in/warnings.v0 
v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - k8s.io/kubectl v0.32.0 // indirect + k8s.io/kubectl v0.32.1 // indirect oras.land/oras-go v1.2.5 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect diff --git a/go.sum b/go.sum index bf18570f9..8be8e4c08 100644 --- a/go.sum +++ b/go.sum @@ -986,33 +986,33 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -helm.sh/helm/v3 v3.17.0 h1:DUD4AGdNVn7PSTYfxe1gmQG7s18QeWv/4jI9TubnhT0= -helm.sh/helm/v3 v3.17.0/go.mod h1:Mo7eGyKPPHlS0Ml67W8z/lbkox/gD9Xt1XpD6bxvZZA= +helm.sh/helm/v3 v3.17.1 h1:gzVoAD+qVuoJU6KDMSAeo0xRJ6N1znRxz3wyuXRmJDk= +helm.sh/helm/v3 v3.17.1/go.mod h1:nvreuhuR+j78NkQcLC3TYoprCKStLyw5P4T7E5itv2w= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= -k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= -k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= -k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= -k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= -k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs= -k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag= -k8s.io/cli-runtime v0.32.0 h1:dP+OZqs7zHPpGQMCGAhectbHU2SNCuZtIimRKTv2T1c= -k8s.io/cli-runtime v0.32.0/go.mod h1:Mai8ht2+esoDRK5hr861KRy6z0zHsSTYttNVJXgP3YQ= -k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= -k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU= -k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM= +k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= +k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= +k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= +k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= +k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= +k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= +k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= +k8s.io/cli-runtime v0.32.1 h1:19nwZPlYGJPUDbhAxDIS2/oydCikvKMHsxroKNGA2mM= +k8s.io/cli-runtime v0.32.1/go.mod h1:NJPbeadVFnV2E7B7vF+FvU09mpwYlZCu8PqjzfuOnkY= 
+k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= +k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= +k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= +k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/kubectl v0.32.0 h1:rpxl+ng9qeG79YA4Em9tLSfX0G8W0vfaiPVrc/WR7Xw= -k8s.io/kubectl v0.32.0/go.mod h1:qIjSX+QgPQUgdy8ps6eKsYNF+YmFOAO3WygfucIqFiE= +k8s.io/kubectl v0.32.1 h1:/btLtXLQUU1rWx8AEvX9jrb9LaI6yeezt3sFALhB8M8= +k8s.io/kubectl v0.32.1/go.mod h1:sezNuyWi1STk4ZNPVRIFfgjqMI6XMf+oCVLjZen/pFQ= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= diff --git a/hack/ci/custom-linters/analyzers/setuplognilerrorcheck.go b/hack/ci/custom-linters/analyzers/setuplognilerrorcheck.go index 6eae8aa1e..ba9098a5f 100644 --- a/hack/ci/custom-linters/analyzers/setuplognilerrorcheck.go +++ b/hack/ci/custom-linters/analyzers/setuplognilerrorcheck.go @@ -12,8 +12,9 @@ import ( var SetupLogErrorCheck = &analysis.Analyzer{ Name: "setuplogerrorcheck", - Doc: "Detects improper usage of logger.Error() calls, ensuring the first argument is a non-nil error.", - Run: runSetupLogErrorCheck, + Doc: "Detects and reports improper usages of logger.Error() calls to enforce good practices " + + "and prevent silent failures.", + Run: runSetupLogErrorCheck, } func runSetupLogErrorCheck(pass *analysis.Pass) (interface{}, error) { @@ -72,7 +73,7 @@ func runSetupLogErrorCheck(pass *analysis.Pass) (interface{}, error) { pass.Reportf(callExpr.Pos(), "Incorrect usage of 'logger.Error(nil, ...)'. The first argument must be a non-nil 'error'. "+ - "Passing 'nil' results in silent failures, making debugging harder.\n\n"+ + "Passing 'nil' may hide error details, making debugging harder.\n\n"+ "\U0001F41B **What is wrong?**\n %s\n\n"+ "\U0001F4A1 **How to solve? Return the error, i.e.:**\n logger.Error(%s, %s, \"key\", value)\n\n", sourceLine, suggestedError, suggestedMessage) diff --git a/hack/ci/custom-linters/analyzers/testdata/main.go b/hack/ci/custom-linters/analyzers/testdata/main.go index 0a02ed939..97e712f50 100644 --- a/hack/ci/custom-linters/analyzers/testdata/main.go +++ b/hack/ci/custom-linters/analyzers/testdata/main.go @@ -10,7 +10,7 @@ func testLogger() { var value int // Case 1: Nil error - Ensures the first argument cannot be nil. - logger.Error(nil, "message") // want ".*results in silent failures, making debugging harder.*" + logger.Error(nil, "message") // want ".*may hide error details, making debugging harder*" // Case 2: Odd number of key-value arguments - Ensures key-value pairs are complete. 
logger.Error(err, "message", "key1") // want ".*Key-value pairs must be provided after the message, but an odd number of arguments was found.*" diff --git a/test/catalogd-e2e/e2e_suite_test.go b/test/catalogd-e2e/e2e_suite_test.go deleted file mode 100644 index a2399bd0e..000000000 --- a/test/catalogd-e2e/e2e_suite_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package catalogde2e - -import ( - "fmt" - "os" - "testing" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1" -) - -var ( - cfg *rest.Config - c client.Client - err error - kubeClient kubernetes.Interface -) - -func TestE2E(t *testing.T) { - _, err := ctrl.GetConfig() - if err != nil { - fmt.Println("Error: Could not get current Kubernetes context. Verify the cluster configuration") - os.Exit(0) - } - RegisterFailHandler(Fail) - SetDefaultEventuallyTimeout(1 * time.Minute) - SetDefaultEventuallyPollingInterval(1 * time.Second) - RunSpecs(t, "E2E Suite") -} - -var _ = BeforeSuite(func() { - cfg = ctrl.GetConfigOrDie() - - sch := scheme.Scheme - Expect(catalogdv1.AddToScheme(sch)).To(Succeed()) - c, err = client.New(cfg, client.Options{Scheme: sch}) - Expect(err).To(Not(HaveOccurred())) - kubeClient, err = kubernetes.NewForConfig(cfg) - Expect(err).ToNot(HaveOccurred()) -}) diff --git a/test/catalogd-e2e/unpack_test.go b/test/catalogd-e2e/unpack_test.go deleted file mode 100644 index 4c0ad6c01..000000000 --- a/test/catalogd-e2e/unpack_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package catalogde2e - -import ( - "context" - "os" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/google/go-cmp/cmp" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1" - testutils "github.com/operator-framework/operator-controller/test/utils" -) - -const ( - catalogRefEnvVar = "TEST_CATALOG_IMAGE" - catalogName = "test-catalog" -) - -// catalogImageRef returns the image reference for the test catalog image, defaulting to the value of the environment -// variable TEST_CATALOG_IMAGE if set, falling back to docker-registry.catalogd-e2e.svc:5000/test-catalog:e2e otherwise. 
-func catalogImageRef() string { - if s := os.Getenv(catalogRefEnvVar); s != "" { - return s - } - - return "docker-registry.catalogd-e2e.svc:5000/test-catalog:e2e" -} - -var _ = Describe("ClusterCatalog Unpacking", func() { - var ( - ctx context.Context - catalog *catalogdv1.ClusterCatalog - ) - When("A ClusterCatalog is created", func() { - BeforeEach(func() { - ctx = context.Background() - var err error - - catalog = &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: catalogName, - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: catalogImageRef(), - }, - }, - }, - } - - err = c.Create(ctx, catalog) - Expect(err).ToNot(HaveOccurred()) - }) - - It("Successfully unpacks catalog contents", func() { - By("Ensuring ClusterCatalog has Status.Condition of Progressing with a status == False and reason == Succeeded") - Eventually(func(g Gomega) { - err := c.Get(ctx, types.NamespacedName{Name: catalog.Name}, catalog) - g.Expect(err).ToNot(HaveOccurred()) - cond := meta.FindStatusCondition(catalog.Status.Conditions, catalogdv1.TypeProgressing) - g.Expect(cond).ToNot(BeNil()) - g.Expect(cond.Status).To(Equal(metav1.ConditionTrue)) - g.Expect(cond.Reason).To(Equal(catalogdv1.ReasonSucceeded)) - }).Should(Succeed()) - - By("Checking that it has an appropriate name label") - Expect(catalog.ObjectMeta.Labels).To(Not(BeNil())) - Expect(catalog.ObjectMeta.Labels).To(Not(BeEmpty())) - Expect(catalog.ObjectMeta.Labels).To(HaveKeyWithValue("olm.operatorframework.io/metadata.name", catalogName)) - - By("Making sure the catalog content is available via the http server") - actualFBC, err := testutils.ReadTestCatalogServerContents(ctx, catalog, kubeClient) - Expect(err).To(Not(HaveOccurred())) - - expectedFBC, err := os.ReadFile("../../catalogd/testdata/catalogs/test-catalog/expected_all.json") - Expect(err).To(Not(HaveOccurred())) - Expect(cmp.Diff(expectedFBC, actualFBC)).To(BeEmpty()) - - By("Ensuring ClusterCatalog has Status.Condition of Type = Serving with a status == True") - Eventually(func(g Gomega) { - err := c.Get(ctx, types.NamespacedName{Name: catalog.Name}, catalog) - g.Expect(err).ToNot(HaveOccurred()) - cond := meta.FindStatusCondition(catalog.Status.Conditions, catalogdv1.TypeServing) - g.Expect(cond).ToNot(BeNil()) - g.Expect(cond.Status).To(Equal(metav1.ConditionTrue)) - g.Expect(cond.Reason).To(Equal(catalogdv1.ReasonAvailable)) - }).Should(Succeed()) - }) - AfterEach(func() { - Expect(c.Delete(ctx, catalog)).To(Succeed()) - Eventually(func(g Gomega) { - err = c.Get(ctx, types.NamespacedName{Name: catalog.Name}, &catalogdv1.ClusterCatalog{}) - g.Expect(errors.IsNotFound(err)).To(BeTrue()) - }).Should(Succeed()) - }) - }) -}) diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go index 4c05df8b4..8ccca8b5e 100644 --- a/test/e2e/cluster_extension_install_test.go +++ b/test/e2e/cluster_extension_install_test.go @@ -210,9 +210,40 @@ func testInit(t *testing.T) (*ocv1.ClusterExtension, *catalogd.ClusterCatalog, * sa, err := createServiceAccount(context.Background(), name, clusterExtensionName) require.NoError(t, err) + + validateCatalogUnpack(t) + return clusterExtension, extensionCatalog, sa, ns } +func validateCatalogUnpack(t *testing.T) { + catalog := &catalogd.ClusterCatalog{} + t.Log("Ensuring ClusterCatalog has Status.Condition of Progressing with a status == True and reason == Succeeded") + require.EventuallyWithT(t, 
func(ct *assert.CollectT) { + err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) + assert.NoError(ct, err) + cond := apimeta.FindStatusCondition(catalog.Status.Conditions, catalogd.TypeProgressing) + assert.NotNil(ct, cond) + assert.Equal(ct, metav1.ConditionTrue, cond.Status) + assert.Equal(ct, catalogd.ReasonSucceeded, cond.Reason) + }, pollDuration, pollInterval) + + t.Log("Checking that catalog has the expected metadata label") + assert.NotNil(t, catalog.ObjectMeta.Labels) + assert.Contains(t, catalog.ObjectMeta.Labels, "olm.operatorframework.io/metadata.name") + assert.Equal(t, testCatalogName, catalog.ObjectMeta.Labels["olm.operatorframework.io/metadata.name"]) + + t.Log("Ensuring ClusterCatalog has Status.Condition of Type = Serving with status == True") + require.EventuallyWithT(t, func(ct *assert.CollectT) { + err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) + assert.NoError(ct, err) + cond := apimeta.FindStatusCondition(catalog.Status.Conditions, catalogd.TypeServing) + assert.NotNil(ct, cond) + assert.Equal(ct, metav1.ConditionTrue, cond.Status) + assert.Equal(ct, catalogd.ReasonAvailable, cond.Reason) + }, pollDuration, pollInterval) +} + func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { ls := labels.Set{"olm.operatorframework.io/owner-name": clusterExtensionName} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go index f0272fd6a..40bce2a68 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go @@ -237,6 +237,9 @@ func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, pr printf("warning: skipped value for %s.%s: Not a table.", subPrefix, key) } } else { + // If the key is a child chart, coalesce tables with Merge set to true + merge := childChartMergeTrue(c, key, merge) + // Because v has higher precedence than nv, dest values override src // values. coalesceTablesFullKey(printf, dest, src, concatPrefix(subPrefix, key), merge) @@ -249,6 +252,15 @@ func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, pr } } +func childChartMergeTrue(chrt *chart.Chart, key string, merge bool) bool { + for _, subchart := range chrt.Dependencies() { + if subchart.Name() == key { + return true + } + } + return merge +} + // CoalesceTables merges a source map into a destination map. // // dest is considered authoritative. diff --git a/vendor/helm.sh/helm/v3/pkg/kube/resource.go b/vendor/helm.sh/helm/v3/pkg/kube/resource.go index d441db8a7..db8e9178e 100644 --- a/vendor/helm.sh/helm/v3/pkg/kube/resource.go +++ b/vendor/helm.sh/helm/v3/pkg/kube/resource.go @@ -81,5 +81,5 @@ func (r ResourceList) Intersect(rs ResourceList) ResourceList { // isMatchingInfo returns true if infos match on Name and GroupVersionKind. 
func isMatchingInfo(a, b *resource.Info) bool { - return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind.Kind == b.Mapping.GroupVersionKind.Kind + return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind.Kind == b.Mapping.GroupVersionKind.Kind && a.Mapping.GroupVersionKind.Group == b.Mapping.GroupVersionKind.Group } diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto index 13be7cbd8..e802a0143 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -675,7 +675,7 @@ message ResourceClaimStatus { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 32 such reservations. This may get increased in + // There can be at most 256 such reservations. This may get increased in // the future, but not reduced. // // +optional diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go index e3d7fd894..fb4d7041d 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -687,7 +687,7 @@ type ResourceClaimStatus struct { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 32 such reservations. This may get increased in + // There can be at most 256 such reservations. This may get increased in // the future, but not reduced. // // +optional @@ -715,9 +715,9 @@ type ResourceClaimStatus struct { Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"` } -// ReservedForMaxSize is the maximum number of entries in +// ResourceClaimReservedForMaxSize is the maximum number of entries in // claim.status.reservedFor. -const ResourceClaimReservedForMaxSize = 32 +const ResourceClaimReservedForMaxSize = 256 // ResourceClaimConsumerReference contains enough information to let you // locate the consumer of a ResourceClaim. The user must be a resource in the same diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go index 1a71d64c1..b41609d11 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -291,7 +291,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { var map_ResourceClaimStatus = map[string]string{ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", "allocation": "Allocation is set once the claim has been allocated successfully.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. 
The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.", } diff --git a/vendor/k8s.io/api/resource/v1beta1/generated.proto b/vendor/k8s.io/api/resource/v1beta1/generated.proto index 6d525d5b8..4ea13e033 100644 --- a/vendor/k8s.io/api/resource/v1beta1/generated.proto +++ b/vendor/k8s.io/api/resource/v1beta1/generated.proto @@ -683,7 +683,7 @@ message ResourceClaimStatus { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 32 such reservations. This may get increased in + // There can be at most 256 such reservations. This may get increased in // the future, but not reduced. // // +optional diff --git a/vendor/k8s.io/api/resource/v1beta1/types.go b/vendor/k8s.io/api/resource/v1beta1/types.go index a7f1ee7b5..ca79c5a66 100644 --- a/vendor/k8s.io/api/resource/v1beta1/types.go +++ b/vendor/k8s.io/api/resource/v1beta1/types.go @@ -695,7 +695,7 @@ type ResourceClaimStatus struct { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 32 such reservations. This may get increased in + // There can be at most 256 such reservations. This may get increased in // the future, but not reduced. // // +optional @@ -723,9 +723,9 @@ type ResourceClaimStatus struct { Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"` } -// ReservedForMaxSize is the maximum number of entries in +// ResourceClaimReservedForMaxSize is the maximum number of entries in // claim.status.reservedFor. -const ResourceClaimReservedForMaxSize = 32 +const ResourceClaimReservedForMaxSize = 256 // ResourceClaimConsumerReference contains enough information to let you // locate the consumer of a ResourceClaim. 
The user must be a resource in the same diff --git a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go index 1d0176cbc..4ecc35d08 100644 --- a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go @@ -300,7 +300,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { var map_ResourceClaimStatus = map[string]string{ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", "allocation": "Allocation is set once the claim has been allocated successfully.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. 
Entries are owned by their respective drivers.", } diff --git a/vendor/modules.txt b/vendor/modules.txt index d98bdb7fb..e6b913721 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1343,7 +1343,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.17.0 +# helm.sh/helm/v3 v3.17.1 ## explicit; go 1.23.0 helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/resolver @@ -1382,7 +1382,7 @@ helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/time/ctime helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.32.0 +# k8s.io/api v0.32.1 ## explicit; go 1.23.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1443,7 +1443,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.32.0 +# k8s.io/apiextensions-apiserver v0.32.1 ## explicit; go 1.23.0 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1465,7 +1465,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.32.0 +# k8s.io/apimachinery v0.32.1 ## explicit; go 1.23.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1533,7 +1533,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.32.0 +# k8s.io/apiserver v0.32.1 ## explicit; go 1.23.0 k8s.io/apiserver/pkg/apis/apiserver k8s.io/apiserver/pkg/apis/apiserver/install @@ -1581,13 +1581,13 @@ k8s.io/apiserver/pkg/warning k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics -# k8s.io/cli-runtime v0.32.0 +# k8s.io/cli-runtime v0.32.1 ## explicit; go 1.23.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.32.0 +# k8s.io/client-go v0.32.1 ## explicit; go 1.23.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -1946,7 +1946,7 @@ k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.32.0 +# k8s.io/component-base v0.32.1 ## explicit; go 1.23.0 k8s.io/component-base/cli/flag k8s.io/component-base/featuregate @@ -1985,7 +1985,7 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kubectl v0.32.0 +# k8s.io/kubectl v0.32.1 ## explicit; go 1.23.0 k8s.io/kubectl/pkg/cmd/util k8s.io/kubectl/pkg/scheme
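// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): the
// chartutil/coalesce.go hunk above forces merge semantics whenever a values
// key names a declared subchart. The practical difference is how an explicit
// nil in the higher-precedence map is treated: plain coalescing deletes the
// matching key, merging preserves it. This sketch assumes only the exported
// CoalesceTables and MergeTables helpers from the same vendored chartutil
// package; the inputs are invented for illustration.
package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/chartutil"
)

// values rebuilds the inputs for each call, since both helpers mutate dest.
func values() (dest, src map[string]interface{}) {
	dest = map[string]interface{}{"subchart": map[string]interface{}{"enabled": nil}}
	src = map[string]interface{}{"subchart": map[string]interface{}{"enabled": true, "replicas": 2}}
	return dest, src
}

func main() {
	dest, src := values()
	// merge=false path: the explicit nil under "subchart" deletes "enabled".
	fmt.Println(chartutil.CoalesceTables(dest, src)) // map[subchart:map[replicas:2]]

	dest, src = values()
	// merge=true path, which subchart keys now always take: the nil is kept.
	fmt.Println(chartutil.MergeTables(dest, src)) // map[subchart:map[enabled:<nil> replicas:2]]
}
// ---------------------------------------------------------------------------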
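// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): the
// isMatchingInfo change in helm.sh/helm/v3/pkg/kube/resource.go above adds a
// Group comparison. Matching on Kind alone conflates resources from different
// API groups that happen to share a Kind name, as this self-contained
// reproduction of the old and new predicates shows. Only the vendored
// apimachinery schema types are assumed; matchKindOnly and matchGroupKind are
// hypothetical stand-ins for the unexported helper (which also compares Name
// and Namespace).
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// matchKindOnly mirrors the pre-change predicate; matchGroupKind the fix.
func matchKindOnly(a, b schema.GroupVersionKind) bool { return a.Kind == b.Kind }

func matchGroupKind(a, b schema.GroupVersionKind) bool {
	return a.Kind == b.Kind && a.Group == b.Group
}

func main() {
	legacy := schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"}
	current := schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1", Kind: "Ingress"}

	fmt.Println(matchKindOnly(legacy, current))  // true: distinct groups wrongly intersect
	fmt.Println(matchGroupKind(legacy, current)) // false: groups are now compared
}
// ---------------------------------------------------------------------------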
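// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): the
// reservedFor cap raised from 32 to 256 above is exported as
// ResourceClaimReservedForMaxSize, and per the field documentation a consumer
// is expected to respect it before attempting the optimistic reservedFor
// update that only the first concurrent writer wins. The tryReserve helper
// below is hypothetical; only the vendored v1beta1 types and the constant
// itself are assumed.
package main

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/types"
)

// tryReserve appends a consumer to claim.status.reservedFor, refusing once
// the documented cap is reached; a real client would then send the update to
// the API server and requeue the pod on conflict.
func tryReserve(status *resourcev1beta1.ResourceClaimStatus, ref resourcev1beta1.ResourceClaimConsumerReference) bool {
	if len(status.ReservedFor) >= resourcev1beta1.ResourceClaimReservedForMaxSize {
		return false // claim has reached its consumer limit; wait for it to free up
	}
	status.ReservedFor = append(status.ReservedFor, ref)
	return true
}

func main() {
	status := &resourcev1beta1.ResourceClaimStatus{}
	ok := tryReserve(status, resourcev1beta1.ResourceClaimConsumerReference{
		Resource: "pods",
		Name:     "example-pod",
		UID:      types.UID("1234"),
	})
	fmt.Println(ok, len(status.ReservedFor)) // true 1
}
// ---------------------------------------------------------------------------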