Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@ COLLECT_PROFILES_CMD := $(addprefix bin/, collect-profiles)
OPM := $(addprefix bin/, opm)
OLM_CMDS := $(shell go list -mod=vendor $(OLM_PKG)/cmd/...)
PSM_CMD := $(addprefix bin/, psm)
# Output paths for the lifecycle binaries built as part of build/olm.
LIFECYCLE_CONTROLLER_CMD := $(addprefix bin/, lifecycle-controller)
LIFECYCLE_SERVER_CMD := $(addprefix bin/, lifecycle-server)
REGISTRY_CMDS := $(addprefix bin/, $(shell ls staging/operator-registry/cmd | grep -v opm))

# Default image tag for build/olm-container and build/registry-container
Expand Down Expand Up @@ -77,7 +79,7 @@ build/registry:
$(MAKE) $(REGISTRY_CMDS) $(OPM)

build/olm:
$(MAKE) $(PSM_CMD) $(OLM_CMDS) $(COLLECT_PROFILES_CMD) bin/copy-content
$(MAKE) $(PSM_CMD) $(OLM_CMDS) $(COLLECT_PROFILES_CMD) bin/copy-content $(LIFECYCLE_CONTROLLER_CMD) $(LIFECYCLE_SERVER_CMD)

$(OPM): version_flags=-ldflags "-X '$(REGISTRY_PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(REGISTRY_PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(REGISTRY_PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)'"
$(OPM):
Expand All @@ -97,6 +99,12 @@ $(PSM_CMD): FORCE
$(COLLECT_PROFILES_CMD): FORCE
go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(COLLECT_PROFILES_CMD) $(ROOT_PKG)/cmd/collect-profiles

# Build the lifecycle-controller binary; depending on FORCE makes it rebuild every invocation.
$(LIFECYCLE_CONTROLLER_CMD): FORCE
	go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(LIFECYCLE_CONTROLLER_CMD) $(ROOT_PKG)/cmd/lifecycle-controller

# Build the lifecycle-server binary; depending on FORCE makes it rebuild every invocation.
$(LIFECYCLE_SERVER_CMD): FORCE
	go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(LIFECYCLE_SERVER_CMD) $(ROOT_PKG)/cmd/lifecycle-server

.PHONY: cross
cross: version_flags=-X '$(REGISTRY_PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(REGISTRY_PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(REGISTRY_PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)'
cross:
Expand Down
23 changes: 23 additions & 0 deletions cmd/lifecycle-controller/main.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
package main

import (
"fmt"
"os"

"github.com/spf13/cobra"
_ "k8s.io/client-go/plugin/pkg/client/auth"
)

// main wires up the lifecycle-controller root command, attaches the start
// subcommand, and exits non-zero on any execution error.
func main() {
	root := &cobra.Command{
		Use:   "lifecycle-controller",
		Short: "Lifecycle Metadata Controller for OLM",
	}
	root.AddCommand(newStartCmd())

	err := root.Execute()
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "error running lifecycle-controller: %v\n", err)
	os.Exit(1)
}
307 changes: 307 additions & 0 deletions cmd/lifecycle-controller/start.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,307 @@
package main

import (
"cmp"
"context"
"crypto/tls"
"errors"
"fmt"
"net/http"
"os"

"github.com/go-logr/logr"
configv1 "github.com/openshift/api/config/v1"
tlsutil "github.com/openshift/controller-runtime-common/pkg/tls"
"github.com/openshift/library-go/pkg/crypto"
"github.com/openshift/operator-framework-olm/pkg/leaderelection"
controllers "github.com/openshift/operator-framework-olm/pkg/lifecycle-controller"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/spf13/cobra"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/manager"
metricsfilters "sigs.k8s.io/controller-runtime/pkg/metrics/filters"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

// Default bind addresses and the leader-election lock name.
const (
	defaultMetricsAddr     = ":8443" // secure metrics endpoint
	defaultHealthCheckAddr = ":8081" // liveness/readiness probe endpoint
	leaderElectionID       = "lifecycle-controller-lock"
)

// Flag-bound package state, populated by the flags registered in newStartCmd
// before run executes.
var (
	disableLeaderElection      bool   // --disable-leader-election
	healthCheckAddr            string // --health
	metricsAddr                string // --metrics
	catalogSourceLabelSelector string // --catalog-source-label-selector (empty means all)
	catalogSourceFieldSelector string // --catalog-source-field-selector (empty means all)
	tlsCertFile                string // --tls-cert (required)
	tlsKeyFile                 string // --tls-key (required)
)

// newStartCmd builds the "start" subcommand and registers all of the
// controller's flags on it. --tls-cert and --tls-key are mandatory.
func newStartCmd() *cobra.Command {
	startCmd := &cobra.Command{
		Use:          "start",
		Short:        "Start the Lifecycle Controller",
		SilenceUsage: true,
		RunE:         run,
	}

	flags := startCmd.Flags()
	flags.StringVar(&healthCheckAddr, "health", defaultHealthCheckAddr, "health check address")
	flags.StringVar(&metricsAddr, "metrics", defaultMetricsAddr, "metrics address")
	flags.BoolVar(&disableLeaderElection, "disable-leader-election", false, "disable leader election")
	flags.StringVar(&catalogSourceLabelSelector, "catalog-source-label-selector", "", "label selector for catalog sources to manage (empty means all)")
	flags.StringVar(&catalogSourceFieldSelector, "catalog-source-field-selector", "", "field selector for catalog sources to manage (empty means all)")
	flags.StringVar(&tlsCertFile, "tls-cert", "", "path to TLS certificate file for metrics server")
	flags.StringVar(&tlsKeyFile, "tls-key", "", "path to TLS key file for metrics server")

	// MarkFlagRequired only errors for unknown flag names; both are
	// registered just above, so the error is impossible here.
	_ = startCmd.MarkFlagRequired("tls-cert")
	_ = startCmd.MarkFlagRequired("tls-key")
	return startCmd
}

func run(_ *cobra.Command, _ []string) error {
ctx := ctrl.SetupSignalHandler()
ctrl.SetLogger(klog.NewKlogr())
setupLog := ctrl.Log.WithName("setup")

cfg, err := loadStartConfig(ctx)
if err != nil {
return fmt.Errorf("unable to load startup configuration: %v", err)
}
logConfig(cfg, setupLog)

mgr, err := setupManager(cfg)
if err != nil {
return fmt.Errorf("failed to setup manager instance: %v", err)
}

tlsProfileChan, err := setupTLSProfileWatcher(mgr, cfg)
if err != nil {
return fmt.Errorf("unable to setup TLS profile watcher: %v", err)
}
defer close(tlsProfileChan)
Comment on lines +89 to +93
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Confirm whether SecurityProfileWatcher.OnProfileChange can be invoked after mgr.Start returns
# and whether the upstream implementation treats ctx cancellation for the callback.
rg -nP --type=go -C5 '\bSecurityProfileWatcher\b' vendor/ 2>/dev/null || \
  rg -nP --type=go -C5 '\bSecurityProfileWatcher\b'
rg -nP --type=go -C3 'OnProfileChange' vendor/ 2>/dev/null || \
  rg -nP --type=go -C3 'OnProfileChange'

Repository: openshift/operator-framework-olm

Length of output: 9224


🏁 Script executed:

cat -n cmd/lifecycle-controller/start.go | sed -n '1,100p'

Repository: openshift/operator-framework-olm

Length of output: 4200


🏁 Script executed:

cat -n cmd/lifecycle-controller/start.go | sed -n '270,295p'

Repository: openshift/operator-framework-olm

Length of output: 1405


🏁 Script executed:

cat -n cmd/lifecycle-controller/start.go | sed -n '180,210p'

Repository: openshift/operator-framework-olm

Length of output: 1719


🏁 Script executed:

# Find all usages of tlsProfileChan to understand the data flow
rg -n 'tlsProfileChan' cmd/lifecycle-controller/start.go -A2 -B2

Repository: openshift/operator-framework-olm

Length of output: 912


🏁 Script executed:

# Find setupTLSProfileWatcher function definition and channel creation
rg -n 'func setupTLSProfileWatcher' cmd/lifecycle-controller/start.go -A30

Repository: openshift/operator-framework-olm

Length of output: 1414


🏁 Script executed:

# Find where tlsChangeChan is created (make call)
rg -n 'make.*tlsChangeChan|make.*event.TypedGenericEvent' cmd/lifecycle-controller/start.go -B5 -A2

Repository: openshift/operator-framework-olm

Length of output: 377


🏁 Script executed:

# Find where tlsChangeChan is consumed/read from
rg -n 'for.*tlsProfileChan|<-.*tlsProfileChan' cmd/ -r

Repository: openshift/operator-framework-olm

Length of output: 490


🏁 Script executed:

# Find where tlsProfileChan is consumed/read from
rg -n 'for.*range.*tlsProfileChan|<-.*tlsProfileChan' cmd/ -B3 -A3

Repository: openshift/operator-framework-olm

Length of output: 58


🏁 Script executed:

# Also check SetupWithManager in the LifecycleServerReconciler
rg -n 'SetupWithManager' pkg/lifecycle-controller/ -A20 | head -80

Repository: openshift/operator-framework-olm

Length of output: 3235


Potential panic / goroutine leak: defer close(tlsProfileChan) races with blocking sends from OnProfileChange.

Two failure modes:

  1. If OnProfileChange fires while the manager is shutting down, the unbuffered send at line 282 blocks (no consumer — controller-runtime has stopped draining). mgr.Start returns, defer close(tlsProfileChan) runs, and the send then panics with send on closed channel.
  2. Even without a close, a blocked send holds the watcher goroutine indefinitely, leaking it past run() returning.

The OnProfileChange callback already receives a context.Context (line 271) — gate the send on ctx.Done() and drop the defer close (leaving it to GC is safe and avoids the race entirely):

🔒️ Suggested fix
-	defer close(tlsProfileChan)
+	// Intentionally not closed: producer (SecurityProfileWatcher) may still be mid-send
+	// when mgr.Start returns; rely on GC after run() exits.
 		OnProfileChange: func(ctx context.Context, oldTLSProfileSpec, newTLSProfileSpec configv1.TLSProfileSpec) {
 			cfg.TLSConfigProvider.UpdateProfile(newTLSProfileSpec)
 			log.Info("applying new TLS profile spec",
 				"minVersion", newTLSProfileSpec.MinTLSVersion,
 				"cipherSuites", newTLSProfileSpec.Ciphers,
 			)

 			_, unsupportedCiphers := cfg.TLSConfigProvider.Get()
 			if len(unsupportedCiphers) > 0 {
 				log.Info("ignoring unsupported ciphers found in TLS profile", "unsupportedCiphers", unsupportedCiphers)
 			}
-			tlsChangeChan <- event.TypedGenericEvent[configv1.TLSProfileSpec]{Object: newTLSProfileSpec}
+			select {
+			case tlsChangeChan <- event.TypedGenericEvent[configv1.TLSProfileSpec]{Object: newTLSProfileSpec}:
+			case <-ctx.Done():
+				return
+			}
 		},
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@cmd/lifecycle-controller/start.go` around lines 89 - 93, The deferred close
of tlsProfileChan in setupTLSProfileWatcher can race with blocking sends from
the OnProfileChange callback and cause panics or leaked goroutines; remove the
defer close(tlsProfileChan) and instead modify the OnProfileChange callback
(which receives a context) to gate its send on tlsProfileChan with a select that
checks ctx.Done() (drop the send if ctx is done) so sends never block or attempt
to send on a closed channel; leave the channel unclosed and let it be GC'd when
no longer referenced.


if err := setupLifecycleServerController(mgr, cfg, tlsProfileChan); err != nil {
return fmt.Errorf("unable to setup lifecycle server controller: %v", err)
}

setupLog.Info("starting manager")
if err := mgr.Start(ctx); err != nil {
return fmt.Errorf("unable to start manager: %v", err)
}

return nil
}

// startConfig carries everything run() needs after startup validation:
// environment-derived settings, parsed selectors, the REST config and scheme,
// leader-election settings, and the initial TLS state.
type startConfig struct {
	Namespace string // pod namespace (NAMESPACE env var); required unless leader election is disabled
	Version   string // release version (RELEASE_VERSION env var; "unknown" if unset)

	ServerImage                string          // lifecycle-server image (LIFECYCLE_SERVER_IMAGE env var); required
	CatalogSourceFieldSelector fields.Selector // limits which CatalogSources are managed (empty matches all)
	CatalogSourceLabelSelector labels.Selector // limits which CatalogSources are managed (empty matches all)
	RESTConfig                 *rest.Config
	Scheme                     *runtime.Scheme

	LeaderElection configv1.LeaderElection

	InitialTLSProfileSpec   configv1.TLSProfileSpec        // TLS profile resolved at startup
	TLSConfigProvider       *controllers.TLSConfigProvider // serves the live *tls.Config to the metrics server
	EnableTLSProfileWatcher bool                           // true when the APIServer TLS profile could be fetched at startup
}

// loadStartConfig assembles all startup configuration from environment
// variables, command-line flags, and the cluster, validating each piece as it
// goes.
//
// Required environment:
//   - LIFECYCLE_SERVER_IMAGE
//   - NAMESPACE (unless leader election is disabled)
//
// Optional environment:
//   - RELEASE_VERSION (defaults to "unknown")
//
// Errors are wrapped with %w so callers can inspect the underlying cause.
func loadStartConfig(ctx context.Context) (*startConfig, error) {
	cfg := &startConfig{
		Namespace:   os.Getenv("NAMESPACE"),
		Version:     cmp.Or(os.Getenv("RELEASE_VERSION"), "unknown"),
		ServerImage: os.Getenv("LIFECYCLE_SERVER_IMAGE"),
	}
	if cfg.Namespace == "" && !disableLeaderElection {
		return nil, errors.New("NAMESPACE environment variable is required when leader election is enabled")
	}
	if cfg.ServerImage == "" {
		return nil, errors.New("LIFECYCLE_SERVER_IMAGE environment variable is required")
	}

	// Using a function to load the keypair each time means that we automatically pick up the new certificate when it reloads.
	getCertificate := func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
		cert, err := tls.LoadX509KeyPair(tlsCertFile, tlsKeyFile)
		if err != nil {
			return nil, err
		}
		return &cert, nil
	}
	// Fail fast at startup if the cert/key pair is unreadable or invalid.
	if _, err := getCertificate(nil); err != nil {
		return nil, fmt.Errorf("failed to load TLS certificate/key: %w", err)
	}

	var err error
	cfg.CatalogSourceFieldSelector, err = fields.ParseSelector(catalogSourceFieldSelector)
	if err != nil {
		return nil, fmt.Errorf("failed to parse catalog source field selector %q: %w", catalogSourceFieldSelector, err)
	}
	cfg.CatalogSourceLabelSelector, err = labels.Parse(catalogSourceLabelSelector)
	if err != nil {
		return nil, fmt.Errorf("failed to parse catalog source label selector %q: %w", catalogSourceLabelSelector, err)
	}
	cfg.RESTConfig, err = ctrl.GetConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to get rest config: %w", err)
	}
	cfg.Scheme = setupScheme()
	cfg.LeaderElection = leaderelection.GetLeaderElectionConfig(ctrl.Log.WithName("leaderelection"), cfg.RESTConfig, !disableLeaderElection)

	cfg.InitialTLSProfileSpec, cfg.EnableTLSProfileWatcher, err = getInitialTLSProfile(ctx, cfg.RESTConfig, cfg.Scheme)
	if err != nil {
		return nil, fmt.Errorf("failed to get initial TLS security profile: %w", err)
	}
	cfg.TLSConfigProvider = controllers.NewTLSConfigProvider(getCertificate, cfg.InitialTLSProfileSpec)
	return cfg, nil
}

// logConfig emits the effective startup configuration at Info level so the
// running configuration is visible in the pod logs, and flags any cipher
// suites from the TLS profile that the process cannot honor.
func logConfig(cfg *startConfig, log logr.Logger) {
	log.Info("starting lifecycle-controller", "version", cfg.Version)
	log.Info("config", "lifecycleServerImage", cfg.ServerImage)

	if labelSel := cfg.CatalogSourceLabelSelector; !labelSel.Empty() {
		log.Info("config", "catalogSourceLabelSelector", labelSel.String())
	}
	if fieldSel := cfg.CatalogSourceFieldSelector; !fieldSel.Empty() {
		log.Info("config", "catalogSourceFieldSelector", fieldSel.String())
	}

	profile, badCiphers := cfg.TLSConfigProvider.Get()
	log.Info("config", "tlsMinVersion", crypto.TLSVersionToNameOrDie(profile.MinVersion))
	log.Info("config", "tlsCipherSuites", crypto.CipherSuitesToNamesOrDie(profile.CipherSuites))
	if len(badCiphers) > 0 {
		log.Error(errors.New("ignored config"), "unsupported TLS cipher suites", "tlsCipherSuites", badCiphers)
	}
}

// getInitialTLSProfile fetches the cluster APIServer TLS profile. On success
// it returns the fetched spec and true, enabling the runtime TLS profile
// watcher. If the fetch fails (e.g. the APIServer config API is absent, or
// RBAC/API errors occur) it logs the error and falls back to the default
// profile with the watcher disabled for the lifetime of the process.
func getInitialTLSProfile(ctx context.Context, restConfig *rest.Config, sch *runtime.Scheme) (configv1.TLSProfileSpec, bool, error) {
	cl, err := client.New(restConfig, client.Options{Scheme: sch})
	if err != nil {
		return configv1.TLSProfileSpec{}, false, fmt.Errorf("failed to create client: %w", err)
	}
	initialTLSProfileSpec, err := tlsutil.FetchAPIServerTLSProfile(ctx, cl)
	if err != nil {
		// Surface the failure instead of silently masking it: the fallback
		// disables the TLS profile watcher for the whole process lifetime,
		// and the operator needs a way to distinguish "no APIServer config"
		// from "fetch failed at startup".
		ctrl.Log.WithName("setup").Error(err, "failed to fetch APIServer TLS profile; falling back to default and disabling TLS profile watcher")
		return *configv1.TLSProfiles[crypto.DefaultTLSProfileType], false, nil
	}
	return initialTLSProfileSpec, true, nil
}
Comment on lines +189 to +199
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Silently swallowing FetchAPIServerTLSProfile error masks startup problems.

When tlsutil.FetchAPIServerTLSProfile fails (e.g., transient API server error, missing RBAC), this returns the default profile, disables the TLS profile watcher for the entire process lifetime, and returns nil — there's no log line and the operator has no way to distinguish "no APIServer config" from "RBAC was missing at startup". At minimum, log the error; ideally, bubble it up unless the failure is specifically "resource not found":

🔒️ Suggested fix
-func getInitialTLSProfile(ctx context.Context, restConfig *rest.Config, sch *runtime.Scheme) (configv1.TLSProfileSpec, bool, error) {
+func getInitialTLSProfile(ctx context.Context, log logr.Logger, restConfig *rest.Config, sch *runtime.Scheme) (configv1.TLSProfileSpec, bool, error) {
 	cl, err := client.New(restConfig, client.Options{Scheme: sch})
 	if err != nil {
 		return configv1.TLSProfileSpec{}, false, fmt.Errorf("failed to create client: %v", err)
 	}
 	initialTLSProfileSpec, err := tlsutil.FetchAPIServerTLSProfile(ctx, cl)
 	if err != nil {
+		log.Error(err, "failed to fetch APIServer TLS profile; falling back to default and disabling TLS profile watcher")
 		return *configv1.TLSProfiles[crypto.DefaultTLSProfileType], false, nil
 	}
 	return initialTLSProfileSpec, true, nil
 }

(and thread setupLog through from loadStartConfig).

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@cmd/lifecycle-controller/start.go` around lines 189 - 199,
getInitialTLSProfile currently swallows errors from
tlsutil.FetchAPIServerTLSProfile and silently falls back to the default, which
hides startup RBAC/API errors and disables the watcher; modify
getInitialTLSProfile so that it logs the error (using the setupLog passed down
from loadStartConfig) and returns the error to the caller unless the failure is
a clear "resource not found" case, in which case you may continue to return the
default profile and a false flag; thread setupLog through loadStartConfig into
getInitialTLSProfile and update callers to handle the returned error so
transient API/RBAC failures surface instead of being masked.


// setupManager constructs the controller-runtime manager: a secure metrics
// endpoint whose TLS configuration is resolved dynamically from the
// TLSConfigProvider, leader election per the loaded config, a healthz
// endpoint, and a cache scoped via label/field selectors to only the objects
// this controller watches.
func setupManager(cfg *startConfig) (manager.Manager, error) {
	mgr, err := ctrl.NewManager(cfg.RESTConfig, manager.Options{
		Scheme: cfg.Scheme,
		Metrics: metricsserver.Options{
			BindAddress:    metricsAddr,
			SecureServing:  true,
			FilterProvider: metricsfilters.WithAuthenticationAndAuthorization,
			// GetConfigForClient is consulted per handshake, so TLS profile
			// updates take effect without restarting the metrics server.
			// Unsupported ciphers reported by Get are logged elsewhere.
			TLSOpts: []func(*tls.Config){func(tlsConfig *tls.Config) {
				tlsConfig.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) {
					tlsCfg, _ := cfg.TLSConfigProvider.Get()
					return tlsCfg, nil
				}
			}},
		},
		LeaderElection:          !cfg.LeaderElection.Disable,
		LeaderElectionNamespace: cfg.Namespace,
		LeaderElectionID:        leaderElectionID,
		LeaseDuration:           &cfg.LeaderElection.LeaseDuration.Duration,
		RenewDeadline:           &cfg.LeaderElection.RenewDeadline.Duration,
		RetryPeriod:             &cfg.LeaderElection.RetryPeriod.Duration,
		HealthProbeBindAddress:  healthCheckAddr,
		// Release the leader lease on shutdown so a replacement can take
		// over without waiting for the lease to expire.
		LeaderElectionReleaseOnCancel: true,
		Cache: cache.Options{
			// Restrict the cache to exactly the objects this controller
			// needs, keeping memory usage and watch traffic down.
			ByObject: map[client.Object]cache.ByObject{
				&operatorsv1alpha1.CatalogSource{}: {},
				// Only catalog pods (carrying the olm.catalogSource label).
				&corev1.Pod{}: {
					Label: catalogPodLabelSelector(),
				},
				// Objects owned by the lifecycle-server controller.
				&appsv1.Deployment{}: {
					Label: controllers.LifecycleServerLabelSelector(),
				},
				&corev1.ServiceAccount{}: {
					Label: controllers.LifecycleServerLabelSelector(),
				},
				&corev1.Service{}: {
					Label: controllers.LifecycleServerLabelSelector(),
				},
				&networkingv1.NetworkPolicy{}: {
					Label: controllers.LifecycleServerLabelSelector(),
				},
				// Only the cluster-scoped APIServer singleton.
				&configv1.APIServer{}: {
					Field: fields.SelectorFromSet(fields.Set{"metadata.name": "cluster"}),
				},
			},
		},
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create manager: %v", err)
	}

	// Add health check endpoint (used for both liveness and readiness probes)
	if err := mgr.AddHealthzCheck("healthz", func(req *http.Request) error {
		return nil
	}); err != nil {
		return nil, fmt.Errorf("failed to configure health check handler: %v", err)
	}
	return mgr, nil
}

// setupTLSProfileWatcher registers a SecurityProfileWatcher with the manager
// (when the APIServer TLS profile is watchable) and returns the channel on
// which profile-change events are delivered to downstream controllers.
//
// The returned channel is never closed by the producer; callers must not
// close it either, because the OnProfileChange callback may still be sending
// when the manager shuts down.
func setupTLSProfileWatcher(mgr manager.Manager, cfg *startConfig) (chan event.TypedGenericEvent[configv1.TLSProfileSpec], error) {
	tlsChangeChan := make(chan event.TypedGenericEvent[configv1.TLSProfileSpec])

	if !cfg.EnableTLSProfileWatcher {
		// No producer will ever send on this channel; returning it anyway
		// keeps the downstream controller wiring uniform.
		return tlsChangeChan, nil
	}

	log := ctrl.Log.WithName("tls-profile")
	tlsProfileReconciler := tlsutil.SecurityProfileWatcher{
		Client:                mgr.GetClient(),
		InitialTLSProfileSpec: cfg.InitialTLSProfileSpec,
		OnProfileChange: func(ctx context.Context, oldTLSProfileSpec, newTLSProfileSpec configv1.TLSProfileSpec) {
			cfg.TLSConfigProvider.UpdateProfile(newTLSProfileSpec)
			log.Info("applying new TLS profile spec",
				"minVersion", newTLSProfileSpec.MinTLSVersion,
				"cipherSuites", newTLSProfileSpec.Ciphers,
			)

			_, unsupportedCiphers := cfg.TLSConfigProvider.Get()
			if len(unsupportedCiphers) > 0 {
				log.Info("ignoring unsupported ciphers found in TLS profile", "unsupportedCiphers", unsupportedCiphers)
			}
			// Gate the send on ctx: during manager shutdown there is no
			// consumer draining the channel, and an unconditional send would
			// block this goroutine forever (leaking it past run() returning).
			select {
			case tlsChangeChan <- event.TypedGenericEvent[configv1.TLSProfileSpec]{Object: newTLSProfileSpec}:
			case <-ctx.Done():
			}
		},
	}

	if err := tlsProfileReconciler.SetupWithManager(mgr); err != nil {
		return nil, err
	}
	return tlsChangeChan, nil
}

// setupLifecycleServerController builds the LifecycleServerReconciler from
// the startup configuration and registers it with the manager, feeding it TLS
// profile changes through tlsProfileChan.
func setupLifecycleServerController(mgr manager.Manager, cfg *startConfig, tlsProfileChan <-chan event.TypedGenericEvent[configv1.TLSProfileSpec]) error {
	r := &controllers.LifecycleServerReconciler{
		Client:                     mgr.GetClient(),
		Log:                        ctrl.Log.WithName("controllers").WithName("lifecycle-server"),
		Scheme:                     mgr.GetScheme(),
		ServerImage:                cfg.ServerImage,
		CatalogSourceLabelSelector: cfg.CatalogSourceLabelSelector,
		CatalogSourceFieldSelector: cfg.CatalogSourceFieldSelector,
		TLSConfigProvider:          cfg.TLSConfigProvider,
	}

	err := r.SetupWithManager(mgr, tlsProfileChan)
	if err != nil {
		return fmt.Errorf("unable to setup lifecycle server controller: %v", err)
	}
	return nil
}
34 changes: 34 additions & 0 deletions cmd/lifecycle-controller/util.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
package main

import (
"fmt"

"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/selection"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"

configv1 "github.com/openshift/api/config/v1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
)

// setupScheme assembles the runtime scheme containing every API group this
// controller works with: core Kubernetes types, OLM v1alpha1 types, and the
// OpenShift config API.
func setupScheme() *runtime.Scheme {
	s := runtime.NewScheme()
	for _, add := range []func(*runtime.Scheme) error{
		clientgoscheme.AddToScheme,
		operatorsv1alpha1.AddToScheme,
		configv1.AddToScheme,
	} {
		// Must panics on registration failure, which can only be a
		// programming error (conflicting registrations).
		utilruntime.Must(add(s))
	}
	return s
}

// catalogPodLabelSelector returns a label selector matching pods with olm.catalogSource label
func catalogPodLabelSelector() labels.Selector {
	requirement, err := labels.NewRequirement("olm.catalogSource", selection.Exists, nil)
	if err != nil {
		// Unreachable in practice: the key is a valid label name and the
		// Exists operator takes no values. Panic so a programming error here
		// (and static-analysis complaints about the ignored error) is caught
		// immediately rather than silently producing an empty selector.
		panic(fmt.Sprintf("BUG: failed to create label requirement: %v", err))
	}

	selector := labels.NewSelector()
	return selector.Add(*requirement)
}
Loading