diff --git a/Taskfile.yaml b/Taskfile.yaml index 8cc14526..54447cd4 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -118,8 +118,8 @@ tasks: cmds: - go run ./cmd/main.go operator - docker:kind: - desc: "Build container image with current tag from kind cluster and load it" + docker:kind:load: + desc: "Build container image with current tag from kind cluster and load it into kind" vars: CONTAINER_RUNTIME: '{{.CONTAINER_RUNTIME | default "docker"}}' KIND_CLUSTER: '{{.KIND_CLUSTER | default "platform-mesh"}}' @@ -143,8 +143,30 @@ tasks: else kind load docker-image {{.IMAGE_NAME}} --name {{.KIND_CLUSTER}} fi + - echo "Image loaded into kind cluster {{.KIND_CLUSTER}}" + docker:kind:restart: + desc: "Restart security-operator deployments to pick up new image" + vars: + DEPLOYMENT_NAMESPACE: '{{.DEPLOYMENT_NAMESPACE | default "platform-mesh-system"}}' + cmds: - kubectl rollout restart deployment/security-operator -n {{.DEPLOYMENT_NAMESPACE}} - kubectl rollout restart deployment/security-operator-generator -n {{.DEPLOYMENT_NAMESPACE}} - kubectl rollout restart deployment/security-operator-initializer -n {{.DEPLOYMENT_NAMESPACE}} - - echo "Image loaded and all deployments restarted" + - kubectl rollout restart deployment/security-operator-terminator -n {{.DEPLOYMENT_NAMESPACE}} + - echo "All deployments restarted" + docker:kind: + desc: "Build container image, load it into kind cluster, and restart deployments" + vars: + DEPLOYMENT_NAMESPACE: '{{.DEPLOYMENT_NAMESPACE | default "platform-mesh-system"}}' + CONTAINER_RUNTIME: '{{.CONTAINER_RUNTIME | default "docker"}}' + KIND_CLUSTER: '{{.KIND_CLUSTER | default "platform-mesh"}}' + cmds: + - task: docker:kind:load + vars: + DEPLOYMENT_NAMESPACE: '{{.DEPLOYMENT_NAMESPACE}}' + CONTAINER_RUNTIME: '{{.CONTAINER_RUNTIME}}' + KIND_CLUSTER: '{{.KIND_CLUSTER}}' + - task: docker:kind:restart + vars: + DEPLOYMENT_NAMESPACE: '{{.DEPLOYMENT_NAMESPACE}}' diff --git a/cmd/initializer.go b/cmd/initializer.go index 6fc43aa3..d4802899 100644 --- a/cmd/initializer.go +++ b/cmd/initializer.go @@ -6,12 +6,17 @@ import ( helmv2 "github.com/fluxcd/helm-controller/api/v2" sourcev1 "github.com/fluxcd/source-controller/api/v1" + openfgav1 "github.com/openfga/api/proto/openfga/v1" "github.com/platform-mesh/security-operator/internal/controller" + "github.com/platform-mesh/security-operator/internal/predicates" "github.com/spf13/cobra" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/predicate" mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" "k8s.io/apimachinery/pkg/runtime" @@ -19,6 +24,7 @@ import ( "k8s.io/client-go/rest" "github.com/kcp-dev/logicalcluster/v3" + mcclient "github.com/kcp-dev/multicluster-provider/client" "github.com/kcp-dev/multicluster-provider/initializingworkspaces" ) @@ -94,12 +100,36 @@ var initializerCmd = &cobra.Command{ initializerCfg.IDP.AdditionalRedirectURLs = []string{} } - if err := controller.NewLogicalClusterReconciler(log, orgClient, initializerCfg, runtimeClient, mgr). - SetupWithManager(mgr, defaultCfg); err != nil { + if err := controller.NewOrgLogicalClusterReconciler(log, orgClient, initializerCfg, runtimeClient, mgr). 
+ SetupWithManager(mgr, defaultCfg, predicates.LogicalClusterIsAccountTypeOrg()); err != nil { setupLog.Error(err, "unable to create controller", "controller", "LogicalCluster") os.Exit(1) } + kcpCfg, err := getKubeconfigFromPath(initializerCfg.KCP.Kubeconfig) + if err != nil { + log.Error().Err(err).Msg("unable to get KCP kubeconfig") + return err + } + + conn, err := grpc.NewClient(initializerCfg.FGA.Target, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Error().Err(err).Msg("unable to create grpc client") + return err + } + fga := openfgav1.NewOpenFGAServiceClient(conn) + + mcc, err := mcclient.New(kcpCfg, client.Options{Scheme: scheme}) + if err != nil { + log.Error().Err(err).Msg("Failed to create multicluster client") + os.Exit(1) + } + if err := controller.NewAccountLogicalClusterReconciler(log, initializerCfg, fga, mcc, mgr). + SetupWithManager(mgr, defaultCfg, predicate.Not(predicates.LogicalClusterIsAccountTypeOrg())); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AccountLogicalCluster") + os.Exit(1) + } + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up health check") os.Exit(1) diff --git a/cmd/model_generator.go b/cmd/model_generator.go index cfb2a948..be0f456b 100644 --- a/cmd/model_generator.go +++ b/cmd/model_generator.go @@ -84,7 +84,7 @@ var modelGeneratorCmd = &cobra.Command{ return err } - if err := controller.NewAPIBindingReconciler(log, mgr). + if err := controller.NewAPIBindingReconciler(ctx, log, mgr). SetupWithManager(mgr, defaultCfg); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Resource") return err diff --git a/cmd/operator.go b/cmd/operator.go index 395a62c1..3a6bdf55 100644 --- a/cmd/operator.go +++ b/cmd/operator.go @@ -12,6 +12,7 @@ import ( "github.com/platform-mesh/golang-commons/logger" "github.com/platform-mesh/golang-commons/sentry" corev1alpha1 "github.com/platform-mesh/security-operator/api/v1alpha1" + iclient "github.com/platform-mesh/security-operator/internal/client" "github.com/platform-mesh/security-operator/internal/controller" "github.com/spf13/cobra" "google.golang.org/grpc" @@ -154,7 +155,7 @@ var operatorCmd = &cobra.Command{ fga := openfgav1.NewOpenFGAServiceClient(conn) - if err = controller.NewStoreReconciler(log, fga, mgr). + if err = controller.NewStoreReconciler(ctx, log, fga, mgr). 
SetupWithManager(mgr, defaultCfg); err != nil { log.Error().Err(err).Str("controller", "store").Msg("unable to create controller") return err @@ -199,7 +200,7 @@ var operatorCmd = &cobra.Command{ // this function can be removed after the operator has migrated the authz models in all environments func migrateAuthorizationModels(ctx context.Context, config *rest.Config, scheme *runtime.Scheme, getClusterClient NewLogicalClusterClientFunc) error { - allClient, err := controller.GetAllClient(config, scheme) + allClient, err := iclient.NewForAllPlatformMeshResources(ctx, config, scheme) if err != nil { return fmt.Errorf("failed to create all-cluster client: %w", err) } diff --git a/cmd/root.go b/cmd/root.go index d1f725e1..29e41b7c 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -32,6 +32,7 @@ var rootCmd = &cobra.Command{ func init() { rootCmd.AddCommand(initializerCmd) + rootCmd.AddCommand(terminatorCmd) rootCmd.AddCommand(operatorCmd) rootCmd.AddCommand(modelGeneratorCmd) rootCmd.AddCommand(initContainerCmd) diff --git a/cmd/terminator.go b/cmd/terminator.go new file mode 100644 index 00000000..58fe5f56 --- /dev/null +++ b/cmd/terminator.go @@ -0,0 +1,143 @@ +package cmd + +import ( + "crypto/tls" + "os" + "strings" + + openfgav1 "github.com/openfga/api/proto/openfga/v1" + platformeshconfig "github.com/platform-mesh/golang-commons/config" + iclient "github.com/platform-mesh/security-operator/internal/client" + "github.com/platform-mesh/security-operator/internal/terminatingworkspaces" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/platform-mesh/security-operator/internal/config" + "github.com/platform-mesh/security-operator/internal/controller" + "github.com/platform-mesh/security-operator/internal/predicates" + "github.com/spf13/cobra" + "github.com/spf13/viper" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/predicate" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + + "k8s.io/client-go/rest" + + "github.com/kcp-dev/logicalcluster/v3" + kcptenancyv1alphav1 "github.com/kcp-dev/sdk/apis/tenancy/v1alpha1" + + mcclient "github.com/kcp-dev/multicluster-provider/client" +) + +var terminatorCfg config.Config + +var terminatorCmd = &cobra.Command{ + Use: "terminator", + Short: "FGA terminator for account workspaces", + RunE: func(cmd *cobra.Command, args []string) error { + kcpCfg, err := getKubeconfigFromPath(terminatorCfg.KCP.Kubeconfig) + if err != nil { + log.Error().Err(err).Msg("unable to get KCP kubeconfig") + os.Exit(1) + } + + mgrOpts := ctrl.Options{ + Scheme: scheme, + LeaderElection: defaultCfg.LeaderElection.Enabled, + LeaderElectionID: "security-operator-terminator.platform-mesh.io", + HealthProbeBindAddress: defaultCfg.HealthProbeBindAddress, + Metrics: server.Options{ + BindAddress: defaultCfg.Metrics.BindAddress, + TLSOpts: []func(*tls.Config){ + func(c *tls.Config) { + log.Info().Msg("disabling http/2") + c.NextProtos = []string{"http/1.1"} + }, + }, + }, + } + if defaultCfg.LeaderElection.Enabled { + inClusterCfg, err := rest.InClusterConfig() + if err != nil { + log.Error().Err(err).Msg("unable to create in-cluster config") + return err + } + mgrOpts.LeaderElectionConfig = inClusterCfg + } + + mcc, err := mcclient.New(kcpCfg, client.Options{Scheme: scheme}) + if err != nil { + log.Error().Err(err).Msg("Failed to create multicluster client") + 
os.Exit(1) + } + rootClient, err := iclient.NewForLogicalCluster(kcpCfg, scheme, logicalcluster.Name("root")) + if err != nil { + log.Error().Err(err).Msgf("Failed to get root client") + os.Exit(1) + } + var wt kcptenancyv1alphav1.WorkspaceType + if err := rootClient.Get(cmd.Context(), client.ObjectKey{ + Name: terminatorCfg.WorkspaceTypeName, + }, &wt); err != nil { + log.Error().Err(err).Msgf("Failed to get WorkspaceType %s", terminatorCfg.WorkspaceTypeName) + os.Exit(1) + } + if len(wt.Status.VirtualWorkspaces) == 0 { + log.Error().Err(err).Msgf("No VirtualWorkspaces found in WorkspaceType %s", terminatorCfg.WorkspaceTypeName) + os.Exit(1) + } + + provider, err := terminatingworkspaces.New(kcpCfg, terminatorCfg.WorkspaceTypeName, + terminatingworkspaces.Options{ + Scheme: mgrOpts.Scheme, + }, + ) + + mgr, err := mcmanager.New(kcpCfg, provider, mgrOpts) + if err != nil { + log.Error().Err(err).Msg("Failed to create manager") + os.Exit(1) + } + + conn, err := grpc.NewClient(terminatorCfg.FGA.Target, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Error().Err(err).Msg("unable to create grpc client") + os.Exit(1) + } + fga := openfgav1.NewOpenFGAServiceClient(conn) + + if err := controller.NewAccountLogicalClusterReconciler(log, terminatorCfg, fga, mcc, mgr). + SetupWithManager(mgr, defaultCfg, predicate.Not(predicates.LogicalClusterIsAccountTypeOrg())); err != nil { + log.Error().Err(err).Msg("Unable to create AccountLogicalClusterTerminator") + os.Exit(1) + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + log.Error().Err(err).Msg("unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + log.Error().Err(err).Msg("unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + + return mgr.Start(ctrl.SetupSignalHandler()) + }, +} + +func init() { + rootCmd.AddCommand(terminatorCmd) + + terminatorV := viper.NewWithOptions( + viper.EnvKeyReplacer(strings.NewReplacer("-", "_")), + ) + terminatorV.AutomaticEnv() + if err := platformeshconfig.BindConfigToFlags(terminatorV, terminatorCmd, &terminatorCfg); err != nil { + panic(err) + } +} diff --git a/go.mod b/go.mod index a0c4fff3..1f7e331e 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,13 @@ module github.com/platform-mesh/security-operator -go 1.25.0 +go 1.25.7 + +replace ( + k8s.io/api => k8s.io/api v0.34.0 + k8s.io/apiserver => k8s.io/apiserver v0.34.0 + k8s.io/client-go => k8s.io/client-go v0.34.0 + k8s.io/component-base => k8s.io/component-base v0.34.0 +) require ( github.com/coreos/go-oidc v2.5.0+incompatible @@ -9,7 +16,7 @@ require ( github.com/go-logr/logr v1.4.3 github.com/google/gnostic-models v0.7.1 github.com/kcp-dev/logicalcluster/v3 v3.0.5 - github.com/kcp-dev/multicluster-provider v0.4.0 + github.com/kcp-dev/multicluster-provider v0.5.0 github.com/kcp-dev/sdk v0.30.0 github.com/openfga/api/proto v0.0.0-20260217232149-f917ddb000ce github.com/openfga/language/pkg/go v0.2.0-beta.2.0.20251027165255-0f8f255e5f6c @@ -17,8 +24,10 @@ require ( github.com/platform-mesh/golang-commons v0.9.40 github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.2 + github.com/spf13/pflag v1.0.10 github.com/spf13/viper v1.21.0 github.com/stretchr/testify v1.11.1 + go.uber.org/zap v1.27.1 golang.org/x/oauth2 v0.35.0 google.golang.org/grpc v1.79.1 google.golang.org/protobuf v1.36.11 @@ -27,8 +36,8 @@ require ( k8s.io/apimachinery v0.35.1 k8s.io/client-go v0.35.1 k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 
- sigs.k8s.io/controller-runtime v0.22.4 - sigs.k8s.io/multicluster-runtime v0.22.4-beta.1 + sigs.k8s.io/controller-runtime v0.23.1 + sigs.k8s.io/multicluster-runtime v0.23.1 sigs.k8s.io/yaml v1.6.0 ) @@ -50,11 +59,13 @@ require ( github.com/getsentry/sentry-go v0.42.0 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-logr/zerologr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect @@ -87,7 +98,6 @@ require ( github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spf13/afero v1.15.0 // indirect github.com/spf13/cast v1.10.0 // indirect - github.com/spf13/pflag v1.0.10 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/vektah/gqlparser/v2 v2.5.31 // indirect @@ -101,6 +111,7 @@ require ( go.opentelemetry.io/otel/sdk v1.40.0 // indirect go.opentelemetry.io/otel/trace v1.40.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect + go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.47.0 // indirect @@ -122,5 +133,5 @@ require ( k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect ) diff --git a/go.sum b/go.sum index 44ace198..ae42ebe2 100644 --- a/go.sum +++ b/go.sum @@ -76,6 +76,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY= github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -114,10 +116,12 @@ github.com/kcp-dev/apimachinery/v2 v2.30.0 h1:bj7lVVPJj5UnQFCWhXVAKC+eNaIMKGGxpq github.com/kcp-dev/apimachinery/v2 v2.30.0/go.mod h1:DOv0iw5tcgzFBhudwLFe2WHCLqtlgNkuO4AcqbZ4zVo= github.com/kcp-dev/logicalcluster/v3 v3.0.5 h1:JbYakokb+5Uinz09oTXomSUJVQsqfxEvU4RyHUYxHOU= github.com/kcp-dev/logicalcluster/v3 v3.0.5/go.mod h1:EWBUBxdr49fUB1cLMO4nOdBWmYifLbP1LfoL20KkXYY= -github.com/kcp-dev/multicluster-provider v0.4.0 h1:Segd0b2bTkBaSfodq3IFUbaUAA28S8KIl71W9Bftn3Y= -github.com/kcp-dev/multicluster-provider v0.4.0/go.mod h1:4QGU39wyNztoYNatdWqbdOV6/R9ZzaIh4DdSj30dm9o= +github.com/kcp-dev/multicluster-provider v0.5.0 h1:G5YW2POVftsnxUfK2vo7anX5R1I3gVjjNbo/4i5ttbo= +github.com/kcp-dev/multicluster-provider v0.5.0/go.mod 
h1:eJohrSXqLmpjfTSFBbZMoq4Osr57UKg9ZokvhCPNmHc= github.com/kcp-dev/sdk v0.30.0 h1:BdDiKJ7SeVfzLIxueQwbADTrH7bfZ7b5ACYSrx6P93Y= github.com/kcp-dev/sdk v0.30.0/go.mod h1:H3PkpM33QqwPMgGOOw3dfqbQ8dF2gu4NeIsufSlS5KE= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -217,6 +221,8 @@ github.com/vektah/gqlparser/v2 v2.5.31 h1:YhWGA1mfTjID7qJhd1+Vxhpk5HTgydrGU9IgkW github.com/vektah/gqlparser/v2 v2.5.31/go.mod h1:c1I28gSOVNzlfc4WuDlqU7voQnsqI6OG2amkBAFmgts= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= @@ -247,18 +253,33 @@ go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ= golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -266,12 +287,22 @@ golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= @@ -296,33 +327,33 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.35.1 
h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q= -k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= k8s.io/apiextensions-apiserver v0.35.1 h1:p5vvALkknlOcAqARwjS20kJffgzHqwyQRM8vHLwgU7w= k8s.io/apiextensions-apiserver v0.35.1/go.mod h1:2CN4fe1GZ3HMe4wBr25qXyJnJyZaquy4nNlNmb3R7AQ= k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU= k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= -k8s.io/apiserver v0.35.1 h1:potxdhhTL4i6AYAa2QCwtlhtB1eCdWQFvJV6fXgJzxs= -k8s.io/apiserver v0.35.1/go.mod h1:BiL6Dd3A2I/0lBnteXfWmCFobHM39vt5+hJQd7Lbpi4= -k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM= -k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA= -k8s.io/component-base v0.35.1 h1:XgvpRf4srp037QWfGBLFsYMUQJkE5yMa94UsJU7pmcE= -k8s.io/component-base v0.35.1/go.mod h1:HI/6jXlwkiOL5zL9bqA3en1Ygv60F03oEpnuU1G56Bs= +k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg= +k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ= +k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= +k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= +k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= +k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 h1:AZYQSJemyQB5eRxqcPky+/7EdBj0xi3g0ZcxxJ7vbWU= k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= -sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= -sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/controller-runtime v0.23.1 h1:TjJSM80Nf43Mg21+RCy3J70aj/W6KyvDtOlpKf+PupE= +sigs.k8s.io/controller-runtime v0.23.1/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/multicluster-runtime v0.22.4-beta.1 h1:0XWbDINepM9UOyLkqhG4g7BtGBFKCDvZFyPsw1vufKE= -sigs.k8s.io/multicluster-runtime v0.22.4-beta.1/go.mod h1:zSMb4mC8MAZK42l8eE1ywkeX6vjuNRenYzJ1w+GPdfI= +sigs.k8s.io/multicluster-runtime v0.23.1 h1:isjVh6zBuk/U1HjYm22knRZmFsn6sFinmyvV+/4puCc= +sigs.k8s.io/multicluster-runtime v0.23.1/go.mod h1:ri1Gvx7Qehy5nis6OnTgSpJIWaf2SuorHDwF/jvbWvM= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs= 
+sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/client/all_platformmesh.go b/internal/client/all_platformmesh.go new file mode 100644 index 00000000..af67d066 --- /dev/null +++ b/internal/client/all_platformmesh.go @@ -0,0 +1,52 @@ +package client + +import ( + "context" + "fmt" + "net/url" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + + "github.com/kcp-dev/logicalcluster/v3" + kcpapisv1alpha1 "github.com/kcp-dev/sdk/apis/apis/v1alpha1" +) + +const ( + corePlatformMeshIOAPIExportEndpointSlice = "core.platform-mesh.io" + platformMeshSystemWorkspace = "root:platform-mesh-system" +) + +// NewForAllPlatformMeshResources returns a client that can query all resources +// of the core.platform-mesh.io APIExportEndpoint slice, based on a given KCP +// base config. +func NewForAllPlatformMeshResources(ctx context.Context, config *rest.Config, scheme *runtime.Scheme) (client.Client, error) { + platformMeshClient, err := NewForLogicalCluster(config, scheme, logicalcluster.Name(platformMeshSystemWorkspace)) + if err != nil { + return nil, fmt.Errorf("creating %s client: %w", platformMeshSystemWorkspace, err) + } + + var apiExportEndpointSlice kcpapisv1alpha1.APIExportEndpointSlice + if err := platformMeshClient.Get(ctx, types.NamespacedName{Name: corePlatformMeshIOAPIExportEndpointSlice}, &apiExportEndpointSlice); err != nil { + return nil, fmt.Errorf("getting %s APIExportEndpointSlice: %w", corePlatformMeshIOAPIExportEndpointSlice, err) + } + + if len(apiExportEndpointSlice.Status.APIExportEndpoints) == 0 { + return nil, fmt.Errorf("no endpoints found in %s APIExportEndpointSlice", corePlatformMeshIOAPIExportEndpointSlice) + } + + virtualWorkspaceUrl, err := url.Parse(apiExportEndpointSlice.Status.APIExportEndpoints[0].URL) + if err != nil { + return nil, fmt.Errorf("parsing virtual workspace URL: %w", err) + } + + path, err := url.JoinPath(virtualWorkspaceUrl.Path, "clusters", logicalcluster.Wildcard.String()) + if err != nil { + return nil, fmt.Errorf("joining path: %w", err) + } + + return clientForPath(config, scheme, path) +} diff --git a/internal/client/logicalcluster.go b/internal/client/logicalcluster.go new file mode 100644 index 00000000..72c951f1 --- /dev/null +++ b/internal/client/logicalcluster.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "net/url" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + + "github.com/kcp-dev/logicalcluster/v3" +) + +// NewForLogicalCluster returns a client for a given logical cluster name or +// path, based on a KCP base config. +func NewForLogicalCluster(config *rest.Config, scheme *runtime.Scheme, clusterKey logicalcluster.Name) (client.Client, error) { + path := fmt.Sprintf("/clusters/%s", clusterKey) + + return clientForPath(config, scheme, path) +} + +// clientForPath returns a client for a given raw URL path.
+func clientForPath(config *rest.Config, scheme *runtime.Scheme, path string) (client.Client, error) { + copy := rest.CopyConfig(config) + + parsed, err := url.Parse(copy.Host) + if err != nil { + return nil, fmt.Errorf("parsing host from config: %w", err) + } + parsed.Path = path + copy.Host = parsed.String() + + return client.New(copy, client.Options{ + Scheme: scheme, + }) +} diff --git a/internal/config/config.go b/internal/config/config.go index 2afc8af3..e4e23658 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -16,7 +16,10 @@ type InitializerConfig struct { // Config struct to hold the app config type Config struct { FGA struct { - Target string `mapstructure:"fga-target"` + Target string `mapstructure:"fga-target"` + ObjectType string `mapstructure:"fga-object-type" default:"core_platform-mesh_io_account"` + ParentRelation string `mapstructure:"fga-parent-relation" default:"parent"` + CreatorRelation string `mapstructure:"fga-creator-relation" default:"owner"` } `mapstructure:",squash"` KCP struct { Kubeconfig string `mapstructure:"kcp-kubeconfig" default:"/api-kubeconfig/kubeconfig"` @@ -61,3 +64,7 @@ type Config struct { func (config Config) InitializerName() string { return config.WorkspacePath + ":" + config.WorkspaceTypeName } + +func (config Config) TerminatorName() string { + return config.WorkspacePath + ":" + config.WorkspaceTypeName +} diff --git a/internal/controller/accountlogicalcluster_controller.go b/internal/controller/accountlogicalcluster_controller.go new file mode 100644 index 00000000..f9641695 --- /dev/null +++ b/internal/controller/accountlogicalcluster_controller.go @@ -0,0 +1,52 @@ +package controller + +import ( + "context" + + openfgav1 "github.com/openfga/api/proto/openfga/v1" + platformeshconfig "github.com/platform-mesh/golang-commons/config" + "github.com/platform-mesh/golang-commons/controller/lifecycle/builder" + "github.com/platform-mesh/golang-commons/controller/lifecycle/multicluster" + lifecyclesubroutine "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" + "github.com/platform-mesh/golang-commons/logger" + "github.com/platform-mesh/security-operator/internal/config" + "github.com/platform-mesh/security-operator/internal/subroutine" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/predicate" + mccontext "sigs.k8s.io/multicluster-runtime/pkg/context" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile" + + mcclient "github.com/kcp-dev/multicluster-provider/client" + kcpcorev1alpha1 "github.com/kcp-dev/sdk/apis/core/v1alpha1" +) + +// AccountLogicalClusterReconciler acts as an initializer for account workspaces. +type AccountLogicalClusterReconciler struct { + log *logger.Logger + + mclifecycle *multicluster.LifecycleManager +} + +func NewAccountLogicalClusterReconciler(log *logger.Logger, cfg config.Config, fga openfgav1.OpenFGAServiceClient, mcc mcclient.ClusterClient, mgr mcmanager.Manager) *AccountLogicalClusterReconciler { + return &AccountLogicalClusterReconciler{ + log: log, + mclifecycle: builder.NewBuilder("security", "AccountLogicalClusterReconciler", []lifecyclesubroutine.Subroutine{ + subroutine.NewAccountTuplesSubroutine(mcc, mgr, fga, cfg.FGA.CreatorRelation, cfg.FGA.ParentRelation, cfg.FGA.ObjectType), + }, log). + WithReadOnly(). + WithStaticThenExponentialRateLimiter(). + WithInitializer(cfg.InitializerName()). + WithTerminator(cfg.TerminatorName()). 
+ BuildMultiCluster(mgr), + } +} + +func (r *AccountLogicalClusterReconciler) Reconcile(ctx context.Context, req mcreconcile.Request) (ctrl.Result, error) { + ctxWithCluster := mccontext.WithCluster(ctx, req.ClusterName) + return r.mclifecycle.Reconcile(ctxWithCluster, req, &kcpcorev1alpha1.LogicalCluster{}) +} + +func (r *AccountLogicalClusterReconciler) SetupWithManager(mgr mcmanager.Manager, cfg *platformeshconfig.CommonServiceConfig, evp ...predicate.Predicate) error { + return r.mclifecycle.SetupWithManager(mgr, cfg.MaxConcurrentReconciles, "AccountLogicalCluster", &kcpcorev1alpha1.LogicalCluster{}, cfg.DebugLabelValue, r, r.log, evp...) +} diff --git a/internal/controller/apibinding_controller.go b/internal/controller/apibinding_controller.go index bd10e729..7bfdb353 100644 --- a/internal/controller/apibinding_controller.go +++ b/internal/controller/apibinding_controller.go @@ -9,6 +9,7 @@ import ( "github.com/platform-mesh/golang-commons/controller/lifecycle/multicluster" lifecyclesubroutine "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" "github.com/platform-mesh/golang-commons/logger" + iclient "github.com/platform-mesh/security-operator/internal/client" "github.com/platform-mesh/security-operator/internal/subroutine" "github.com/rs/zerolog/log" ctrl "sigs.k8s.io/controller-runtime" @@ -76,8 +77,8 @@ func GetAllClient(config *rest.Config, schema *runtime.Scheme) (client.Client, e return allClient, nil } -func NewAPIBindingReconciler(logger *logger.Logger, mcMgr mcmanager.Manager) *APIBindingReconciler { - allclient, err := GetAllClient(mcMgr.GetLocalManager().GetConfig(), mcMgr.GetLocalManager().GetScheme()) +func NewAPIBindingReconciler(ctx context.Context, logger *logger.Logger, mcMgr mcmanager.Manager) *APIBindingReconciler { + allclient, err := iclient.NewForAllPlatformMeshResources(ctx, mcMgr.GetLocalManager().GetConfig(), mcMgr.GetLocalManager().GetScheme()) if err != nil { log.Fatal().Err(err).Msg("unable to create new client") } diff --git a/internal/controller/initializer_controller.go b/internal/controller/orglogicalcluster_controller.go similarity index 71% rename from internal/controller/initializer_controller.go rename to internal/controller/orglogicalcluster_controller.go index e907a791..6840f8c6 100644 --- a/internal/controller/initializer_controller.go +++ b/internal/controller/orglogicalcluster_controller.go @@ -20,17 +20,17 @@ import ( kcpcorev1alpha1 "github.com/kcp-dev/sdk/apis/core/v1alpha1" ) -type LogicalClusterReconciler struct { +type OrgLogicalClusterReconciler struct { log *logger.Logger mclifecycle *multicluster.LifecycleManager } -func NewLogicalClusterReconciler(log *logger.Logger, orgClient client.Client, cfg config.Config, inClusterClient client.Client, mgr mcmanager.Manager) *LogicalClusterReconciler { +func NewOrgLogicalClusterReconciler(log *logger.Logger, orgClient client.Client, cfg config.Config, inClusterClient client.Client, mgr mcmanager.Manager) *OrgLogicalClusterReconciler { var subroutines []lifecyclesubroutine.Subroutine if cfg.Initializer.WorkspaceInitializerEnabled { - subroutines = append(subroutines, subroutine.NewWorkspaceInitializer(orgClient, cfg, mgr)) + subroutines = append(subroutines, subroutine.NewWorkspaceInitializer(orgClient, cfg, mgr, cfg.FGA.CreatorRelation, cfg.FGA.ObjectType)) } if cfg.Initializer.IDPEnabled { subroutines = append(subroutines, subroutine.NewIDPSubroutine(orgClient, mgr, cfg)) @@ -41,23 +41,22 @@ func NewLogicalClusterReconciler(log *logger.Logger, orgClient client.Client, cf 
if cfg.Initializer.WorkspaceAuthEnabled { subroutines = append(subroutines, subroutine.NewWorkspaceAuthConfigurationSubroutine(orgClient, inClusterClient, mgr, cfg)) } - // RemoveInitializer is always included - it's the final cleanup step - subroutines = append(subroutines, subroutine.NewRemoveInitializer(mgr, cfg)) - return &LogicalClusterReconciler{ + return &OrgLogicalClusterReconciler{ log: log, - mclifecycle: builder.NewBuilder("logicalcluster", "LogicalClusterReconciler", subroutines, log). + mclifecycle: builder.NewBuilder("logicalcluster", "OrgLogicalClusterReconciler", subroutines, log). WithReadOnly(). WithStaticThenExponentialRateLimiter(). + WithInitializer(cfg.InitializerName()). BuildMultiCluster(mgr), } } -func (r *LogicalClusterReconciler) Reconcile(ctx context.Context, req mcreconcile.Request) (ctrl.Result, error) { +func (r *OrgLogicalClusterReconciler) Reconcile(ctx context.Context, req mcreconcile.Request) (ctrl.Result, error) { ctxWithCluster := mccontext.WithCluster(ctx, req.ClusterName) return r.mclifecycle.Reconcile(ctxWithCluster, req, &kcpcorev1alpha1.LogicalCluster{}) } -func (r *LogicalClusterReconciler) SetupWithManager(mgr mcmanager.Manager, cfg *platformeshconfig.CommonServiceConfig, evp ...predicate.Predicate) error { +func (r *OrgLogicalClusterReconciler) SetupWithManager(mgr mcmanager.Manager, cfg *platformeshconfig.CommonServiceConfig, evp ...predicate.Predicate) error { return r.mclifecycle.SetupWithManager(mgr, cfg.MaxConcurrentReconciles, "LogicalCluster", &kcpcorev1alpha1.LogicalCluster{}, cfg.DebugLabelValue, r, r.log, evp...) } diff --git a/internal/controller/store_controller.go b/internal/controller/store_controller.go index 912ff60c..f0daa877 100644 --- a/internal/controller/store_controller.go +++ b/internal/controller/store_controller.go @@ -10,6 +10,7 @@ import ( lifecyclesubroutine "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" "github.com/platform-mesh/golang-commons/logger" corev1alpha1 "github.com/platform-mesh/security-operator/api/v1alpha1" + iclient "github.com/platform-mesh/security-operator/internal/client" "github.com/platform-mesh/security-operator/internal/subroutine" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -35,8 +36,8 @@ type StoreReconciler struct { mclifecycle *multicluster.LifecycleManager } -func NewStoreReconciler(log *logger.Logger, fga openfgav1.OpenFGAServiceClient, mcMgr mcmanager.Manager) *StoreReconciler { - allClient, err := GetAllClient(mcMgr.GetLocalManager().GetConfig(), mcMgr.GetLocalManager().GetScheme()) +func NewStoreReconciler(ctx context.Context, log *logger.Logger, fga openfgav1.OpenFGAServiceClient, mcMgr mcmanager.Manager) *StoreReconciler { + allClient, err := iclient.NewForAllPlatformMeshResources(ctx, mcMgr.GetLocalManager().GetConfig(), mcMgr.GetLocalManager().GetScheme()) if err != nil { log.Fatal().Err(err).Msg("unable to create new client") } diff --git a/internal/predicates/accounttype.go b/internal/predicates/accounttype.go new file mode 100644 index 00000000..a162d91c --- /dev/null +++ b/internal/predicates/accounttype.go @@ -0,0 +1,26 @@ +package predicates + +import ( + "strings" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + kcpcorev1alpha1 "github.com/kcp-dev/sdk/apis/core/v1alpha1" +) + +const kcpPathAnnotation = "kcp.io/path" + +// LogicalClusterIsAccountTypeOrg returns a predicate that filters for +// LogicalClusters belonging to an Account of type "org", i.e. 
children of the +// "root:orgs" cluster. +func LogicalClusterIsAccountTypeOrg() predicate.Predicate { + return predicate.NewPredicateFuncs(func(object client.Object) bool { + lc := object.(*kcpcorev1alpha1.LogicalCluster) + p := lc.Annotations[kcpPathAnnotation] + + parts := strings.Split(p, ":") + + return len(parts) == 3 && parts[0] == "root" && parts[1] == "orgs" + }) +} diff --git a/internal/subroutine/account_tuples.go b/internal/subroutine/account_tuples.go new file mode 100644 index 00000000..5bf2c95d --- /dev/null +++ b/internal/subroutine/account_tuples.go @@ -0,0 +1,191 @@ +package subroutine + +import ( + "context" + "fmt" + + openfgav1 "github.com/openfga/api/proto/openfga/v1" + accountsv1alpha1 "github.com/platform-mesh/account-operator/api/v1alpha1" + "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" + lifecyclesubroutine "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" + "github.com/platform-mesh/golang-commons/errors" + "github.com/platform-mesh/golang-commons/logger" + iclient "github.com/platform-mesh/security-operator/internal/client" + "github.com/platform-mesh/security-operator/pkg/fga" + kerrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + mccontext "sigs.k8s.io/multicluster-runtime/pkg/context" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + + "github.com/kcp-dev/logicalcluster/v3" + mcclient "github.com/kcp-dev/multicluster-provider/client" + kcpcore "github.com/kcp-dev/sdk/apis/core" + kcpcorev1alpha1 "github.com/kcp-dev/sdk/apis/core/v1alpha1" +) + +const accountTuplesTerminatorFinalizer = "core.platform-mesh.io/account-tuples-terminator" + +// AccountTuplesSubroutine creates FGA tuples for Accounts not of the +// "org"-type when initializing, and deletes them when terminating. +type AccountTuplesSubroutine struct { + mgr mcmanager.Manager + mcc mcclient.ClusterClient + fga openfgav1.OpenFGAServiceClient + + objectType string + parentRelation string + creatorRelation string +} + +// Process implements lifecycle.Subroutine as no-op since Initialize handles the +// work when not in deletion. +func (s *AccountTuplesSubroutine) Process(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { + return ctrl.Result{}, nil +} + +// Initialize implements lifecycle.Initializer.
+func (s *AccountTuplesSubroutine) Initialize(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { + lc := instance.(*kcpcorev1alpha1.LogicalCluster) + acc, ai, opErr := AccountAndInfoForLogicalCluster(ctx, s.mgr, lc) + if opErr != nil { + return ctrl.Result{}, opErr + } + + if updated := controllerutil.AddFinalizer(&ai, accountTuplesTerminatorFinalizer); updated { + lcID, ok := mccontext.ClusterFrom(ctx) + if !ok { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("cluster name not found in context"), true, true) + } + + lcClient, err := iclient.NewForLogicalCluster(s.mgr.GetLocalManager().GetConfig(), s.mgr.GetLocalManager().GetScheme(), logicalcluster.Name(lcID)) + if err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("getting client: %w", err), true, true) + } + + if err := lcClient.Update(ctx, &ai); err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("updating AccountInfo to set finalizer: %w", err), true, true) + } + } + + // Ensure the necessary tuples in OpenFGA. + tuples, err := fga.TuplesForAccount(acc, ai, s.creatorRelation, s.parentRelation, s.objectType) + if err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("building tuples for account: %w", err), true, true) + } + if err := fga.NewTupleManager(s.fga, ai.Spec.FGA.Store.Id, fga.AuthorizationModelIDLatest, logger.LoadLoggerFromContext(ctx)).Apply(ctx, tuples); err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("applying tuples for Account: %w", err), true, true) + } + + return ctrl.Result{}, nil +} + +// Terminate implements lifecycle.Terminator. +func (s *AccountTuplesSubroutine) Terminate(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { + lc := instance.(*kcpcorev1alpha1.LogicalCluster) + acc, ai, opErr := AccountAndInfoForLogicalCluster(ctx, s.mgr, lc) + if opErr != nil { + return ctrl.Result{}, opErr + } + + // Delete the corresponding tuples in OpenFGA. + tuples, err := fga.TuplesForAccount(acc, ai, s.creatorRelation, s.parentRelation, s.objectType) + if err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("building tuples for account: %w", err), true, true) + } + if err := fga.NewTupleManager(s.fga, ai.Spec.FGA.Store.Id, fga.AuthorizationModelIDLatest, logger.LoadLoggerFromContext(ctx)).Delete(ctx, tuples); err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("deleting tuples for Account: %w", err), true, true) + } + + if updated := controllerutil.RemoveFinalizer(&ai, accountTuplesTerminatorFinalizer); updated { + lcID, ok := mccontext.ClusterFrom(ctx) + if !ok { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("cluster name not found in context"), true, true) + } + + lcClient, err := iclient.NewForLogicalCluster(s.mgr.GetLocalManager().GetConfig(), s.mgr.GetLocalManager().GetScheme(), logicalcluster.Name(lcID)) + if err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("getting client: %w", err), true, true) + } + + if err := lcClient.Update(ctx, &ai); err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("updating AccountInfo to remove finalizer: %w", err), true, true) + } + } + + return ctrl.Result{}, nil +} + +// Finalize implements lifecycle.Subroutine. 
+func (s *AccountTuplesSubroutine) Finalize(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { + return ctrl.Result{}, nil +} + +// Finalizers implements lifecycle.Subroutine. +func (s *AccountTuplesSubroutine) Finalizers(_ runtimeobject.RuntimeObject) []string { + return []string{} +} + +// GetName implements lifecycle.Subroutine. +func (s *AccountTuplesSubroutine) GetName() string { return "AccountTuplesSubroutine" } + +func NewAccountTuplesSubroutine(mcc mcclient.ClusterClient, mgr mcmanager.Manager, fga openfgav1.OpenFGAServiceClient, creatorRelation, parentRelation, objectType string) *AccountTuplesSubroutine { + return &AccountTuplesSubroutine{ + mgr: mgr, + mcc: mcc, + fga: fga, + creatorRelation: creatorRelation, + parentRelation: parentRelation, + objectType: objectType, + } +} + +var ( + _ lifecyclesubroutine.Subroutine = &AccountTuplesSubroutine{} + _ lifecyclesubroutine.Initializer = &AccountTuplesSubroutine{} + _ lifecyclesubroutine.Terminator = &AccountTuplesSubroutine{} +) + +// AccountAndInfoForLogicalCluster fetches the AccountInfo from the +// LogicalCluster and the corresponding Account from the parent account's +// workspace. +func AccountAndInfoForLogicalCluster(ctx context.Context, mgr mcmanager.Manager, lc *kcpcorev1alpha1.LogicalCluster) (accountsv1alpha1.Account, accountsv1alpha1.AccountInfo, errors.OperatorError) { + if lc.Annotations[kcpcore.LogicalClusterPathAnnotationKey] == "" { + return accountsv1alpha1.Account{}, accountsv1alpha1.AccountInfo{}, errors.NewOperatorError(fmt.Errorf("annotation on LogicalCluster is not set"), true, true) + } + lcID, ok := mccontext.ClusterFrom(ctx) + if !ok { + return accountsv1alpha1.Account{}, accountsv1alpha1.AccountInfo{}, errors.NewOperatorError(fmt.Errorf("cluster name not found in context"), true, true) + } + + // The AccountInfo in the logical cluster belongs to the Account the + // Workspace was created for + lcClient, err := iclient.NewForLogicalCluster(mgr.GetLocalManager().GetConfig(), mgr.GetLocalManager().GetScheme(), logicalcluster.Name(lcID)) + if err != nil { + return accountsv1alpha1.Account{}, accountsv1alpha1.AccountInfo{}, errors.NewOperatorError(fmt.Errorf("getting client: %w", err), true, true) + } + var ai accountsv1alpha1.AccountInfo + if err := lcClient.Get(ctx, client.ObjectKey{ + Name: "account", + }, &ai); err != nil && !kerrors.IsNotFound(err) { + return accountsv1alpha1.Account{}, accountsv1alpha1.AccountInfo{}, errors.NewOperatorError(fmt.Errorf("getting AccountInfo for LogicalCluster: %w", err), true, true) + } else if kerrors.IsNotFound(err) { + return accountsv1alpha1.Account{}, accountsv1alpha1.AccountInfo{}, errors.NewOperatorError(fmt.Errorf("AccountInfo not found"), true, true) + } + + // The actual Account resource belonging to the Workspace needs to be + // fetched from the parent Account's Workspace + parentAccountClient, err := iclient.NewForLogicalCluster(mgr.GetLocalManager().GetConfig(), mgr.GetLocalManager().GetScheme(), logicalcluster.Name(ai.Spec.ParentAccount.Path)) + if err != nil { + return accountsv1alpha1.Account{}, accountsv1alpha1.AccountInfo{}, errors.NewOperatorError(fmt.Errorf("getting parent account cluster client: %w", err), true, true) + } + var acc accountsv1alpha1.Account + if err := parentAccountClient.Get(ctx, client.ObjectKey{ + Name: ai.Spec.Account.Name, + }, &acc); err != nil { + return accountsv1alpha1.Account{}, accountsv1alpha1.AccountInfo{}, errors.NewOperatorError(fmt.Errorf("getting Account in parent account 
cluster: %w", err), true, true) + } + + return acc, ai, nil +} diff --git a/internal/subroutine/idp.go b/internal/subroutine/idp.go index e0312e0d..6ffae6fc 100644 --- a/internal/subroutine/idp.go +++ b/internal/subroutine/idp.go @@ -48,7 +48,10 @@ func NewIDPSubroutine(orgsClient client.Client, mgr mcmanager.Manager, cfg confi } } -var _ lifecyclesubroutine.Subroutine = &IDPSubroutine{} +var ( + _ lifecyclesubroutine.Subroutine = &IDPSubroutine{} + _ lifecyclesubroutine.Initializer = &IDPSubroutine{} +) type IDPSubroutine struct { orgsClient client.Client @@ -70,7 +73,14 @@ func (i *IDPSubroutine) Finalizers(_ runtimeobject.RuntimeObject) []string { func (i *IDPSubroutine) GetName() string { return "IDPSubroutine" } +// Process implements lifecycle.Subroutine as no-op since Initialize handles the +// work. func (i *IDPSubroutine) Process(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { + return ctrl.Result{}, nil +} + +// Initialize implements lifecycle.Initializer. +func (i *IDPSubroutine) Initialize(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { lc := instance.(*kcpcorev1alpha1.LogicalCluster) workspaceName := getWorkspaceName(lc) diff --git a/internal/subroutine/invite.go b/internal/subroutine/invite.go index 766c7f74..d4d6f1d5 100644 --- a/internal/subroutine/invite.go +++ b/internal/subroutine/invite.go @@ -31,7 +31,10 @@ func NewInviteSubroutine(orgsClient client.Client, mgr mcmanager.Manager) *invit } } -var _ lifecyclesubroutine.Subroutine = &inviteSubroutine{} +var ( + _ lifecyclesubroutine.Subroutine = &inviteSubroutine{} + _ lifecyclesubroutine.Initializer = &inviteSubroutine{} +) type inviteSubroutine struct { orgsClient client.Client @@ -48,7 +51,14 @@ func (w *inviteSubroutine) Finalizers(_ runtimeobject.RuntimeObject) []string { func (w *inviteSubroutine) GetName() string { return "InviteInitilizationSubroutine" } +// Process implements lifecycle.Subroutine as no-op since Initialize handles the +// work. func (w *inviteSubroutine) Process(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { + return ctrl.Result{}, nil +} + +// Initialize implements lifecycle.Initializer. 
+func (w *inviteSubroutine) Initialize(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { lc := instance.(*kcpcorev1alpha1.LogicalCluster) wsName := getWorkspaceName(lc) diff --git a/internal/subroutine/invite/subroutine.go b/internal/subroutine/invite/subroutine.go index c9f66f3a..543143f3 100644 --- a/internal/subroutine/invite/subroutine.go +++ b/internal/subroutine/invite/subroutine.go @@ -214,13 +214,12 @@ func (s *subroutine) Process(ctx context.Context, instance runtimeobject.Runtime res, err = s.keycloak.Post(fmt.Sprintf("%s/admin/realms/%s/users", s.keycloakBaseURL, realm), "application/json", &buffer) if err != nil { // coverage-ignore - log.Err(err).Msg("Failed to create user") - return ctrl.Result{}, errors.NewOperatorError(err, true, true) + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("posting to Keycloak to create user: %w", err), true, true) } defer res.Body.Close() //nolint:errcheck if res.StatusCode != http.StatusCreated { - return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("failed to create user: %s", res.Status), true, true) + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("keycloak returned non-201 status code: %s", res.Status), true, true) } res, err = s.keycloak.Get(fmt.Sprintf("%s/admin/realms/%s/users?%s", s.keycloakBaseURL, realm, v.Encode())) diff --git a/internal/subroutine/mocks/mock_CTRLManager.go b/internal/subroutine/mocks/mock_CTRLManager.go index 642ad2bc..fbc68059 100644 --- a/internal/subroutine/mocks/mock_CTRLManager.go +++ b/internal/subroutine/mocks/mock_CTRLManager.go @@ -10,17 +10,18 @@ import ( "github.com/go-logr/logr" mock "github.com/stretchr/testify/mock" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/events" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" ) // NewCTRLManager creates a new instance of CTRLManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. @@ -546,6 +547,105 @@ func (_c *CTRLManager_GetControllerOptions_Call) RunAndReturn(run func() config. 
return _c } +// GetConverterRegistry provides a mock function for the type CTRLManager +func (_mock *CTRLManager) GetConverterRegistry() conversion.Registry { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for GetConverterRegistry") + } + + var r0 conversion.Registry + if returnFunc, ok := ret.Get(0).(func() conversion.Registry); ok { + r0 = returnFunc() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(conversion.Registry) + } + } + return r0 +} + +// CTRLManager_GetConverterRegistry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetConverterRegistry' +type CTRLManager_GetConverterRegistry_Call struct { + *mock.Call +} + +// GetConverterRegistry is a helper method to define mock.On call +func (_e *CTRLManager_Expecter) GetConverterRegistry() *CTRLManager_GetConverterRegistry_Call { + return &CTRLManager_GetConverterRegistry_Call{Call: _e.mock.On("GetConverterRegistry")} +} + +func (_c *CTRLManager_GetConverterRegistry_Call) Run(run func()) *CTRLManager_GetConverterRegistry_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CTRLManager_GetConverterRegistry_Call) Return(registry conversion.Registry) *CTRLManager_GetConverterRegistry_Call { + _c.Call.Return(registry) + return _c +} + +func (_c *CTRLManager_GetConverterRegistry_Call) RunAndReturn(run func() conversion.Registry) *CTRLManager_GetConverterRegistry_Call { + _c.Call.Return(run) + return _c +} + +// GetEventRecorder provides a mock function for the type CTRLManager +func (_mock *CTRLManager) GetEventRecorder(name string) events.EventRecorder { + ret := _mock.Called(name) + + if len(ret) == 0 { + panic("no return value specified for GetEventRecorder") + } + + var r0 events.EventRecorder + if returnFunc, ok := ret.Get(0).(func(string) events.EventRecorder); ok { + r0 = returnFunc(name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(events.EventRecorder) + } + } + return r0 +} + +// CTRLManager_GetEventRecorder_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEventRecorder' +type CTRLManager_GetEventRecorder_Call struct { + *mock.Call +} + +// GetEventRecorder is a helper method to define mock.On call +// - name string +func (_e *CTRLManager_Expecter) GetEventRecorder(name interface{}) *CTRLManager_GetEventRecorder_Call { + return &CTRLManager_GetEventRecorder_Call{Call: _e.mock.On("GetEventRecorder", name)} +} + +func (_c *CTRLManager_GetEventRecorder_Call) Run(run func(name string)) *CTRLManager_GetEventRecorder_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 string + if args[0] != nil { + arg0 = args[0].(string) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *CTRLManager_GetEventRecorder_Call) Return(v events.EventRecorder) *CTRLManager_GetEventRecorder_Call { + _c.Call.Return(v) + return _c +} + +func (_c *CTRLManager_GetEventRecorder_Call) RunAndReturn(run func(name string) events.EventRecorder) *CTRLManager_GetEventRecorder_Call { + _c.Call.Return(run) + return _c +} + // GetEventRecorderFor provides a mock function for the type CTRLManager func (_mock *CTRLManager) GetEventRecorderFor(name string) record.EventRecorder { ret := _mock.Called(name) diff --git a/internal/subroutine/mocks/mock_Client.go b/internal/subroutine/mocks/mock_Client.go index e17b5984..510476f5 100644 --- a/internal/subroutine/mocks/mock_Client.go +++ b/internal/subroutine/mocks/mock_Client.go @@ -8,11 +8,10 @@ import ( "context" mock 
"github.com/stretchr/testify/mock" - "sigs.k8s.io/controller-runtime/pkg/client" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" ) // NewMockClient creates a new instance of MockClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. diff --git a/internal/subroutine/mocks/mock_Cluster.go b/internal/subroutine/mocks/mock_Cluster.go index ee0f1236..9a0fc522 100644 --- a/internal/subroutine/mocks/mock_Cluster.go +++ b/internal/subroutine/mocks/mock_Cluster.go @@ -9,13 +9,13 @@ import ( "net/http" mock "github.com/stretchr/testify/mock" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/events" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" ) // NewMockCluster creates a new instance of MockCluster. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. @@ -229,6 +229,59 @@ func (_c *MockCluster_GetConfig_Call) RunAndReturn(run func() *rest.Config) *Moc return _c } +// GetEventRecorder provides a mock function for the type MockCluster +func (_mock *MockCluster) GetEventRecorder(name string) events.EventRecorder { + ret := _mock.Called(name) + + if len(ret) == 0 { + panic("no return value specified for GetEventRecorder") + } + + var r0 events.EventRecorder + if returnFunc, ok := ret.Get(0).(func(string) events.EventRecorder); ok { + r0 = returnFunc(name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(events.EventRecorder) + } + } + return r0 +} + +// MockCluster_GetEventRecorder_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEventRecorder' +type MockCluster_GetEventRecorder_Call struct { + *mock.Call +} + +// GetEventRecorder is a helper method to define mock.On call +// - name string +func (_e *MockCluster_Expecter) GetEventRecorder(name interface{}) *MockCluster_GetEventRecorder_Call { + return &MockCluster_GetEventRecorder_Call{Call: _e.mock.On("GetEventRecorder", name)} +} + +func (_c *MockCluster_GetEventRecorder_Call) Run(run func(name string)) *MockCluster_GetEventRecorder_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 string + if args[0] != nil { + arg0 = args[0].(string) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockCluster_GetEventRecorder_Call) Return(v events.EventRecorder) *MockCluster_GetEventRecorder_Call { + _c.Call.Return(v) + return _c +} + +func (_c *MockCluster_GetEventRecorder_Call) RunAndReturn(run func(name string) events.EventRecorder) *MockCluster_GetEventRecorder_Call { + _c.Call.Return(run) + return _c +} + // GetEventRecorderFor provides a mock function for the type MockCluster func (_mock *MockCluster) GetEventRecorderFor(name string) record.EventRecorder { ret := _mock.Called(name) diff --git a/internal/subroutine/mocks/mock_DiscoveryInterface.go b/internal/subroutine/mocks/mock_DiscoveryInterface.go index dd626a52..371e5925 100644 --- a/internal/subroutine/mocks/mock_DiscoveryInterface.go +++ b/internal/subroutine/mocks/mock_DiscoveryInterface.go @@ -7,7 +7,6 @@ package mocks import ( "github.com/google/gnostic-models/openapiv2" mock "github.com/stretchr/testify/mock" - "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/version" "k8s.io/client-go/discovery" diff --git a/internal/subroutine/remove_initializer.go b/internal/subroutine/remove_initializer.go deleted file mode 100644 index 5356720e..00000000 --- a/internal/subroutine/remove_initializer.go +++ /dev/null @@ -1,72 +0,0 @@ -package subroutine - -import ( - "context" - "fmt" - "slices" - - "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" - "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" - "github.com/platform-mesh/golang-commons/errors" - "github.com/platform-mesh/security-operator/internal/config" - "github.com/rs/zerolog/log" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" - - "github.com/kcp-dev/sdk/apis/cache/initialization" - kcpcorev1alpha1 "github.com/kcp-dev/sdk/apis/core/v1alpha1" -) - -type removeInitializer struct { - initializerName string - mgr mcmanager.Manager -} - -// Finalize implements subroutine.Subroutine. -func (r *removeInitializer) Finalize(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { - return ctrl.Result{}, nil -} - -// Finalizers implements subroutine.Subroutine. -func (r *removeInitializer) Finalizers(_ runtimeobject.RuntimeObject) []string { return []string{} } - -// GetName implements subroutine.Subroutine. -func (r *removeInitializer) GetName() string { return "RemoveInitializer" } - -// Process implements subroutine.Subroutine. -func (r *removeInitializer) Process(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { - lc := instance.(*kcpcorev1alpha1.LogicalCluster) - - initializer := kcpcorev1alpha1.LogicalClusterInitializer(r.initializerName) - - cluster, err := r.mgr.ClusterFromContext(ctx) - if err != nil { - return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("unable to get cluster from context: %w", err), true, false) - } - - if !slices.Contains(lc.Status.Initializers, initializer) { - log.Info().Msg("Initializer already absent, skipping patch") - return ctrl.Result{}, nil - } - - patch := client.MergeFrom(lc.DeepCopy()) - - lc.Status.Initializers = initialization.EnsureInitializerAbsent(initializer, lc.Status.Initializers) - if err := cluster.GetClient().Status().Patch(ctx, lc, patch); err != nil { - return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("unable to patch out initializers: %w", err), true, true) - } - - log.Info().Msg(fmt.Sprintf("Removed initializer from LogicalCluster status, name %s,uuid %s", lc.Name, lc.UID)) - - return ctrl.Result{}, nil -} - -func NewRemoveInitializer(mgr mcmanager.Manager, cfg config.Config) *removeInitializer { - return &removeInitializer{ - initializerName: cfg.InitializerName(), - mgr: mgr, - } -} - -var _ subroutine.Subroutine = &removeInitializer{} diff --git a/internal/subroutine/remove_initializer_test.go b/internal/subroutine/remove_initializer_test.go deleted file mode 100644 index 5ef69162..00000000 --- a/internal/subroutine/remove_initializer_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package subroutine_test - -import ( - "context" - "testing" - - "github.com/platform-mesh/security-operator/internal/config" - "github.com/platform-mesh/security-operator/internal/subroutine" - "github.com/platform-mesh/security-operator/internal/subroutine/mocks" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "sigs.k8s.io/controller-runtime/pkg/client" - - kcpcorev1alpha1 
"github.com/kcp-dev/sdk/apis/core/v1alpha1" -) - -// fakeStatusWriter implements client.SubResourceWriter to intercept Status().Patch calls -type fakeStatusWriter struct { - t *testing.T - expectClear kcpcorev1alpha1.LogicalClusterInitializer - err error -} - -func (f *fakeStatusWriter) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { - return nil -} - -func (f *fakeStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { - return nil -} - -func (f *fakeStatusWriter) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { - lc := obj.(*kcpcorev1alpha1.LogicalCluster) - // Ensure initializer was removed before patch - for _, init := range lc.Status.Initializers { - if init == f.expectClear { - f.t.Fatalf("initializer %q should have been removed prior to Patch", string(init)) - } - } - return f.err -} - -func TestRemoveInitializer_Process(t *testing.T) { - cfg := config.Config{ - WorkspacePath: "root", - WorkspaceTypeName: "foo.initializer.kcp.dev", - } - initializerName := cfg.InitializerName() - - t.Run("skips when initializer is absent", func(t *testing.T) { - mgr := mocks.NewMockManager(t) - cluster := mocks.NewMockCluster(t) - mgr.EXPECT().ClusterFromContext(mock.Anything).Return(cluster, nil) - - lc := &kcpcorev1alpha1.LogicalCluster{} - lc.Status.Initializers = []kcpcorev1alpha1.LogicalClusterInitializer{"other.initializer"} - - r := subroutine.NewRemoveInitializer(mgr, cfg) - _, err := r.Process(context.Background(), lc) - assert.Nil(t, err) - }) - - t.Run("removes initializer and patches status", func(t *testing.T) { - mgr := mocks.NewMockManager(t) - cluster := mocks.NewMockCluster(t) - k8s := mocks.NewMockClient(t) - - mgr.EXPECT().ClusterFromContext(mock.Anything).Return(cluster, nil) - cluster.EXPECT().GetClient().Return(k8s) - k8s.EXPECT().Status().Return(&fakeStatusWriter{t: t, expectClear: kcpcorev1alpha1.LogicalClusterInitializer(initializerName), err: nil}) - - lc := &kcpcorev1alpha1.LogicalCluster{} - lc.Status.Initializers = []kcpcorev1alpha1.LogicalClusterInitializer{ - kcpcorev1alpha1.LogicalClusterInitializer(initializerName), - "another.initializer", - } - - r := subroutine.NewRemoveInitializer(mgr, cfg) - _, err := r.Process(context.Background(), lc) - assert.Nil(t, err) - for _, init := range lc.Status.Initializers { - assert.NotEqual(t, initializerName, string(init)) - } - }) - - t.Run("returns error when status patch fails", func(t *testing.T) { - mgr := mocks.NewMockManager(t) - cluster := mocks.NewMockCluster(t) - k8s := mocks.NewMockClient(t) - - mgr.EXPECT().ClusterFromContext(mock.Anything).Return(cluster, nil) - cluster.EXPECT().GetClient().Return(k8s) - k8s.EXPECT().Status().Return(&fakeStatusWriter{t: t, expectClear: kcpcorev1alpha1.LogicalClusterInitializer(initializerName), err: assert.AnError}) - - lc := &kcpcorev1alpha1.LogicalCluster{} - lc.Status.Initializers = []kcpcorev1alpha1.LogicalClusterInitializer{ - kcpcorev1alpha1.LogicalClusterInitializer(initializerName), - } - - r := subroutine.NewRemoveInitializer(mgr, cfg) - _, err := r.Process(context.Background(), lc) - assert.NotNil(t, err) - }) -} - -func TestRemoveInitializer_Misc(t *testing.T) { - mgr := mocks.NewMockManager(t) - r := subroutine.NewRemoveInitializer(mgr, config.Config{WorkspacePath: "root", WorkspaceTypeName: "foo.initializer.kcp.dev"}) - - assert.Equal(t, "RemoveInitializer", r.GetName()) - 
assert.Equal(t, []string{}, r.Finalizers(nil)) - - _, err := r.Finalize(context.Background(), &kcpcorev1alpha1.LogicalCluster{}) - assert.Nil(t, err) -} - -func TestRemoveInitializer_ManagerError(t *testing.T) { - mgr := mocks.NewMockManager(t) - mgr.EXPECT().ClusterFromContext(mock.Anything).Return(nil, assert.AnError) - - r := subroutine.NewRemoveInitializer(mgr, config.Config{WorkspacePath: "root", WorkspaceTypeName: "foo.initializer.kcp.dev"}) - _, err := r.Process(context.Background(), &kcpcorev1alpha1.LogicalCluster{}) - assert.NotNil(t, err) -} diff --git a/internal/subroutine/tuples.go b/internal/subroutine/tuples.go index c6dfc2a3..4b18b6d1 100644 --- a/internal/subroutine/tuples.go +++ b/internal/subroutine/tuples.go @@ -9,9 +9,9 @@ import ( "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" lifecyclesubroutine "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" "github.com/platform-mesh/golang-commons/errors" - "github.com/platform-mesh/golang-commons/fga/helpers" "github.com/platform-mesh/golang-commons/logger" securityv1alpha1 "github.com/platform-mesh/security-operator/api/v1alpha1" + "github.com/platform-mesh/security-operator/pkg/fga" ctrl "sigs.k8s.io/controller-runtime" mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" @@ -56,27 +56,9 @@ func (t *tupleSubroutine) Finalize(ctx context.Context, instance runtimeobject.R authorizationModelID = store.Status.AuthorizationModelID } - for _, tuple := range managedTuples { - _, err := t.fga.Write(ctx, &openfgav1.WriteRequest{ - StoreId: storeID, - AuthorizationModelId: authorizationModelID, - Deletes: &openfgav1.WriteRequestDeletes{ - TupleKeys: []*openfgav1.TupleKeyWithoutCondition{ - { - Object: tuple.Object, - Relation: tuple.Relation, - User: tuple.User, - }, - }, - }, - }) - if helpers.IsDuplicateWriteError(err) { // coverage-ignore - log.Info().Stringer("tuple", tuple).Msg("tuple already deleted") - continue - } - if err != nil { - return ctrl.Result{}, errors.NewOperatorError(err, false, true) - } + tm := fga.NewTupleManager(t.fga, storeID, authorizationModelID, log) + if err := tm.Delete(ctx, managedTuples); err != nil { + return ctrl.Result{}, errors.NewOperatorError(err, false, true) } switch obj := instance.(type) { @@ -134,57 +116,21 @@ func (t *tupleSubroutine) Process(ctx context.Context, instance runtimeobject.Ru authorizationModelID = store.Status.AuthorizationModelID } - for _, tuple := range specTuples { - _, err := t.fga.Write(ctx, &openfgav1.WriteRequest{ - StoreId: storeID, - AuthorizationModelId: authorizationModelID, - Writes: &openfgav1.WriteRequestWrites{ - TupleKeys: []*openfgav1.TupleKey{ - { - Object: tuple.Object, - Relation: tuple.Relation, - User: tuple.User, - }, - }, - }, - }) - if helpers.IsDuplicateWriteError(err) { // coverage-ignore - log.Info().Stringer("tuple", tuple).Msg("tuple already exists") - continue - } - if err != nil { - return ctrl.Result{}, errors.NewOperatorError(err, false, true) - } + tm := fga.NewTupleManager(t.fga, storeID, authorizationModelID, log) + if err := tm.Apply(ctx, specTuples); err != nil { + return ctrl.Result{}, errors.NewOperatorError(err, false, true) } + var tuplesToDelete []securityv1alpha1.Tuple for _, tuple := range managedTuples { - if idx := slices.IndexFunc(specTuples, func(t securityv1alpha1.Tuple) bool { - return t.Object == tuple.Object && t.Relation == tuple.Relation && t.User == tuple.User - }); idx != -1 { - continue + if slices.IndexFunc(specTuples, func(s securityv1alpha1.Tuple) bool { + 
return s.Object == tuple.Object && s.Relation == tuple.Relation && s.User == tuple.User + }) == -1 { + tuplesToDelete = append(tuplesToDelete, tuple) } - - _, err := t.fga.Write(ctx, &openfgav1.WriteRequest{ - StoreId: storeID, - AuthorizationModelId: authorizationModelID, - Deletes: &openfgav1.WriteRequestDeletes{ - TupleKeys: []*openfgav1.TupleKeyWithoutCondition{ - { - Object: tuple.Object, - Relation: tuple.Relation, - User: tuple.User, - }, - }, - }, - }) - if helpers.IsDuplicateWriteError(err) { // coverage-ignore - log.Info().Stringer("tuple", tuple).Msg("tuple already deleted") - continue - } - if err != nil { // coverage-ignore - return ctrl.Result{}, errors.NewOperatorError(err, false, true) - } - + } + if err := tm.Delete(ctx, tuplesToDelete); err != nil { + return ctrl.Result{}, errors.NewOperatorError(err, false, true) } switch obj := instance.(type) { diff --git a/internal/subroutine/tuples_test.go b/internal/subroutine/tuples_test.go index fe53d42c..7301da5f 100644 --- a/internal/subroutine/tuples_test.go +++ b/internal/subroutine/tuples_test.go @@ -70,7 +70,7 @@ func TestTupleProcessWithStore(t *testing.T) { }, }, fgaMocks: func(fga *mocks.MockOpenFGAServiceClient) { - fga.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, nil).Times(3) + fga.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, nil) }, }, { @@ -108,11 +108,8 @@ func TestTupleProcessWithStore(t *testing.T) { }, }, fgaMocks: func(fga *mocks.MockOpenFGAServiceClient) { - // write calls - fga.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, nil).Times(3) - - // delete call - fga.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, nil) + // Apply (batch write) + Delete (batch delete) + fga.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, nil).Twice() }, }, { @@ -223,7 +220,7 @@ func TestTupleProcessWithAuthorizationModel(t *testing.T) { }, }, fgaMocks: func(fga *mocks.MockOpenFGAServiceClient) { - fga.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, nil).Times(3) + fga.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, nil) }, k8sMocks: func(k8s *mocks.MockClient) { // Not used for AuthorizationModel @@ -287,11 +284,8 @@ func TestTupleProcessWithAuthorizationModel(t *testing.T) { }, }, fgaMocks: func(fga *mocks.MockOpenFGAServiceClient) { - // write calls - fga.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, nil).Times(3) - - // delete call - fga.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, nil) + // Apply (batch write) + Delete (batch delete) + fga.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, nil).Twice() }, k8sMocks: func(k8s *mocks.MockClient) { // Not used for AuthorizationModel diff --git a/internal/subroutine/workspace_authorization.go b/internal/subroutine/workspace_authorization.go index bd445957..b2dd282f 100644 --- a/internal/subroutine/workspace_authorization.go +++ b/internal/subroutine/workspace_authorization.go @@ -13,7 +13,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" corev1 "k8s.io/api/core/v1" @@ -42,7 +41,10 @@ func NewWorkspaceAuthConfigurationSubroutine(orgClient, runtimeClient client.Cli } } -var _ lifecyclesubroutine.Subroutine = &workspaceAuthSubroutine{} +var ( + _ lifecyclesubroutine.Subroutine = &workspaceAuthSubroutine{} + _ lifecyclesubroutine.Initializer = &workspaceAuthSubroutine{} +) func 
(r *workspaceAuthSubroutine) GetName() string { return "workspaceAuthConfiguration" } @@ -50,11 +52,18 @@ func (r *workspaceAuthSubroutine) Finalizers(_ runtimeobject.RuntimeObject) []st return []string{} } -func (r *workspaceAuthSubroutine) Finalize(ctx context.Context, instance runtimeobject.RuntimeObject) (reconcile.Result, errors.OperatorError) { - return reconcile.Result{}, nil +func (r *workspaceAuthSubroutine) Finalize(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { + return ctrl.Result{}, nil +} + +// Process implements lifecycle.Subroutine as no-op since Initialize handles the +// work. +func (r *workspaceAuthSubroutine) Process(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { + return ctrl.Result{}, nil } -func (r *workspaceAuthSubroutine) Process(ctx context.Context, instance runtimeobject.RuntimeObject) (reconcile.Result, errors.OperatorError) { +// Initialize implements lifecycle.Initializer. +func (r *workspaceAuthSubroutine) Initialize(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { lc := instance.(*kcpcorev1alpha1.LogicalCluster) workspaceName := getWorkspaceName(lc) @@ -66,33 +75,33 @@ func (r *workspaceAuthSubroutine) Process(ctx context.Context, instance runtimeo if r.cfg.DomainCALookup { err := r.runtimeClient.Get(ctx, client.ObjectKey{Name: "domain-certificate-ca", Namespace: "platform-mesh-system"}, &domainCASecret) if err != nil { - return reconcile.Result{}, errors.NewOperatorError(fmt.Errorf("failed to get domain CA secret: %w", err), true, false) + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("failed to get domain CA secret: %w", err), true, false) } } cluster, err := r.mgr.ClusterFromContext(ctx) if err != nil { - return reconcile.Result{}, errors.NewOperatorError(fmt.Errorf("failed to get cluster from context %w", err), true, true) + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("failed to get cluster from context %w", err), true, true) } var idpConfig v1alpha1.IdentityProviderConfiguration err = cluster.GetClient().Get(ctx, types.NamespacedName{Name: workspaceName}, &idpConfig) if err != nil { - return reconcile.Result{}, errors.NewOperatorError(fmt.Errorf("failed to get IdentityProviderConfiguration: %w", err), true, true) + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("failed to get IdentityProviderConfiguration: %w", err), true, true) } if len(idpConfig.Spec.Clients) == 0 || len(idpConfig.Status.ManagedClients) == 0 { - return reconcile.Result{}, errors.NewOperatorError(fmt.Errorf("IdentityProviderConfiguration %s has no clients in spec or status", workspaceName), true, false) + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("IdentityProviderConfiguration %s has no clients in spec or status", workspaceName), true, false) } audiences := make([]string, 0, len(idpConfig.Spec.Clients)) for _, specClient := range idpConfig.Spec.Clients { managedClient, ok := idpConfig.Status.ManagedClients[specClient.ClientName] if !ok { - return reconcile.Result{}, errors.NewOperatorError(fmt.Errorf("managed client %s not found in IdentityProviderConfiguration status", specClient.ClientName), true, false) + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("managed client %s not found in IdentityProviderConfiguration status", specClient.ClientName), true, false) } if managedClient.ClientID == "" { - return reconcile.Result{}, errors.NewOperatorError(fmt.Errorf("managed client %s has 
empty ClientID in IdentityProviderConfiguration status", specClient.ClientName), true, false) + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("managed client %s has empty ClientID in IdentityProviderConfiguration status", specClient.ClientName), true, false) } audiences = append(audiences, managedClient.ClientID) } @@ -146,12 +155,12 @@ func (r *workspaceAuthSubroutine) Process(ctx context.Context, instance runtimeo return nil }) if err != nil { - return reconcile.Result{}, errors.NewOperatorError(fmt.Errorf("failed to create WorkspaceAuthConfiguration resource: %w", err), true, true) + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("failed to create WorkspaceAuthConfiguration resource: %w", err), true, true) } err = r.patchWorkspaceTypes(ctx, r.orgClient, workspaceName) if err != nil { - return reconcile.Result{}, errors.NewOperatorError(fmt.Errorf("failed to patch workspace types: %w", err), true, true) + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("failed to patch workspace types: %w", err), true, true) } return ctrl.Result{}, nil diff --git a/internal/subroutine/workspace_initializer.go b/internal/subroutine/workspace_initializer.go index 60c9b2a4..dba9b64c 100644 --- a/internal/subroutine/workspace_initializer.go +++ b/internal/subroutine/workspace_initializer.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "slices" "strings" accountsv1alpha1 "github.com/platform-mesh/account-operator/api/v1alpha1" @@ -11,18 +12,24 @@ import ( lifecyclesubroutine "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" "github.com/platform-mesh/golang-commons/errors" "github.com/platform-mesh/security-operator/api/v1alpha1" + iclient "github.com/platform-mesh/security-operator/internal/client" "github.com/platform-mesh/security-operator/internal/config" + "github.com/platform-mesh/security-operator/pkg/fga" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + mccontext "sigs.k8s.io/multicluster-runtime/pkg/context" mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/kcp-dev/logicalcluster/v3" + kcpcore "github.com/kcp-dev/sdk/apis/core" kcpcorev1alpha1 "github.com/kcp-dev/sdk/apis/core/v1alpha1" ) -func NewWorkspaceInitializer(orgsClient client.Client, cfg config.Config, mgr mcmanager.Manager) *workspaceInitializer { +func NewWorkspaceInitializer(orgsClient client.Client, cfg config.Config, mgr mcmanager.Manager, creatorRelation, objectType string) *workspaceInitializer { // read file from path res, err := os.ReadFile(cfg.CoreModulePath) if err != nil { @@ -35,10 +42,15 @@ func NewWorkspaceInitializer(orgsClient client.Client, cfg config.Config, mgr mc initializerName: cfg.InitializerName(), mgr: mgr, cfg: cfg, + creatorRelation: creatorRelation, + objectType: objectType, } } -var _ lifecyclesubroutine.Subroutine = &workspaceInitializer{} +var ( + _ lifecyclesubroutine.Subroutine = &workspaceInitializer{} + _ lifecyclesubroutine.Initializer = &workspaceInitializer{} +) type workspaceInitializer struct { orgsClient client.Client @@ -46,6 +58,9 @@ type workspaceInitializer struct { cfg config.Config coreModule string initializerName string + + objectType string + creatorRelation string } func (w *workspaceInitializer) Finalize(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { @@ -59,37 +74,87 @@ func 
(w *workspaceInitializer) Finalizers(_ runtimeobject.RuntimeObject) []strin func (w *workspaceInitializer) GetName() string { return "WorkspaceInitializer" } +// Process implements lifecycle.Subroutine as no-op since Initialize handles the +// work. func (w *workspaceInitializer) Process(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { + return ctrl.Result{}, nil +} + +// Initialize implements lifecycle.Initializer. +func (w *workspaceInitializer) Initialize(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, errors.OperatorError) { lc := instance.(*kcpcorev1alpha1.LogicalCluster) + p := lc.Annotations[kcpcore.LogicalClusterPathAnnotationKey] + if p == "" { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("annotation on LogicalCluster is not set"), true, true) + } + lcID, _ := mccontext.ClusterFrom(ctx) + + lcClient, err := iclient.NewForLogicalCluster(w.mgr.GetLocalManager().GetConfig(), w.mgr.GetLocalManager().GetScheme(), logicalcluster.Name(lcID)) + if err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("getting client: %w", err), true, true) + } + + var ai accountsv1alpha1.AccountInfo + if err := lcClient.Get(ctx, client.ObjectKey{ + Name: "account", + }, &ai); err != nil && !kerrors.IsNotFound(err) { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("getting AccountInfo for LogicalCluster: %w", err), true, true) + } else if kerrors.IsNotFound(err) { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("AccountInfo not found yet, requeueing"), true, false) + } + + orgsClient, err := iclient.NewForLogicalCluster(w.mgr.GetLocalManager().GetConfig(), w.mgr.GetLocalManager().GetScheme(), logicalcluster.Name("root:orgs")) + if err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("getting parent organisation client: %w", err), true, true) + } + + var acc accountsv1alpha1.Account + if err := orgsClient.Get(ctx, client.ObjectKey{ + Name: ai.Spec.Account.Name, + }, &acc); err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("getting Account in platform-mesh-system: %w", err), true, true) + } store := v1alpha1.Store{ ObjectMeta: metav1.ObjectMeta{Name: generateStoreName(lc)}, } - _, err := controllerutil.CreateOrUpdate(ctx, w.orgsClient, &store, func() error { + tuples, err := fga.TuplesForOrganization(acc, ai, w.creatorRelation, w.objectType) + if err != nil { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("building tuples for organization: %w", err), true, true) + } + if w.cfg.AllowMemberTuplesEnabled { // TODO: remove this flag once the feature is tested and stable + tuples = append(tuples, []v1alpha1.Tuple{ + { + Object: "role:authenticated", + Relation: "assignee", + User: "user:*", + }, + { + Object: fmt.Sprintf("core_platform-mesh_io_account:%s/%s", lc.Spec.Owner.Cluster, lc.Spec.Owner.Name), + Relation: "member", + User: "role:authenticated#assignee", + }, + }...) 
+ } + if result, err := controllerutil.CreateOrUpdate(ctx, w.orgsClient, &store, func() error { store.Spec = v1alpha1.StoreSpec{ CoreModule: w.coreModule, } - - if w.cfg.AllowMemberTuplesEnabled { // TODO: remove this flag once the feature is tested and stable - store.Spec.Tuples = []v1alpha1.Tuple{ - { - Object: "role:authenticated", - Relation: "assignee", - User: "user:*", - }, - { - Object: fmt.Sprintf("core_platform-mesh_io_account:%s/%s", lc.Spec.Owner.Cluster, lc.Spec.Owner.Name), - Relation: "member", - User: "role:authenticated#assignee", - }, - } - } + store.Spec.Tuples = tuples return nil - }) - if err != nil { + }); err != nil { return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("unable to create/update store: %w", err), true, true) + } else if result == controllerutil.OperationResultCreated || result == controllerutil.OperationResultUpdated { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("store needed to be updated, requeueing"), true, false) + } + + // Check if Store applied tuple changes + for _, t := range tuples { + if !slices.Contains(store.Status.ManagedTuples, t) { + return ctrl.Result{}, errors.NewOperatorError(fmt.Errorf("store does not yet contain all specified tuples, requeueing"), true, false) + } } if store.Status.StoreID == "" { diff --git a/internal/terminatingworkspaces/provider.go b/internal/terminatingworkspaces/provider.go new file mode 100644 index 00000000..cb39b6be --- /dev/null +++ b/internal/terminatingworkspaces/provider.go @@ -0,0 +1,105 @@ +package terminatingworkspaces + +import ( + "github.com/go-logr/logr" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/log" + + "sigs.k8s.io/multicluster-runtime/pkg/clusters" + "sigs.k8s.io/multicluster-runtime/pkg/multicluster" + + "github.com/kcp-dev/logicalcluster/v3" + kcpcorev1alpha1 "github.com/kcp-dev/sdk/apis/core/v1alpha1" + kcptenancyv1alpha1 "github.com/kcp-dev/sdk/apis/tenancy/v1alpha1" + + mcpcache "github.com/kcp-dev/multicluster-provider/pkg/cache" + "github.com/kcp-dev/multicluster-provider/pkg/events/recorder" + "github.com/kcp-dev/multicluster-provider/pkg/provider" +) + +var _ multicluster.Provider = &Provider{} +var _ multicluster.ProviderRunnable = &Provider{} + +// Provider reconciles LogicalClusters that are in deletion and have a specific +// terminator. +// It is a slightly modified version of +// github.com/kcp-dev/multicluster-provider/initializingworkspaces. +type Provider struct { + provider.Factory +} + +// Options are the options for creating a new instance of the terminating workspaces provider. +type Options struct { + // Scheme is the scheme to use for the provider. If this is nil, it defaults + // to the client-go scheme. + Scheme *runtime.Scheme + + // Log is the logger to use for the provider. If this is nil, it defaults + // to the controller-runtime default logger. + Log *logr.Logger +} + +// New creates a new kcp terminating workspaces provider. +func New(cfg *rest.Config, workspaceTypeName string, options Options) (*Provider, error) { + // Do the defaulting controller-runtime would do for those fields we need. 
+ if options.Scheme == nil { + options.Scheme = scheme.Scheme + } + + if options.Log == nil { + options.Log = ptr.To(log.Log.WithName("kcp-terminatingworkspaces-cluster-provider")) + } + + c, err := cache.New(cfg, cache.Options{ + Scheme: options.Scheme, + ByObject: map[client.Object]cache.ByObject{ + &kcptenancyv1alpha1.WorkspaceType{}: { + Field: fields.SelectorFromSet(fields.Set{"metadata.name": workspaceTypeName}), + }, + }, + }) + if err != nil { + return nil, err + } + + return &Provider{ + Factory: provider.Factory{ + Clusters: ptr.To(clusters.New[cluster.Cluster]()), + Providers: map[string]*provider.Provider{}, + + Log: *options.Log, + + GetVWs: func(obj client.Object) ([]string, error) { + wst := obj.(*kcptenancyv1alpha1.WorkspaceType) + var urls []string + for _, endpoint := range wst.Status.VirtualWorkspaces { + if endpoint.Type != "terminating" { + continue + } + urls = append(urls, endpoint.URL) + } + return urls, nil + }, + + Config: cfg, + Scheme: options.Scheme, + Outer: &kcptenancyv1alpha1.WorkspaceType{}, + Inner: &kcpcorev1alpha1.LogicalCluster{}, + Cache: c, + // ensure the generic provider builds a per-cluster cache instead of a wildcard-based + // cache, since this virtual workspace does not offer anything but logicalclusters on + // the wildcard endpoint + NewCluster: func(cfg *rest.Config, clusterName logicalcluster.Name, wildcardCA mcpcache.WildcardCache, scheme *runtime.Scheme, _ recorder.EventRecorderGetter) (*mcpcache.ScopedCluster, error) { + return mcpcache.NewScopedInitializingCluster(cfg, clusterName, wildcardCA, scheme) + }, + }, + }, nil +} diff --git a/pkg/fga/tuple_manager.go b/pkg/fga/tuple_manager.go new file mode 100644 index 00000000..3e477522 --- /dev/null +++ b/pkg/fga/tuple_manager.go @@ -0,0 +1,94 @@ +package fga + +import ( + "context" + + openfgav1 "github.com/openfga/api/proto/openfga/v1" + "github.com/platform-mesh/golang-commons/logger" + "github.com/platform-mesh/security-operator/api/v1alpha1" +) + +// AuthorizationModelIDLatest makes explicit that an empty model ID means +// the latest authorization model. +const AuthorizationModelIDLatest = "" + +// TupleManager wraps an OpenFGA client, store, and authorization model to write and delete sets of tuples. +type TupleManager struct { + client openfgav1.OpenFGAServiceClient + storeID string + authorizationModelID string + logger logger.Logger +} + +func NewTupleManager(client openfgav1.OpenFGAServiceClient, storeID, authorizationModelID string, log *logger.Logger) *TupleManager { + return &TupleManager{ + client: client, + storeID: storeID, + authorizationModelID: authorizationModelID, + logger: *log.ComponentLogger("tuple_manager").MustChildLoggerWithAttributes("store_id", storeID, "authorization_model", authorizationModelID), + } +} + +// Apply writes a given set of tuples within a single transaction and ignores +// duplicate writes.
+func (m *TupleManager) Apply(ctx context.Context, tuples []v1alpha1.Tuple) error { + if len(tuples) == 0 { + return nil + } + + tupleKeys := make([]*openfgav1.TupleKey, 0, len(tuples)) + for _, t := range tuples { + tupleKeys = append(tupleKeys, &openfgav1.TupleKey{ + Object: t.Object, + Relation: t.Relation, + User: t.User, + }) + } + + _, err := m.client.Write(ctx, &openfgav1.WriteRequest{ + StoreId: m.storeID, + AuthorizationModelId: m.authorizationModelID, + Writes: &openfgav1.WriteRequestWrites{ + TupleKeys: tupleKeys, + OnDuplicate: "ignore", + }, + }) + if err != nil { + return err + } + + m.logger.Debug().Int("count", len(tuples)).Msg("Wrote tuples") + return nil +} + +// Delete deletes a given set of tuples within a single transaction and ignores +// duplicate deletions. +func (m *TupleManager) Delete(ctx context.Context, tuples []v1alpha1.Tuple) error { + if len(tuples) == 0 { + return nil + } + + tupleKeys := make([]*openfgav1.TupleKeyWithoutCondition, 0, len(tuples)) + for _, t := range tuples { + tupleKeys = append(tupleKeys, &openfgav1.TupleKeyWithoutCondition{ + Object: t.Object, + Relation: t.Relation, + User: t.User, + }) + } + + _, err := m.client.Write(ctx, &openfgav1.WriteRequest{ + StoreId: m.storeID, + AuthorizationModelId: m.authorizationModelID, + Deletes: &openfgav1.WriteRequestDeletes{ + TupleKeys: tupleKeys, + OnMissing: "ignore", + }, + }) + if err != nil { + return err + } + + m.logger.Debug().Int("count", len(tuples)).Msg("Deleted tuples") + return nil +} diff --git a/pkg/fga/tuple_manager_test.go b/pkg/fga/tuple_manager_test.go new file mode 100644 index 00000000..559696d7 --- /dev/null +++ b/pkg/fga/tuple_manager_test.go @@ -0,0 +1,177 @@ +package fga + +import ( + "context" + "errors" + "testing" + + openfgav1 "github.com/openfga/api/proto/openfga/v1" + "github.com/platform-mesh/golang-commons/logger/testlogger" + "github.com/platform-mesh/security-operator/api/v1alpha1" + "github.com/platform-mesh/security-operator/internal/subroutine/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func TestTupleManager_Apply(t *testing.T) { + t.Run("returns nil for empty tuples", func(t *testing.T) { + client := mocks.NewMockOpenFGAServiceClient(t) + log := testlogger.New() + mgr := NewTupleManager(client, "store-id", "model-id", log.Logger) + + err := mgr.Apply(context.Background(), nil) + assert.NoError(t, err) + + err = mgr.Apply(context.Background(), []v1alpha1.Tuple{}) + assert.NoError(t, err) + }) + + t.Run("writes tuples successfully", func(t *testing.T) { + client := mocks.NewMockOpenFGAServiceClient(t) + client.EXPECT().Write(mock.Anything, mock.MatchedBy(func(req *openfgav1.WriteRequest) bool { + return req.StoreId == "store-id" && + req.AuthorizationModelId == "model-id" && + req.Writes != nil && + len(req.Writes.TupleKeys) == 2 && + req.Writes.OnDuplicate == "ignore" + })).Return(&openfgav1.WriteResponse{}, nil) + + log := testlogger.New() + mgr := NewTupleManager(client, "store-id", "model-id", log.Logger) + + tuples := []v1alpha1.Tuple{ + {Object: "doc:1", Relation: "viewer", User: "user:alice"}, + {Object: "doc:2", Relation: "owner", User: "user:bob"}, + } + + err := mgr.Apply(context.Background(), tuples) + assert.NoError(t, err) + }) + + t.Run("returns error when write fails", func(t *testing.T) { + client := mocks.NewMockOpenFGAServiceClient(t) + client.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, errors.New("write failed")) + + log := 
testlogger.New() + mgr := NewTupleManager(client, "store-id", "model-id", log.Logger) + + tuples := []v1alpha1.Tuple{ + {Object: "doc:1", Relation: "viewer", User: "user:alice"}, + } + + err := mgr.Apply(context.Background(), tuples) + assert.Error(t, err) + }) +} + +func TestTupleManager_Delete(t *testing.T) { + t.Run("returns nil for empty tuples", func(t *testing.T) { + client := mocks.NewMockOpenFGAServiceClient(t) + log := testlogger.New() + mgr := NewTupleManager(client, "store-id", "model-id", log.Logger) + + err := mgr.Delete(context.Background(), nil) + assert.NoError(t, err) + + err = mgr.Delete(context.Background(), []v1alpha1.Tuple{}) + assert.NoError(t, err) + }) + + t.Run("deletes tuples successfully", func(t *testing.T) { + client := mocks.NewMockOpenFGAServiceClient(t) + client.EXPECT().Write(mock.Anything, mock.MatchedBy(func(req *openfgav1.WriteRequest) bool { + return req.StoreId == "store-id" && + req.AuthorizationModelId == "model-id" && + req.Deletes != nil && + len(req.Deletes.TupleKeys) == 2 && + req.Deletes.OnMissing == "ignore" + })).Return(&openfgav1.WriteResponse{}, nil) + + log := testlogger.New() + mgr := NewTupleManager(client, "store-id", "model-id", log.Logger) + + tuples := []v1alpha1.Tuple{ + {Object: "doc:1", Relation: "viewer", User: "user:alice"}, + {Object: "doc:2", Relation: "owner", User: "user:bob"}, + } + + err := mgr.Delete(context.Background(), tuples) + assert.NoError(t, err) + }) + + t.Run("returns error when delete fails", func(t *testing.T) { + client := mocks.NewMockOpenFGAServiceClient(t) + client.EXPECT().Write(mock.Anything, mock.Anything).Return(nil, errors.New("delete failed")) + + log := testlogger.New() + mgr := NewTupleManager(client, "store-id", "model-id", log.Logger) + + tuples := []v1alpha1.Tuple{ + {Object: "doc:1", Relation: "viewer", User: "user:alice"}, + } + + err := mgr.Delete(context.Background(), tuples) + assert.Error(t, err) + }) +} + +func TestTupleManager_Apply_verifies_tuple_contents(t *testing.T) { + var capturedReq *openfgav1.WriteRequest + client := mocks.NewMockOpenFGAServiceClient(t) + client.EXPECT().Write(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, req *openfgav1.WriteRequest, opts ...grpc.CallOption) (*openfgav1.WriteResponse, error) { + capturedReq = req + return &openfgav1.WriteResponse{}, nil + }) + + log := testlogger.New() + mgr := NewTupleManager(client, "store-id", "model-id", log.Logger) + + tuples := []v1alpha1.Tuple{ + {Object: "doc:1", Relation: "viewer", User: "user:alice"}, + {Object: "doc:2", Relation: "owner", User: "user:bob"}, + } + + err := mgr.Apply(context.Background(), tuples) + require.NoError(t, err) + require.NotNil(t, capturedReq) + require.NotNil(t, capturedReq.Writes) + require.Len(t, capturedReq.Writes.TupleKeys, 2) + + // Verify both tuples are in the request + keys := capturedReq.Writes.TupleKeys + assert.True(t, (keys[0].Object == "doc:1" && keys[0].Relation == "viewer" && keys[0].User == "user:alice") || + (keys[1].Object == "doc:1" && keys[1].Relation == "viewer" && keys[1].User == "user:alice")) + assert.True(t, (keys[0].Object == "doc:2" && keys[0].Relation == "owner" && keys[0].User == "user:bob") || + (keys[1].Object == "doc:2" && keys[1].Relation == "owner" && keys[1].User == "user:bob")) +} + +func TestTupleManager_Delete_verifies_tuple_contents(t *testing.T) { + var capturedReq *openfgav1.WriteRequest + client := mocks.NewMockOpenFGAServiceClient(t) + client.EXPECT().Write(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, req 
*openfgav1.WriteRequest, opts ...grpc.CallOption) (*openfgav1.WriteResponse, error) { + capturedReq = req + return &openfgav1.WriteResponse{}, nil + }) + + log := testlogger.New() + mgr := NewTupleManager(client, "store-id", "model-id", log.Logger) + + tuples := []v1alpha1.Tuple{ + {Object: "doc:1", Relation: "viewer", User: "user:alice"}, + {Object: "doc:2", Relation: "owner", User: "user:bob"}, + } + + err := mgr.Delete(context.Background(), tuples) + require.NoError(t, err) + require.NotNil(t, capturedReq) + require.NotNil(t, capturedReq.Deletes) + require.Len(t, capturedReq.Deletes.TupleKeys, 2) + + keys := capturedReq.Deletes.TupleKeys + assert.True(t, (keys[0].Object == "doc:1" && keys[0].Relation == "viewer" && keys[0].User == "user:alice") || + (keys[1].Object == "doc:1" && keys[1].Relation == "viewer" && keys[1].User == "user:alice")) + assert.True(t, (keys[0].Object == "doc:2" && keys[0].Relation == "owner" && keys[0].User == "user:bob") || + (keys[1].Object == "doc:2" && keys[1].Relation == "owner" && keys[1].User == "user:bob")) +} diff --git a/pkg/fga/tuples.go b/pkg/fga/tuples.go new file mode 100644 index 00000000..bb6d6943 --- /dev/null +++ b/pkg/fga/tuples.go @@ -0,0 +1,54 @@ +package fga + +import ( + "errors" + "fmt" + "strings" + + accountv1alpha1 "github.com/platform-mesh/account-operator/api/v1alpha1" + "github.com/platform-mesh/security-operator/api/v1alpha1" +) + +// TuplesForAccount returns FGA tuples for an account not of type organization. +func TuplesForAccount(acc accountv1alpha1.Account, ai accountv1alpha1.AccountInfo, creatorRelation, parentRelation, objectType string) ([]v1alpha1.Tuple, error) { + base, err := baseTuples(acc, ai, creatorRelation, objectType) + if err != nil { + return nil, err + } + tuples := append(base, v1alpha1.Tuple{ + User: fmt.Sprintf("%s:%s/%s", objectType, ai.Spec.ParentAccount.OriginClusterId, ai.Spec.ParentAccount.Name), + Relation: parentRelation, + Object: fmt.Sprintf("%s:%s/%s", objectType, ai.Spec.Account.OriginClusterId, ai.Spec.Account.Name), + }) + return tuples, nil +} + +// TuplesForOrganization returns FGA tuples for an Account of type organization. +func TuplesForOrganization(acc accountv1alpha1.Account, ai accountv1alpha1.AccountInfo, creatorRelation, objectType string) ([]v1alpha1.Tuple, error) { + return baseTuples(acc, ai, creatorRelation, objectType) +} + +func baseTuples(acc accountv1alpha1.Account, ai accountv1alpha1.AccountInfo, creatorRelation, objectType string) ([]v1alpha1.Tuple, error) { + if acc.Spec.Creator == nil { + return nil, errors.New("account creator is nil") + } + + return []v1alpha1.Tuple{ + { + User: fmt.Sprintf("user:%s", formatUser(*acc.Spec.Creator)), + Relation: "assignee", + Object: fmt.Sprintf("role:%s/%s/%s/owner", objectType, ai.Spec.Account.OriginClusterId, ai.Spec.Account.Name), + }, + { + User: fmt.Sprintf("role:%s/%s/%s/owner#assignee", objectType, ai.Spec.Account.OriginClusterId, ai.Spec.Account.Name), + Relation: creatorRelation, + Object: fmt.Sprintf("%s:%s/%s", objectType, ai.Spec.Account.OriginClusterId, ai.Spec.Account.Name), + }, + }, nil +} + +// formatUser formats a user to be stored in an FGA tuple, i.e. replaces colons +// with dots. +func formatUser(user string) string { + return strings.ReplaceAll(user, ":", ".") +}
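Usage sketch for the new pkg/fga TupleManager introduced above: the refactored tuples subroutine delegates batch writes and stale-tuple deletion to it, and the sketch below shows the same pattern in isolation. It assumes an already constructed openfgav1.OpenFGAServiceClient and *logger.Logger; the package and function names here are illustrative only, not part of this change.

package example // hypothetical package, for illustration only

import (
	"context"
	"slices"

	openfgav1 "github.com/openfga/api/proto/openfga/v1"
	"github.com/platform-mesh/golang-commons/logger"
	"github.com/platform-mesh/security-operator/api/v1alpha1"
	"github.com/platform-mesh/security-operator/pkg/fga"
)

// reconcileTuples mirrors what the tuple subroutine now does: write every
// desired tuple, then delete only the previously managed tuples that are no
// longer desired. Store and model IDs are assumed to come from the Store status.
func reconcileTuples(ctx context.Context, client openfgav1.OpenFGAServiceClient, log *logger.Logger,
	storeID, modelID string, desired, managed []v1alpha1.Tuple) error {

	tm := fga.NewTupleManager(client, storeID, modelID, log)

	// Batch write; duplicate writes are ignored (OnDuplicate: "ignore").
	if err := tm.Apply(ctx, desired); err != nil {
		return err
	}

	// Anything managed before but absent from the desired set gets deleted.
	var stale []v1alpha1.Tuple
	for _, t := range managed {
		if !slices.ContainsFunc(desired, func(d v1alpha1.Tuple) bool {
			return d.Object == t.Object && d.Relation == t.Relation && d.User == t.User
		}) {
			stale = append(stale, t)
		}
	}
	// Missing tuples are ignored on delete (OnMissing: "ignore").
	return tm.Delete(ctx, stale)
}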
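For reference, this is the shape of tuples the helpers in pkg/fga/tuples.go produce. The creator, account, cluster, and relation values below are made up for illustration; creatorRelation, parentRelation, and objectType are whatever the caller passes in (the object type string mirrors the one used elsewhere in this diff).

package example // hypothetical, for illustration only

import "github.com/platform-mesh/security-operator/api/v1alpha1"

// exampleAccountTuples shows what TuplesForAccount would return for a
// hypothetical creator "alice:example.com", account "team-a" on cluster
// "abc123", parent account "acme" on cluster "root123", with
// creatorRelation "owner", parentRelation "parent", and objectType
// "core_platform-mesh_io_account". formatUser replaces the creator's
// colons with dots. TuplesForOrganization returns only the first two.
var exampleAccountTuples = []v1alpha1.Tuple{
	{
		User:     "user:alice.example.com",
		Relation: "assignee",
		Object:   "role:core_platform-mesh_io_account/abc123/team-a/owner",
	},
	{
		User:     "role:core_platform-mesh_io_account/abc123/team-a/owner#assignee",
		Relation: "owner", // creatorRelation
		Object:   "core_platform-mesh_io_account:abc123/team-a",
	},
	{
		User:     "core_platform-mesh_io_account:root123/acme",
		Relation: "parent", // parentRelation
		Object:   "core_platform-mesh_io_account:abc123/team-a",
	},
}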
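Finally, a minimal sketch of constructing the terminating-workspaces provider added in internal/terminatingworkspaces/provider.go, callable only from within this module since the package is internal. It relies solely on the New signature and Options defaulting shown in the diff; how the provider is wired into a multicluster manager is not shown here and is left out.

package example // hypothetical wiring sketch, inside this module

import (
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/platform-mesh/security-operator/internal/terminatingworkspaces"
)

// newTerminatingProvider builds the provider for a given WorkspaceType name.
// Scheme and Log fall back to the client-go scheme and the controller-runtime
// default logger when Options is left zero, per the defaulting in New.
func newTerminatingProvider(workspaceType string) (*terminatingworkspaces.Provider, error) {
	cfg := ctrl.GetConfigOrDie()
	return terminatingworkspaces.New(cfg, workspaceType, terminatingworkspaces.Options{})
}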