From b086db2ed03ff81a4ab6e0ae3548638f912eb0d1 Mon Sep 17 00:00:00 2001 From: Ying Zhang Date: Fri, 13 Mar 2026 15:24:05 +0800 Subject: [PATCH] HYPERFLEET-599 | test: add the tier1 case automation and test logic --- .gitignore | 1 + Makefile | 4 +- e2e/adapter/adapter_failover.go | 189 ++++++ e2e/adapter/adapter_with_maestro.go | 2 +- e2e/cluster/creation.go | 2 +- e2e/nodepool/creation.go | 4 +- go.mod | 17 + go.sum | 106 ++++ pkg/config/config.go | 542 ++++++++++-------- pkg/helper/adapter.go | 452 +++++++++++++++ pkg/helper/constants.go | 13 + pkg/helper/git.go | 155 +++++ pkg/helper/helper.go | 7 + .../{adapter.md => adapter-failover.md} | 50 +- .../adapter-with-maestro-transport.md | 4 +- .../cl-invalid-resource/adapter-config.yaml | 23 + .../adapter-task-config.yaml | 149 +++++ .../cl-invalid-resource/values.yaml | 32 ++ 18 files changed, 1464 insertions(+), 288 deletions(-) create mode 100644 e2e/adapter/adapter_failover.go create mode 100644 pkg/helper/adapter.go create mode 100644 pkg/helper/constants.go create mode 100644 pkg/helper/git.go rename test-design/testcases/{adapter.md => adapter-failover.md} (59%) create mode 100644 testdata/adapter-configs/cl-invalid-resource/adapter-config.yaml create mode 100644 testdata/adapter-configs/cl-invalid-resource/adapter-task-config.yaml create mode 100644 testdata/adapter-configs/cl-invalid-resource/values.yaml diff --git a/.gitignore b/.gitignore index 949e4ab..c07f462 100644 --- a/.gitignore +++ b/.gitignore @@ -40,6 +40,7 @@ vendor/ *.bak *.log .deploy-work/ +.test-work/ # Environment files .env diff --git a/Makefile b/Makefile index 0ca249e..7f4d8b6 100644 --- a/Makefile +++ b/Makefile @@ -95,12 +95,12 @@ test-coverage: test ## Run tests and generate HTML coverage report .PHONY: e2e e2e: build ## Run all E2E tests - ./$(BINARY_NAME) test + TESTDATA_DIR=$(PWD)/testdata ./$(BINARY_NAME) test .PHONY: e2e-ci e2e-ci: build ## Run E2E tests with CI configuration mkdir -p $(OUTPUT_DIR) - ./$(BINARY_NAME) test 
--configs ci --junit-report $(OUTPUT_DIR)/junit.xml + TESTDATA_DIR=$(PWD)/testdata ./$(BINARY_NAME) test --configs ci --junit-report $(OUTPUT_DIR)/junit.xml ##@ Code Quality diff --git a/e2e/adapter/adapter_failover.go b/e2e/adapter/adapter_failover.go new file mode 100644 index 0000000..02255b7 --- /dev/null +++ b/e2e/adapter/adapter_failover.go @@ -0,0 +1,189 @@ +package adapter + +import ( + "context" + "os" + + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" //nolint:staticcheck // dot import for test readability + + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/client" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/helper" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/labels" +) + +var _ = ginkgo.Describe("[Suite: adapter-failures][negative] Adapter framework can detect and report failures to cluster API endpoints", + ginkgo.Label(labels.Tier1), + func() { + var ( + h *helper.Helper + chartPath string + baseDeployOpts helper.AdapterDeploymentOptions + ) + + ginkgo.BeforeEach(func(ctx context.Context) { + h = helper.New() + + // Clone adapter Helm chart repository (shared across all tests) + ginkgo.By("Clone adapter Helm chart repository") + var cleanupChart func() error + var err error + chartPath, cleanupChart, err = h.CloneHelmChart(ctx, helper.HelmChartCloneOptions{ + Component: "adapter", + RepoURL: h.Cfg.AdapterDeployment.ChartRepo, + Ref: h.Cfg.AdapterDeployment.ChartRef, + ChartPath: h.Cfg.AdapterDeployment.ChartPath, + WorkDir: helper.TestWorkDir, + }) + Expect(err).NotTo(HaveOccurred(), "failed to clone adapter Helm chart") + ginkgo.GinkgoWriter.Printf("Cloned adapter chart to: %s\n", chartPath) + + // Ensure chart cleanup after test + ginkgo.DeferCleanup(func(ctx context.Context) { + ginkgo.By("Cleanup cloned Helm chart") + if err := cleanupChart(); err != nil { + ginkgo.GinkgoWriter.Printf("Warning: failed to cleanup chart: %v\n", err) + } + }) + + // 
Set up base deployment options with common fields + baseDeployOpts = helper.AdapterDeploymentOptions{ + Namespace: h.Cfg.Namespace, + ChartPath: chartPath, + } + }) + + ginkgo.It("should detect invalid K8s resource and report failure with clear error message", + func(ctx context.Context) { + // Test-specific adapter configuration + adapterName := "cl-invalid-resource" + + // Set environment variable for envsubst expansion in values.yaml + err := os.Setenv("ADAPTER_NAME", adapterName) + Expect(err).NotTo(HaveOccurred(), "failed to set ADAPTER_NAME environment variable") + ginkgo.DeferCleanup(func() { + _ = os.Unsetenv("ADAPTER_NAME") + }) + + // Generate unique release name for this deployment + releaseName := helper.GenerateAdapterReleaseName(helper.ResourceTypeClusters, adapterName) + + // Deploy the test adapter with invalid K8s resource configuration + ginkgo.By("Deploy test adapter with invalid K8s resource configuration") + + // Create deployment options from base and add test-specific fields + deployOpts := baseDeployOpts + deployOpts.ReleaseName = releaseName + deployOpts.AdapterName = adapterName + + err = h.DeployAdapter(ctx, deployOpts) + // Ensure adapter cleanup happens after this test + ginkgo.DeferCleanup(func(ctx context.Context) { + ginkgo.By("Uninstall test adapter") + if err := h.UninstallAdapter(ctx, releaseName, h.Cfg.Namespace); err != nil { + ginkgo.GinkgoWriter.Printf("Warning: failed to uninstall adapter %s: %v\n", releaseName, err) + } else { + ginkgo.GinkgoWriter.Printf("Successfully uninstalled adapter: %s\n", releaseName) + } + }) + Expect(err).NotTo(HaveOccurred(), "failed to deploy test adapter") + ginkgo.GinkgoWriter.Printf("Successfully deployed adapter: %s (release: %s)\n", adapterName, releaseName) + + // Create cluster after adapter is deployed + ginkgo.By("Create test cluster") + cluster, err := h.Client.CreateClusterFromPayload(ctx, h.TestDataPath("payloads/clusters/cluster-request.json")) + Expect(err).NotTo(HaveOccurred(), 
"failed to create cluster") + Expect(cluster.Id).NotTo(BeNil(), "cluster ID should be generated") + Expect(cluster.Name).NotTo(BeEmpty(), "cluster name should be present") + clusterID := *cluster.Id + clusterName := cluster.Name + ginkgo.GinkgoWriter.Printf("Created cluster ID: %s, Name: %s\n", clusterID, clusterName) + + // Ensure cluster cleanup happens after this test + ginkgo.DeferCleanup(func(ctx context.Context) { + ginkgo.By("Cleanup test cluster " + clusterID) + if err := h.CleanupTestCluster(ctx, clusterID); err != nil { + ginkgo.GinkgoWriter.Printf("Warning: failed to cleanup cluster %s: %v\n", clusterID, err) + } + }) + + ginkgo.By("Verify initial status of cluster") + // Verify initial conditions are False + // Use Eventually to handle async condition propagation + Eventually(func(g Gomega) { + cluster, err = h.Client.GetCluster(ctx, clusterID) + g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster") + g.Expect(cluster.Status).NotTo(BeNil(), "cluster status should be present") + + hasReadyFalse := h.HasResourceCondition(cluster.Status.Conditions, + client.ConditionTypeReady, openapi.ResourceConditionStatusFalse) + g.Expect(hasReadyFalse).To(BeTrue(), + "initial cluster conditions should have Ready=False") + + hasAvailableFalse := h.HasResourceCondition(cluster.Status.Conditions, + client.ConditionTypeAvailable, openapi.ResourceConditionStatusFalse) + g.Expect(hasAvailableFalse).To(BeTrue(), + "initial cluster conditions should have Available=False") + }, h.Cfg.Timeouts.Adapter.Processing, h.Cfg.Polling.Interval).Should(Succeed()) + + ginkgo.By("Verify adapter execution detects failure and reports error") + // Wait for adapter to process the cluster and report failure status + Eventually(func(g Gomega) { + statuses, err := h.Client.GetClusterStatuses(ctx, clusterID) + g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster statuses") + g.Expect(statuses.Items).NotTo(BeEmpty(), "adapter should have reported status") + + // Find the test adapter 
status + var adapterStatus *openapi.AdapterStatus + for i, adapter := range statuses.Items { + if adapter.Adapter == adapterName { + adapterStatus = &statuses.Items[i] + break + } + } + + g.Expect(adapterStatus).NotTo(BeNil(), + "adapter %s should be present in adapter statuses", adapterName) + + // Validate adapter metadata + g.Expect(adapterStatus.CreatedTime).NotTo(BeZero(), + "adapter should have valid created_time") + g.Expect(adapterStatus.LastReportTime).NotTo(BeZero(), + "adapter should have valid last_report_time") + g.Expect(adapterStatus.ObservedGeneration).To(Equal(int32(1)), + "adapter should have observed_generation=1") + + // Find Available condition + var availableCondition *openapi.AdapterCondition + for i, condition := range adapterStatus.Conditions { + if condition.Type == client.ConditionTypeAvailable { + availableCondition = &adapterStatus.Conditions[i] + break + } + } + + g.Expect(availableCondition).NotTo(BeNil(), + "adapter should have Available condition") + + // Verify Available condition reports failure + g.Expect(availableCondition.Status).To(Equal(openapi.AdapterConditionStatusFalse), + "adapter Available condition should be False due to invalid K8s resource") + + // Verify error details are present in reason and message + g.Expect(availableCondition.Reason).NotTo(BeNil(), + "adapter Available condition should have reason") + g.Expect(*availableCondition.Reason).NotTo(BeEmpty(), + "adapter Available condition reason should not be empty") + + g.Expect(availableCondition.Message).NotTo(BeNil(), + "adapter Available condition should have message") + g.Expect(*availableCondition.Message).NotTo(BeEmpty(), + "adapter Available condition message should not be empty") + + }, h.Cfg.Timeouts.Adapter.Processing, h.Cfg.Polling.Interval).Should(Succeed()) + + ginkgo.GinkgoWriter.Printf("Successfully validated adapter failure detection and reporting\n") + }) + }, +) diff --git a/e2e/adapter/adapter_with_maestro.go 
b/e2e/adapter/adapter_with_maestro.go index 0e5e8c9..5bb3102 100644 --- a/e2e/adapter/adapter_with_maestro.go +++ b/e2e/adapter/adapter_with_maestro.go @@ -28,7 +28,7 @@ var _ = ginkgo.Describe("[Suite: adapter][maestro-transport] Adapter Framework - ginkgo.BeforeEach(func(ctx context.Context) { h = helper.New() // Create cluster for all tests in this suite - cluster, err := h.Client.CreateClusterFromPayload(ctx, "testdata/payloads/clusters/cluster-request.json") + cluster, err := h.Client.CreateClusterFromPayload(ctx, h.TestDataPath("payloads/clusters/cluster-request.json")) Expect(err).NotTo(HaveOccurred(), "failed to create cluster") Expect(cluster.Id).NotTo(BeNil(), "cluster ID should be generated") Expect(cluster.Name).NotTo(BeEmpty(), "cluster name should be present") diff --git a/e2e/cluster/creation.go b/e2e/cluster/creation.go index 684add5..4156b81 100644 --- a/e2e/cluster/creation.go +++ b/e2e/cluster/creation.go @@ -25,7 +25,7 @@ var _ = ginkgo.Describe("[Suite: cluster][baseline] Cluster Resource Type Lifecy h = helper.New() // Create cluster for all tests in this suite - cluster, err := h.Client.CreateClusterFromPayload(ctx, "testdata/payloads/clusters/cluster-request.json") + cluster, err := h.Client.CreateClusterFromPayload(ctx, h.TestDataPath("payloads/clusters/cluster-request.json")) Expect(err).NotTo(HaveOccurred(), "failed to create cluster") Expect(cluster.Id).NotTo(BeNil(), "cluster ID should be generated") Expect(cluster.Name).NotTo(BeEmpty(), "cluster name should be present") diff --git a/e2e/nodepool/creation.go b/e2e/nodepool/creation.go index ce9d049..9d8ea82 100644 --- a/e2e/nodepool/creation.go +++ b/e2e/nodepool/creation.go @@ -27,12 +27,12 @@ var _ = ginkgo.Describe("[Suite: nodepool][baseline] NodePool Resource Type Life // Get or create cluster for nodepool tests var err error - clusterID, err = h.GetTestCluster(ctx, "testdata/payloads/clusters/cluster-request.json") + clusterID, err = h.GetTestCluster(ctx, 
h.TestDataPath("payloads/clusters/cluster-request.json")) Expect(err).NotTo(HaveOccurred(), "failed to get test cluster") ginkgo.GinkgoWriter.Printf("Using cluster ID: %s\n", clusterID) // Create nodepool for all tests in this suite - nodepool, err := h.Client.CreateNodePoolFromPayload(ctx, clusterID, "testdata/payloads/nodepools/nodepool-request.json") + nodepool, err := h.Client.CreateNodePoolFromPayload(ctx, clusterID, h.TestDataPath("payloads/nodepools/nodepool-request.json")) Expect(err).NotTo(HaveOccurred(), "failed to create nodepool") Expect(nodepool.Id).NotTo(BeNil(), "nodepool ID should be generated") Expect(nodepool.Name).NotTo(BeEmpty(), "nodepool name should be present") diff --git a/go.mod b/go.mod index 1e94624..7df1141 100644 --- a/go.mod +++ b/go.mod @@ -15,24 +15,35 @@ require ( ) require ( + dario.cat/mergo v1.0.0 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/ProtonMail/go-crypto v1.0.0 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emirpasic/gods v1.18.1 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect 
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -40,18 +51,23 @@ require ( github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/skeema/knownhosts v1.2.2 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.9 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/x448/float16 v0.8.4 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.9.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.44.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/mod v0.29.0 // indirect golang.org/x/net v0.47.0 // indirect @@ -66,6 +82,7 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi 
v0.0.0-20250910181357-589584f1c912 // indirect diff --git a/go.sum b/go.sum index 700983f..47fa599 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,38 @@ +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= +github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl 
v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -24,6 +45,16 @@ github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZ github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk= 
github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE= github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc= +github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= @@ -38,6 +69,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -51,6 +84,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= @@ -58,6 +93,9 @@ github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -91,6 +129,10 @@ github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -101,6 +143,11 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= 
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= @@ -120,7 +167,9 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -139,6 +188,9 @@ github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= @@ 
-147,29 +199,79 @@ go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU= +golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= @@ -178,6 +280,10 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/config/config.go b/pkg/config/config.go index 74e6e9d..3e10bc8 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -1,353 +1,407 @@ package config import ( - "fmt" - "log/slog" - "net/url" - "reflect" - "time" - - "github.com/spf13/viper" + "fmt" + "log/slog" + "net/url" + "os" + "reflect" + "time" + + "github.com/spf13/viper" ) const ( - // TagMapstructure is the struct tag used by Viper for configuration mapping - TagMapstructure = "mapstructure" + // TagMapstructure is the struct tag used by Viper for configuration mapping + TagMapstructure = "mapstructure" - // EnvPrefix is the prefix for all environment variables (without trailing underscore) - // Viper automatically adds underscore when using SetEnvPrefix() - EnvPrefix = "HYPERFLEET" + // EnvPrefix is the prefix for all environment variables (without trailing underscore) + // Viper automatically adds underscore when using SetEnvPrefix() + EnvPrefix = "HYPERFLEET" - // RedactedPlaceholder is used to mask sensitive information in logs - RedactedPlaceholder = "**REDACTED**" + // RedactedPlaceholder is used to mask sensitive information in logs + RedactedPlaceholder = "**REDACTED**" - // NotSetPlaceholder indicates a configuration value has not been set - NotSetPlaceholder = "" + // NotSetPlaceholder indicates a configuration value has not been set + NotSetPlaceholder = "" ) // EnvVar constructs an environment variable name with the HYPERFLEET prefix // Example: EnvVar("LOG_LEVEL") returns "HYPERFLEET_LOG_LEVEL" func EnvVar(name string) string { - return EnvPrefix + "_" + name + return EnvPrefix + "_" + name } // API config keys var API = struct { - // URL is the HyperFleet API base 
URL - // Env: HYPERFLEET_API_URL - URL string + // URL is the HyperFleet API base URL + // Env: HYPERFLEET_API_URL + URL string }{ - URL: "api.url", + URL: "api.url", } // Tests config keys for Ginkgo test execution var Tests = struct { - // GinkgoLabelFilter is the label filter for Ginkgo tests - // Env: GINKGO_LABEL_FILTER - GinkgoLabelFilter string + // GinkgoLabelFilter is the label filter for Ginkgo tests + // Env: GINKGO_LABEL_FILTER + GinkgoLabelFilter string - // GinkgoFocus is a regex to focus on specific tests - // Env: GINKGO_FOCUS - GinkgoFocus string + // GinkgoFocus is a regex to focus on specific tests + // Env: GINKGO_FOCUS + GinkgoFocus string - // GinkgoSkip is a regex to skip specific tests - // Env: GINKGO_SKIP - GinkgoSkip string + // GinkgoSkip is a regex to skip specific tests + // Env: GINKGO_SKIP + GinkgoSkip string - // SuiteTimeout is the timeout for the entire test suite (Go duration format: "2h", "90m", etc.) - // Env: SUITE_TIMEOUT - SuiteTimeout string + // SuiteTimeout is the timeout for the entire test suite (Go duration format: "2h", "90m", etc.) 
+ // Env: SUITE_TIMEOUT + SuiteTimeout string - // JUnitReportPath is the path to write JUnit XML report - // Env: JUNIT_REPORT_PATH - JUnitReportPath string + // JUnitReportPath is the path to write JUnit XML report + // Env: JUNIT_REPORT_PATH + JUnitReportPath string }{ - GinkgoLabelFilter: "tests.ginkgoLabelFilter", - GinkgoFocus: "tests.focus", - GinkgoSkip: "tests.ginkgoSkip", - SuiteTimeout: "tests.suiteTimeout", - JUnitReportPath: "tests.junitReportPath", + GinkgoLabelFilter: "tests.ginkgoLabelFilter", + GinkgoFocus: "tests.focus", + GinkgoSkip: "tests.ginkgoSkip", + SuiteTimeout: "tests.suiteTimeout", + JUnitReportPath: "tests.junitReportPath", } // Log config keys var Log = struct { - // Level is the minimum log level - // Env: HYPERFLEET_LOG_LEVEL - Level string + // Level is the minimum log level + // Env: HYPERFLEET_LOG_LEVEL + Level string - // Format is the log output format - // Env: HYPERFLEET_LOG_FORMAT - Format string + // Format is the log output format + // Env: HYPERFLEET_LOG_FORMAT + Format string - // Output is the log destination - // Env: HYPERFLEET_LOG_OUTPUT - Output string + // Output is the log destination + // Env: HYPERFLEET_LOG_OUTPUT + Output string }{ - Level: "log.level", - Format: "log.format", - Output: "log.output", + Level: "log.level", + Format: "log.format", + Output: "log.output", } // AdaptersConfig contains required adapters for each resource type type AdaptersConfig struct { - Cluster []string `yaml:"cluster" mapstructure:"cluster"` // Required adapters for cluster resources - NodePool []string `yaml:"nodepool" mapstructure:"nodepool"` // Required adapters for nodepool resources + Cluster []string `yaml:"cluster" mapstructure:"cluster"` // Required adapters for cluster resources + NodePool []string `yaml:"nodepool" mapstructure:"nodepool"` // Required adapters for nodepool resources +} + +// AdapterDeploymentConfig contains configuration for deploying adapters via Helm in tests. 
+type AdapterDeploymentConfig struct { + ChartRepo string `yaml:"chartRepo" mapstructure:"chartRepo"` + ChartRef string `yaml:"chartRef" mapstructure:"chartRef"` + ChartPath string `yaml:"chartPath" mapstructure:"chartPath"` } // Config represents the e2e test configuration type Config struct { - API APIConfig `yaml:"api" mapstructure:"api"` - Timeouts TimeoutsConfig `yaml:"timeouts" mapstructure:"timeouts"` - Polling PollingConfig `yaml:"polling" mapstructure:"polling"` - Log LogConfig `yaml:"log" mapstructure:"log"` - Adapters AdaptersConfig `yaml:"adapters" mapstructure:"adapters"` + Namespace string `yaml:"namespace" mapstructure:"namespace"` + GCPProjectID string `yaml:"gcpProjectId" mapstructure:"gcpProjectId"` + OutputDir string `yaml:"outputDir" mapstructure:"outputDir"` + TestDataDir string `yaml:"testDataDir" mapstructure:"testDataDir"` + API APIConfig `yaml:"api" mapstructure:"api"` + Timeouts TimeoutsConfig `yaml:"timeouts" mapstructure:"timeouts"` + Polling PollingConfig `yaml:"polling" mapstructure:"polling"` + Log LogConfig `yaml:"log" mapstructure:"log"` + Adapters AdaptersConfig `yaml:"adapters" mapstructure:"adapters"` + AdapterDeployment AdapterDeploymentConfig `yaml:"adapterDeployment" mapstructure:"adapterDeployment"` } // APIConfig contains API-related configuration type APIConfig struct { - URL string `yaml:"url" mapstructure:"url"` + URL string `yaml:"url" mapstructure:"url"` } // TimeoutsConfig contains timeout configurations type TimeoutsConfig struct { - Cluster ClusterTimeouts `yaml:"cluster" mapstructure:"cluster"` - NodePool NodePoolTimeouts `yaml:"nodepool" mapstructure:"nodepool"` - Adapter AdapterTimeouts `yaml:"adapter" mapstructure:"adapter"` + Cluster ClusterTimeouts `yaml:"cluster" mapstructure:"cluster"` + NodePool NodePoolTimeouts `yaml:"nodepool" mapstructure:"nodepool"` + Adapter AdapterTimeouts `yaml:"adapter" mapstructure:"adapter"` } // ClusterTimeouts contains cluster-related timeouts type ClusterTimeouts struct { - 
Ready time.Duration `yaml:"ready" mapstructure:"ready"` + Ready time.Duration `yaml:"ready" mapstructure:"ready"` } // NodePoolTimeouts contains nodepool-related timeouts type NodePoolTimeouts struct { - Ready time.Duration `yaml:"ready" mapstructure:"ready"` + Ready time.Duration `yaml:"ready" mapstructure:"ready"` } // AdapterTimeouts contains adapter-related timeouts type AdapterTimeouts struct { - Processing time.Duration `yaml:"processing" mapstructure:"processing"` + Processing time.Duration `yaml:"processing" mapstructure:"processing"` } // PollingConfig contains polling configuration type PollingConfig struct { - Interval time.Duration `yaml:"interval" mapstructure:"interval"` + Interval time.Duration `yaml:"interval" mapstructure:"interval"` } // LogConfig contains logging configuration type LogConfig struct { - Level string `yaml:"level" mapstructure:"level"` // debug, info, warn, error - Format string `yaml:"format" mapstructure:"format"` // text, json - Output string `yaml:"output" mapstructure:"output"` // stdout, stderr + Level string `yaml:"level" mapstructure:"level"` // debug, info, warn, error + Format string `yaml:"format" mapstructure:"format"` // text, json + Output string `yaml:"output" mapstructure:"output"` // stdout, stderr } // Load loads configuration from viper with improved validation func Load() (*Config, error) { - cfg := &Config{} + cfg := &Config{} - // Use Unmarshal (not UnmarshalExact) to allow runtime test parameters (tests.*) - // to coexist with persistent configuration. Test parameters (label-filter, focus, skip) - // are set via flags/env vars and should not appear in config files. - if err := viper.Unmarshal(cfg); err != nil { - return nil, fmt.Errorf("configuration error: %w\nPlease check your config file", err) - } + // Use Unmarshal (not UnmarshalExact) to allow runtime test parameters (tests.*) + // to coexist with persistent configuration. 
Test parameters (label-filter, focus, skip) + // are set via flags/env vars and should not appear in config files. + if err := viper.Unmarshal(cfg); err != nil { + return nil, fmt.Errorf("configuration error: %w\nPlease check your config file", err) + } - // WORKAROUND: viper.Unmarshal doesn't always respect env var bindings for nested structs - // Use reflection to automatically apply all values from viper to the config struct - applyViperValues(reflect.ValueOf(cfg).Elem(), "") + // WORKAROUND: viper.Unmarshal doesn't always respect env var bindings for nested structs + // Use reflection to automatically apply all values from viper to the config struct + applyViperValues(reflect.ValueOf(cfg).Elem(), "") - // Apply defaults - cfg.applyDefaults() + // Apply defaults + cfg.applyDefaults() - // Validate with detailed errors - if err := cfg.Validate(); err != nil { - return nil, err - } + // Validate with detailed errors + if err := cfg.Validate(); err != nil { + return nil, err + } - // Note: Display() is called after logger initialization in e2e.RunTests() - // to ensure structured logging is properly configured + // Note: Display() is called after logger initialization in e2e.RunTests() + // to ensure structured logging is properly configured - return cfg, nil + return cfg, nil } // applyViperValues recursively applies values from viper to the config struct using reflection // This ensures environment variables and flags properly override config file values func applyViperValues(v reflect.Value, prefix string) { - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - fieldType := t.Field(i) - - tag := fieldType.Tag.Get(TagMapstructure) - if tag == "" { - continue - } - - var configPath string - if prefix == "" { - configPath = tag - } else { - configPath = prefix + "." 
+ tag - } - - if field.Kind() == reflect.Struct && field.Type() != reflect.TypeOf(time.Duration(0)) { - applyViperValues(field, configPath) - continue - } - - if !field.CanSet() { - continue - } - - // Apply value from viper based on field type - switch field.Kind() { - case reflect.String: - if viperVal := viper.GetString(configPath); viperVal != "" { - field.SetString(viperVal) - } - case reflect.Bool: - // For bool, only apply if the key is explicitly set in viper - // This preserves the priority order: flags > env > config > defaults - if viper.IsSet(configPath) { - field.SetBool(viper.GetBool(configPath)) - } - case reflect.Slice: - // Handle string slices - if field.Type().Elem().Kind() == reflect.String { - if viper.IsSet(configPath) { - viperVal := viper.GetStringSlice(configPath) - if len(viperVal) > 0 { - field.Set(reflect.ValueOf(viperVal)) - } - } - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // Special handling for time.Duration (which is int64) - if field.Type() == reflect.TypeOf(time.Duration(0)) { - if viperVal := viper.GetDuration(configPath); viperVal != 0 { - field.SetInt(int64(viperVal)) - } - } else { - if viperVal := viper.GetInt64(configPath); viperVal != 0 { - field.SetInt(viperVal) - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if viperVal := viper.GetUint64(configPath); viperVal != 0 { - field.SetUint(viperVal) - } - case reflect.Float32, reflect.Float64: - if viperVal := viper.GetFloat64(configPath); viperVal != 0 { - field.SetFloat(viperVal) - } - } - } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + fieldType := t.Field(i) + + tag := fieldType.Tag.Get(TagMapstructure) + if tag == "" { + continue + } + + var configPath string + if prefix == "" { + configPath = tag + } else { + configPath = prefix + "." 
+ tag + } + + if field.Kind() == reflect.Struct && field.Type() != reflect.TypeOf(time.Duration(0)) { + applyViperValues(field, configPath) + continue + } + + if !field.CanSet() { + continue + } + + // Apply value from viper based on field type + switch field.Kind() { + case reflect.String: + if viperVal := viper.GetString(configPath); viperVal != "" { + field.SetString(viperVal) + } + case reflect.Bool: + // For bool, only apply if the key is explicitly set in viper + // This preserves the priority order: flags > env > config > defaults + if viper.IsSet(configPath) { + field.SetBool(viper.GetBool(configPath)) + } + case reflect.Slice: + // Handle string slices + if field.Type().Elem().Kind() == reflect.String { + if viper.IsSet(configPath) { + viperVal := viper.GetStringSlice(configPath) + if len(viperVal) > 0 { + field.Set(reflect.ValueOf(viperVal)) + } + } + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // Special handling for time.Duration (which is int64) + if field.Type() == reflect.TypeOf(time.Duration(0)) { + if viperVal := viper.GetDuration(configPath); viperVal != 0 { + field.SetInt(int64(viperVal)) + } + } else { + if viperVal := viper.GetInt64(configPath); viperVal != 0 { + field.SetInt(viperVal) + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if viperVal := viper.GetUint64(configPath); viperVal != 0 { + field.SetUint(viperVal) + } + case reflect.Float32, reflect.Float64: + if viperVal := viper.GetFloat64(configPath); viperVal != 0 { + field.SetFloat(viperVal) + } + } + } } // applyDefaults applies default values for unset fields func (c *Config) applyDefaults() { - // Apply timeout defaults - if c.Timeouts.Cluster.Ready == 0 { - c.Timeouts.Cluster.Ready = DefaultClusterReadyTimeout - } - if c.Timeouts.NodePool.Ready == 0 { - c.Timeouts.NodePool.Ready = DefaultNodePoolReadyTimeout - } - if c.Timeouts.Adapter.Processing == 0 { - c.Timeouts.Adapter.Processing = 
DefaultAdapterProcessingTimeout - } - if c.Polling.Interval == 0 { - c.Polling.Interval = DefaultPollInterval - } - - // Apply log defaults - if c.Log.Level == "" { - c.Log.Level = DefaultLogLevel - } - if c.Log.Format == "" { - c.Log.Format = DefaultLogFormat - } - if c.Log.Output == "" { - c.Log.Output = DefaultLogOutput - } - - // Apply adapter defaults - if c.Adapters.Cluster == nil { - c.Adapters.Cluster = DefaultClusterAdapters - } - if c.Adapters.NodePool == nil { - c.Adapters.NodePool = DefaultNodePoolAdapters - } + // Apply timeout defaults + if c.Timeouts.Cluster.Ready == 0 { + c.Timeouts.Cluster.Ready = DefaultClusterReadyTimeout + } + if c.Timeouts.NodePool.Ready == 0 { + c.Timeouts.NodePool.Ready = DefaultNodePoolReadyTimeout + } + if c.Timeouts.Adapter.Processing == 0 { + c.Timeouts.Adapter.Processing = DefaultAdapterProcessingTimeout + } + if c.Polling.Interval == 0 { + c.Polling.Interval = DefaultPollInterval + } + + // Apply log defaults + if c.Log.Level == "" { + c.Log.Level = DefaultLogLevel + } + if c.Log.Format == "" { + c.Log.Format = DefaultLogFormat + } + if c.Log.Output == "" { + c.Log.Output = DefaultLogOutput + } + + // Apply general configuration defaults from environment variables or config file + // Priority: config file values > environment variables > empty + // If config file value is empty, fall back to environment variable + + // Namespace: from config file or NAMESPACE env var + if c.Namespace == "" { + c.Namespace = os.Getenv("NAMESPACE") + } + + // GCPProjectID: from config file or GCP_PROJECT_ID env var + if c.GCPProjectID == "" { + c.GCPProjectID = os.Getenv("GCP_PROJECT_ID") + } + + // OutputDir: from config file, OUTPUT_DIR env var, or default to "output" + if c.OutputDir == "" { + if envVal := os.Getenv("OUTPUT_DIR"); envVal != "" { + c.OutputDir = envVal + } else { + c.OutputDir = "output" + } + } + + // TestDataDir: from config file, TESTDATA_DIR env var, or default to "testdata" + if c.TestDataDir == "" { + if envVal := 
os.Getenv("TESTDATA_DIR"); envVal != "" { + c.TestDataDir = envVal + } else { + c.TestDataDir = "testdata" + } + } + + // Apply adapter deployment values from environment variables or config file + + // ChartRepo: from ADAPTER_CHART_REPO env var or config file + if c.AdapterDeployment.ChartRepo == "" { + c.AdapterDeployment.ChartRepo = os.Getenv("ADAPTER_CHART_REPO") + } + + // ChartRef: from ADAPTER_CHART_REF env var or config file + if c.AdapterDeployment.ChartRef == "" { + c.AdapterDeployment.ChartRef = os.Getenv("ADAPTER_CHART_REF") + } + + // ChartPath: from ADAPTER_CHART_PATH env var or config file + if c.AdapterDeployment.ChartPath == "" { + c.AdapterDeployment.ChartPath = os.Getenv("ADAPTER_CHART_PATH") + } } // Validate validates configuration with detailed error messages func (c *Config) Validate() error { - // Validate API URL requirement - if c.API.URL == "" { - return fmt.Errorf(`configuration validation failed: + // Validate API URL requirement + if c.API.URL == "" { + return fmt.Errorf(`configuration validation failed: - Field 'Config.API.URL' is required Please provide API URL (in order of priority): • Flag: --api-url • Environment variable: HYPERFLEET_API_URL • Config file: api.url: `) - } + } - return nil + return nil } // Display logs the merged configuration using structured logging func (c *Config) Display() { - slog.Info("Loaded configuration", - "api_url", redactURL(c.API.URL), - "timeout_cluster_ready", c.Timeouts.Cluster.Ready, - "timeout_nodepool_ready", c.Timeouts.NodePool.Ready, - "timeout_adapter_processing", c.Timeouts.Adapter.Processing, - "polling_interval", c.Polling.Interval, - "log_level", c.Log.Level, - "log_format", c.Log.Format, - "log_output", c.Log.Output, - "adapters_cluster", c.Adapters.Cluster, - "adapters_nodepool", c.Adapters.NodePool, - ) + slog.Info("Loaded configuration", + "api_url", redactURL(c.API.URL), + "timeout_cluster_ready", c.Timeouts.Cluster.Ready, + "timeout_nodepool_ready", c.Timeouts.NodePool.Ready, + 
"timeout_adapter_processing", c.Timeouts.Adapter.Processing, + "polling_interval", c.Polling.Interval, + "log_level", c.Log.Level, + "log_format", c.Log.Format, + "log_output", c.Log.Output, + "adapters_cluster", c.Adapters.Cluster, + "adapters_nodepool", c.Adapters.NodePool, + ) } // redactURL redacts credentials from URLs func redactURL(rawURL string) string { - if rawURL == "" { - return NotSetPlaceholder - } - - // Parse the URL to safely handle credentials - u, err := url.Parse(rawURL) - if err != nil { - // If parsing fails, redact entirely for safety - return RedactedPlaceholder - } - - // If URL contains user credentials, redact them - if u.User != nil { - // Clear the User field and manually build the redacted URL - u.User = nil - redactedURL := u.String() - - // Insert RedactedPlaceholder after the scheme:// - if u.Scheme != "" { - redactedURL = u.Scheme + "://" + RedactedPlaceholder + "@" + u.Host - if u.Path != "" { - redactedURL += u.Path - } - if u.RawQuery != "" { - redactedURL += "?" + u.RawQuery - } - if u.Fragment != "" { - redactedURL += "#" + u.Fragment - } - } - return redactedURL - } - - // Return the URL as-is if no credentials present - return u.String() + if rawURL == "" { + return NotSetPlaceholder + } + + // Parse the URL to safely handle credentials + u, err := url.Parse(rawURL) + if err != nil { + // If parsing fails, redact entirely for safety + return RedactedPlaceholder + } + + // If URL contains user credentials, redact them + if u.User != nil { + // Clear the User field and manually build the redacted URL + u.User = nil + redactedURL := u.String() + + // Insert RedactedPlaceholder after the scheme:// + if u.Scheme != "" { + redactedURL = u.Scheme + "://" + RedactedPlaceholder + "@" + u.Host + if u.Path != "" { + redactedURL += u.Path + } + if u.RawQuery != "" { + redactedURL += "?" 
+ u.RawQuery + } + if u.Fragment != "" { + redactedURL += "#" + u.Fragment + } + } + return redactedURL + } + + // Return the URL as-is if no credentials present + return u.String() } diff --git a/pkg/helper/adapter.go b/pkg/helper/adapter.go new file mode 100644 index 0000000..6f805d7 --- /dev/null +++ b/pkg/helper/adapter.go @@ -0,0 +1,452 @@ +package helper + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "math/big" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/logger" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// generateRandomString generates a random alphanumeric string of the specified length +func generateRandomString(length int) string { + const charset = "abcdefghijklmnopqrstuvwxyz0123456789" + b := make([]byte, length) + for i := range b { + n, err := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) + if err != nil { + // Fallback to timestamp-based randomness if crypto/rand fails + b[i] = charset[i%len(charset)] + } else { + b[i] = charset[n.Int64()] + } + } + return string(b) +} + +// AdapterDeploymentOptions contains configuration for deploying an adapter via Helm +type AdapterDeploymentOptions struct { + ReleaseName string + Namespace string + ChartPath string + AdapterName string + Timeout time.Duration +} + +// GenerateAdapterReleaseName generates a unique Helm release name for an adapter deployment +// The release name format is: adapter--- +// The random suffix prevents conflicts when multiple tests run concurrently or when cleanup from previous runs is incomplete +// The name is truncated to 48 characters to leave room for Helm's deployment/pod suffixes (Kubernetes has a 63-char limit) +// If truncation is needed, the random suffix is always preserved to maintain uniqueness +func GenerateAdapterReleaseName(resourceType, adapterName string) string { + randomSuffix := generateRandomString(5) + + // Kubernetes resource names have a 63-character limit + 
// Reserve ~15 characters for Helm's deployment/pod suffixes + maxReleaseNameLength := 48 + + // Build the base name without the suffix first + baseWithoutSuffix := fmt.Sprintf("adapter-%s-%s", resourceType, adapterName) + + // Calculate how much space we have for the base (reserve space for "-" + suffix) + maxBaseLength := maxReleaseNameLength - len(randomSuffix) - 1 + + // Truncate the base if necessary, but always keep the suffix + if len(baseWithoutSuffix) > maxBaseLength { + baseWithoutSuffix = baseWithoutSuffix[:maxBaseLength] + } + + releaseName := fmt.Sprintf("%s-%s", baseWithoutSuffix, randomSuffix) + return releaseName +} + +// DeployAdapter deploys an adapter using Helm upgrade --install +// This is a common function that can be reused across test cases +// The release name must be provided via opts.ReleaseName - use GenerateAdapterReleaseName() to create a unique name +func (h *Helper) DeployAdapter(ctx context.Context, opts AdapterDeploymentOptions) error { + // Validate required fields + if opts.Namespace == "" { + return fmt.Errorf("AdapterDeploymentOptions.Namespace is required") + } + if opts.ChartPath == "" { + return fmt.Errorf("AdapterDeploymentOptions.ChartPath is required") + } + if opts.AdapterName == "" { + return fmt.Errorf("AdapterDeploymentOptions.AdapterName is required") + } + if opts.ReleaseName == "" { + return fmt.Errorf("AdapterDeploymentOptions.ReleaseName is required - use GenerateAdapterReleaseName() to create a unique name") + } + + // Set default timeout if not specified + if opts.Timeout == 0 { + opts.Timeout = 5 * time.Minute + } + + releaseName := opts.ReleaseName + + logger.Info("deploying adapter via Helm", + "adapter_name", opts.AdapterName, + "release_name", releaseName, + "namespace", opts.Namespace) + + // Copy adapter config folder to chart directory + sourceAdapterDir := filepath.Join(h.Cfg.TestDataDir, AdapterConfigsDir, opts.AdapterName) + destAdapterDir := filepath.Join(opts.ChartPath, opts.AdapterName) + + // 
Remove existing adapter config directory if it exists + if _, err := os.Stat(destAdapterDir); err == nil { + logger.Info("removing existing adapter config directory", "path", destAdapterDir) + if err := os.RemoveAll(destAdapterDir); err != nil { + return fmt.Errorf("failed to remove existing adapter config directory: %w", err) + } + } + + // Copy adapter config directory to chart + logger.Info("copying adapter config", "from", sourceAdapterDir, "to", destAdapterDir) + if err := copyDir(sourceAdapterDir, destAdapterDir); err != nil { + return fmt.Errorf("failed to copy adapter config directory: %w", err) + } + + // Determine the values.yaml file path in the copied adapter directory + valuesFilePath := filepath.Join(destAdapterDir, "values.yaml") + + // Expand environment variables in values.yaml in-place using envsubst + logger.Info("expanding environment variables in values.yaml in-place", "values_file", valuesFilePath) + + // Expand environment variables in values.yaml using envsubst + expandedContent, err := expandEnvVarsInYAMLToBytes(valuesFilePath) + if err != nil { + return fmt.Errorf("failed to expand environment variables in values.yaml: %w", err) + } + + // Overwrite values.yaml with expanded content + if err := os.WriteFile(valuesFilePath, expandedContent, 0600); err != nil { + return fmt.Errorf("failed to overwrite values.yaml with expanded content: %w", err) + } + + logger.Info("successfully expanded environment variables in values.yaml") + + // Build Helm command with single values file + helmArgs := []string{ + "upgrade", "--install", + releaseName, + opts.ChartPath, + "--namespace", opts.Namespace, + "--create-namespace", + "--wait", + "--timeout", opts.Timeout.String(), + "-f", valuesFilePath, + } + + // Add fullnameOverride to ensure consistent release naming + helmArgs = append(helmArgs, + "--set", fmt.Sprintf("fullnameOverride=%s", releaseName), + ) + + logger.Info("executing Helm command", "args", helmArgs) + + // Create context with timeout + 
cmdCtx, cancel := context.WithTimeout(ctx, opts.Timeout+30*time.Second) + defer cancel() + + // Execute Helm command + cmd := exec.CommandContext(cmdCtx, "helm", helmArgs...) // #nosec G204 -- helmArgs is constructed from trusted config + output, err := cmd.CombinedOutput() + if err != nil { + logger.Error("helm upgrade failed", "error", err, "output", string(output)) + + // Collect diagnostic information when deployment fails + h.saveDiagnosticLogs(ctx, opts.AdapterName, releaseName, opts.Namespace) + + return fmt.Errorf("helm upgrade failed: %w (output: %s)", err, string(output)) + } + + logger.Info("adapter deployed successfully", + "release_name", releaseName, + "output", string(output)) + + return nil +} + +// UninstallAdapter uninstalls an adapter using Helm uninstall +// This is a common function that can be reused across test cases +func (h *Helper) UninstallAdapter(ctx context.Context, releaseName, namespace string) error { + logger.Info("uninstalling adapter via Helm", + "release_name", releaseName, + "namespace", namespace) + + // Create context with timeout + cmdCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + // Execute Helm uninstall command + cmd := exec.CommandContext(cmdCtx, "helm", "uninstall", releaseName, + "-n", namespace, + "--wait", + "--timeout", "5m") + + output, err := cmd.CombinedOutput() + if err != nil { + // Check if the error is because the release doesn't exist + if strings.Contains(string(output), "not found") { + logger.Info("adapter release not found, skipping uninstall", "release_name", releaseName) + // Clean up orphaned cluster-scoped resources even when release is not found + // This handles cases like interrupted installs or manual deletions + h.cleanupClusterScopedResources(ctx, releaseName) + return nil + } + logger.Error("helm uninstall failed", "error", err, "output", string(output)) + return fmt.Errorf("helm uninstall failed: %w (output: %s)", err, string(output)) + } + + logger.Info("adapter 
uninstalled successfully", + "release_name", releaseName, + "output", string(output)) + + // Clean up any orphaned cluster-scoped resources (ClusterRoles, ClusterRoleBindings) + // These can be left behind if a previous test run failed or was interrupted + h.cleanupClusterScopedResources(ctx, releaseName) + + return nil +} + +// cleanupClusterScopedResources removes orphaned cluster-scoped resources that may be left +// after Helm uninstall. This is a best-effort cleanup and logs errors without failing. +func (h *Helper) cleanupClusterScopedResources(ctx context.Context, releaseName string) { + cmdCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + // Try to delete ClusterRole + clusterRoleCmd := exec.CommandContext(cmdCtx, "kubectl", "delete", "clusterrole", releaseName, + "--ignore-not-found=true") + if output, err := clusterRoleCmd.CombinedOutput(); err != nil { + logger.Info("could not delete ClusterRole (may not exist)", + "release_name", releaseName, + "output", string(output)) + } else { + logger.Info("cleaned up ClusterRole", "release_name", releaseName) + } + + // Try to delete ClusterRoleBinding + clusterRoleBindingCmd := exec.CommandContext(cmdCtx, "kubectl", "delete", "clusterrolebinding", releaseName, + "--ignore-not-found=true") + if output, err := clusterRoleBindingCmd.CombinedOutput(); err != nil { + logger.Info("could not delete ClusterRoleBinding (may not exist)", + "release_name", releaseName, + "output", string(output)) + } else { + logger.Info("cleaned up ClusterRoleBinding", "release_name", releaseName) + } +} + +// saveDiagnosticLogs saves diagnostic information when adapter deployment fails +// Saves to /-/ directory +// outputDir is configured via OUTPUT_DIR env var or config file (defaults to "output") +func (h *Helper) saveDiagnosticLogs(ctx context.Context, adapterName, releaseName, namespace string) { + // Generate output directory with adapter name and random suffix + randomSuffix := generateRandomString(4) + 
outputDir := filepath.Join(h.Cfg.OutputDir, fmt.Sprintf("%s-%s", adapterName, randomSuffix)) + + // Create output directory + if err := os.MkdirAll(outputDir, 0750); err != nil { + logger.Error("failed to create diagnostic output directory", + "error", err, + "output_dir", outputDir) + return + } + + logger.Info("saving diagnostic logs", + "adapter_name", adapterName, + "release_name", releaseName, + "namespace", namespace, + "output_dir", outputDir) + + cmdCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + // 1. Get pods using client-go + pods, err := h.K8sClient.CoreV1().Pods(namespace).List(cmdCtx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app.kubernetes.io/instance=%s", releaseName), + }) + if err != nil { + logger.Error("failed to list pods", "error", err) + return + } + + if len(pods.Items) == 0 { + logger.Info("no pods found for release", "release_name", releaseName) + return + } + + logger.Info("found pods for release", + "total_pods", len(pods.Items), + "release_name", releaseName) + + // Save logs and description for unhealthy pods only + for _, pod := range pods.Items { + // Check if pod is healthy (Running and all containers ready) + isHealthy := pod.Status.Phase == "Running" + if isHealthy && len(pod.Status.ContainerStatuses) > 0 { + for _, cs := range pod.Status.ContainerStatuses { + if !cs.Ready { + isHealthy = false + break + } + } + } + + // Skip healthy pods + if isHealthy { + logger.Info("skipping healthy pod", "pod", pod.Name) + continue + } + + podName := pod.Name + logger.Info("saving logs for unhealthy pod", + "pod", podName, + "phase", pod.Status.Phase) + + // Save pod logs using kubectl command + podLogFile := filepath.Join(outputDir, fmt.Sprintf("%s.log", podName)) + podLogCmd := exec.CommandContext(cmdCtx, "kubectl", "logs", // #nosec G204 -- podName and namespace are from trusted k8s API + podName, + "-n", namespace, + "--tail=200") + + var logContent string + logContent += fmt.Sprintf("$ %s\n\n", 
podLogCmd.String()) + logOutput, err := podLogCmd.CombinedOutput() + if err != nil { + logContent += fmt.Sprintf("Error: %v\n", err) + logContent += string(logOutput) + } else { + logContent += string(logOutput) + } + + if err := os.WriteFile(podLogFile, []byte(logContent), 0600); err != nil { + logger.Error("failed to write pod log file", + "pod", podName, + "error", err) + } else { + logger.Info("saved pod logs", + "pod", podName, + "file", podLogFile) + } + + // Save pod description using kubectl describe command + podDescFile := filepath.Join(outputDir, fmt.Sprintf("%s-describe.txt", podName)) + podDescCmd := exec.CommandContext(cmdCtx, "kubectl", "describe", "pod", // #nosec G204 -- podName and namespace are from trusted k8s API + podName, + "-n", namespace) + + var descContent string + descContent += fmt.Sprintf("$ %s\n\n", podDescCmd.String()) + descOutput, err := podDescCmd.CombinedOutput() + if err != nil { + descContent += fmt.Sprintf("Error: %v\n", err) + descContent += string(descOutput) + } else { + descContent += string(descOutput) + } + + if err := os.WriteFile(podDescFile, []byte(descContent), 0600); err != nil { + logger.Error("failed to write pod description file", + "pod", podName, + "error", err) + } else { + logger.Info("saved pod description", + "pod", podName, + "file", podDescFile) + } + } + + logger.Info("diagnostic logs saved successfully", "output_dir", outputDir) +} + +// expandEnvVarsInYAMLToBytes expands environment variables in a YAML file using envsubst +// Returns the expanded content as bytes +func expandEnvVarsInYAMLToBytes(yamlPath string) ([]byte, error) { + // Read the YAML file + content, err := os.ReadFile(yamlPath) // #nosec G304 -- yamlPath is constructed from trusted config + if err != nil { + return nil, fmt.Errorf("failed to read YAML file: %w", err) + } + + // Use envsubst command to expand environment variables + cmd := exec.Command("envsubst") + cmd.Stdin = bytes.NewReader(content) + + var stdout, stderr bytes.Buffer 
+ cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("envsubst failed: %w (stderr: %s)", err, stderr.String()) + } + + return stdout.Bytes(), nil +} + +// copyDir recursively copies a directory tree +func copyDir(src, dst string) error { + // Get source directory info + srcInfo, err := os.Stat(src) + if err != nil { + return err + } + + // Create destination directory + if err := os.MkdirAll(dst, srcInfo.Mode()); err != nil { + return err + } + + // Read source directory contents + entries, err := os.ReadDir(src) + if err != nil { + return err + } + + // Copy each entry + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + if entry.IsDir() { + // Recursively copy subdirectory + if err := copyDir(srcPath, dstPath); err != nil { + return err + } + } else { + // Copy file + if err := copyFile(srcPath, dstPath); err != nil { + return err + } + } + } + + return nil +} + +// copyFile copies a single file +func copyFile(src, dst string) error { + srcData, err := os.ReadFile(src) // #nosec G304 -- src is constructed from trusted config + if err != nil { + return err + } + + srcInfo, err := os.Stat(src) + if err != nil { + return err + } + + return os.WriteFile(dst, srcData, srcInfo.Mode()) +} diff --git a/pkg/helper/constants.go b/pkg/helper/constants.go new file mode 100644 index 0000000..e775fc4 --- /dev/null +++ b/pkg/helper/constants.go @@ -0,0 +1,13 @@ +package helper + +const ( + // AdapterConfigsDir is the directory for adapter configurations under testdata + AdapterConfigsDir = "adapter-configs" + // TestWorkDir is the working directory for test execution and temporary test files + TestWorkDir = ".test-work" +) + +const ( + ResourceTypeClusters = "clusters" + ResourceTypeNodepools = "nodepools" +) diff --git a/pkg/helper/git.go b/pkg/helper/git.go new file mode 100644 index 0000000..a3147d4 --- /dev/null +++ b/pkg/helper/git.go @@ -0,0 
+1,155 @@ +package helper + +import ( + "context" + "fmt" + "net/url" + "os" + "os/exec" + "path/filepath" + + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/logger" +) + +// HelmChartCloneOptions contains configuration for cloning a Helm chart repository +type HelmChartCloneOptions struct { + // Component is the component name (e.g., "adapter", "api", "sentinel") + Component string + + // RepoURL is the Git repository URL + RepoURL string + + // Ref is the branch or tag to clone + // Note: Commit SHAs are not supported due to git clone --branch limitations + Ref string + + // ChartPath is the path within the repository to the chart directory + // This will be used for sparse checkout to minimize download size + ChartPath string + + // WorkDir is the base work directory for cloning + // If empty, uses "./.test-work" in current directory + WorkDir string +} + +// CloneHelmChart clones a Helm chart repository using sparse checkout to minimize download size. +// It returns the full path to the cloned chart and a cleanup function. 
+func (h *Helper) CloneHelmChart(ctx context.Context, opts HelmChartCloneOptions) (chartPath string, cleanup func() error, err error) { + // Validate required fields + if opts.Component == "" { + return "", nil, fmt.Errorf("component is required") + } + if opts.RepoURL == "" { + return "", nil, fmt.Errorf("repoURL is required") + } + if opts.Ref == "" { + return "", nil, fmt.Errorf("ref is required") + } + if opts.ChartPath == "" { + return "", nil, fmt.Errorf("ChartPath is required") + } + + // Set default work directory if not specified + workDir := opts.WorkDir + if workDir == "" { + // Default to ./.test-work in current directory + cwd, err := os.Getwd() + if err != nil { + return "", nil, fmt.Errorf("failed to get current directory: %w", err) + } + workDir = filepath.Join(cwd, TestWorkDir) + } + + // Ensure work directory exists before cloning + if err := os.MkdirAll(workDir, 0750); err != nil { + return "", nil, fmt.Errorf("failed to create work directory: %w", err) + } + + // Create an isolated component-specific directory per invocation + // This prevents race conditions when parallel tests clone the same component + componentDir, err := os.MkdirTemp(workDir, opts.Component+"-") + if err != nil { + return "", nil, fmt.Errorf("failed to create component work directory: %w", err) + } + + // Cleanup function to remove the cloned repository + cleanup = func() error { + logger.Info("cleaning up cloned Helm chart", "path", componentDir) + if err := os.RemoveAll(componentDir); err != nil { + return fmt.Errorf("failed to remove cloned chart directory: %w", err) + } + return nil + } + + // Redact credentials from RepoURL before logging + redactedRepo := opts.RepoURL + if u, err := url.Parse(opts.RepoURL); err == nil && u.User != nil { + u.User = url.User("***") + redactedRepo = u.String() + } + + logger.Info("cloning Helm chart repository", + "component", opts.Component, + "repo", redactedRepo, + "ref", opts.Ref, + "chart_path", opts.ChartPath, + "dest", 
componentDir) + + // Step 1: Clone with sparse checkout (no files yet) + logger.Info("executing sparse checkout git clone") + cmd := exec.CommandContext(ctx, "git", "clone", // #nosec G204 -- opts are from trusted config + "--depth", "1", + "--filter=blob:none", + "--sparse", + "--no-checkout", + "--branch", opts.Ref, + opts.RepoURL, + componentDir) + + if output, err := cmd.CombinedOutput(); err != nil { + _ = cleanup() + return "", nil, fmt.Errorf("git clone failed: %w\nOutput: %s", err, string(output)) + } + + // Step 2: Configure sparse checkout - only checkout the chart path + logger.Info("configuring sparse checkout", "sparse_path", opts.ChartPath) + + // Initialize sparse checkout (no cone mode) + cmd = exec.CommandContext(ctx, "git", "sparse-checkout", "init", "--no-cone") + cmd.Dir = componentDir + if output, err := cmd.CombinedOutput(); err != nil { + _ = cleanup() + return "", nil, fmt.Errorf("sparse-checkout init failed: %w\nOutput: %s", err, string(output)) + } + + // Set sparse checkout path + cmd = exec.CommandContext(ctx, "git", "sparse-checkout", "set", opts.ChartPath) // #nosec G204 -- opts.ChartPath is from trusted config + cmd.Dir = componentDir + if output, err := cmd.CombinedOutput(); err != nil { + _ = cleanup() + return "", nil, fmt.Errorf("sparse-checkout set failed: %w\nOutput: %s", err, string(output)) + } + + // Checkout the files + logger.Info("checking out files") + cmd = exec.CommandContext(ctx, "git", "checkout", opts.Ref) // #nosec G204 -- opts.Ref is from trusted config + cmd.Dir = componentDir + if output, err := cmd.CombinedOutput(); err != nil { + _ = cleanup() + return "", nil, fmt.Errorf("git checkout failed: %w\nOutput: %s", err, string(output)) + } + + // Verify Chart.yaml exists in the cloned chart directory + fullChartPath := filepath.Join(componentDir, opts.ChartPath) + chartYamlPath := filepath.Join(fullChartPath, "Chart.yaml") + if _, err := os.Stat(chartYamlPath); err != nil { + _ = cleanup() + return "", nil, 
fmt.Errorf("chart.yaml not found at %s (verify ChartPath is correct): %w", fullChartPath, err) + } + + logger.Info("Helm chart cloned successfully", + "component", opts.Component, + "chart_path", fullChartPath) + + return fullChartPath, cleanup, nil +} diff --git a/pkg/helper/helper.go b/pkg/helper/helper.go index d4dfe77..8f966cc 100644 --- a/pkg/helper/helper.go +++ b/pkg/helper/helper.go @@ -3,6 +3,7 @@ package helper import ( "context" "fmt" + "path/filepath" "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/api/openapi" "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/client" @@ -20,6 +21,12 @@ type Helper struct { MaestroClient *maestro.Client } +// TestDataPath resolves a relative path within the testdata directory +// This ensures testdata paths work correctly whether invoked via go test or the e2e binary +func (h *Helper) TestDataPath(relativePath string) string { + return filepath.Join(h.Cfg.TestDataDir, relativePath) +} + // GetTestCluster creates a new temporary test cluster func (h *Helper) GetTestCluster(ctx context.Context, payloadPath string) (string, error) { cluster, err := h.Client.CreateClusterFromPayload(ctx, payloadPath) diff --git a/test-design/testcases/adapter.md b/test-design/testcases/adapter-failover.md similarity index 59% rename from test-design/testcases/adapter.md rename to test-design/testcases/adapter-failover.md index adce183..1b683e8 100644 --- a/test-design/testcases/adapter.md +++ b/test-design/testcases/adapter-failover.md @@ -1,17 +1,17 @@ -# Feature: Adapter Framework - Customization +# Feature: Adapter Failover scenarios ## Table of Contents -1. [Adapter framework can detect and report failures to cluster API endpoints](#test-title-adapter-framework-can-detect-and-report-failures-to-cluster-api-endpoints) -2. [Adapter framework can detect and handle resource timeouts](#test-title-adapter-framework-can-detect-and-handle-resource-timeouts) +1. 
[Adapter can detect and report failures to cluster API endpoints](#test-title-adapter-can-detect-and-report-failures-to-cluster-api-endpoints) +2. [Adapter can detect and handle resource timeouts to cluster API endpoints](#test-title-adapter-can-detect-and-handle-resource-timeouts-to-cluster-api-endpoints) --- -## Test Title: Adapter framework can detect and report failures to cluster API endpoints +## Test Title: Adapter can detect and report failures to cluster API endpoints ### Description -This test validates that the adapter framework correctly detects and reports failures when attempting to create invalid Kubernetes resources on the target cluster. It ensures that when an adapter's configuration contains invalid K8s resource objects, the framework properly handles the API server rejection, logs meaningful error messages, and reports the failure status back to the HyperFleet API with appropriate condition states and error details. +This test validates that the adapter correctly detects and reports failures when attempting to create invalid Kubernetes resources on the target cluster. It ensures that when an adapter's configuration contains invalid K8s resource objects, the framework properly handles the API server rejection, logs meaningful error messages, and reports the failure status back to the HyperFleet API with appropriate condition states and error details. 
--- @@ -19,12 +19,12 @@ This test validates that the adapter framework correctly detects and reports fai | **Field** | **Value** | |-----------|-----------| | **Pos/Neg** | Negative | -| **Priority** | Tier2 | +| **Priority** | Tier1 | | **Status** | Draft | -| **Automation** | Not Automated | +| **Automation** | Automated | | **Version** | MVP | | **Created** | 2026-01-30 | -| **Updated** | 2026-01-30 | +| **Updated** | 2026-03-13 | --- @@ -43,8 +43,7 @@ This test validates that the adapter framework correctly detects and reports fai - Deploy the test adapter **Expected Result:** -- Adapter detects template rendering error -- Log reports failure with clear error message +- Adapter pods are running successfully #### Step 2: Send POST request to create a new cluster **Action:** @@ -64,6 +63,8 @@ curl -X POST ${API_URL}/api/hyperfleet/v1/clusters \ - Verify adapter status **Expected Result:** +- The related error message should be shown in statuses +- All the type condition status should be 'False' ```bash curl -X GET ${API_URL}/api/hyperfleet/v1/clusters//statuses \ | jq -r '.items[] | select(.adapter=="") | .conditions[] | select(.type=="Available")' @@ -80,11 +81,11 @@ curl -X POST ${API_URL}/api/hyperfleet/v1/clusters \ --- -## Test Title: Adapter framework can detect and handle resource timeouts +## Test Title: Adapter can detect and handle resource timeouts to cluster API endpoints ### Description -This test validates that the adapter framework correctly detects and handles resource timeouts when adapter Jobs exceed configured timeout limits. +This test validates that the adapter correctly detects and handles resource timeouts when adapter Jobs exceed configured timeout limits. 
--- @@ -111,34 +112,11 @@ This test validates that the adapter framework correctly detects and handles res #### Step 1: Configure adapter with timeout setting **Action:** -- Configure AdapterConfig with non-existed conditions that can't meet the precondition -```yaml - preconditions: - - name: "clusterStatus" - apiCall: - method: "GET" - url: "{{ .hyperfleetApiBaseUrl }}/api/hyperfleet/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "clusterName" - field: "name" - - name: "clusterPhase" - field: "status.phase" - - name: "generationId" - field: "generation" - conditions: - - field: "clusterPhase" - operator: "in" - values: ["NotReady", "Ready"] -``` +- Simulate a scenario where the adapter will be stuck - Deploy the test adapter **Expected Result:** -- Adapter loads configuration successfully - Adapter pods are running successfully -- Adapter logs show successful initialization #### Step 2: Send POST request to create a new cluster **Action:** diff --git a/test-design/testcases/adapter-with-maestro-transport.md b/test-design/testcases/adapter-with-maestro-transport.md index d3d319d..8238774 100644 --- a/test-design/testcases/adapter-with-maestro-transport.md +++ b/test-design/testcases/adapter-with-maestro-transport.md @@ -70,7 +70,7 @@ This test validates the complete Maestro transport happy path: creating a cluste | **Pos/Neg** | Positive | | **Priority** | Tier0 | | **Status** | Draft | -| **Automation** | Not Automated | +| **Automation** | Automated | | **Version** | MVP | | **Created** | 2026-02-12 | | **Updated** | 2026-03-02 | @@ -430,7 +430,7 @@ This test validates that the adapter can route ManifestWorks to different Maestr | **Pos/Neg** | Positive | | **Priority** | Tier1 | | **Status** | Draft | -| **Automation** | Not Automated | +| **Automation** | Automated | | **Version** | MVP | | **Created** | 2026-02-12 | | **Updated** | 2026-02-26 | diff --git 
a/testdata/adapter-configs/cl-invalid-resource/adapter-config.yaml b/testdata/adapter-configs/cl-invalid-resource/adapter-config.yaml new file mode 100644 index 0000000..050e8e1 --- /dev/null +++ b/testdata/adapter-configs/cl-invalid-resource/adapter-config.yaml @@ -0,0 +1,23 @@ +adapter: + name: cl-invalid-resource + #version: "0.1.0" + +# Log the full merged configuration after load (default: false) +debug_config: false +log: + level: debug + +clients: + hyperfleet_api: + base_url: http://hyperfleet-api:8000 + version: v1 + timeout: 2s + retry_attempts: 3 + retry_backoff: exponential + + broker: + subscription_id: CHANGE_ME + topic: CHANGE_ME + + kubernetes: + api_version: "v1" diff --git a/testdata/adapter-configs/cl-invalid-resource/adapter-task-config.yaml b/testdata/adapter-configs/cl-invalid-resource/adapter-task-config.yaml new file mode 100644 index 0000000..2e955e4 --- /dev/null +++ b/testdata/adapter-configs/cl-invalid-resource/adapter-task-config.yaml @@ -0,0 +1,149 @@ +# Test adapter configuration with invalid K8s resource to test error detection + +# Parameters with all required variables +params: + - name: "clusterId" + source: "event.id" + type: "string" + required: true + - name: "testRunId" + source: "env.TEST_RUN_ID" + type: "string" + required: false + default: "TEST_RUN_ID" + - name: "ci" + source: "env.CI" + type: "string" + required: false + default: "false" + +# Preconditions with valid operators and CEL expressions +preconditions: + - name: "clusterStatus" + api_call: + method: "GET" + url: "/clusters/{{ .clusterId }}" + timeout: 10s + retry_attempts: 3 + retry_backoff: "exponential" + capture: + - name: "clusterName" + field: "name" + - name: "generationSpec" + field: "generation" + - name: "readyConditionStatus" + expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? 
status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + # Structured conditions with valid operators + conditions: + - field: "readyConditionStatus" + operator: "equals" + value: "False" + + - name: "validationCheck" + # Valid CEL expression + expression: | + readyConditionStatus == "False" + +# Resources with INVALID K8s manifest - this will cause API server rejection +resources: + - name: "invalidConfigMap" + transport: + client: "kubernetes" + manifest: + apiVersion: v1 + kind: ConfigMap + metadata: + # Invalid name: contains uppercase letters which violate DNS-1123 subdomain naming rules + name: "INVALID-NAME-{{ .clusterId }}" + namespace: "{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" + e2e.hyperfleet.io/test-run-id: "{{ .testRunId }}" + e2e.hyperfleet.io/ci: "{{ .ci }}" + e2e.hyperfleet.io/managed-by: "test-framework" + annotations: + hyperfleet.io/generation: "{{ .generationSpec }}" + data: + test-key: "test-value" + discovery: + namespace: "{{ .clusterId }}" + by_selectors: + label_selector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + +# Post-processing to report the status (will report failure) +post: + payloads: + - name: "clusterStatusPayload" + build: + adapter: "{{ .adapter.name }}" + conditions: + # Applied: Resource creation attempt + - type: "Applied" + status: + expression: | + resources.?invalidConfigMap.?metadata.?name.orValue("") != "" ? "True" : "False" + reason: + expression: | + resources.?invalidConfigMap.?metadata.?name.orValue("") != "" + ? "ResourceCreated" + : "ResourceFailed" + message: + expression: | + resources.?invalidConfigMap.?metadata.?name.orValue("") != "" + ? "ConfigMap created successfully" + : "ConfigMap creation failed" + # Available: Check resource readiness + - type: "Available" + status: + expression: | + resources.?invalidConfigMap.?metadata.?name.orValue("") != "" ? 
"True" : "False" + reason: + expression: | + adapter.?errorReason.orValue("") != "" + ? adapter.?errorReason.orValue("") + : (resources.?invalidConfigMap.?metadata.?name.orValue("") != "" + ? "ResourceReady" + : "ResourceNotReady") + message: + expression: | + adapter.?errorMessage.orValue("") != "" + ? adapter.?errorMessage.orValue("") + : (resources.?invalidConfigMap.?metadata.?name.orValue("") != "" + ? "ConfigMap is ready" + : "ConfigMap is not ready") + # Health: Adapter execution status (runtime) + - type: "Health" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" ? "True" : "False" + reason: + expression: | + adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" + message: + expression: | + adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations completed successfully" + # Event generation ID metadata field needs to use expression to avoid interpolation issues + observed_generation: + expression: "generationSpec" + observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + + data: + configmap: + name: + expression: | + resources.?invalidConfigMap.?metadata.?name.orValue("") + + post_actions: + - name: "reportClusterStatus" + api_call: + method: "POST" + url: "/clusters/{{ .clusterId }}/statuses" + headers: + - name: "Content-Type" + value: "application/json" + body: "{{ .clusterStatusPayload }}" diff --git a/testdata/adapter-configs/cl-invalid-resource/values.yaml b/testdata/adapter-configs/cl-invalid-resource/values.yaml new file mode 100644 index 0000000..559755a --- /dev/null +++ b/testdata/adapter-configs/cl-invalid-resource/values.yaml @@ -0,0 +1,32 @@ +adapterConfig: + create: true + files: + adapter-config.yaml: cl-invalid-resource/adapter-config.yaml + log: + level: debug + +adapterTaskConfig: + create: true + files: + task-config.yaml: cl-invalid-resource/adapter-task-config.yaml + +broker: + create: true + googlepubsub: + project_id: 
${GCP_PROJECT_ID} + subscription_id: ${NAMESPACE}-clusters-${ADAPTER_NAME} + topic: ${NAMESPACE}-clusters + deadLetter_topic: ${NAMESPACE}-clusters-dlq + create_topic_if_missing: ${CREATE_TOPIC_IF_MISSING} + create_subscription_if_missing: ${CREATE_SUBSCRIPTION_IF_MISSING} + +image: + registry: ${IMAGE_REGISTRY} + repository: hyperfleet-adapter + pullPolicy: Always + tag: latest + +rbac: + resources: + - namespaces + - configmaps