From 92e0696b6b576bc07d524123fe1ad9d662c5a813 Mon Sep 17 00:00:00 2001 From: Angel Marin Date: Thu, 5 Mar 2026 09:22:42 +0100 Subject: [PATCH] HYPERFLEET-551 - feat: use configuration standard --- README.md | 20 +- charts/README.md | 20 +- charts/examples/README.md | 31 +- charts/examples/kubernetes/README.md | 12 +- .../examples/kubernetes/adapter-config.yaml | 45 +- .../kubernetes/adapter-task-config.yaml | 446 ++++--- ...adapter-task-resource-job-rolebinding.yaml | 2 +- charts/examples/kubernetes/values.yaml | 20 +- .../maestro-kubernetes/adapter-config.yaml | 66 - .../adapter-task-config.yaml | 194 --- .../adapter-task-resource-manifestwork.yaml | 138 --- .../adapter-task-resource-namespace.yaml | 9 - charts/examples/maestro/README.md | 12 +- charts/examples/maestro/adapter-config.yaml | 123 +- .../examples/maestro/adapter-task-config.yaml | 452 ++++--- .../adapter-task-resource-manifestwork.yaml | 25 +- charts/examples/maestro/values.yaml | 6 +- charts/templates/configmap-broker.yaml | 12 +- charts/templates/deployment.yaml | 6 +- charts/values.yaml | 337 +++-- cmd/adapter/main.go | 241 +++- configs/adapter-config-template.yaml | 396 ++---- configs/adapter-deployment-config.yaml | 115 -- configs/adapter-task-config-template.yaml | 510 ++++---- configs/templates/cluster-status-payload.yaml | 16 - configs/templates/deployment.yaml | 37 - configs/templates/job.yaml | 29 - configuration.md | 169 --- docs/adapter-authoring-guide.md | 178 ++- docs/configuration.md | 231 ++++ internal/config_loader/README.md | 47 +- internal/config_loader/accessors.go | 59 +- internal/config_loader/constants.go | 28 +- internal/config_loader/loader.go | 54 +- internal/config_loader/loader_test.go | 1099 ++++++----------- internal/config_loader/types.go | 186 ++- internal/config_loader/validator.go | 113 +- internal/config_loader/validator_test.go | 208 ++-- internal/config_loader/viper_loader.go | 166 ++- internal/criteria/README.md | 8 +- internal/criteria/cel_evaluator_test.go | 21 
+- internal/criteria/evaluator_test.go | 37 +- internal/executor/README.md | 24 +- internal/executor/executor.go | 28 +- internal/executor/executor_test.go | 133 +- internal/executor/param_extractor.go | 264 +--- internal/executor/types.go | 2 +- internal/executor/utils.go | 6 +- internal/executor/utils_test.go | 160 +-- internal/hyperfleet_api/types.go | 12 +- internal/manifest/generation.go | 10 - internal/manifest/manifest.go | 31 - pkg/health/server.go | 31 +- scripts/test-config-loading.sh | 430 +++++++ .../config_criteria_integration_test.go | 14 +- .../config-loader/loader_template_test.go | 43 +- .../testdata/adapter-config-template.yaml | 680 +++++----- .../testdata/adapter_config_valid.yaml | 197 --- .../templates/cluster-status-payload.yaml | 4 +- .../config-loader/testdata/templates/job.yaml | 2 +- .../executor/executor_integration_test.go | 504 ++++---- .../executor/executor_k8s_integration_test.go | 343 +++-- .../testdata/test-adapter-config.yaml | 87 -- .../client_tls_config_integration_test.go | 81 +- test/integration/maestro_client/main_test.go | 2 +- test/integration/maestro_client/setup_test.go | 15 +- test/integration/testutil/mock_api_server.go | 12 +- test/testdata/adapter-config.yaml | 25 +- test/testdata/adapter_config_valid.yaml | 181 --- .../dryrun-cel-showcase-task-config.yaml | 624 +++++----- .../dryrun-kubernetes-adapter-config.yaml | 31 +- ...ubernetes-adatepr-task-config-invalid.yaml | 74 +- .../dryrun/dryrun-kubernetes-task-config.yaml | 402 +++--- .../dryrun/dryrun-maestro-adapter-config.yaml | 56 +- .../dryrun-maestro-adapter-task-config.yaml | 635 +++++----- test/testdata/task-config.yaml | 255 ++-- test/testdata/templates/deployment.yaml | 2 +- 77 files changed, 4957 insertions(+), 6367 deletions(-) delete mode 100644 charts/examples/maestro-kubernetes/adapter-config.yaml delete mode 100644 charts/examples/maestro-kubernetes/adapter-task-config.yaml delete mode 100644 
charts/examples/maestro-kubernetes/adapter-task-resource-manifestwork.yaml delete mode 100644 charts/examples/maestro-kubernetes/adapter-task-resource-namespace.yaml delete mode 100644 configs/adapter-deployment-config.yaml delete mode 100644 configs/templates/cluster-status-payload.yaml delete mode 100644 configs/templates/deployment.yaml delete mode 100644 configs/templates/job.yaml delete mode 100644 configuration.md create mode 100644 docs/configuration.md delete mode 100644 internal/manifest/manifest.go create mode 100755 scripts/test-config-loading.sh delete mode 100644 test/integration/config-loader/testdata/adapter_config_valid.yaml delete mode 100644 test/integration/executor/testdata/test-adapter-config.yaml delete mode 100644 test/testdata/adapter_config_valid.yaml diff --git a/README.md b/README.md index 5109ad9..d8a5b0e 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ cd hyperfleet-adapter ### Install Dependencies ```bash -make tidy +make mod-tidy ``` ### Build @@ -128,9 +128,9 @@ hyperfleet-adapter/ | `make image-push` | Build and push container image to registry | | `make image-dev` | Build and push to personal Quay registry (requires QUAY_USER) | | `make fmt` | Format code | -| `make tidy` | Tidy Go module dependencies | +| `make mod-tidy` | Tidy Go module dependencies | | `make clean` | Clean build artifacts | -| `make verify` | Run format check and go vet | +| `make verify` | Run lint and test | 💡 **Tip:** Use `make help` to see all available targets with descriptions @@ -169,7 +169,7 @@ A HyperFleet Adapter requires several files for configuration: - **Adapter Task config**: Configures the adapter task steps that will create resources - **Broker configuration**: Configures the specific broker to use by the adapter framework to receive CloudEvents -To see all configuration options read [configuration.md](configuration.md) file +To see all configuration options read [configuration.md](docs/configuration.md) file #### Adapter configuration 
@@ -178,14 +178,16 @@ settings for the adapter process, such as client connections, retries, and broke subscription details. It is loaded with Viper, so values can be overridden by CLI flags and environment variables in this priority order: CLI flags > env vars > file > defaults. +Fields use **snake_case** naming. + - **Path**: `HYPERFLEET_ADAPTER_CONFIG` (required) -- **Common fields**: `spec.adapter.version`, `spec.debugConfig`, `spec.clients.*` - (HyperFleet API, Maestro, broker, Kubernetes) +- **Common fields**: `adapter.name`, `adapter.version`, `debug_config`, `clients.*` + (HyperFleet API: `clients.hyperfleet_api`, Maestro: `clients.maestro`, broker: `clients.broker`, Kubernetes: `clients.kubernetes`) Reference examples: - `configs/adapter-deployment-config.yaml` (full reference with env/flag notes) -- `charts/examples/adapter-config.yaml` (minimal deployment example) +- `charts/examples/kubernetes/adapter-config.yaml` (minimal deployment example) #### Adapter task configuration @@ -194,12 +196,12 @@ processing events: parameters, preconditions, resources to create, and post-acti This file is loaded as **static YAML** (no Viper overrides) and is required at runtime. - **Path**: `HYPERFLEET_TASK_CONFIG` (required) -- **Key sections**: `spec.params`, `spec.preconditions`, `spec.resources`, `spec.post` +- **Key sections**: `params`, `preconditions`, `resources`, `post` - **Resource manifests**: inline YAML or external file via `manifest.ref` Reference examples: -- `charts/examples/adapter-task-config.yaml` (worked example) +- `charts/examples/kubernetes/adapter-task-config.yaml` (worked example) - `configs/adapter-task-config-template.yaml` (complete schema reference) ### Broker Configuration diff --git a/charts/README.md b/charts/README.md index e975f16..a6fb4c3 100644 --- a/charts/README.md +++ b/charts/README.md @@ -62,15 +62,15 @@ Beware of template resolution within files referenced in an `AdapterTaskConfig`. 
| `adapterConfig.configMapName` | AdapterConfig ConfigMap name | `""` | | `adapterConfig.yaml` | AdapterConfig YAML content | `""` | | `adapterConfig.files` | AdapterConfig YAML files packaged with chart | `{}` | -| `adapterConfig.hyperfleetApi.baseUrl` | HyperFleet API base URL (HYPERFLEET_API_BASE_URL) | `"http://hyperfleet-api:8000"` | -| `adapterConfig.hyperfleetApi.version` | API version (HYPERFLEET_API_VERSION) | `"v1"` | +| `adapterConfig.hyperfleet_api.base_url` | HyperFleet API base URL (HYPERFLEET_API_BASE_URL) | `"http://hyperfleet-api:8000"` | +| `adapterConfig.hyperfleet_api.version` | API version (HYPERFLEET_API_VERSION) | `"v1"` | | `adapterConfig.log.level` | Adapter log level | `"info"` | | `adapterTaskConfig.create` | Enable AdapterTaskConfig ConfigMap | `true` | | `adapterTaskConfig.configMapName` | AdapterTaskConfig ConfigMap name | `""` | | `adapterTaskConfig.yaml` | AdapterTaskConfig YAML content | `""` | | `adapterTaskConfig.files` | AdapterTaskConfig YAML files packaged with chart | `{}` | -AdapterConfig supports `spec.debugConfig` to log the full merged configuration after load +AdapterConfig supports `debug_config` to log the full merged configuration after load (default: `false`). It can also be set via `HYPERFLEET_DEBUG_CONFIG` or `--debug-config`. 
### Broker Configuration @@ -91,8 +91,8 @@ The `ConfigMap` will be: |-----------|-------------|---------| | `broker.create` | Create broker ConfigMap | `true` | | `broker.configMapName` | Broker ConfigMap name | `""` | -| `broker.googlepubsub.projectId` | Google Cloud project ID | `""` | -| `broker.googlepubsub.subscriptionId` | Subscription ID override (HYPERFLEET_BROKER_SUBSCRIPTION_ID) | `""` | +| `broker.googlepubsub.project_id` | Google Cloud project ID | `""` | +| `broker.googlepubsub.subscription_id` | Subscription ID override (HYPERFLEET_BROKER_SUBSCRIPTION_ID) | `""` | | `broker.googlepubsub.topic` | Topic name override (HYPERFLEET_BROKER_TOPIC) | `""` | | `broker.yaml` | Broker YAML config content | `""` | @@ -149,8 +149,8 @@ helm install hyperfleet-adapter ./charts/ \ -f ./charts/examples/values.yaml \ --set image.registry=quay.io/my-quay-registry \ --set broker.create=true \ - --set broker.googlepubsub.projectId=my-gcp-project \ - --set broker.googlepubsub.subscriptionId=my-subscription \ + --set broker.googlepubsub.project_id=my-gcp-project \ + --set broker.googlepubsub.subscription_id=my-subscription \ --set broker.googlepubsub.topic=my-topic ``` @@ -160,10 +160,10 @@ The deployment sets these environment variables automatically: | Variable | Value | Condition | |----------|-------|-----------| -| `HYPERFLEET_API_BASE_URL` | From `adapterConfig.hyperfleetApi.baseUrl` | When `adapterConfig.hyperfleetApi.baseUrl` is set | -| `HYPERFLEET_API_VERSION` | From `adapterConfig.hyperfleetApi.version` | Always (default: v1) | +| `HYPERFLEET_API_BASE_URL` | From `adapterConfig.hyperfleet_api.base_url` | When `adapterConfig.hyperfleet_api.base_url` is set | +| `HYPERFLEET_API_VERSION` | From `adapterConfig.hyperfleet_api.version` | Always (default: v1) | | `BROKER_CONFIG_FILE` | `/etc/broker/broker.yaml` | When `broker.yaml` is set | -| `HYPERFLEET_BROKER_SUBSCRIPTION_ID` | From values | When `broker.googlepubsub.subscriptionId` is set | +| 
`HYPERFLEET_BROKER_SUBSCRIPTION_ID` | From values | When `broker.googlepubsub.subscription_id` is set | | `HYPERFLEET_BROKER_TOPIC` | From values | When `broker.googlepubsub.topic` is set | ## GCP Workload Identity Setup diff --git a/charts/examples/README.md b/charts/examples/README.md index daaab0d..496a262 100644 --- a/charts/examples/README.md +++ b/charts/examples/README.md @@ -8,7 +8,6 @@ This directory contains example configurations for deploying the HyperFleet Adap |-----------|-----------|-------------| | [`kubernetes/`](./kubernetes/) | Kubernetes only | Creates resources directly in the local cluster using the Kubernetes client | | [`maestro/`](./maestro/) | Maestro only | Deploys resources to a remote cluster via Maestro using ManifestWork | -| [`maestro-kubernetes/`](./maestro-kubernetes/) | Maestro + Kubernetes | Hybrid example combining both transport clients in a single task | --- @@ -22,6 +21,7 @@ Creates the following resources directly in the local cluster via the Kubernetes - An Nginx Deployment in the adapter's own namespace **Key features demonstrated:** + - Inline manifests and external file references (`ref:`) - Preconditions with Hyperfleet API calls and CEL expressions - Resource discovery by name and label selectors @@ -42,9 +42,10 @@ Deploys resources to a remote cluster through Maestro (Open Cluster Management) - Nested resource discovery within the ManifestWork result **Key features demonstrated:** + - Maestro transport client configuration (gRPC + HTTP) - ManifestWork template with external file reference (`ref:`) -- Resource discovery by name and by label selectors (`nestedDiscoveries`) +- Resource discovery by name and by label selectors (`nested_discoveries`) - Post-processing with CEL expressions on nested ManifestWork status - Status reporting back to the Hyperfleet API @@ -52,20 +53,6 @@ See [`maestro/README.md`](./maestro/README.md) for full details. 
--- -### `maestro-kubernetes/` — Hybrid Maestro + Kubernetes - -Combines both transport clients in a single adapter task: - -- A ManifestWork delivered via Maestro to a remote cluster (Namespace + ConfigMap) -- A Namespace created directly in the local cluster via the Kubernetes client - -**Key features demonstrated:** -- Using multiple transport clients (`maestro` and `kubernetes`) within the same task -- Per-resource transport selection via the `transport.client` field -- Kubernetes transport as the default fallback when `transport` is omitted - ---- - ## Common Configuration All examples share the same broker and image placeholders that must be updated before deployment. @@ -75,10 +62,10 @@ All examples share the same broker and image placeholders that must be updated b ```yaml broker: googlepubsub: - projectId: CHANGE_ME - subscriptionId: CHANGE_ME + project_id: CHANGE_ME + subscription_id: CHANGE_ME topic: CHANGE_ME - deadLetterTopic: CHANGE_ME + dead_letter_topic: CHANGE_ME ``` ### Image @@ -97,8 +84,8 @@ image: helm install ./charts -f charts/examples//values.yaml \ --namespace \ --set image.registry=quay.io/ \ - --set broker.googlepubsub.projectId= \ - --set broker.googlepubsub.subscriptionId= \ + --set broker.googlepubsub.project_id= \ + --set broker.googlepubsub.subscription_id= \ --set broker.googlepubsub.topic= \ - --set broker.googlepubsub.deadLetterTopic= + --set broker.googlepubsub.dead_letter_topic= ``` diff --git a/charts/examples/kubernetes/README.md b/charts/examples/kubernetes/README.md index 2aa0ae7..58710d4 100644 --- a/charts/examples/kubernetes/README.md +++ b/charts/examples/kubernetes/README.md @@ -118,10 +118,10 @@ Update the `broker.googlepubsub` section in `values.yaml` with your GCP Pub/Sub ```yaml broker: googlepubsub: - projectId: CHANGE_ME - subscriptionId: CHANGE_ME + project_id: CHANGE_ME + subscription_id: CHANGE_ME topic: CHANGE_ME - deadLetterTopic: CHANGE_ME + dead_letter_topic: CHANGE_ME ``` ### Image Configuration @@ -142,10 
+142,10 @@ image: helm install ./charts -f charts/examples/values.yaml \ --namespace \ --set image.registry=quay.io/ \ - --set broker.googlepubsub.projectId= \ - --set broker.googlepubsub.subscriptionId= \ + --set broker.googlepubsub.subscription_id= \ - --set broker.googlepubsub.deadLetterTopic= + --set broker.googlepubsub.dead_letter_topic= ``` ## How It Works diff --git a/charts/examples/kubernetes/adapter-config.yaml b/charts/examples/kubernetes/adapter-config.yaml index e3276d8..0d6115f 100644 --- a/charts/examples/kubernetes/adapter-config.yaml +++ b/charts/examples/kubernetes/adapter-config.yaml @@ -1,32 +1,25 @@ # Example HyperFleet Adapter deployment configuration -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: kubernetes-example - labels: - hyperfleet.io/adapter-type: kubernetes-example - hyperfleet.io/component: adapter -spec: - adapter: - version: "0.1.0" + version: "0.2.0" - # Log the full merged configuration after load (default: false) - debugConfig: true - log: - level: debug +# Log the full merged configuration after load (default: false) +debug_config: true +log: + level: debug - clients: - hyperfleetApi: - baseUrl: http://hyperfleet-api:8000 - version: v1 - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential +clients: + hyperfleet_api: + base_url: http://hyperfleet-api:8000 + version: v1 + timeout: 2s + retry_attempts: 3 + retry_backoff: exponential - broker: - subscriptionId: "CHANGE_ME" - topic: "CHANGE_ME" + broker: + subscription_id: "CHANGE_ME" + topic: "CHANGE_ME" - kubernetes: - apiVersion: "v1" - #kubeConfigPath: PATH_TO_KUBECONFIG # for local development + kubernetes: + api_version: "v1" + #kube_config_path: PATH_TO_KUBECONFIG # for local development diff --git a/charts/examples/kubernetes/adapter-task-config.yaml b/charts/examples/kubernetes/adapter-task-config.yaml index 2380013..2702263 100644 --- a/charts/examples/kubernetes/adapter-task-config.yaml +++ 
b/charts/examples/kubernetes/adapter-task-config.yaml @@ -1,227 +1,221 @@ # Example HyperFleet Adapter task configuration -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: kubernetes-example - labels: - hyperfleet.io/adapter-type: kubernetes-example - hyperfleet.io/component: adapter -spec: - # Parameters with all required variables - params: - - name: "hyperfleetApiBaseUrl" - source: "env.HYPERFLEET_API_BASE_URL" - type: "string" - required: true - - - name: "hyperfleetApiVersion" - source: "env.HYPERFLEET_API_VERSION" - type: "string" - default: "v1" - - - name: "clusterId" - source: "event.id" - type: "string" - required: true - - - name: "generation" - source: "event.generation" - type: "int" - required: true - - - name: "namespace" - source: "env.NAMESPACE" - type: "string" - required: true - - - name: "serviceAccountName" - source: "env.SERVICE_ACCOUNT" - type: "string" - required: true - - - name: "simulateResult" - source: "env.SIMULATE_RESULT" - type: "string" - required: true - - # Preconditions with valid operators and CEL expressions - preconditions: - - name: "clusterStatus" - apiCall: - method: "GET" - url: "/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "clusterName" - field: "name" - - name: "generation" - field: "generation" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? 
status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - # Structured conditions with valid operators - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "False" - - - name: "validationCheck" - # Valid CEL expression - expression: | - readyConditionStatus == "False" - - # Resources with valid K8s manifests - resources: - - name: "clusterNamespace" - transport: - client: "kubernetes" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/cluster-name: "{{ .clusterName }}" - annotations: - hyperfleet.io/generation: "{{ .generation }}" - discovery: - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/cluster-name: "{{ .clusterName }}" - - # the following configuration is for a job that will be created in the cluster - # in the namespace created above - # it will require a service account to be created in that namespace as well as a role and rolebinding - - name: "jobServiceAccount" - transport: - client: "kubernetes" - manifest: - ref: "/etc/adapter/job-serviceaccount.yaml" - discovery: - bySelectors: - labelSelector: - hyperfleet.io/resource-type: "service-account" - hyperfleet.io/cluster-id: "{{ .clusterId }}" - - - name: "job_role" - transport: - client: "kubernetes" - manifest: - ref: "/etc/adapter/job-role.yaml" - discovery: - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/resource-type: "role" - - - name: "job_rolebinding" - transport: - client: "kubernetes" - manifest: - ref: "/etc/adapter/job-rolebinding.yaml" - discovery: - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/resource-type: "role-binding" - - - name: "jobNamespace" - transport: - client: "kubernetes" - manifest: - ref: "/etc/adapter/job.yaml" - discovery: - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ 
.clusterId }}" - hyperfleet.io/resource-type: "job" - - # the following configuration is for a deployment that will be created in the cluster - # in the same namespace as the adapter - # and using the same service account as the adapter - - - name: "deploymentNamespace" - transport: - client: "kubernetes" - manifest: - ref: "/etc/adapter/deployment.yaml" - discovery: - bySelectors: - labelSelector: - hyperfleet.io/resource-type: "deployment" - hyperfleet.io/cluster-id: "{{ .clusterId }}" - - # Post-processing with valid CEL expressions - # This example contains multiple resources, we will only report on the conditions of the jobNamespace not to overcomplicate the example - post: - payloads: - - name: "clusterStatusPayload" - build: - adapter: "{{ .metadata.name }}" - conditions: - # Applied: Job successfully created - - type: "Applied" - status: - expression: | - has(resources.jobNamespace) ? "True" : "False" - reason: - expression: | - has(resources.jobNamespace) - ? "JobApplied" - : "JobPending" - message: - expression: | - has(resources.jobNamespace) - ? "jobNamespace manifest applied successfully" - : "jobNamespace is pending to be applied" - # Available: Check job status conditions - - type: "Available" - status: - expression: | - has(resources.jobNamespace) ? - ( resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Available") - ? resources.jobNamespace.status.conditions.filter(c, c.type == "Available")[0].status : "Unknown") - : "Unknown" - reason: - expression: | - resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Available") - ? resources.jobNamespace.status.conditions.filter(c, c.type == "Available")[0].reason - : resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Failed") ? "ValidationFailed" - : resources.?jobNamespace.?status.hasValue() ? 
"ValidationInProgress" : "ValidationPending" - message: - expression: | - resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Available") - ? resources.jobNamespace.status.conditions.filter(c, c.type == "Available")[0].message - : resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Failed") ? "Validation failed" - : resources.?jobNamespace.?status.hasValue() ? "Validation in progress" : "Validation is pending" - # Health: Adapter execution status (runtime) - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" ? "True" : "False" - reason: - expression: | - adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" - message: - expression: | - adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations in progress or completed successfully" - # Event generation ID metadata field needs to use expression to avoid interpolation issues - observed_generation: - expression: "generation" - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - - postActions: - - name: "reportClusterStatus" - apiCall: - method: "POST" - url: "/clusters/{{ .clusterId }}/statuses" - headers: - - name: "Content-Type" - value: "application/json" - body: "{{ .clusterStatusPayload }}" +# Parameters with all required variables +params: + + - name: "clusterId" + source: "event.id" + type: "string" + required: true + + - name: "namespace" + source: "env.NAMESPACE" + type: "string" + required: true + + - name: "serviceAccountName" + source: "env.SERVICE_ACCOUNT" + type: "string" + required: true + + - name: "simulateResult" + source: "env.SIMULATE_RESULT" + type: "string" + required: true + +# Preconditions with valid operators and CEL expressions +preconditions: + - name: "clusterStatus" + api_call: + method: "GET" + url: "/clusters/{{ .clusterId }}" + timeout: 10s + retry_attempts: 3 + retry_backoff: "exponential" + capture: + - 
name: "clusterName" + field: "name" + - name: "generation" + field: "generation" + - name: "readyConditionStatus" + expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + # Structured conditions with valid operators + conditions: + - field: "readyConditionStatus" + operator: "equals" + value: "False" + + - name: "validationCheck" + # Valid CEL expression + expression: | + readyConditionStatus == "False" + +# Resources with valid K8s manifests +resources: + - name: "clusterNamespace" + transport: + client: "kubernetes" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" + annotations: + hyperfleet.io/generation: "{{ .generation }}" + discovery: + by_selectors: + label_selector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" + + # the following configuration is for a job that will be created in the cluster + # in the namespace created above + # it will require a service account to be created in that namespace as well as a role and rolebinding + - name: "jobServiceAccount" + transport: + client: "kubernetes" + manifest: + ref: "/etc/adapter/job-serviceaccount.yaml" + discovery: + by_selectors: + label_selector: + hyperfleet.io/resource-type: "service-account" + hyperfleet.io/cluster-id: "{{ .clusterId }}" + + - name: "job_role" + transport: + client: "kubernetes" + manifest: + ref: "/etc/adapter/job-role.yaml" + discovery: + by_selectors: + label_selector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "role" + + - name: "job_rolebinding" + transport: + client: "kubernetes" + manifest: + ref: "/etc/adapter/job-rolebinding.yaml" + discovery: + by_selectors: + label_selector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: 
"role-binding" + + - name: "jobNamespace" + transport: + client: "kubernetes" + manifest: + ref: "/etc/adapter/job.yaml" + discovery: + by_selectors: + label_selector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "job" + + # the following configuration is for a deployment that will be created in the cluster + # in the same namespace as the adapter + # and using the same service account as the adapter + + - name: "deploymentNamespace" + transport: + client: "kubernetes" + manifest: + ref: "/etc/adapter/deployment.yaml" + discovery: + by_selectors: + label_selector: + hyperfleet.io/resource-type: "deployment" + hyperfleet.io/cluster-id: "{{ .clusterId }}" + +# Post-processing with valid CEL expressions +# This example contains multiple resources, we will only report on the conditions of the jobNamespace not to overcomplicate the example +post: + payloads: + - name: "clusterStatusPayload" + build: + adapter: "{{ .adapter.name }}" + conditions: + # Applied: Job successfully created + - type: "Applied" + status: + expression: | + has(resources.jobNamespace) ? "True" : "False" + reason: + expression: | + has(resources.jobNamespace) + ? "JobApplied" + : "JobPending" + message: + expression: | + has(resources.jobNamespace) + ? "jobNamespace manifest applied successfully" + : "jobNamespace is pending to be applied" + # Available: Check job status conditions + - type: "Available" + status: + expression: | + has(resources.jobNamespace) ? + ( resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Available") + ? resources.jobNamespace.status.conditions.filter(c, c.type == "Available")[0].status : "Unknown") + : "Unknown" + reason: + expression: | + resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Available") + ? resources.jobNamespace.status.conditions.filter(c, c.type == "Available")[0].reason + : resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Failed") ? 
"ValidationFailed" + : resources.?jobNamespace.?status.hasValue() ? "ValidationInProgress" : "ValidationPending" + message: + expression: | + resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Available") + ? resources.jobNamespace.status.conditions.filter(c, c.type == "Available")[0].message + : resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Failed") ? "Validation failed" + : resources.?jobNamespace.?status.hasValue() ? "Validation in progress" : "Validation is pending" + # Health: Adapter execution status (runtime) + - type: "Health" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" + && !adapter.?resourcesSkipped.orValue(false) + ? "True" + : "False" + reason: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") + : adapter.?resourcesSkipped.orValue(false) + ? "ResourcesSkipped" + : "Healthy" + message: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "Adapter failed at phase [" + + adapter.?executionError.?phase.orValue("unknown") + + "] step [" + + adapter.?executionError.?step.orValue("unknown") + + "]: " + + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) + : adapter.?resourcesSkipped.orValue(false) + ? 
"Resources skipped: " + adapter.?skipReason.orValue("unknown reason") + : "Adapter execution completed successfully" + # Event generation ID metadata field needs to use expression to avoid interpolation issues + observed_generation: + expression: "generation" + observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + + post_actions: + - name: "reportClusterStatus" + api_call: + method: "POST" + url: "/clusters/{{ .clusterId }}/statuses" + headers: + - name: "Content-Type" + value: "application/json" + body: "{{ .clusterStatusPayload }}" diff --git a/charts/examples/kubernetes/adapter-task-resource-job-rolebinding.yaml b/charts/examples/kubernetes/adapter-task-resource-job-rolebinding.yaml index 599b1f5..aa5fe4f 100644 --- a/charts/examples/kubernetes/adapter-task-resource-job-rolebinding.yaml +++ b/charts/examples/kubernetes/adapter-task-resource-job-rolebinding.yaml @@ -7,7 +7,7 @@ metadata: hyperfleet.io/cluster-id: "{{ .clusterId }}" hyperfleet.io/resource-type: "role-binding" annotations: - hyperfleet.io/generation: "{{ .generationSpec }}" + hyperfleet.io/generation: "{{ .generation }}" roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/charts/examples/kubernetes/values.yaml b/charts/examples/kubernetes/values.yaml index 1ff840c..11a19c2 100644 --- a/charts/examples/kubernetes/values.yaml +++ b/charts/examples/kubernetes/values.yaml @@ -1,27 +1,27 @@ adapterConfig: create: true files: - adapter-config.yaml: examples/adapter-config.yaml + adapter-config.yaml: examples/kubernetes/adapter-config.yaml log: level: debug adapterTaskConfig: create: true files: - task-config.yaml: examples/adapter-task-config.yaml - job.yaml: examples/adapter-task-resource-job.yaml - job-serviceaccount.yaml: examples/adapter-task-resource-job-serviceaccount.yaml - job-role.yaml: examples/adapter-task-resource-job-role.yaml - job-rolebinding.yaml: examples/adapter-task-resource-job-rolebinding.yaml - deployment.yaml: 
examples/adapter-task-resource-deployment.yaml + task-config.yaml: examples/kubernetes/adapter-task-config.yaml + job.yaml: examples/kubernetes/adapter-task-resource-job.yaml + job-serviceaccount.yaml: examples/kubernetes/adapter-task-resource-job-serviceaccount.yaml + job-role.yaml: examples/kubernetes/adapter-task-resource-job-role.yaml + job-rolebinding.yaml: examples/kubernetes/adapter-task-resource-job-rolebinding.yaml + deployment.yaml: examples/kubernetes/adapter-task-resource-deployment.yaml broker: create: true googlepubsub: - projectId: CHANGE_ME - subscriptionId: CHANGE_ME + project_id: CHANGE_ME + subscription_id: CHANGE_ME topic: CHANGE_ME - deadLetterTopic: CHANGE_ME + dead_letter_topic: CHANGE_ME image: registry: CHANGE_ME diff --git a/charts/examples/maestro-kubernetes/adapter-config.yaml b/charts/examples/maestro-kubernetes/adapter-config.yaml deleted file mode 100644 index a56a4a2..0000000 --- a/charts/examples/maestro-kubernetes/adapter-config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -# Example HyperFleet Adapter deployment configuration -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: - name: maestro-kubernetes-example - labels: - hyperfleet.io/adapter-type: maestro-kubernetes-example - hyperfleet.io/component: adapter -spec: - adapter: - version: "0.1.0" - - # Log the full merged configuration after load (default: false) - debugConfig: true - log: - level: debug - - clients: - hyperfleetApi: - baseUrl: http://hyperfleet-api:8000 - version: v1 - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential - - broker: - subscriptionId: CHANGE_ME - topic: CHANGE_ME - - maestro: - grpcServerAddress: "maestro-grpc.maestro.svc.cluster.local:8090" - - # HTTPS server address for REST API operations (optional) - # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS - httpServerAddress: "http://maestro.maestro.svc.cluster.local:8000" - - # Source identifier for CloudEvents routing (must be unique across adapters) - # 
Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID - sourceId: "hyperfleet-adapter" - - # Client identifier (defaults to sourceId if not specified) - # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID - clientId: "hyperfleet-adapter-client" - insecure: true - - # Authentication configuration - #auth: - # type: "tls" # TLS certificate-based mTLS - # - # tlsConfig: - # # gRPC TLS configuration - # # Certificate paths (mounted from Kubernetes secrets) - # # Environment variable: HYPERFLEET_MAESTRO_CA_FILE - # caFile: "/etc/maestro/certs/grpc/ca.crt" - # - # # Environment variable: HYPERFLEET_MAESTRO_CERT_FILE - # certFile: "/etc/maestro/certs/grpc/client.crt" - # - # # Environment variable: HYPERFLEET_MAESTRO_KEY_FILE - # keyFile: "/etc/maestro/certs/grpc/client.key" - # - # # HTTP API CA certificate (if HTTPS uses a different CA than gRPC) - # # Falls back to caFile when not set - # # Environment variable: HYPERFLEET_MAESTRO_HTTP_CA_FILE - # httpCaFile: "/etc/maestro/certs/https/ca.crt" - diff --git a/charts/examples/maestro-kubernetes/adapter-task-config.yaml b/charts/examples/maestro-kubernetes/adapter-task-config.yaml deleted file mode 100644 index 8d8b269..0000000 --- a/charts/examples/maestro-kubernetes/adapter-task-config.yaml +++ /dev/null @@ -1,194 +0,0 @@ -# Example HyperFleet Adapter task configuration -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: maestro-kubernetes-example - labels: - hyperfleet.io/adapter-type: maestro-kubernetes-example - hyperfleet.io/component: adapter -spec: - # Parameters with all required variables - params: - - - name: "clusterId" - source: "event.id" - type: "string" - required: true - - - name: "generationId" - source: "event.generation" - type: "int" - required: true - - - name: "simulateResult" - source: "env.SIMULATE_RESULT" - type: "string" - required: true - - - # Preconditions with valid operators and CEL expressions - preconditions: - - name: "clusterStatus" - apiCall: - method: 
"GET" - url: "/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "clusterName" - field: "name" - - name: "generationId" - field: "generation" - - name: "timestamp" - field: "created_time" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - - name: "placementClusterName" - expression: "\"cluster1\"" # TBC coming from placement adapter - description: "Unique identifier for the target maestro" - - - name: "adapterName" - expression: "\"adapter1\"" # TBC coming from config passed to params - description: "Unique identifier for the adapter" - - # Structured conditions with valid operators - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "False" - - - name: "validationCheck" - # Valid CEL expression - expression: | - readyConditionStatus == "False" - - # Resources with valid K8s manifests - resources: - - name: "agentNamespaceManifestWork" - transport: - client: "maestro" - maestro: - targetCluster: "{{ .placementClusterName }}" - - # ManifestWork is a kind of manifest that can be used to create resources on the cluster. - # It is a collection of resources that are created together. - # It is created by the adapter and can be used to create resources on the cluster. - # It is created by the adapter and can be used to create resources on the cluster. - manifest: - ref: "/etc/adapter/manifestwork.yaml" - - # Discover the manifestWork by name - # This is used to get the manifestWork object from the cluster. - # With this approach, the manifestWork object can be used to get the resources inside the manifestWork. 
- # When try to dig Manifest - discovery: - byName: "mw-{{ .clusterId }}" - - # Discover sub-resources within the manifestWork - # This approach can be used to use the discovery name to parameter level - # This can support jsonPath to dig into the resource status. like mgmtNamespace.status.conditions[?(@.type=="Ready")].status - nestedDiscoveries: - - name: "mgmtNamespace" - discovery: - bySelectors: - labelSelector: - hyperfleet.io/resource-type: "namespace" - hyperfleet.io/cluster-id: "{{ .clusterId }}" - - name: "mgmtConfigMap" - discovery: - byName: "{{ .clusterId }}" - - - # Namespace resource creation with k8s transport client on the adapter deployment cluster' - # transport will be fallback to kubernetes if not set. - - name: "namespace" - transport: - client: "kubernetes" - manifest: - ref: "/etc/adapter/adapter-task-resource-namespace.yaml" - discovery: - byName: "{{ .clusterId }}" - - # Post-processing with valid CEL expressions - # This example contains multiple resources, we will only report on the conditions of the jobNamespace not to overcomplicate the example - post: - payloads: - - name: "clusterStatusPayload" - build: - # Adapter name for tracking which adapter reported this status - adapter: "{{ .metadata.name }}" - - # Conditions array - each condition has type, status, reason, message - # Use CEL optional chaining ?.orValue() for safe field access - conditions: - # Applied: Resources successfully created - - type: "Applied" - status: - expression: | - resources.?agentNamespaceManifestWork.agentNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" - reason: - expression: | - resources.?agentNamespaceManifestWork.agentNamespace.?status.?phase.orValue("") == "Active" - ? "NamespaceCreated" - : "NamespacePending" - message: - expression: | - resources.?agentNamespaceManifestWork.agentNamespace.?status.?phase.orValue("") == "Active" - ? 
"Namespace created successfully" - : "Namespace creation in progress" - - # Available: Resources are active and ready - - type: "Available" - status: - expression: | - resources.?agentNamespaceManifestWork.agentNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" - reason: - expression: | - resources.?agentNamespaceManifestWork.agentNamespace.?status.?phase.orValue("") == "Active" ? "NamespaceReady" : "NamespaceNotReady" - message: - expression: | - resources.?agentNamespaceManifestWork.agentNamespace.?status.?phase.orValue("") == "Active" ? "Namespace is active and ready" : "Namespace is not active and ready" - - # Health: Adapter execution status (runtime) Don't need to update this. This can be reused from the adapter config. - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" ? "True" : (adapter.?executionStatus.orValue("") == "failed" ? "False" : "Unknown") - reason: - expression: | - adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" - message: - expression: | - adapter.?errorMessage.orValue("") != "" ? 
adapter.?errorMessage.orValue("") : "All adapter operations completed successfully" - - # Use CEL expression for numeric fields to preserve type (not Go template which outputs strings) - observed_generation: - expression: "generationId" - - # Use Go template with now and date functions for timestamps - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - - # Optional data field for adapter-specific metrics extracted from resources - data: - namespace: - name: - expression: | - resources.?clusterNamespace.?metadata.?name.orValue("") - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") - - postActions: - - name: "reportClusterStatus" - apiCall: - method: "POST" - url: "/clusters/{{ .clusterId }}/statuses" - headers: - - name: "Content-Type" - value: "application/json" - body: "{{ .clusterStatusPayload }}" diff --git a/charts/examples/maestro-kubernetes/adapter-task-resource-manifestwork.yaml b/charts/examples/maestro-kubernetes/adapter-task-resource-manifestwork.yaml deleted file mode 100644 index 5571088..0000000 --- a/charts/examples/maestro-kubernetes/adapter-task-resource-manifestwork.yaml +++ /dev/null @@ -1,138 +0,0 @@ -# ManifestWork Template for External Reference -# File: manifestwork-ref.yaml -# -# This template file defines the ManifestWork structure that wraps Kubernetes manifests -# for deployment via Maestro transport. It's referenced from business logic configs -# using the 'ref' approach for clean separation of concerns. 
-# -# Template Variables Available: -# - .clusterId: Target cluster identifier -# - .generationId: Resource generation for conflict resolution -# - .adapterName: Name of the adapter creating this ManifestWork -# - .placementCluster: Target cluster name (becomes ManifestWork namespace) -# - .timestamp: Creation timestamp -# - .manifests: Array of rendered Kubernetes manifests (injected by framework) - -apiVersion: work.open-cluster-management.io/v1 -kind: ManifestWork -metadata: - # ManifestWork name - must be unique within consumer namespace - name: "mw-{{ .clusterId }}" - - # Labels for identification, filtering, and management - labels: - # HyperFleet tracking labels - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/adapter: "{{ .adapterName }}" - hyperfleet.io/component: "infrastructure" - hyperfleet.io/generation: "{{ .generationId }}" - hyperfleet.io/resource-group: "cluster-setup" - - # Maestro-specific labels - maestro.io/source-id: "{{ .adapterName }}" - maestro.io/resource-type: "manifestwork" - maestro.io/priority: "normal" - - # Standard Kubernetes application labels - app.kubernetes.io/name: "aro-hcp-cluster" - app.kubernetes.io/instance: "{{ .clusterId }}" - app.kubernetes.io/version: "v1.0.0" - app.kubernetes.io/component: "infrastructure" - app.kubernetes.io/part-of: "hyperfleet" - app.kubernetes.io/managed-by: "hyperfleet-adapter" - app.kubernetes.io/created-by: "{{ .adapterName }}" - - # Annotations for metadata and operational information - annotations: - # Tracking and lifecycle - hyperfleet.io/created-by: "hyperfleet-adapter-framework" - hyperfleet.io/managed-by: "{{ .adapterName }}" - hyperfleet.io/generation: "{{ .generationId }}" - hyperfleet.io/cluster-name: "{{ .clusterId }}" - hyperfleet.io/deployment-time: "{{ .timestamp }}" - - # Maestro-specific annotations - maestro.io/applied-time: "{{ .timestamp }}" - maestro.io/source-adapter: "{{ .adapterName }}" - - # Operational annotations - deployment.hyperfleet.io/strategy: 
"rolling" - deployment.hyperfleet.io/timeout: "300s" - monitoring.hyperfleet.io/enabled: "true" - - # Documentation - description: "Complete cluster setup including namespace, configuration, and RBAC" - documentation: "https://docs.hyperfleet.io/adapters/aro-hcp" - -# ManifestWork specification -spec: - # ============================================================================ - # Workload - Contains the Kubernetes manifests to deploy - # ============================================================================ - workload: - # Kubernetes manifests array - injected by framework from business logic config - manifests: - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - hyperfleet.io/resource-type: "namespace" - annotations: - hyperfleet.io/created-by: "hyperfleet-adapter" - hyperfleet.io/generation: "{{ .generationId }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: "cluster-config" - namespace: "{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - annotations: - hyperfleet.io/generation: "{{ .generationId }}" - data: - cluster_id: "{{ .clusterId }}" - cluster_name: "{{ .clusterName }}" - - # ============================================================================ - # Delete Options - How resources should be removed - # ============================================================================ - deleteOption: - # Propagation policy for resource deletion - # - "Foreground": Wait for dependent resources to be deleted first - # - "Background": Delete immediately, let cluster handle dependents - # - "Orphan": Leave resources on cluster when ManifestWork is deleted - propagationPolicy: "Foreground" - - # Grace period for graceful deletion (seconds) - gracePeriodSeconds: 30 - - # ============================================================================ - # Manifest Configurations - 
Per-resource settings for update and feedback - # ============================================================================ - manifestConfigs: - # ======================================================================== - # Configuration for Namespace resources - # ======================================================================== - - resourceIdentifier: - group: "" # Core API group (empty for v1 resources) - resource: "namespaces" # Resource type - name: "{{ .clusterId | lower }}" # Specific resource name - updateStrategy: - type: "ServerSideApply" # Use server-side apply for namespaces - serverSideApply: - fieldManager: "hyperfleet-adapter" # Field manager name for conflict resolution - force: false # Don't force conflicts (fail on conflicts) - feedbackRules: - - type: "JSONPaths" # Use JSON path expressions for status feedback - jsonPaths: - - name: "phase" # Namespace phase (Active, Terminating) - path: ".status.phase" - - name: "conditions" # Namespace conditions array - path: ".status.conditions" - - name: "creationTimestamp" # When namespace was created - path: ".metadata.creationTimestamp" - - diff --git a/charts/examples/maestro-kubernetes/adapter-task-resource-namespace.yaml b/charts/examples/maestro-kubernetes/adapter-task-resource-namespace.yaml deleted file mode 100644 index 1971930..0000000 --- a/charts/examples/maestro-kubernetes/adapter-task-resource-namespace.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: "{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/cluster-name: "{{ .clusterName }}" - annotations: - hyperfleet.io/generation: "{{ .generationId }}" diff --git a/charts/examples/maestro/README.md b/charts/examples/maestro/README.md index 2aa0ae7..58710d4 100644 --- a/charts/examples/maestro/README.md +++ b/charts/examples/maestro/README.md @@ -118,10 +118,10 @@ Update the `broker.googlepubsub` section in `values.yaml` with your GCP Pub/Sub ```yaml broker: 
googlepubsub: - projectId: CHANGE_ME - subscriptionId: CHANGE_ME + project_id: CHANGE_ME + subscription_id: CHANGE_ME topic: CHANGE_ME - deadLetterTopic: CHANGE_ME + dead_letter_topic: CHANGE_ME ``` ### Image Configuration @@ -142,10 +142,10 @@ image: helm install ./charts -f charts/examples/values.yaml \ --namespace \ --set image.registry=quay.io/ \ - --set broker.googlepubsub.projectId= \ - --set broker.googlepubsub.subscriptionId= \ + --set broker.googlepubsub.project_id= \ + --set broker.googlepubsub.subscription_id= \ - --set broker.googlepubsub.deadLetterTopic= + --set broker.googlepubsub.dead_letter_topic= ``` ## How It Works diff --git a/charts/examples/maestro/adapter-config.yaml b/charts/examples/maestro/adapter-config.yaml index 69649d7..6131a07 100644 --- a/charts/examples/maestro/adapter-config.yaml +++ b/charts/examples/maestro/adapter-config.yaml @@ -1,71 +1,70 @@ # Example HyperFleet Adapter deployment configuration -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: maestro-example - labels: - hyperfleet.io/adapter-type: maestro-example - hyperfleet.io/component: adapter -spec: - adapter: - version: "0.1.0" + version: "0.2.0" - # Log the full merged configuration after load (default: false) - debugConfig: true +# Log the full merged configuration after load (default: false) +debug_config: true +log: + level: debug - clients: - hyperfleetApi: - baseUrl: http://hyperfleet-api:8000 - version: v1 - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential +clients: + hyperfleet_api: + base_url: http://hyperfleet-api:8000 + version: v1 + timeout: 2s + retry_attempts: 3 + retry_backoff: exponential - broker: - subscriptionId: CHANGE_ME - topic: CHANGE_ME + broker: + subscription_id: CHANGE_ME + topic: CHANGE_ME - maestro: - grpcServerAddress: "maestro-grpc.maestro.svc.cluster.local:8090" - - # HTTPS server address for REST API operations (optional) - # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS - httpServerAddress: 
"http://maestro.maestro.svc.cluster.local:8000" - - # Source identifier for CloudEvents routing (must be unique across adapters) - # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID - sourceId: "hyperfleet-adapter" - - # Client identifier (defaults to sourceId if not specified) - # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID - clientId: "hyperfleet-adapter-client" - insecure: true + maestro: + grpc_server_address: "maestro-grpc.maestro.svc.cluster.local:8090" - # HTTP timeout (default: 10s) - # Environment variable: HYPERFLEET_MAESTRO_TIMEOUT - timeout: "30s" + # HTTPS server address for REST API operations (optional) + # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS + http_server_address: "http://maestro.maestro.svc.cluster.local:8000" - # gRPC server healthiness check timeout (default: 20s) - # Environment variable: HYPERFLEET_MAESTRO_SERVER_HEALTHINESS_TIMEOUT - serverHealthinessTimeout: "20s" - - # Authentication configuration - #auth: - # type: "tls" # TLS certificate-based mTLS - # - # tlsConfig: - # # Certificate paths (mounted from Kubernetes secrets) - # # Environment variable: HYPERFLEET_MAESTRO_CA_FILE - # caFile: "/etc/maestro/certs/grpc/ca.crt" - # - # # Environment variable: HYPERFLEET_MAESTRO_CERT_FILE - # certFile: "/etc/maestro/certs/grpc/client.crt" - # - # # Environment variable: HYPERFLEET_MAESTRO_KEY_FILE - # keyFile: "/etc/maestro/certs/grpc/client.key" - # - # # HTTP API CA certificate (if HTTPS uses a different CA than gRPC) - # # Falls back to caFile when not set - # # Environment variable: HYPERFLEET_MAESTRO_HTTP_CA_FILE - # httpCaFile: "/etc/maestro/certs/https/ca.crt" + # Source identifier for CloudEvents routing (must be unique across adapters) + # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID + source_id: "hyperfleet-adapter" + # Client identifier (defaults to source_id if not specified) + # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID + client_id: "hyperfleet-adapter-client" + insecure: true + + # 
HTTP timeout (default: 10s) + # Environment variable: HYPERFLEET_MAESTRO_TIMEOUT + timeout: "30s" + + # gRPC server healthiness check timeout (default: 20s) + # Environment variable: HYPERFLEET_MAESTRO_SERVER_HEALTHINESS_TIMEOUT + server_healthiness_timeout: "20s" + + # Authentication configuration + #auth: + # type: "tls" # TLS certificate-based mTLS + # + # tls_config: + # # gRPC TLS configuration + # # Certificate paths (mounted from Kubernetes secrets) + # # Environment variable: HYPERFLEET_MAESTRO_CA_FILE + # ca_file: "/etc/maestro/certs/grpc/ca.crt" + # + # # Environment variable: HYPERFLEET_MAESTRO_CERT_FILE + # cert_file: "/etc/maestro/certs/grpc/client.crt" + # + # # Environment variable: HYPERFLEET_MAESTRO_KEY_FILE + # key_file: "/etc/maestro/certs/grpc/client.key" + # + # # Server name for TLS verification + # # Environment variable: HYPERFLEET_MAESTRO_SERVER_NAME + # server_name: "maestro-grpc.maestro.svc.cluster.local" + # + # # HTTP API TLS configuration (may use different CA than gRPC) + # # If not set, falls back to ca_file for backwards compatibility + # # Environment variable: HYPERFLEET_MAESTRO_HTTP_CA_FILE + # http_ca_file: "/etc/maestro/certs/https/ca.crt" diff --git a/charts/examples/maestro/adapter-task-config.yaml b/charts/examples/maestro/adapter-task-config.yaml index a31dba5..394bb44 100644 --- a/charts/examples/maestro/adapter-task-config.yaml +++ b/charts/examples/maestro/adapter-task-config.yaml @@ -1,244 +1,212 @@ # Example HyperFleet Adapter task configuration -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: maestro-example - labels: - hyperfleet.io/adapter-type: maestro-example - hyperfleet.io/component: adapter -spec: - # Parameters with all required variables - params: +# Parameters with all required variables +params: + - name: clusterId + required: true + source: event.id + type: string + - name: generationId + required: true + source: event.generation + type: int +# Preconditions with 
valid operators and CEL expressions +preconditions: + - api_call: + method: GET + retry_attempts: 3 + retry_backoff: exponential + timeout: 10s + url: /clusters/{{ .clusterId }} + capture: + - field: name + name: clusterName + - field: generation + name: generationId + - field: created_time + name: timestamp + - expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + name: readyConditionStatus + - name: placementClusterName + expression: '"cluster1"' # TBC coming from placement adapter + # Structured conditions with valid operators + conditions: + - field: readyConditionStatus + operator: equals + value: "False" + name: clusterStatus + - # Valid CEL expression + expression: | + readyConditionStatus == "False" + name: validationCheck +# Resources with valid K8s manifests +resources: + - # Discover the manifestWork by name + # This is used to get the manifestWork object from the cluster. + # With this approach, the manifestWork object can be used to get the resources inside the manifestWork. + # When try to dig Manifest + discovery: + by_name: mw-{{ .clusterId }} + # ManifestWork is a kind of manifest that can be used to create resources on the cluster. + # It is a collection of resources that are created together. + # It is created by the adapter and can be used to create resources on the cluster. + # It is created by the adapter and can be used to create resources on the cluster. + manifest: + ref: /etc/adapter/manifestwork.yaml + name: agentNamespaceManifestWork + # Discover sub-resources within the manifestWork + # This approach can be used to use the discovery name to parameter level + # This can support jsonPath to dig into the resource status. 
like mgmtNamespace.status.conditions[?(@.type=="Ready")].status + nested_discoveries: + - discovery: + by_selectors: + label_selector: + hyperfleet.io/cluster-id: '{{ .clusterId }}' + hyperfleet.io/resource-type: namespace + name: mgmtNamespace + - discovery: + by_name: '{{ .clusterId }}' + name: mgmtConfigMap + transport: + client: maestro + maestro: + target_cluster: '{{ .placementClusterName }}' +# Post-processing with valid CEL expressions +# This example contains multiple resources, we will only report on the conditions of the jobNamespace not to overcomplicate the example +post: + payloads: + - build: + # Adapter name for tracking which adapter reported this status + adapter: '{{ .adapter.name }}' + # Conditions array - each condition has type, status, reason, message + # Use CEL optional chaining ?.orValue() for safe field access + conditions: + # Applied: Check if the ManifestWork has been applied by Maestro + # The ManifestWork's own status.conditions contains the Applied condition + - type: Applied + message: + expression: | + has(resources.agentNamespaceManifestWork) && has(resources.agentNamespaceManifestWork.status) && has(resources.agentNamespaceManifestWork.status.conditions) + && resources.agentNamespaceManifestWork.status.conditions.exists(c, has(c.type) && c.type == "Applied" && has(c.status)) + ? resources.agentNamespaceManifestWork.status.conditions.filter(c, c.type == "Applied")[0].message + : "ManifestWork not discovered from Maestro or no Applied condition" + reason: + expression: | + has(resources.agentNamespaceManifestWork) && has(resources.agentNamespaceManifestWork.status) && has(resources.agentNamespaceManifestWork.status.conditions) + && resources.agentNamespaceManifestWork.status.conditions.exists(c, has(c.type) && c.type == "Applied" && has(c.status)) + ? 
resources.agentNamespaceManifestWork.status.conditions.filter(c, c.type == "Applied")[0].reason + : "ManifestWorkNotDiscovered" + status: + expression: | + has(resources.agentNamespaceManifestWork) && has(resources.agentNamespaceManifestWork.status) && has(resources.agentNamespaceManifestWork.status.conditions) + && resources.agentNamespaceManifestWork.status.conditions.exists(c, has(c.type) && c.type == "Applied" && has(c.status)) + ? resources.agentNamespaceManifestWork.status.conditions.filter(c, c.type == "Applied")[0].status + : "False" + # Available: Check if nested discovered manifests are available on the spoke cluster + # Each nested discovery object is enriched with top-level "conditions" from status.resourceStatus.manifests[] + - type: Available + message: + expression: | + !(has(resources.mgmtNamespace) && has(resources.mgmtNamespace.conditions)) + ? "Namespace not discovered from ManifestWork" + : !resources.mgmtNamespace.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "Namespace not yet available on spoke cluster" + : !(has(resources.mgmtConfigMap) && has(resources.mgmtConfigMap.conditions)) + ? "ConfigMap not discovered from ManifestWork" + : !resources.mgmtConfigMap.conditions.exists(c, c.type == "Available" && has(c.status) && c.status == "True") + ? "ConfigMap not yet available on spoke cluster" + : "All manifests (namespace, configmap) are available on spoke cluster" + reason: + expression: | + !(has(resources.mgmtNamespace) && has(resources.mgmtNamespace.conditions)) + ? "NamespaceNotDiscovered" + : !resources.mgmtNamespace.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "NamespaceNotAvailable" + : !(has(resources.mgmtConfigMap) && has(resources.mgmtConfigMap.conditions)) + ? "ConfigMapNotDiscovered" + : !resources.mgmtConfigMap.conditions.exists(c, c.type == "Available" && has(c.status) && c.status == "True") + ? 
"ConfigMapNotAvailable" + : "AllResourcesAvailable" + status: + expression: | + has(resources.mgmtNamespace) && has(resources.mgmtNamespace.conditions) + && resources.mgmtNamespace.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + && has(resources.mgmtConfigMap) && has(resources.mgmtConfigMap.conditions) + && resources.mgmtConfigMap.conditions.exists(c, c.type == "Available" && has(c.status) && c.status == "True") + ? "True" + : "False" + # Health: Adapter execution status (runtime) + - type: Health + message: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "Adapter failed at phase [" + + adapter.?executionError.?phase.orValue("unknown") + + "] step [" + + adapter.?executionError.?step.orValue("unknown") + + "]: " + + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) + : adapter.?resourcesSkipped.orValue(false) + ? "Resources skipped: " + adapter.?skipReason.orValue("unknown reason") + : "Adapter execution completed successfully" + reason: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") + : adapter.?resourcesSkipped.orValue(false) + ? "ResourcesSkipped" + : "Healthy" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" + && !adapter.?resourcesSkipped.orValue(false) + ? "True" + : "False" + data: + configmap: + name: + expression: | + has(resources.mgmtConfigMap) && has(resources.mgmtConfigMap.metadata) + ? resources.mgmtConfigMap.metadata.name + : "" + manifestwork: + consumer: + expression: | + has(resources.agentNamespaceManifestWork) && has(resources.agentNamespaceManifestWork.metadata) + ? resources.agentNamespaceManifestWork.metadata.namespace + : placementClusterName + name: + expression: | + has(resources.agentNamespaceManifestWork) && has(resources.agentNamespaceManifestWork.metadata) + ? 
resources.agentNamespaceManifestWork.metadata.name + : "" + namespace: + name: + expression: | + has(resources.mgmtNamespace) && has(resources.mgmtNamespace.metadata) + ? resources.mgmtNamespace.metadata.name + : "" + phase: + expression: | + has(resources.mgmtNamespace) && has(resources.mgmtNamespace.statusFeedback) && has(resources.mgmtNamespace.statusFeedback.values) + && resources.mgmtNamespace.statusFeedback.values.exists(v, has(v.name) && v.name == "phase" && has(v.fieldValue)) + ? resources.mgmtNamespace.statusFeedback.values.filter(v, v.name == "phase")[0].fieldValue.string + : "Unknown" - - name: "clusterId" - source: "event.id" - type: "string" - required: true - - - name: "generationId" - source: "event.generation" - type: "int" - required: true - - - name: "simulateResult" - source: "env.SIMULATE_RESULT" - type: "string" - required: true - - # Preconditions with valid operators and CEL expressions - preconditions: - - name: "clusterStatus" - apiCall: - method: "GET" - url: "/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "clusterName" - field: "name" - - name: "generationId" - field: "generation" - - name: "timestamp" - field: "created_time" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? 
status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - - name: "placementClusterName" - expression: "\"cluster1\"" # TBC coming from placement adapter - description: "Unique identifier for the target maestro" - - - name: "adapterName" - expression: "\"adapter1\"" # TBC coming from config passed to params - description: "Unique identifier for the adapter" - - # Structured conditions with valid operators - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "False" - - - name: "validationCheck" - # Valid CEL expression - expression: | - readyConditionStatus == "False" - - # Resources with valid K8s manifests - resources: - - name: "agentNamespaceManifestWork" - transport: - client: "maestro" - maestro: - targetCluster: "{{ .placementClusterName }}" - - # ManifestWork is a kind of manifest that can be used to create resources on the cluster. - # It is a collection of resources that are created together. - # It is created by the adapter and can be used to create resources on the cluster. - # It is created by the adapter and can be used to create resources on the cluster. - manifest: - ref: "/etc/adapter/manifestwork.yaml" - - # Discover the manifestWork by name - # This is used to get the manifestWork object from the cluster. - # With this approach, the manifestWork object can be used to get the resources inside the manifestWork. - # When try to dig Manifest - discovery: - byName: "mw-{{ .clusterId }}" - - # Discover sub-resources within the manifestWork - # This approach can be used to use the discovery name to parameter level - # This can support jsonPath to dig into the resource status. 
like mgmtNamespace.status.conditions[?(@.type=="Ready")].status - nestedDiscoveries: - - name: "mgmtNamespace" - discovery: - bySelectors: - labelSelector: - hyperfleet.io/resource-type: "namespace" - hyperfleet.io/cluster-id: "{{ .clusterId }}" - - name: "mgmtConfigMap" - discovery: - byName: "{{ .clusterId }}" - - # Post-processing: build status payload and report to HyperFleet API - post: - payloads: - - name: "clusterStatusPayload" - build: - # Adapter name for tracking which adapter reported this status - adapter: "{{ .metadata.name }}" - - conditions: - # Applied: Check if the ManifestWork has been applied by Maestro - # The ManifestWork's own status.conditions contains the Applied condition - - type: "Applied" - status: - expression: | - has(resources.agentNamespaceManifestWork) && has(resources.agentNamespaceManifestWork.status) && has(resources.agentNamespaceManifestWork.status.conditions) - && resources.agentNamespaceManifestWork.status.conditions.exists(c, has(c.type) && c.type == "Applied" && has(c.status)) - ? resources.agentNamespaceManifestWork.status.conditions.filter(c, c.type == "Applied")[0].status - : "False" - reason: - expression: | - has(resources.agentNamespaceManifestWork) && has(resources.agentNamespaceManifestWork.status) && has(resources.agentNamespaceManifestWork.status.conditions) - && resources.agentNamespaceManifestWork.status.conditions.exists(c, has(c.type) && c.type == "Applied" && has(c.status)) - ? resources.agentNamespaceManifestWork.status.conditions.filter(c, c.type == "Applied")[0].reason - : "ManifestWorkNotDiscovered" - message: - expression: | - has(resources.agentNamespaceManifestWork) && has(resources.agentNamespaceManifestWork.status) && has(resources.agentNamespaceManifestWork.status.conditions) - && resources.agentNamespaceManifestWork.status.conditions.exists(c, has(c.type) && c.type == "Applied" && has(c.status)) - ? 
resources.agentNamespaceManifestWork.status.conditions.filter(c, c.type == "Applied")[0].message - : "ManifestWork not discovered from Maestro or no Applied condition" - - # Available: Check if nested discovered manifests are available on the spoke cluster - # Each nested discovery object is enriched with top-level "conditions" from status.resourceStatus.manifests[] - - type: "Available" - status: - expression: | - has(resources.mgmtNamespace) && has(resources.mgmtNamespace.conditions) - && resources.mgmtNamespace.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - && has(resources.mgmtConfigMap) && has(resources.mgmtConfigMap.conditions) - && resources.mgmtConfigMap.conditions.exists(c, c.type == "Available" && has(c.status) && c.status == "True") - ? "True" - : "False" - reason: - expression: | - !(has(resources.mgmtNamespace) && has(resources.mgmtNamespace.conditions)) - ? "NamespaceNotDiscovered" - : !resources.mgmtNamespace.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "NamespaceNotAvailable" - : !(has(resources.mgmtConfigMap) && has(resources.mgmtConfigMap.conditions)) - ? "ConfigMapNotDiscovered" - : !resources.mgmtConfigMap.conditions.exists(c, c.type == "Available" && has(c.status) && c.status == "True") - ? "ConfigMapNotAvailable" - : "AllResourcesAvailable" - message: - expression: | - !(has(resources.mgmtNamespace) && has(resources.mgmtNamespace.conditions)) - ? "Namespace not discovered from ManifestWork" - : !resources.mgmtNamespace.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "Namespace not yet available on spoke cluster" - : !(has(resources.mgmtConfigMap) && has(resources.mgmtConfigMap.conditions)) - ? "ConfigMap not discovered from ManifestWork" - : !resources.mgmtConfigMap.conditions.exists(c, c.type == "Available" && has(c.status) && c.status == "True") - ? 
"ConfigMap not yet available on spoke cluster" - : "All manifests (namespace, configmap) are available on spoke cluster" - - # Health: Adapter execution status (runtime) - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" - && !adapter.?resourcesSkipped.orValue(false) - ? "True" - : "False" - reason: - expression: | - adapter.?executionStatus.orValue("") != "success" - ? "ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") - : adapter.?resourcesSkipped.orValue(false) - ? "ResourcesSkipped" - : "Healthy" - message: - expression: | - adapter.?executionStatus.orValue("") != "success" - ? "Adapter failed at phase [" - + adapter.?executionError.?phase.orValue("unknown") - + "] step [" - + adapter.?executionError.?step.orValue("unknown") - + "]: " - + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) - : adapter.?resourcesSkipped.orValue(false) - ? "Resources skipped: " + adapter.?skipReason.orValue("unknown reason") - : "Adapter execution completed successfully" - - # Use CEL expression for numeric fields to preserve type (not Go template which outputs strings) - observed_generation: - expression: "generationId" - - # Use Go template with now and date functions for timestamps - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - - data: - manifestwork: - name: - expression: | - has(resources.agentNamespaceManifestWork) && has(resources.agentNamespaceManifestWork.metadata) - ? resources.agentNamespaceManifestWork.metadata.name - : "" - consumer: - expression: | - has(resources.agentNamespaceManifestWork) && has(resources.agentNamespaceManifestWork.metadata) - ? resources.agentNamespaceManifestWork.metadata.namespace - : placementClusterName - namespace: - name: - expression: | - has(resources.mgmtNamespace) && has(resources.mgmtNamespace.metadata) - ? 
resources.mgmtNamespace.metadata.name - : "" - phase: - expression: | - has(resources.mgmtNamespace) && has(resources.mgmtNamespace.statusFeedback) && has(resources.mgmtNamespace.statusFeedback.values) - && resources.mgmtNamespace.statusFeedback.values.exists(v, has(v.name) && v.name == "phase" && has(v.fieldValue)) - ? resources.mgmtNamespace.statusFeedback.values.filter(v, v.name == "phase")[0].fieldValue.string - : "Unknown" - configmap: - name: - expression: | - has(resources.mgmtConfigMap) && has(resources.mgmtConfigMap.metadata) - ? resources.mgmtConfigMap.metadata.name - : "" - - postActions: - - name: "reportClusterStatus" - apiCall: - method: "POST" - url: "/clusters/{{ .clusterId }}/statuses" - headers: - - name: "Content-Type" - value: "application/json" - body: "{{ .clusterStatusPayload }}" + # Use CEL expression for numeric fields to preserve type (not Go template which outputs strings) + observed_generation: + expression: generationId + # Use Go template with now and date functions for timestamps + observed_time: '{{ now | date "2006-01-02T15:04:05Z07:00" }}' + name: clusterStatusPayload + post_actions: + - api_call: + body: '{{ .clusterStatusPayload }}' + headers: + - name: Content-Type + value: application/json + method: POST + url: /clusters/{{ .clusterId }}/statuses + name: reportClusterStatus diff --git a/charts/examples/maestro/adapter-task-resource-manifestwork.yaml b/charts/examples/maestro/adapter-task-resource-manifestwork.yaml index 0c9ab29..9961449 100644 --- a/charts/examples/maestro/adapter-task-resource-manifestwork.yaml +++ b/charts/examples/maestro/adapter-task-resource-manifestwork.yaml @@ -8,7 +8,7 @@ # Template Variables Available: # - .clusterId: Target cluster identifier # - .generationId: Resource generation for conflict resolution -# - .adapterName: Name of the adapter creating this ManifestWork +# - .adapter.name: Name of the adapter creating this ManifestWork # - .placementCluster: Target cluster name (becomes ManifestWork 
namespace) # - .timestamp: Creation timestamp # - .manifests: Array of rendered Kubernetes manifests (injected by framework) @@ -23,13 +23,14 @@ metadata: labels: # HyperFleet tracking labels hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/adapter: "{{ .adapterName }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" + hyperfleet.io/adapter: "{{ .adapter.name }}" hyperfleet.io/component: "infrastructure" hyperfleet.io/generation: "{{ .generationId }}" hyperfleet.io/resource-group: "cluster-setup" # Maestro-specific labels - maestro.io/source-id: "{{ .adapterName }}" + maestro.io/source-id: "{{ .adapter.name }}" maestro.io/resource-type: "manifestwork" maestro.io/priority: "normal" @@ -39,21 +40,21 @@ metadata: app.kubernetes.io/version: "v1.0.0" app.kubernetes.io/component: "infrastructure" app.kubernetes.io/part-of: "hyperfleet" - app.kubernetes.io/managed-by: "{{ .adapterName }}" - app.kubernetes.io/created-by: "{{ .adapterName }}" + app.kubernetes.io/managed-by: "{{ .adapter.name }}" + app.kubernetes.io/created-by: "{{ .adapter.name }}" # Annotations for metadata and operational information annotations: # Tracking and lifecycle hyperfleet.io/created-by: "hyperfleet-adapter-framework" - hyperfleet.io/managed-by: "{{ .adapterName }}" + hyperfleet.io/managed-by: "{{ .adapter.name }}" hyperfleet.io/generation: "{{ .generationId }}" - hyperfleet.io/cluster-name: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" hyperfleet.io/deployment-time: "{{ .timestamp }}" # Maestro-specific annotations maestro.io/applied-time: "{{ .timestamp }}" - maestro.io/source-adapter: "{{ .adapterName }}" + maestro.io/source-adapter: "{{ .adapter.name }}" # Operational annotations deployment.hyperfleet.io/strategy: "rolling" @@ -75,10 +76,11 @@ spec: - apiVersion: v1 kind: Namespace metadata: - name: "{{ .clusterId | lower }}" + name: "{{ .clusterId }}" labels: hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" 
+ hyperfleet.io/cluster-name: "{{ .clusterName }}" + hyperfleet.io/managed-by: "{{ .adapter.name }}" hyperfleet.io/resource-type: "namespace" annotations: hyperfleet.io/created-by: "hyperfleet-adapter" @@ -90,6 +92,7 @@ spec: namespace: "{{ .clusterId }}" labels: hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" annotations: hyperfleet.io/generation: "{{ .generationId }}" data: @@ -119,7 +122,7 @@ spec: - resourceIdentifier: group: "" # Core API group (empty for v1 resources) resource: "namespaces" # Resource type - name: "{{ .clusterId | lower }}" # Specific resource name + name: "{{ .clusterId }}" # Specific resource name updateStrategy: type: "ServerSideApply" # Use server-side apply for namespaces serverSideApply: diff --git a/charts/examples/maestro/values.yaml b/charts/examples/maestro/values.yaml index 95d47e4..fbc966e 100644 --- a/charts/examples/maestro/values.yaml +++ b/charts/examples/maestro/values.yaml @@ -12,10 +12,10 @@ adapterTaskConfig: broker: create: true googlepubsub: - projectId: CHANGE_ME - subscriptionId: CHANGE_ME + project_id: CHANGE_ME + subscription_id: CHANGE_ME topic: CHANGE_ME - deadLetterTopic: CHANGE_ME + dead_letter_topic: CHANGE_ME image: registry: CHANGE_ME diff --git a/charts/templates/configmap-broker.yaml b/charts/templates/configmap-broker.yaml index 6652591..d58e5d8 100644 --- a/charts/templates/configmap-broker.yaml +++ b/charts/templates/configmap-broker.yaml @@ -31,11 +31,11 @@ data: broker: type: googlepubsub googlepubsub: - project_id: {{ .Values.broker.googlepubsub.projectId | quote }} + project_id: {{ .Values.broker.googlepubsub.project_id | quote }} topic: {{ .Values.broker.googlepubsub.topic | quote }} - deadLetterTopic: {{ .Values.broker.googlepubsub.deadLetterTopic | quote }} - create_topic_if_missing: {{ .Values.broker.googlepubsub.createTopicIfMissing }} - create_subscription_if_missing: {{ .Values.broker.googlepubsub.createSubscriptionIfMissing }} + 
dead_letter_topic: {{ .Values.broker.googlepubsub.dead_letter_topic | quote }} + create_topic_if_missing: {{ .Values.broker.googlepubsub.create_topic_if_missing }} + create_subscription_if_missing: {{ .Values.broker.googlepubsub.create_subscription_if_missing }} subscriber: parallelism: 1 {{- else if eq $brokerType "rabbitmq" }} @@ -47,8 +47,8 @@ data: url: {{ .Values.broker.rabbitmq.url | quote }} queue: {{ .Values.broker.rabbitmq.queue | quote }} exchange: {{ .Values.broker.rabbitmq.exchange | quote }} - routing_key: {{ .Values.broker.rabbitmq.routingKey | quote }} - exchange_type: {{ .Values.broker.rabbitmq.exchangeType | default "topic" | quote }} + routing_key: {{ .Values.broker.rabbitmq.routing_key | quote }} + exchange_type: {{ .Values.broker.rabbitmq.exchange_type | default "topic" | quote }} {{- end }} {{- end }} {{- end }} diff --git a/charts/templates/deployment.yaml b/charts/templates/deployment.yaml index 0bb3bc7..26bd787 100644 --- a/charts/templates/deployment.yaml +++ b/charts/templates/deployment.yaml @@ -91,15 +91,15 @@ spec: - name: LOG_LEVEL value: {{ .Values.adapterConfig.log.level }} - name: HYPERFLEET_API_BASE_URL - value: {{ .Values.adapterConfig.hyperfleetApi.baseUrl | quote }} + value: {{ .Values.adapterConfig.hyperfleet_api.base_url | quote }} - name: HYPERFLEET_API_VERSION - value: {{ .Values.adapterConfig.hyperfleetApi.version | quote }} + value: {{ .Values.adapterConfig.hyperfleet_api.version | quote }} - name: BROKER_CONFIG_FILE value: /etc/broker/broker.yaml {{- $brokerType := include "hyperfleet-adapter.brokerType" . 
}} {{- if eq $brokerType "googlepubsub" }} - name: HYPERFLEET_BROKER_SUBSCRIPTION_ID - value: {{ .Values.broker.googlepubsub.subscriptionId | quote }} + value: {{ .Values.broker.googlepubsub.subscription_id | quote }} - name: HYPERFLEET_BROKER_TOPIC value: {{ .Values.broker.googlepubsub.topic | quote }} {{- end }} diff --git a/charts/values.yaml b/charts/values.yaml index c04d2f8..f1a384f 100644 --- a/charts/values.yaml +++ b/charts/values.yaml @@ -9,49 +9,58 @@ # - option3: from a set of files by setting files adapterConfig: create: true - # option1: ConfigMap name (if different from default, and set create: false) #configMapName: "" # option2: AdapterConfig YAML (creates adapter-config.yaml key in ConfigMap) # yaml: - # apiVersion: hyperfleet.redhat.com/v1alpha1 - # kind: AdapterConfig + # adapter: + # name: my-adapter + # version: "0.1.0" # option3: AdapterConfig YAML files packaged with the chart # files: - # adapter-config.yaml: examples/adapter-config.yaml + # adapter-config.yaml: examples/kubernetes/adapter-config.yaml # HyperFleet API env vars used by task params - hyperfleetApi: + hyperfleet_api: # Base URL for HyperFleet API (HYPERFLEET_API_BASE_URL) - baseUrl: http://hyperfleet-api:8000 + base_url: http://hyperfleet-api:8000 # API version (HYPERFLEET_API_VERSION), default: v1 - version: "v1" - + version: v1 log: level: info - # AdapterTaskConfig (business logic) can be created: # - option1: from an existing ConfigMap by setting configMapName # - option2: from a YAML file by setting yaml # - option3: from a set of files by setting files adapterTaskConfig: create: true - # option1: ConfigMap name (if different from default, and set create: false) #configMapName: "" # option2: AdapterTaskConfig YAML (creates task-config.yaml key in ConfigMap) # yaml: - # apiVersion: hyperfleet.redhat.com/v1alpha1 - # kind: AdapterTaskConfig + # params: [] + # resources: [] # option3: AdapterTaskConfig YAML files packaged with the chart # files: - # task-config.yaml: 
examples/adapter-task-config.yaml - - + # task-config.yaml: examples/kubernetes/adapter-task-config.yaml +affinity: {} +# Override default args +args: + - serve + - --config + - /etc/adapter/adapter-config.yaml + - --task-config + - /etc/adapter/task-config.yaml +autoscaling: + enabled: false + maxReplicas: 10 + minReplicas: 1 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 # Broker configuration can be created: # - option1: from an existing ConfigMap by setting configMapName # - option2: from a YAML file by setting yaml @@ -60,97 +69,115 @@ broker: create: true # option1 # configMapName: "" - + # option2 # yaml: "" # option3: Broker type identifier (googlepubsub, rabbitmq, etc.) - used as label # Subscription/topic are used to set adapter broker config overrides googlepubsub: - projectId: "" - subscriptionId: "" - topic: "" - deadLetterTopic: "" + create_subscription_if_missing: false # Auto-creation flags (default: false - infrastructure must pre-exist) # Set to true for development/testing; use false in production with pre-provisioned resources - createTopicIfMissing: false - createSubscriptionIfMissing: false + create_topic_if_missing: false + dead_letter_topic: "" + project_id: "" + subscription_id: "" + topic: "" # option3: for rabbitmq #rabbitmq: # url: "" # queue: "" # exchange: "" - # exchangeType: "topic" - # routingKey: "" - - + # exchange_type: "topic" + # routing_key: "" +# Override default command +command: + - /app/adapter +# Container ports for health and metrics endpoints +containerPorts: + - containerPort: 8080 + name: http + protocol: TCP + - containerPort: 9090 + name: metrics + protocol: TCP +deploymentAnnotations: {} +# Deployment configuration +deploymentLabels: {} +# Environment variables +env: [] +# - name: ADAPTER_CONFIG_PATH +# value: /path/to/custom/config.yaml +# - name: MY_SECRET +# valueFrom: +# secretKeyRef: +# name: my-secret +# key: secret-key -## Kubernetes configuration section -## 
================================ +# Volume mounts for additional configs +extraVolumeMounts: [] +# - name: custom-config +# mountPath: /etc/custom +# readOnly: true -replicaCount: 1 +# Additional volumes +extraVolumes: [] +# - name: custom-config +# configMap: +# name: custom-configmap +fullnameOverride: "" # This is the image for the adapter framework image: + pullPolicy: Always registry: CHANGE_ME # e.g. quay.io/openshift-hyperfleet repository: CHANGE_ME # e.g. hyperfleet-adapter - pullPolicy: Always tag: CHANGE_ME # e.g. "latest" - imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - -# Deployment configuration -deploymentLabels: {} -deploymentAnnotations: {} - -# Pod labels (in addition to selector labels) -podLabels: {} +# Init containers +initContainers: [] +# - name: init-myservice +# image: busybox:1.28 +# command: ['sh', '-c', 'until nslookup myservice; do echo waiting; sleep 2; done'] -# Deployment strategy -strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 +# Lifecycle hooks +lifecycle: {} +# preStop: +# exec: +# command: +# - /bin/sh +# - -c +# - sleep 10 +# Liveness probe configuration (per HyperFleet health-endpoints standard) +# Uses K8s defaults: initialDelaySeconds=0, periodSeconds=10, timeoutSeconds=1, failureThreshold=3 +livenessProbe: + enabled: true + httpGet: + path: /healthz + port: 8080 # Minimum number of seconds for which a newly created pod should be ready minReadySeconds: 0 - -# The number of old ReplicaSets to retain for rollback -revisionHistoryLimit: 10 - -# Container ports for health and metrics endpoints -containerPorts: - - name: http - containerPort: 8080 - protocol: TCP - - name: metrics - containerPort: 9090 - protocol: TCP - -# Override default command -command: - - /app/adapter - -# Override default args -args: - - serve - - --config - - /etc/adapter/adapter-config.yaml - - --task-config - - /etc/adapter/task-config.yaml - -serviceAccount: - # Specifies whether a service account 
should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - +nameOverride: "" +nodeSelector: {} +podAnnotations: {} +# Pod Disruption Budget +# PDB protects availability during voluntary disruptions (node maintenance, cluster upgrades). +# Set minAvailable OR maxUnavailable (not both). Use maxUnavailable with HPA for smoother scaling. +podDisruptionBudget: + enabled: true + # minAvailable: 1 + maxUnavailable: 1 + # unhealthyPodEvictionPolicy: IfHealthyBudget +# Pod labels (in addition to selector labels) +podLabels: {} +podSecurityContext: + fsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 +# Priority class name for pod scheduling +priorityClassName: "" # RBAC permissions are needed if the adapter task needs to interact with CLM cluster # e.g. if triggering a job that must update its status to report conditions rbac: @@ -169,39 +196,6 @@ rbac: # - jobs/status # Additional custom rules (appended to auto-generated rules) rules: [] - -podAnnotations: {} - -podSecurityContext: - fsGroup: 65532 - runAsNonRoot: true - runAsUser: 65532 - -securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault - -resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 100m - memory: 128Mi - -# Liveness probe configuration (per HyperFleet health-endpoints standard) -# Uses K8s defaults: initialDelaySeconds=0, periodSeconds=10, timeoutSeconds=1, failureThreshold=3 -livenessProbe: - enabled: true - httpGet: - path: /healthz - port: 8080 - # Readiness probe configuration (per HyperFleet health-endpoints standard) # Uses K8s defaults: initialDelaySeconds=0, periodSeconds=10, timeoutSeconds=1, failureThreshold=3 readinessProbe: @@ -209,6 +203,40 @@ readinessProbe: httpGet: path: /readyz port: 8080 +## 
Kubernetes configuration section +## ================================ +replicaCount: 1 +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi +# The number of old ReplicaSets to retain for rollback +revisionHistoryLimit: 10 +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault +serviceAccount: + # Annotations to add to the service account + annotations: {} + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" +# Sidecar containers +sidecarContainers: [] +# - name: sidecar +# image: nginx:1.19 +# ports: +# - containerPort: 80 # Startup probe configuration (useful for slow-starting containers) # Uses K8s defaults when enabled @@ -217,83 +245,12 @@ startupProbe: httpGet: path: /healthz port: 8080 - -# Lifecycle hooks -lifecycle: {} - # preStop: - # exec: - # command: - # - /bin/sh - # - -c - # - sleep 10 - -# Priority class name for pod scheduling -priorityClassName: "" - +# Deployment strategy +strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate # Termination grace period in seconds terminationGracePeriodSeconds: 30 - -# Init containers -initContainers: [] - # - name: init-myservice - # image: busybox:1.28 - # command: ['sh', '-c', 'until nslookup myservice; do echo waiting; sleep 2; done'] - -# Sidecar containers -sidecarContainers: [] - # - name: sidecar - # image: nginx:1.19 - # ports: - # - containerPort: 80 - -nodeSelector: {} - tolerations: [] - -affinity: {} - -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 10 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 - - -# Environment variables -env: [] - # - name: ADAPTER_CONFIG_PATH - # value: /path/to/custom/config.yaml - # - name: MY_SECRET - # 
valueFrom: - # secretKeyRef: - # name: my-secret - # key: secret-key - -# envFrom for loading entire ConfigMaps/Secrets as environment variables -envFrom: [] - # - configMapRef: - # name: my-configmap - # - secretRef: - # name: my-secret - -# Volume mounts for additional configs -extraVolumeMounts: [] - # - name: custom-config - # mountPath: /etc/custom - # readOnly: true - -# Additional volumes -extraVolumes: [] - # - name: custom-config - # configMap: - # name: custom-configmap - -# Pod Disruption Budget -# PDB protects availability during voluntary disruptions (node maintenance, cluster upgrades). -# Set minAvailable OR maxUnavailable (not both). Use maxUnavailable with HPA for smoother scaling. -podDisruptionBudget: - enabled: true - # minAvailable: 1 - maxUnavailable: 1 - # unhealthyPodEvictionPolicy: IfHealthyBudget diff --git a/cmd/adapter/main.go b/cmd/adapter/main.go index 8b5595c..e5fa670 100644 --- a/cmd/adapter/main.go +++ b/cmd/adapter/main.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "os/signal" + "strings" "syscall" "time" @@ -34,7 +35,6 @@ var ( logLevel string logFormat string logOutput string - serveFlags *pflag.FlagSet // Dry-run flags dryRunEvent string // Path to CloudEvent JSON file @@ -93,47 +93,21 @@ Dry-run mode: Optionally pass --dry-run-api-responses to configure mock API responses.`, RunE: func(cmd *cobra.Command, args []string) error { if isDryRun() { - return runDryRun() + return runDryRun(cmd.Flags()) } - return runServe() + return runServe(cmd.Flags()) }, } - - // Add config flags to serve command - serveCmd.Flags().StringVarP(&configPath, "config", "c", "", - fmt.Sprintf("Path to adapter deployment config file (can also use %s env var)", config_loader.EnvAdapterConfig)) - serveCmd.Flags().StringVarP(&taskConfigPath, "task-config", "t", "", - fmt.Sprintf("Path to adapter task config file (can also use %s env var)", config_loader.EnvTaskConfigPath)) - serveFlags = serveCmd.Flags() - - // Add Maestro override flags - 
serveCmd.Flags().String("maestro-grpc-server-address", "", "Maestro gRPC server address") - serveCmd.Flags().String("maestro-http-server-address", "", "Maestro HTTP server address") - serveCmd.Flags().String("maestro-source-id", "", "Maestro source ID") - serveCmd.Flags().String("maestro-client-id", "", "Maestro client ID") - serveCmd.Flags().String("maestro-ca-file", "", "Maestro CA certificate file") - serveCmd.Flags().String("maestro-cert-file", "", "Maestro client certificate file") - serveCmd.Flags().String("maestro-key-file", "", "Maestro client key file") - serveCmd.Flags().String("maestro-timeout", "", "Maestro client timeout") - serveCmd.Flags().Bool("maestro-insecure", false, "Use insecure connection to Maestro") - - // Add HyperFleet API override flags - serveCmd.Flags().String("hyperfleet-api-timeout", "", "HyperFleet API timeout") - serveCmd.Flags().Int("hyperfleet-api-retry", 0, "HyperFleet API retry attempts") - - // Add config debug override flags + addConfigPathFlags(serveCmd) + addOverrideFlags(serveCmd) serveCmd.Flags().Bool("debug-config", false, "Log the full merged configuration after load. Env: HYPERFLEET_DEBUG_CONFIG") - - // Add logging flags to serve command serveCmd.Flags().StringVar(&logLevel, "log-level", "", "Log level (debug, info, warn, error). Env: LOG_LEVEL") serveCmd.Flags().StringVar(&logFormat, "log-format", "", "Log format (text, json). Env: LOG_FORMAT") serveCmd.Flags().StringVar(&logOutput, "log-output", "", "Log output (stdout, stderr). Env: LOG_OUTPUT") - - // Add dry-run flags to serve command serveCmd.Flags().StringVar(&dryRunEvent, "dry-run-event", "", "Path to CloudEvent JSON file for dry-run mode") serveCmd.Flags().StringVar(&dryRunAPIResponses, "dry-run-api-responses", "", @@ -145,6 +119,32 @@ Dry-run mode: serveCmd.Flags().StringVar(&dryRunOutput, "dry-run-output", "text", "Dry-run output format: text or json") + // Config-dump command: loads config and prints the merged result as YAML, then exits. 
+ // Useful for debugging and verifying that config files, env vars, and CLI flags load correctly. + configDumpCmd := &cobra.Command{ + Use: "config-dump", + Short: "Load and print the merged adapter configuration as YAML", + Long: `Load the adapter configuration from config files, environment variables, +and CLI flags, then print the merged result as YAML to stdout. +Sensitive fields (certificates, keys) are redacted. +Exits with code 0 on success, non-zero on error. + +Priority order (lowest to highest): config file < env vars < CLI flags`, + RunE: func(cmd *cobra.Command, args []string) error { + return runConfigDump(cmd.Flags()) + }, + } + addConfigPathFlags(configDumpCmd) + addOverrideFlags(configDumpCmd) + configDumpCmd.Flags().Bool("debug-config", false, + "Include debug_config field in output. Env: HYPERFLEET_DEBUG_CONFIG") + configDumpCmd.Flags().StringVar(&logLevel, "log-level", "", + "Log level (debug, info, warn, error). Env: LOG_LEVEL") + configDumpCmd.Flags().StringVar(&logFormat, "log-format", "", + "Log format (text, json). Env: LOG_FORMAT") + configDumpCmd.Flags().StringVar(&logOutput, "log-output", "", + "Log output (stdout, stderr). Env: LOG_OUTPUT") + // Version command versionCmd := &cobra.Command{ Use: "version", @@ -161,6 +161,7 @@ Dry-run mode: // Add subcommands rootCmd.AddCommand(serveCmd) + rootCmd.AddCommand(configDumpCmd) rootCmd.AddCommand(versionCmd) // Execute @@ -178,12 +179,37 @@ func isDryRun() bool { // Configuration loading (shared between serve and dry-run) // ----------------------------------------------------------------------------- -// buildLoggerConfig creates a logger configuration from environment variables -// and command-line flags. Flags take precedence over environment variables. 
-func buildLoggerConfig(component string) logger.Config { - cfg := logger.ConfigFromEnv() +// buildLoggerConfig creates a logger configuration with the following priority +// (lowest to highest): config file < LOG_* env vars < --log-* CLI flags. +// Pass logCfg=nil for the bootstrap logger (before config is loaded). +func buildLoggerConfig(component string, logCfg *config_loader.LogConfig) logger.Config { + cfg := logger.DefaultConfig() - // Override with command-line flags if provided + // Apply config file values (lowest priority) + if logCfg != nil { + if logCfg.Level != "" { + cfg.Level = logCfg.Level + } + if logCfg.Format != "" { + cfg.Format = logCfg.Format + } + if logCfg.Output != "" { + cfg.Output = logCfg.Output + } + } + + // Apply environment variables (override config file) + if level := os.Getenv("LOG_LEVEL"); level != "" { + cfg.Level = strings.ToLower(level) + } + if format := os.Getenv("LOG_FORMAT"); format != "" { + cfg.Format = strings.ToLower(format) + } + if output := os.Getenv("LOG_OUTPUT"); output != "" { + cfg.Output = output + } + + // Apply CLI flags (highest priority) if logLevel != "" { cfg.Level = logLevel } @@ -201,13 +227,13 @@ func buildLoggerConfig(component string) logger.Config { } // loadConfig loads the unified adapter configuration from both config files. 
-func loadConfig(ctx context.Context, log logger.Logger) (*config_loader.Config, error) { +func loadConfig(ctx context.Context, log logger.Logger, flags *pflag.FlagSet) (*config_loader.Config, error) { log.Info(ctx, "Loading adapter configuration...") config, err := config_loader.LoadConfig( config_loader.WithAdapterConfigPath(configPath), config_loader.WithTaskConfigPath(taskConfigPath), config_loader.WithAdapterVersion(version.Version), - config_loader.WithFlags(serveFlags), + config_loader.WithFlags(flags), ) if err != nil { errCtx := logger.WithErrorField(ctx, err) @@ -270,9 +296,9 @@ func createAPIClient(apiConfig config_loader.HyperfleetAPIConfig, log logger.Log // createTransportClient creates the appropriate transport client based on config. func createTransportClient(ctx context.Context, config *config_loader.Config, log logger.Logger) (transport_client.TransportClient, error) { - if config.Spec.Clients.Maestro != nil { + if config.Clients.Maestro != nil { log.Info(ctx, "Creating Maestro transport client...") - client, err := createMaestroClient(ctx, config.Spec.Clients.Maestro, log) + client, err := createMaestroClient(ctx, config.Clients.Maestro, log) if err != nil { return nil, fmt.Errorf("failed to create Maestro client: %w", err) } @@ -281,7 +307,7 @@ func createTransportClient(ctx context.Context, config *config_loader.Config, lo } log.Info(ctx, "Creating Kubernetes transport client...") - client, err := createK8sClient(ctx, config.Spec.Clients.Kubernetes, log) + client, err := createK8sClient(ctx, config.Clients.Kubernetes, log) if err != nil { return nil, fmt.Errorf("failed to create Kubernetes client: %w", err) } @@ -350,13 +376,13 @@ func buildExecutor(config *config_loader.Config, apiClient hyperfleet_api.Client // ----------------------------------------------------------------------------- // runServe contains the main application logic for the serve command -func runServe() error { +func runServe(flags *pflag.FlagSet) error { // Create 
context that cancels on system signals ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Create bootstrap logger (before config is loaded) - log, err := logger.NewLogger(buildLoggerConfig("hyperfleet-adapter")) + log, err := logger.NewLogger(buildLoggerConfig("hyperfleet-adapter", nil)) if err != nil { return fmt.Errorf("failed to create logger: %w", err) } @@ -364,35 +390,36 @@ func runServe() error { log.Infof(ctx, "Starting Hyperfleet Adapter version=%s commit=%s built=%s tag=%s", version.Version, version.Commit, version.BuildDate, version.Tag) // Load unified configuration (deployment + task configs) - config, err := loadConfig(ctx, log) + config, err := loadConfig(ctx, log, flags) if err != nil { return err } - // Recreate logger with component name from config - log, err = logger.NewLogger(buildLoggerConfig(config.Metadata.Name)) + // Recreate logger with component name and log settings from config + log, err = logger.NewLogger(buildLoggerConfig(config.Adapter.Name, &config.Log)) if err != nil { return fmt.Errorf("failed to create logger with adapter config: %w", err) } log.Infof(ctx, "Adapter configuration loaded successfully: name=%s ", - config.Metadata.Name) - log.Infof(ctx, "HyperFleet API client configured: timeout=%s retryAttempts=%d", - config.Spec.Clients.HyperfleetAPI.Timeout.String(), - config.Spec.Clients.HyperfleetAPI.RetryAttempts) - if config.Spec.DebugConfig { - configBytes, err := yaml.Marshal(config) - if err != nil { + config.Adapter.Name) + log.Infof(ctx, "HyperFleet API client configured: timeout=%s retry_attempts=%d", + config.Clients.HyperfleetAPI.Timeout.String(), + config.Clients.HyperfleetAPI.RetryAttempts) + var redactedConfigBytes []byte + if config.DebugConfig { + if data, err := yaml.Marshal(config.Redacted()); err != nil { errCtx := logger.WithErrorField(ctx, err) log.Warnf(errCtx, "Failed to marshal adapter configuration for logging") } else { - log.Infof(ctx, "Loaded adapter configuration:\n%s", 
string(configBytes)) + redactedConfigBytes = data + log.Infof(ctx, "Loaded adapter configuration:\n%s", string(redactedConfigBytes)) } } // Initialize OpenTelemetry sampleRatio := otel.GetTraceSampleRatio(log, ctx) - tp, err := otel.InitTracer(config.Metadata.Name, version.Version, sampleRatio) + tp, err := otel.InitTracer(config.Adapter.Name, version.Version, sampleRatio) if err != nil { errCtx := logger.WithErrorField(ctx, err) log.Errorf(errCtx, "Failed to initialize OpenTelemetry") @@ -408,13 +435,16 @@ func runServe() error { }() // Start health server - healthServer := health.NewServer(log, HealthServerPort, config.Metadata.Name) + healthServer := health.NewServer(log, HealthServerPort, config.Adapter.Name) if err := healthServer.Start(ctx); err != nil { errCtx := logger.WithErrorField(ctx, err) log.Errorf(errCtx, "Failed to start health server") return fmt.Errorf("failed to start health server: %w", err) } healthServer.SetConfigLoaded() + if len(redactedConfigBytes) > 0 { + healthServer.SetConfig(redactedConfigBytes) + } defer func() { shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), HealthServerShutdownTimeout) defer shutdownCancel() @@ -426,7 +456,7 @@ func runServe() error { // Start metrics server metricsServer := health.NewMetricsServer(log, MetricsServerPort, health.MetricsConfig{ - Component: config.Metadata.Name, + Component: config.Adapter.Name, Version: version.Version, Commit: version.Commit, }) @@ -445,11 +475,11 @@ func runServe() error { }() // Create adapter metrics recorder - metricsRecorder := metrics.NewRecorder(config.Metadata.Name, version.Version, nil) + metricsRecorder := metrics.NewRecorder(config.Adapter.Name, version.Version, nil) // Create real clients log.Info(ctx, "Creating HyperFleet API client...") - apiClient, err := createAPIClient(config.Spec.Clients.HyperfleetAPI, log) + apiClient, err := createAPIClient(config.Clients.HyperfleetAPI, log) if err != nil { errCtx := logger.WithErrorField(ctx, err) 
log.Errorf(errCtx, "Failed to create HyperFleet API client") @@ -492,24 +522,24 @@ func runServe() error { }() // Get broker config - subscriptionID := config.Spec.Clients.Broker.SubscriptionID + subscriptionID := config.Clients.Broker.SubscriptionID if subscriptionID == "" { - err := fmt.Errorf("spec.clients.broker.subscriptionId is required") + err := fmt.Errorf("clients.broker.subscription_id is required") errCtx := logger.WithErrorField(ctx, err) log.Errorf(errCtx, "Missing required broker configuration") return err } - topic := config.Spec.Clients.Broker.Topic + topic := config.Clients.Broker.Topic if topic == "" { - err := fmt.Errorf("spec.clients.broker.topic is required") + err := fmt.Errorf("clients.broker.topic is required") errCtx := logger.WithErrorField(ctx, err) log.Errorf(errCtx, "Missing required broker configuration") return err } // Create broker metrics recorder - brokerMetrics := broker.NewMetricsRecorder(config.Metadata.Name, version.Version, nil) + brokerMetrics := broker.NewMetricsRecorder(config.Adapter.Name, version.Version, nil) // Create broker subscriber and subscribe log.Info(ctx, "Creating broker subscriber...") @@ -594,7 +624,7 @@ func runServe() error { // ----------------------------------------------------------------------------- // runDryRun processes a single CloudEvent from file using mock clients. 
-func runDryRun() error { +func runDryRun(flags *pflag.FlagSet) error { ctx := context.Background() // Create logger on stderr so stdout is reserved for trace output @@ -609,7 +639,7 @@ func runDryRun() error { } // Load config (same path as serve) - config, err := loadConfig(ctx, log) + config, err := loadConfig(ctx, log, flags) if err != nil { return err } @@ -686,3 +716,82 @@ func runDryRun() error { return nil } + +// ----------------------------------------------------------------------------- +// Config-dump mode +// ----------------------------------------------------------------------------- + +// runConfigDump loads the full adapter configuration and prints it as YAML to stdout. +// Sensitive fields are redacted. Exits 0 on success. +func runConfigDump(flags *pflag.FlagSet) error { + ctx := context.Background() + log, err := logger.NewLogger(buildLoggerConfig("config-dump", nil)) + if err != nil { + return fmt.Errorf("failed to create logger: %w", err) + } + + config, err := loadConfig(ctx, log, flags) + if err != nil { + return err + } + + data, err := yaml.Marshal(config.Redacted()) + if err != nil { + return fmt.Errorf("failed to marshal config: %w", err) + } + fmt.Print(string(data)) + return nil +} + +// ----------------------------------------------------------------------------- +// Flag registration helpers (shared between serve and config-dump) +// ----------------------------------------------------------------------------- + +// addConfigPathFlags registers the --config and --task-config path flags. 
+func addConfigPathFlags(cmd *cobra.Command) { + cmd.Flags().StringVarP(&configPath, "config", "c", "", + fmt.Sprintf("Path to adapter deployment config file (can also use %s env var)", config_loader.EnvAdapterConfig)) + cmd.Flags().StringVarP(&taskConfigPath, "task-config", "t", "", + fmt.Sprintf("Path to adapter task config file (can also use %s env var)", config_loader.EnvTaskConfigPath)) +} + +// addOverrideFlags registers all configuration override flags (Maestro, API, broker, Kubernetes). +// These flags are available on both the serve and config-dump commands. +func addOverrideFlags(cmd *cobra.Command) { + // Maestro override flags + cmd.Flags().String("maestro-grpc-server-address", "", "Maestro gRPC server address. Env: HYPERFLEET_MAESTRO_GRPC_SERVER_ADDRESS") + cmd.Flags().String("maestro-http-server-address", "", "Maestro HTTP server address. Env: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS") + cmd.Flags().String("maestro-source-id", "", "Maestro source ID. Env: HYPERFLEET_MAESTRO_SOURCE_ID") + cmd.Flags().String("maestro-client-id", "", "Maestro client ID. Env: HYPERFLEET_MAESTRO_CLIENT_ID") + cmd.Flags().String("maestro-auth-type", "", "Maestro auth type (tls, none). Env: HYPERFLEET_MAESTRO_AUTH_TYPE") + cmd.Flags().String("maestro-ca-file", "", "Maestro gRPC CA certificate file. Env: HYPERFLEET_MAESTRO_CA_FILE") + cmd.Flags().String("maestro-cert-file", "", "Maestro gRPC client certificate file. Env: HYPERFLEET_MAESTRO_CERT_FILE") + cmd.Flags().String("maestro-key-file", "", "Maestro gRPC client key file. Env: HYPERFLEET_MAESTRO_KEY_FILE") + cmd.Flags().String("maestro-http-ca-file", "", "Maestro HTTP CA certificate file. Env: HYPERFLEET_MAESTRO_HTTP_CA_FILE") + cmd.Flags().String("maestro-timeout", "", "Maestro client timeout (e.g. 10s). Env: HYPERFLEET_MAESTRO_TIMEOUT") + cmd.Flags().String("maestro-server-healthiness-timeout", "", "Maestro server healthiness check timeout (e.g. 20s). 
Env: HYPERFLEET_MAESTRO_SERVER_HEALTHINESS_TIMEOUT") + cmd.Flags().Int("maestro-retry-attempts", 0, "Maestro retry attempts. Env: HYPERFLEET_MAESTRO_RETRY_ATTEMPTS") + cmd.Flags().String("maestro-keepalive-time", "", "Maestro gRPC keepalive ping interval (e.g. 30s). Env: HYPERFLEET_MAESTRO_KEEPALIVE_TIME") + cmd.Flags().String("maestro-keepalive-timeout", "", "Maestro gRPC keepalive ping timeout (e.g. 10s). Env: HYPERFLEET_MAESTRO_KEEPALIVE_TIMEOUT") + cmd.Flags().Bool("maestro-insecure", false, "Use insecure connection to Maestro. Env: HYPERFLEET_MAESTRO_INSECURE") + + // HyperFleet API override flags + cmd.Flags().String("hyperfleet-api-base-url", "", "HyperFleet API base URL. Env: HYPERFLEET_API_BASE_URL") + cmd.Flags().String("hyperfleet-api-version", "", "HyperFleet API version (e.g. v1). Env: HYPERFLEET_API_VERSION") + cmd.Flags().String("hyperfleet-api-timeout", "", "HyperFleet API timeout (e.g. 10s). Env: HYPERFLEET_API_TIMEOUT") + cmd.Flags().Int("hyperfleet-api-retry", 0, "HyperFleet API retry attempts. Env: HYPERFLEET_API_RETRY_ATTEMPTS") + cmd.Flags().String("hyperfleet-api-retry-backoff", "", "HyperFleet API retry backoff strategy (exponential, linear, constant). Env: HYPERFLEET_API_RETRY_BACKOFF") + cmd.Flags().String("hyperfleet-api-base-delay", "", "HyperFleet API retry base delay (e.g. 1s). Env: HYPERFLEET_API_BASE_DELAY") + cmd.Flags().String("hyperfleet-api-max-delay", "", "HyperFleet API retry max delay (e.g. 30s). Env: HYPERFLEET_API_MAX_DELAY") + + // Broker override flags + cmd.Flags().String("broker-subscription-id", "", "Broker subscription ID. Env: HYPERFLEET_BROKER_SUBSCRIPTION_ID") + cmd.Flags().String("broker-topic", "", "Broker topic. Env: HYPERFLEET_BROKER_TOPIC") + + // Kubernetes override flags + cmd.Flags().String("kubernetes-kube-config-path", "", + "Path to kubeconfig file (empty = in-cluster auth). Env: HYPERFLEET_KUBERNETES_KUBE_CONFIG_PATH") + cmd.Flags().String("kubernetes-api-version", "", "Kubernetes API version. 
Env: HYPERFLEET_KUBERNETES_API_VERSION") + cmd.Flags().Float64("kubernetes-qps", 0, "Kubernetes client QPS rate limit. Env: HYPERFLEET_KUBERNETES_QPS") + cmd.Flags().Int("kubernetes-burst", 0, "Kubernetes client burst rate limit. Env: HYPERFLEET_KUBERNETES_BURST") +} diff --git a/configs/adapter-config-template.yaml b/configs/adapter-config-template.yaml index 4f2ce1b..72ce184 100644 --- a/configs/adapter-config-template.yaml +++ b/configs/adapter-config-template.yaml @@ -1,293 +1,131 @@ -# HyperFleet Adapter Task Configuration Template (MVP) +# HyperFleet Adapter Deployment Configuration # -# This is a Configuration Template for configuring cloud provider adapters -# using the HyperFleet Adapter Framework with CEL (Common Expression Language). +# This file contains ONLY infrastructure and deployment-related settings: +# - Client connections (Maestro, HyperFleet API, Kubernetes) +# - Authentication and TLS configuration +# - Connection timeouts and retry policies # -# TEMPLATE SYNTAX: -# ================ -# 1. Go Templates ({{ .var }}) - Variable interpolation throughout -# 2. field: "path" - Simple JSON path extraction (translated to CEL internally) -# 3. expression: "cel" - Full CEL expressions for complex logic +# NOTE: This is a SAMPLE configuration file for reference and local development. +# It is NOT automatically packaged with the container image (see Dockerfile). # -# CONDITION SYNTAX (when:): -# ========================= -# Option 1: Expression syntax (CEL) -# when: -# expression: | -# clusterPhase == "Terminating" +# In production, provide configuration via one of these methods: +# 1. ADAPTER_CONFIG_PATH environment variable pointing to a config file (highest priority) +# 2. 
ConfigMap mounted at /etc/adapter/config/adapter-deployment-config.yaml # -# Option 2: Structured conditions (field + operator + value) -# when: -# conditions: -# - field: "clusterPhase" -# operator: "equals" -# value: "Terminating" +# Example Kubernetes deployment: +# env: +# - name: ADAPTER_CONFIG_PATH +# value: /etc/adapter/config/adapter-deployment-config.yaml +# volumeMounts: +# - name: config +# mountPath: /etc/adapter/config # -# Supported operators: equals, notEquals, in, notIn, contains, greaterThan, lessThan, exists -# -# CEL OPTIONAL CHAINING: -# ====================== -# Use optional chaining with orValue() to safely access potentially missing fields: -# resources.?clusterNamespace.?status.?phase.orValue("") -# adapter.?executionStatus.orValue("") -# -# Copy this file to your adapter repository and customize for your needs. +# For business logic configuration (params, preconditions, resources, post-actions), +# use a separate business config file. See configs/adapter-task-config-template.yaml + +adapter: + name: hyperfleet-adapter + version: "0.1.0" + +# Log the full merged configuration after load (default: false) +# Environment variable: HYPERFLEET_DEBUG_CONFIG +# Flag: --debug-config +debug_config: false + +# Logging configuration +# Priority: CLI flag > LOG_LEVEL/LOG_FORMAT/LOG_OUTPUT env vars > this file > defaults +log: + # Log level: debug, info, warn, error (default: info) + # Environment variable: LOG_LEVEL + # Flag: --log-level + level: "info" + + # Log format: text, json (default: text) + # Environment variable: LOG_FORMAT + # Flag: --log-format + format: "json" + + # Log output: stdout, stderr (default: stdout) + # Environment variable: LOG_OUTPUT + # Flag: --log-output + output: "stdout" + +# Client configurations for external services +clients: + # Maestro transport client configuration + maestro: + # gRPC server address + # Environment variable: HYPERFLEET_MAESTRO_GRPC_SERVER_ADDRESS + # Flag: --maestro-grpc-server-address + 
grpc_server_address: "maestro-grpc.maestro.svc.cluster.local:8090" + + # HTTPS server address for REST API operations (optional) + # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS + http_server_address: "https://maestro-api.maestro.svc.cluster.local" + + # Source identifier for CloudEvents routing (must be unique across adapters) + # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID + source_id: "hyperfleet-adapter" + + # Client identifier (defaults to source_id if not specified) + # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID + client_id: "hyperfleet-adapter-client" + + # Authentication configuration + auth: + type: "tls" # TLS certificate-based mTLS + + tls_config: + # gRPC TLS configuration + # Certificate paths (mounted from Kubernetes secrets) + # Environment variable: HYPERFLEET_MAESTRO_CA_FILE + ca_file: "/etc/maestro/certs/grpc/ca.crt" + + # Environment variable: HYPERFLEET_MAESTRO_CERT_FILE + cert_file: "/etc/maestro/certs/grpc/client.crt" -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - # Adapter name (used as resource name and in logs/metrics) - name: example-adapter - labels: - hyperfleet.io/adapter-type: example - hyperfleet.io/component: adapter + # Environment variable: HYPERFLEET_MAESTRO_KEY_FILE + key_file: "/etc/maestro/certs/grpc/client.key" -# ============================================================================ -# Task Specification -# ============================================================================ -spec: - # ============================================================================ - # Global params - # ============================================================================ - # params to extract from CloudEvent and environment variables - # - # SUPPORTED TYPES: - # ================ - # - string: Default, any value converted to string - # - int/int64: Integer value (strings parsed, floats truncated) - # - float/float64: Floating point value - # - bool: Boolean 
(supports: true/false, yes/no, on/off, 1/0) - # - params: - # Environment variables from deployment - - name: "hyperfleetApiBaseUrl" - source: "env.HYPERFLEET_API_BASE_URL" - type: "string" - description: "Base URL for the HyperFleet API" - required: true - - - name: "hyperfleetApiVersion" - source: "env.HYPERFLEET_API_VERSION" - type: "string" - default: "v1" - description: "API version to use" - - # Extract from CloudEvent data - - name: "clusterId" - source: "event.id" - type: "string" - description: "Unique identifier for the target cluster" - required: true - - # Example: Extract and convert to int - # - name: "nodeCount" - # source: "event.spec.nodeCount" - # type: "int" - # default: 3 - # description: "Number of nodes in the cluster" - - # Example: Extract and convert to bool - # - name: "enableFeature" - # source: "env.ENABLE_FEATURE" - # type: "bool" - # default: false - # description: "Enable experimental feature" - + # HTTP API TLS configuration (may use different CA than gRPC) + # If not set, falls back to ca_file for backwards compatibility + # Environment variable: HYPERFLEET_MAESTRO_HTTP_CA_FILE + http_ca_file: "/etc/maestro/certs/https/ca.crt" - # ============================================================================ - # Global Preconditions - # ============================================================================ - # These preconditions run sequentially and validate cluster state before resource operations. - # - # DATA SCOPES: - # ============ - # Capture scope (field/expression): API response data only - # - Access: status.phase, items[0].name, etc. 
- # - # Conditions scope (conditions/expression): Full execution context - # - params.* : Original extracted params - # - .*: Full API response (e.g., clusterStatus.status.phase) - # - capturedField : Explicitly captured values - # - adapter.* : Adapter metadata - # - resources.* : Created resources (empty during preconditions) - # - preconditions: - # ========================================================================== - # Step 1: Get cluster status - # ========================================================================== - - name: "clusterStatus" - apiCall: - method: "GET" - # NOTE: API path includes /api/hyperfleet/ prefix - url: "{{ .hyperfleetApiBaseUrl }}/api/hyperfleet/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - # Capture fields from the API response. Captured values become variables for use in resources section. - # SCOPE: API response data only - # Supports two modes: - # - field: Simple dot notation or JSONPath expression for extracting values - # - expression: CEL expression for computed values - # Only one of 'field' or 'expression' can be set per capture. - capture: - # Simple dot notation - - name: "clusterName" - field: "name" - - name: "clusterPhase" - field: "status.phase" - - name: "generationId" - field: "generation" - - # JSONPath for complex extraction (filter by field value) - # See: https://kubernetes.io/docs/reference/kubectl/jsonpath/ - # - name: "lzNamespaceStatus" - # field: "{.items[?(@.adapter=='landing-zone-adapter')].data.namespace.status}" - - # CEL expression for computed values - # - name: "activeItemCount" - # expression: "items.filter(i, i.status == 'active').size()" - - # Conditions to check. SCOPE: Full execution context - # You can access: - # - Captured values: clusterPhase, clusterName, etc. - # - Full API response: clusterStatus.status.phase, clusterStatus.spec.nodeCount - # - Params: clusterId, hyperfleetApiBaseUrl, etc. 
- conditions: - # Using captured value - - field: "clusterPhase" - operator: "equals" - value: "NotReady" - - # Or dig directly into API response using precondition name - # - field: "clusterStatus.status.nodeCount" - # operator: "greaterThan" - # value: 0 - - # Alternative: CEL expression with full access - # expression: | - # clusterStatus.status.phase == "Ready" && - # clusterStatus.spec.nodeCount > 0 - - # ============================================================================ - # Resources (Create/Update Resources) - # ============================================================================ - # All resources are created/updated sequentially in the order defined below - resources: - # ========================================================================== - # Resource 1: Cluster Namespace - # ========================================================================== - - name: "clusterNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - # Use | lower to ensure valid K8s resource name (lowercase RFC 1123) - name: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - hyperfleet.io/resource-type: "namespace" - annotations: - hyperfleet.io/created-by: "hyperfleet-adapter" - hyperfleet.io/generation: "{{ .generationId }}" - discovery: - # The "namespace" field within discovery is optional: - # - For namespaced resources: set namespace to target the specific namespace - # - For cluster-scoped resources (like Namespace, ClusterRole): omit or leave empty - # Here we omit it since Namespace is cluster-scoped - bySelectors: - labelSelector: - hyperfleet.io/resource-type: "namespace" - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" + # Connection settings + timeout: "30s" + # Timeout for the server healthiness check before starting (optional) + # Environment variable: HYPERFLEET_MAESTRO_SERVER_HEALTHINESS_TIMEOUT 
+ # Flag: --maestro-server-healthiness-timeout + server_healthiness_timeout: "20s" - # ============================================================================ - # Post-Processing - # ============================================================================ - post: - payloads: - # Build status payload inline - - name: "clusterStatusPayload" - build: - # Adapter name for tracking which adapter reported this status - adapter: "{{ .metadata.name }}" - - # Conditions array - each condition has type, status, reason, message - # Use CEL optional chaining ?.orValue() for safe field access - conditions: - # Applied: Resources successfully created - - type: "Applied" - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" - reason: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" - ? "NamespaceCreated" - : "NamespacePending" - message: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" - ? "Namespace created successfully" - : "Namespace creation in progress" + retry_attempts: 3 - # Available: Resources are active and ready - - type: "Available" - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" - reason: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "NamespaceReady" : "NamespaceNotReady" - message: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "Namespace is active and ready" : "Namespace is not active and ready" + # Keep-alive for long-lived gRPC connections + keepalive: + time: "30s" + timeout: "10s" - # Health: Adapter execution status (runtime) Don't need to update this. This can be reused from the adapter config. - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" ? "True" : (adapter.?executionStatus.orValue("") == "failed" ? 
"False" : "Unknown") - reason: - expression: | - adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" - message: - expression: | - adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations completed successfully" - - # Use CEL expression for numeric fields to preserve type (not Go template which outputs strings) - observed_generation: - expression: "generationId" - - # Use Go template with now and date functions for timestamps - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + # HyperFleet HTTP API client + hyperfleet_api: + base_url: http://hyperfleet-api:8000 + version: v1 + timeout: 2s + retry_attempts: 3 + retry_backoff: exponential - # Optional data field for adapter-specific metrics extracted from resources - data: - namespace: - name: - expression: | - resources.?clusterNamespace.?metadata.?name.orValue("") - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") + # Broker consumer configuration (adapter-level) + broker: + subscription_id: "my-adapter-subscription" + topic: "my-clusters-topic" - # ============================================================================ - # Post Actions - # ============================================================================ - # Post actions are executed after resources are created/updated - postActions: - # Report cluster status to HyperFleet API (always executed) - - name: "reportClusterStatus" - apiCall: - method: "POST" - # NOTE: API path includes /api/hyperfleet/ prefix and ends with /statuses - url: "{{ .hyperfleetApiBaseUrl }}/api/hyperfleet/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses" - body: "{{ .clusterStatusPayload }}" - timeout: 30s - retryAttempts: 3 - retryBackoff: "exponential" - headers: - - name: "Content-Type" - value: "application/json" + # Kubernetes client (for direct K8s resources) + kubernetes: + api_version: "v1" + # Uses in-cluster service account by 
default + # Set kube_config_path for out-of-cluster access + kube_config_path: PATH_TO_KUBECONFIG_FILE + # Optional rate limits (0 uses defaults) + qps: 100 + burst: 200 diff --git a/configs/adapter-deployment-config.yaml b/configs/adapter-deployment-config.yaml deleted file mode 100644 index 2eec4bd..0000000 --- a/configs/adapter-deployment-config.yaml +++ /dev/null @@ -1,115 +0,0 @@ -# HyperFleet Adapter Deployment Configuration -# -# This file contains ONLY infrastructure and deployment-related settings: -# - Client connections (Maestro, HyperFleet API, Kubernetes) -# - Authentication and TLS configuration -# - Connection timeouts and retry policies -# -# NOTE: This is a SAMPLE configuration file for reference and local development. -# It is NOT automatically packaged with the container image (see Dockerfile). -# -# In production, provide configuration via one of these methods: -# 1. ADAPTER_CONFIG_PATH environment variable pointing to a config file (highest priority) -# 2. ConfigMap mounted at /etc/adapter/config/adapter-deployment-config.yaml -# -# Example Kubernetes deployment: -# env: -# - name: ADAPTER_CONFIG_PATH -# value: /etc/adapter/config/adapter-deployment-config.yaml -# volumeMounts: -# - name: config -# mountPath: /etc/adapter/config -# -# For business logic configuration (params, preconditions, resources, post-actions), -# use a separate business config file. 
See configs/adapter-config-template.yaml - -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: - name: hyperfleet-adapter - labels: - hyperfleet.io/component: adapter - -spec: - adapter: - version: "0.1.0" - - # Log the full merged configuration after load (default: false) - # Environment variable: HYPERFLEET_DEBUG_CONFIG - # Flag: --debug-config - debugConfig: false - - # Client configurations for external services - clients: - # Maestro transport client configuration - maestro: - # gRPC server address - # Environment variable: HYPERFLEET_MAESTRO_GRPC_SERVER_ADDRESS - # Flag: --maestro-grpc-server-address - grpcServerAddress: "maestro-grpc.maestro.svc.cluster.local:8090" - - # HTTPS server address for REST API operations (optional) - # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS - httpServerAddress: "https://maestro-api.maestro.svc.cluster.local" - - # Source identifier for CloudEvents routing (must be unique across adapters) - # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID - sourceId: "hyperfleet-adapter" - - # Client identifier (defaults to sourceId if not specified) - # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID - clientId: "hyperfleet-adapter-client" - - # Authentication configuration - auth: - type: "tls" # TLS certificate-based mTLS - - tlsConfig: - # Certificate paths (mounted from Kubernetes secrets) - # Environment variable: HYPERFLEET_MAESTRO_CA_FILE - caFile: "/etc/maestro/certs/grpc/ca.crt" - - # Environment variable: HYPERFLEET_MAESTRO_CERT_FILE - certFile: "/etc/maestro/certs/grpc/client.crt" - - # Environment variable: HYPERFLEET_MAESTRO_KEY_FILE - keyFile: "/etc/maestro/certs/grpc/client.key" - - # HTTP API CA certificate (if HTTPS uses a different CA than gRPC) - # Falls back to caFile when not set - # Environment variable: HYPERFLEET_MAESTRO_HTTP_CA_FILE - httpCaFile: "/etc/maestro/certs/https/ca.crt" - - # Connection settings - timeout: "30s" - retryAttempts: 3 - retryBackoff: "exponential" 
- - # Keep-alive for long-lived gRPC connections - keepalive: - time: "30s" - timeout: "10s" - permitWithoutStream: true - - # HyperFleet HTTP API client - hyperfleetApi: - baseUrl: http://hyperfleet-api:8000 - version: v1 - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential - - # Broker consumer configuration (adapter-level) - broker: - subscriptionId: "amarin-ns1-clusters-validation-gcp-adapter" - topic: "amarin-ns1-clusters" - - # Kubernetes client (for direct K8s resources) - kubernetes: - apiVersion: "v1" - # Uses in-cluster service account by default - # Set kubeConfigPath for out-of-cluster access - kubeConfigPath: PATH_TO_KUBECONFIG_FILE - # Optional rate limits (0 uses defaults) - qps: 100 - burst: 200 diff --git a/configs/adapter-task-config-template.yaml b/configs/adapter-task-config-template.yaml index eb0e6e3..373ce1e 100644 --- a/configs/adapter-task-config-template.yaml +++ b/configs/adapter-task-config-template.yaml @@ -9,19 +9,17 @@ # 2. field: "path" - Simple JSON path extraction (translated to CEL internally) # 3. expression: "cel" - Full CEL expressions for complex logic # -# CONDITION SYNTAX (when:): -# ========================= -# Option 1: Expression syntax (CEL) -# when: -# expression: | -# readyConditionStatus == "False" +# CONDITION SYNTAX: +# ================= +# Option 1: CEL expression (direct field on precondition) +# expression: | +# readyConditionStatus == "False" # -# Option 2: Structured conditions (field + operator + value) -# when: -# conditions: -# - field: "readyConditionStatus" -# operator: "equals" -# value: "Terminating" +# Option 2: Structured conditions (direct field on precondition) +# conditions: +# - field: "readyConditionStatus" +# operator: "equals" +# value: "Terminating" # # Supported operators: equals, notEquals, in, notIn, contains, greaterThan, lessThan, exists # @@ -33,264 +31,250 @@ # # Copy this file to your adapter repository and customize for your needs. 
-apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - # Adapter name (used as resource name and in logs/metrics) - name: example-adapter - labels: - hyperfleet.io/adapter-type: example - hyperfleet.io/component: adapter +# ============================================================================ +# Global params +# ============================================================================ +# params to extract from CloudEvent and environment variables +# +# SUPPORTED TYPES: +# ================ +# - string: Default, any value converted to string +# - int/int64: Integer value (strings parsed, floats truncated) +# - float/float64: Floating point value +# - bool: Boolean (supports: true/false, yes/no, on/off, 1/0) +# +params: + # Environment variables from deployment + - name: "hyperfleetApiBaseUrl" + source: "env.HYPERFLEET_API_BASE_URL" + type: "string" + description: "Base URL for the HyperFleet API" + required: true + + - name: "hyperfleetApiVersion" + source: "env.HYPERFLEET_API_VERSION" + type: "string" + default: "v1" + description: "API version to use" + + # Extract from CloudEvent data + - name: "clusterId" + source: "event.id" + type: "string" + description: "Unique identifier for the target cluster" + required: true + + # Example: Extract and convert to int + # - name: "nodeCount" + # source: "event.spec.nodeCount" + # type: "int" + # default: 3 + # description: "Number of nodes in the cluster" + + # Example: Extract and convert to bool + # - name: "enableFeature" + # source: "env.ENABLE_FEATURE" + # type: "bool" + # default: false + # description: "Enable experimental feature" + # ============================================================================ -# Task Specification +# Global Preconditions # ============================================================================ -spec: - # ============================================================================ - # Global params - # 
============================================================================ - # params to extract from CloudEvent and environment variables - # - # SUPPORTED TYPES: - # ================ - # - string: Default, any value converted to string - # - int/int64: Integer value (strings parsed, floats truncated) - # - float/float64: Floating point value - # - bool: Boolean (supports: true/false, yes/no, on/off, 1/0) - # - params: - # Environment variables from deployment - - name: "hyperfleetApiBaseUrl" - source: "env.HYPERFLEET_API_BASE_URL" - type: "string" - description: "Base URL for the HyperFleet API" - required: true - - - name: "hyperfleetApiVersion" - source: "env.HYPERFLEET_API_VERSION" - type: "string" - default: "v1" - description: "API version to use" - - # Extract from CloudEvent data - - name: "clusterId" - source: "event.id" - type: "string" - description: "Unique identifier for the target cluster" - required: true - - # Example: Extract and convert to int - # - name: "nodeCount" - # source: "event.spec.nodeCount" - # type: "int" - # default: 3 - # description: "Number of nodes in the cluster" - - # Example: Extract and convert to bool - # - name: "enableFeature" - # source: "env.ENABLE_FEATURE" - # type: "bool" - # default: false - # description: "Enable experimental feature" - +# These preconditions run sequentially and validate cluster state before resource operations. +# +# DATA SCOPES: +# ============ +# Capture scope (field/expression): API response data only +# - Access: status.conditions, items[0].name, etc. 
+# +# Conditions scope (conditions/expression): Full execution context +# - params.* : Original extracted params +# - <preconditionName>.*: Full API response (e.g., clusterStatus.status.conditions) +# - capturedField : Explicitly captured values +# - adapter.* : Adapter metadata +# - resources.* : Created resources (empty during preconditions) +# +preconditions: + # ========================================================================== + # Step 1: Get cluster status + # ========================================================================== + - name: "clusterStatus" + api_call: + method: "GET" + # NOTE: relative path — the /api/hyperfleet/<version> prefix comes from the base URL configured in AdapterConfig + url: "/clusters/{{ .clusterId }}" + timeout: 10s + retry_attempts: 3 + retry_backoff: "exponential" + # Capture fields from the API response. Captured values become variables for use in resources section. + # SCOPE: API response data only + # Supports two modes: + # - field: Simple dot notation or JSONPath expression for extracting values + # - expression: CEL expression for computed values + # Only one of 'field' or 'expression' can be set per capture. + capture: + # Simple dot notation + - name: "clusterName" + field: "name" + - name: "readyConditionStatus" + expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + - name: "generationId" + field: "generation" - # ============================================================================ - # Global Preconditions - # ============================================================================ - # These preconditions run sequentially and validate cluster state before resource operations. - # - # DATA SCOPES: - # ============ - # Capture scope (field/expression): API response data only - # - Access: status.conditions, items[0].name, etc. 
- # - # Conditions scope (conditions/expression): Full execution context - # - params.* : Original extracted params - # - .*: Full API response (e.g., clusterStatus.status.conditions) - # - capturedField : Explicitly captured values - # - adapter.* : Adapter metadata - # - resources.* : Created resources (empty during preconditions) - # - preconditions: - # ========================================================================== - # Step 1: Get cluster status - # ========================================================================== - - name: "clusterStatus" - apiCall: - method: "GET" - # NOTE: API path includes /api/hyperfleet/ prefix - url: "{{ .hyperfleetApiBaseUrl }}/api/hyperfleet/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - # Capture fields from the API response. Captured values become variables for use in resources section. - # SCOPE: API response data only - # Supports two modes: - # - field: Simple dot notation or JSONPath expression for extracting values - # - expression: CEL expression for computed values - # Only one of 'field' or 'expression' can be set per capture. - capture: - # Simple dot notation - - name: "clusterName" - field: "name" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - name: "generationId" - field: "generation" - - # JSONPath for complex extraction (filter by field value) - # See: https://kubernetes.io/docs/reference/kubectl/jsonpath/ - # - name: "lzNamespaceStatus" - # field: "{.items[?(@.adapter=='landing-zone-adapter')].data.namespace.status}" - - # CEL expression for computed values - # - name: "activeItemCount" - # expression: "items.filter(i, i.status == 'active').size()" - - # Conditions to check. SCOPE: Full execution context - # You can access: - # - Captured values: readyConditionStatus, clusterName, etc. 
- # - Full API response: clusterStatus.status.conditions, clusterStatus.spec.nodeCount - # - Params: clusterId, hyperfleetApiBaseUrl, etc. - conditions: - # Using captured value - - field: "readyConditionStatus" - operator: "equals" - value: "True" - - # Or dig directly into API response using precondition name - # - field: "clusterStatus.status.nodeCount" - # operator: "greaterThan" - # value: 0 - - # Alternative: CEL expression with full access - # expression: | - # clusterStatus.status.conditions.filter(c, c.type == "Ready")[0].status == "True" && - # clusterStatus.spec.nodeCount > 0 - - # ============================================================================ - # Resources (Create/Update Resources) - # ============================================================================ - # All resources are created/updated sequentially in the order defined below - resources: - # ========================================================================== - # Resource 1: Cluster Namespace - # ========================================================================== - - name: "clusterNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - # Use | lower to ensure valid K8s resource name (lowercase RFC 1123) - name: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - hyperfleet.io/resource-type: "namespace" - annotations: - hyperfleet.io/created-by: "hyperfleet-adapter" - hyperfleet.io/generation: "{{ .generationId }}" - discovery: - # The "namespace" field within discovery is optional: - # - For namespaced resources: set namespace to target the specific namespace - # - For cluster-scoped resources (like Namespace, ClusterRole): omit or leave empty - # Here we omit it since Namespace is cluster-scoped - bySelectors: - labelSelector: - hyperfleet.io/resource-type: "namespace" - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name 
}}" + # JSONPath for complex extraction (filter by field value) + # See: https://kubernetes.io/docs/reference/kubectl/jsonpath/ + # - name: "lzNamespaceStatus" + # field: "{.items[?(@.adapter=='landing-zone-adapter')].data.namespace.status}" + # CEL expression for computed values + # - name: "activeItemCount" + # expression: "items.filter(i, i.status == 'active').size()" - # ============================================================================ - # Post-Processing - # ============================================================================ - post: - payloads: - # Build status payload inline - - name: "clusterStatusPayload" - build: - # Adapter name for tracking which adapter reported this status - adapter: "{{ .metadata.name }}" - - # Conditions array - each condition has type, status, reason, message - # Use CEL optional chaining ?.orValue() for safe field access - conditions: - # Applied: Resources successfully created - - type: "Applied" - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" - reason: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" - ? "NamespaceCreated" - : "NamespacePending" - message: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" - ? "Namespace created successfully" - : "Namespace creation in progress" + # Conditions to check. SCOPE: Full execution context + # You can access: + # - Captured values: readyConditionStatus, clusterName, etc. + # - Full API response: clusterStatus.status.conditions, clusterStatus.spec.nodeCount + # - Params: clusterId, hyperfleetApiBaseUrl, etc. + conditions: + # Using captured value + - field: "readyConditionStatus" + operator: "equals" + value: "True" - # Available: Resources are active and ready - - type: "Available" - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? 
"True" : "False" - reason: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "NamespaceReady" : "NamespaceNotReady" - message: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "Namespace is active and ready" : "Namespace is not active and ready" + # Or dig directly into API response using precondition name + # - field: "clusterStatus.status.nodeCount" + # operator: "greaterThan" + # value: 0 - # Health: Adapter execution status (runtime) Don't need to update this. This can be reused from the adapter config. - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" ? "True" : (adapter.?executionStatus.orValue("") == "failed" ? "False" : "Unknown") - reason: - expression: | - adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" - message: - expression: | - adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations completed successfully" - - # Use CEL expression for numeric fields to preserve type (not Go template which outputs strings) - observed_generation: - expression: "generationId" - - # Use Go template with now and date functions for timestamps - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + # Alternative: CEL expression with full access + # expression: | + # clusterStatus.status.conditions.filter(c, c.type == "Ready")[0].status == "True" && + # clusterStatus.spec.nodeCount > 0 - # Optional data field for adapter-specific metrics extracted from resources - data: - namespace: - name: - expression: | - resources.?clusterNamespace.?metadata.?name.orValue("") - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") +# ============================================================================ +# Resources (Create/Update Resources) +# ============================================================================ +# All resources are 
created/updated sequentially in the order defined below +resources: + # ========================================================================== + # Resource 1: Cluster Namespace + # ========================================================================== + - name: "clusterNamespace" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/managed-by: "{{ .adapter.name }}" + hyperfleet.io/resource-type: "namespace" + annotations: + hyperfleet.io/created-by: "hyperfleet-adapter" + hyperfleet.io/generation: "{{ .generationId }}" + discovery: + # The "namespace" field within discovery is optional: + # - For namespaced resources: set namespace to target the specific namespace + # - For cluster-scoped resources (like Namespace, ClusterRole): omit or leave empty + # Here we omit it since Namespace is cluster-scoped + by_selectors: + label_selector: + hyperfleet.io/resource-type: "namespace" + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/managed-by: "{{ .adapter.name }}" + + +# ============================================================================ +# Post-Processing +# ============================================================================ +post: + payloads: + # Build status payload inline + - name: "clusterStatusPayload" + build: + # Adapter name for tracking which adapter reported this status + adapter: "{{ .adapter.name }}" - # ============================================================================ - # Post Actions - # ============================================================================ - # Post actions are executed after resources are created/updated - postActions: - # Report cluster status to HyperFleet API (always executed) - - name: "reportClusterStatus" - apiCall: - method: "POST" - # NOTE: API path includes /api/hyperfleet/ prefix and ends with /statuses - url: "{{ .hyperfleetApiBaseUrl }}/api/hyperfleet/{{ 
.hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses" - body: "{{ .clusterStatusPayload }}" - timeout: 30s - retryAttempts: 3 - retryBackoff: "exponential" - headers: - - name: "Content-Type" - value: "application/json" + # Conditions array - each condition has type, status, reason, message + # Use CEL optional chaining ?.orValue() for safe field access + conditions: + # Applied: Resources successfully created + - type: "Applied" + status: + expression: | + resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" + reason: + expression: | + resources.?clusterNamespace.?status.?phase.orValue("") == "Active" + ? "NamespaceCreated" + : "NamespacePending" + message: + expression: | + resources.?clusterNamespace.?status.?phase.orValue("") == "Active" + ? "Namespace created successfully" + : "Namespace creation in progress" + + # Available: Resources are active and ready + - type: "Available" + status: + expression: | + resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" + reason: + expression: | + resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "NamespaceReady" : "NamespaceNotReady" + message: + expression: | + resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "Namespace is active and ready" : "Namespace is not active and ready" + + # Health: Adapter execution status (runtime) Don't need to update this. This can be reused from the adapter config. + - type: "Health" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" ? "True" : (adapter.?executionStatus.orValue("") == "failed" ? "False" : "Unknown") + reason: + expression: | + has(adapter.executionError) && adapter.executionError != null ? "ExecutionFailed" : (adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : (adapter.?resourcesSkipped.orValue(false) ? 
"ResourcesSkipped" : "Healthy")) + message: + expression: | + has(adapter.executionError) && adapter.executionError != null && adapter.executionError.message != "" ? adapter.executionError.message : (adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : (adapter.?resourcesSkipped.orValue(false) ? "Some resources were skipped" : "All adapter operations completed successfully")) + + # Use CEL expression for numeric fields to preserve type (not Go template which outputs strings) + observed_generation: + expression: "generationId" + + # Use Go template with now and date functions for timestamps + observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + + # Optional data field for adapter-specific metrics extracted from resources + data: + namespace: + name: + expression: | + resources.?clusterNamespace.?metadata.?name.orValue("") + status: + expression: | + resources.?clusterNamespace.?status.?phase.orValue("") + + # ============================================================================ + # Post Actions + # ============================================================================ + # Post actions are executed after resources are created/updated + post_actions: + # Report cluster status to HyperFleet API (always executed) + - name: "reportClusterStatus" + api_call: + method: "POST" + # NOTE: relative path ending in /statuses — the /api/hyperfleet/<version> prefix comes from the base URL configured in AdapterConfig + url: "/clusters/{{ .clusterId }}/statuses" + body: "{{ .clusterStatusPayload }}" + timeout: 30s + retry_attempts: 3 + retry_backoff: "exponential" + headers: + - name: "Content-Type" + value: "application/json" diff --git a/configs/templates/cluster-status-payload.yaml b/configs/templates/cluster-status-payload.yaml deleted file mode 100644 index 6786fc3..0000000 --- a/configs/templates/cluster-status-payload.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Cluster Status Payload Template -# Used for reporting cluster status back to HyperFleet API -status: "{{ .status }}" -message: 
"{{ .message }}" -observedGeneration: "{{ .generation }}" -lastUpdated: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" -conditions: - - type: "Ready" - status: "{{ .readyStatus | default \"Unknown\" }}" - reason: "{{ .readyReason | default \"Pending\" }}" - message: "{{ .readyMessage | default \"Cluster status is being determined\" }}" - - type: "Configured" - status: "{{ .configuredStatus | default \"Unknown\" }}" - reason: "{{ .configuredReason | default \"Pending\" }}" - message: "{{ .configuredMessage | default \"Configuration is being applied\" }}" - diff --git a/configs/templates/deployment.yaml b/configs/templates/deployment.yaml deleted file mode 100644 index 4e76962..0000000 --- a/configs/templates/deployment.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# Cluster Controller Deployment Template -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "cluster-controller-{{ .clusterId }}" - namespace: "cluster-{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/component: "controller" -spec: - replicas: 1 - selector: - matchLabels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/component: "controller" - template: - metadata: - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/component: "controller" - spec: - containers: - - name: controller - image: "quay.io/hyperfleet/controller:{{ .imageTag }}" - env: - - name: CLUSTER_ID - value: "{{ .clusterId }}" - - name: RESOURCE_ID - value: "{{ .resourceId }}" - resources: - requests: - cpu: "100m" - memory: "128Mi" - limits: - cpu: "500m" - memory: "512Mi" - diff --git a/configs/templates/job.yaml b/configs/templates/job.yaml deleted file mode 100644 index 0309bba..0000000 --- a/configs/templates/job.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Validation Job Template -# This job is used to validate cluster configuration -apiVersion: batch/v1 -kind: Job -metadata: - name: "validation-{{ .clusterId }}" - namespace: "cluster-{{ .clusterId }}" - labels: 
- hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/job-type: "validation" - hyperfleet.io/resource-type: "job" - hyperfleet.io/managed-by: "{{ .metadata.name }}" -spec: - template: - metadata: - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/job-type: "validation" - spec: - restartPolicy: Never - containers: - - name: validator - image: "quay.io/hyperfleet/validator:v1.0.0" - env: - - name: CLUSTER_ID - value: "{{ .clusterId }}" - - name: GENERATION_ID - value: "{{ .generation }}" - diff --git a/configuration.md b/configuration.md deleted file mode 100644 index c8f3625..0000000 --- a/configuration.md +++ /dev/null @@ -1,169 +0,0 @@ -# Adapter Configuration Reference - -This document describes the deployment-level `AdapterConfig` options and how to set them -in three formats: YAML, command-line flags, and environment variables. - -Overrides are applied in this order: CLI flags > environment variables > YAML file > defaults. - -## Config file location - -You can point the adapter at a deployment config file with either: - -- CLI: `--config` (or `-c`) -- Env: `HYPERFLEET_ADAPTER_CONFIG` - -Task config is separate (`--task-config` / `HYPERFLEET_TASK_CONFIG`) and not covered here. - -## YAML options (AdapterConfig) - -All configuration is nested under `apiVersion`, `kind`, `metadata`, and `spec`. 
- -```yaml -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: - name: example-adapter - namespace: hyperfleet-system - labels: - hyperfleet.io/component: adapter -spec: - adapter: - version: "0.1.0" - debugConfig: false - clients: - maestro: - grpcServerAddress: "maestro-grpc.maestro.svc.cluster.local:8090" - httpServerAddress: "https://maestro-api.maestro.svc.cluster.local" - sourceId: "hyperfleet-adapter" - clientId: "hyperfleet-adapter-client" - auth: - type: "tls" - tlsConfig: - caFile: "/etc/maestro/certs/grpc/ca.crt" - certFile: "/etc/maestro/certs/grpc/client.crt" - keyFile: "/etc/maestro/certs/grpc/client.key" - timeout: "30s" - retryAttempts: 3 - keepalive: - time: "30s" - timeout: "10s" - insecure: false - hyperfleetApi: - baseUrl: "http://hyperfleet-api:8000" - version: "v1" - timeout: "10s" - retryAttempts: 3 - retryBackoff: "exponential" - baseDelay: "1s" - maxDelay: "30s" - defaultHeaders: - X-Example: "value" - broker: - subscriptionId: "example-subscription" - topic: "example-topic" - kubernetes: - apiVersion: "v1" - kubeConfigPath: "/path/to/kubeconfig" - qps: 100 - burst: 200 -``` - -### Top-level fields - -- `apiVersion` (string, required): Must be `hyperfleet.redhat.com/v1alpha1`. -- `kind` (string, required): Must be `AdapterConfig`. -- `metadata.name` (string, required): Adapter name. -- `metadata.labels` (map[string]string, optional): Labels for the adapter metadata. - -### Spec fields - -- `spec.adapter.version` (string, required): Adapter version expected by the binary. -- `spec.debugConfig` (bool, optional): Log the merged config after load. Default: `false`. - -### Maestro client (`spec.clients.maestro`) - -- `grpcServerAddress` (string): Maestro gRPC endpoint. -- `httpServerAddress` (string): Maestro HTTP API endpoint. -- `sourceId` (string): CloudEvents source identifier. -- `clientId` (string): Maestro client identifier. -- `auth.type` (string): Authentication type (`tls` or `none`). 
-- `auth.tlsConfig.caFile` (string): CA certificate path. -- `auth.tlsConfig.certFile` (string): Client certificate path. -- `auth.tlsConfig.keyFile` (string): Client key path. -- `timeout` (duration string): Request timeout (e.g. `30s`). -- `retryAttempts` (int): Number of retry attempts. -- `keepalive.time` (duration string): gRPC keepalive time. -- `keepalive.timeout` (duration string): gRPC keepalive timeout. -- `insecure` (bool): Allow insecure connection. - -### HyperFleet API client (`spec.clients.hyperfleetApi`) - -- `baseUrl` (string): Base URL for HyperFleet API requests. -- `version` (string): API version. Default: `v1`. -- `timeout` (duration string): HTTP client timeout. Default: `10s`. -- `retryAttempts` (int): Retry attempts. Default: `3`. -- `retryBackoff` (string): Backoff strategy (`exponential`, `linear`, `constant`). Default: `exponential`. -- `baseDelay` (duration string): Initial retry delay. Default: `1s`. -- `maxDelay` (duration string): Maximum retry delay. Default: `30s`. -- `defaultHeaders` (map[string]string): Headers added to all API requests. - -### Broker (`spec.clients.broker`) - -- `subscriptionId` (string): Broker subscription ID (required at runtime). -- `topic` (string): Broker topic (required at runtime). - -#### Broker Metrics - -See [Observability](docs/observability.md) for the full list of broker metrics exposed on the `/metrics` endpoint. - -### Kubernetes (`spec.clients.kubernetes`) - -- `apiVersion` (string): Kubernetes API version. -- `kubeConfigPath` (string): Path to kubeconfig (empty uses in-cluster auth). -- `qps` (float): Client-side QPS limit (0 uses defaults). -- `burst` (int): Client-side burst limit (0 uses defaults). 
- -## Command-line parameters - -The following CLI flags override YAML values: - -- `--debug-config` -> `spec.debugConfig` -- `--maestro-grpc-server-address` -> `spec.clients.maestro.grpcServerAddress` -- `--maestro-http-server-address` -> `spec.clients.maestro.httpServerAddress` -- `--maestro-source-id` -> `spec.clients.maestro.sourceId` -- `--maestro-client-id` -> `spec.clients.maestro.clientId` -- `--maestro-ca-file` -> `spec.clients.maestro.auth.tlsConfig.caFile` -- `--maestro-cert-file` -> `spec.clients.maestro.auth.tlsConfig.certFile` -- `--maestro-key-file` -> `spec.clients.maestro.auth.tlsConfig.keyFile` -- `--maestro-timeout` -> `spec.clients.maestro.timeout` -- `--maestro-insecure` -> `spec.clients.maestro.insecure` -- `--hyperfleet-api-timeout` -> `spec.clients.hyperfleetApi.timeout` -- `--hyperfleet-api-retry` -> `spec.clients.hyperfleetApi.retryAttempts` - -## Environment variables - -All deployment overrides use the `HYPERFLEET_` prefix unless noted. - -- `HYPERFLEET_DEBUG_CONFIG` -> `spec.debugConfig` -- `HYPERFLEET_MAESTRO_GRPC_SERVER_ADDRESS` -> `spec.clients.maestro.grpcServerAddress` -- `HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS` -> `spec.clients.maestro.httpServerAddress` -- `HYPERFLEET_MAESTRO_SOURCE_ID` -> `spec.clients.maestro.sourceId` -- `HYPERFLEET_MAESTRO_CLIENT_ID` -> `spec.clients.maestro.clientId` -- `HYPERFLEET_MAESTRO_CA_FILE` -> `spec.clients.maestro.auth.tlsConfig.caFile` -- `HYPERFLEET_MAESTRO_CERT_FILE` -> `spec.clients.maestro.auth.tlsConfig.certFile` -- `HYPERFLEET_MAESTRO_KEY_FILE` -> `spec.clients.maestro.auth.tlsConfig.keyFile` -- `HYPERFLEET_MAESTRO_TIMEOUT` -> `spec.clients.maestro.timeout` -- `HYPERFLEET_MAESTRO_RETRY_ATTEMPTS` -> `spec.clients.maestro.retryAttempts` -- `HYPERFLEET_MAESTRO_INSECURE` -> `spec.clients.maestro.insecure` -- `HYPERFLEET_API_BASE_URL` -> `spec.clients.hyperfleetApi.baseUrl` -- `HYPERFLEET_API_VERSION` -> `spec.clients.hyperfleetApi.version` -- `HYPERFLEET_API_TIMEOUT` -> 
`spec.clients.hyperfleetApi.timeout` -- `HYPERFLEET_API_RETRY_ATTEMPTS` -> `spec.clients.hyperfleetApi.retryAttempts` -- `HYPERFLEET_API_RETRY_BACKOFF` -> `spec.clients.hyperfleetApi.retryBackoff` -- `HYPERFLEET_BROKER_SUBSCRIPTION_ID` -> `spec.clients.broker.subscriptionId` -- `HYPERFLEET_BROKER_TOPIC` -> `spec.clients.broker.topic` - -Legacy broker environment variables (used only if the prefixed version is unset): - -- `BROKER_SUBSCRIPTION_ID` -> `spec.clients.broker.subscriptionId` -- `BROKER_TOPIC` -> `spec.clients.broker.topic` diff --git a/docs/adapter-authoring-guide.md b/docs/adapter-authoring-guide.md index 41cc920..f57a392 100644 --- a/docs/adapter-authoring-guide.md +++ b/docs/adapter-authoring-guide.md @@ -18,13 +18,13 @@ customer updates cluster -> event -> adapter task -> k8s object performs work -> ### What you produce -Every adapter requires three YAML files: +Every adapter requires configuring 3 main elements: -| File | Kind | Purpose | -|------|------|---------| -| `adapter-config.yaml` | `AdapterConfig` | Deployment settings: API client config, broker subscription, timeouts, retries | -| `adapter-task-config.yaml` | `AdapterTaskConfig` | Business logic: what to extract, check, create, and report | -| `broker.yaml` | `` | Broker configuration: Configures broker system (pubsub, rabbitmq) | +| Concern | Purpose | +|-------|---------| +| Adapter Config | Deployment settings: API client config, broker subscription, timeouts, retries | +| Adapter Task Config | Business logic: what to extract, check, create, and report | +| Broker config | Broker configuration: Configures broker system (pubsub, rabbitmq) | The `AdapterConfig` is pretty straightforward, it defines the name of the adapter as well as client configs to interact with HyperFleet API, Kubernetes or Maestro. 
@@ -126,21 +126,12 @@ Three languages appear in adapter configs, each for a different purpose: ### File skeleton ```yaml -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: my-adapter # Unique adapter name (used in status reports and logs) - labels: - hyperfleet.io/adapter-type: my-adapter - hyperfleet.io/component: adapter - -spec: - params: [] # Phase 1: Extract variables from event and environment - preconditions: [] # Phase 2: Validate state via API calls - resources: [] # Phase 3: Create/update Kubernetes resources - post: # Phase 4: Report status - payloads: [] # Build status JSON - postActions: [] # Send status to API +params: [] # Phase 1: Extract variables from event and environment +preconditions: [] # Phase 2: Validate state via API calls +resources: [] # Phase 3: Create/update Kubernetes resources +post: # Phase 4: Report status + payloads: [] # Build status JSON + post_actions: [] # Send status to API ``` ### Execution flow and error handling @@ -181,24 +172,23 @@ The `adapter.*` context is populated automatically and available in your post-ac Parameters are variables extracted from the incoming CloudEvent and the runtime environment. They become available as Go Template variables (`{{ .paramName }}`) and CEL variables throughout the rest of the config. 
```yaml -spec: - params: - # From the CloudEvent data - - name: "clusterId" - source: "event.id" - type: "string" - required: true - - - name: "generation" - source: "event.generation" - type: "int" - required: true - - # From environment variables (set in Helm values or deployment) - - name: "region" - source: "env.REGION" - type: "string" - default: "us-east-1" +params: + # From the CloudEvent data + - name: "clusterId" + source: "event.id" + type: "string" + required: true + + - name: "generation" + source: "event.generation" + type: "int" + required: true + + # From environment variables (set in Helm values or deployment) + - name: "region" + source: "env.REGION" + type: "string" + default: "us-east-1" ``` ### Sources @@ -242,15 +232,15 @@ The state of the cluster contains information about all adapters in the form of ```yaml preconditions: - name: "clusterStatus" - apiCall: + api_call: method: "GET" url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}" timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" # also: linear, constant + retry_attempts: 3 + retry_backoff: "exponential" # also: linear, constant ``` -URLs are **relative** — the base URL comes from the `AdapterConfig` `clients.hyperfleetApi.baseUrl` setting. You only write the path. +URLs are **relative** — the base URL comes from the `AdapterConfig` `clients.hyperfleet_api.base_url` setting. You only write the path. ### Capturing fields @@ -321,14 +311,14 @@ Preconditions execute in order. 
Data flows forward — a captured field from pre ```yaml preconditions: - name: "getCluster" - apiCall: + api_call: url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}" capture: - name: "clusterName" field: "name" - name: "getStatuses" - apiCall: + api_call: url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}/statuses" capture: - name: "lzReady" @@ -358,15 +348,15 @@ resources: apiVersion: v1 kind: Namespace metadata: - name: "{{ .clusterId | lower }}" + name: "{{ .clusterId }}" labels: hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" + hyperfleet.io/managed-by: "{{ .adapter.name }}" hyperfleet.io/resource-type: "namespace" annotations: hyperfleet.io/generation: "{{ .generation }}" discovery: - byName: "{{ .clusterId | lower }}" + by_name: "{{ .clusterId }}" ``` ### External manifest files @@ -382,8 +372,8 @@ resources: ref: "/etc/adapter/job.yaml" discovery: namespace: "{{ .clusterId }}" - bySelectors: - labelSelector: + by_selectors: + label_selector: hyperfleet.io/cluster-id: "{{ .clusterId }}" hyperfleet.io/resource-type: "job" ``` @@ -401,7 +391,7 @@ The framework determines the operation automatically: | `create` | Resource doesn't exist | Apply the manifest | | `update` | Resource exists, generation changed | Patch the resource | | `skip` | Resource exists, generation unchanged | No-op (idempotent) | -| `recreate` | `recreateOnChange: true` is set | Delete then create | +| `recreate` | `recreate_on_change: true` is set | Delete then create | ### Discovery @@ -412,13 +402,13 @@ Two discovery modes: ```yaml # By name (direct lookup) discovery: - byName: "{{ .clusterId | lower }}" + by_name: "{{ .clusterId }}" # By label selector discovery: - namespace: "{{ .clusterId }}" # omit for all namespaces / cluster-scoped - bySelectors: - labelSelector: + namespace: "{{ .clusterId }}" # omit or "*" for cluster-scoped + by_selectors: + label_selector: hyperfleet.io/cluster-id: "{{ .clusterId }}" hyperfleet.io/resource-type: 
"namespace" ``` @@ -470,7 +460,7 @@ resources: transport: client: "maestro" maestro: - targetCluster: "{{ .placementClusterName }}" + target_cluster: "{{ .placementClusterName }}" manifest: apiVersion: work.open-cluster-management.io/v1 kind: ManifestWork @@ -484,7 +474,7 @@ resources: - apiVersion: v1 kind: Namespace metadata: - name: "{{ .clusterId | lower }}" + name: "{{ .clusterId }}" labels: hyperfleet.io/cluster-id: "{{ .clusterId }}" hyperfleet.io/resource-type: "namespace" @@ -492,14 +482,14 @@ resources: kind: ConfigMap metadata: name: "{{ .clusterId }}-config" - namespace: "{{ .clusterId | lower }}" + namespace: "{{ .clusterId }}" data: cluster_id: "{{ .clusterId }}" manifestConfigs: - resourceIdentifier: group: "" resource: "namespaces" - name: "{{ .clusterId | lower }}" + name: "{{ .clusterId }}" updateStrategy: type: "ServerSideApply" feedbackRules: @@ -508,8 +498,8 @@ resources: - name: "phase" path: ".status.phase" discovery: - bySelectors: - labelSelector: + by_selectors: + label_selector: hyperfleet.io/cluster-id: "{{ .clusterId }}" ``` @@ -517,18 +507,18 @@ resources: #### Nested discovery (Maestro) -A ManifestWork bundles multiple sub-resources. To inspect those sub-resources individually in your post-action CEL expressions without traversing the whole resources tree, you can use `nestedDiscoveries`: +A ManifestWork bundles multiple sub-resources. To inspect those sub-resources individually in your post-action CEL expressions without traversing the whole resources tree, you can use `nested_discoveries`: ```yaml - nestedDiscoveries: + nested_discoveries: - name: "namespace0" discovery: - bySelectors: - labelSelector: + by_selectors: + label_selector: hyperfleet.io/resource-type: "namespace" - name: "configmap0" discovery: - byName: "{{ .clusterId }}-config" + by_name: "{{ .clusterId }}-config" ``` Nested discoveries are **promoted to top-level keys** in the `resources` map. 
Access them as `resources.namespace0`, not `resources.clusterSetup.namespace0`. This keeps CEL expressions clean. @@ -721,7 +711,7 @@ post: payloads: - name: "statusPayload" build: - adapter: "{{ .metadata.name }}" + adapter: "{{ .adapter.name }}" conditions: - type: "Applied" status: @@ -757,9 +747,9 @@ post: expression: "generation" observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - postActions: + post_actions: - name: "reportStatus" - apiCall: + api_call: method: "POST" url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}/statuses" body: "{{ .statusPayload }}" @@ -1038,10 +1028,10 @@ NodePool adapters typically wait for the parent cluster to be fully set up. Quer ```yaml preconditions: - name: "nodepoolStatus" - apiCall: + api_call: url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}/nodepools/{{ .nodepoolId }}" capture: - - name: "generationSpec" + - name: "generation" field: "generation" - name: "readyStatus" expression: | @@ -1054,7 +1044,7 @@ preconditions: value: "False" - name: "clusterAdapterStatus" - apiCall: + api_call: url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}/statuses" capture: - name: "clusterNamespaceStatus" @@ -1072,9 +1062,9 @@ preconditions: Post-actions target the NodePool status endpoint instead of the cluster one: ```yaml -postActions: +post_actions: - name: "reportNodepoolStatus" - apiCall: + api_call: method: "POST" url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}/nodepools/{{ .nodepoolId }}/statuses" body: "{{ .nodepoolStatusPayload }}" @@ -1094,7 +1084,7 @@ The framework validates your config at load time in two passes: - Required fields present (`name`, `source`, `method`, etc.) 
- Valid operator values -- Mutual exclusivity (`field` vs `expression`, `build` vs `buildRef`) +- Mutual exclusivity (`field` vs `expression`, `build` vs `build_ref`) - Valid Kubernetes resource names **Semantic validation** — checked by default (can be skipped): @@ -1137,24 +1127,20 @@ The adapter will run preconditions, skip straight to post-actions, and report st
Example minimal adapter-config ```yaml -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: my-adapter -spec: - adapter: - version: "0.1.0" - clients: - hyperfleetApi: - baseUrl: "http://hyperfleet-api:8000" - timeout: 10s - retryAttempts: 3 - retryBackoff: exponential - broker: - subscriptionId: "my-adapter-sub" - topic: "cluster-events" - kubernetes: - apiVersion: "v1" + version: "0.1.0" +clients: + hyperfleet_api: + base_url: "http://hyperfleet-api:8000" + timeout: 10s + retry_attempts: 3 + retry_backoff: exponential + broker: + subscription_id: "my-adapter-sub" + topic: "cluster-events" + kubernetes: + api_version: "v1" ```
@@ -1169,7 +1155,7 @@ spec: More information about deployment can be found in [Architecture repository - HyperFleet Adapter Framework - Deployment Guide](https://github.com/openshift-hyperfleet/architecture/blob/main/hyperfleet/components/adapter/framework/adapter-deployment.md) -5. **Verify broker metrics** — the adapter automatically exposes broker metrics on the `/metrics` endpoint (port 9090). No additional configuration is needed. See [Observability](observability.md) for the full list of available metrics. +1. **Verify broker metrics** — the adapter automatically exposes broker metrics on the `/metrics` endpoint (port 9090). No additional configuration is needed. See [Observability](observability.md) for the full list of available metrics. --- @@ -1200,7 +1186,7 @@ condition ? "yes" : "no" "prefix-" + clusterId + "-suffix" # Numeric comparison (use expression for observed_generation) -generationSpec +generation # JSON serialization (debugging) toJson(resources.resource0) @@ -1256,7 +1242,7 @@ has(resources.namespace0) && has(resources.configmap0) {{ .variableName }} Variable interpolation {{ .clusterId | lower }} Lowercase filter {{ now | date "2006-01-02T15:04:05Z07:00" }} Current timestamp (RFC 3339) -{{ .metadata.name }} Adapter name from config metadata +{{ .adapter.name }} Adapter name from config ``` Go Templates are used in: URLs, manifest field values, direct string values in payloads, and external template files. @@ -1288,6 +1274,6 @@ Go Templates are used in: URLs, manifest field values, direct string values in p | Status update rejected by API | Stale `observed_generation` | Your adapter is reporting an older generation than what's already stored. Ensure `observed_generation` uses the generation from the API response, not the event. | | `template variable not found` | Variable referenced in `{{ .foo }}` but never defined | Add `foo` to params or captures. Check spelling. 
| | `CEL expression parse error` | Invalid CEL syntax | Verify parentheses, string quoting, and optional chaining syntax (`?.` for safe field access). | -| Discovery returns empty | Labels don't match or wrong namespace | Verify `discovery.namespace` is correct. Use `byName` for a simpler lookup. Check resource labels match the selector exactly. | +| Discovery returns empty | Labels don't match or wrong namespace | Verify `discovery.namespace` is correct. Use `by_name` for a simpler lookup. Check resource labels match the selector exactly. | | `observed_generation` is a string | Using Go Template instead of CEL expression | Use `expression: "generation"` instead of `"{{ .generation }}"`. | | Post-action API call returns 404 | Wrong status endpoint path | Cluster statuses: `/clusters/{id}/statuses`. NodePool statuses: `/clusters/{id}/nodepools/{id}/statuses`. | diff --git a/docs/configuration.md b/docs/configuration.md new file mode 100644 index 0000000..e77ab2b --- /dev/null +++ b/docs/configuration.md @@ -0,0 +1,231 @@ +# Adapter Configuration Reference + +This document describes the deployment-level `AdapterConfig` options and how to set them +in three formats: YAML, command-line flags, and environment variables. + +Overrides are applied in this order: CLI flags > environment variables > YAML file > defaults. + +## Config file location + +You can point the adapter at a deployment config file with either: + +- CLI: `--config` (or `-c`) +- Env: `HYPERFLEET_ADAPTER_CONFIG` + +Task config is separate (`--task-config` / `HYPERFLEET_TASK_CONFIG`) and not covered here. + +## YAML options (AdapterConfig) + +All fields use **snake_case** naming. 
+ +```yaml +adapter: + name: example-adapter + version: "0.1.0" + +debug_config: false + +log: + level: "info" + format: "json" + output: "stdout" + +clients: + maestro: + grpc_server_address: "maestro-grpc.maestro.svc.cluster.local:8090" + http_server_address: "https://maestro-api.maestro.svc.cluster.local" + source_id: "hyperfleet-adapter" + client_id: "hyperfleet-adapter-client" + auth: + type: "tls" + tls_config: + ca_file: "/etc/maestro/certs/grpc/ca.crt" + cert_file: "/etc/maestro/certs/grpc/client.crt" + key_file: "/etc/maestro/certs/grpc/client.key" + http_ca_file: "/etc/maestro/certs/https/ca.crt" + timeout: "30s" + server_healthiness_timeout: "20s" + retry_attempts: 3 + keepalive: + time: "30s" + timeout: "10s" + insecure: false + hyperfleet_api: + base_url: "http://hyperfleet-api:8000" + version: "v1" + timeout: "10s" + retry_attempts: 3 + retry_backoff: "exponential" + base_delay: "1s" + max_delay: "30s" + default_headers: + X-Example: "value" + broker: + subscription_id: "example-subscription" + topic: "example-topic" + kubernetes: + api_version: "v1" + kube_config_path: "/path/to/kubeconfig" + qps: 100 + burst: 200 +``` + +### Top-level fields + +- `adapter.name` (string, required): Adapter name. +- `adapter.version` (string, optional): when set, the binary validates it matches the running version. +- `debug_config` (bool, optional): Log the merged config after load. Default: `false`. + +### Logging (`log`) + +- `log.level` (string, optional): Log level (`debug`, `info`, `warn`, `error`). Default: `info`. +- `log.format` (string, optional): Log format (`text`, `json`). Default: `text`. +- `log.output` (string, optional): Log output destination (`stdout`, `stderr`). Default: `stdout`. + +### Maestro client (`clients.maestro`) + +- `grpc_server_address` (string): Maestro gRPC endpoint. +- `http_server_address` (string): Maestro HTTP API endpoint. +- `source_id` (string): CloudEvents source identifier. +- `client_id` (string): Maestro client identifier. 
+- `auth.type` (string): Authentication type (`tls` or `none`). +- `auth.tls_config.ca_file` (string): gRPC CA certificate path. +- `auth.tls_config.cert_file` (string): gRPC client certificate path. +- `auth.tls_config.key_file` (string): gRPC client key path. +- `auth.tls_config.http_ca_file` (string, optional): CA certificate for the HTTP API. Falls back to `ca_file` if unset. +- `timeout` (duration string): Request timeout (e.g. `30s`). +- `server_healthiness_timeout` (duration string, optional): Timeout for the server healthiness check (e.g. `20s`). +- `retry_attempts` (int): Number of retry attempts. +- `keepalive.time` (duration string): gRPC keepalive ping interval. +- `keepalive.timeout` (duration string): gRPC keepalive ping timeout. +- `insecure` (bool): Allow insecure connection. + +### HyperFleet API client (`clients.hyperfleet_api`) + +- `base_url` (string): Base URL for HyperFleet API requests. +- `version` (string): API version. Default: `v1`. +- `timeout` (duration string): HTTP client timeout. Default: `10s`. +- `retry_attempts` (int): Retry attempts. Default: `3`. +- `retry_backoff` (string): Backoff strategy (`exponential`, `linear`, `constant`). Default: `exponential`. +- `base_delay` (duration string): Initial retry delay. Default: `1s`. +- `max_delay` (duration string): Maximum retry delay. Default: `30s`. +- `default_headers` (map[string]string): Headers added to all API requests. + +### Broker (`clients.broker`) + +- `subscription_id` (string): Broker subscription ID (required at runtime). +- `topic` (string): Broker topic (required at runtime). + +### Kubernetes (`clients.kubernetes`) + +- `api_version` (string): Kubernetes API version. +- `kube_config_path` (string): Path to kubeconfig (empty uses in-cluster auth). +- `qps` (float): Client-side QPS limit (0 uses defaults). +- `burst` (int): Client-side burst limit (0 uses defaults). 
+ +## Command-line parameters + +The following CLI flags override YAML values: + +**General** + +- `--debug-config` -> `debug_config` +- `--log-level` -> `log.level` +- `--log-format` -> `log.format` +- `--log-output` -> `log.output` + +**Maestro** + +- `--maestro-grpc-server-address` -> `clients.maestro.grpc_server_address` +- `--maestro-http-server-address` -> `clients.maestro.http_server_address` +- `--maestro-source-id` -> `clients.maestro.source_id` +- `--maestro-client-id` -> `clients.maestro.client_id` +- `--maestro-auth-type` -> `clients.maestro.auth.type` +- `--maestro-ca-file` -> `clients.maestro.auth.tls_config.ca_file` +- `--maestro-cert-file` -> `clients.maestro.auth.tls_config.cert_file` +- `--maestro-key-file` -> `clients.maestro.auth.tls_config.key_file` +- `--maestro-http-ca-file` -> `clients.maestro.auth.tls_config.http_ca_file` +- `--maestro-timeout` -> `clients.maestro.timeout` +- `--maestro-server-healthiness-timeout` -> `clients.maestro.server_healthiness_timeout` +- `--maestro-retry-attempts` -> `clients.maestro.retry_attempts` +- `--maestro-keepalive-time` -> `clients.maestro.keepalive.time` +- `--maestro-keepalive-timeout` -> `clients.maestro.keepalive.timeout` +- `--maestro-insecure` -> `clients.maestro.insecure` + +**HyperFleet API** + +- `--hyperfleet-api-base-url` -> `clients.hyperfleet_api.base_url` +- `--hyperfleet-api-version` -> `clients.hyperfleet_api.version` +- `--hyperfleet-api-timeout` -> `clients.hyperfleet_api.timeout` +- `--hyperfleet-api-retry` -> `clients.hyperfleet_api.retry_attempts` +- `--hyperfleet-api-retry-backoff` -> `clients.hyperfleet_api.retry_backoff` +- `--hyperfleet-api-base-delay` -> `clients.hyperfleet_api.base_delay` +- `--hyperfleet-api-max-delay` -> `clients.hyperfleet_api.max_delay` + +**Broker** + +- `--broker-subscription-id` -> `clients.broker.subscription_id` +- `--broker-topic` -> `clients.broker.topic` + +**Kubernetes** + +- `--kubernetes-api-version` -> `clients.kubernetes.api_version` +- 
`--kubernetes-kube-config-path` -> `clients.kubernetes.kube_config_path`
+- `--kubernetes-qps` -> `clients.kubernetes.qps`
+- `--kubernetes-burst` -> `clients.kubernetes.burst`
+
+## Environment variables
+
+All deployment overrides use the `HYPERFLEET_` prefix unless noted; the `LOG_*` variables below are intentionally unprefixed.
+
+**General**
+
+- `HYPERFLEET_DEBUG_CONFIG` -> `debug_config`
+- `LOG_LEVEL` -> `log.level`
+- `LOG_FORMAT` -> `log.format`
+- `LOG_OUTPUT` -> `log.output`
+
+**Maestro**
+
+- `HYPERFLEET_MAESTRO_GRPC_SERVER_ADDRESS` -> `clients.maestro.grpc_server_address`
+- `HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS` -> `clients.maestro.http_server_address`
+- `HYPERFLEET_MAESTRO_SOURCE_ID` -> `clients.maestro.source_id`
+- `HYPERFLEET_MAESTRO_CLIENT_ID` -> `clients.maestro.client_id`
+- `HYPERFLEET_MAESTRO_AUTH_TYPE` -> `clients.maestro.auth.type`
+- `HYPERFLEET_MAESTRO_CA_FILE` -> `clients.maestro.auth.tls_config.ca_file`
+- `HYPERFLEET_MAESTRO_CERT_FILE` -> `clients.maestro.auth.tls_config.cert_file`
+- `HYPERFLEET_MAESTRO_KEY_FILE` -> `clients.maestro.auth.tls_config.key_file`
+- `HYPERFLEET_MAESTRO_HTTP_CA_FILE` -> `clients.maestro.auth.tls_config.http_ca_file`
+- `HYPERFLEET_MAESTRO_TIMEOUT` -> `clients.maestro.timeout`
+- `HYPERFLEET_MAESTRO_SERVER_HEALTHINESS_TIMEOUT` -> `clients.maestro.server_healthiness_timeout`
+- `HYPERFLEET_MAESTRO_RETRY_ATTEMPTS` -> `clients.maestro.retry_attempts`
+- `HYPERFLEET_MAESTRO_KEEPALIVE_TIME` -> `clients.maestro.keepalive.time`
+- `HYPERFLEET_MAESTRO_KEEPALIVE_TIMEOUT` -> `clients.maestro.keepalive.timeout`
+- `HYPERFLEET_MAESTRO_INSECURE` -> `clients.maestro.insecure`
+
+**HyperFleet API**
+
+- `HYPERFLEET_API_BASE_URL` -> `clients.hyperfleet_api.base_url`
+- `HYPERFLEET_API_VERSION` -> `clients.hyperfleet_api.version`
+- `HYPERFLEET_API_TIMEOUT` -> `clients.hyperfleet_api.timeout`
+- `HYPERFLEET_API_RETRY_ATTEMPTS` -> `clients.hyperfleet_api.retry_attempts`
+- `HYPERFLEET_API_RETRY_BACKOFF` -> `clients.hyperfleet_api.retry_backoff`
+- `HYPERFLEET_API_BASE_DELAY` -> 
`clients.hyperfleet_api.base_delay` +- `HYPERFLEET_API_MAX_DELAY` -> `clients.hyperfleet_api.max_delay` + +**Broker** + +- `HYPERFLEET_BROKER_SUBSCRIPTION_ID` -> `clients.broker.subscription_id` +- `HYPERFLEET_BROKER_TOPIC` -> `clients.broker.topic` + +**Kubernetes** + +- `HYPERFLEET_KUBERNETES_API_VERSION` -> `clients.kubernetes.api_version` +- `HYPERFLEET_KUBERNETES_KUBE_CONFIG_PATH` -> `clients.kubernetes.kube_config_path` +- `HYPERFLEET_KUBERNETES_QPS` -> `clients.kubernetes.qps` +- `HYPERFLEET_KUBERNETES_BURST` -> `clients.kubernetes.burst` + +Legacy broker environment variables (used only if the prefixed version is unset): + +- `BROKER_SUBSCRIPTION_ID` -> `clients.broker.subscription_id` +- `BROKER_TOPIC` -> `clients.broker.topic` diff --git a/internal/config_loader/README.md b/internal/config_loader/README.md index ee2a7e9..328ac6a 100644 --- a/internal/config_loader/README.md +++ b/internal/config_loader/README.md @@ -37,10 +37,10 @@ config, err := config_loader.Load("config.yaml", config_loader.WithAdapterVersio ```go // Metadata -config.Metadata.Name +config.Adapter.Name // API config -timeout := config.Spec.Clients.HyperfleetAPI.Timeout +timeout := config.Clients.HyperfleetAPI.Timeout // Query helpers config.GetRequiredParams() @@ -52,23 +52,21 @@ config.GetPostActionByName("reportStatus") ## Configuration Structure ```yaml -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +# adapter-config.yaml (deployment config) +adapter: name: example-adapter - namespace: hyperfleet-system -spec: - adapter: - version: "0.1.0" - clients: - hyperfleetApi: - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential - params: [...] - preconditions: [...] - resources: [...] - post: {...} + version: "0.1.0" +clients: + hyperfleet_api: + timeout: 2s + retry_attempts: 3 + retry_backoff: exponential + +# adapter-task-config.yaml (task config — merged at runtime) +params: [...] +preconditions: [...] +resources: [...] 
+post: {...} ``` See `configs/adapter-task-config-template.yaml` for the complete configuration reference. @@ -116,11 +114,12 @@ Operator string `yaml:"operator" validate:"required,validoperator"` ### Error Messages Validation errors are descriptive: -``` -spec.params[0].name is required -spec.preconditions[1].apiCall.method "INVALID" is invalid (allowed: GET, POST, PUT, PATCH, DELETE) -spec.resources[0].name "my-resource": must start with lowercase letter and contain only letters, numbers, underscores (no hyphens) -spec.preconditions[0].capture[0]: must have either 'field' or 'expression' set + +```text +params[0].name is required +preconditions[1].api_call.method "INVALID" is invalid (allowed: GET, POST, PUT, PATCH, DELETE) +resources[0].name "my-resource": must start with lowercase letter and contain only letters, numbers, underscores (no hyphens) +preconditions[0].capture[0]: must have either 'field' or 'expression' set ``` ## Types @@ -146,7 +145,7 @@ The package uses struct embedding to reduce duplication: // ActionBase - common fields for actions (preconditions, post-actions) type ActionBase struct { Name string `yaml:"name" validate:"required"` - APICall *APICall `yaml:"apiCall,omitempty"` + APICall *APICall `yaml:"api_call,omitempty"` } // FieldExpressionDef - field OR expression (mutually exclusive) diff --git a/internal/config_loader/accessors.go b/internal/config_loader/accessors.go index c10fa0b..65bdfc7 100644 --- a/internal/config_loader/accessors.go +++ b/internal/config_loader/accessors.go @@ -10,8 +10,7 @@ import ( // builtinVariables is the list of built-in variables always available in templates/CEL var builtinVariables = []string{ - "metadata", "metadata.name", "metadata.namespace", "metadata.labels", - "now", "date", + "adapter", "config", "now", "date", } // BuiltinVariables returns the list of built-in variables always available in templates/CEL @@ -25,8 +24,8 @@ func BuiltinVariables() []string { // GetDefinedVariables returns all variables 
defined in the config that can be used // in templates and CEL expressions. This includes: -// - Built-in variables (metadata, now, date) -// - Parameters from spec.params +// - Built-in variables (adapter, now, date) +// - Parameters from params // - Captured variables from preconditions // - Post payloads // - Resource aliases (resources.) @@ -42,15 +41,15 @@ func (c *Config) GetDefinedVariables() map[string]bool { vars[b] = true } - // Parameters from spec.params - for _, p := range c.Spec.Params { + // Parameters from params + for _, p := range c.Params { if p.Name != "" { vars[p.Name] = true } } // Variables from precondition captures - for _, precond := range c.Spec.Preconditions { + for _, precond := range c.Preconditions { for _, capture := range precond.Capture { if capture.Name != "" { vars[capture.Name] = true @@ -59,8 +58,8 @@ func (c *Config) GetDefinedVariables() map[string]bool { } // Post payloads - if c.Spec.Post != nil { - for _, p := range c.Spec.Post.Payloads { + if c.Post != nil { + for _, p := range c.Post.Payloads { if p.Name != "" { vars[p.Name] = true } @@ -68,7 +67,7 @@ func (c *Config) GetDefinedVariables() map[string]bool { } // Resource aliases - for _, r := range c.Spec.Resources { + for _, r := range c.Resources { if r.Name != "" { vars[FieldResources+"."+r.Name] = true } @@ -77,26 +76,26 @@ func (c *Config) GetDefinedVariables() map[string]bool { return vars } -// GetParamByName returns a parameter by name from spec.params, or nil if not found +// GetParamByName returns a parameter by name from params, or nil if not found func (c *Config) GetParamByName(name string) *Parameter { if c == nil { return nil } - for i := range c.Spec.Params { - if c.Spec.Params[i].Name == name { - return &c.Spec.Params[i] + for i := range c.Params { + if c.Params[i].Name == name { + return &c.Params[i] } } return nil } -// GetRequiredParams returns all parameters marked as required from spec.params +// GetRequiredParams returns all parameters marked as 
required from params func (c *Config) GetRequiredParams() []Parameter { if c == nil { return nil } var required []Parameter - for _, p := range c.Spec.Params { + for _, p := range c.Params { if p.Required { required = append(required, p) } @@ -109,9 +108,9 @@ func (c *Config) GetResourceByName(name string) *Resource { if c == nil { return nil } - for i := range c.Spec.Resources { - if c.Spec.Resources[i].Name == name { - return &c.Spec.Resources[i] + for i := range c.Resources { + if c.Resources[i].Name == name { + return &c.Resources[i] } } return nil @@ -122,9 +121,9 @@ func (c *Config) GetPreconditionByName(name string) *Precondition { if c == nil { return nil } - for i := range c.Spec.Preconditions { - if c.Spec.Preconditions[i].Name == name { - return &c.Spec.Preconditions[i] + for i := range c.Preconditions { + if c.Preconditions[i].Name == name { + return &c.Preconditions[i] } } return nil @@ -132,12 +131,12 @@ func (c *Config) GetPreconditionByName(name string) *Precondition { // GetPostActionByName returns a post action by name, or nil if not found func (c *Config) GetPostActionByName(name string) *PostAction { - if c == nil || c.Spec.Post == nil { + if c == nil || c.Post == nil { return nil } - for i := range c.Spec.Post.PostActions { - if c.Spec.Post.PostActions[i].Name == name { - return &c.Spec.Post.PostActions[i] + for i := range c.Post.PostActions { + if c.Post.PostActions[i].Name == name { + return &c.Post.PostActions[i] } } return nil @@ -148,8 +147,8 @@ func (c *Config) ParamNames() []string { if c == nil { return nil } - names := make([]string, len(c.Spec.Params)) - for i, p := range c.Spec.Params { + names := make([]string, len(c.Params)) + for i, p := range c.Params { names[i] = p.Name } return names @@ -160,8 +159,8 @@ func (c *Config) ResourceNames() []string { if c == nil { return nil } - names := make([]string, len(c.Spec.Resources)) - for i, r := range c.Spec.Resources { + names := make([]string, len(c.Resources)) + for i, r := range 
c.Resources { names[i] = r.Name } return names diff --git a/internal/config_loader/constants.go b/internal/config_loader/constants.go index 453e872..ac4189e 100644 --- a/internal/config_loader/constants.go +++ b/internal/config_loader/constants.go @@ -4,16 +4,10 @@ package config_loader // These constants define the known field names used in adapter configuration // to avoid hardcoding strings throughout the codebase. -// Top-level field names -const ( - FieldSpec = "spec" - FieldMetadata = "metadata" -) - -// Spec section field names +// Field names const ( FieldAdapter = "adapter" - FieldHyperfleetAPI = "hyperfleetApi" + FieldHyperfleetAPI = "hyperfleet_api" FieldKubernetes = "kubernetes" FieldParams = "params" FieldPreconditions = "preconditions" @@ -40,12 +34,12 @@ const ( const ( FieldPayloads = "payloads" FieldBuild = "build" - FieldBuildRef = "buildRef" + FieldBuildRef = "build_ref" ) // Precondition field names const ( - FieldAPICall = "apiCall" + FieldAPICall = "api_call" FieldCapture = "capture" FieldConditions = "conditions" FieldExpression = "expression" @@ -78,7 +72,7 @@ const ( FieldTransport = "transport" FieldClient = "client" FieldMaestro = "maestro" - FieldTargetCluster = "targetCluster" + FieldTargetCluster = "target_cluster" ) // Transport client types @@ -90,9 +84,9 @@ const ( // Resource field names const ( FieldManifest = "manifest" - FieldRecreateOnChange = "recreateOnChange" + FieldRecreateOnChange = "recreate_on_change" FieldDiscovery = "discovery" - FieldNestedDiscoveries = "nestedDiscoveries" + FieldNestedDiscoveries = "nested_discoveries" ) // Manifest reference field names @@ -103,18 +97,18 @@ const ( // Discovery field names const ( FieldNamespace = "namespace" - FieldByName = "byName" - FieldBySelectors = "bySelectors" + FieldByName = "by_name" + FieldBySelectors = "by_selectors" ) // Selector field names const ( - FieldLabelSelector = "labelSelector" + FieldLabelSelector = "label_selector" ) // Post config field names const ( - 
FieldPostActions = "postActions" + FieldPostActions = "post_actions" ) // Kubernetes manifest field names diff --git a/internal/config_loader/loader.go b/internal/config_loader/loader.go index f09ae10..ea1d95c 100644 --- a/internal/config_loader/loader.go +++ b/internal/config_loader/loader.go @@ -12,29 +12,12 @@ import ( // Constants // ----------------------------------------------------------------------------- -// API version constants -const ( - APIVersionV1Alpha1 = "hyperfleet.redhat.com/v1alpha1" -) - -// Kind constants for configuration types -const ( - ExpectedKindAdapter = "AdapterConfig" // Deployment config kind - ExpectedKindTask = "AdapterTaskConfig" // Task config kind - ExpectedKindConfig = "Config" // Unified merged config kind -) - // Environment variable for config file paths const ( EnvAdapterConfig = "HYPERFLEET_ADAPTER_CONFIG" // Path to deployment config EnvTaskConfigPath = "HYPERFLEET_TASK_CONFIG" // Path to task config ) -// SupportedAPIVersions contains all supported apiVersion values -var SupportedAPIVersions = []string{ - APIVersionV1Alpha1, -} - // ValidHTTPMethods defines allowed HTTP methods for API calls var ValidHTTPMethods = []string{"GET", "POST", "PUT", "PATCH", "DELETE"} @@ -102,23 +85,16 @@ func LoadConfig(opts ...LoadOption) (*Config, error) { } // 1. 
Load AdapterConfig with Viper (env/CLI overrides) - adapterCfg, err := loadAdapterConfigWithViperGeneric(o.adapterConfigPath, o.flags) + // resolvedAdapterConfigPath is the actual path used (may come from standardConfigPaths fallback) + resolvedAdapterConfigPath, adapterCfg, err := loadAdapterConfigWithViperGeneric(o.adapterConfigPath, o.flags) if err != nil { return nil, fmt.Errorf("failed to load adapter config: %w", err) } - // Get base directory from adapter config path for file references - adapterConfigPath := o.adapterConfigPath - if adapterConfigPath == "" { - adapterConfigPath = os.Getenv(EnvAdapterConfig) - } - adapterBaseDir := "" - if adapterConfigPath != "" { - var errBaseDir error - adapterBaseDir, errBaseDir = getBaseDir(adapterConfigPath) - if errBaseDir != nil { - return nil, fmt.Errorf("failed to get base directory for adapter config: %w", errBaseDir) - } + // Get base directory from the resolved config path for file references + adapterBaseDir, errBaseDir := getBaseDir(resolvedAdapterConfigPath) + if errBaseDir != nil { + return nil, fmt.Errorf("failed to get base directory for adapter config: %w", errBaseDir) } // Validate AdapterConfig structure @@ -193,9 +169,9 @@ func LoadConfig(opts ...LoadOption) (*Config, error) { // loadTaskConfigFileReferences loads content from file references into the task config func loadTaskConfigFileReferences(config *AdapterTaskConfig, baseDir string) error { - // Load manifest.ref in spec.resources - for i := range config.Spec.Resources { - resource := &config.Spec.Resources[i] + // Load manifest.ref in resources + for i := range config.Resources { + resource := &config.Resources[i] ref := resource.GetManifestRef() if ref == "" { continue @@ -203,21 +179,21 @@ func loadTaskConfigFileReferences(config *AdapterTaskConfig, baseDir string) err content, err := loadYAMLFile(baseDir, ref) if err != nil { - return fmt.Errorf("%s.%s[%d].%s.%s: %w", FieldSpec, FieldResources, i, FieldManifest, FieldRef, err) + return 
fmt.Errorf("%s[%d].%s.%s: %w", FieldResources, i, FieldManifest, FieldRef, err) } // Replace manifest with loaded content resource.Manifest = content } - // Load buildRef in spec.post.payloads - if config.Spec.Post != nil { - for i := range config.Spec.Post.Payloads { - payload := &config.Spec.Post.Payloads[i] + // Load buildRef in post.payloads + if config.Post != nil { + for i := range config.Post.Payloads { + payload := &config.Post.Payloads[i] if payload.BuildRef != "" { content, err := loadYAMLFile(baseDir, payload.BuildRef) if err != nil { - return fmt.Errorf("%s.%s.%s[%d].%s: %w", FieldSpec, FieldPost, FieldPayloads, i, FieldBuildRef, err) + return fmt.Errorf("%s.%s[%d].%s: %w", FieldPost, FieldPayloads, i, FieldBuildRef, err) } payload.BuildRefContent = content } diff --git a/internal/config_loader/loader_test.go b/internal/config_loader/loader_test.go index 1da20a3..e5a8f3b 100644 --- a/internal/config_loader/loader_test.go +++ b/internal/config_loader/loader_test.go @@ -31,53 +31,40 @@ func TestLoadConfig(t *testing.T) { tmpDir := t.TempDir() adapterYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: deployment-config - namespace: hyperfleet-system -spec: - adapter: - version: "0.1.0" - clients: - hyperfleetApi: - baseUrl: "https://test.example.com" - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential - kubernetes: - apiVersion: "v1" + version: "0.1.0" +clients: + hyperfleet_api: + base_url: "https://test.example.com" + timeout: 2s + retry_attempts: 3 + retry_backoff: exponential + kubernetes: + api_version: "v1" ` taskYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter - namespace: hyperfleet-system - labels: - hyperfleet.io/adapter-type: test -spec: - params: - - name: "clusterId" - source: "event.id" - type: "string" - required: true - preconditions: - - name: "clusterStatus" - apiCall: - method: "GET" - url: 
"https://api.example.com/clusters/{{ .clusterId }}" - resources: - - name: "testNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: "test-ns" - discovery: - namespace: "*" - byName: "test-ns" +params: + - name: "clusterId" + source: "event.id" + type: "string" + required: true +preconditions: + - name: "clusterStatus" + api_call: + method: "GET" + url: "https://api.example.com/clusters/{{ .clusterId }}" +resources: + - name: "testNamespace" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "test-ns" + discovery: + namespace: "*" + by_name: "test-ns" ` adapterPath, taskPath := createTestConfigFiles(t, tmpDir, adapterYAML, taskYAML) @@ -92,34 +79,25 @@ spec: require.NotNil(t, config) // Verify merged config fields - assert.Equal(t, "hyperfleet.redhat.com/v1alpha1", config.APIVersion) - assert.Equal(t, "Config", config.Kind) - // Metadata comes from adapter config (takes precedence) - assert.Equal(t, "deployment-config", config.Metadata.Name) // Adapter info comes from adapter config - assert.Equal(t, "0.1.0", config.Spec.Adapter.Version) + assert.Equal(t, "deployment-config", config.Adapter.Name) + assert.Equal(t, "0.1.0", config.Adapter.Version) // Clients config comes from adapter config - assert.Equal(t, "https://test.example.com", config.Spec.Clients.HyperfleetAPI.BaseURL) - assert.Equal(t, 2*time.Second, config.Spec.Clients.HyperfleetAPI.Timeout) + assert.Equal(t, "https://test.example.com", config.Clients.HyperfleetAPI.BaseURL) + assert.Equal(t, 2*time.Second, config.Clients.HyperfleetAPI.Timeout) // Task fields come from task config - require.Len(t, config.Spec.Params, 1) - assert.Equal(t, "clusterId", config.Spec.Params[0].Name) - require.Len(t, config.Spec.Preconditions, 1) - assert.Equal(t, "clusterStatus", config.Spec.Preconditions[0].Name) - require.Len(t, config.Spec.Resources, 1) - assert.Equal(t, "testNamespace", config.Spec.Resources[0].Name) + require.Len(t, config.Params, 1) + assert.Equal(t, "clusterId", 
config.Params[0].Name) + require.Len(t, config.Preconditions, 1) + assert.Equal(t, "clusterStatus", config.Preconditions[0].Name) + require.Len(t, config.Resources, 1) + assert.Equal(t, "testNamespace", config.Resources[0].Name) } func TestLoadConfigMissingAdapterConfig(t *testing.T) { tmpDir := t.TempDir() taskPath := filepath.Join(tmpDir, "task-config.yaml") - err := os.WriteFile(taskPath, []byte(` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: {} -`), 0644) + err := os.WriteFile(taskPath, []byte(`{}`), 0644) require.NoError(t, err) config, err := LoadConfig( @@ -135,18 +113,14 @@ func TestLoadConfigMissingTaskConfig(t *testing.T) { tmpDir := t.TempDir() adapterPath := filepath.Join(tmpDir, "adapter-config.yaml") err := os.WriteFile(adapterPath, []byte(` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: test-adapter -spec: - adapter: - version: "1.0.0" - clients: - hyperfleetApi: - timeout: 5s - kubernetes: - apiVersion: v1 + version: "1.0.0" +clients: + hyperfleet_api: + timeout: 5s + kubernetes: + api_version: v1 `), 0644) require.NoError(t, err) @@ -169,87 +143,39 @@ func TestAdapterConfigValidation(t *testing.T) { { name: "valid minimal adapter config", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: test-adapter -spec: - adapter: - version: "1.0.0" - clients: - hyperfleetApi: - timeout: 5s - kubernetes: - apiVersion: "v1" + version: "1.0.0" +clients: + hyperfleet_api: + timeout: 5s + kubernetes: + api_version: "v1" `, wantError: false, }, { - name: "missing apiVersion", - yaml: ` -kind: AdapterConfig -metadata: - name: test-adapter -spec: - adapter: - version: "1.0.0" -`, - wantError: true, - errorMsg: "apiVersion is required", - }, - { - name: "missing kind", + name: "missing adapter.name", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -metadata: - name: test-adapter -spec: - adapter: - version: 
"1.0.0" -`, - wantError: true, - errorMsg: "kind is required", - }, - { - name: "missing metadata.name", - yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: - namespace: test -spec: - adapter: - version: "1.0.0" +adapter: + version: "1.0.0" +clients: + hyperfleet_api: + timeout: 5s `, wantError: true, - errorMsg: "metadata.name is required", + errorMsg: "name is required", }, { name: "missing adapter.version", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: - name: test-adapter -spec: - adapter: {} -`, - wantError: true, - errorMsg: "spec.adapter.version is required", - }, - { - name: "unsupported apiVersion", - yaml: ` -apiVersion: hyperfleet.redhat.com/v2 -kind: AdapterConfig -metadata: +adapter: name: test-adapter -spec: - adapter: - version: "1.0.0" +clients: + hyperfleet_api: + timeout: 5s `, - wantError: true, - errorMsg: "unsupported apiVersion", + wantError: false, }, } @@ -282,78 +208,35 @@ func TestTaskConfigValidation(t *testing.T) { errorMsg string }{ { - name: "valid minimal task config", - yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: {} -`, + name: "valid minimal task config", + yaml: `{}`, wantError: false, }, { name: "valid task config with params", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - params: - - name: "clusterId" - source: "event.id" - required: true +params: + - name: "clusterId" + source: "event.id" + required: true `, wantError: false, }, - { - name: "missing apiVersion", - yaml: ` -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: {} -`, - wantError: true, - errorMsg: "apiVersion is required", - }, - { - name: "missing kind", - yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -metadata: - name: test-adapter -spec: {} -`, - wantError: true, - errorMsg: "kind is required", - }, { name: "parameter without name", 
yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - params: - - source: "event.id" +params: + - source: "event.id" `, wantError: true, - errorMsg: "spec.params[0].name is required", + errorMsg: "params[0].name is required", }, { name: "parameter without source", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - params: - - name: "clusterId" - required: true +params: + - name: "clusterId" + required: true `, wantError: true, errorMsg: "source is required", @@ -391,61 +274,41 @@ func TestValidatePreconditionsInTaskConfig(t *testing.T) { { name: "valid precondition with API call", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - preconditions: - - name: "checkCluster" - apiCall: - method: "GET" - url: "https://api.example.com/clusters" +preconditions: + - name: "checkCluster" + api_call: + method: "GET" + url: "https://api.example.com/clusters" `, wantError: false, }, { name: "precondition without name", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - preconditions: - - apiCall: - method: "GET" - url: "https://api.example.com/clusters" +preconditions: + - api_call: + method: "GET" + url: "https://api.example.com/clusters" `, wantError: true, - errorMsg: "spec.preconditions[0].name is required", + errorMsg: "preconditions[0].name is required", }, { name: "precondition without apiCall or expression", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - preconditions: - - name: "checkCluster" +preconditions: + - name: "checkCluster" `, wantError: true, - errorMsg: "spec.preconditions[0]: must specify apiCall, conditions", + errorMsg: "preconditions[0]: must specify api_call, conditions", }, { name: "API call without method", yaml: ` 
-apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - preconditions: - - name: "checkCluster" - apiCall: - url: "https://api.example.com/clusters" +preconditions: + - name: "checkCluster" + api_call: + url: "https://api.example.com/clusters" `, wantError: true, errorMsg: "method is required", @@ -453,16 +316,11 @@ spec: { name: "API call with invalid method", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - preconditions: - - name: "checkCluster" - apiCall: - method: "INVALID" - url: "https://api.example.com/clusters" +preconditions: + - name: "checkCluster" + api_call: + method: "INVALID" + url: "https://api.example.com/clusters" `, wantError: true, errorMsg: "is invalid (allowed:", @@ -500,52 +358,37 @@ func TestValidateResourcesInTaskConfig(t *testing.T) { { name: "valid resource with manifest", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - resources: - - name: "testNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: "test-ns" - discovery: - namespace: "*" - byName: "test-ns" +resources: + - name: "testNamespace" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "test-ns" + discovery: + namespace: "*" + by_name: "test-ns" `, wantError: false, }, { name: "resource without name", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - resources: - - manifest: - apiVersion: v1 - kind: Namespace +resources: + - manifest: + apiVersion: v1 + kind: Namespace `, wantError: true, - errorMsg: "spec.resources[0].name is required", + errorMsg: "resources[0].name is required", }, { name: "resource without manifest - kubernetes transport requires manifest in semantic validation", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: 
test-adapter -spec: - resources: - - name: "testNamespace" - discovery: - byName: "test-ns" +resources: + - name: "testNamespace" + discovery: + by_name: "test-ns" `, wantError: false, // Manifest is no longer structurally required (validated semantically based on transport type) }, @@ -574,75 +417,57 @@ spec: func TestMergeConfigs(t *testing.T) { adapterCfg := &AdapterConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterConfig", - Metadata: Metadata{ - Name: "adapter-deployment", + Adapter: AdapterInfo{ + Name: "adapter-deployment", + Version: "1.0.0", }, - Spec: AdapterConfigSpec{ - Adapter: AdapterInfo{ - Version: "1.0.0", + Clients: ClientsConfig{ + HyperfleetAPI: HyperfleetAPIConfig{ + BaseURL: "https://api.example.com", + Timeout: 5 * time.Second, + RetryAttempts: 3, }, - Clients: ClientsConfig{ - HyperfleetAPI: HyperfleetAPIConfig{ - BaseURL: "https://api.example.com", - Timeout: 5 * time.Second, - RetryAttempts: 3, - }, - Kubernetes: KubernetesConfig{ - APIVersion: "v1", - }, + Kubernetes: KubernetesConfig{ + APIVersion: "v1", }, }, } taskCfg := &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{ - Name: "task-processor", + Params: []Parameter{ + {Name: "clusterId", Source: "event.id", Required: true}, }, - Spec: AdapterTaskSpec{ - Params: []Parameter{ - {Name: "clusterId", Source: "event.id", Required: true}, - }, - Preconditions: []Precondition{ - {ActionBase: ActionBase{Name: "checkStatus"}}, - }, - Resources: []Resource{ - {Name: "namespace"}, - }, + Preconditions: []Precondition{ + {ActionBase: ActionBase{Name: "checkStatus"}}, + }, + Resources: []Resource{ + {Name: "namespace"}, }, } merged := Merge(adapterCfg, taskCfg) // Verify merged config - assert.Equal(t, "hyperfleet.redhat.com/v1alpha1", merged.APIVersion) - assert.Equal(t, "Config", merged.Kind) - // Metadata comes from adapter config - assert.Equal(t, "adapter-deployment", merged.Metadata.Name) // Adapter 
info from adapter config - assert.Equal(t, "1.0.0", merged.Spec.Adapter.Version) + assert.Equal(t, "adapter-deployment", merged.Adapter.Name) + assert.Equal(t, "1.0.0", merged.Adapter.Version) // Clients from adapter config - assert.Equal(t, "https://api.example.com", merged.Spec.Clients.HyperfleetAPI.BaseURL) - assert.Equal(t, 5*time.Second, merged.Spec.Clients.HyperfleetAPI.Timeout) + assert.Equal(t, "https://api.example.com", merged.Clients.HyperfleetAPI.BaseURL) + assert.Equal(t, 5*time.Second, merged.Clients.HyperfleetAPI.Timeout) // Task fields from task config - require.Len(t, merged.Spec.Params, 1) - assert.Equal(t, "clusterId", merged.Spec.Params[0].Name) - require.Len(t, merged.Spec.Preconditions, 1) - assert.Equal(t, "checkStatus", merged.Spec.Preconditions[0].Name) - require.Len(t, merged.Spec.Resources, 1) - assert.Equal(t, "namespace", merged.Spec.Resources[0].Name) + require.Len(t, merged.Params, 1) + assert.Equal(t, "clusterId", merged.Params[0].Name) + require.Len(t, merged.Preconditions, 1) + assert.Equal(t, "checkStatus", merged.Preconditions[0].Name) + require.Len(t, merged.Resources, 1) + assert.Equal(t, "namespace", merged.Resources[0].Name) } func TestGetRequiredParams(t *testing.T) { config := &Config{ - Spec: ConfigSpec{ - Params: []Parameter{ - {Name: "clusterId", Source: "event.id", Required: true}, - {Name: "optional", Source: "event.optional", Required: false}, - }, + Params: []Parameter{ + {Name: "clusterId", Source: "event.id", Required: true}, + {Name: "optional", Source: "event.optional", Required: false}, }, } @@ -653,11 +478,9 @@ func TestGetRequiredParams(t *testing.T) { func TestGetResourceByName(t *testing.T) { config := &Config{ - Spec: ConfigSpec{ - Resources: []Resource{ - {Name: "namespace1"}, - {Name: "namespace2"}, - }, + Resources: []Resource{ + {Name: "namespace1"}, + {Name: "namespace2"}, }, } @@ -671,11 +494,9 @@ func TestGetResourceByName(t *testing.T) { func TestGetPreconditionByName(t *testing.T) { config := 
&Config{ - Spec: ConfigSpec{ - Preconditions: []Precondition{ - {ActionBase: ActionBase{Name: "precond1"}}, - {ActionBase: ActionBase{Name: "precond2"}}, - }, + Preconditions: []Precondition{ + {ActionBase: ActionBase{Name: "precond1"}}, + {ActionBase: ActionBase{Name: "precond2"}}, }, } @@ -689,10 +510,9 @@ func TestGetPreconditionByName(t *testing.T) { func TestValidateAdapterVersion(t *testing.T) { config := &AdapterConfig{ - Spec: AdapterConfigSpec{ - Adapter: AdapterInfo{ - Version: "1.0.0", - }, + Adapter: AdapterInfo{ + Name: "test-adapter", + Version: "1.0.0", }, } @@ -728,16 +548,24 @@ func TestValidateAdapterVersion(t *testing.T) { err = ValidateAdapterVersion(config, "v0.0.0-dev") assert.NoError(t, err) + // Empty config version (not provided in adapter config - skip validation) + noVersionConfig := &AdapterConfig{ + Adapter: AdapterInfo{ + Name: "test-adapter", + }, + } + err = ValidateAdapterVersion(noVersionConfig, "1.0.0") + assert.NoError(t, err) + // Pre-release version with same major.minor - should pass err = ValidateAdapterVersion(config, "1.0.1-rc.1") assert.NoError(t, err) // Invalid config version invalidConfig := &AdapterConfig{ - Spec: AdapterConfigSpec{ - Adapter: AdapterInfo{ - Version: "not-a-version", - }, + Adapter: AdapterInfo{ + Name: "test-adapter", + Version: "not-a-version", }, } err = ValidateAdapterVersion(invalidConfig, "1.0.0") @@ -750,23 +578,6 @@ func TestValidateAdapterVersion(t *testing.T) { assert.Contains(t, err.Error(), "invalid expected adapter version") } -func TestIsSupportedAPIVersion(t *testing.T) { - // Supported version - assert.True(t, IsSupportedAPIVersion("hyperfleet.redhat.com/v1alpha1")) - - // Unsupported versions - assert.False(t, IsSupportedAPIVersion("hyperfleet.redhat.com/v1")) - assert.False(t, IsSupportedAPIVersion("hyperfleet.redhat.com/v2")) - assert.False(t, IsSupportedAPIVersion("other.io/v1alpha1")) - assert.False(t, IsSupportedAPIVersion("")) -} - -func TestSupportedAPIVersions(t *testing.T) { - 
// Verify the constant is in the supported list - assert.Contains(t, SupportedAPIVersions, APIVersionV1Alpha1) - assert.Equal(t, "hyperfleet.redhat.com/v1alpha1", APIVersionV1Alpha1) -} - func TestValidateFileReferencesInTaskConfig(t *testing.T) { // Create temporary directory with test files tmpDir := t.TempDir() @@ -787,14 +598,9 @@ func TestValidateFileReferencesInTaskConfig(t *testing.T) { { name: "valid payload buildRef", config: &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test"}, - Spec: AdapterTaskSpec{ - Post: &PostConfig{ - Payloads: []Payload{ - {Name: "test", BuildRef: "templates/test-template.yaml"}, - }, + Post: &PostConfig{ + Payloads: []Payload{ + {Name: "test", BuildRef: "templates/test-template.yaml"}, }, }, }, @@ -804,14 +610,9 @@ func TestValidateFileReferencesInTaskConfig(t *testing.T) { { name: "invalid payload buildRef - file not found", config: &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test"}, - Spec: AdapterTaskSpec{ - Post: &PostConfig{ - Payloads: []Payload{ - {Name: "test", BuildRef: "templates/nonexistent.yaml"}, - }, + Post: &PostConfig{ + Payloads: []Payload{ + {Name: "test", BuildRef: "templates/nonexistent.yaml"}, }, }, }, @@ -822,14 +623,9 @@ func TestValidateFileReferencesInTaskConfig(t *testing.T) { { name: "invalid payload buildRef - is a directory", config: &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test"}, - Spec: AdapterTaskSpec{ - Post: &PostConfig{ - Payloads: []Payload{ - {Name: "test", BuildRef: "templates"}, - }, + Post: &PostConfig{ + Payloads: []Payload{ + {Name: "test", BuildRef: "templates"}, }, }, }, @@ -840,16 +636,11 @@ func TestValidateFileReferencesInTaskConfig(t *testing.T) { { name: "valid manifest.ref", config: &AdapterTaskConfig{ - APIVersion: 
"hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test"}, - Spec: AdapterTaskSpec{ - Resources: []Resource{ - { - Name: "test", - Manifest: map[string]interface{}{ - "ref": "templates/test-template.yaml", - }, + Resources: []Resource{ + { + Name: "test", + Manifest: map[string]interface{}{ + "ref": "templates/test-template.yaml", }, }, }, @@ -860,16 +651,11 @@ func TestValidateFileReferencesInTaskConfig(t *testing.T) { { name: "invalid manifest.ref - file not found", config: &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test"}, - Spec: AdapterTaskSpec{ - Resources: []Resource{ - { - Name: "test", - Manifest: map[string]interface{}{ - "ref": "templates/nonexistent.yaml", - }, + Resources: []Resource{ + { + Name: "test", + Manifest: map[string]interface{}{ + "ref": "templates/nonexistent.yaml", }, }, }, @@ -881,15 +667,10 @@ func TestValidateFileReferencesInTaskConfig(t *testing.T) { { name: "valid multiple payloads with buildRef", config: &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test"}, - Spec: AdapterTaskSpec{ - Post: &PostConfig{ - Payloads: []Payload{ - {Name: "payload1", BuildRef: "templates/test-template.yaml"}, - {Name: "payload2", BuildRef: "templates/test-template.yaml"}, - }, + Post: &PostConfig{ + Payloads: []Payload{ + {Name: "payload1", BuildRef: "templates/test-template.yaml"}, + {Name: "payload2", BuildRef: "templates/test-template.yaml"}, }, }, }, @@ -899,13 +680,8 @@ func TestValidateFileReferencesInTaskConfig(t *testing.T) { { name: "no file references - should pass", config: &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test"}, - Spec: AdapterTaskSpec{ - Params: []Parameter{ - {Name: "test", Source: "event.test"}, - }, + Params: []Parameter{ + {Name: "test", Source: 
"event.test"}, }, }, baseDir: tmpDir, @@ -941,47 +717,38 @@ status: "{{ .status }}" // Create adapter config file adapterYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: test-adapter -spec: - adapter: - version: "0.1.0" - clients: - hyperfleetApi: - baseUrl: "https://test.example.com" - timeout: 2s - kubernetes: - apiVersion: "v1" + version: "0.1.0" +clients: + hyperfleet_api: + base_url: "https://test.example.com" + timeout: 2s + kubernetes: + api_version: "v1" ` adapterPath := filepath.Join(tmpDir, "adapter-config.yaml") require.NoError(t, os.WriteFile(adapterPath, []byte(adapterYAML), 0644)) - // Create task config file with buildRef + // Create task config file with build_ref taskYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - params: - - name: "clusterId" - source: "event.id" - resources: - - name: "testNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: test - discovery: - namespace: "*" - byName: "test" - post: - payloads: - - name: "statusPayload" - buildRef: "templates/status-payload.yaml" +params: + - name: "clusterId" + source: "event.id" +resources: + - name: "testNamespace" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: test + discovery: + namespace: "*" + by_name: "test" +post: + payloads: + - name: "statusPayload" + build_ref: "templates/status-payload.yaml" ` taskPath := filepath.Join(tmpDir, "task-config.yaml") require.NoError(t, os.WriteFile(taskPath, []byte(taskYAML), 0644)) @@ -994,37 +761,32 @@ spec: ) require.NoError(t, err) require.NotNil(t, config) - assert.Equal(t, "test-adapter", config.Metadata.Name) + assert.Equal(t, "test-adapter", config.Adapter.Name) // Verify buildRef content was loaded - require.NotNil(t, config.Spec.Post) - require.Len(t, config.Spec.Post.Payloads, 1) - assert.NotNil(t, config.Spec.Post.Payloads[0].BuildRefContent) + require.NotNil(t, config.Post) 
+ require.Len(t, config.Post.Payloads, 1) + assert.NotNil(t, config.Post.Payloads[0].BuildRefContent) // Now test with non-existent buildRef taskYAMLBad := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - params: - - name: "clusterId" - source: "event.id" - resources: - - name: "testNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: test - discovery: - namespace: "*" - byName: "test" - post: - payloads: - - name: "statusPayload" - buildRef: "templates/nonexistent.yaml" +params: + - name: "clusterId" + source: "event.id" +resources: + - name: "testNamespace" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: test + discovery: + namespace: "*" + by_name: "test" +post: + payloads: + - name: "statusPayload" + build_ref: "templates/nonexistent.yaml" ` taskPathBad := filepath.Join(tmpDir, "task-config-bad.yaml") require.NoError(t, os.WriteFile(taskPathBad, []byte(taskYAMLBad), 0644)) @@ -1067,46 +829,37 @@ spec: // Create adapter config adapterYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: test-adapter -spec: - adapter: - version: "0.1.0" - clients: - hyperfleetApi: - baseUrl: "https://test.example.com" - timeout: 2s - kubernetes: - apiVersion: "v1" + version: "0.1.0" +clients: + hyperfleet_api: + base_url: "https://test.example.com" + timeout: 2s + kubernetes: + api_version: "v1" ` adapterPath := filepath.Join(tmpDir, "adapter-config.yaml") require.NoError(t, os.WriteFile(adapterPath, []byte(adapterYAML), 0644)) - // Create task config file with both buildRef and manifest.ref + // Create task config file with both build_ref and manifest.ref taskYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - params: - - name: "clusterId" - source: "event.id" - resources: - - name: "deployment" - manifest: - ref: "templates/deployment.yaml" - discovery: - 
namespace: "*" - bySelectors: - labelSelector: - app: "test" - post: - payloads: - - name: "statusPayload" - buildRef: "templates/status-payload.yaml" +params: + - name: "clusterId" + source: "event.id" +resources: + - name: "deployment" + manifest: + ref: "templates/deployment.yaml" + discovery: + namespace: "*" + by_selectors: + label_selector: + app: "test" +post: + payloads: + - name: "statusPayload" + build_ref: "templates/status-payload.yaml" ` taskPath := filepath.Join(tmpDir, "task-config.yaml") require.NoError(t, os.WriteFile(taskPath, []byte(taskYAML), 0644)) @@ -1121,8 +874,8 @@ spec: require.NotNil(t, config) // Verify manifest.ref was loaded and replaced - require.Len(t, config.Spec.Resources, 1) - manifest, ok := config.Spec.Resources[0].Manifest.(map[string]interface{}) + require.Len(t, config.Resources, 1) + manifest, ok := config.Resources[0].Manifest.(map[string]interface{}) require.True(t, ok, "Manifest should be a map after loading ref") assert.Equal(t, "apps/v1", manifest["apiVersion"]) assert.Equal(t, "Deployment", manifest["kind"]) @@ -1131,25 +884,20 @@ spec: assert.False(t, hasRef, "ref should be replaced with actual content") // Verify buildRef content was loaded into BuildRefContent - require.NotNil(t, config.Spec.Post) - require.Len(t, config.Spec.Post.Payloads, 1) - assert.NotNil(t, config.Spec.Post.Payloads[0].BuildRefContent) - assert.Equal(t, "{{ .status }}", config.Spec.Post.Payloads[0].BuildRefContent["status"]) - assert.Equal(t, "Operation completed", config.Spec.Post.Payloads[0].BuildRefContent["message"]) + require.NotNil(t, config.Post) + require.Len(t, config.Post.Payloads, 1) + assert.NotNil(t, config.Post.Payloads[0].BuildRefContent) + assert.Equal(t, "{{ .status }}", config.Post.Payloads[0].BuildRefContent["status"]) + assert.Equal(t, "Operation completed", config.Post.Payloads[0].BuildRefContent["message"]) // Original BuildRef path should still be preserved - assert.Equal(t, "templates/status-payload.yaml", 
config.Spec.Post.Payloads[0].BuildRef) + assert.Equal(t, "templates/status-payload.yaml", config.Post.Payloads[0].BuildRef) } func TestValidateResourceDiscoveryInTaskConfig(t *testing.T) { // Helper to create a valid task config with given resources configWithResources := func(resources []Resource) *AdapterTaskConfig { return &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test-adapter"}, - Spec: AdapterTaskSpec{ - Resources: resources, - }, + Resources: resources, } } @@ -1260,7 +1008,7 @@ func TestValidateResourceDiscoveryInTaskConfig(t *testing.T) { }, }, wantErr: true, - errMsg: "spec.resources[0].discovery: must have either 'byName' or 'bySelectors' set", + errMsg: "resources[0].discovery: must have either 'by_name' or 'by_selectors' set", }, { name: "invalid - bySelectors without labelSelector defined", @@ -1277,7 +1025,7 @@ func TestValidateResourceDiscoveryInTaskConfig(t *testing.T) { }, }, wantErr: true, - errMsg: "spec.resources[0].discovery.bySelectors.labelSelector is required", + errMsg: "resources[0].discovery.by_selectors.label_selector is required", }, } @@ -1376,22 +1124,17 @@ func TestTransportConfigYAMLParsing(t *testing.T) { { name: "resource with kubernetes transport", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - resources: - - name: "testResource" - transport: - client: "kubernetes" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: "test-ns" - discovery: - byName: "test-ns" +resources: + - name: "testResource" + transport: + client: "kubernetes" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "test-ns" + discovery: + by_name: "test-ns" `, wantError: false, wantClient: "kubernetes", @@ -1400,24 +1143,19 @@ spec: { name: "resource with maestro transport", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter 
-spec: - resources: - - name: "testResource" - transport: - client: "maestro" - maestro: - targetCluster: "cluster1" - manifestWork: - apiVersion: work.open-cluster-management.io/v1 - kind: ManifestWork - metadata: - name: "test-mw" - discovery: - byName: "test-mw" +resources: + - name: "testResource" + transport: + client: "maestro" + maestro: + target_cluster: "cluster1" + manifestWork: + apiVersion: work.open-cluster-management.io/v1 + kind: ManifestWork + metadata: + name: "test-mw" + discovery: + by_name: "test-mw" `, wantError: false, wantClient: "maestro", @@ -1427,21 +1165,16 @@ spec: { name: "resource with maestro transport and manifestWork ref", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - resources: - - name: "testResource" - transport: - client: "maestro" - maestro: - targetCluster: "{{ .clusterName }}" - manifestWork: - ref: "/path/to/manifestwork.yaml" - discovery: - byName: "test-mw" +resources: + - name: "testResource" + transport: + client: "maestro" + maestro: + target_cluster: "{{ .clusterName }}" + manifestWork: + ref: "/path/to/manifestwork.yaml" + discovery: + by_name: "test-mw" `, wantError: false, wantClient: "maestro", @@ -1451,20 +1184,15 @@ spec: { name: "resource without transport (defaults to kubernetes)", yaml: ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - resources: - - name: "testResource" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: "test-ns" - discovery: - byName: "test-ns" +resources: + - name: "testResource" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "test-ns" + discovery: + by_name: "test-ns" `, wantError: false, wantClient: "kubernetes", @@ -1482,9 +1210,9 @@ spec: return } require.NoError(t, err) - require.Len(t, config.Spec.Resources, 1) + require.Len(t, config.Resources, 1) - resource := config.Spec.Resources[0] + resource := 
config.Resources[0] assert.Equal(t, tt.wantClient, resource.GetTransportClient()) if tt.wantMaestroNil { @@ -1581,42 +1309,33 @@ spec: `), 0644)) adapterYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: test-adapter -spec: - adapter: - version: "0.1.0" - clients: - hyperfleetApi: - baseUrl: "https://test.example.com" - timeout: 2s - kubernetes: - apiVersion: "v1" + version: "0.1.0" +clients: + hyperfleet_api: + base_url: "https://test.example.com" + timeout: 2s + kubernetes: + api_version: "v1" ` taskYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - params: - - name: "clusterName" - source: "event.name" - resources: - - name: "testManifestWork" - transport: - client: "maestro" - maestro: - targetCluster: "{{ .clusterName }}" - manifest: - ref: "manifestwork.yaml" - discovery: - bySelectors: - labelSelector: - app: "test" +params: + - name: "clusterName" + source: "event.name" +resources: + - name: "testManifestWork" + transport: + client: "maestro" + maestro: + target_cluster: "{{ .clusterName }}" + manifest: + ref: "manifestwork.yaml" + discovery: + by_selectors: + label_selector: + app: "test" ` adapterPath, taskPath := createTestConfigFiles(t, tmpDir, adapterYAML, taskYAML) @@ -1630,8 +1349,8 @@ spec: require.NotNil(t, config) // Verify manifest ref was loaded and replaced with ManifestWork content - require.Len(t, config.Spec.Resources, 1) - resource := config.Spec.Resources[0] + require.Len(t, config.Resources, 1) + resource := config.Resources[0] mw, ok := resource.Manifest.(map[string]interface{}) require.True(t, ok, "Manifest should be a map after loading ref") @@ -1647,39 +1366,30 @@ func TestLoadConfigWithManifestWorkRefNotFound(t *testing.T) { tmpDir := t.TempDir() adapterYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: test-adapter -spec: - adapter: - version: "0.1.0" - clients: - 
hyperfleetApi: - baseUrl: "https://test.example.com" - timeout: 2s - kubernetes: - apiVersion: "v1" + version: "0.1.0" +clients: + hyperfleet_api: + base_url: "https://test.example.com" + timeout: 2s + kubernetes: + api_version: "v1" ` taskYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - resources: - - name: "testManifestWork" - transport: - client: "maestro" - maestro: - targetCluster: "cluster1" - manifest: - ref: "nonexistent-manifestwork.yaml" - discovery: - bySelectors: - labelSelector: - app: "test" +resources: + - name: "testManifestWork" + transport: + client: "maestro" + maestro: + target_cluster: "cluster1" + manifest: + ref: "nonexistent-manifestwork.yaml" + discovery: + by_selectors: + label_selector: + app: "test" ` adapterPath, taskPath := createTestConfigFiles(t, tmpDir, adapterYAML, taskYAML) @@ -1698,48 +1408,39 @@ func TestLoadConfigWithInlineManifestWork(t *testing.T) { tmpDir := t.TempDir() adapterYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: test-adapter -spec: - adapter: - version: "0.1.0" - clients: - hyperfleetApi: - baseUrl: "https://test.example.com" - timeout: 2s - kubernetes: - apiVersion: "v1" + version: "0.1.0" +clients: + hyperfleet_api: + base_url: "https://test.example.com" + timeout: 2s + kubernetes: + api_version: "v1" ` taskYAML := ` -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: test-adapter -spec: - params: - - name: "clusterName" - source: "event.name" - resources: - - name: "testManifestWork" - transport: - client: "maestro" - maestro: - targetCluster: "{{ .clusterName }}" - manifest: - apiVersion: work.open-cluster-management.io/v1 - kind: ManifestWork - metadata: - name: "inline-mw" - spec: - workload: - manifests: [] - discovery: - bySelectors: - labelSelector: - app: "test" +params: + - name: "clusterName" + source: "event.name" +resources: + - name: 
"testManifestWork" + transport: + client: "maestro" + maestro: + target_cluster: "{{ .clusterName }}" + manifest: + apiVersion: work.open-cluster-management.io/v1 + kind: ManifestWork + metadata: + name: "inline-mw" + spec: + workload: + manifests: [] + discovery: + by_selectors: + label_selector: + app: "test" ` adapterPath, taskPath := createTestConfigFiles(t, tmpDir, adapterYAML, taskYAML) @@ -1753,8 +1454,8 @@ spec: require.NotNil(t, config) // Verify inline manifest (ManifestWork) is preserved as-is - require.Len(t, config.Spec.Resources, 1) - resource := config.Spec.Resources[0] + require.Len(t, config.Resources, 1) + resource := config.Resources[0] mw, ok := resource.Manifest.(map[string]interface{}) require.True(t, ok, "Manifest should be a map") diff --git a/internal/config_loader/types.go b/internal/config_loader/types.go index a0a482a..f1adcc2 100644 --- a/internal/config_loader/types.go +++ b/internal/config_loader/types.go @@ -11,44 +11,17 @@ import ( // Config is the unified configuration passed throughout the application. // Created by merging AdapterConfig (deployment) and AdapterTaskConfig (task). 
type Config struct { - APIVersion string `yaml:"apiVersion"` - Kind string `yaml:"kind"` - Metadata Metadata `yaml:"metadata"` - Spec ConfigSpec `yaml:"spec"` -} - -// ConfigSpec contains the merged specification from both deployment and task configs -type ConfigSpec struct { - // From AdapterConfig (deployment) - Adapter AdapterInfo `yaml:"adapter"` - Clients ClientsConfig `yaml:"clients"` - DebugConfig bool `yaml:"debugConfig,omitempty"` - - // From AdapterTaskConfig (business logic) + Adapter AdapterInfo `yaml:"adapter"` + Clients ClientsConfig `yaml:"clients"` + DebugConfig bool `yaml:"debug_config,omitempty"` + Log LogConfig `yaml:"log,omitempty"` Params []Parameter `yaml:"params,omitempty"` Preconditions []Precondition `yaml:"preconditions,omitempty"` Resources []Resource `yaml:"resources,omitempty"` Post *PostConfig `yaml:"post,omitempty"` } -// GetParams returns the parameters from the config spec -func (c *Config) GetParams() []Parameter { - if c == nil { - return nil - } - return c.Spec.Params -} - -// GetMetadata returns the metadata from the config -func (c *Config) GetMetadata() Metadata { - if c == nil { - return Metadata{} - } - return c.Metadata -} - // Merge combines AdapterConfig (deployment) and AdapterTaskConfig (task) into a unified Config. -// The metadata is taken from the adapter config since it takes precedence. // The adapter info and clients come from the deployment config. // The params, preconditions, resources, and post-processing come from the task config. 
func Merge(adapterCfg *AdapterConfig, taskCfg *AdapterTaskConfig) *Config { @@ -57,23 +30,54 @@ func Merge(adapterCfg *AdapterConfig, taskCfg *AdapterTaskConfig) *Config { } return &Config{ - APIVersion: adapterCfg.APIVersion, - Kind: ExpectedKindConfig, - Metadata: adapterCfg.Metadata, // Adapter config takes precedence - Spec: ConfigSpec{ - // From deployment config - Adapter: adapterCfg.Spec.Adapter, - Clients: adapterCfg.Spec.Clients, - DebugConfig: adapterCfg.Spec.DebugConfig, - // From task config - Params: taskCfg.Spec.Params, - Preconditions: taskCfg.Spec.Preconditions, - Resources: taskCfg.Spec.Resources, - Post: taskCfg.Spec.Post, - }, + Adapter: adapterCfg.Adapter, + Clients: adapterCfg.Clients, + DebugConfig: adapterCfg.DebugConfig, + Log: adapterCfg.Log, + Params: taskCfg.Params, + Preconditions: taskCfg.Preconditions, + Resources: taskCfg.Resources, + Post: taskCfg.Post, } } +const redactedValue = "**REDACTED**" + +// Redacted returns a copy of Config with sensitive fields replaced by redactedValue. +func (c *Config) Redacted() *Config { + if c == nil { + return nil + } + copy := *c + copy.Clients = redactedClients(c.Clients) + return © +} + +func redactedClients(clients ClientsConfig) ClientsConfig { + copy := clients + if clients.Maestro != nil { + maestroCopy := *clients.Maestro + if maestroCopy.Auth.TLSConfig != nil { + tlsCopy := *maestroCopy.Auth.TLSConfig + if tlsCopy.CAFile != "" { + tlsCopy.CAFile = redactedValue + } + if tlsCopy.CertFile != "" { + tlsCopy.CertFile = redactedValue + } + if tlsCopy.KeyFile != "" { + tlsCopy.KeyFile = redactedValue + } + if tlsCopy.HTTPCAFile != "" { + tlsCopy.HTTPCAFile = redactedValue + } + maestroCopy.Auth.TLSConfig = &tlsCopy + } + copy.Maestro = &maestroCopy + } + return copy +} + // FieldExpressionDef represents a common pattern for value extraction. // Used when a value should be computed via field extraction (JSONPath) or CEL expression. // Only one of Field or Expression should be set. 
@@ -132,15 +136,18 @@ func ParseValueDef(v any) (*ValueDef, bool) { return &valueDef, true } -// Metadata contains the adapter metadata -type Metadata struct { - Name string `yaml:"name" mapstructure:"name" validate:"required"` - Labels map[string]string `yaml:"labels,omitempty" mapstructure:"labels"` -} - // AdapterInfo contains basic adapter information type AdapterInfo struct { - Version string `yaml:"version" mapstructure:"version" validate:"required"` + Name string `yaml:"name" mapstructure:"name" validate:"required"` + Version string `yaml:"version,omitempty" mapstructure:"version"` +} + +// LogConfig contains logging configuration. +// Priority (lowest to highest): config file < LOG_LEVEL env < --log-level flag +type LogConfig struct { + Level string `yaml:"level,omitempty" mapstructure:"level"` + Format string `yaml:"format,omitempty" mapstructure:"format"` + Output string `yaml:"output,omitempty" mapstructure:"output"` } // HyperfleetAPIConfig is the HyperFleet API client configuration. @@ -149,15 +156,15 @@ type HyperfleetAPIConfig = hyperfleet_api.ClientConfig // BrokerConfig contains broker consumer configuration type BrokerConfig struct { - SubscriptionID string `yaml:"subscriptionId,omitempty" mapstructure:"subscriptionId"` + SubscriptionID string `yaml:"subscription_id,omitempty" mapstructure:"subscription_id"` Topic string `yaml:"topic,omitempty" mapstructure:"topic"` } // KubernetesConfig contains Kubernetes configuration type KubernetesConfig struct { - APIVersion string `yaml:"apiVersion" mapstructure:"apiVersion"` + APIVersion string `yaml:"api_version" mapstructure:"api_version"` // KubeConfigPath is the path to a kubeconfig file. Empty means in-cluster auth. - KubeConfigPath string `yaml:"kubeConfigPath,omitempty" mapstructure:"kubeConfigPath"` + KubeConfigPath string `yaml:"kube_config_path,omitempty" mapstructure:"kube_config_path"` // QPS is the client-side rate limit. Zero uses defaults. 
QPS float32 `yaml:"qps,omitempty" mapstructure:"qps"` // Burst is the client-side burst rate. Zero uses defaults. @@ -190,7 +197,7 @@ type Payload struct { Build interface{} `yaml:"build,omitempty" validate:"required_without=BuildRef,excluded_with=BuildRef"` // BuildRef references an external YAML file containing the build definition. // Mutually exclusive with Build. - BuildRef string `yaml:"buildRef,omitempty" validate:"required_without=Build,excluded_with=Build"` + BuildRef string `yaml:"build_ref,omitempty" validate:"required_without=Build,excluded_with=Build"` // BuildRefContent holds the loaded content from BuildRef file (populated by loader) BuildRefContent map[string]interface{} `yaml:"-"` } @@ -201,10 +208,10 @@ func (p *Payload) Validate() error { hasBuildRef := p.BuildRef != "" if !hasBuild && !hasBuildRef { - return fmt.Errorf("either 'build' or 'buildRef' must be set") + return fmt.Errorf("either 'build' or 'build_ref' must be set") } if hasBuild && hasBuildRef { - return fmt.Errorf("'build' and 'buildRef' are mutually exclusive") + return fmt.Errorf("'build' and 'build_ref' are mutually exclusive") } return nil } @@ -213,7 +220,7 @@ func (p *Payload) Validate() error { // Used by Precondition and PostAction to reduce duplication. 
type ActionBase struct { Name string `yaml:"name" validate:"required"` - APICall *APICall `yaml:"apiCall,omitempty" validate:"omitempty"` + APICall *APICall `yaml:"api_call,omitempty" validate:"omitempty"` Log *LogAction `yaml:"log,omitempty"` } @@ -231,8 +238,8 @@ type APICall struct { Method string `yaml:"method" validate:"required,oneof=GET POST PUT PATCH DELETE"` URL string `yaml:"url" validate:"required"` Timeout string `yaml:"timeout,omitempty"` - RetryAttempts int `yaml:"retryAttempts,omitempty"` - RetryBackoff string `yaml:"retryBackoff,omitempty"` + RetryAttempts int `yaml:"retry_attempts,omitempty"` + RetryBackoff string `yaml:"retry_backoff,omitempty"` Headers []Header `yaml:"headers,omitempty"` Body string `yaml:"body,omitempty"` } @@ -304,7 +311,7 @@ type TransportConfig struct { // MaestroTransportConfig contains maestro-specific transport settings type MaestroTransportConfig struct { // TargetCluster is the name of the target cluster (consumer) for ManifestWork delivery - TargetCluster string `yaml:"targetCluster" validate:"required"` + TargetCluster string `yaml:"target_cluster" validate:"required"` } // Resource represents a resource configuration. @@ -315,11 +322,11 @@ type Resource struct { Name string `yaml:"name" validate:"required,resourcename"` Transport *TransportConfig `yaml:"transport,omitempty"` Manifest interface{} `yaml:"manifest,omitempty"` - RecreateOnChange bool `yaml:"recreateOnChange,omitempty"` + RecreateOnChange bool `yaml:"recreate_on_change,omitempty"` Discovery *DiscoveryConfig `yaml:"discovery,omitempty" validate:"required"` // NestedDiscoveries defines how to discover individual sub-resources within the applied manifest. // For example, discovering resources inside a ManifestWork's workload. 
- NestedDiscoveries []NestedDiscovery `yaml:"nestedDiscoveries,omitempty" validate:"dive"` + NestedDiscoveries []NestedDiscovery `yaml:"nested_discoveries,omitempty" validate:"dive"` } // NestedDiscovery defines a named discovery for a sub-resource within the parent manifest. @@ -331,19 +338,19 @@ type NestedDiscovery struct { // DiscoveryConfig represents resource discovery configuration type DiscoveryConfig struct { Namespace string `yaml:"namespace,omitempty"` - ByName string `yaml:"byName,omitempty" validate:"required_without=BySelectors"` - BySelectors *SelectorConfig `yaml:"bySelectors,omitempty" validate:"required_without=ByName,omitempty"` + ByName string `yaml:"by_name,omitempty" validate:"required_without=BySelectors"` + BySelectors *SelectorConfig `yaml:"by_selectors,omitempty" validate:"required_without=ByName,omitempty"` } // SelectorConfig represents label selector configuration type SelectorConfig struct { - LabelSelector map[string]string `yaml:"labelSelector,omitempty" validate:"required,min=1"` + LabelSelector map[string]string `yaml:"label_selector,omitempty" validate:"required,min=1"` } // PostConfig represents post-processing configuration type PostConfig struct { Payloads []Payload `yaml:"payloads,omitempty" validate:"dive"` - PostActions []PostAction `yaml:"postActions,omitempty" validate:"dive"` + PostActions []PostAction `yaml:"post_actions,omitempty" validate:"dive"` } // PostAction represents a post-processing action @@ -424,37 +431,30 @@ func (ve *ValidationErrors) HasErrors() bool { // Contains infrastructure settings that can be overridden via environment variables // and CLI flags using Viper. 
type AdapterConfig struct { - APIVersion string `yaml:"apiVersion" mapstructure:"apiVersion" validate:"required"` - Kind string `yaml:"kind" mapstructure:"kind" validate:"required,eq=AdapterConfig"` - Metadata Metadata `yaml:"metadata" mapstructure:"metadata"` - Spec AdapterConfigSpec `yaml:"spec" mapstructure:"spec"` -} - -// AdapterConfigSpec contains the deployment specification -type AdapterConfigSpec struct { Adapter AdapterInfo `yaml:"adapter" mapstructure:"adapter"` Clients ClientsConfig `yaml:"clients" mapstructure:"clients"` - DebugConfig bool `yaml:"debugConfig,omitempty" mapstructure:"debugConfig"` + DebugConfig bool `yaml:"debug_config,omitempty" mapstructure:"debug_config"` + Log LogConfig `yaml:"log,omitempty" mapstructure:"log"` } // ClientsConfig contains configuration for all external clients type ClientsConfig struct { Maestro *MaestroClientConfig `yaml:"maestro,omitempty" mapstructure:"maestro"` - HyperfleetAPI HyperfleetAPIConfig `yaml:"hyperfleetApi" mapstructure:"hyperfleetApi"` + HyperfleetAPI HyperfleetAPIConfig `yaml:"hyperfleet_api" mapstructure:"hyperfleet_api"` Broker BrokerConfig `yaml:"broker,omitempty" mapstructure:"broker"` Kubernetes KubernetesConfig `yaml:"kubernetes" mapstructure:"kubernetes"` } // MaestroClientConfig contains Maestro client configuration type MaestroClientConfig struct { - GRPCServerAddress string `yaml:"grpcServerAddress" mapstructure:"grpcServerAddress"` - HTTPServerAddress string `yaml:"httpServerAddress" mapstructure:"httpServerAddress"` - SourceID string `yaml:"sourceId" mapstructure:"sourceId"` - ClientID string `yaml:"clientId" mapstructure:"clientId"` + GRPCServerAddress string `yaml:"grpc_server_address" mapstructure:"grpc_server_address"` + HTTPServerAddress string `yaml:"http_server_address" mapstructure:"http_server_address"` + SourceID string `yaml:"source_id" mapstructure:"source_id"` + ClientID string `yaml:"client_id" mapstructure:"client_id"` Auth MaestroAuthConfig `yaml:"auth" 
mapstructure:"auth"` Timeout string `yaml:"timeout" mapstructure:"timeout"` - ServerHealthinessTimeout string `yaml:"serverHealthinessTimeout,omitempty" mapstructure:"serverHealthinessTimeout"` - RetryAttempts int `yaml:"retryAttempts" mapstructure:"retryAttempts"` + ServerHealthinessTimeout string `yaml:"server_healthiness_timeout,omitempty" mapstructure:"server_healthiness_timeout"` + RetryAttempts int `yaml:"retry_attempts" mapstructure:"retry_attempts"` Keepalive *KeepaliveConfig `yaml:"keepalive,omitempty" mapstructure:"keepalive"` Insecure bool `yaml:"insecure,omitempty" mapstructure:"insecure"` } @@ -462,15 +462,15 @@ type MaestroClientConfig struct { // MaestroAuthConfig contains authentication configuration for Maestro type MaestroAuthConfig struct { Type string `yaml:"type" mapstructure:"type"` // "tls" or "none" - TLSConfig *TLSConfig `yaml:"tlsConfig,omitempty" mapstructure:"tlsConfig"` + TLSConfig *TLSConfig `yaml:"tls_config,omitempty" mapstructure:"tls_config"` } // TLSConfig contains TLS certificate configuration type TLSConfig struct { - CAFile string `yaml:"caFile" mapstructure:"caFile"` - CertFile string `yaml:"certFile" mapstructure:"certFile"` - KeyFile string `yaml:"keyFile" mapstructure:"keyFile"` - HTTPCAFile string `yaml:"httpCaFile,omitempty" mapstructure:"httpCaFile"` + CAFile string `yaml:"ca_file" mapstructure:"ca_file"` + CertFile string `yaml:"cert_file" mapstructure:"cert_file"` + KeyFile string `yaml:"key_file" mapstructure:"key_file"` + HTTPCAFile string `yaml:"http_ca_file,omitempty" mapstructure:"http_ca_file"` } // KeepaliveConfig contains gRPC keepalive configuration @@ -483,14 +483,6 @@ type KeepaliveConfig struct { // Contains params, preconditions, resources, and post-processing actions. // This config is loaded from YAML without environment variable overrides. 
type AdapterTaskConfig struct { - APIVersion string `yaml:"apiVersion" validate:"required"` - Kind string `yaml:"kind" validate:"required,eq=AdapterTaskConfig"` - Metadata Metadata `yaml:"metadata"` - Spec AdapterTaskSpec `yaml:"spec"` -} - -// AdapterTaskSpec contains the task specification -type AdapterTaskSpec struct { Params []Parameter `yaml:"params,omitempty" validate:"dive"` Preconditions []Precondition `yaml:"preconditions,omitempty" validate:"dive"` Resources []Resource `yaml:"resources,omitempty" validate:"unique=Name,dive"` diff --git a/internal/config_loader/validator.go b/internal/config_loader/validator.go index e93987b..24e4a81 100644 --- a/internal/config_loader/validator.go +++ b/internal/config_loader/validator.go @@ -42,17 +42,11 @@ func (v *AdapterConfigValidator) ValidateStructure() error { return fmt.Errorf("adapter config is nil") } - // Phase 1: Struct tag validation + // Struct tag validation if errs := ValidateStruct(v.config); errs != nil && errs.HasErrors() { return fmt.Errorf("%s", errs.First()) } - // Phase 2: API version validation - if !IsSupportedAPIVersion(v.config.APIVersion) { - return fmt.Errorf("unsupported apiVersion %q (supported: %s)", - v.config.APIVersion, strings.Join(SupportedAPIVersions, ", ")) - } - return nil } @@ -80,17 +74,11 @@ func (v *TaskConfigValidator) ValidateStructure() error { return fmt.Errorf("task config is nil") } - // Phase 1: Struct tag validation + // Struct tag validation if errs := ValidateStruct(v.config); errs != nil && errs.HasErrors() { return fmt.Errorf("%s", errs.First()) } - // Phase 2: API version validation - if !IsSupportedAPIVersion(v.config.APIVersion) { - return fmt.Errorf("unsupported apiVersion %q (supported: %s)", - v.config.APIVersion, strings.Join(SupportedAPIVersions, ", ")) - } - return nil } @@ -102,11 +90,11 @@ func (v *TaskConfigValidator) ValidateFileReferences() error { var errors []string - // Validate buildRef in spec.post.payloads - if v.config.Spec.Post != nil { - for 
i, payload := range v.config.Spec.Post.Payloads { + // Validate build_ref in post.payloads + if v.config.Post != nil { + for i, payload := range v.config.Post.Payloads { if payload.BuildRef != "" { - path := fmt.Sprintf("%s.%s.%s[%d].%s", FieldSpec, FieldPost, FieldPayloads, i, FieldBuildRef) + path := fmt.Sprintf("%s.%s[%d].%s", FieldPost, FieldPayloads, i, FieldBuildRef) if err := v.validateFileExists(payload.BuildRef, path); err != nil { errors = append(errors, err.Error()) } @@ -114,11 +102,11 @@ func (v *TaskConfigValidator) ValidateFileReferences() error { } } - // Validate manifest.ref in spec.resources - for i, resource := range v.config.Spec.Resources { + // Validate manifest.ref in resources + for i, resource := range v.config.Resources { ref := resource.GetManifestRef() if ref != "" { - path := fmt.Sprintf("%s.%s[%d].%s.%s", FieldSpec, FieldResources, i, FieldManifest, FieldRef) + path := fmt.Sprintf("%s[%d].%s.%s", FieldResources, i, FieldManifest, FieldRef) if err := v.validateFileExists(ref, path); err != nil { errors = append(errors, err.Error()) } @@ -199,15 +187,15 @@ func (c *AdapterTaskConfig) GetDefinedVariables() map[string]bool { vars[b] = true } - // Parameters from spec.params - for _, p := range c.Spec.Params { + // Parameters from params + for _, p := range c.Params { if p.Name != "" { vars[p.Name] = true } } // Variables from precondition captures - for _, precond := range c.Spec.Preconditions { + for _, precond := range c.Preconditions { for _, capture := range precond.Capture { if capture.Name != "" { vars[capture.Name] = true @@ -216,8 +204,8 @@ func (c *AdapterTaskConfig) GetDefinedVariables() map[string]bool { } // Post payloads - if c.Spec.Post != nil { - for _, p := range c.Spec.Post.Payloads { + if c.Post != nil { + for _, p := range c.Post.Payloads { if p.Name != "" { vars[p.Name] = true } @@ -225,7 +213,7 @@ func (c *AdapterTaskConfig) GetDefinedVariables() map[string]bool { } // Resource aliases - for _, r := range 
c.Spec.Resources { + for _, r := range c.Resources { if r.Name != "" { vars[FieldResources+"."+r.Name] = true } @@ -271,8 +259,8 @@ func (v *TaskConfigValidator) initCELEnv() error { } func (v *TaskConfigValidator) validateTransportConfig() { - for i, resource := range v.config.Spec.Resources { - basePath := fmt.Sprintf("%s.%s[%d]", FieldSpec, FieldResources, i) + for i, resource := range v.config.Resources { + basePath := fmt.Sprintf("%s[%d]", FieldResources, i) if resource.Transport != nil { transportPath := basePath + "." + FieldTransport @@ -296,12 +284,12 @@ func (v *TaskConfigValidator) validateTransportConfig() { maestroPath := transportPath + "." + TransportClientMaestro - // Validate targetCluster is set + // Validate target_cluster is set if resource.Transport.Maestro.TargetCluster == "" { v.errors.Add(maestroPath+"."+FieldTargetCluster, - "targetCluster is required for maestro transport") + "target_cluster is required for maestro transport") } else { - // Validate template variables in targetCluster + // Validate template variables in target_cluster v.validateTemplateString(resource.Transport.Maestro.TargetCluster, maestroPath+"."+FieldTargetCluster) } @@ -323,9 +311,9 @@ func (v *TaskConfigValidator) validateTransportConfig() { } func (v *TaskConfigValidator) validateConditionValues() { - for i, precond := range v.config.Spec.Preconditions { + for i, precond := range v.config.Preconditions { for j, cond := range precond.Conditions { - path := fmt.Sprintf("%s.%s[%d].%s[%d]", FieldSpec, FieldPreconditions, i, FieldConditions, j) + path := fmt.Sprintf("%s[%d].%s[%d]", FieldPreconditions, i, FieldConditions, j) v.validateConditionValue(cond.Operator, cond.Value, path) } } @@ -354,10 +342,10 @@ func (v *TaskConfigValidator) validateConditionValue(operator string, value inte } func (v *TaskConfigValidator) validateCaptureFieldExpressions() { - for i, precond := range v.config.Spec.Preconditions { + for i, precond := range v.config.Preconditions { for j, 
capture := range precond.Capture { if capture.Expression != "" && v.celEnv != nil { - path := fmt.Sprintf("%s.%s[%d].%s[%d].%s", FieldSpec, FieldPreconditions, i, FieldCapture, j, FieldExpression) + path := fmt.Sprintf("%s[%d].%s[%d].%s", FieldPreconditions, i, FieldCapture, j, FieldExpression) v.validateCELExpression(capture.Expression, path) } } @@ -366,9 +354,9 @@ func (v *TaskConfigValidator) validateCaptureFieldExpressions() { func (v *TaskConfigValidator) validateTemplateVariables() { // Validate precondition API call URLs and bodies - for i, precond := range v.config.Spec.Preconditions { + for i, precond := range v.config.Preconditions { if precond.APICall != nil { - basePath := fmt.Sprintf("%s.%s[%d].%s", FieldSpec, FieldPreconditions, i, FieldAPICall) + basePath := fmt.Sprintf("%s[%d].%s", FieldPreconditions, i, FieldAPICall) v.validateTemplateString(precond.APICall.URL, basePath+"."+FieldURL) v.validateTemplateString(precond.APICall.Body, basePath+"."+FieldBody) for j, header := range precond.APICall.Headers { @@ -379,8 +367,8 @@ func (v *TaskConfigValidator) validateTemplateVariables() { } // Validate resource manifests and transport config templates - for i, resource := range v.config.Spec.Resources { - resourcePath := fmt.Sprintf("%s.%s[%d]", FieldSpec, FieldResources, i) + for i, resource := range v.config.Resources { + resourcePath := fmt.Sprintf("%s[%d]", FieldResources, i) if manifest, ok := resource.Manifest.(map[string]interface{}); ok { v.validateTemplateMap(manifest, resourcePath+"."+FieldManifest) } @@ -415,10 +403,10 @@ func (v *TaskConfigValidator) validateTemplateVariables() { } // Validate post action API calls - if v.config.Spec.Post != nil { - for i, action := range v.config.Spec.Post.PostActions { + if v.config.Post != nil { + for i, action := range v.config.Post.PostActions { if action.APICall != nil { - basePath := fmt.Sprintf("%s.%s.%s[%d].%s", FieldSpec, FieldPost, FieldPostActions, i, FieldAPICall) + basePath := 
fmt.Sprintf("%s.%s[%d].%s", FieldPost, FieldPostActions, i, FieldAPICall) v.validateTemplateString(action.APICall.URL, basePath+"."+FieldURL) v.validateTemplateString(action.APICall.Body, basePath+"."+FieldBody) for j, header := range action.APICall.Headers { @@ -429,10 +417,10 @@ func (v *TaskConfigValidator) validateTemplateVariables() { } // Validate post payload build value templates - for i, payload := range v.config.Spec.Post.Payloads { + for i, payload := range v.config.Post.Payloads { if payload.Build != nil { if buildMap, ok := payload.Build.(map[string]interface{}); ok { - v.validateTemplateMap(buildMap, fmt.Sprintf("%s.%s.%s[%d].%s", FieldSpec, FieldPost, FieldPayloads, i, FieldBuild)) + v.validateTemplateMap(buildMap, fmt.Sprintf("%s.%s[%d].%s", FieldPost, FieldPayloads, i, FieldBuild)) } } } @@ -505,18 +493,18 @@ func (v *TaskConfigValidator) validateCELExpressions() { return } - for i, precond := range v.config.Spec.Preconditions { + for i, precond := range v.config.Preconditions { if precond.Expression != "" { - path := fmt.Sprintf("%s.%s[%d].%s", FieldSpec, FieldPreconditions, i, FieldExpression) + path := fmt.Sprintf("%s[%d].%s", FieldPreconditions, i, FieldExpression) v.validateCELExpression(precond.Expression, path) } } - if v.config.Spec.Post != nil { - for i, payload := range v.config.Spec.Post.Payloads { + if v.config.Post != nil { + for i, payload := range v.config.Post.Payloads { if payload.Build != nil { if buildMap, ok := payload.Build.(map[string]interface{}); ok { - v.validateBuildExpressions(buildMap, fmt.Sprintf("%s.%s.%s[%d].%s", FieldSpec, FieldPost, FieldPayloads, i, FieldBuild)) + v.validateBuildExpressions(buildMap, fmt.Sprintf("%s.%s[%d].%s", FieldPost, FieldPayloads, i, FieldBuild)) } } } @@ -558,7 +546,7 @@ func (v *TaskConfigValidator) validateBuildExpressions(m map[string]interface{}, } func (v *TaskConfigValidator) validateK8sManifests() { - for i, resource := range v.config.Spec.Resources { + for i, resource := range 
v.config.Resources { // Skip K8s manifest validation for maestro transport — manifest holds ManifestWork content if resource.IsMaestroTransport() { continue @@ -568,7 +556,7 @@ func (v *TaskConfigValidator) validateK8sManifests() { continue } - path := fmt.Sprintf("%s.%s[%d].%s", FieldSpec, FieldResources, i, FieldManifest) + path := fmt.Sprintf("%s[%d].%s", FieldResources, i, FieldManifest) if manifest, ok := resource.Manifest.(map[string]interface{}); ok { if ref, hasRef := manifest[FieldRef].(string); hasRef { @@ -583,7 +571,7 @@ func (v *TaskConfigValidator) validateK8sManifests() { } func (v *TaskConfigValidator) validateK8sManifest(manifest map[string]interface{}, path string) { - requiredFields := []string{FieldAPIVersion, FieldKind, FieldMetadata} + requiredFields := []string{FieldAPIVersion, FieldKind, "metadata"} for _, field := range requiredFields { if _, ok := manifest[field]; !ok { @@ -591,9 +579,9 @@ func (v *TaskConfigValidator) validateK8sManifest(manifest map[string]interface{ } } - if metadata, ok := manifest[FieldMetadata].(map[string]interface{}); ok { + if metadata, ok := manifest["metadata"].(map[string]interface{}); ok { if _, hasName := metadata[FieldName]; !hasName { - v.errors.Add(path+"."+FieldMetadata, fmt.Sprintf("missing required field %q", FieldName)) + v.errors.Add(path+"."+"metadata", fmt.Sprintf("missing required field %q", FieldName)) } } @@ -622,16 +610,6 @@ func isSliceOrArray(value interface{}) bool { return kind == reflect.Slice || kind == reflect.Array } -// IsSupportedAPIVersion checks if the given apiVersion is supported -func IsSupportedAPIVersion(apiVersion string) bool { - for _, v := range SupportedAPIVersions { - if v == apiVersion { - return true - } - } - return false -} - // ValidateAdapterVersion validates that the config's adapter version is compatible // with the expected adapter version. Only major and minor versions are compared; // patch version differences are allowed (patch releases are bug fixes only). 
@@ -641,7 +619,10 @@ func ValidateAdapterVersion(config *AdapterConfig, expectedVersion string) error return nil } - configVersion := config.Spec.Adapter.Version + configVersion := config.Adapter.Version + if configVersion == "" { + return nil + } configSemver, err := semver.NewVersion(configVersion) if err != nil { diff --git a/internal/config_loader/validator_test.go b/internal/config_loader/validator_test.go index 5a84901..2c1198f 100644 --- a/internal/config_loader/validator_test.go +++ b/internal/config_loader/validator_test.go @@ -13,12 +13,7 @@ import ( // baseTaskConfig returns a minimal valid AdapterTaskConfig for testing. // Tests can modify the returned config to set up specific scenarios. func baseTaskConfig() *AdapterTaskConfig { - return &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test-adapter"}, - Spec: AdapterTaskSpec{}, - } + return &AdapterTaskConfig{} } // newTaskValidator is a helper that creates a TaskConfigValidator with semantic validation @@ -30,7 +25,7 @@ func TestValidateConditionOperators(t *testing.T) { // Helper to create task config with a single condition withCondition := func(cond Condition) *AdapterTaskConfig { cfg := baseTaskConfig() - cfg.Spec.Preconditions = []Precondition{{ + cfg.Preconditions = []Precondition{{ ActionBase: ActionBase{Name: "checkStatus"}, Conditions: []Condition{cond}, }} @@ -39,7 +34,7 @@ func TestValidateConditionOperators(t *testing.T) { t.Run("valid operators", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Preconditions = []Precondition{{ + cfg.Preconditions = []Precondition{{ ActionBase: ActionBase{Name: "checkStatus"}, Conditions: []Condition{ {Field: "status", Operator: "equals", Value: "Ready"}, @@ -140,11 +135,11 @@ func TestValidateConditionOperators(t *testing.T) { func TestValidateTemplateVariables(t *testing.T) { t.Run("defined variables", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Params = 
[]Parameter{ + cfg.Params = []Parameter{ {Name: "clusterId", Source: "event.id"}, {Name: "apiUrl", Source: "env.API_URL"}, } - cfg.Spec.Preconditions = []Precondition{{ + cfg.Preconditions = []Precondition{{ ActionBase: ActionBase{ Name: "checkCluster", APICall: &APICall{Method: "GET", URL: "{{ .apiUrl }}/clusters/{{ .clusterId }}"}, @@ -157,8 +152,8 @@ func TestValidateTemplateVariables(t *testing.T) { t.Run("undefined variable in URL", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Params = []Parameter{{Name: "clusterId", Source: "event.id"}} - cfg.Spec.Preconditions = []Precondition{{ + cfg.Params = []Parameter{{Name: "clusterId", Source: "event.id"}} + cfg.Preconditions = []Precondition{{ ActionBase: ActionBase{ Name: "checkCluster", APICall: &APICall{Method: "GET", URL: "{{ .undefinedVar }}/clusters/{{ .clusterId }}"}, @@ -173,8 +168,8 @@ func TestValidateTemplateVariables(t *testing.T) { t.Run("undefined variable in resource manifest", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Params = []Parameter{{Name: "clusterId", Source: "event.id"}} - cfg.Spec.Resources = []Resource{{ + cfg.Params = []Parameter{{Name: "clusterId", Source: "event.id"}} + cfg.Resources = []Resource{{ Name: "testNs", Manifest: map[string]interface{}{ "apiVersion": "v1", @@ -192,15 +187,15 @@ func TestValidateTemplateVariables(t *testing.T) { t.Run("captured variable is available for resources", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Params = []Parameter{{Name: "apiUrl", Source: "env.API_URL"}} - cfg.Spec.Preconditions = []Precondition{{ + cfg.Params = []Parameter{{Name: "apiUrl", Source: "env.API_URL"}} + cfg.Preconditions = []Precondition{{ ActionBase: ActionBase{ Name: "getCluster", APICall: &APICall{Method: "GET", URL: "{{ .apiUrl }}/clusters"}, }, - Capture: []CaptureField{{Name: "clusterName", FieldExpressionDef: FieldExpressionDef{Field: "metadata.name"}}}, + Capture: []CaptureField{{Name: "clusterName", FieldExpressionDef: 
FieldExpressionDef{Field: "name"}}}, }} - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testNs", Manifest: map[string]interface{}{ "apiVersion": "v1", @@ -219,7 +214,7 @@ func TestValidateCELExpressions(t *testing.T) { // Helper to create config with a CEL expression precondition withExpression := func(expr string) *AdapterTaskConfig { cfg := baseTaskConfig() - cfg.Spec.Preconditions = []Precondition{{ActionBase: ActionBase{Name: "check"}, Expression: expr}} + cfg.Preconditions = []Precondition{{ActionBase: ActionBase{Name: "check"}, Expression: expr}} return cfg } @@ -240,7 +235,7 @@ func TestValidateCELExpressions(t *testing.T) { }) t.Run("valid CEL with has() function", func(t *testing.T) { - cfg := withExpression(`has(cluster.status) && cluster.status.phase == "Ready"`) + cfg := withExpression(`has(cluster.status) && cluster.status.conditions.exists(c, c.type == "Ready" && c.status == "True")`) v := newTaskValidator(cfg) require.NoError(t, v.ValidateStructure()) require.NoError(t, v.ValidateSemantic()) @@ -251,7 +246,7 @@ func TestValidateK8sManifests(t *testing.T) { // Helper to create config with a resource manifest withResource := func(manifest map[string]interface{}) *AdapterTaskConfig { cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testResource", Manifest: manifest, Discovery: &DiscoveryConfig{Namespace: "*", ByName: "test"}, @@ -366,11 +361,11 @@ func TestValidationErrorsFormat(t *testing.T) { func TestValidateSemantic(t *testing.T) { // Test that ValidateSemantic catches multiple errors cfg := baseTaskConfig() - cfg.Spec.Preconditions = []Precondition{ + cfg.Preconditions = []Precondition{ {ActionBase: ActionBase{Name: "check1"}, Conditions: []Condition{{Field: "status", Operator: "badOperator", Value: "Ready"}}}, {ActionBase: ActionBase{Name: "check2"}, Expression: "invalid ))) syntax"}, } - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testNs", 
Manifest: map[string]interface{}{ "kind": "Namespace", // missing apiVersion @@ -387,19 +382,19 @@ func TestValidateSemantic(t *testing.T) { } func TestBuiltinVariables(t *testing.T) { - // Test that builtin variables (like metadata.name) are recognized + // Test that builtin variables (like adapter.name) are recognized cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testNs", Manifest: map[string]interface{}{ "apiVersion": "v1", "kind": "Namespace", "metadata": map[string]interface{}{ - "name": "ns-{{ .metadata.name }}", - "labels": map[string]interface{}{"adapter": "{{ .metadata.name }}"}, + "name": "ns-{{ .adapter.name }}", + "labels": map[string]interface{}{"adapter": "{{ .adapter.name }}"}, }, }, - Discovery: &DiscoveryConfig{Namespace: "*", ByName: "ns-{{ .metadata.name }}"}, + Discovery: &DiscoveryConfig{Namespace: "*", ByName: "ns-{{ .adapter.name }}"}, }} v := newTaskValidator(cfg) require.NoError(t, v.ValidateStructure()) @@ -445,7 +440,7 @@ func TestPayloadValidate(t *testing.T) { Name: "test", }, wantError: true, - errorMsg: "must have either 'build' or 'buildRef' set", + errorMsg: "must have either 'build' or 'build_ref' set", }, } @@ -469,7 +464,7 @@ func TestValidateCaptureFields(t *testing.T) { // Helper to create config with capture fields withCapture := func(captures []CaptureField) *AdapterTaskConfig { cfg := baseTaskConfig() - cfg.Spec.Preconditions = []Precondition{{ + cfg.Preconditions = []Precondition{{ ActionBase: ActionBase{ Name: "getStatus", APICall: &APICall{Method: "GET", URL: "http://example.com/api"}, @@ -481,7 +476,7 @@ func TestValidateCaptureFields(t *testing.T) { t.Run("valid capture with field only", func(t *testing.T) { cfg := withCapture([]CaptureField{ - {Name: "clusterName", FieldExpressionDef: FieldExpressionDef{Field: "metadata.name"}}, + {Name: "clusterName", FieldExpressionDef: FieldExpressionDef{Field: "name"}}, {Name: "clusterPhase", FieldExpressionDef: 
FieldExpressionDef{Field: "status.phase"}}, }) v := newTaskValidator(cfg) @@ -497,7 +492,7 @@ func TestValidateCaptureFields(t *testing.T) { }) t.Run("invalid - both field and expression set", func(t *testing.T) { - cfg := withCapture([]CaptureField{{Name: "conflicting", FieldExpressionDef: FieldExpressionDef{Field: "metadata.name", Expression: "1 + 1"}}}) + cfg := withCapture([]CaptureField{{Name: "conflicting", FieldExpressionDef: FieldExpressionDef{Field: "name", Expression: "1 + 1"}}}) err := newTaskValidator(cfg).ValidateStructure() require.Error(t, err) assert.Contains(t, err.Error(), "mutually exclusive") @@ -511,7 +506,7 @@ func TestValidateCaptureFields(t *testing.T) { }) t.Run("invalid - capture name missing", func(t *testing.T) { - cfg := withCapture([]CaptureField{{FieldExpressionDef: FieldExpressionDef{Field: "metadata.name"}}}) + cfg := withCapture([]CaptureField{{FieldExpressionDef: FieldExpressionDef{Field: "name"}}}) err := newTaskValidator(cfg).ValidateStructure() require.Error(t, err) assert.Contains(t, err.Error(), "name is required") @@ -526,14 +521,14 @@ func TestYamlFieldName(t *testing.T) { goFieldName string expectedYaml string }{ - {"ByName", "byName"}, - {"BySelectors", "bySelectors"}, + {"ByName", "by_name"}, + {"BySelectors", "by_selectors"}, {"Field", "field"}, {"Expression", "expression"}, - {"APIVersion", "apiVersion"}, + {"APIVersion", "api_version"}, {"Name", "name"}, {"Namespace", "namespace"}, - {"LabelSelector", "labelSelector"}, + {"LabelSelector", "label_selector"}, } for _, tt := range tests { @@ -551,7 +546,7 @@ func TestFieldNameCachePopulated(t *testing.T) { // Verify key fields are in the cache expectedFields := []string{ "ByName", "BySelectors", "Field", "Expression", - "Name", "Namespace", "APIVersion", "Kind", + "Name", "Namespace", "APIVersion", } for _, field := range expectedFields { @@ -569,7 +564,7 @@ func TestFieldNameCachePopulated(t *testing.T) { func TestValidateTransportConfig(t *testing.T) { t.Run("valid 
kubernetes transport", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testNs", Transport: &TransportConfig{ Client: TransportClientKubernetes, @@ -588,7 +583,7 @@ func TestValidateTransportConfig(t *testing.T) { t.Run("valid maestro transport with inline manifest (ManifestWork)", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testMW", Transport: &TransportConfig{ Client: TransportClientMaestro, @@ -614,7 +609,7 @@ func TestValidateTransportConfig(t *testing.T) { t.Run("unsupported transport client", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testNs", Transport: &TransportConfig{ Client: "unsupported", @@ -635,7 +630,7 @@ func TestValidateTransportConfig(t *testing.T) { t.Run("maestro transport missing maestro config", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testMW", Transport: &TransportConfig{ Client: TransportClientMaestro, @@ -656,7 +651,7 @@ func TestValidateTransportConfig(t *testing.T) { t.Run("maestro transport missing targetCluster", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testMW", Transport: &TransportConfig{ Client: TransportClientMaestro, @@ -678,12 +673,12 @@ func TestValidateTransportConfig(t *testing.T) { // targetCluster is structurally required err := v.ValidateStructure() require.Error(t, err) - assert.Contains(t, err.Error(), "targetCluster") + assert.Contains(t, err.Error(), "target_cluster") }) t.Run("maestro transport missing manifest", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testMW", Transport: &TransportConfig{ Client: TransportClientMaestro, @@ -707,7 +702,7 @@ func 
TestValidateTransportConfig(t *testing.T) { t.Run("kubernetes transport missing manifest", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testNs", Transport: &TransportConfig{ Client: TransportClientKubernetes, @@ -724,7 +719,7 @@ func TestValidateTransportConfig(t *testing.T) { t.Run("no transport defaults to kubernetes - manifest required", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testNs", // No Transport (defaults to kubernetes) // No Manifest @@ -739,8 +734,8 @@ func TestValidateTransportConfig(t *testing.T) { t.Run("maestro transport with template variable in targetCluster", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Params = []Parameter{{Name: "clusterName", Source: "event.name"}} - cfg.Spec.Resources = []Resource{{ + cfg.Params = []Parameter{{Name: "clusterName", Source: "event.name"}} + cfg.Resources = []Resource{{ Name: "testMW", Transport: &TransportConfig{ Client: TransportClientMaestro, @@ -766,7 +761,7 @@ func TestValidateTransportConfig(t *testing.T) { t.Run("maestro transport with undefined template variable in targetCluster", func(t *testing.T) { cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testMW", Transport: &TransportConfig{ Client: TransportClientMaestro, @@ -795,7 +790,7 @@ func TestValidateTransportConfig(t *testing.T) { t.Run("maestro transport skips K8s manifest validation", func(t *testing.T) { // Maestro resources use manifest for ManifestWork content - should skip K8s apiVersion/kind validation cfg := baseTaskConfig() - cfg.Spec.Resources = []Resource{{ + cfg.Resources = []Resource{{ Name: "testMW", Transport: &TransportConfig{ Client: TransportClientMaestro, @@ -824,9 +819,9 @@ func TestValidateFileReferencesManifestRef(t *testing.T) { // Create a test manifest file (ManifestWork content) manifestDir := filepath.Join(tmpDir, 
"templates") - require.NoError(t, os.MkdirAll(manifestDir, 0755)) + require.NoError(t, os.MkdirAll(manifestDir, 0o755)) manifestFile := filepath.Join(manifestDir, "manifestwork.yaml") - require.NoError(t, os.WriteFile(manifestFile, []byte("apiVersion: work.open-cluster-management.io/v1\nkind: ManifestWork"), 0644)) + require.NoError(t, os.WriteFile(manifestFile, []byte("apiVersion: work.open-cluster-management.io/v1\nkind: ManifestWork"), 0o644)) tests := []struct { name string @@ -837,56 +832,46 @@ func TestValidateFileReferencesManifestRef(t *testing.T) { { name: "valid manifest ref for maestro transport", config: &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test"}, - Spec: AdapterTaskSpec{ - Resources: []Resource{{ - Name: "test", - Transport: &TransportConfig{ - Client: TransportClientMaestro, - Maestro: &MaestroTransportConfig{ - TargetCluster: "cluster1", - }, - }, - Manifest: map[string]interface{}{ - "ref": "templates/manifestwork.yaml", + Resources: []Resource{{ + Name: "test", + Transport: &TransportConfig{ + Client: TransportClientMaestro, + Maestro: &MaestroTransportConfig{ + TargetCluster: "cluster1", }, - Discovery: &DiscoveryConfig{ - BySelectors: &SelectorConfig{ - LabelSelector: map[string]string{"app": "test"}, - }, + }, + Manifest: map[string]interface{}{ + "ref": "templates/manifestwork.yaml", + }, + Discovery: &DiscoveryConfig{ + BySelectors: &SelectorConfig{ + LabelSelector: map[string]string{"app": "test"}, }, - }}, - }, + }, + }}, }, wantErr: false, }, { name: "invalid manifest ref - file not found", config: &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test"}, - Spec: AdapterTaskSpec{ - Resources: []Resource{{ - Name: "test", - Transport: &TransportConfig{ - Client: TransportClientMaestro, - Maestro: &MaestroTransportConfig{ - TargetCluster: "cluster1", - }, - }, - Manifest: 
map[string]interface{}{ - "ref": "templates/nonexistent.yaml", + Resources: []Resource{{ + Name: "test", + Transport: &TransportConfig{ + Client: TransportClientMaestro, + Maestro: &MaestroTransportConfig{ + TargetCluster: "cluster1", }, - Discovery: &DiscoveryConfig{ - BySelectors: &SelectorConfig{ - LabelSelector: map[string]string{"app": "test"}, - }, + }, + Manifest: map[string]interface{}{ + "ref": "templates/nonexistent.yaml", + }, + Discovery: &DiscoveryConfig{ + BySelectors: &SelectorConfig{ + LabelSelector: map[string]string{"app": "test"}, }, - }}, - }, + }, + }}, }, wantErr: true, errMsg: "does not exist", @@ -894,29 +879,24 @@ func TestValidateFileReferencesManifestRef(t *testing.T) { { name: "inline manifest - no file reference validation needed", config: &AdapterTaskConfig{ - APIVersion: "hyperfleet.redhat.com/v1alpha1", - Kind: "AdapterTaskConfig", - Metadata: Metadata{Name: "test"}, - Spec: AdapterTaskSpec{ - Resources: []Resource{{ - Name: "test", - Transport: &TransportConfig{ - Client: TransportClientMaestro, - Maestro: &MaestroTransportConfig{ - TargetCluster: "cluster1", - }, - }, - Manifest: map[string]interface{}{ - "apiVersion": "work.open-cluster-management.io/v1", - "kind": "ManifestWork", + Resources: []Resource{{ + Name: "test", + Transport: &TransportConfig{ + Client: TransportClientMaestro, + Maestro: &MaestroTransportConfig{ + TargetCluster: "cluster1", }, - Discovery: &DiscoveryConfig{ - BySelectors: &SelectorConfig{ - LabelSelector: map[string]string{"app": "test"}, - }, + }, + Manifest: map[string]interface{}{ + "apiVersion": "work.open-cluster-management.io/v1", + "kind": "ManifestWork", + }, + Discovery: &DiscoveryConfig{ + BySelectors: &SelectorConfig{ + LabelSelector: map[string]string{"app": "test"}, }, - }}, - }, + }, + }}, }, wantErr: false, }, diff --git a/internal/config_loader/viper_loader.go b/internal/config_loader/viper_loader.go index 8d1fa46..8222821 100644 --- a/internal/config_loader/viper_loader.go +++ 
b/internal/config_loader/viper_loader.go @@ -19,49 +19,85 @@ const EnvPrefix = "HYPERFLEET" // The full env var name is EnvPrefix + "_" + suffix // Note: Uses "::" as key delimiter to avoid conflicts with dots in YAML keys var viperKeyMappings = map[string]string{ - "spec::debugConfig": "DEBUG_CONFIG", - "spec::clients::maestro::grpcServerAddress": "MAESTRO_GRPC_SERVER_ADDRESS", - "spec::clients::maestro::httpServerAddress": "MAESTRO_HTTP_SERVER_ADDRESS", - "spec::clients::maestro::sourceId": "MAESTRO_SOURCE_ID", - "spec::clients::maestro::clientId": "MAESTRO_CLIENT_ID", - "spec::clients::maestro::auth::tlsConfig::caFile": "MAESTRO_CA_FILE", - "spec::clients::maestro::auth::tlsConfig::certFile": "MAESTRO_CERT_FILE", - "spec::clients::maestro::auth::tlsConfig::keyFile": "MAESTRO_KEY_FILE", - "spec::clients::maestro::auth::tlsConfig::httpCaFile": "MAESTRO_HTTP_CA_FILE", - "spec::clients::maestro::timeout": "MAESTRO_TIMEOUT", - "spec::clients::maestro::serverHealthinessTimeout": "MAESTRO_SERVER_HEALTHINESS_TIMEOUT", - "spec::clients::maestro::retryAttempts": "MAESTRO_RETRY_ATTEMPTS", - "spec::clients::maestro::insecure": "MAESTRO_INSECURE", - "spec::clients::hyperfleetApi::baseUrl": "API_BASE_URL", - "spec::clients::hyperfleetApi::version": "API_VERSION", - "spec::clients::hyperfleetApi::timeout": "API_TIMEOUT", - "spec::clients::hyperfleetApi::retryAttempts": "API_RETRY_ATTEMPTS", - "spec::clients::hyperfleetApi::retryBackoff": "API_RETRY_BACKOFF", - "spec::clients::broker::subscriptionId": "BROKER_SUBSCRIPTION_ID", - "spec::clients::broker::topic": "BROKER_TOPIC", + "debug_config": "DEBUG_CONFIG", + "clients::maestro::grpc_server_address": "MAESTRO_GRPC_SERVER_ADDRESS", + "clients::maestro::http_server_address": "MAESTRO_HTTP_SERVER_ADDRESS", + "clients::maestro::source_id": "MAESTRO_SOURCE_ID", + "clients::maestro::client_id": "MAESTRO_CLIENT_ID", + "clients::maestro::auth::type": "MAESTRO_AUTH_TYPE", + "clients::maestro::auth::tls_config::ca_file": 
"MAESTRO_CA_FILE", + "clients::maestro::auth::tls_config::cert_file": "MAESTRO_CERT_FILE", + "clients::maestro::auth::tls_config::key_file": "MAESTRO_KEY_FILE", + "clients::maestro::auth::tls_config::http_ca_file": "MAESTRO_HTTP_CA_FILE", + "clients::maestro::timeout": "MAESTRO_TIMEOUT", + "clients::maestro::server_healthiness_timeout": "MAESTRO_SERVER_HEALTHINESS_TIMEOUT", + "clients::maestro::retry_attempts": "MAESTRO_RETRY_ATTEMPTS", + "clients::maestro::keepalive::time": "MAESTRO_KEEPALIVE_TIME", + "clients::maestro::keepalive::timeout": "MAESTRO_KEEPALIVE_TIMEOUT", + "clients::maestro::insecure": "MAESTRO_INSECURE", + "clients::hyperfleet_api::base_url": "API_BASE_URL", + "clients::hyperfleet_api::version": "API_VERSION", + "clients::hyperfleet_api::timeout": "API_TIMEOUT", + "clients::hyperfleet_api::retry_attempts": "API_RETRY_ATTEMPTS", + "clients::hyperfleet_api::retry_backoff": "API_RETRY_BACKOFF", + "clients::hyperfleet_api::base_delay": "API_BASE_DELAY", + "clients::hyperfleet_api::max_delay": "API_MAX_DELAY", + "clients::broker::subscription_id": "BROKER_SUBSCRIPTION_ID", + "clients::broker::topic": "BROKER_TOPIC", + "clients::kubernetes::kube_config_path": "KUBERNETES_KUBE_CONFIG_PATH", + "clients::kubernetes::api_version": "KUBERNETES_API_VERSION", + "clients::kubernetes::qps": "KUBERNETES_QPS", + "clients::kubernetes::burst": "KUBERNETES_BURST", } // cliFlags defines mappings from CLI flag names to config paths // Note: Uses "::" as key delimiter to avoid conflicts with dots in YAML keys var cliFlags = map[string]string{ - "debug-config": "spec::debugConfig", - "maestro-grpc-server-address": "spec::clients::maestro::grpcServerAddress", - "maestro-http-server-address": "spec::clients::maestro::httpServerAddress", - "maestro-source-id": "spec::clients::maestro::sourceId", - "maestro-client-id": "spec::clients::maestro::clientId", - "maestro-ca-file": "spec::clients::maestro::auth::tlsConfig::caFile", - "maestro-cert-file": 
"spec::clients::maestro::auth::tlsConfig::certFile", - "maestro-key-file": "spec::clients::maestro::auth::tlsConfig::keyFile", - "maestro-timeout": "spec::clients::maestro::timeout", - "maestro-insecure": "spec::clients::maestro::insecure", - "hyperfleet-api-timeout": "spec::clients::hyperfleetApi::timeout", - "hyperfleet-api-retry": "spec::clients::hyperfleetApi::retryAttempts", + "debug-config": "debug_config", + "maestro-grpc-server-address": "clients::maestro::grpc_server_address", + "maestro-http-server-address": "clients::maestro::http_server_address", + "maestro-source-id": "clients::maestro::source_id", + "maestro-client-id": "clients::maestro::client_id", + "maestro-auth-type": "clients::maestro::auth::type", + "maestro-ca-file": "clients::maestro::auth::tls_config::ca_file", + "maestro-cert-file": "clients::maestro::auth::tls_config::cert_file", + "maestro-key-file": "clients::maestro::auth::tls_config::key_file", + "maestro-http-ca-file": "clients::maestro::auth::tls_config::http_ca_file", + "maestro-timeout": "clients::maestro::timeout", + "maestro-server-healthiness-timeout": "clients::maestro::server_healthiness_timeout", + "maestro-retry-attempts": "clients::maestro::retry_attempts", + "maestro-keepalive-time": "clients::maestro::keepalive::time", + "maestro-keepalive-timeout": "clients::maestro::keepalive::timeout", + "maestro-insecure": "clients::maestro::insecure", + "hyperfleet-api-base-url": "clients::hyperfleet_api::base_url", + "hyperfleet-api-version": "clients::hyperfleet_api::version", + "hyperfleet-api-timeout": "clients::hyperfleet_api::timeout", + "hyperfleet-api-retry": "clients::hyperfleet_api::retry_attempts", + "hyperfleet-api-retry-backoff": "clients::hyperfleet_api::retry_backoff", + "hyperfleet-api-base-delay": "clients::hyperfleet_api::base_delay", + "hyperfleet-api-max-delay": "clients::hyperfleet_api::max_delay", + "broker-subscription-id": "clients::broker::subscription_id", + "broker-topic": "clients::broker::topic", + 
"kubernetes-kube-config-path": "clients::kubernetes::kube_config_path", + "kubernetes-api-version": "clients::kubernetes::api_version", + "kubernetes-qps": "clients::kubernetes::qps", + "kubernetes-burst": "clients::kubernetes::burst", + "log-level": "log::level", + "log-format": "log::format", + "log-output": "log::output", +} + +// standardConfigPaths are tried when no explicit config path is provided +var standardConfigPaths = []string{ + "/etc/hyperfleet/config.yaml", // production + "./configs/config.yaml", // development } // loadAdapterConfigWithViper loads the deployment configuration from a YAML file // with environment variable and CLI flag overrides using Viper. // Priority: CLI flags > Environment variables > Config file > Defaults -func loadAdapterConfigWithViper(filePath string, flags *pflag.FlagSet) (*AdapterConfig, error) { +// Returns the resolved config file path alongside the loaded config. +func loadAdapterConfigWithViper(filePath string, flags *pflag.FlagSet) (string, *AdapterConfig, error) { // Use "::" as key delimiter to avoid conflicts with dots in YAML keys // (e.g., "hyperfleet.io/component" in metadata.labels) v := viper.NewWithOptions(viper.KeyDelimiter("::")) @@ -71,33 +107,45 @@ func loadAdapterConfigWithViper(filePath string, flags *pflag.FlagSet) (*Adapter filePath = os.Getenv(EnvAdapterConfig) } + // Try standard paths if no path configured + if filePath == "" { + for _, p := range standardConfigPaths { + if _, err := os.Stat(p); err == nil { + filePath = p + break + } + } + } + if filePath == "" { - return nil, fmt.Errorf("adapter config file path is required (use --config flag or %s env var)", + return "", nil, fmt.Errorf("adapter config file path is required (use --config flag or %s env var)", EnvAdapterConfig) } // Read the YAML file first to get base configuration data, err := os.ReadFile(filePath) if err != nil { - return nil, fmt.Errorf("failed to read adapter config file %q: %w", filePath, err) + return "", nil, 
fmt.Errorf("failed to read adapter config file %q: %w", filePath, err) } - // Parse YAML into a map for Viper - var configMap map[string]interface{} - - reader := bytes.NewReader(data) - decoder := yaml.NewDecoder(reader) - - decoder.KnownFields(true) + // Pre-validate YAML against the AdapterConfig struct to catch unknown fields. + // KnownFields only works when decoding into a struct, not a map. + preValidator := yaml.NewDecoder(bytes.NewReader(data)) + preValidator.KnownFields(true) + var validateConfig AdapterConfig + if err := preValidator.Decode(&validateConfig); err != nil { + return "", nil, fmt.Errorf("failed to parse adapter config YAML: %w", err) + } - if err := decoder.Decode(&configMap); err != nil { - // if err := yaml.Unmarshal(data, &configMap); err != nil { - return nil, fmt.Errorf("failed to parse adapter config YAML: %w", err) + // Parse YAML into a map for Viper (env/CLI overrides are applied next) + var configMap map[string]interface{} + if err := yaml.Unmarshal(data, &configMap); err != nil { + return "", nil, fmt.Errorf("failed to parse adapter config YAML: %w", err) } // Load the map into Viper if err := v.MergeConfigMap(configMap); err != nil { - return nil, fmt.Errorf("failed to merge config map: %w", err) + return "", nil, fmt.Errorf("failed to merge config map: %w", err) } // Bind environment variables @@ -117,15 +165,26 @@ func loadAdapterConfigWithViper(filePath string, flags *pflag.FlagSet) (*Adapter // Legacy broker env vars without HYPERFLEET_ prefix (kept for compatibility) if os.Getenv(EnvPrefix+"_BROKER_SUBSCRIPTION_ID") == "" { if val := os.Getenv("BROKER_SUBSCRIPTION_ID"); val != "" { - v.Set("spec::clients::broker::subscriptionId", val) + v.Set("clients::broker::subscription_id", val) } } if os.Getenv(EnvPrefix+"_BROKER_TOPIC") == "" { if val := os.Getenv("BROKER_TOPIC"); val != "" { - v.Set("spec::clients::broker::topic", val) + v.Set("clients::broker::topic", val) } } + // Log env vars use LOG_ prefix without HYPERFLEET_ 
(consistent with serve mode) + if val := os.Getenv("LOG_LEVEL"); val != "" { + v.Set("log::level", strings.ToLower(val)) + } + if val := os.Getenv("LOG_FORMAT"); val != "" { + v.Set("log::format", strings.ToLower(val)) + } + if val := os.Getenv("LOG_OUTPUT"); val != "" { + v.Set("log::output", val) + } + // Bind CLI flags if provided if flags != nil { for flagName, configPath := range cliFlags { @@ -138,10 +197,10 @@ func loadAdapterConfigWithViper(filePath string, flags *pflag.FlagSet) (*Adapter // Unmarshal into AdapterConfig struct var config AdapterConfig if err := v.Unmarshal(&config); err != nil { - return nil, fmt.Errorf("failed to unmarshal adapter config: %w", err) + return "", nil, fmt.Errorf("failed to unmarshal adapter config: %w", err) } - return &config, nil + return filePath, &config, nil } // loadTaskConfig loads the task configuration from a YAML file without Viper overrides. @@ -162,7 +221,9 @@ func loadTaskConfig(filePath string) (*AdapterTaskConfig, error) { } var config AdapterTaskConfig - if err := yaml.Unmarshal(data, &config); err != nil { + decoder := yaml.NewDecoder(bytes.NewReader(data)) + decoder.KnownFields(true) + if err := decoder.Decode(&config); err != nil { return nil, fmt.Errorf("failed to parse task config YAML: %w", err) } @@ -179,7 +240,8 @@ func getBaseDir(filePath string) (string, error) { } // loadAdapterConfigWithViperGeneric wraps loadAdapterConfigWithViper, binding CLI flags if provided and of correct type. -func loadAdapterConfigWithViperGeneric(filePath string, flags interface{}) (*AdapterConfig, error) { +// Returns the resolved config file path alongside the loaded config. 
+func loadAdapterConfigWithViperGeneric(filePath string, flags interface{}) (string, *AdapterConfig, error) { if pflags, ok := flags.(*pflag.FlagSet); ok && pflags != nil { return loadAdapterConfigWithViper(filePath, pflags) } diff --git a/internal/criteria/README.md b/internal/criteria/README.md index 3613d60..433c17e 100644 --- a/internal/criteria/README.md +++ b/internal/criteria/README.md @@ -103,7 +103,7 @@ The `ExtractField` function supports both simple dot notation and Kubernetes JSO ```go // Simple dot notation (auto-converted to JSONPath internally) -result, err := criteria.ExtractField(data, "metadata.name") +result, err := criteria.ExtractField(data, ".name") if err != nil { // Parse error (invalid JSONPath syntax) } @@ -120,6 +120,7 @@ result, err := criteria.ExtractField(data, "{.items[?(@.adapter=='landing-zone-a ``` **FieldResult structure:** + - `Value`: The extracted value (nil if field not found or empty) - `Error`: Runtime extraction error (e.g., field not found) - not a parse error @@ -127,7 +128,7 @@ result, err := criteria.ExtractField(data, "{.items[?(@.adapter=='landing-zone-a | Syntax | Description | Example | |--------|-------------|---------| -| `.field` | Child field | `{.metadata.name}` | +| `.field` | Child field | `{.name}` | | `[n]` | Array index | `{.items[0]}` | | `[*]` | All elements | `{.items[*].name}` | | `[?(@.x=='y')]` | Filter by value | `{.items[?(@.status=='Ready')]}` | @@ -148,11 +149,13 @@ result, err = evaluator.ExtractValue("", "items.filter(i, i.status == 'active'). 
``` The `ExtractValueResult` contains: + - `Value`: The extracted value (nil if field not found or empty) - `Source`: The field path or expression used - `Error`: Runtime extraction error (if any) **Error handling:** + - Returns `error` (2nd return) only for **parse errors** (invalid JSONPath/CEL syntax) - Field not found → `result.Value = nil` (allows caller to use default value) @@ -326,4 +329,3 @@ preconditions: - field: "vpcId" operator: "exists" ``` - diff --git a/internal/criteria/cel_evaluator_test.go b/internal/criteria/cel_evaluator_test.go index 7d0083f..a324e68 100644 --- a/internal/criteria/cel_evaluator_test.go +++ b/internal/criteria/cel_evaluator_test.go @@ -123,7 +123,9 @@ func TestCELEvaluatorWithNestedData(t *testing.T) { ctx := NewEvaluationContext() ctx.Set("cluster", map[string]interface{}{ "status": map[string]interface{}{ - "phase": "Ready", + "conditions": []interface{}{ + map[string]interface{}{"type": "Ready", "status": "True"}, + }, }, "spec": map[string]interface{}{ "replicas": 3, @@ -134,7 +136,7 @@ func TestCELEvaluatorWithNestedData(t *testing.T) { require.NoError(t, err) // Test nested field access - result, err := evaluator.EvaluateSafe(`cluster.status.phase == "Ready"`) + result, err := evaluator.EvaluateSafe(`cluster.status.conditions.exists(c, c.type == "Ready" && c.status == "True")`) require.NoError(t, err) assert.False(t, result.HasError()) assert.True(t, result.Matched) @@ -150,7 +152,9 @@ func TestCELEvaluatorEvaluateSafe(t *testing.T) { ctx := NewEvaluationContext() ctx.Set("cluster", map[string]interface{}{ "status": map[string]interface{}{ - "phase": "Ready", + "conditions": []interface{}{ + map[string]interface{}{"type": "Ready", "status": "True"}, + }, }, }) ctx.Set("nullValue", nil) @@ -159,7 +163,7 @@ func TestCELEvaluatorEvaluateSafe(t *testing.T) { require.NoError(t, err) t.Run("successful evaluation", func(t *testing.T) { - result, err := evaluator.EvaluateSafe(`cluster.status.phase == "Ready"`) + result, err := 
evaluator.EvaluateSafe(`cluster.status.conditions.exists(c, c.type == "Ready" && c.status == "True")`) require.NoError(t, err, "EvaluateSafe should not return error for valid expression") assert.False(t, result.HasError()) assert.True(t, result.Matched) @@ -295,7 +299,9 @@ func TestCELEvaluatorCustomFunctions(t *testing.T) { ctx.Set("resources", map[string]interface{}{ "managedCluster": map[string]interface{}{ "status": map[string]interface{}{ - "phase": "Ready", + "conditions": []interface{}{ + map[string]interface{}{"type": "Ready", "status": "True"}, + }, }, }, "manifestWork": map[string]interface{}{ @@ -322,10 +328,11 @@ func TestCELEvaluatorCustomFunctions(t *testing.T) { }) t.Run("dig safely reads nested fields", func(t *testing.T) { - result, err := evaluator.EvaluateSafe(`dig(resources, "managedCluster.status.phase")`) + result, err := evaluator.EvaluateSafe(`dig(resources, "managedCluster.status.conditions")`) require.NoError(t, err) require.False(t, result.HasError()) - assert.Equal(t, "Ready", result.Value) + assert.NotNil(t, result.Value) + assert.Equal(t, []interface{}{map[string]interface{}{"type": "Ready", "status": "True"}}, result.Value) }) t.Run("dig returns null for missing path", func(t *testing.T) { diff --git a/internal/criteria/evaluator_test.go b/internal/criteria/evaluator_test.go index e44e01e..6ad982f 100644 --- a/internal/criteria/evaluator_test.go +++ b/internal/criteria/evaluator_test.go @@ -39,8 +39,8 @@ func TestEvaluationContextGetField(t *testing.T) { ctx := NewEvaluationContext() ctx.Set("cluster", map[string]interface{}{ "status": map[string]interface{}{ - "phase": "Ready", "conditions": []interface{}{ + map[string]interface{}{"type": "Ready", "status": "True"}, map[string]interface{}{"type": "Available", "status": "True"}, }, }, @@ -57,8 +57,11 @@ func TestEvaluationContextGetField(t *testing.T) { }{ { name: "simple nested field", - path: "cluster.status.phase", - want: "Ready", + path: "cluster.status.conditions", + want: 
[]interface{}{ + map[string]interface{}{"type": "Ready", "status": "True"}, + map[string]interface{}{"type": "Available", "status": "True"}, + }, }, { name: "deeply nested field", @@ -749,7 +752,7 @@ func TestExtractFieldJSONPath(t *testing.T) { }, }, }, - "metadata": map[string]interface{}{ + "adapter": map[string]interface{}{ "name": "test-resource", }, } @@ -792,7 +795,7 @@ func TestExtractFieldJSONPath(t *testing.T) { }, { name: "Simple path still works", - path: "metadata.name", + path: "adapter.name", want: "test-resource", }, } @@ -917,11 +920,11 @@ func TestNewEvaluatorErrorsWithNilParams(t *testing.T) { func TestExtractValue(t *testing.T) { ctx := NewEvaluationContext() ctx.Set("cluster", map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-cluster", - }, + "name": "test-cluster", "status": map[string]interface{}{ - "phase": "Ready", + "conditions": []interface{}{ + map[string]interface{}{"type": "Ready", "status": "True"}, + }, }, }) @@ -929,14 +932,14 @@ func TestExtractValue(t *testing.T) { require.NoError(t, err) // Get existing field - result, err := evaluator.ExtractValue("cluster.metadata.name", "") + result, err := evaluator.ExtractValue("cluster.name", "") require.NoError(t, err) assert.Equal(t, "test-cluster", result.Value) // Get nested field - result, err = evaluator.ExtractValue("cluster.status.phase", "") + result, err = evaluator.ExtractValue("cluster.status.conditions", "") require.NoError(t, err) - assert.Equal(t, "Ready", result.Value) + assert.Equal(t, []interface{}{map[string]interface{}{"type": "Ready", "status": "True"}}, result.Value) // Get non-existent field - returns nil value (not error) result, err = evaluator.ExtractValue("cluster.nonexistent", "") @@ -1046,9 +1049,7 @@ func TestEvaluationResultStruct(t *testing.T) { func TestNullHandling(t *testing.T) { ctx := NewEvaluationContext() ctx.Set("cluster", map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-cluster", - }, + "name": 
"test-cluster", "status": nil, // null value "spec": map[string]interface{}{ "provider": nil, // null nested value @@ -1062,8 +1063,8 @@ func TestNullHandling(t *testing.T) { require.NoError(t, err) t.Run("access field on null parent returns nil value", func(t *testing.T) { - // Accessing cluster.status.phase when status is null - returns nil value (not error) - result, err := evaluator.ExtractValue("cluster.status.phase", "") + // Accessing cluster.status.conditions when status is null - returns nil value (not error) + result, err := evaluator.ExtractValue("cluster.status.conditions", "") assert.NoError(t, err) // No parse error assert.Nil(t, result.Value) // Value is nil (field not found) }) @@ -1075,7 +1076,7 @@ func TestNullHandling(t *testing.T) { }) t.Run("existing field still works", func(t *testing.T) { - result, err := evaluator.EvaluateCondition("cluster.metadata.name", OperatorEquals, "test-cluster") + result, err := evaluator.EvaluateCondition("cluster.name", OperatorEquals, "test-cluster") assert.NoError(t, err) assert.True(t, result.Matched) }) diff --git a/internal/executor/README.md b/internal/executor/README.md index 7c2fb93..cd1701c 100644 --- a/internal/executor/README.md +++ b/internal/executor/README.md @@ -173,7 +173,7 @@ Executes preconditions with optional API calls and condition evaluation: ```yaml preconditions: - name: "checkClusterStatus" - apiCall: + api_call: method: "GET" url: "{{ .apiBaseUrl }}/clusters/{{ .clusterId }}" capture: @@ -237,7 +237,7 @@ Preconditions have **two different data scopes** for capture and conditions: ```yaml preconditions: - name: "getCluster" - apiCall: + api_call: url: "{{ .apiBaseUrl }}/clusters/{{ .clusterId }}" method: GET # No need to capture everything - conditions can access full response @@ -298,15 +298,15 @@ resources: metadata: name: "cluster-{{ .clusterId }}" discovery: - byName: "cluster-{{ .clusterId }}" - + by_name: "cluster-{{ .clusterId }}" + - name: "externalTemplate" manifest: ref: 
"templates/deployment.yaml" discovery: namespace: "cluster-{{ .clusterId }}" - bySelectors: - labelSelector: + by_selectors: + label_selector: app: "myapp" ``` @@ -318,7 +318,7 @@ resources: |-----------|------|-------------| | `create` | Resource doesn't exist | Creates new resource | | `update` | Resource exists | Updates existing resource | -| `recreate` | `recreateOnChange: true` | Deletes and recreates | +| `recreate` | `recreate_on_change: true` | Deletes and recreates | | `skip` | No changes needed | No operation performed | | `dry_run` | Dry run mode | Simulated operation | @@ -341,9 +341,9 @@ post: field: "adapter.errorMessage" # JSONPath extraction default: "" # Fallback if field not found - postActions: + post_actions: - name: "reportStatus" - apiCall: + api_call: method: "POST" url: "{{ .apiBaseUrl }}/clusters/{{ .clusterId }}/statuses" body: "{{ .statusPayload }}" @@ -442,9 +442,9 @@ post: expression: "adapter.skipReason != '' ? adapter.skipReason : (adapter.errorMessage != '' ? 
adapter.errorMessage : 'Success')" default: "No message" observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - postActions: + post_actions: - name: "reportStatus" - apiCall: + api_call: method: "POST" url: "{{ .apiBaseUrl }}/clusters/{{ .clusterId }}/statuses" body: "{{ .statusPayload }}" @@ -477,7 +477,7 @@ url: "{{ .apiBaseUrl }}/api/{{ .apiVersion }}/clusters/{{ .clusterId }}" |--------|---------| | Extracted params | `{{ .clusterId }}` | | Captured fields | `{{ .readyConditionStatus }}` | -| Adapter metadata | `{{ .metadata.name }}` | +| Adapter metadata | `{{ .adapter.name }}` | | Event metadata | `{{ .eventMetadata.id }}` | ## Integration diff --git a/internal/executor/executor.go b/internal/executor/executor.go index ce92c60..c152f63 100644 --- a/internal/executor/executor.go +++ b/internal/executor/executor.go @@ -119,7 +119,7 @@ func (e *Executor) Execute(ctx context.Context, data interface{}) *ExecutionResu // Phase 2: Preconditions result.CurrentPhase = PhasePreconditions - preconditions := e.config.Config.Spec.Preconditions + preconditions := e.config.Config.Preconditions e.log.Infof(ctx, "Phase %s: RUNNING - %d configured", result.CurrentPhase, len(preconditions)) precondOutcome := e.precondExecutor.ExecuteAll(ctx, preconditions, execCtx) result.PreconditionResults = precondOutcome.Results @@ -149,7 +149,7 @@ func (e *Executor) Execute(ctx context.Context, data interface{}) *ExecutionResu // Phase 3: Resources (skip if preconditions not met or previous error) result.CurrentPhase = PhaseResources - resources := e.config.Config.Spec.Resources + resources := e.config.Config.Resources e.log.Infof(ctx, "Phase %s: RUNNING - %d configured", result.CurrentPhase, len(resources)) if !result.ResourcesSkipped { resourceResults, err := e.resourceExecutor.ExecuteAll(ctx, resources, execCtx) @@ -172,7 +172,7 @@ func (e *Executor) Execute(ctx context.Context, data interface{}) *ExecutionResu // Phase 4: Post Actions (always execute for error reporting) 
result.CurrentPhase = PhasePostActions - postConfig := e.config.Config.Spec.Post + postConfig := e.config.Config.Post postActionCount := 0 if postConfig != nil { postActionCount = len(postConfig.PostActions) @@ -211,15 +211,23 @@ func (e *Executor) Execute(ctx context.Context, data interface{}) *ExecutionResu // executeParamExtraction extracts parameters from the event and environment func (e *Executor) executeParamExtraction(execCtx *ExecutionContext) error { - // Extract configured parameters - if err := extractConfigParams(e.config.Config, execCtx); err != nil { - return err + configMap, err := configToMap(e.config.Config) + if err != nil { + return NewExecutorError(PhaseParamExtraction, "config", "failed to marshal config", err) } - // Add metadata params - addMetadataParams(e.config.Config, execCtx) + // Use a redacted config map for template-accessible params to avoid exposing sensitive + // values (e.g. TLS cert paths) in rendered manifests or logs. + redactedMap, err := configToMap(e.config.Config.Redacted()) + if err != nil { + return NewExecutorError(PhaseParamExtraction, "config", "failed to marshal redacted config", err) + } - return nil + addAdapterParams(e.config.Config, execCtx, redactedMap) + + // config.* param sources resolve against the real (unredacted) config so that + // sensitive fields like cert paths can still be explicitly extracted when needed. + return extractConfigParams(e.config.Config, execCtx, configMap) } // startTracedExecution creates an OTel span and adds trace context to logs. 
@@ -230,7 +238,7 @@ func (e *Executor) executeParamExtraction(execCtx *ExecutionContext) error { // - Adds trace_id and span_id to logger context (for log correlation) // - The trace context is automatically propagated to outgoing HTTP requests func (e *Executor) startTracedExecution(ctx context.Context) (context.Context, trace.Span) { - componentName := e.config.Config.Metadata.Name + componentName := e.config.Config.Adapter.Name ctx, span := otel.Tracer(componentName).Start(ctx, "Execute") // Add trace_id and span_id to logger context for log correlation diff --git a/internal/executor/executor_test.go b/internal/executor/executor_test.go index 8ac23df..e17a717 100644 --- a/internal/executor/executor_test.go +++ b/internal/executor/executor_test.go @@ -85,8 +85,9 @@ func TestNewExecutor(t *testing.T) { func TestExecutorBuilder(t *testing.T) { config := &config_loader.Config{ - Metadata: config_loader.Metadata{ - Name: "test-adapter", + Adapter: config_loader.AdapterInfo{ + Name: "test-adapter", + Version: "1.0.0", }, } @@ -239,21 +240,20 @@ func TestExecute_ParamExtraction(t *testing.T) { t.Setenv("TEST_VAR", "test-value") config := &config_loader.Config{ - Metadata: config_loader.Metadata{ - Name: "test-adapter", - }, - Spec: config_loader.ConfigSpec{ - Params: []config_loader.Parameter{ - { - Name: "testParam", - Source: "env.TEST_VAR", - Required: true, - }, - { - Name: "eventParam", - Source: "event.id", - Required: true, - }, + Adapter: config_loader.AdapterInfo{ + Name: "test-adapter", + Version: "1.0.0", + }, + Params: []config_loader.Parameter{ + { + Name: "testParam", + Source: "env.TEST_VAR", + Required: true, + }, + { + Name: "eventParam", + Source: "event.id", + Required: true, }, }, } @@ -264,7 +264,6 @@ func TestExecute_ParamExtraction(t *testing.T) { WithTransportClient(k8s_client.NewMockK8sClient()). WithLogger(logger.NewTestLogger()). 
Build() - if err != nil { t.Fatalf("unexpected error creating executor: %v", err) } @@ -348,6 +347,37 @@ func TestParamExtractor(t *testing.T) { }, expectError: true, }, + { + name: "extract from config", + params: []config_loader.Parameter{ + {Name: "adapterName", Source: "config.adapter.name"}, + }, + expectKey: "adapterName", + expectValue: "test", + }, + { + name: "extract nested from config", + params: []config_loader.Parameter{ + {Name: "adapterVersion", Source: "config.adapter.version"}, + }, + expectKey: "adapterVersion", + expectValue: "1.0.0", + }, + { + name: "use default for missing optional config field", + params: []config_loader.Parameter{ + {Name: "optional", Source: "config.nonexistent", Default: "fallback"}, + }, + expectKey: "optional", + expectValue: "fallback", + }, + { + name: "fail on missing required config field", + params: []config_loader.Parameter{ + {Name: "required", Source: "config.nonexistent", Required: true}, + }, + expectError: true, + }, } for _, tt := range tests { @@ -357,16 +387,17 @@ func TestParamExtractor(t *testing.T) { // Create config with test params config := &config_loader.Config{ - Metadata: config_loader.Metadata{ - Name: "test", - }, - Spec: config_loader.ConfigSpec{ - Params: tt.params, + Adapter: config_loader.AdapterInfo{ + Name: "test", + Version: "1.0.0", }, + Params: tt.params, } // Extract params using pure function - err := extractConfigParams(config, execCtx) + configMap, err := configToMap(config) + require.NoError(t, err) + err = extractConfigParams(config, execCtx, configMap) if tt.expectError { assert.Error(t, err) @@ -501,12 +532,11 @@ func TestSequentialExecution_Preconditions(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config := &config_loader.Config{ - Metadata: config_loader.Metadata{ - Name: "test-adapter", - }, - Spec: config_loader.ConfigSpec{ - Preconditions: tt.preconditions, + Adapter: config_loader.AdapterInfo{ + Name: "test-adapter", + Version: "1.0.0", }, 
+ Preconditions: tt.preconditions, } exec, err := NewBuilder(). @@ -515,7 +545,6 @@ func TestSequentialExecution_Preconditions(t *testing.T) { WithTransportClient(k8s_client.NewMockK8sClient()). WithLogger(logger.NewTestLogger()). Build() - if err != nil { t.Fatalf("unexpected error creating executor: %v", err) } @@ -603,12 +632,11 @@ func TestSequentialExecution_Resources(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config := &config_loader.Config{ - Metadata: config_loader.Metadata{ - Name: "test-adapter", - }, - Spec: config_loader.ConfigSpec{ - Resources: tt.resources, + Adapter: config_loader.AdapterInfo{ + Name: "test-adapter", + Version: "1.0.0", }, + Resources: tt.resources, } exec, err := NewBuilder(). @@ -617,7 +645,6 @@ func TestSequentialExecution_Resources(t *testing.T) { WithTransportClient(k8s_client.NewMockK8sClient()). WithLogger(logger.NewTestLogger()). Build() - if err != nil { t.Fatalf("unexpected error creating executor: %v", err) } @@ -668,12 +695,11 @@ func TestSequentialExecution_PostActions(t *testing.T) { } config := &config_loader.Config{ - Metadata: config_loader.Metadata{ - Name: "test-adapter", - }, - Spec: config_loader.ConfigSpec{ - Post: postConfig, + Adapter: config_loader.AdapterInfo{ + Name: "test-adapter", + Version: "1.0.0", }, + Post: postConfig, } mockClient := newMockAPIClient() @@ -688,7 +714,6 @@ func TestSequentialExecution_PostActions(t *testing.T) { WithTransportClient(k8s_client.NewMockK8sClient()). WithLogger(logger.NewTestLogger()). 
Build() - if err != nil { t.Fatalf("unexpected error creating executor: %v", err) } @@ -744,12 +769,11 @@ func TestSequentialExecution_SkipReasonCapture(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config := &config_loader.Config{ - Metadata: config_loader.Metadata{ - Name: "test-adapter", - }, - Spec: config_loader.ConfigSpec{ - Preconditions: tt.preconditions, + Adapter: config_loader.AdapterInfo{ + Name: "test-adapter", + Version: "1.0.0", }, + Preconditions: tt.preconditions, } exec, err := NewBuilder(). @@ -758,7 +782,6 @@ func TestSequentialExecution_SkipReasonCapture(t *testing.T) { WithTransportClient(k8s_client.NewMockK8sClient()). WithLogger(logger.NewTestLogger()). Build() - if err != nil { t.Fatalf("unexpected error creating executor: %v", err) } @@ -812,8 +835,8 @@ func TestCreateHandler_MetricsRecording(t *testing.T) { recorder := metrics.NewRecorder("test-adapter", "v0.1.0", registry) config := &config_loader.Config{ - Metadata: config_loader.Metadata{Name: "test-adapter"}, - Spec: config_loader.ConfigSpec{Preconditions: tt.preconditions}, + Adapter: config_loader.AdapterInfo{Name: "test-adapter", Version: "v0.1.0"}, + Preconditions: tt.preconditions, } exec, err := NewBuilder(). 
@@ -860,11 +883,9 @@ func TestCreateHandler_MetricsRecording_Failed(t *testing.T) { recorder := metrics.NewRecorder("test-adapter", "v0.1.0", registry) config := &config_loader.Config{ - Metadata: config_loader.Metadata{Name: "test-adapter"}, - Spec: config_loader.ConfigSpec{ - Params: []config_loader.Parameter{ - {Name: "required", Source: "env.MISSING_VAR", Required: true}, - }, + Adapter: config_loader.AdapterInfo{Name: "test-adapter", Version: "v0.1.0"}, + Params: []config_loader.Parameter{ + {Name: "required", Source: "env.MISSING_VAR", Required: true}, }, } @@ -905,7 +926,7 @@ func TestCreateHandler_MetricsRecording_Failed(t *testing.T) { // TestCreateHandler_NilMetricsRecorder verifies handler works without a metrics recorder func TestCreateHandler_NilMetricsRecorder(t *testing.T) { config := &config_loader.Config{ - Metadata: config_loader.Metadata{Name: "test-adapter"}, + Adapter: config_loader.AdapterInfo{Name: "test-adapter", Version: "v0.1.0"}, } exec, err := NewBuilder(). diff --git a/internal/executor/param_extractor.go b/internal/executor/param_extractor.go index 08763d9..8f8779f 100644 --- a/internal/executor/param_extractor.go +++ b/internal/executor/param_extractor.go @@ -2,25 +2,19 @@ package executor import ( "fmt" - "math" "os" - "strconv" "strings" + "github.com/go-viper/mapstructure/v2" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/utils" ) -// ParamConfig interface allows extractConfigParams to work with both AdapterConfig and Config -type ParamConfig interface { - GetParams() []config_loader.Parameter - GetMetadata() config_loader.Metadata -} - // extractConfigParams extracts all configured parameters and populates execCtx.Params // This is a pure function that directly modifies execCtx for simplicity -func extractConfigParams(config ParamConfig, execCtx *ExecutionContext) error { - for _, param := range config.GetParams() { - value, err := 
extractParam(param, execCtx.EventData) +func extractConfigParams(config *config_loader.Config, execCtx *ExecutionContext, configMap map[string]interface{}) error { + for _, param := range config.Params { + value, err := extractParam(param, execCtx.EventData, configMap) if err != nil { if param.Required { return NewExecutorError(PhaseParamExtraction, param.Name, @@ -68,7 +62,7 @@ func extractConfigParams(config ParamConfig, execCtx *ExecutionContext) error { } // extractParam extracts a single parameter based on its source -func extractParam(param config_loader.Parameter, eventData map[string]interface{}) (interface{}, error) { +func extractParam(param config_loader.Parameter, eventData map[string]interface{}, configMap map[string]interface{}) (interface{}, error) { source := param.Source // Handle different source types @@ -76,16 +70,36 @@ func extractParam(param config_loader.Parameter, eventData map[string]interface{ case strings.HasPrefix(source, "env."): return extractFromEnv(source[4:]) case strings.HasPrefix(source, "event."): - return extractFromEvent(source[6:], eventData) + return utils.GetNestedValue(eventData, source[6:]) + case strings.HasPrefix(source, "config."): + return utils.GetNestedValue(configMap, source[7:]) case source == "": // No source specified, return default or nil return param.Default, nil default: // Try to extract from event data directly - return extractFromEvent(source, eventData) + return utils.GetNestedValue(eventData, source) } } +// configToMap converts a Config to map[string]interface{} using the yaml struct tags for key names. +// mapstructure reads the "yaml" tag for key names but ignores the omitempty option, so zero-valued +// fields like debug_config=false are preserved in the resulting map. 
+func configToMap(cfg *config_loader.Config) (map[string]interface{}, error) { + var m map[string]interface{} + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + TagName: "yaml", + Result: &m, + }) + if err != nil { + return nil, fmt.Errorf("failed to create config decoder: %w", err) + } + if err := decoder.Decode(cfg); err != nil { + return nil, fmt.Errorf("failed to convert config to map: %w", err) + } + return m, nil +} + // extractFromEnv extracts a value from environment variables func extractFromEnv(envVar string) (interface{}, error) { value, exists := os.LookupEnv(envVar) @@ -95,232 +109,34 @@ func extractFromEnv(envVar string) (interface{}, error) { return value, nil } -// extractFromEvent extracts a value from event data using dot notation -func extractFromEvent(path string, eventData map[string]interface{}) (interface{}, error) { - parts := strings.Split(path, ".") - var current interface{} = eventData - - for i, part := range parts { - switch v := current.(type) { - case map[string]interface{}: - val, ok := v[part] - if !ok { - return nil, fmt.Errorf("field '%s' not found at path '%s'", part, strings.Join(parts[:i+1], ".")) - } - current = val - case map[interface{}]interface{}: - val, ok := v[part] - if !ok { - return nil, fmt.Errorf("field '%s' not found at path '%s'", part, strings.Join(parts[:i+1], ".")) - } - current = val - default: - return nil, fmt.Errorf("cannot access field '%s': parent is not a map (got %T)", part, current) - } - } - - return current, nil -} - -// addMetadataParams adds adapter and event metadata to execCtx.Params -func addMetadataParams(config ParamConfig, execCtx *ExecutionContext) { - metadata := config.GetMetadata() - // Add metadata from adapter config - execCtx.Params["metadata"] = map[string]interface{}{ - "name": metadata.Name, - "labels": metadata.Labels, +// addAdapterParams adds adapter info and the full config map to execCtx.Params +func addAdapterParams(config *config_loader.Config, execCtx 
*ExecutionContext, configMap map[string]interface{}) { + execCtx.Params["adapter"] = map[string]interface{}{ + "name": config.Adapter.Name, + "version": config.Adapter.Version, } + execCtx.Params["config"] = configMap } -// convertParamType converts a value to the specified type +// convertParamType converts a value to the specified type. // Supported types: string, int, int64, float, float64, bool func convertParamType(value interface{}, targetType string) (interface{}, error) { - // If value is already the target type, return as-is - switch targetType { - case "string": - return convertToString(value) - case "int", "int64": - return convertToInt64(value) - case "float", "float64": - return convertToFloat64(value) - case "bool": - return convertToBool(value) - default: - return nil, fmt.Errorf("unsupported type: %s (supported: string, int, int64, float, float64, bool)", targetType) - } + return utils.ConvertToType(value, targetType) } -// convertToString converts a value to string -// //nolint:unparam // error kept for API consistency with convertToInt64 func convertToString(value interface{}) (string, error) { - switch v := value.(type) { - case string: - return v, nil - case int, int8, int16, int32, int64: - return fmt.Sprintf("%d", v), nil - case uint, uint8, uint16, uint32, uint64: - return fmt.Sprintf("%d", v), nil - case float32: - return strconv.FormatFloat(float64(v), 'f', -1, 32), nil - case float64: - return strconv.FormatFloat(v, 'f', -1, 64), nil - case bool: - return strconv.FormatBool(v), nil - default: - return fmt.Sprintf("%v", v), nil - } + return utils.ConvertToString(value) } -// convertToInt64 converts a value to int64 func convertToInt64(value interface{}) (int64, error) { - switch v := value.(type) { - case int: - return int64(v), nil - case uint64: - if v > math.MaxInt64 { - return 0, fmt.Errorf("uint64 value %d overflows int64", v) - } - return int64(v), nil - case int8: - return int64(v), nil - case int16: - return int64(v), nil - case 
int32: - return int64(v), nil - case int64: - return v, nil - case uint: - if v > uint(math.MaxInt64) { - return 0, fmt.Errorf("uint value %d overflows int64", v) - } - return int64(v), nil - case uint8: - return int64(v), nil - case uint16: - return int64(v), nil - case uint32: - return int64(v), nil - case float32: - return int64(v), nil - case float64: - return int64(v), nil - case string: - // Try parsing as int first - if i, err := strconv.ParseInt(v, 10, 64); err == nil { - return i, nil - } - // Try parsing as float and convert - if f, err := strconv.ParseFloat(v, 64); err == nil { - return int64(f), nil - } - return 0, fmt.Errorf("cannot convert string '%s' to int", v) - case bool: - if v { - return 1, nil - } - return 0, nil - default: - return 0, fmt.Errorf("cannot convert %T to int", value) - } + return utils.ConvertToInt64(value) } -// convertToFloat64 converts a value to float64 func convertToFloat64(value interface{}) (float64, error) { - switch v := value.(type) { - case float32: - return float64(v), nil - case float64: - return v, nil - case int: - return float64(v), nil - case int8: - return float64(v), nil - case int16: - return float64(v), nil - case int32: - return float64(v), nil - case int64: - return float64(v), nil - case uint: - return float64(v), nil - case uint8: - return float64(v), nil - case uint16: - return float64(v), nil - case uint32: - return float64(v), nil - case uint64: - return float64(v), nil - case string: - f, err := strconv.ParseFloat(v, 64) - if err != nil { - return 0, fmt.Errorf("cannot convert string '%s' to float: %w", v, err) - } - return f, nil - case bool: - if v { - return 1.0, nil - } - return 0.0, nil - default: - return 0, fmt.Errorf("cannot convert %T to float", value) - } + return utils.ConvertToFloat64(value) } -// convertToBool converts a value to bool func convertToBool(value interface{}) (bool, error) { - switch v := value.(type) { - case bool: - return v, nil - case string: - // Empty string is treated 
as false - if v == "" { - return false, nil - } - b, err := strconv.ParseBool(v) - if err != nil { - // Handle common truthy/falsy strings - lower := strings.ToLower(v) - switch lower { - case "yes", "y", "on", "1": - return true, nil - case "no", "n", "off", "0": - return false, nil - } - return false, fmt.Errorf("cannot convert string '%s' to bool", v) - } - return b, nil - // NOTE: Each numeric type needs its own case arm. In Go type switches, combined - // cases like "case int, int8, int16:" keep v as interface{}, so "v != 0" would - // compare interface{}(int8(0)) with interface{}(int(0)) - different types that - // are never equal, causing int8(0) to incorrectly return true. - // With separate arms, v is bound to the concrete type, enabling correct comparison. - case int: - return v != 0, nil - case int8: - return v != 0, nil - case int16: - return v != 0, nil - case int32: - return v != 0, nil - case int64: - return v != 0, nil - case uint: - return v != 0, nil - case uint8: - return v != 0, nil - case uint16: - return v != 0, nil - case uint32: - return v != 0, nil - case uint64: - return v != 0, nil - case float32: - return v != 0, nil - case float64: - return v != 0, nil - default: - return false, fmt.Errorf("cannot convert %T to bool", value) - } + return utils.ConvertToBool(value) } diff --git a/internal/executor/types.go b/internal/executor/types.go index 083e491..4b06ad9 100644 --- a/internal/executor/types.go +++ b/internal/executor/types.go @@ -139,7 +139,7 @@ type ResourceResult struct { // Operation is the operation performed (create, update, recreate, skip) Operation manifest.Operation // OperationReason explains why this operation was performed - // Examples: "resource not found", "generation changed from 1 to 2", "generation 1 unchanged", "recreateOnChange=true" + // Examples: "resource not found", "generation changed from 1 to 2", "generation 1 unchanged", "recreate_on_change=true" OperationReason string // Error is the error if Status is 
StatusFailed Error error diff --git a/internal/executor/utils.go b/internal/executor/utils.go index b8d5b97..6f87033 100644 --- a/internal/executor/utils.go +++ b/internal/executor/utils.go @@ -243,7 +243,7 @@ func buildHyperfleetAPICallURL(apiCallURL string, execCtx *ExecutionContext) str // If the URL is absolute (has a scheme like http:// or https://) if parsedURL.Scheme != "" { // Parse the baseURL to extract its path for comparison - baseURLStr := execCtx.Config.Spec.Clients.HyperfleetAPI.BaseURL + baseURLStr := execCtx.Config.Clients.HyperfleetAPI.BaseURL if baseURLStr == "" { return apiCallURL } @@ -273,7 +273,7 @@ func buildHyperfleetAPICallURL(apiCallURL string, execCtx *ExecutionContext) str } // For relative URLs, ensure proper formatting - baseURLStr := execCtx.Config.Spec.Clients.HyperfleetAPI.BaseURL + baseURLStr := execCtx.Config.Clients.HyperfleetAPI.BaseURL if baseURLStr == "" { return apiCallURL } @@ -288,7 +288,7 @@ func buildHyperfleetAPICallURL(apiCallURL string, execCtx *ExecutionContext) str } // Build the full API path using path.Join for clean path handling - version := execCtx.Config.Spec.Clients.HyperfleetAPI.Version + version := execCtx.Config.Clients.HyperfleetAPI.Version if version == "" { version = "v1" } diff --git a/internal/executor/utils_test.go b/internal/executor/utils_test.go index 4f95f8a..ff068d0 100644 --- a/internal/executor/utils_test.go +++ b/internal/executor/utils_test.go @@ -1123,12 +1123,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "http://localhost:8000/api/hyperfleet/v1/clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v1", }, }, }, @@ -1140,12 +1138,10 
@@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "http://localhost:8000/api/hyperfleet/v1/clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000/api/hyperfleet/v1", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000/api/hyperfleet/v1", + Version: "v1", }, }, }, @@ -1157,12 +1153,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "http://localhost:8000/api/hyperfleet/v1/clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000/api/hyperfleet/v1/", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000/api/hyperfleet/v1/", + Version: "v1", }, }, }, @@ -1174,12 +1168,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "http://other-host:9000/api/clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v1", }, }, }, @@ -1191,12 +1183,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "https://localhost:8000/api/clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - 
Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v1", }, }, }, @@ -1208,12 +1198,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "api/hyperfleet/v1/clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v1", }, }, }, @@ -1225,12 +1213,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "/api/hyperfleet/v1/clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v1", }, }, }, @@ -1242,12 +1228,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v1", }, }, }, @@ -1259,12 +1243,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "/clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: 
"http://localhost:8000", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v1", }, }, }, @@ -1276,12 +1258,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - Version: "v2", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v2", }, }, }, @@ -1293,12 +1273,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - Version: "", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "", }, }, }, @@ -1310,12 +1288,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "", + Version: "v1", }, }, }, @@ -1327,12 +1303,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "clusters/abc123/", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - Version: "v1", - }, + Clients: 
config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v1", }, }, }, @@ -1344,12 +1318,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "/clusters/../clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v1", }, }, }, @@ -1361,12 +1333,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "http://localhost:8000/api/hyperfleet/v1/clusters/abc123", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "", + Version: "v1", }, }, }, @@ -1378,12 +1348,10 @@ func TestBuildHyperfleetAPICallURL(t *testing.T) { url: "clusters/abc123/statuses", execCtx: &ExecutionContext{ Config: &config_loader.Config{ - Spec: config_loader.ConfigSpec{ - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - BaseURL: "http://localhost:8000", - Version: "v1", - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + BaseURL: "http://localhost:8000", + Version: "v1", }, }, }, diff --git a/internal/hyperfleet_api/types.go b/internal/hyperfleet_api/types.go index e0b021e..3e044e6 100644 --- a/internal/hyperfleet_api/types.go +++ b/internal/hyperfleet_api/types.go @@ -38,21 +38,21 @@ const ( type ClientConfig struct { // BaseURL is the base URL for all API requests (must be set by caller) // Relative URLs in requests 
will be prefixed with this - BaseURL string `yaml:"baseUrl,omitempty" mapstructure:"baseUrl"` + BaseURL string `yaml:"base_url,omitempty" mapstructure:"base_url"` // Version is the HyperFleet API version (e.g., "v1") Version string `yaml:"version,omitempty" mapstructure:"version"` // Timeout is the HTTP client timeout for requests Timeout time.Duration `yaml:"timeout,omitempty" mapstructure:"timeout"` // RetryAttempts is the number of retry attempts for failed requests - RetryAttempts int `yaml:"retryAttempts,omitempty" mapstructure:"retryAttempts"` + RetryAttempts int `yaml:"retry_attempts,omitempty" mapstructure:"retry_attempts"` // RetryBackoff is the backoff strategy for retries - RetryBackoff BackoffStrategy `yaml:"retryBackoff,omitempty" mapstructure:"retryBackoff"` + RetryBackoff BackoffStrategy `yaml:"retry_backoff,omitempty" mapstructure:"retry_backoff"` // BaseDelay is the initial delay for retry backoff - BaseDelay time.Duration `yaml:"baseDelay,omitempty" mapstructure:"baseDelay"` + BaseDelay time.Duration `yaml:"base_delay,omitempty" mapstructure:"base_delay"` // MaxDelay is the maximum delay for retry backoff - MaxDelay time.Duration `yaml:"maxDelay,omitempty" mapstructure:"maxDelay"` + MaxDelay time.Duration `yaml:"max_delay,omitempty" mapstructure:"max_delay"` // DefaultHeaders are headers added to all requests - DefaultHeaders map[string]string `yaml:"defaultHeaders,omitempty" mapstructure:"defaultHeaders"` + DefaultHeaders map[string]string `yaml:"default_headers,omitempty" mapstructure:"default_headers"` } // DefaultClientConfig returns a ClientConfig with default values diff --git a/internal/manifest/generation.go b/internal/manifest/generation.go index d7638fc..2273316 100644 --- a/internal/manifest/generation.go +++ b/internal/manifest/generation.go @@ -238,16 +238,6 @@ func ValidateGenerationFromUnstructured(obj *unstructured.Unstructured) error { return nil } -// ValidateManifest validates a Kubernetes manifest has all required fields and 
annotations. -// Returns error if: -// - Object is nil -// - apiVersion is missing -// - kind is missing -// - metadata.name is missing -// - generation annotation is missing or invalid -// -// This is used by both k8s_client (for direct K8s resources) and maestro_client (for ManifestWork payloads). - // GetLatestGenerationFromList returns the resource with the highest generation annotation from a list. // It sorts by generation annotation (descending) and uses metadata.name as a secondary sort key // for deterministic behavior when generations are equal. diff --git a/internal/manifest/manifest.go b/internal/manifest/manifest.go deleted file mode 100644 index e913961..0000000 --- a/internal/manifest/manifest.go +++ /dev/null @@ -1,31 +0,0 @@ -package manifest - -import ( - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - apperrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" -) - -func ValidateManifest(obj *unstructured.Unstructured) error { - if obj == nil { - return apperrors.Validation("manifest cannot be nil").AsError() - } - - // Validate required Kubernetes fields - if obj.GetAPIVersion() == "" { - return apperrors.Validation("manifest missing apiVersion").AsError() - } - if obj.GetKind() == "" { - return apperrors.Validation("manifest missing kind").AsError() - } - if obj.GetName() == "" { - return apperrors.Validation("manifest missing metadata.name").AsError() - } - - // Validate required generation annotation - if err := ValidateGenerationFromUnstructured(obj); err != nil { - return err - } - - return nil -} diff --git a/pkg/health/server.go b/pkg/health/server.go index 6a5904b..a29221d 100644 --- a/pkg/health/server.go +++ b/pkg/health/server.go @@ -46,8 +46,9 @@ type Server struct { // This follows the HyperFleet Graceful Shutdown Standard. 
shuttingDown atomic.Bool - mu sync.RWMutex - checks map[string]CheckStatus + mu sync.RWMutex + checks map[string]CheckStatus + configYAML []byte // set only when debug_config is true } // NewServer creates a new health check server. @@ -65,6 +66,7 @@ func NewServer(log logger.Logger, port string, component string) *Server { mux := http.NewServeMux() mux.HandleFunc("/healthz", s.healthzHandler) mux.HandleFunc("/readyz", s.readyzHandler) + mux.HandleFunc("/config", s.configHandler) s.server = &http.Server{ Addr: ":" + port, @@ -116,6 +118,14 @@ func (s *Server) SetConfigLoaded() { s.SetCheck("config", CheckOK) } +// SetConfig stores pre-marshaled YAML config to serve at /config. +// Only call this when debug_config is enabled — the endpoint returns 404 otherwise. +func (s *Server) SetConfig(data []byte) { + s.mu.Lock() + defer s.mu.Unlock() + s.configYAML = data +} + // SetShuttingDown marks the server as shutting down. // When set to true, /readyz will immediately return 503 Service Unavailable // regardless of other check statuses. This follows the HyperFleet Graceful @@ -202,3 +212,20 @@ func (s *Server) readyzHandler(w http.ResponseWriter, r *http.Request) { Checks: checks, }) } + +// configHandler serves the current adapter configuration as YAML. +// Returns 404 if debug_config is not enabled (SetConfig was never called). 
+func (s *Server) configHandler(w http.ResponseWriter, r *http.Request) { + s.mu.RLock() + data := s.configYAML + s.mu.RUnlock() + + if data == nil { + http.NotFound(w, r) + return + } + + w.Header().Set("Content-Type", "application/yaml") + w.WriteHeader(http.StatusOK) + _, _ = w.Write(data) //nolint:errcheck // best-effort response +} diff --git a/scripts/test-config-loading.sh b/scripts/test-config-loading.sh new file mode 100755 index 0000000..8c84916 --- /dev/null +++ b/scripts/test-config-loading.sh @@ -0,0 +1,430 @@ +#!/usr/bin/env bash +# test-config-loading.sh - Verifies that every config parameter loads correctly from +# all available forms: config file, environment variable, and CLI flag. +# +# Usage: +# ./scripts/test-config-loading.sh [--verbose] +# +# Output: one PASS/FAIL line per test, plus a summary at the end. +# Exit code: 0 if all tests pass, 1 if any fail. + +set -euo pipefail + +VERBOSE=0 +for arg in "$@"; do + [[ "$arg" == "--verbose" || "$arg" == "-v" ]] && VERBOSE=1 +done + +# ─── Colours ────────────────────────────────────────────────────────────────── +RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; CYAN='\033[0;36m'; NC='\033[0m' + +PASS=0; FAIL=0; declare -a ERRORS=() + +pass() { echo -e " ${GREEN}PASS${NC} $1"; PASS=$((PASS+1)); } +fail() { + local name="$1" pattern="$2" output="$3" + echo -e " ${RED}FAIL${NC} $name" + echo " expected pattern: ${pattern}" + FAIL=$((FAIL+1)); ERRORS+=("$name") + if [[ $VERBOSE -eq 1 ]]; then + echo " output:" + echo "$output" | sed 's/^/ /' + fi +} + +section() { echo -e "\n${CYAN}══ $1 ══${NC}"; } + +# assert_contains +assert_contains() { + local name="$1" output="$2" pattern="$3" + if echo "$output" | grep -qF "$pattern"; then + pass "$name" + else + fail "$name" "$pattern" "$output" + fi +} + +# assert_not_contains +assert_not_contains() { + local name="$1" output="$2" pattern="$3" + if echo "$output" | grep -qF "$pattern"; then + fail "$name" "NOT: $pattern" "$output" + else + pass 
"$name" + fi +} + +# ─── Setup ──────────────────────────────────────────────────────────────────── +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +ADAPTER_BIN="$(mktemp /tmp/adapter-test-XXXXXX)" +TMPDIR_TEST="$(mktemp -d)" + +cleanup() { rm -f "$ADAPTER_BIN"; rm -rf "$TMPDIR_TEST"; } +trap cleanup EXIT + +echo -e "${YELLOW}Building adapter binary...${NC}" +(cd "$ROOT_DIR" && go build -o "$ADAPTER_BIN" ./cmd/adapter) +echo " Built: $ADAPTER_BIN" + +# Minimal task config (required by config-dump; task params are not under test here) +TASK_CONFIG="$TMPDIR_TEST/task.yaml" +cat > "$TASK_CONFIG" <<'YAML' +params: [] +YAML + +# ─── Config-dump wrapper ─────────────────────────────────────────────────────── +# cfg_dump [extra CLI flags...] +# Caller must set env vars in the calling environment (use subshells). +cfg_dump() { + local config="$1"; shift + "$ADAPTER_BIN" config-dump -c "$config" -t "$TASK_CONFIG" "$@" 2>/dev/null +} + +# ─── Config file factories ──────────────────────────────────────────────────── + +# k8s_config [extra yaml lines...] +# Creates a minimal kubernetes-transport adapter config. +# Extra args are appended verbatim after "clients:" so 2-space-indented args +# become children of clients, and 0-space-indented args become root-level keys. +k8s_config() { + local file="$1"; shift + { + cat <<'YAML' +adapter: + name: test-adapter + version: "0.1.0" +clients: +YAML + printf '%s\n' "$@" + } > "$file" +} + +# maestro_config [extra yaml lines...] +# Creates a minimal maestro-transport adapter config. +# Extra args are appended verbatim after " maestro:" so 4-space-indented args +# become children of maestro. 
+maestro_config() { + local file="$1"; shift + { + cat <<'YAML' +adapter: + name: test-adapter + version: "0.1.0" +clients: + hyperfleet_api: + base_url: "https://base.example.com" + broker: + subscription_id: "base-sub" + topic: "base-topic" + maestro: +YAML + printf '%s\n' "$@" + } > "$file" +} + +CFG="$TMPDIR_TEST/adapter.yaml" # reused across tests (overwritten each time) + +# ───────────────────────────────────────────────────────────────────────────── +section "Adapter identity (config file only)" +# ───────────────────────────────────────────────────────────────────────────── + +k8s_config "$CFG" +out=$(cfg_dump "$CFG") +assert_contains "adapter.name from file" "$out" "name: test-adapter" +assert_contains "adapter.version from file" "$out" "version: 0.1.0" + +# ───────────────────────────────────────────────────────────────────────────── +section "HyperFleet API" +# ───────────────────────────────────────────────────────────────────────────── + +# base_url +k8s_config "$CFG" " hyperfleet_api:" " base_url: https://file-api.example.com" " timeout: 5s" +assert_contains "api.base_url [file]" "$(cfg_dump "$CFG")" "base_url: https://file-api.example.com" +assert_contains "api.base_url [env]" "$(HYPERFLEET_API_BASE_URL=https://env-api.example.com cfg_dump "$CFG")" "base_url: https://env-api.example.com" +assert_contains "api.base_url [cli]" "$(cfg_dump "$CFG" --hyperfleet-api-base-url=https://cli-api.example.com)" "base_url: https://cli-api.example.com" +assert_contains "api.base_url [cli>env]" "$(HYPERFLEET_API_BASE_URL=https://env-api.example.com cfg_dump "$CFG" --hyperfleet-api-base-url=https://cli-api.example.com)" "base_url: https://cli-api.example.com" + +# version +k8s_config "$CFG" " hyperfleet_api:" " base_url: https://base.example.com" " timeout: 5s" " version: file-v99" +assert_contains "api.version [file]" "$(cfg_dump "$CFG")" "version: file-v99" +assert_contains "api.version [env]" "$(HYPERFLEET_API_VERSION=env-v88 cfg_dump "$CFG")" "version: env-v88" 
+assert_contains "api.version [cli]" "$(cfg_dump "$CFG" --hyperfleet-api-version=cli-v77)" "version: cli-v77" +assert_contains "api.version [cli>env]" "$(HYPERFLEET_API_VERSION=env-v88 cfg_dump "$CFG" --hyperfleet-api-version=cli-v77)" "version: cli-v77" + +# timeout +k8s_config "$CFG" " hyperfleet_api:" " base_url: https://base.example.com" " timeout: 11s" +assert_contains "api.timeout [file]" "$(cfg_dump "$CFG")" "timeout: 11s" +assert_contains "api.timeout [env]" "$(HYPERFLEET_API_TIMEOUT=22s cfg_dump "$CFG")" "timeout: 22s" +assert_contains "api.timeout [cli]" "$(cfg_dump "$CFG" --hyperfleet-api-timeout=33s)" "timeout: 33s" +assert_contains "api.timeout [cli>env]" "$(HYPERFLEET_API_TIMEOUT=22s cfg_dump "$CFG" --hyperfleet-api-timeout=33s)" "timeout: 33s" + +# retry_attempts +k8s_config "$CFG" " hyperfleet_api:" " base_url: https://base.example.com" " timeout: 5s" " retry_attempts: 11" +assert_contains "api.retry_attempts [file]" "$(cfg_dump "$CFG")" "retry_attempts: 11" +assert_contains "api.retry_attempts [env]" "$(HYPERFLEET_API_RETRY_ATTEMPTS=22 cfg_dump "$CFG")" "retry_attempts: 22" +assert_contains "api.retry_attempts [cli]" "$(cfg_dump "$CFG" --hyperfleet-api-retry=33)" "retry_attempts: 33" +assert_contains "api.retry_attempts [cli>env]" "$(HYPERFLEET_API_RETRY_ATTEMPTS=22 cfg_dump "$CFG" --hyperfleet-api-retry=33)" "retry_attempts: 33" + +# retry_backoff +k8s_config "$CFG" " hyperfleet_api:" " base_url: https://base.example.com" " timeout: 5s" " retry_backoff: linear" +assert_contains "api.retry_backoff [file]" "$(cfg_dump "$CFG")" "retry_backoff: linear" +assert_contains "api.retry_backoff [env]" "$(HYPERFLEET_API_RETRY_BACKOFF=constant cfg_dump "$CFG")" "retry_backoff: constant" +assert_contains "api.retry_backoff [cli]" "$(cfg_dump "$CFG" --hyperfleet-api-retry-backoff=exponential)" "retry_backoff: exponential" +assert_contains "api.retry_backoff [cli>env]" "$(HYPERFLEET_API_RETRY_BACKOFF=constant cfg_dump "$CFG" 
--hyperfleet-api-retry-backoff=exponential)" "retry_backoff: exponential" + +# base_delay +k8s_config "$CFG" " hyperfleet_api:" " base_url: https://base.example.com" " timeout: 5s" " base_delay: 11s" +assert_contains "api.base_delay [file]" "$(cfg_dump "$CFG")" "base_delay: 11s" +assert_contains "api.base_delay [env]" "$(HYPERFLEET_API_BASE_DELAY=22s cfg_dump "$CFG")" "base_delay: 22s" +assert_contains "api.base_delay [cli]" "$(cfg_dump "$CFG" --hyperfleet-api-base-delay=33s)" "base_delay: 33s" +assert_contains "api.base_delay [cli>env]" "$(HYPERFLEET_API_BASE_DELAY=22s cfg_dump "$CFG" --hyperfleet-api-base-delay=33s)" "base_delay: 33s" + +# max_delay — use sub-60s values since time.Duration.String() reformats e.g. 111s → 1m51s +k8s_config "$CFG" " hyperfleet_api:" " base_url: https://base.example.com" " timeout: 5s" " max_delay: 51s" +assert_contains "api.max_delay [file]" "$(cfg_dump "$CFG")" "max_delay: 51s" +assert_contains "api.max_delay [env]" "$(HYPERFLEET_API_MAX_DELAY=52s cfg_dump "$CFG")" "max_delay: 52s" +assert_contains "api.max_delay [cli]" "$(cfg_dump "$CFG" --hyperfleet-api-max-delay=53s)" "max_delay: 53s" +assert_contains "api.max_delay [cli>env]" "$(HYPERFLEET_API_MAX_DELAY=52s cfg_dump "$CFG" --hyperfleet-api-max-delay=53s)" "max_delay: 53s" + +# ───────────────────────────────────────────────────────────────────────────── +section "Broker" +# ───────────────────────────────────────────────────────────────────────────── + +# subscription_id — standard env var +k8s_config "$CFG" " broker:" " subscription_id: file-sub-id" " topic: file-topic" +assert_contains "broker.subscription_id [file]" "$(cfg_dump "$CFG")" "subscription_id: file-sub-id" +assert_contains "broker.subscription_id [env]" "$(HYPERFLEET_BROKER_SUBSCRIPTION_ID=env-sub-id cfg_dump "$CFG")" "subscription_id: env-sub-id" +assert_contains "broker.subscription_id [cli]" "$(cfg_dump "$CFG" --broker-subscription-id=cli-sub-id)" "subscription_id: cli-sub-id" +assert_contains 
"broker.subscription_id [cli>env]" "$(HYPERFLEET_BROKER_SUBSCRIPTION_ID=env-sub-id cfg_dump "$CFG" --broker-subscription-id=cli-sub-id)" "subscription_id: cli-sub-id" + +# subscription_id — legacy env var (BROKER_SUBSCRIPTION_ID without HYPERFLEET_ prefix) +assert_contains "broker.subscription_id [legacy-env]" "$(BROKER_SUBSCRIPTION_ID=legacy-sub-id cfg_dump "$CFG")" "subscription_id: legacy-sub-id" +# standard env should take precedence over legacy env +assert_contains "broker.subscription_id [std-env>legacy-env]" "$(HYPERFLEET_BROKER_SUBSCRIPTION_ID=std-sub-id BROKER_SUBSCRIPTION_ID=legacy-sub-id cfg_dump "$CFG")" "subscription_id: std-sub-id" + +# topic — standard env var +assert_contains "broker.topic [file]" "$(cfg_dump "$CFG")" "topic: file-topic" +assert_contains "broker.topic [env]" "$(HYPERFLEET_BROKER_TOPIC=env-topic cfg_dump "$CFG")" "topic: env-topic" +assert_contains "broker.topic [cli]" "$(cfg_dump "$CFG" --broker-topic=cli-topic)" "topic: cli-topic" +assert_contains "broker.topic [cli>env]" "$(HYPERFLEET_BROKER_TOPIC=env-topic cfg_dump "$CFG" --broker-topic=cli-topic)" "topic: cli-topic" + +# topic — legacy env var +assert_contains "broker.topic [legacy-env]" "$(BROKER_TOPIC=legacy-topic cfg_dump "$CFG")" "topic: legacy-topic" +assert_contains "broker.topic [std-env>legacy-env]" "$(HYPERFLEET_BROKER_TOPIC=std-topic BROKER_TOPIC=legacy-topic cfg_dump "$CFG")" "topic: std-topic" + +# ───────────────────────────────────────────────────────────────────────────── +section "Kubernetes" +# ───────────────────────────────────────────────────────────────────────────── + +# api_version +k8s_config "$CFG" " kubernetes:" " api_version: file-k8s-v1" +assert_contains "kubernetes.api_version [file]" "$(cfg_dump "$CFG")" "api_version: file-k8s-v1" +assert_contains "kubernetes.api_version [env]" "$(HYPERFLEET_KUBERNETES_API_VERSION=env-k8s-v2 cfg_dump "$CFG")" "api_version: env-k8s-v2" +assert_contains "kubernetes.api_version [cli]" "$(cfg_dump "$CFG" 
--kubernetes-api-version=cli-k8s-v3)" "api_version: cli-k8s-v3" +assert_contains "kubernetes.api_version [cli>env]" "$(HYPERFLEET_KUBERNETES_API_VERSION=env-k8s-v2 cfg_dump "$CFG" --kubernetes-api-version=cli-k8s-v3)" "api_version: cli-k8s-v3" + +# kube_config_path +k8s_config "$CFG" " kubernetes:" " api_version: v1" " kube_config_path: /file/kubeconfig" +assert_contains "kubernetes.kube_config_path [file]" "$(cfg_dump "$CFG")" "kube_config_path: /file/kubeconfig" +assert_contains "kubernetes.kube_config_path [env]" "$(HYPERFLEET_KUBERNETES_KUBE_CONFIG_PATH=/env/kubeconfig cfg_dump "$CFG")" "kube_config_path: /env/kubeconfig" +assert_contains "kubernetes.kube_config_path [cli]" "$(cfg_dump "$CFG" --kubernetes-kube-config-path=/cli/kubeconfig)" "kube_config_path: /cli/kubeconfig" +assert_contains "kubernetes.kube_config_path [cli>env]" "$(HYPERFLEET_KUBERNETES_KUBE_CONFIG_PATH=/env/kubeconfig cfg_dump "$CFG" --kubernetes-kube-config-path=/cli/kubeconfig)" "kube_config_path: /cli/kubeconfig" + +# qps +k8s_config "$CFG" " kubernetes:" " api_version: v1" " qps: 11.5" +assert_contains "kubernetes.qps [file]" "$(cfg_dump "$CFG")" "qps: 11.5" +assert_contains "kubernetes.qps [env]" "$(HYPERFLEET_KUBERNETES_QPS=22.5 cfg_dump "$CFG")" "qps: 22.5" +assert_contains "kubernetes.qps [cli]" "$(cfg_dump "$CFG" --kubernetes-qps=33.5)" "qps: 33.5" +assert_contains "kubernetes.qps [cli>env]" "$(HYPERFLEET_KUBERNETES_QPS=22.5 cfg_dump "$CFG" --kubernetes-qps=33.5)" "qps: 33.5" + +# burst +k8s_config "$CFG" " kubernetes:" " api_version: v1" " burst: 11" +assert_contains "kubernetes.burst [file]" "$(cfg_dump "$CFG")" "burst: 11" +assert_contains "kubernetes.burst [env]" "$(HYPERFLEET_KUBERNETES_BURST=22 cfg_dump "$CFG")" "burst: 22" +assert_contains "kubernetes.burst [cli]" "$(cfg_dump "$CFG" --kubernetes-burst=33)" "burst: 33" +assert_contains "kubernetes.burst [cli>env]" "$(HYPERFLEET_KUBERNETES_BURST=22 cfg_dump "$CFG" --kubernetes-burst=33)" "burst: 33" + +# 
───────────────────────────────────────────────────────────────────────────── +section "Log" +# ───────────────────────────────────────────────────────────────────────────── + +# level +k8s_config "$CFG" "log:" " level: debug" +assert_contains "log.level [file]" "$(cfg_dump "$CFG")" "level: debug" +assert_contains "log.level [env]" "$(LOG_LEVEL=warn cfg_dump "$CFG")" "level: warn" +assert_contains "log.level [cli]" "$(cfg_dump "$CFG" --log-level=error)" "level: error" +assert_contains "log.level [cli>env]" "$(LOG_LEVEL=warn cfg_dump "$CFG" --log-level=error)" "level: error" +# env overrides file +assert_contains "log.level [env>file]" "$(LOG_LEVEL=warn cfg_dump "$CFG")" "level: warn" + +# format +k8s_config "$CFG" "log:" " format: json" +assert_contains "log.format [file]" "$(cfg_dump "$CFG")" "format: json" +assert_contains "log.format [env]" "$(LOG_FORMAT=text cfg_dump "$CFG")" "format: text" +assert_contains "log.format [cli]" "$(cfg_dump "$CFG" --log-format=json)" "format: json" +assert_contains "log.format [cli>env]" "$(LOG_FORMAT=text cfg_dump "$CFG" --log-format=json)" "format: json" + +# output +k8s_config "$CFG" "log:" " output: stderr" +assert_contains "log.output [file]" "$(cfg_dump "$CFG")" "output: stderr" +assert_contains "log.output [env]" "$(LOG_OUTPUT=stdout cfg_dump "$CFG")" "output: stdout" +assert_contains "log.output [cli]" "$(cfg_dump "$CFG" --log-output=stderr)" "output: stderr" +assert_contains "log.output [cli>env]" "$(LOG_OUTPUT=stdout cfg_dump "$CFG" --log-output=stderr)" "output: stderr" + +# ───────────────────────────────────────────────────────────────────────────── +section "Maestro — addressing & identity" +# ───────────────────────────────────────────────────────────────────────────── + +# grpc_server_address +maestro_config "$CFG" " grpc_server_address: file-grpc:8090" +assert_contains "maestro.grpc_server_address [file]" "$(cfg_dump "$CFG")" "grpc_server_address: file-grpc:8090" +assert_contains "maestro.grpc_server_address 
[env]" "$(HYPERFLEET_MAESTRO_GRPC_SERVER_ADDRESS=env-grpc:8090 cfg_dump "$CFG")" "grpc_server_address: env-grpc:8090" +assert_contains "maestro.grpc_server_address [cli]" "$(cfg_dump "$CFG" --maestro-grpc-server-address=cli-grpc:8090)" "grpc_server_address: cli-grpc:8090" +assert_contains "maestro.grpc_server_address [cli>env]" "$(HYPERFLEET_MAESTRO_GRPC_SERVER_ADDRESS=env-grpc:8090 cfg_dump "$CFG" --maestro-grpc-server-address=cli-grpc:8090)" "grpc_server_address: cli-grpc:8090" + +# http_server_address +maestro_config "$CFG" " http_server_address: http://file-http:8000" +assert_contains "maestro.http_server_address [file]" "$(cfg_dump "$CFG")" "http_server_address: http://file-http:8000" +assert_contains "maestro.http_server_address [env]" "$(HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS=http://env-http:8000 cfg_dump "$CFG")" "http_server_address: http://env-http:8000" +assert_contains "maestro.http_server_address [cli]" "$(cfg_dump "$CFG" --maestro-http-server-address=http://cli-http:8000)" "http_server_address: http://cli-http:8000" +assert_contains "maestro.http_server_address [cli>env]" "$(HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS=http://env-http:8000 cfg_dump "$CFG" --maestro-http-server-address=http://cli-http:8000)" "http_server_address: http://cli-http:8000" + +# source_id +maestro_config "$CFG" " source_id: file-source-id" +assert_contains "maestro.source_id [file]" "$(cfg_dump "$CFG")" "source_id: file-source-id" +assert_contains "maestro.source_id [env]" "$(HYPERFLEET_MAESTRO_SOURCE_ID=env-source-id cfg_dump "$CFG")" "source_id: env-source-id" +assert_contains "maestro.source_id [cli]" "$(cfg_dump "$CFG" --maestro-source-id=cli-source-id)" "source_id: cli-source-id" +assert_contains "maestro.source_id [cli>env]" "$(HYPERFLEET_MAESTRO_SOURCE_ID=env-source-id cfg_dump "$CFG" --maestro-source-id=cli-source-id)" "source_id: cli-source-id" + +# client_id +maestro_config "$CFG" " client_id: file-client-id" +assert_contains "maestro.client_id [file]" "$(cfg_dump 
"$CFG")" "client_id: file-client-id" +assert_contains "maestro.client_id [env]" "$(HYPERFLEET_MAESTRO_CLIENT_ID=env-client-id cfg_dump "$CFG")" "client_id: env-client-id" +assert_contains "maestro.client_id [cli]" "$(cfg_dump "$CFG" --maestro-client-id=cli-client-id)" "client_id: cli-client-id" +assert_contains "maestro.client_id [cli>env]" "$(HYPERFLEET_MAESTRO_CLIENT_ID=env-client-id cfg_dump "$CFG" --maestro-client-id=cli-client-id)" "client_id: cli-client-id" + +# ───────────────────────────────────────────────────────────────────────────── +section "Maestro — timeouts & retries" +# ───────────────────────────────────────────────────────────────────────────── + +# timeout +maestro_config "$CFG" " timeout: 11s" +assert_contains "maestro.timeout [file]" "$(cfg_dump "$CFG")" "timeout: 11s" +assert_contains "maestro.timeout [env]" "$(HYPERFLEET_MAESTRO_TIMEOUT=22s cfg_dump "$CFG")" "timeout: 22s" +assert_contains "maestro.timeout [cli]" "$(cfg_dump "$CFG" --maestro-timeout=33s)" "timeout: 33s" +assert_contains "maestro.timeout [cli>env]" "$(HYPERFLEET_MAESTRO_TIMEOUT=22s cfg_dump "$CFG" --maestro-timeout=33s)" "timeout: 33s" + +# server_healthiness_timeout +maestro_config "$CFG" " server_healthiness_timeout: 11s" +assert_contains "maestro.server_healthiness_timeout [file]" "$(cfg_dump "$CFG")" "server_healthiness_timeout: 11s" +assert_contains "maestro.server_healthiness_timeout [env]" "$(HYPERFLEET_MAESTRO_SERVER_HEALTHINESS_TIMEOUT=22s cfg_dump "$CFG")" "server_healthiness_timeout: 22s" +assert_contains "maestro.server_healthiness_timeout [cli]" "$(cfg_dump "$CFG" --maestro-server-healthiness-timeout=33s)" "server_healthiness_timeout: 33s" +assert_contains "maestro.server_healthiness_timeout [cli>env]" "$(HYPERFLEET_MAESTRO_SERVER_HEALTHINESS_TIMEOUT=22s cfg_dump "$CFG" --maestro-server-healthiness-timeout=33s)" "server_healthiness_timeout: 33s" + +# retry_attempts +maestro_config "$CFG" " retry_attempts: 11" +assert_contains "maestro.retry_attempts [file]" 
"$(cfg_dump "$CFG")" "retry_attempts: 11" +assert_contains "maestro.retry_attempts [env]" "$(HYPERFLEET_MAESTRO_RETRY_ATTEMPTS=22 cfg_dump "$CFG")" "retry_attempts: 22" +assert_contains "maestro.retry_attempts [cli]" "$(cfg_dump "$CFG" --maestro-retry-attempts=33)" "retry_attempts: 33" +assert_contains "maestro.retry_attempts [cli>env]" "$(HYPERFLEET_MAESTRO_RETRY_ATTEMPTS=22 cfg_dump "$CFG" --maestro-retry-attempts=33)" "retry_attempts: 33" + +# insecure (boolean) +# insecure: false is the zero value; yaml omitempty suppresses it in marshaled output +maestro_config "$CFG" " insecure: false" +assert_not_contains "maestro.insecure [file=false]" "$(cfg_dump "$CFG")" "insecure: true" +maestro_config "$CFG" " insecure: true" +assert_contains "maestro.insecure [file=true]" "$(cfg_dump "$CFG")" "insecure: true" +maestro_config "$CFG" " insecure: false" +assert_contains "maestro.insecure [env=true]" "$(HYPERFLEET_MAESTRO_INSECURE=true cfg_dump "$CFG")" "insecure: true" +assert_contains "maestro.insecure [cli=true]" "$(cfg_dump "$CFG" --maestro-insecure)" "insecure: true" + +# ───────────────────────────────────────────────────────────────────────────── +section "Maestro — keepalive" +# ───────────────────────────────────────────────────────────────────────────── + +# keepalive.time (tests that nested pointer struct is created from env/CLI) +maestro_config "$CFG" " keepalive:" " time: 11s" " timeout: 5s" +assert_contains "maestro.keepalive.time [file]" "$(cfg_dump "$CFG")" "time: 11s" +assert_contains "maestro.keepalive.time [env]" "$(HYPERFLEET_MAESTRO_KEEPALIVE_TIME=22s cfg_dump "$CFG")" "time: 22s" +assert_contains "maestro.keepalive.time [cli]" "$(cfg_dump "$CFG" --maestro-keepalive-time=33s)" "time: 33s" +assert_contains "maestro.keepalive.time [cli>env]" "$(HYPERFLEET_MAESTRO_KEEPALIVE_TIME=22s cfg_dump "$CFG" --maestro-keepalive-time=33s)" "time: 33s" + +# keepalive.timeout +assert_contains "maestro.keepalive.timeout [file]" "$(cfg_dump "$CFG")" "timeout: 5s" 
+assert_contains "maestro.keepalive.timeout [env]" "$(HYPERFLEET_MAESTRO_KEEPALIVE_TIMEOUT=12s cfg_dump "$CFG")" "timeout: 12s" +assert_contains "maestro.keepalive.timeout [cli]" "$(cfg_dump "$CFG" --maestro-keepalive-timeout=24s)" "timeout: 24s" +assert_contains "maestro.keepalive.timeout [cli>env]" "$(HYPERFLEET_MAESTRO_KEEPALIVE_TIMEOUT=12s cfg_dump "$CFG" --maestro-keepalive-timeout=24s)" "timeout: 24s" + +# ───────────────────────────────────────────────────────────────────────────── +section "Maestro — auth" +# ───────────────────────────────────────────────────────────────────────────── + +# auth.type +maestro_config "$CFG" " auth:" " type: file-auth-type" +assert_contains "maestro.auth.type [file]" "$(cfg_dump "$CFG")" "type: file-auth-type" +assert_contains "maestro.auth.type [env]" "$(HYPERFLEET_MAESTRO_AUTH_TYPE=env-auth-type cfg_dump "$CFG")" "type: env-auth-type" +assert_contains "maestro.auth.type [cli]" "$(cfg_dump "$CFG" --maestro-auth-type=cli-auth-type)" "type: cli-auth-type" +assert_contains "maestro.auth.type [cli>env]" "$(HYPERFLEET_MAESTRO_AUTH_TYPE=env-auth-type cfg_dump "$CFG" --maestro-auth-type=cli-auth-type)" "type: cli-auth-type" + +# TLS cert fields — values are redacted in output; test that the field IS set (shows REDACTED) +maestro_config "$CFG" " auth:" " type: tls" " tls_config:" " ca_file: /file/ca.crt" " cert_file: /file/client.crt" " key_file: /file/client.key" " http_ca_file: /file/http-ca.crt" +assert_contains "maestro.tls.ca_file [file→redacted]" "$(cfg_dump "$CFG")" "ca_file: " +assert_contains "maestro.tls.cert_file [file→redacted]" "$(cfg_dump "$CFG")" "cert_file: " +assert_contains "maestro.tls.key_file [file→redacted]" "$(cfg_dump "$CFG")" "key_file: " +assert_contains "maestro.tls.http_ca_file [file→redacted]" "$(cfg_dump "$CFG")" "http_ca_file: " + +# TLS via env vars (also redacted) +maestro_config "$CFG" " auth:" " type: tls" +assert_contains "maestro.tls.ca_file [env→redacted]" 
"$(HYPERFLEET_MAESTRO_CA_FILE=/env/ca.crt cfg_dump "$CFG")" "ca_file: " +assert_contains "maestro.tls.cert_file [env→redacted]" "$(HYPERFLEET_MAESTRO_CERT_FILE=/env/client.crt cfg_dump "$CFG")" "cert_file: " +assert_contains "maestro.tls.key_file [env→redacted]" "$(HYPERFLEET_MAESTRO_KEY_FILE=/env/client.key cfg_dump "$CFG")" "key_file: " +assert_contains "maestro.tls.http_ca_file [env→redacted]" "$(HYPERFLEET_MAESTRO_HTTP_CA_FILE=/env/http.crt cfg_dump "$CFG")" "http_ca_file: " + +# TLS via CLI flags (also redacted) +assert_contains "maestro.tls.ca_file [cli→redacted]" "$(cfg_dump "$CFG" --maestro-ca-file=/cli/ca.crt)" "ca_file: " +assert_contains "maestro.tls.cert_file [cli→redacted]" "$(cfg_dump "$CFG" --maestro-cert-file=/cli/client.crt)" "cert_file: " +assert_contains "maestro.tls.key_file [cli→redacted]" "$(cfg_dump "$CFG" --maestro-key-file=/cli/client.key)" "key_file: " +assert_contains "maestro.tls.http_ca_file [cli→redacted]" "$(cfg_dump "$CFG" --maestro-http-ca-file=/cli/http.crt)" "http_ca_file: " + +# ───────────────────────────────────────────────────────────────────────────── +section "debug_config flag" +# ───────────────────────────────────────────────────────────────────────────── + +k8s_config "$CFG" "debug_config: true" +assert_contains "debug_config [file=true]" "$(cfg_dump "$CFG")" "debug_config: true" +k8s_config "$CFG" +assert_contains "debug_config [env=true]" "$(HYPERFLEET_DEBUG_CONFIG=true cfg_dump "$CFG")" "debug_config: true" +assert_contains "debug_config [cli=true]" "$(cfg_dump "$CFG" --debug-config)" "debug_config: true" + +# ───────────────────────────────────────────────────────────────────────────── +section "Priority verification (cross-parameter)" +# ───────────────────────────────────────────────────────────────────────────── +# Use api.base_url as the representative parameter for all priority checks. 
+ +k8s_config "$CFG" " hyperfleet_api:" " base_url: https://file.example.com" " timeout: 5s" + +assert_contains "priority: file only → file value" "$(cfg_dump "$CFG")" "base_url: https://file.example.com" +assert_contains "priority: env > file" "$(HYPERFLEET_API_BASE_URL=https://env.example.com cfg_dump "$CFG")" "base_url: https://env.example.com" +assert_contains "priority: cli > file" "$(cfg_dump "$CFG" --hyperfleet-api-base-url=https://cli.example.com)" "base_url: https://cli.example.com" +assert_contains "priority: cli > env" "$(HYPERFLEET_API_BASE_URL=https://env.example.com cfg_dump "$CFG" --hyperfleet-api-base-url=https://cli.example.com)" "base_url: https://cli.example.com" +assert_contains "priority: cli > env > file" "$(HYPERFLEET_API_BASE_URL=https://env.example.com cfg_dump "$CFG" --hyperfleet-api-base-url=https://cli.example.com)" "base_url: https://cli.example.com" +# Verify env does NOT bleed into CLI-set value +assert_contains "priority: env does not override cli" "$(HYPERFLEET_API_BASE_URL=https://env.example.com cfg_dump "$CFG" --hyperfleet-api-base-url=https://cli.example.com)" "base_url: https://cli.example.com" + +# ───────────────────────────────────────────────────────────────────────────── +# Summary +# ───────────────────────────────────────────────────────────────────────────── +echo "" +echo "─────────────────────────────────────────" +TOTAL=$((PASS+FAIL)) +if [[ $FAIL -eq 0 ]]; then + echo -e "${GREEN}All $TOTAL tests passed.${NC}" +else + echo -e "${RED}$FAIL/$TOTAL tests FAILED:${NC}" + for e in "${ERRORS[@]}"; do + echo " - $e" + done +fi +echo "" +[[ $FAIL -eq 0 ]] diff --git a/test/integration/config-loader/config_criteria_integration_test.go b/test/integration/config-loader/config_criteria_integration_test.go index 8a130c6..af09910 100644 --- a/test/integration/config-loader/config_criteria_integration_test.go +++ b/test/integration/config-loader/config_criteria_integration_test.go @@ -76,9 +76,9 @@ func 
TestConfigLoadAndCriteriaEvaluation(t *testing.T) { // Simulate cluster details response ctx.Set("clusterDetails", map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-cluster", - }, + "id": "test-cluster-id", + "name": "test-cluster", + "kind": "Cluster", "spec": map[string]interface{}{ "provider": "aws", "region": "us-east-1", @@ -210,7 +210,7 @@ func TestConfigResourceDiscoveryFields(t *testing.T) { config := loadTestConfig(t) t.Run("verify resource discovery configs", func(t *testing.T) { - for _, resource := range config.Spec.Resources { + for _, resource := range config.Resources { t.Logf("Resource: %s", resource.Name) if resource.Discovery != nil { @@ -233,7 +233,7 @@ func TestConfigResourceDiscoveryFields(t *testing.T) { func TestConfigPostProcessingEvaluation(t *testing.T) { config := loadTestConfig(t) - require.NotNil(t, config.Spec.Post, "config should have post processing") + require.NotNil(t, config.Post, "config should have post processing") t.Run("simulate post-processing with k8s resource data", func(t *testing.T) { ctx := criteria.NewEvaluationContext() @@ -328,7 +328,7 @@ func TestConfigPostProcessingEvaluation(t *testing.T) { func TestConfigNullSafetyWithMissingResources(t *testing.T) { config := loadTestConfig(t) // Verify config has resources defined (the actual resources are tested for null safety below) - require.NotEmpty(t, config.Spec.Resources, "config should have resources defined") + require.NotEmpty(t, config.Resources, "config should have resources defined") t.Run("handle missing resource gracefully", func(t *testing.T) { ctx := criteria.NewEvaluationContext() @@ -397,7 +397,7 @@ func TestConfigParameterExtraction(t *testing.T) { }) t.Run("verify parameter sources", func(t *testing.T) { - for _, param := range config.Spec.Params { + for _, param := range config.Params { if param.Source != "" { // Check source format assert.True(t, diff --git a/test/integration/config-loader/loader_template_test.go 
b/test/integration/config-loader/loader_template_test.go index da4805e..c6fdc7d 100644 --- a/test/integration/config-loader/loader_template_test.go +++ b/test/integration/config-loader/loader_template_test.go @@ -55,23 +55,18 @@ func TestLoadSplitConfig(t *testing.T) { require.NotNil(t, config) // Verify merged structure - assert.Equal(t, "hyperfleet.redhat.com/v1alpha1", config.APIVersion) - assert.Equal(t, "Config", config.Kind) - - // Metadata comes from adapter config (takes precedence) - assert.Equal(t, "test-adapter", config.Metadata.Name) - // Adapter info comes from adapter config - assert.Equal(t, "0.1.0", config.Spec.Adapter.Version) + assert.Equal(t, "test-adapter", config.Adapter.Name) + assert.Equal(t, "0.1.0", config.Adapter.Version) // Clients config comes from adapter config - assert.Equal(t, 2*time.Second, config.Spec.Clients.HyperfleetAPI.Timeout) - assert.Equal(t, 3, config.Spec.Clients.HyperfleetAPI.RetryAttempts) - assert.Equal(t, hyperfleet_api.BackoffExponential, config.Spec.Clients.HyperfleetAPI.RetryBackoff) + assert.Equal(t, 2*time.Second, config.Clients.HyperfleetAPI.Timeout) + assert.Equal(t, 3, config.Clients.HyperfleetAPI.RetryAttempts) + assert.Equal(t, hyperfleet_api.BackoffExponential, config.Clients.HyperfleetAPI.RetryBackoff) // Verify params exist (from task config) - assert.NotEmpty(t, config.Spec.Params) - assert.GreaterOrEqual(t, len(config.Spec.Params), 3, "should have at least 3 parameters") + assert.NotEmpty(t, config.Params) + assert.GreaterOrEqual(t, len(config.Params), 1, "should have at least 1 parameter") // Check specific params (using accessor method) clusterIdParam := config.GetParamByName("clusterId") @@ -80,11 +75,11 @@ func TestLoadSplitConfig(t *testing.T) { assert.True(t, clusterIdParam.Required) // Verify preconditions (from task config) - assert.NotEmpty(t, config.Spec.Preconditions) - assert.GreaterOrEqual(t, len(config.Spec.Preconditions), 1, "should have at least 1 precondition") + assert.NotEmpty(t, 
config.Preconditions) + assert.GreaterOrEqual(t, len(config.Preconditions), 1, "should have at least 1 precondition") // Check first precondition - firstPrecond := config.Spec.Preconditions[0] + firstPrecond := config.Preconditions[0] assert.Equal(t, "clusterStatus", firstPrecond.Name) assert.NotNil(t, firstPrecond.APICall) assert.Equal(t, "GET", firstPrecond.APICall.Method) @@ -94,7 +89,7 @@ func TestLoadSplitConfig(t *testing.T) { // Verify captured fields clusterNameCapture := findCaptureByName(firstPrecond.Capture, "clusterName") require.NotNil(t, clusterNameCapture) - assert.Equal(t, "metadata.name", clusterNameCapture.Field) + assert.Equal(t, "name", clusterNameCapture.Field) // Verify conditions in precondition assert.GreaterOrEqual(t, len(firstPrecond.Conditions), 1) @@ -103,23 +98,23 @@ func TestLoadSplitConfig(t *testing.T) { assert.Equal(t, "equals", firstCondition.Operator) // Verify resources (from task config) - assert.NotEmpty(t, config.Spec.Resources) - assert.GreaterOrEqual(t, len(config.Spec.Resources), 1, "should have at least 1 resource") + assert.NotEmpty(t, config.Resources) + assert.GreaterOrEqual(t, len(config.Resources), 1, "should have at least 1 resource") // Check first resource - firstResource := config.Spec.Resources[0] + firstResource := config.Resources[0] assert.Equal(t, "clusterNamespace", firstResource.Name) assert.NotNil(t, firstResource.Manifest) assert.NotNil(t, firstResource.Discovery) // Verify post configuration (from task config) - if config.Spec.Post != nil { - assert.NotEmpty(t, config.Spec.Post.Payloads) - assert.NotEmpty(t, config.Spec.Post.PostActions) + if config.Post != nil { + assert.NotEmpty(t, config.Post.Payloads) + assert.NotEmpty(t, config.Post.PostActions) // Check post action - if len(config.Spec.Post.PostActions) > 0 { - firstAction := config.Spec.Post.PostActions[0] + if len(config.Post.PostActions) > 0 { + firstAction := config.Post.PostActions[0] assert.NotEmpty(t, firstAction.Name) if firstAction.APICall 
!= nil { assert.NotEmpty(t, firstAction.APICall.Method) diff --git a/test/integration/config-loader/testdata/adapter-config-template.yaml b/test/integration/config-loader/testdata/adapter-config-template.yaml index 467ffaa..9e6a5b0 100644 --- a/test/integration/config-loader/testdata/adapter-config-template.yaml +++ b/test/integration/config-loader/testdata/adapter-config-template.yaml @@ -9,15 +9,13 @@ # 2. field: "path" - Simple JSON path extraction (translated to CEL internally) # 3. expression: "cel" - Full CEL expressions for complex logic # -# CONDITION SYNTAX (when:): +# CONDITION SYNTAX : # ========================= # Option 1: Expression syntax (CEL) -# when: # expression: | # readyConditionStatus == "False" # # Option 2: Structured conditions (field + operator + value) -# when: # conditions: # - field: "readyConditionStatus" # operator: "equals" @@ -27,365 +25,357 @@ # # Copy this file to your adapter repository and customize for your needs. -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +# ============================================================================ +# Adapter Specification +# ============================================================================ + +# Adapter Information +adapter: # Adapter name (used as resource name and in logs/metrics) name: example-adapter - labels: - hyperfleet.io/adapter-type: example - hyperfleet.io/component: adapter + # Adapter version + version: "0.1.0" # ============================================================================ -# Adapter Specification +# HyperFleet API Configuration # ============================================================================ -spec: - # Adapter Information - adapter: - # Adapter version - version: "0.1.0" - - # ============================================================================ - # HyperFleet API Configuration - # ============================================================================ - hyperfleetApi: - # HTTP client timeout 
for API calls - timeout: 2s - # Number of retry attempts for failed API calls - retryAttempts: 3 - # Retry backoff strategy: exponential, linear, constant - retryBackoff: exponential +hyperfleet_api: + # HTTP client timeout for API calls + timeout: 2s + # Number of retry attempts for failed API calls + retry_attempts: 3 + # Retry backoff strategy: exponential, linear, constant + retry_backoff: exponential - # ============================================================================ - # Kubernetes Configuration - # ============================================================================ - kubernetes: - apiVersion: "v1" - - # ============================================================================ - # Global params - # ============================================================================ - # params to extract from CloudEvent and environment variables - params: - # Environment variables from deployment - - name: "hyperfleetApiBaseUrl" - source: "env.HYPERFLEET_API_BASE_URL" - type: "string" - description: "Base URL for the HyperFleet API" - required: true - - - name: "hyperfleetApiVersion" - source: "env.HYPERFLEET_API_VERSION" - type: "string" - default: "v1" - description: "API version to use" - required: true - - - name: "hyperfleetApiToken" - source: "env.HYPERFLEET_API_TOKEN" - type: "string" - description: "Authentication token for API access" - required: true - # Recommended: use Secret instead: "secret.hyperfleet-adapter-token.token" - - # Extract from CloudEvent data - - name: "clusterId" - source: "event.id" - type: "string" - description: "Unique identifier for the target cluster" - required: true - - - name: "resourceId" - source: "event.resource_id" - type: "string" - description: "Unique identifier for the resource" - required: true - - - name: "resourceType" - source: "event.resource_type" - type: "string" - description: "Type of the resource being managed" - required: true - - - name: "eventGenerationId" - source: "event.generation" - 
type: "string" - description: "Event generation ID for idempotency checks" - required: true +# ============================================================================ +# Kubernetes Configuration +# ============================================================================ +kubernetes: + api_version: "v1" - - name: "eventHref" - source: "event.href" - type: "string" - description: "Reference URL for the resource" - required: true +# ============================================================================ +# Global params +# ============================================================================ +# params to extract from CloudEvent and environment variables +params: + # Environment variables from deployment + - name: "hyperfleetApiBaseUrl" + source: "env.HYPERFLEET_API_BASE_URL" + type: "string" + description: "Base URL for the HyperFleet API" + required: true - - name: "imageTag" - source: "env.IMAGE_TAG" - type: "string" - default: "v1.0.0" - description: "Tag for container images" - required: false + - name: "hyperfleetApiVersion" + source: "env.HYPERFLEET_API_VERSION" + type: "string" + default: "v1" + description: "API version to use" + required: true - # ============================================================================ - # Global Preconditions - # ============================================================================ - # These preconditions run sequentially and validate cluster state before resource operations - preconditions: - # ========================================================================== - # Step 1: Get cluster status - # ========================================================================== - - name: "clusterStatus" - apiCall: - method: "GET" - url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - # Capture fields from the API response. Captured values become variables for use in resources section. 
- capture: - - name: "clusterName" - field: "metadata.name" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - name: "region" - field: "spec.region" - - name: "cloudProvider" - field: "spec.provider" - - name: "vpcId" - field: "spec.vpc_id" - - name: "nodeCount" - field: "spec.node_count" - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "True" - - field: "cloudProvider" - operator: "in" - value: ["aws", "gcp", "azure"] - - field: "vpcId" - operator: "exists" + - name: "hyperfleetApiToken" + source: "env.HYPERFLEET_API_TOKEN" + type: "string" + description: "Authentication token for API access" + required: true + # Recommended: use Secret instead: "secret.hyperfleet-adapter-token.token" - # ========================================================================== - # Step 2: Check validation availability - # ========================================================================== - - name: "validationAvailability" - apiCall: - method: "GET" - url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/validation/availability" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "availabilityStatus" - field: "status" - expression: | - availabilityStatus == "available" - - # ============================================================================ - # Resources (Create/Update Resources). - # This is just a fake template for the resources structure that will be created/updated . - # In a real adapter, you would define the resources here. 
but not exactly the same - # ============================================================================ - # All resources are created/updated sequentially in the order defined below - resources: - # ========================================================================== - # Resource 1: Cluster Namespace - # ========================================================================== - - name: "clusterNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: "cluster-{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/cluster-name: "{{ .clusterName }}" - hyperfleet.io/region: "{{ .region }}" - hyperfleet.io/provider: "{{ .cloudProvider }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - hyperfleet.io/resource-type: "namespace" - annotations: - hyperfleet.io/vpc-id: "{{ .vpcId }}" - hyperfleet.io/created-by: "hyperfleet-adapter" - hyperfleet.io/generation: "{{ .eventGenerationId }}" - hyperfleet.io/resource-href: "{{ .eventHref }}" - spec: - finalizers: - - "hyperfleet.io/cluster-finalizer" - discovery: - namespace: "cluster-{{ .clusterId }}" - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/resource-type: "namespace" - hyperfleet.io/managed-by: "{{ .metadata.name }}" + # Extract from CloudEvent data + - name: "clusterId" + source: "event.id" + type: "string" + description: "Unique identifier for the target cluster" + required: true - # ========================================================================== - # Resource 2: ConfigMap - # ========================================================================== - - name: "clusterConfigMap" - manifest: - apiVersion: v1 - kind: ConfigMap - metadata: - name: "cluster-config-{{ .clusterId }}" - namespace: "cluster-{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/resource-type: "configmap" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - data: - cluster.yaml: | - 
clusterId: "{{ .clusterId }}" - clusterName: "{{ .clusterName }}" - region: "{{ .region }}" - provider: "{{ .cloudProvider }}" - vpcId: "{{ .vpcId }}" - apiEndpoint: "{{ .hyperfleetApiBaseUrl }}" - discovery: - namespace: "cluster-{{ .clusterId }}" - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/resource-type: "configmap" - # ========================================================================== - # Resource 3: Secret - # ========================================================================== - - name: "clusterSecret" - manifest: - apiVersion: v1 - kind: Secret - metadata: - name: "cluster-credentials-{{ .clusterId }}" - namespace: "cluster-{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/resource-type: "secret" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - type: Opaque - stringData: - api-token: "{{ .hyperfleetApiToken }}" - cluster-id: "{{ .clusterId }}" - discovery: - namespace: "cluster-{{ .clusterId }}" - byName: "cluster-credentials-{{ .clusterId }}" + - name: "resourceId" + source: "event.resource_id" + type: "string" + description: "Unique identifier for the resource" + required: true + + - name: "resourceType" + source: "event.resource_type" + type: "string" + description: "Type of the resource being managed" + required: true + + - name: "eventGenerationId" + source: "event.generation" + type: "int" + description: "Event generation ID for idempotency checks" + required: true + + - name: "eventHref" + source: "event.href" + type: "string" + description: "Reference URL for the resource" + required: true + + - name: "imageTag" + source: "env.IMAGE_TAG" + type: "string" + default: "v1.0.0" + description: "Tag for container images" + required: false - - name: "validationJob" - recreateOnChange: true # Recreate the job if the generationId changes - manifest: - ref: "templates/job.yaml" - discovery: +# 
============================================================================ +# Global Preconditions +# ============================================================================ +# These preconditions run sequentially and validate cluster state before resource operations +preconditions: + # ========================================================================== + # Step 1: Get cluster status + # ========================================================================== + - name: "clusterStatus" + api_call: + method: "GET" + url: "/clusters/{{ .clusterId }}" + timeout: 10s + retry_attempts: 3 + retry_backoff: "exponential" + # Capture fields from the API response. Captured values become variables for use in resources section. + capture: + - name: "clusterName" + field: "name" + - name: "readyConditionStatus" + expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + - name: "region" + field: "spec.region" + - name: "cloudProvider" + field: "spec.provider" + - name: "vpcId" + field: "spec.vpc_id" + - name: "nodeCount" + field: "spec.node_count" + conditions: + - field: "readyConditionStatus" + operator: "equals" + value: "True" + - field: "cloudProvider" + operator: "in" + value: ["aws", "gcp", "azure"] + - field: "vpcId" + operator: "exists" + + # ========================================================================== + # Step 2: Check validation availability + # ========================================================================== + - name: "validationAvailability" + api_call: + method: "GET" + url: "/clusters/{{ .clusterId }}/validation/availability" + timeout: 10s + retry_attempts: 3 + retry_backoff: "exponential" + capture: + - name: "availabilityStatus" + field: "status" + expression: | + availabilityStatus == "available" + +# ============================================================================ +# Resources (Create/Update Resources). 
+# This is just a fake template for the resources structure that will be created/updated . +# In a real adapter, you would define the resources here. but not exactly the same +# ============================================================================ +# All resources are created/updated sequentially in the order defined below +resources: + # ========================================================================== + # Resource 1: Cluster Namespace + # ========================================================================== + - name: "clusterNamespace" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "cluster-{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" + hyperfleet.io/region: "{{ .region }}" + hyperfleet.io/provider: "{{ .cloudProvider }}" + hyperfleet.io/managed-by: "{{ .adapter.name }}" + hyperfleet.io/resource-type: "namespace" + annotations: + hyperfleet.io/vpc-id: "{{ .vpcId }}" + hyperfleet.io/created-by: "hyperfleet-adapter" + hyperfleet.io/generation: "{{ .eventGenerationId }}" + hyperfleet.io/resource-href: "{{ .eventHref }}" + spec: + finalizers: + - "hyperfleet.io/cluster-finalizer" + discovery: + by_selectors: + label_selector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "namespace" + hyperfleet.io/managed-by: "{{ .adapter.name }}" + + # ========================================================================== + # Resource 2: ConfigMap + # ========================================================================== + - name: "clusterConfigMap" + manifest: + apiVersion: v1 + kind: ConfigMap + metadata: + name: "cluster-config-{{ .clusterId }}" namespace: "cluster-{{ .clusterId }}" - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/resource-type: "job" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - # 
========================================================================== - # Resource 4: Deployment - # ========================================================================== - - name: "clusterController" - manifest: - ref: "templates/deployment.yaml" - discovery: + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "configmap" + hyperfleet.io/managed-by: "{{ .adapter.name }}" + data: + cluster.yaml: | + clusterId: "{{ .clusterId }}" + clusterName: "{{ .clusterName }}" + region: "{{ .region }}" + provider: "{{ .cloudProvider }}" + vpcId: "{{ .vpcId }}" + apiEndpoint: "{{ .hyperfleetApiBaseUrl }}" + discovery: + namespace: "cluster-{{ .clusterId }}" + by_selectors: + label_selector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "configmap" + # ========================================================================== + # Resource 3: Secret + # ========================================================================== + - name: "clusterSecret" + manifest: + apiVersion: v1 + kind: Secret + metadata: + name: "cluster-credentials-{{ .clusterId }}" namespace: "cluster-{{ .clusterId }}" - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/resource-type: "controller" - hyperfleet.io/managed-by: "{{ .metadata.name }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "secret" + hyperfleet.io/managed-by: "{{ .adapter.name }}" + type: Opaque + stringData: + api-token: "{{ .hyperfleetApiToken }}" + cluster-id: "{{ .clusterId }}" + discovery: + namespace: "cluster-{{ .clusterId }}" + by_name: "cluster-credentials-{{ .clusterId }}" + - name: "validationJob" + recreate_on_change: true # Recreate the job if the generationId changes + manifest: + ref: "templates/job.yaml" + discovery: + namespace: "cluster-{{ .clusterId }}" + by_selectors: + label_selector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + 
hyperfleet.io/resource-type: "job" + hyperfleet.io/managed-by: "{{ .adapter.name }}" + # ========================================================================== + # Resource 4: Deployment + # ========================================================================== + - name: "clusterController" + manifest: + ref: "templates/deployment.yaml" + discovery: + namespace: "cluster-{{ .clusterId }}" + by_selectors: + label_selector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "controller" + hyperfleet.io/managed-by: "{{ .adapter.name }}" - # ============================================================================ - # Post-Processing - # ============================================================================ - post: - payloads: - # Build status payload inline - - name: "clusterStatusPayload" - build: - conditions: - # Applied: Resources successfully created - applied: - status: - expression: | - resources.clusterNamespace.status.phase == "Active" && - resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].status == "True" - reason: - expression: | - has(resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].reason) - ? resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].reason - : "ResourcesCreated" - message: - expression: | - has(resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].message) - ? resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].message - : "All Kubernetes resources created successfully" - # Available: Deployment ready and serving - available: - status: - expression: | - resources.clusterController.status.readyReplicas > 0 && - resources.clusterController.status.replicas == resources.clusterController.status.readyReplicas - reason: - expression: | - has(resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].reason) - ? 
resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].reason - : "DeploymentReady" - message: - expression: | - has(resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].message) - ? resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].message - : "Deployment is available and serving traffic" +# ============================================================================ +# Post-Processing +# ============================================================================ +post: + payloads: + # Build status payload inline + - name: "clusterStatusPayload" + build: + conditions: + # Applied: Resources successfully created + applied: + status: + expression: | + resources.clusterNamespace.status.phase == "Active" && + resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].status == "True" + reason: + expression: | + has(resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].reason) + ? resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].reason + : "ResourcesCreated" + message: + expression: | + has(resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].message) + ? resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].message + : "All Kubernetes resources created successfully" - # Health: Adapter execution status (runtime) - health: - status: - expression: | - adapter.executionStatus == "success" - reason: - expression: | - has(adapter.errorReason) ? adapter.errorReason : "Healthy" - message: - expression: | - has(adapter.errorMessage) ? 
adapter.errorMessage : "All adapter operations completed successfully" - - # Extract additional data - data: - readyReplicas: + # Available: Deployment ready and serving + available: + status: expression: | - has(resources.clusterController) && - has(resources.clusterController.status) && - has(resources.clusterController.status.readyReplicas) - ? resources.clusterController.status.readyReplicas - : 0 - description: "Number of ready replicas" - - # Metadata fields - observed_generation: - value: "{{ .eventGenerationId }}" - description: "Event generation that was processed" - - lastUpdated: - value: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - description: "Timestamp when status was reported" - - # Build status payload from external template reference - - name: "clusterStatusPayloadRef" - buildRef: "templates/cluster-status-payload.yaml" + resources.clusterController.status.readyReplicas > 0 && + resources.clusterController.status.replicas == resources.clusterController.status.readyReplicas + reason: + expression: | + has(resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].reason) + ? resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].reason + : "DeploymentReady" + message: + expression: | + has(resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].message) + ? 
resources.clusterController.status.conditions.filter(c, c.type == 'Available')[0].message + : "Deployment is available and serving traffic" - # ============================================================================ - # Post Actions - # ============================================================================ - # Post actions are executed after resources are created/updated - postActions: - # Report cluster status to HyperFleet API (always executed) - - name: "reportClusterStatus" - apiCall: - method: "POST" - url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses" - body: "{{ .clusterStatusPayload }}" + # Health: Adapter execution status (runtime) + health: + status: + expression: | + adapter.executionStatus == "success" + reason: + expression: | + has(adapter.errorReason) ? adapter.errorReason : "Healthy" + message: + expression: | + has(adapter.errorMessage) ? adapter.errorMessage : "All adapter operations completed successfully" + + # Extract additional data + data: + readyReplicas: + expression: | + has(resources.clusterController) && + has(resources.clusterController.status) && + has(resources.clusterController.status.readyReplicas) + ? 
resources.clusterController.status.readyReplicas + : 0 + description: "Number of ready replicas" + + # Metadata fields + observed_generation: + value: "{{ .eventGenerationId }}" + description: "Event generation that was processed" + + last_updated: + value: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + description: "Timestamp when status was reported" + + # Build status payload from external template reference + - name: "clusterStatusPayloadRef" + build_ref: "templates/cluster-status-payload.yaml" + + # ============================================================================ + # Post Actions + # ============================================================================ + # Post actions are executed after resources are created/updated + post_actions: + # Report cluster status to HyperFleet API (always executed) + - name: "reportClusterStatus" + api_call: + method: "POST" + url: "/clusters/{{ .clusterId }}/statuses" + body: "{{ .clusterStatusPayload }}" diff --git a/test/integration/config-loader/testdata/adapter_config_valid.yaml b/test/integration/config-loader/testdata/adapter_config_valid.yaml deleted file mode 100644 index 8aa1bd2..0000000 --- a/test/integration/config-loader/testdata/adapter_config_valid.yaml +++ /dev/null @@ -1,197 +0,0 @@ -# Simple valid HyperFleet Adapter Configuration for testing - -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: - name: example-adapter - labels: - hyperfleet.io/adapter-type: example - hyperfleet.io/component: adapter - -spec: - adapter: - version: "0.1.0" - - hyperfleetApi: - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential - - kubernetes: - apiVersion: "v1" - - # Parameters with all required variables - params: - - name: "hyperfleetApiBaseUrl" - source: "env.HYPERFLEET_API_BASE_URL" - type: "string" - required: true - - - name: "hyperfleetApiVersion" - source: "env.HYPERFLEET_API_VERSION" - type: "string" - default: "v1" - - - name: "hyperfleetApiToken" - source: 
"env.HYPERFLEET_API_TOKEN" - type: "string" - required: true - - - name: "clusterId" - source: "event.id" - type: "string" - required: true - - - name: "resourceId" - source: "event.resource_id" - type: "string" - required: true - - # Preconditions with valid operators and CEL expressions - preconditions: - - name: "clusterStatus" - apiCall: - method: "GET" - url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "clusterName" - field: "metadata.name" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - name: "region" - field: "spec.region" - - name: "cloudProvider" - field: "spec.provider" - - name: "vpcId" - field: "spec.vpc_id" - # Structured conditions with valid operators - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "True" - - field: "cloudProvider" - operator: "in" - value: ["aws", "gcp", "azure"] - - field: "vpcId" - operator: "exists" - - - name: "validationCheck" - # Valid CEL expression - expression: | - readyConditionStatus == "True" - - # Resources with valid K8s manifests - resources: - - name: "clusterNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: "cluster-{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/cluster-name: "{{ .clusterName }}" - hyperfleet.io/region: "{{ .region }}" - hyperfleet.io/provider: "{{ .cloudProvider }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - annotations: - hyperfleet.io/vpc-id: "{{ .vpcId }}" - discovery: - namespace: "*" # Cluster-scoped resource (Namespace) - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - - - name: "clusterConfigMap" - manifest: - apiVersion: v1 - kind: 
ConfigMap - metadata: - name: "cluster-config-{{ .clusterId }}" - namespace: "cluster-{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - discovery: - namespace: "cluster-{{ .clusterId }}" - byName: "cluster-config-{{ .clusterId }}" - - - name: "externalTemplate" - manifest: - ref: "templates/deployment.yaml" - discovery: - namespace: "cluster-{{ .clusterId }}" - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - - # Post-processing with valid CEL expressions - post: - payloads: - - name: "clusterStatusPayload" - build: - conditions: - applied: - status: - expression: | - resources.clusterNamespace.status.phase == "Active" - reason: - expression: | - has(resources.clusterNamespace.status.phase) ? "ResourcesCreated" : "Pending" - message: - expression: | - "Namespace status: " + resources.clusterNamespace.status.phase - - available: - status: - expression: | - resources.clusterConfigMap != null - reason: - expression: | - "ConfigMapReady" - message: - expression: | - "ConfigMap is available" - - health: - status: - expression: | - true - reason: - expression: | - "Healthy" - message: - expression: | - "All health checks passed" - - data: - clusterReady: - expression: | - resources.clusterNamespace.status.phase == "Active" - description: "Cluster namespace is active" - - observed_generation: - value: "{{ .resourceId }}" - description: "Resource ID processed" - - lastUpdated: - value: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - description: "Timestamp of status update" - - postActions: - - name: "reportClusterStatus" - apiCall: - method: "POST" - url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses" - headers: - - name: "Authorization" - value: "Bearer {{ .hyperfleetApiToken }}" - - name: "Content-Type" - value: "application/json" - body: "{{ .clusterStatusPayload }}" diff --git a/test/integration/config-loader/testdata/templates/cluster-status-payload.yaml 
b/test/integration/config-loader/testdata/templates/cluster-status-payload.yaml index cc3ef59..10bd28e 100644 --- a/test/integration/config-loader/testdata/templates/cluster-status-payload.yaml +++ b/test/integration/config-loader/testdata/templates/cluster-status-payload.yaml @@ -2,8 +2,8 @@ # Used for reporting cluster status back to HyperFleet API status: "{{ .status }}" message: "{{ .message }}" -observedGeneration: "{{ .eventGenerationId }}" -lastUpdated: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" +observed_generation: "{{ .eventGenerationId }}" +last_updated: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" conditions: - type: "Ready" status: "{{ .readyStatus | default \"Unknown\" }}" diff --git a/test/integration/config-loader/testdata/templates/job.yaml b/test/integration/config-loader/testdata/templates/job.yaml index 03478ef..f7f0fc8 100644 --- a/test/integration/config-loader/testdata/templates/job.yaml +++ b/test/integration/config-loader/testdata/templates/job.yaml @@ -9,7 +9,7 @@ metadata: hyperfleet.io/cluster-id: "{{ .clusterId }}" hyperfleet.io/job-type: "validation" hyperfleet.io/resource-type: "job" - hyperfleet.io/managed-by: "{{ .metadata.name }}" + hyperfleet.io/managed-by: "{{ .adapter.name }}" spec: template: metadata: diff --git a/test/integration/executor/executor_integration_test.go b/test/integration/executor/executor_integration_test.go index f0900ed..4025c65 100644 --- a/test/integration/executor/executor_integration_test.go +++ b/test/integration/executor/executor_integration_test.go @@ -56,93 +56,89 @@ func createTestEvent(clusterId string) *event.Event { func createTestConfig(apiBaseURL string) *config_loader.Config { _ = apiBaseURL // Kept for compatibility; base URL comes from env params. 
return &config_loader.Config{ - APIVersion: config_loader.APIVersionV1Alpha1, - Kind: config_loader.ExpectedKindConfig, - Metadata: config_loader.Metadata{ - Name: "test-adapter", + Adapter: config_loader.AdapterInfo{ + Name: "test-adapter", + Version: "1.0.0", }, - Spec: config_loader.ConfigSpec{ - Adapter: config_loader.AdapterInfo{Version: "1.0.0"}, - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - Timeout: 10 * time.Second, - RetryAttempts: 1, - RetryBackoff: hyperfleet_api.BackoffConstant, - }, - }, - Params: []config_loader.Parameter{ - {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, - {Name: "hyperfleetApiVersion", Source: "env.HYPERFLEET_API_VERSION", Default: "v1", Required: false}, - {Name: "clusterId", Source: "event.id", Required: true}, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: 10 * time.Second, + RetryAttempts: 1, + RetryBackoff: hyperfleet_api.BackoffConstant, }, - Preconditions: []config_loader.Precondition{ - { - ActionBase: config_loader.ActionBase{ - Name: "clusterStatus", - APICall: &config_loader.APICall{ - Method: "GET", - URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", - Timeout: "5s", - }, + }, + Params: []config_loader.Parameter{ + {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, + {Name: "hyperfleetApiVersion", Source: "env.HYPERFLEET_API_VERSION", Default: "v1", Required: false}, + {Name: "clusterId", Source: "event.id", Required: true}, + }, + Preconditions: []config_loader.Precondition{ + { + ActionBase: config_loader.ActionBase{ + Name: "clusterStatus", + APICall: &config_loader.APICall{ + Method: "GET", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", + Timeout: "5s", }, - Capture: []config_loader.CaptureField{ - {Name: "clusterName", FieldExpressionDef: 
config_loader.FieldExpressionDef{Field: "metadata.name"}}, - { - Name: "readyConditionStatus", - FieldExpressionDef: config_loader.FieldExpressionDef{ - Expression: `status.conditions.filter(c, c.type == "Ready").size() > 0 + }, + Capture: []config_loader.CaptureField{ + {Name: "clusterName", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "name"}}, + { + Name: "readyConditionStatus", + FieldExpressionDef: config_loader.FieldExpressionDef{ + Expression: `status.conditions.filter(c, c.type == "Ready").size() > 0 ? status.conditions.filter(c, c.type == "Ready")[0].status : "False"`, - }, }, - {Name: "region", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "spec.region"}}, - {Name: "cloudProvider", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "spec.provider"}}, - {Name: "vpcId", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "spec.vpc_id"}}, - }, - Conditions: []config_loader.Condition{ - {Field: "readyConditionStatus", Operator: "equals", Value: "True"}, - {Field: "cloudProvider", Operator: "in", Value: []interface{}{"aws", "gcp", "azure"}}, }, + {Name: "region", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "spec.region"}}, + {Name: "cloudProvider", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "spec.provider"}}, + {Name: "vpcId", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "spec.vpc_id"}}, + }, + Conditions: []config_loader.Condition{ + {Field: "readyConditionStatus", Operator: "equals", Value: "True"}, + {Field: "cloudProvider", Operator: "in", Value: []interface{}{"aws", "gcp", "azure"}}, }, }, - Resources: []config_loader.Resource{}, - Post: &config_loader.PostConfig{ - Payloads: []config_loader.Payload{ - { - Name: "clusterStatusPayload", - Build: map[string]interface{}{ - "conditions": map[string]interface{}{ - "health": map[string]interface{}{ - "status": map[string]interface{}{ - "expression": `adapter.executionStatus == "success" && !adapter.resourcesSkipped`, - 
}, - "reason": map[string]interface{}{ - "expression": `adapter.resourcesSkipped ? "PreconditionNotMet" : (adapter.errorReason != "" ? adapter.errorReason : "Healthy")`, - }, - "message": map[string]interface{}{ - "expression": `adapter.skipReason != "" ? adapter.skipReason : (adapter.errorMessage != "" ? adapter.errorMessage : "All adapter operations completed successfully")`, - }, + }, + Resources: []config_loader.Resource{}, + Post: &config_loader.PostConfig{ + Payloads: []config_loader.Payload{ + { + Name: "clusterStatusPayload", + Build: map[string]interface{}{ + "conditions": map[string]interface{}{ + "health": map[string]interface{}{ + "status": map[string]interface{}{ + "expression": `adapter.executionStatus == "success" && !adapter.resourcesSkipped`, + }, + "reason": map[string]interface{}{ + "expression": `adapter.resourcesSkipped ? "PreconditionNotMet" : (adapter.errorReason != "" ? adapter.errorReason : "Healthy")`, + }, + "message": map[string]interface{}{ + "expression": `adapter.skipReason != "" ? adapter.skipReason : (adapter.errorMessage != "" ? adapter.errorMessage : "All adapter operations completed successfully")`, }, }, - "clusterId": map[string]interface{}{ - "value": "{{ .clusterId }}", - }, - "clusterName": map[string]interface{}{ - "expression": `clusterName != "" ? clusterName : "unknown"`, - }, + }, + "clusterId": map[string]interface{}{ + "value": "{{ .clusterId }}", + }, + "clusterName": map[string]interface{}{ + "expression": `clusterName != "" ? 
clusterName : "unknown"`, }, }, }, - PostActions: []config_loader.PostAction{ - { - ActionBase: config_loader.ActionBase{ - Name: "reportClusterStatus", - APICall: &config_loader.APICall{ - Method: "POST", - URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses", - Body: "{{ .clusterStatusPayload }}", - Timeout: "5s", - }, + }, + PostActions: []config_loader.PostAction{ + { + ActionBase: config_loader.ActionBase{ + Name: "reportClusterStatus", + APICall: &config_loader.APICall{ + Method: "POST", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses", + Body: "{{ .clusterStatusPayload }}", + Timeout: "5s", }, }, }, @@ -281,9 +277,9 @@ func TestExecutor_PreconditionNotMet(t *testing.T) { // Set cluster to a phase that doesn't match conditions mockAPI.SetClusterResponse(map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-cluster", - }, + "id": "test-cluster-id", + "name": "test-cluster", + "kind": "Cluster", "spec": map[string]interface{}{ "region": "us-east-1", "provider": "aws", @@ -499,7 +495,7 @@ func TestExecutor_CELExpressionEvaluation(t *testing.T) { // Create config with CEL expression precondition config := createTestConfig(mockAPI.URL()) - config.Spec.Preconditions = []config_loader.Precondition{ + config.Preconditions = []config_loader.Precondition{ { ActionBase: config_loader.ActionBase{ Name: "clusterStatus", @@ -510,7 +506,7 @@ func TestExecutor_CELExpressionEvaluation(t *testing.T) { }, }, Capture: []config_loader.CaptureField{ - {Name: "clusterName", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "metadata.name"}}, + {Name: "clusterName", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "name"}}, { Name: "readyConditionStatus", FieldExpressionDef: config_loader.FieldExpressionDef{ @@ -666,9 +662,11 @@ func TestExecutor_Handler_PreconditionNotMet_ReturnsNil(t *testing.T) { defer mockAPI.Close() 
mockAPI.SetClusterResponse(map[string]interface{}{ - "metadata": map[string]interface{}{"name": "test"}, - "spec": map[string]interface{}{"region": "us-east-1", "provider": "aws"}, - "status": map[string]interface{}{"phase": "Terminating"}, // Won't match + "id": "test-id", + "name": "test", + "kind": "Cluster", + "spec": map[string]interface{}{"region": "us-east-1", "provider": "aws"}, + "status": map[string]interface{}{"phase": "Terminating"}, // Won't match }) t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) @@ -924,85 +922,81 @@ func TestExecutor_LogAction(t *testing.T) { // Create config with log actions in preconditions and post-actions config := &config_loader.Config{ - APIVersion: config_loader.APIVersionV1Alpha1, - Kind: config_loader.ExpectedKindConfig, - Metadata: config_loader.Metadata{ - Name: "log-test-adapter", + Adapter: config_loader.AdapterInfo{ + Name: "log-test-adapter", + Version: "1.0.0", + }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: 10 * time.Second, RetryAttempts: 1, + }, + }, + Params: []config_loader.Parameter{ + {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, + {Name: "hyperfleetApiVersion", Default: "v1"}, + {Name: "clusterId", Source: "event.id", Required: true}, }, - Spec: config_loader.ConfigSpec{ - Adapter: config_loader.AdapterInfo{Version: "1.0.0"}, - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - Timeout: 10 * time.Second, RetryAttempts: 1, + Preconditions: []config_loader.Precondition{ + { + // Log action only - no API call or conditions + ActionBase: config_loader.ActionBase{ + Name: "logStart", + Log: &config_loader.LogAction{ + Message: "Starting processing for cluster {{ .clusterId }}", + Level: "info", + }, }, }, - Params: []config_loader.Parameter{ - {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, - {Name: "hyperfleetApiVersion", Default: 
"v1"}, - {Name: "clusterId", Source: "event.id", Required: true}, + { + // Log action before API call + ActionBase: config_loader.ActionBase{ + Name: "logBeforeAPICall", + Log: &config_loader.LogAction{ + Message: "About to check cluster status for {{ .clusterId }}", + Level: "debug", + }, + }, }, - Preconditions: []config_loader.Precondition{ - { - // Log action only - no API call or conditions - ActionBase: config_loader.ActionBase{ - Name: "logStart", - Log: &config_loader.LogAction{ - Message: "Starting processing for cluster {{ .clusterId }}", - Level: "info", + { + ActionBase: config_loader.ActionBase{ + Name: "checkCluster", + APICall: &config_loader.APICall{ + Method: "GET", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", + }, + }, + Capture: []config_loader.CaptureField{ + { + Name: "readyConditionStatus", + FieldExpressionDef: config_loader.FieldExpressionDef{ + Expression: `status.conditions.filter(c, c.type == "Ready").size() > 0 ? 
status.conditions.filter(c, c.type == "Ready")[0].status : "False"`, }, }, }, + Conditions: []config_loader.Condition{ + {Field: "readyConditionStatus", Operator: "equals", Value: "True"}, + }, + }, + }, + Post: &config_loader.PostConfig{ + PostActions: []config_loader.PostAction{ { - // Log action before API call + // Log action in post-actions ActionBase: config_loader.ActionBase{ - Name: "logBeforeAPICall", + Name: "logCompletion", Log: &config_loader.LogAction{ - Message: "About to check cluster status for {{ .clusterId }}", - Level: "debug", + Message: "Completed processing cluster {{ .clusterId }} with resource {{ .resourceId }}", + Level: "info", }, }, }, { + // Log with warning level ActionBase: config_loader.ActionBase{ - Name: "checkCluster", - APICall: &config_loader.APICall{ - Method: "GET", - URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", - }, - }, - Capture: []config_loader.CaptureField{ - { - Name: "readyConditionStatus", - FieldExpressionDef: config_loader.FieldExpressionDef{ - Expression: `status.conditions.filter(c, c.type == "Ready").size() > 0 ? 
status.conditions.filter(c, c.type == "Ready")[0].status : "False"`, - }, - }, - }, - Conditions: []config_loader.Condition{ - {Field: "readyConditionStatus", Operator: "equals", Value: "True"}, - }, - }, - }, - Post: &config_loader.PostConfig{ - PostActions: []config_loader.PostAction{ - { - // Log action in post-actions - ActionBase: config_loader.ActionBase{ - Name: "logCompletion", - Log: &config_loader.LogAction{ - Message: "Completed processing cluster {{ .clusterId }} with resource {{ .resourceId }}", - Level: "info", - }, - }, - }, - { - // Log with warning level - ActionBase: config_loader.ActionBase{ - Name: "logWarning", - Log: &config_loader.LogAction{ - Message: "This is a warning for cluster {{ .clusterId }}", - Level: "warning", - }, + Name: "logWarning", + Log: &config_loader.LogAction{ + Message: "This is a warning for cluster {{ .clusterId }}", + Level: "warning", }, }, }, @@ -1023,7 +1017,7 @@ func TestExecutor_LogAction(t *testing.T) { } // Execute - evt := createTestEvent("log-test-cluster") + evt := createTestEvent("log-test-clusterx") result := exec.Execute(context.Background(), evt) // Should succeed @@ -1175,88 +1169,84 @@ func TestExecutor_ExecutionError_CELAccess(t *testing.T) { // Create config with CEL expressions that access adapter.executionError config := &config_loader.Config{ - APIVersion: config_loader.APIVersionV1Alpha1, - Kind: config_loader.ExpectedKindConfig, - Metadata: config_loader.Metadata{ - Name: "executionError-cel-test", + Adapter: config_loader.AdapterInfo{ + Name: "executionError-cel-test", + Version: "1.0.0", }, - Spec: config_loader.ConfigSpec{ - Adapter: config_loader.AdapterInfo{Version: "1.0.0"}, - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - Timeout: 10 * time.Second, RetryAttempts: 1, RetryBackoff: hyperfleet_api.BackoffConstant, - }, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: 10 * time.Second, 
RetryAttempts: 1, RetryBackoff: hyperfleet_api.BackoffConstant, }, - Params: []config_loader.Parameter{ - {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, - {Name: "hyperfleetApiVersion", Default: "v1"}, - {Name: "clusterId", Source: "event.id", Required: true}, - }, - Preconditions: []config_loader.Precondition{ - { - ActionBase: config_loader.ActionBase{ - Name: "clusterStatus", - APICall: &config_loader.APICall{ - Method: "GET", - URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", - Timeout: "5s", - }, + }, + Params: []config_loader.Parameter{ + {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, + {Name: "hyperfleetApiVersion", Default: "v1"}, + {Name: "clusterId", Source: "event.id", Required: true}, + }, + Preconditions: []config_loader.Precondition{ + { + ActionBase: config_loader.ActionBase{ + Name: "clusterStatus", + APICall: &config_loader.APICall{ + Method: "GET", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", + Timeout: "5s", }, - Capture: []config_loader.CaptureField{ - { - Name: "readyConditionStatus", - FieldExpressionDef: config_loader.FieldExpressionDef{ - Expression: `status.conditions.filter(c, c.type == "Ready").size() > 0 ? status.conditions.filter(c, c.type == "Ready")[0].status : "False"`, - }, + }, + Capture: []config_loader.CaptureField{ + { + Name: "readyConditionStatus", + FieldExpressionDef: config_loader.FieldExpressionDef{ + Expression: `status.conditions.filter(c, c.type == "Ready").size() > 0 ? 
status.conditions.filter(c, c.type == "Ready")[0].status : "False"`, }, }, - Conditions: []config_loader.Condition{ - {Field: "readyConditionStatus", Operator: "equals", Value: "True"}, - }, + }, + Conditions: []config_loader.Condition{ + {Field: "readyConditionStatus", Operator: "equals", Value: "True"}, }, }, - Resources: []config_loader.Resource{}, - Post: &config_loader.PostConfig{ - Payloads: []config_loader.Payload{ - { - Name: "errorReportPayload", - Build: map[string]interface{}{ - // Test accessing adapter.executionError fields via CEL - "hasError": map[string]interface{}{ - "expression": "has(adapter.executionError) && adapter.executionError != null", - }, - "errorPhase": map[string]interface{}{ - "expression": "has(adapter.executionError) && adapter.executionError != null ? adapter.executionError.phase : \"no_error\"", - }, - "errorStep": map[string]interface{}{ - "expression": "has(adapter.executionError) && adapter.executionError != null ? adapter.executionError.step : \"no_step\"", - }, - "errorMessage": map[string]interface{}{ - "expression": "has(adapter.executionError) && adapter.executionError != null ? adapter.executionError.message : \"no_message\"", - }, - // Also test that other adapter fields still work - "executionStatus": map[string]interface{}{ - "expression": "adapter.executionStatus", - }, - "errorReason": map[string]interface{}{ - "expression": "adapter.errorReason", - }, - "clusterId": map[string]interface{}{ - "value": "{{ .clusterId }}", - }, + }, + Resources: []config_loader.Resource{}, + Post: &config_loader.PostConfig{ + Payloads: []config_loader.Payload{ + { + Name: "errorReportPayload", + Build: map[string]interface{}{ + // Test accessing adapter.executionError fields via CEL + "hasError": map[string]interface{}{ + "expression": "has(adapter.executionError) && adapter.executionError != null", + }, + "errorPhase": map[string]interface{}{ + "expression": "has(adapter.executionError) && adapter.executionError != null ? 
adapter.executionError.phase : \"no_error\"", + }, + "errorStep": map[string]interface{}{ + "expression": "has(adapter.executionError) && adapter.executionError != null ? adapter.executionError.step : \"no_step\"", + }, + "errorMessage": map[string]interface{}{ + "expression": "has(adapter.executionError) && adapter.executionError != null ? adapter.executionError.message : \"no_message\"", + }, + // Also test that other adapter fields still work + "executionStatus": map[string]interface{}{ + "expression": "adapter.executionStatus", + }, + "errorReason": map[string]interface{}{ + "expression": "adapter.errorReason", + }, + "clusterId": map[string]interface{}{ + "value": "{{ .clusterId }}", }, }, }, - PostActions: []config_loader.PostAction{ - { - ActionBase: config_loader.ActionBase{ - Name: "reportError", - APICall: &config_loader.APICall{ - Method: "POST", - URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/error-report", - Body: "{{ .errorReportPayload }}", - Timeout: "5s", - }, + }, + PostActions: []config_loader.PostAction{ + { + ActionBase: config_loader.ActionBase{ + Name: "reportError", + APICall: &config_loader.APICall{ + Method: "POST", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/error-report", + Body: "{{ .errorReportPayload }}", + Timeout: "5s", }, }, }, @@ -1350,54 +1340,50 @@ func TestExecutor_PayloadBuildFailure(t *testing.T) { // Create config with invalid CEL expression in payload build (will cause build failure) config := &config_loader.Config{ - APIVersion: config_loader.APIVersionV1Alpha1, - Kind: config_loader.ExpectedKindConfig, - Metadata: config_loader.Metadata{ - Name: "payload-build-fail-test", + Adapter: config_loader.AdapterInfo{ + Name: "payload-build-fail-test", + Version: "1.0.0", }, - Spec: config_loader.ConfigSpec{ - Adapter: config_loader.AdapterInfo{Version: "1.0.0"}, - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: 
config_loader.HyperfleetAPIConfig{ - Timeout: 10 * time.Second, RetryAttempts: 1, - }, - }, - Params: []config_loader.Parameter{ - {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, - {Name: "hyperfleetApiVersion", Default: "v1"}, - {Name: "clusterId", Source: "event.id", Required: true}, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: 10 * time.Second, RetryAttempts: 1, }, - Preconditions: []config_loader.Precondition{ - { - ActionBase: config_loader.ActionBase{Name: "simpleCheck"}, - Conditions: []config_loader.Condition{ - {Field: "clusterId", Operator: "equals", Value: "test-cluster"}, - }, + }, + Params: []config_loader.Parameter{ + {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, + {Name: "hyperfleetApiVersion", Default: "v1"}, + {Name: "clusterId", Source: "event.id", Required: true}, + }, + Preconditions: []config_loader.Precondition{ + { + ActionBase: config_loader.ActionBase{Name: "simpleCheck"}, + Conditions: []config_loader.Condition{ + {Field: "clusterId", Operator: "equals", Value: "test-cluster"}, }, }, - Resources: []config_loader.Resource{}, - Post: &config_loader.PostConfig{ - Payloads: []config_loader.Payload{ - { - Name: "badPayload", - Build: map[string]interface{}{ - // Use template that references non-existent parameter - "field": map[string]interface{}{ - "value": "{{ .nonExistentParam }}", - }, + }, + Resources: []config_loader.Resource{}, + Post: &config_loader.PostConfig{ + Payloads: []config_loader.Payload{ + { + Name: "badPayload", + Build: map[string]interface{}{ + // Use template that references non-existent parameter + "field": map[string]interface{}{ + "value": "{{ .nonExistentParam }}", }, }, }, - PostActions: []config_loader.PostAction{ - { - ActionBase: config_loader.ActionBase{ - Name: "shouldNotExecute", - APICall: &config_loader.APICall{ - Method: "POST", - URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ 
.hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses", - Body: "{{ .badPayload }}", - Timeout: "5s", - }, + }, + PostActions: []config_loader.PostAction{ + { + ActionBase: config_loader.ActionBase{ + Name: "shouldNotExecute", + APICall: &config_loader.APICall{ + Method: "POST", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses", + Body: "{{ .badPayload }}", + Timeout: "5s", }, }, }, diff --git a/test/integration/executor/executor_k8s_integration_test.go b/test/integration/executor/executor_k8s_integration_test.go index abbd6d3..0324ad9 100644 --- a/test/integration/executor/executor_k8s_integration_test.go +++ b/test/integration/executor/executor_k8s_integration_test.go @@ -41,9 +41,9 @@ func newK8sTestAPIServer(t *testing.T) *k8sTestAPIServer { mock := &k8sTestAPIServer{ requests: make([]k8sTestRequest, 0), clusterResponse: map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-cluster", - }, + "id": "test-cluster-id", + "name": "test-cluster", + "kind": "Cluster", "spec": map[string]interface{}{ "region": "us-east-1", "provider": "aws", @@ -145,163 +145,157 @@ func createK8sTestEvent(clusterId string) *event.Event { func createK8sTestConfig(apiBaseURL, testNamespace string) *config_loader.Config { _ = apiBaseURL // Base URL is pulled from env params return &config_loader.Config{ - APIVersion: config_loader.APIVersionV1Alpha1, - Kind: config_loader.ExpectedKindConfig, - Metadata: config_loader.Metadata{ - Name: "k8s-test-adapter", + Adapter: config_loader.AdapterInfo{ + Name: "k8s-test-adapter", + Version: "1.0.0", }, - Spec: config_loader.ConfigSpec{ - Adapter: config_loader.AdapterInfo{ - Version: "1.0.0", + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: 10 * time.Second, + RetryAttempts: 1, + RetryBackoff: hyperfleet_api.BackoffConstant, }, - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: 
config_loader.HyperfleetAPIConfig{ - Timeout: 10 * time.Second, - RetryAttempts: 1, - RetryBackoff: hyperfleet_api.BackoffConstant, - }, + }, + Params: []config_loader.Parameter{ + { + Name: "hyperfleetApiBaseUrl", + Source: "env.HYPERFLEET_API_BASE_URL", + Required: true, }, - Params: []config_loader.Parameter{ - { - Name: "hyperfleetApiBaseUrl", - Source: "env.HYPERFLEET_API_BASE_URL", - Required: true, - }, - { - Name: "hyperfleetApiVersion", - Source: "env.HYPERFLEET_API_VERSION", - Default: "v1", - Required: false, - }, - { - Name: "clusterId", - Source: "event.id", - Required: true, - }, - { - Name: "testNamespace", - Default: testNamespace, - Required: false, - }, + { + Name: "hyperfleetApiVersion", + Source: "env.HYPERFLEET_API_VERSION", + Default: "v1", + Required: false, }, - Preconditions: []config_loader.Precondition{ - { - ActionBase: config_loader.ActionBase{ - Name: "clusterStatus", - APICall: &config_loader.APICall{ - Method: "GET", - URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", - Timeout: "5s", - }, + { + Name: "clusterId", + Source: "event.id", + Required: true, + }, + { + Name: "testNamespace", + Default: testNamespace, + Required: false, + }, + }, + Preconditions: []config_loader.Precondition{ + { + ActionBase: config_loader.ActionBase{ + Name: "clusterStatus", + APICall: &config_loader.APICall{ + Method: "GET", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", + Timeout: "5s", }, - Capture: []config_loader.CaptureField{ - {Name: "clusterName", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "metadata.name"}}, - { - Name: "readyConditionStatus", - FieldExpressionDef: config_loader.FieldExpressionDef{ - Expression: `status.conditions.filter(c, c.type == "Ready").size() > 0 ? 
status.conditions.filter(c, c.type == "Ready")[0].status : "False"`, - }, + }, + Capture: []config_loader.CaptureField{ + {Name: "clusterName", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "name"}}, + { + Name: "readyConditionStatus", + FieldExpressionDef: config_loader.FieldExpressionDef{ + Expression: `status.conditions.filter(c, c.type == "Ready").size() > 0 ? status.conditions.filter(c, c.type == "Ready")[0].status : "False"`, }, - {Name: "region", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "spec.region"}}, - {Name: "cloudProvider", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "spec.provider"}}, - }, - Conditions: []config_loader.Condition{ - {Field: "readyConditionStatus", Operator: "equals", Value: "True"}, }, + {Name: "region", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "spec.region"}}, + {Name: "cloudProvider", FieldExpressionDef: config_loader.FieldExpressionDef{Field: "spec.provider"}}, + }, + Conditions: []config_loader.Condition{ + {Field: "readyConditionStatus", Operator: "equals", Value: "True"}, }, }, - // K8s Resources to create - Resources: []config_loader.Resource{ - { - Name: "clusterConfigMap", - Manifest: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "cluster-config-{{ .clusterId }}", - "namespace": testNamespace, - "labels": map[string]interface{}{ - "hyperfleet.io/cluster-id": "{{ .clusterId }}", - "hyperfleet.io/managed-by": "{{ .metadata.name }}", - "test": "executor-integration", - }, - }, - "data": map[string]interface{}{ - "cluster-id": "{{ .clusterId }}", - "cluster-name": "{{ .clusterName }}", - "region": "{{ .region }}", - "provider": "{{ .cloudProvider }}", - "readyStatus": "{{ .readyConditionStatus }}", + }, + // K8s Resources to create + Resources: []config_loader.Resource{ + { + Name: "clusterConfigMap", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": 
map[string]interface{}{ + "name": "cluster-config-{{ .clusterId }}", + "namespace": testNamespace, + "labels": map[string]interface{}{ + "hyperfleet.io/cluster-id": "{{ .clusterId }}", + "hyperfleet.io/managed-by": "{{ .adapter.name }}", + "test": "executor-integration", }, }, - Discovery: &config_loader.DiscoveryConfig{ - Namespace: testNamespace, - ByName: "cluster-config-{{ .clusterId }}", + "data": map[string]interface{}{ + "cluster-id": "{{ .clusterId }}", + "cluster-name": "{{ .clusterName }}", + "region": "{{ .region }}", + "provider": "{{ .cloudProvider }}", + "readyStatus": "{{ .readyConditionStatus }}", }, }, - { - Name: "clusterSecret", - Manifest: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Secret", - "metadata": map[string]interface{}{ - "name": "cluster-secret-{{ .clusterId }}", - "namespace": testNamespace, - "labels": map[string]interface{}{ - "hyperfleet.io/cluster-id": "{{ .clusterId }}", - "hyperfleet.io/managed-by": "{{ .metadata.name }}", - "test": "executor-integration", - }, - }, - "type": "Opaque", - "stringData": map[string]interface{}{ - "cluster-id": "{{ .clusterId }}", - "api-token": "test-token-{{ .clusterId }}", + Discovery: &config_loader.DiscoveryConfig{ + Namespace: testNamespace, + ByName: "cluster-config-{{ .clusterId }}", + }, + }, + { + Name: "clusterSecret", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Secret", + "metadata": map[string]interface{}{ + "name": "cluster-secret-{{ .clusterId }}", + "namespace": testNamespace, + "labels": map[string]interface{}{ + "hyperfleet.io/cluster-id": "{{ .clusterId }}", + "hyperfleet.io/managed-by": "{{ .adapter.name }}", + "test": "executor-integration", }, }, - Discovery: &config_loader.DiscoveryConfig{ - Namespace: testNamespace, - ByName: "cluster-secret-{{ .clusterId }}", + "type": "Opaque", + "stringData": map[string]interface{}{ + "cluster-id": "{{ .clusterId }}", + "api-token": "test-token-{{ .clusterId }}", }, }, + Discovery: 
&config_loader.DiscoveryConfig{ + Namespace: testNamespace, + ByName: "cluster-secret-{{ .clusterId }}", + }, }, - Post: &config_loader.PostConfig{ - Payloads: []config_loader.Payload{ - { - Name: "clusterStatusPayload", - Build: map[string]interface{}{ - "conditions": map[string]interface{}{ - "applied": map[string]interface{}{ - "status": map[string]interface{}{ - "expression": "adapter.executionStatus == \"success\"", - }, - "reason": map[string]interface{}{ - "expression": "has(adapter.errorReason) ? adapter.errorReason : \"ResourcesCreated\"", - }, - "message": map[string]interface{}{ - "expression": "has(adapter.errorMessage) ? adapter.errorMessage : \"ConfigMap and Secret created successfully\"", - }, + }, + Post: &config_loader.PostConfig{ + Payloads: []config_loader.Payload{ + { + Name: "clusterStatusPayload", + Build: map[string]interface{}{ + "conditions": map[string]interface{}{ + "applied": map[string]interface{}{ + "status": map[string]interface{}{ + "expression": "adapter.executionStatus == \"success\"", + }, + "reason": map[string]interface{}{ + "expression": "has(adapter.errorReason) ? adapter.errorReason : \"ResourcesCreated\"", + }, + "message": map[string]interface{}{ + "expression": "has(adapter.errorMessage) ? 
adapter.errorMessage : \"ConfigMap and Secret created successfully\"", }, - }, - "clusterId": map[string]interface{}{ - "value": "{{ .clusterId }}", - }, - "resourcesCreated": map[string]interface{}{ - "value": "2", }, }, + "clusterId": map[string]interface{}{ + "value": "{{ .clusterId }}", + }, + "resourcesCreated": map[string]interface{}{ + "value": "2", + }, }, }, - PostActions: []config_loader.PostAction{ - { - ActionBase: config_loader.ActionBase{ - Name: "reportClusterStatus", - APICall: &config_loader.APICall{ - Method: "POST", - URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses", - Body: "{{ .clusterStatusPayload }}", - Timeout: "5s", - }, + }, + PostActions: []config_loader.PostAction{ + { + ActionBase: config_loader.ActionBase{ + Name: "reportClusterStatus", + APICall: &config_loader.APICall{ + Method: "POST", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses", + Body: "{{ .clusterStatusPayload }}", + Timeout: "5s", }, }, }, @@ -482,7 +476,7 @@ func TestExecutor_K8s_UpdateExistingResource(t *testing.T) { // Create executor config := createK8sTestConfig(mockAPI.URL(), testNamespace) // Only include ConfigMap resource for this test - config.Spec.Resources = config.Spec.Resources[:1] + config.Resources = config.Resources[:1] apiClient, err := hyperfleet_api.NewClient(testLog()) require.NoError(t, err) @@ -560,7 +554,7 @@ func TestExecutor_K8s_DiscoveryByLabels(t *testing.T) { // Create config with label-based discovery config := createK8sTestConfig(mockAPI.URL(), testNamespace) // Modify to use label selector instead of byName - config.Spec.Resources = []config_loader.Resource{ + config.Resources = []config_loader.Resource{ { Name: "clusterConfigMap", Manifest: map[string]interface{}{ @@ -571,7 +565,7 @@ func TestExecutor_K8s_DiscoveryByLabels(t *testing.T) { "namespace": testNamespace, "labels": map[string]interface{}{ "hyperfleet.io/cluster-id": "{{ 
.clusterId }}", - "hyperfleet.io/managed-by": "{{ .metadata.name }}", + "hyperfleet.io/managed-by": "{{ .adapter.name }}", "app": "cluster-config", }, }, @@ -637,7 +631,7 @@ func TestExecutor_K8s_RecreateOnChange(t *testing.T) { // Create config with recreateOnChange config := createK8sTestConfig(mockAPI.URL(), testNamespace) - config.Spec.Resources = []config_loader.Resource{ + config.Resources = []config_loader.Resource{ { Name: "clusterConfigMap", RecreateOnChange: true, // Enable recreate @@ -881,44 +875,37 @@ func TestExecutor_K8s_MultipleMatchingResources(t *testing.T) { // Create config WITHOUT discovery - just create a new resource // Discovery-based update logic is not yet implemented config := &config_loader.Config{ - APIVersion: config_loader.APIVersionV1Alpha1, - Kind: config_loader.ExpectedKindConfig, - Metadata: config_loader.Metadata{ - Name: "multi-match-test", - }, - Spec: config_loader.ConfigSpec{ - Adapter: config_loader.AdapterInfo{Version: "1.0.0"}, - Clients: config_loader.ClientsConfig{ - HyperfleetAPI: config_loader.HyperfleetAPIConfig{ - Timeout: 10 * time.Second, RetryAttempts: 1, - }, + Adapter: config_loader.AdapterInfo{Name: "multi-match-test", Version: "1.0.0"}, + Clients: config_loader.ClientsConfig{ + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: 10 * time.Second, RetryAttempts: 1, }, - Params: []config_loader.Parameter{ - {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, - {Name: "hyperfleetApiVersion", Default: "v1"}, - {Name: "clusterId", Source: "event.id", Required: true}, - }, - // No preconditions - this test focuses on resource creation - Resources: []config_loader.Resource{ - { - Name: "clusterConfig", - Manifest: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "config-{{ .clusterId }}-new", - "labels": map[string]interface{}{ - "hyperfleet.io/cluster-id": "{{ .clusterId }}", - "app": "multi-match-test", - 
}, - }, - "data": map[string]interface{}{ - "cluster-id": "{{ .clusterId }}", - "created": "true", + }, + Params: []config_loader.Parameter{ + {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, + {Name: "hyperfleetApiVersion", Default: "v1"}, + {Name: "clusterId", Source: "event.id", Required: true}, + }, + // No preconditions - this test focuses on resource creation + Resources: []config_loader.Resource{ + { + Name: "clusterConfig", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "config-{{ .clusterId }}-new", + "labels": map[string]interface{}{ + "hyperfleet.io/cluster-id": "{{ .clusterId }}", + "app": "multi-match-test", }, }, - // No Discovery - just create the resource + "data": map[string]interface{}{ + "cluster-id": "{{ .clusterId }}", + "created": "true", + }, }, + // No Discovery - just create the resource }, }, } @@ -979,8 +966,10 @@ func TestExecutor_K8s_PostActionsAfterPreconditionNotMet(t *testing.T) { // Set cluster to Ready condition False (won't match condition) mockAPI.clusterResponse = map[string]interface{}{ - "metadata": map[string]interface{}{"name": "test-cluster"}, - "spec": map[string]interface{}{"region": "us-east-1"}, + "id": "test-cluster-id", + "name": "test-cluster", + "kind": "Cluster", + "spec": map[string]interface{}{"region": "us-east-1"}, "status": map[string]interface{}{ "conditions": []map[string]interface{}{ { diff --git a/test/integration/executor/testdata/test-adapter-config.yaml b/test/integration/executor/testdata/test-adapter-config.yaml deleted file mode 100644 index 21212dc..0000000 --- a/test/integration/executor/testdata/test-adapter-config.yaml +++ /dev/null @@ -1,87 +0,0 @@ -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: - name: test-adapter - -spec: - adapter: - version: "1.0.0" - - hyperfleetAPI: - timeout: 10s - retryAttempts: 1 - retryBackoff: constant - - params: - - 
name: hyperfleetApiBaseUrl - source: env.HYPERFLEET_API_BASE_URL - required: true - - - name: hyperfleetApiVersion - source: env.HYPERFLEET_API_VERSION - default: v1 - required: false - - - name: clusterId - source: event.id - required: true - - preconditions: - - name: clusterStatus - apiCall: - method: GET - url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}" - timeout: 5s - capture: - - name: clusterName - field: metadata.name - - name: readyConditionStatus - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - name: region - field: spec.region - - name: cloudProvider - field: spec.provider - - name: vpcId - field: spec.vpc_id - conditions: - - field: readyConditionStatus - operator: equals - value: "True" - - field: cloudProvider - operator: in - value: ["aws", "gcp", "azure"] - - resources: [] # No K8s resources in this test - dry run mode - - post: - payloads: - - name: clusterStatusPayload - build: - conditions: - health: - status: - expression: | - adapter.executionStatus == "success" && !adapter.resourcesSkipped - reason: - expression: | - adapter.resourcesSkipped ? "PreconditionNotMet" : (adapter.errorReason != "" ? adapter.errorReason : "Healthy") - message: - expression: | - adapter.skipReason != "" ? adapter.skipReason : (adapter.errorMessage != "" ? adapter.errorMessage : "All adapter operations completed successfully") - clusterId: - value: "{{ .clusterId }}" - clusterName: - expression: | - clusterName != "" ? 
clusterName : "unknown" - - postActions: - - name: reportClusterStatus - apiCall: - method: POST - url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses" - body: "{{ .clusterStatusPayload }}" - timeout: 5s - diff --git a/test/integration/maestro_client/client_tls_config_integration_test.go b/test/integration/maestro_client/client_tls_config_integration_test.go index 416b07e..0f0e457 100644 --- a/test/integration/maestro_client/client_tls_config_integration_test.go +++ b/test/integration/maestro_client/client_tls_config_integration_test.go @@ -57,13 +57,13 @@ func writeTestAdapterConfig(t *testing.T, dir string, opts map[string]string) st tlsBlock := "" if opts["caFile"] != "" { - tlsBlock = fmt.Sprintf(` auth: - type: "tls" - tlsConfig: - caFile: %q - certFile: %q - keyFile: %q - httpCaFile: %q`, opts["caFile"], opts["certFile"], opts["keyFile"], opts["httpCaFile"]) + tlsBlock = fmt.Sprintf(` auth: + type: "tls" + tls_config: + ca_file: %q + cert_file: %q + key_file: %q + http_ca_file: %q`, opts["caFile"], opts["certFile"], opts["keyFile"], opts["httpCaFile"]) } insecure := "false" @@ -71,30 +71,28 @@ func writeTestAdapterConfig(t *testing.T, dir string, opts map[string]string) st insecure = "true" } - yaml := fmt.Sprintf(`apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: + yaml := fmt.Sprintf(`adapter: name: tls-integration-test -spec: - adapter: - version: "0.1.0" - clients: - maestro: - grpcServerAddress: %q - httpServerAddress: %q - sourceId: %q - insecure: %s - timeout: "15s" - serverHealthinessTimeout: "25s" + version: "0.1.0" +clients: + maestro: + grpc_server_address: %q + http_server_address: %q + source_id: %q + insecure: %s + timeout: "15s" + server_healthiness_timeout: "25s" %s - hyperfleetApi: - baseUrl: http://localhost:8000 - version: v1 - timeout: 2s - retryAttempts: 1 - broker: - subscriptionId: test - topic: test + hyperfleet_api: + base_url: http://localhost:8000 + version: 
v1 + timeout: 2s + retry_attempts: 1 + broker: + subscription_id: test + topic: test + kubernetes: + api_version: v1 `, opts["grpcAddr"], opts["httpAddr"], opts["sourceId"], insecure, tlsBlock) path := filepath.Join(dir, "adapter-config.yaml") @@ -105,12 +103,7 @@ spec: // writeMinimalTaskConfig writes the smallest valid AdapterTaskConfig. func writeMinimalTaskConfig(t *testing.T, dir string) string { t.Helper() - yaml := `apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: tls-test-task -spec: - params: [] + yaml := `params: [] ` path := filepath.Join(dir, "task-config.yaml") require.NoError(t, os.WriteFile(path, []byte(yaml), 0o644)) @@ -143,9 +136,11 @@ func TestTLSConfigLoadAndConnect_MutualTLS(t *testing.T) { config_loader.WithSkipSemanticValidation(), ) require.NoError(t, err, "Config loading should succeed") - require.NotNil(t, cfg.Spec.Clients.Maestro, "Maestro config should be present") + require.NotNil(t, cfg, "Config should not be nil") + require.NotNil(t, cfg.Clients, "Clients config should not be nil") + require.NotNil(t, cfg.Clients.Maestro, "Maestro config should be present") - maestroCfg := cfg.Spec.Clients.Maestro + maestroCfg := cfg.Clients.Maestro assert.Equal(t, env.TLSMaestroGRPCAddr, maestroCfg.GRPCServerAddress) assert.Equal(t, env.TLSMaestroServerAddr, maestroCfg.HTTPServerAddress) assert.Equal(t, "config-tls-mtls", maestroCfg.SourceID) @@ -203,8 +198,10 @@ func TestTLSConfigLoadAndConnect_CAOnly(t *testing.T) { config_loader.WithSkipSemanticValidation(), ) require.NoError(t, err) + require.NotNil(t, cfg, "Config should not be nil") + require.NotNil(t, cfg.Clients, "Clients config should not be nil") - clientCfg, err := buildMaestroClientConfigFromLoaded(cfg.Spec.Clients.Maestro) + clientCfg, err := buildMaestroClientConfigFromLoaded(cfg.Clients.Maestro) require.NoError(t, err) assert.Equal(t, env.TLSCerts.CAFilePath(), clientCfg.CAFile) @@ -240,8 +237,10 @@ func TestTLSConfigLoadAndConnect_Insecure(t 
*testing.T) { config_loader.WithSkipSemanticValidation(), ) require.NoError(t, err) + require.NotNil(t, cfg, "Config should not be nil") + require.NotNil(t, cfg.Clients, "Clients config should not be nil") - maestroCfg := cfg.Spec.Clients.Maestro + maestroCfg := cfg.Clients.Maestro assert.True(t, maestroCfg.Insecure) clientCfg, err := buildMaestroClientConfigFromLoaded(maestroCfg) @@ -288,8 +287,10 @@ func TestTLSConfigLoadAndConnect_EnvOverride(t *testing.T) { config_loader.WithSkipSemanticValidation(), ) require.NoError(t, err) + require.NotNil(t, cfg, "Config should not be nil") + require.NotNil(t, cfg.Clients, "Clients config should not be nil") - maestroCfg := cfg.Spec.Clients.Maestro + maestroCfg := cfg.Clients.Maestro assert.Equal(t, env.TLSMaestroGRPCAddr, maestroCfg.GRPCServerAddress, "Env should override YAML") assert.Equal(t, env.TLSMaestroServerAddr, maestroCfg.HTTPServerAddress, "Env should override YAML") assert.Equal(t, "config-tls-env-override", maestroCfg.SourceID, "Env should override YAML") diff --git a/test/integration/maestro_client/main_test.go b/test/integration/maestro_client/main_test.go index b245ac0..7788f48 100644 --- a/test/integration/maestro_client/main_test.go +++ b/test/integration/maestro_client/main_test.go @@ -20,7 +20,7 @@ const ( MaestroImage = "quay.io/redhat-user-workloads/maestro-rhtap-tenant/maestro/maestro:latest" // PostgresImage is the PostgreSQL container image - PostgresImage = "docker.io/library/postgres:14.2" + PostgresImage = "quay.io/sclorg/postgresql-15-c9s:latest" // Default ports PostgresPort = "5432/tcp" diff --git a/test/integration/maestro_client/setup_test.go b/test/integration/maestro_client/setup_test.go index 7f81ecb..fee1361 100644 --- a/test/integration/maestro_client/setup_test.go +++ b/test/integration/maestro_client/setup_test.go @@ -119,17 +119,12 @@ func startPostgresContainer(ctx context.Context) (testcontainers.Container, erro Image: PostgresImage, ExposedPorts: []string{PostgresPort}, Env: 
map[string]string{ - "POSTGRES_DB": dbName, - "POSTGRES_USER": dbUser, - "POSTGRES_PASSWORD": dbPassword, + "POSTGRESQL_DATABASE": dbName, + "POSTGRESQL_USER": dbUser, + "POSTGRESQL_PASSWORD": dbPassword, }, - WaitingFor: wait.ForAll( - wait.ForLog("database system is ready to accept connections"). - WithOccurrence(2). - WithStartupTimeout(60*time.Second), - wait.ForListeningPort(nat.Port(PostgresPort)). - WithStartupTimeout(60*time.Second), - ), + WaitingFor: wait.ForListeningPort(nat.Port(PostgresPort)). + WithStartupTimeout(60 * time.Second), } container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ diff --git a/test/integration/testutil/mock_api_server.go b/test/integration/testutil/mock_api_server.go index 038a2bd..bc2a5e7 100644 --- a/test/integration/testutil/mock_api_server.go +++ b/test/integration/testutil/mock_api_server.go @@ -50,9 +50,9 @@ func NewMockAPIServer(t *testing.T) *MockAPIServer { t: t, requests: make([]MockRequest, 0), clusterResponse: map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-cluster", - }, + "id": "test-cluster-id", + "name": "test-cluster", + "kind": "Cluster", "spec": map[string]interface{}{ "region": "us-east-1", "provider": "aws", @@ -214,9 +214,9 @@ func (m *MockAPIServer) Reset() { m.failPrecondition = false m.failPostAction = false m.clusterResponse = map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-cluster", - }, + "id": "test-cluster-id", + "name": "test-cluster", + "kind": "Cluster", "spec": map[string]interface{}{ "region": "us-east-1", "provider": "aws", diff --git a/test/testdata/adapter-config.yaml b/test/testdata/adapter-config.yaml index 888e8e1..b044e60 100644 --- a/test/testdata/adapter-config.yaml +++ b/test/testdata/adapter-config.yaml @@ -1,20 +1,13 @@ # HyperFleet Adapter Deployment Configuration for testing -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: test-adapter - 
labels: - hyperfleet.io/component: adapter + version: "0.1.0" -spec: - adapter: - version: "0.1.0" +clients: + hyperfleet_api: + timeout: 2s + retry_attempts: 3 + retry_backoff: exponential - clients: - hyperfleetApi: - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential - - kubernetes: - apiVersion: "v1" + kubernetes: + api_version: "v1" diff --git a/test/testdata/adapter_config_valid.yaml b/test/testdata/adapter_config_valid.yaml deleted file mode 100644 index 295ea46..0000000 --- a/test/testdata/adapter_config_valid.yaml +++ /dev/null @@ -1,181 +0,0 @@ -# Simple valid HyperFleet Adapter Configuration for testing - -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: - name: example-adapter - labels: - hyperfleet.io/adapter-type: example - hyperfleet.io/component: adapter - -spec: - adapter: - version: "0.1.0" - - hyperfleetApi: - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential - - kubernetes: - apiVersion: "v1" - - # Parameters with all required variables - params: - - name: "hyperfleetApiBaseUrl" - source: "env.HYPERFLEET_API_BASE_URL" - type: "string" - required: true - - - name: "hyperfleetApiVersion" - source: "env.HYPERFLEET_API_VERSION" - type: "string" - default: "v1" - - - name: "clusterId" - source: "event.id" - type: "string" - required: true - - # Preconditions with valid operators and CEL expressions - preconditions: - - name: "clusterStatus" - apiCall: - method: "GET" - url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "clusterName" - field: "metadata.name" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? 
status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - name: "region" - field: "spec.region" - - name: "cloudProvider" - field: "spec.provider" - - name: "vpcId" - field: "spec.vpc_id" - # Structured conditions with valid operators - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "True" - - field: "cloudProvider" - operator: "in" - value: ["aws", "gcp", "azure"] - - field: "vpcId" - operator: "exists" - - - name: "validationCheck" - # Valid CEL expression - expression: | - readyConditionStatus == "True" - - # Resources with valid K8s manifests - resources: - - name: "clusterNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: "cluster-{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/cluster-name: "{{ .clusterName }}" - hyperfleet.io/region: "{{ .region }}" - hyperfleet.io/provider: "{{ .cloudProvider }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - annotations: - hyperfleet.io/vpc-id: "{{ .vpcId }}" - discovery: - namespace: "*" # Cluster-scoped resource (Namespace) - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - - - name: "clusterConfigMap" - manifest: - apiVersion: v1 - kind: ConfigMap - metadata: - name: "cluster-config-{{ .clusterId }}" - namespace: "cluster-{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - discovery: - namespace: "cluster-{{ .clusterId }}" - byName: "cluster-config-{{ .clusterId }}" - - - name: "externalTemplate" - manifest: - ref: "templates/deployment.yaml" - discovery: - namespace: "cluster-{{ .clusterId }}" - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - - # Post-processing with valid CEL expressions - post: - payloads: - - name: "clusterStatusPayload" - build: - conditions: - applied: - status: - expression: | - resources.clusterNamespace.status.phase == "Active" - reason: - 
expression: | - has(resources.clusterNamespace.status.phase) ? "ResourcesCreated" : "Pending" - message: - expression: | - "Namespace status: " + resources.clusterNamespace.status.phase - - available: - status: - expression: | - resources.clusterConfigMap != null - reason: - expression: | - "ConfigMapReady" - message: - expression: | - "ConfigMap is available" - - health: - status: - expression: | - true - reason: - expression: | - "Healthy" - message: - expression: | - "All health checks passed" - - data: - clusterReady: - expression: | - resources.clusterNamespace.status.phase == "Active" - description: "Cluster namespace is active" - - observed_generation: "{{ .generationId }}" - - lastUpdated: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - - postActions: - - name: "reportClusterStatus" - apiCall: - method: "POST" - url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses" - headers: - - name: "Content-Type" - value: "application/json" - body: "{{ .clusterStatusPayload }}" diff --git a/test/testdata/dryrun/dryrun-cel-showcase-task-config.yaml b/test/testdata/dryrun/dryrun-cel-showcase-task-config.yaml index c0a291b..2fefc28 100644 --- a/test/testdata/dryrun/dryrun-cel-showcase-task-config.yaml +++ b/test/testdata/dryrun/dryrun-cel-showcase-task-config.yaml @@ -21,350 +21,342 @@ # 14. adapter.? — adapter execution metadata access # 15. 
Go templates — template rendering with {{ }} -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: cel-showcase-task - labels: - hyperfleet.io/adapter-type: cel-showcase +params: + - name: "clusterId" + source: "event.id" + type: "string" + required: true -spec: - params: - - name: "clusterId" - source: "event.id" - type: "string" - required: true + - name: "clusterKind" + source: "event.kind" + type: "string" + default: "Cluster" - - name: "clusterKind" - source: "event.kind" - type: "string" - default: "Cluster" + - name: "generation" + source: "event.generation" + type: "int" + required: true - - name: "generation" - source: "event.generation" - type: "string" - required: true + - name: "region" + source: "env.REGION" + type: "string" + default: "us-east-1" - - name: "region" - source: "env.REGION" - type: "string" - default: "us-east-1" + - name: "adapterName" + source: "env.ADAPTER_NAME" + type: "string" + default: "dry-run-adapter" - - name: "adapterName" - source: "env.ADAPTER_NAME" - type: "string" - default: "dry-run-adapter" +# =========================================================================== +# Preconditions — CEL capture patterns +# =========================================================================== +preconditions: + - name: "fetch-cluster" + api_call: + method: "GET" + url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}" + timeout: 10s + capture: + # --------------------------------------------------------------- + # Pattern 1: field — simple dot-notation extraction + # Extracts a top-level field from the API response by name. 
+ # --------------------------------------------------------------- + - name: "clusterName" + field: "name" - # =========================================================================== - # Preconditions — CEL capture patterns - # =========================================================================== - preconditions: - - name: "fetch-cluster" - apiCall: - method: "GET" - url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}" - timeout: 10s - capture: - # --------------------------------------------------------------- - # Pattern 1: field — simple dot-notation extraction - # Extracts a top-level field from the API response by name. - # --------------------------------------------------------------- - - name: "clusterName" - field: "name" + # --------------------------------------------------------------- + # Pattern 1b: field — nested dot-notation extraction + # Extracts a nested field using dot-separated path. + # --------------------------------------------------------------- + - name: "clusterRegion" + field: "spec.region" - # --------------------------------------------------------------- - # Pattern 1b: field — nested dot-notation extraction - # Extracts a nested field using dot-separated path. - # --------------------------------------------------------------- - - name: "clusterRegion" - field: "spec.region" + # --------------------------------------------------------------- + # Pattern 2: filter() + ternary — list filtering with fallback + # Filters the conditions list for type=="Ready", returns status + # or "Unknown" if not found. + # --------------------------------------------------------------- + - name: "clusterStatus" + expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? 
status.conditions.filter(c, c.type == "Ready")[0].status + : "Unknown" - # --------------------------------------------------------------- - # Pattern 2: filter() + ternary — list filtering with fallback - # Filters the conditions list for type=="Ready", returns status - # or "Unknown" if not found. - # --------------------------------------------------------------- - - name: "clusterStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "Unknown" + # --------------------------------------------------------------- + # Pattern 3: exists() — boolean check on list elements + # Returns true if any tag matches "production". + # --------------------------------------------------------------- + - name: "isProduction" + expression: | + spec.tags.exists(t, t == "production") - # --------------------------------------------------------------- - # Pattern 3: exists() — boolean check on list elements - # Returns true if any tag matches "production". - # --------------------------------------------------------------- - - name: "isProduction" - expression: | - spec.tags.exists(t, t == "production") + # --------------------------------------------------------------- + # Pattern 4: size() — counting elements + # Returns the number of node pools defined in spec. + # --------------------------------------------------------------- + - name: "nodePoolCount" + expression: | + spec.node_pools.size() - # --------------------------------------------------------------- - # Pattern 4: size() — counting elements - # Returns the number of node pools defined in spec. - # --------------------------------------------------------------- - - name: "nodePoolCount" - expression: | - spec.node_pools.size() + # --------------------------------------------------------------- + # Pattern 5: string concatenation — building derived values + # Constructs a composite identifier from multiple fields. 
+ # --------------------------------------------------------------- + - name: "resourcePrefix" + expression: | + spec.provider + "-" + spec.region + "-" + name - # --------------------------------------------------------------- - # Pattern 5: string concatenation — building derived values - # Constructs a composite identifier from multiple fields. - # --------------------------------------------------------------- - - name: "resourcePrefix" - expression: | - spec.provider + "-" + spec.region + "-" + name + # --------------------------------------------------------------- + # Pattern 6: type coercion — string() and int() + # Converts the numeric compute node count to string. + # --------------------------------------------------------------- + - name: "computeNodesStr" + expression: | + "compute=" + string(nodes.compute) - # --------------------------------------------------------------- - # Pattern 6: type coercion — string() and int() - # Converts the numeric compute node count to string. - # --------------------------------------------------------------- - - name: "computeNodesStr" - expression: | - "compute=" + string(nodes.compute) + # --------------------------------------------------------------- + # Pattern 7: in operator — membership check + # Checks if the provider is one of the supported cloud providers. + # --------------------------------------------------------------- + - name: "isSupportedProvider" + expression: | + spec.provider in ["aws", "gcp", "azure"] - # --------------------------------------------------------------- - # Pattern 7: in operator — membership check - # Checks if the provider is one of the supported cloud providers. 
- # --------------------------------------------------------------- - - name: "isSupportedProvider" - expression: | - spec.provider in ["aws", "gcp", "azure"] + # --------------------------------------------------------------- + # Pattern 8: map() — list transformation + # Transforms the node_pools list to extract just the names. + # --------------------------------------------------------------- + - name: "nodePoolNames" + expression: | + spec.node_pools.map(p, p.name) - # --------------------------------------------------------------- - # Pattern 8: map() — list transformation - # Transforms the node_pools list to extract just the names. - # --------------------------------------------------------------- - - name: "nodePoolNames" - expression: | - spec.node_pools.map(p, p.name) + # --------------------------------------------------------------- + # Pattern 4b: size() — counting with field extraction + # Extracts compute node count for use in conditions. + # --------------------------------------------------------------- + - name: "computeNodes" + field: "nodes.compute" - # --------------------------------------------------------------- - # Pattern 4b: size() — counting with field extraction - # Extracts compute node count for use in conditions. 
- # --------------------------------------------------------------- - - name: "computeNodes" - field: "nodes.compute" + # Static timestamp for dry-run reproducibility + - name: "timestamp" + expression: "\"2006-01-02T15:04:05Z07:00\"" - # Static timestamp for dry-run reproducibility - - name: "timestamp" - expression: "\"2006-01-02T15:04:05Z07:00\"" + conditions: + - field: "clusterStatus" + operator: "notEquals" + value: "True" - conditions: - - field: "clusterStatus" - operator: "notEquals" - value: "True" +# =========================================================================== +# Resources — Kubernetes manifests to apply +# =========================================================================== +resources: + # Resource 0: Namespace + - name: "namespace0" + transport: + client: kubernetes + manifest: + apiVersion: v1 + kind: Namespace + metadata: + # Pattern 15: Go template rendering + name: "{{ .clusterId | lower }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/managed-by: "{{ .adapterName }}" + hyperfleet.io/resource-type: "namespace" + annotations: + hyperfleet.io/created-by: "hyperfleet-adapter" + hyperfleet.io/generation: "{{ .generation }}" + discovery: + by_name: "{{ .clusterId | lower }}" - # =========================================================================== - # Resources — Kubernetes manifests to apply - # =========================================================================== - resources: - # Resource 0: Namespace - - name: "namespace0" - transport: - client: kubernetes - manifest: - apiVersion: v1 - kind: Namespace - metadata: - # Pattern 15: Go template rendering - name: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .adapterName }}" - hyperfleet.io/resource-type: "namespace" - annotations: - hyperfleet.io/created-by: "hyperfleet-adapter" - hyperfleet.io/generation: "{{ .generation }}" - discovery: - byName: "{{ .clusterId | lower 
}}" - - # Resource 1: ConfigMap — cluster configuration - - name: "configmap0" - transport: - client: kubernetes - manifest: - apiVersion: v1 - kind: ConfigMap - metadata: - name: "{{ .clusterId }}-config" - namespace: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - annotations: - hyperfleet.io/generation: "{{ .generation }}" - data: - cluster_id: "{{ .clusterId }}" - cluster_name: "{{ .clusterName }}" - discovery: + # Resource 1: ConfigMap — cluster configuration + - name: "configmap0" + transport: + client: kubernetes + manifest: + apiVersion: v1 + kind: ConfigMap + metadata: + name: "{{ .clusterId }}-config" namespace: "{{ .clusterId | lower }}" - byName: "{{ .clusterId }}-config" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + annotations: + hyperfleet.io/generation: "{{ .generation }}" + data: + cluster_id: "{{ .clusterId }}" + cluster_name: "{{ .clusterName }}" + discovery: + namespace: "{{ .clusterId | lower }}" + by_name: "{{ .clusterId }}-config" - # Resource 2: ConfigMap — extra configuration (for additional expression targets) - - name: "configmap1" - transport: - client: kubernetes - manifest: - apiVersion: v1 - kind: ConfigMap - metadata: - name: "{{ .clusterId }}-extra" - namespace: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - annotations: - hyperfleet.io/generation: "{{ .generation }}" - data: - feature_flags: "ha-enabled,auto-scaling,monitoring" - max_replicas: "10" - endpoint: "https://{{ .clusterId }}.{{ .region }}.hyperfleet.io" - discovery: + # Resource 2: ConfigMap — extra configuration (for additional expression targets) + - name: "configmap1" + transport: + client: kubernetes + manifest: + apiVersion: v1 + kind: ConfigMap + metadata: + name: "{{ .clusterId }}-extra" namespace: "{{ .clusterId | lower }}" - byName: "{{ .clusterId }}-extra" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + annotations: + hyperfleet.io/generation: "{{ .generation 
}}" + data: + feature_flags: "ha-enabled,auto-scaling,monitoring" + max_replicas: "10" + endpoint: "https://{{ .clusterId }}.{{ .region }}.hyperfleet.io" + discovery: + namespace: "{{ .clusterId | lower }}" + by_name: "{{ .clusterId }}-extra" - # =========================================================================== - # Post-processing — Payload build with CEL expression patterns - # =========================================================================== - post: - payloads: - - name: "statusPayload" - build: - adapter: "cel-showcase" - conditions: - # --------------------------------------------------------- - # Pattern 9: has() + ternary — existence check with fallback - # Checks if all expected resources were discovered. - # --------------------------------------------------------- - - type: "Applied" - status: - expression: | - has(resources.namespace0) && has(resources.configmap0) && has(resources.configmap1) - ? "True" - : "False" - reason: - expression: | - has(resources.namespace0) && has(resources.configmap0) && has(resources.configmap1) - ? "AllResourcesApplied" - : "ResourcesNotDiscovered" - message: - # Pattern 5 (payload): string concatenation with conditional parts - expression: | - has(resources.namespace0) && has(resources.configmap0) && has(resources.configmap1) - ? "All resources applied: " - + resources.namespace0.metadata.name + ", " - + resources.configmap0.metadata.name + ", " - + resources.configmap1.metadata.name - : "One or more resources not yet discovered" - - # --------------------------------------------------------- - # Pattern 10: optional chaining ?.orValue() — safe navigation - # Safely reads namespace phase even if fields are missing. - # --------------------------------------------------------- - - type: "Available" - status: - expression: | - resources.?namespace0.?status.?phase.orValue("") == "Active" - ? 
"True" - : "False" - reason: - expression: | - resources.?namespace0.?status.?phase.orValue("Pending") - message: - expression: | - "Namespace phase: " + resources.?namespace0.?status.?phase.orValue("unknown") +# =========================================================================== +# Post-processing — Payload build with CEL expression patterns +# =========================================================================== +post: + payloads: + - name: "statusPayload" + build: + adapter: "cel-showcase" + conditions: + # --------------------------------------------------------- + # Pattern 9: has() + ternary — existence check with fallback + # Checks if all expected resources were discovered. + # --------------------------------------------------------- + - type: "Applied" + status: + expression: | + has(resources.namespace0) && has(resources.configmap0) && has(resources.configmap1) + ? "True" + : "False" + reason: + expression: | + has(resources.namespace0) && has(resources.configmap0) && has(resources.configmap1) + ? "AllResourcesApplied" + : "ResourcesNotDiscovered" + message: + # Pattern 5 (payload): string concatenation with conditional parts + expression: | + has(resources.namespace0) && has(resources.configmap0) && has(resources.configmap1) + ? "All resources applied: " + + resources.namespace0.metadata.name + ", " + + resources.configmap0.metadata.name + ", " + + resources.configmap1.metadata.name + : "One or more resources not yet discovered" - # --------------------------------------------------------- - # Pattern 14: adapter.? — adapter execution metadata access - # Accesses adapter execution status and error details. - # --------------------------------------------------------- - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" - && !adapter.?resourcesSkipped.orValue(false) - ? "True" - : "False" - reason: - expression: | - adapter.?executionStatus.orValue("") != "success" - ? 
"ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") - : adapter.?resourcesSkipped.orValue(false) - ? "ResourcesSkipped" - : "Healthy" - message: - # Pattern 5 (payload): multi-part string building - expression: | - adapter.?executionStatus.orValue("") != "success" - ? "Adapter failed at phase [" - + adapter.?executionError.?phase.orValue("unknown") - + "] step [" - + adapter.?executionError.?step.orValue("unknown") - + "]: " - + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) - : adapter.?resourcesSkipped.orValue(false) - ? "Resources skipped: " + adapter.?skipReason.orValue("unknown reason") - : "Adapter execution completed successfully" + # --------------------------------------------------------- + # Pattern 10: optional chaining ?.orValue() — safe navigation + # Safely reads namespace phase even if fields are missing. + # --------------------------------------------------------- + - type: "Available" + status: + expression: | + resources.?namespace0.?status.?phase.orValue("") == "Active" + ? "True" + : "False" + reason: + expression: | + resources.?namespace0.?status.?phase.orValue("Pending") + message: + expression: | + "Namespace phase: " + resources.?namespace0.?status.?phase.orValue("unknown") - # Pattern 15: Go template rendering in payload values - observed_generation: - expression: "generation" - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + # --------------------------------------------------------- + # Pattern 14: adapter.? — adapter execution metadata access + # Accesses adapter execution status and error details. + # --------------------------------------------------------- + - type: "Health" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" + && !adapter.?resourcesSkipped.orValue(false) + ? "True" + : "False" + reason: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? 
"ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") + : adapter.?resourcesSkipped.orValue(false) + ? "ResourcesSkipped" + : "Healthy" + message: + # Pattern 5 (payload): multi-part string building + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "Adapter failed at phase [" + + adapter.?executionError.?phase.orValue("unknown") + + "] step [" + + adapter.?executionError.?step.orValue("unknown") + + "]: " + + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) + : adapter.?resourcesSkipped.orValue(false) + ? "Resources skipped: " + adapter.?skipReason.orValue("unknown reason") + : "Adapter execution completed successfully" - data: - # --------------------------------------------------------- - # Pattern 10 (payload): optional chaining for safe field access - # --------------------------------------------------------- - namespace: - name: - expression: | - resources.?namespace0.?metadata.?name.orValue("") - phase: - expression: | - resources.?namespace0.?status.?phase.orValue("") - labels_json: - # Pattern 13: toJson() — serialization of resource metadata - expression: | - has(resources.namespace0) - ? 
toJson(resources.namespace0.metadata.labels) - : "{}" + # Pattern 15: Go template rendering in payload values + observed_generation: + expression: "generation" + observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - configmap: - name: - expression: | - resources.?configmap0.?metadata.?name.orValue("") - cluster_id: - # Pattern 12: dig() — safe deep traversal into nested structures - expression: | - dig(resources, "configmap0.data.cluster_id") - cluster_metadata_region: - # Pattern 12b: dig() — traversing into a nested path - expression: | - dig(resources, "configmap0.data.cluster_metadata") + data: + # --------------------------------------------------------- + # Pattern 10 (payload): optional chaining for safe field access + # --------------------------------------------------------- + namespace: + name: + expression: | + resources.?namespace0.?metadata.?name.orValue("") + phase: + expression: | + resources.?namespace0.?status.?phase.orValue("") + labels_json: + # Pattern 13: toJson() — serialization of resource metadata + expression: | + has(resources.namespace0) + ? 
toJson(resources.namespace0.metadata.labels) + : "{}" - extra: - feature_flags: - expression: | - resources.?configmap1.?data.?feature_flags.orValue("") - max_replicas: - expression: | - resources.?configmap1.?data.?max_replicas.orValue("0") - # Pattern 12c: dig() on secondary configmap - endpoint: - expression: | - dig(resources, "configmap1.data.endpoint") + configmap: + name: + expression: | + resources.?configmap0.?metadata.?name.orValue("") + cluster_id: + # Pattern 12: dig() — safe deep traversal into nested structures + expression: | + dig(resources, "configmap0.data.cluster_id") + cluster_name: + # Pattern 12b: dig() — traversing into a nested path + expression: | + dig(resources, "configmap0.data.cluster_name") - # --------------------------------------------------------- - # Pattern 9 (payload): nested ternary chains — multi-level branching - # --------------------------------------------------------- - overall_status: + extra: + feature_flags: + expression: | + resources.?configmap1.?data.?feature_flags.orValue("") + max_replicas: expression: | - has(resources.namespace0) && has(resources.configmap0) && has(resources.configmap1) ? (resources.?namespace0.?status.?phase.orValue("") == "Active" ? "Ready" : "Provisioning") : "Pending" + resources.?configmap1.?data.?max_replicas.orValue("0") + # Pattern 12c: dig() on secondary configmap + endpoint: + expression: | + dig(resources, "configmap1.data.endpoint") + + # --------------------------------------------------------- + # Pattern 9 (payload): nested ternary chains — multi-level branching + # --------------------------------------------------------- + overall_status: + expression: | + has(resources.namespace0) && has(resources.configmap0) && has(resources.configmap1) ? (resources.?namespace0.?status.?phase.orValue("") == "Active" ? 
"Ready" : "Provisioning") : "Pending" - # Pattern 15: Go template rendering for payload values - cluster_url: "https://console.{{ .region }}.hyperfleet.io/clusters/{{ .clusterId }}" + # Pattern 15: Go template rendering for payload values + cluster_url: "https://console.{{ .region }}.hyperfleet.io/clusters/{{ .clusterId }}" - postActions: - - name: "update-status" - apiCall: - method: "PATCH" - url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}/statuses" - body: "{{ .statusPayload }}" + post_actions: + - name: "update-status" + api_call: + method: "PATCH" + url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}/statuses" + body: "{{ .statusPayload }}" diff --git a/test/testdata/dryrun/dryrun-kubernetes-adapter-config.yaml b/test/testdata/dryrun/dryrun-kubernetes-adapter-config.yaml index 5a98bf9..97417ce 100644 --- a/test/testdata/dryrun/dryrun-kubernetes-adapter-config.yaml +++ b/test/testdata/dryrun/dryrun-kubernetes-adapter-config.yaml @@ -1,24 +1,17 @@ # Adapter deployment configuration for dry-run testing -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: dryrun-adapter - labels: - hyperfleet.io/component: adapter + version: "0.1.0" -spec: - adapter: - version: "0.1.0" +clients: + hyperfleet_api: + timeout: 10s + retry_attempts: 3 + retry_backoff: exponential - clients: - hyperfleetApi: - timeout: 10s - retryAttempts: 3 - retryBackoff: exponential + broker: + subscription_id: "dryrun-sub" + topic: "cluster-events" - broker: - subscriptionId: "dryrun-sub" - topic: "cluster-events" - - kubernetes: - apiVersion: "v1" + kubernetes: + api_version: "v1" diff --git a/test/testdata/dryrun/dryrun-kubernetes-adatepr-task-config-invalid.yaml b/test/testdata/dryrun/dryrun-kubernetes-adatepr-task-config-invalid.yaml index 785d0b2..89db078 100644 --- a/test/testdata/dryrun/dryrun-kubernetes-adatepr-task-config-invalid.yaml +++ b/test/testdata/dryrun/dryrun-kubernetes-adatepr-task-config-invalid.yaml @@ -4,46 +4,40 @@ # THIS FILE IS 
INVALID ON PURPOSE # THIS FILE IS INVALID ON PURPOSE # THIS FILE IS INVALID ON PURPOSE -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: invalid-task +params: + - name: "clusterId" + source: "event.id" + type: "string" + required: true -spec: - params: - - name: "clusterId" - source: "event.id" - type: "string" - required: true +preconditions: + - name: "bad-cel" + expression: | + this is not valid CEL !!! - preconditions: - - name: "bad-cel" - expression: | - this is not valid CEL !!! +resources: + - name: "missingFields" + manifest: + apiVersion: v1 + kind: ConfigMap + # missing metadata.name — validation should catch this + metadata: + labels: + app: test + annotations: + hyperfleet.io/generation: "1" + discovery: + namespace: "default" + by_name: "test" - resources: - - name: "missingFields" - manifest: - apiVersion: v1 - kind: ConfigMap - # missing metadata.name — validation should catch this - metadata: - labels: - app: test - annotations: - hyperfleet.io/generation: "1" - discovery: - namespace: "default" - byName: "test" - - - name: "undefinedVar" - manifest: - apiVersion: v1 - kind: ConfigMap - metadata: - name: "{{ .nonExistentVariable }}" - annotations: - hyperfleet.io/generation: "1" - discovery: - namespace: "default" - byName: "test" + - name: "undefinedVar" + manifest: + apiVersion: v1 + kind: ConfigMap + metadata: + name: "{{ .nonExistentVariable }}" + annotations: + hyperfleet.io/generation: "1" + discovery: + namespace: "default" + by_name: "test" diff --git a/test/testdata/dryrun/dryrun-kubernetes-task-config.yaml b/test/testdata/dryrun/dryrun-kubernetes-task-config.yaml index 3fc40b5..d82e713 100644 --- a/test/testdata/dryrun/dryrun-kubernetes-task-config.yaml +++ b/test/testdata/dryrun/dryrun-kubernetes-task-config.yaml @@ -1,209 +1,201 @@ # Task configuration for dry-run testing with Kubernetes transport # Unlike the Maestro example, Kubernetes transport applies resources directly # (no ManifestWork 
wrapper). Each K8s resource is a separate resource entry. -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: dryrun-task - labels: - hyperfleet.io/adapter-type: dryrun - -spec: - params: - - name: "clusterId" - source: "event.id" - type: "string" - required: true - - - name: "clusterKind" - source: "event.kind" - type: "string" - default: "Cluster" - - - name: "generation" - source: "event.generation" - type: "string" - required: true - - - name: "region" - source: "env.REGION" - type: "string" - default: "us-east-1" - - - name: "adapterName" - source: "env.ADAPTER_NAME" - type: "string" - default: "dry-run-adapter" - - preconditions: - - name: "fetch-cluster" - apiCall: - method: "GET" - url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}" - timeout: 10s - capture: - - name: "clusterName" - field: "name" - - name: "clusterStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - name: "computeNodes" - field: "nodes.compute" - - name: "timestamp" - expression: "\"2006-01-02T15:04:05Z07:00\"" - - conditions: - - field: "clusterStatus" - operator: "notEquals" - value: "True" - - resources: - # Resource 0: Namespace — applied directly to the K8s cluster - - name: "namespace0" - transport: - client: kubernetes - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .adapterName }}" - hyperfleet.io/resource-type: "namespace" - annotations: - hyperfleet.io/created-by: "hyperfleet-adapter" - hyperfleet.io/generation: "{{ .generation }}" - - discovery: - byName: "{{ .clusterId | lower }}" - - # Resource 1: ConfigMap — applied directly to the K8s cluster - - name: "configmap0" - transport: - client: kubernetes - manifest: - apiVersion: v1 - kind: ConfigMap - metadata: - name: "{{ .clusterId }}-config" - 
namespace: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - annotations: - hyperfleet.io/generation: "{{ .generation }}" +params: + - name: "clusterId" + source: "event.id" + type: "string" + required: true + + - name: "clusterKind" + source: "event.kind" + type: "string" + default: "Cluster" + + - name: "generation" + source: "event.generation" + type: "int" + required: true + + - name: "region" + source: "env.REGION" + type: "string" + default: "us-east-1" + + - name: "adapterName" + source: "env.ADAPTER_NAME" + type: "string" + default: "dry-run-adapter" + +preconditions: + - name: "fetch-cluster" + api_call: + method: "GET" + url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}" + timeout: 10s + capture: + - name: "clusterName" + field: "name" + - name: "clusterStatus" + expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + - name: "computeNodes" + field: "nodes.compute" + - name: "timestamp" + expression: "\"2006-01-02T15:04:05Z07:00\"" + + conditions: + - field: "clusterStatus" + operator: "notEquals" + value: "True" + +resources: + # Resource 0: Namespace — applied directly to the K8s cluster + - name: "namespace0" + transport: + client: kubernetes + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/managed-by: "{{ .adapterName }}" + hyperfleet.io/resource-type: "namespace" + annotations: + hyperfleet.io/created-by: "hyperfleet-adapter" + hyperfleet.io/generation: "{{ .generation }}" + + discovery: + by_name: "{{ .clusterId }}" + + # Resource 1: ConfigMap — applied directly to the K8s cluster + - name: "configmap0" + transport: + client: kubernetes + manifest: + apiVersion: v1 + kind: ConfigMap + metadata: + name: "{{ .clusterId }}-config" + namespace: "{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId 
}}" + annotations: + hyperfleet.io/generation: "{{ .generation }}" + data: + cluster_id: "{{ .clusterId }}" + cluster_name: "{{ .clusterName }}" + + discovery: + namespace: "{{ .clusterId }}" + by_name: "{{ .clusterId }}-config" + +# Post-processing — no nested discoveries needed for direct K8s resources. +# Each resource is a top-level entry in the resources map. +post: + payloads: + - name: "statusPayload" + build: + adapter: "dryrun-kubernetes" + conditions: + # Applied: Were the K8s resources created? + - type: "Applied" + status: + expression: | + has(resources.namespace0) && has(resources.configmap0) + ? "True" + : "False" + reason: + expression: | + has(resources.namespace0) && has(resources.configmap0) + ? "ResourcesApplied" + : "ResourcesNotDiscovered" + message: + expression: | + has(resources.namespace0) && has(resources.configmap0) + ? "Namespace " + resources.namespace0.metadata.name + " and ConfigMap " + resources.configmap0.metadata.name + " applied" + : "One or more resources not yet discovered" + + # Available: Is the namespace active? + - type: "Available" + status: + expression: | + has(resources.namespace0) + && has(resources.namespace0.status) + && has(resources.namespace0.status.phase) + && resources.namespace0.status.phase == "Active" + ? "True" + : "False" + reason: + expression: | + has(resources.namespace0) + && has(resources.namespace0.status) + && has(resources.namespace0.status.phase) + ? resources.namespace0.status.phase + : "NamespaceNotReady" + message: + expression: | + has(resources.namespace0) + && has(resources.namespace0.status) + && has(resources.namespace0.status.phase) + ? "Namespace phase: " + resources.namespace0.status.phase + : "Namespace not yet discovered or status unavailable" + + # Health: Adapter execution status + - type: "Health" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" + && !adapter.?resourcesSkipped.orValue(false) + ? 
"True" + : "False" + reason: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") + : adapter.?resourcesSkipped.orValue(false) + ? "ResourcesSkipped" + : "Healthy" + message: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "Adapter failed at phase [" + + adapter.?executionError.?phase.orValue("unknown") + + "] step [" + + adapter.?executionError.?step.orValue("unknown") + + "]: " + + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) + : adapter.?resourcesSkipped.orValue(false) + ? "Resources skipped: " + adapter.?skipReason.orValue("unknown reason") + : "Adapter execution completed successfully" + + observed_generation: + expression: "generation" + observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + data: - cluster_id: "{{ .clusterId }}" - cluster_name: "{{ .clusterName }}" - - discovery: - namespace: "{{ .clusterId | lower }}" - byName: "{{ .clusterId }}-config" - - # Post-processing — no nested discoveries needed for direct K8s resources. - # Each resource is a top-level entry in the resources map. - post: - payloads: - - name: "statusPayload" - build: - adapter: "dryrun-kubernetes" - conditions: - # Applied: Were the K8s resources created? - - type: "Applied" - status: - expression: | - has(resources.namespace0) && has(resources.configmap0) - ? "True" - : "False" - reason: - expression: | - has(resources.namespace0) && has(resources.configmap0) - ? "ResourcesApplied" - : "ResourcesNotDiscovered" - message: - expression: | - has(resources.namespace0) && has(resources.configmap0) - ? "Namespace " + resources.namespace0.metadata.name + " and ConfigMap " + resources.configmap0.metadata.name + " applied" - : "One or more resources not yet discovered" - - # Available: Is the namespace active? 
- - type: "Available" - status: - expression: | - has(resources.namespace0) - && has(resources.namespace0.status) - && has(resources.namespace0.status.phase) - && resources.namespace0.status.phase == "Active" - ? "True" - : "False" - reason: - expression: | - has(resources.namespace0) - && has(resources.namespace0.status) - && has(resources.namespace0.status.phase) - ? resources.namespace0.status.phase - : "NamespaceNotReady" - message: - expression: | - has(resources.namespace0) - && has(resources.namespace0.status) - && has(resources.namespace0.status.phase) - ? "Namespace phase: " + resources.namespace0.status.phase - : "Namespace not yet discovered or status unavailable" - - # Health: Adapter execution status - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" - && !adapter.?resourcesSkipped.orValue(false) - ? "True" - : "False" - reason: - expression: | - adapter.?executionStatus.orValue("") != "success" - ? "ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") - : adapter.?resourcesSkipped.orValue(false) - ? "ResourcesSkipped" - : "Healthy" - message: - expression: | - adapter.?executionStatus.orValue("") != "success" - ? "Adapter failed at phase [" - + adapter.?executionError.?phase.orValue("unknown") - + "] step [" - + adapter.?executionError.?step.orValue("unknown") - + "]: " - + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) - : adapter.?resourcesSkipped.orValue(false) - ? 
"Resources skipped: " + adapter.?skipReason.orValue("unknown reason") - : "Adapter execution completed successfully" - - observed_generation: - expression: "generation" - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - - data: - namespace: - name: - expression: | - resources.?namespace0.?metadata.?name.orValue("") - phase: - expression: | - resources.?namespace0.?status.?phase.orValue("") - configmap: - name: - expression: | - resources.?configmap0.?metadata.?name.orValue("") - clusterId: - expression: | - resources.?configmap0.?data.?cluster_id.orValue("") - - postActions: - - name: "update-status" - apiCall: - method: "PATCH" - url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}/statuses" - body: "{{ .statusPayload }}" + namespace: + name: + expression: | + resources.?namespace0.?metadata.?name.orValue("") + phase: + expression: | + resources.?namespace0.?status.?phase.orValue("") + configmap: + name: + expression: | + resources.?configmap0.?metadata.?name.orValue("") + clusterId: + expression: | + resources.?configmap0.?data.?cluster_id.orValue("") + + post_actions: + - name: "update-status" + api_call: + method: "PATCH" + url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}/statuses" + body: "{{ .statusPayload }}" diff --git a/test/testdata/dryrun/dryrun-maestro-adapter-config.yaml b/test/testdata/dryrun/dryrun-maestro-adapter-config.yaml index 4b7aa16..f884f10 100644 --- a/test/testdata/dryrun/dryrun-maestro-adapter-config.yaml +++ b/test/testdata/dryrun/dryrun-maestro-adapter-config.yaml @@ -1,37 +1,31 @@ # Adapter deployment configuration for dry-run testing -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: dryrun-adapter - labels: - hyperfleet.io/component: adapter + version: "0.1.0" -spec: - adapter: - version: "0.1.0" +clients: + hyperfleet_api: + base_url: http://localhost:8000 + timeout: 10s + retry_attempts: 3 + retry_backoff: exponential - clients: - hyperfleetApi: - timeout: 10s - 
retryAttempts: 3 - retryBackoff: exponential + broker: + subscription_id: "dryrun-sub" + topic: "cluster-events" - broker: - subscriptionId: "dryrun-sub" - topic: "cluster-events" + maestro: + grpc_server_address: "localhost:8090" - maestro: - grpcServerAddress: "localhost:8090" - - # HTTPS server address for REST API operations (optional) - # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS - httpServerAddress: "http://localhost:8100" - - # Source identifier for CloudEvents routing (must be unique across adapters) - # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID - sourceId: "hyperfleet-adapter" - - # Client identifier (defaults to sourceId if not specified) - # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID - clientId: "hyperfleet-adapter-client" - insecure: true + # HTTPS server address for REST API operations (optional) + # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS + http_server_address: "http://localhost:8100" + + # Source identifier for CloudEvents routing (must be unique across adapters) + # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID + source_id: "hyperfleet-adapter" + + # Client identifier (defaults to source_id if not specified) + # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID + client_id: "hyperfleet-adapter-client" + insecure: true diff --git a/test/testdata/dryrun/dryrun-maestro-adapter-task-config.yaml b/test/testdata/dryrun/dryrun-maestro-adapter-task-config.yaml index b10f8ff..b22f884 100644 --- a/test/testdata/dryrun/dryrun-maestro-adapter-task-config.yaml +++ b/test/testdata/dryrun/dryrun-maestro-adapter-task-config.yaml @@ -1,344 +1,311 @@ # Task configuration for dry-run testing -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: dryrun-task - labels: - hyperfleet.io/adapter-type: dryrun - -spec: - params: - - name: "clusterId" - source: "event.id" - type: "string" - required: true - - - name: "clusterKind" - source: "event.kind" - type: "string" - default: 
"Cluster" - - - name: "generation" - source: "event.generation" - type: "string" - required: true - - - name: "region" - source: "env.REGION" - type: "string" - default: "us-east-1" - - - name: "adapterName" - source: "env.ADAPTER_NAME" - type: "string" - default: "dry-run-adapter" - - preconditions: - - name: "fetch-cluster" - apiCall: - method: "GET" - url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}" - timeout: 10s - capture: - - name: "clusterName" - field: "name" - - name: "clusterStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - name: "computeNodes" - field: "nodes.compute" - #TODO: research why we can not have {{ now }} as expression - - name: "timestamp" - expression: "\"2006-01-02T15:04:05Z07:00\"" - - conditions: - - field: "clusterStatus" - operator: "notEquals" - value: "True" - - resources: - - name: "resource0" - transport: - client: maestro - maestro: - targetCluster: cluster1 - manifest: - apiVersion: work.open-cluster-management.io/v1 - kind: ManifestWork - metadata: - # ManifestWork name - must be unique within consumer namespace - #name: "manifestwork-{{ .clusterId }}" - name: "manifestwork-symbol000" - - # Labels for identification, filtering, and management - labels: - # HyperFleet tracking labels - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/adapter: "{{ .adapterName }}" - hyperfleet.io/component: "infrastructure" - hyperfleet.io/generation: "{{ .generation }}" - hyperfleet.io/resource-group: "cluster-setup" - - # Maestro-specific labels - maestro.io/source-id: "{{ .adapterName }}" - maestro.io/resource-type: "manifestwork" - maestro.io/priority: "normal" - - # Standard Kubernetes application labels - app.kubernetes.io/name: "aro-hcp-cluster" - app.kubernetes.io/instance: "{{ .clusterId }}" - app.kubernetes.io/version: "v1.0.0" - app.kubernetes.io/component: "infrastructure" - app.kubernetes.io/part-of: "hyperfleet" 
- app.kubernetes.io/managed-by: "hyperfleet-adapter" - app.kubernetes.io/created-by: "{{ .adapterName }}" - - # Annotations for metadata and operational information - annotations: - # Tracking and lifecycle - hyperfleet.io/created-by: "hyperfleet-adapter-framework" - hyperfleet.io/managed-by: "{{ .adapterName }}" - hyperfleet.io/generation: "{{ .generation }}" - hyperfleet.io/cluster-name: "{{ .clusterId }}" - hyperfleet.io/deployment-time: "{{ .timestamp }}" - - # Maestro-specific annotations - maestro.io/applied-time: "{{ .timestamp }}" - maestro.io/source-adapter: "{{ .adapterName }}" - - # Operational annotations - deployment.hyperfleet.io/strategy: "rolling" - deployment.hyperfleet.io/timeout: "300s" - monitoring.hyperfleet.io/enabled: "true" - - # Documentation - description: "Complete cluster setup including namespace, configuration, and RBAC" - documentation: "https://docs.hyperfleet.io/adapters/aro-hcp" - - # ManifestWork specification - spec: - # ============================================================================ - # Workload - Contains the Kubernetes manifests to deploy - # ============================================================================ - workload: - # Kubernetes manifests array - injected by framework from business logic config - manifests: +params: + - name: clusterId + required: true + source: event.id + type: string + - default: Cluster + name: clusterKind + source: event.kind + type: string + - name: generation + required: true + source: event.generation + type: string + - default: us-east-1 + name: region + source: env.REGION + type: string + - default: dry-run-adapter + name: adapterName + source: env.ADAPTER_NAME + type: string +preconditions: + - api_call: + method: GET + timeout: 10s + url: /api/hyperfleet/v1/clusters/{{ .clusterId }} + capture: + - field: name + name: clusterName + - expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? 
status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + name: clusterStatus + - field: nodes.compute + name: computeNodes + #TODO: research why we can not have {{ now }} as expression + - expression: '"2006-01-02T15:04:05Z07:00"' + name: timestamp + conditions: + - field: clusterStatus + operator: notEquals + value: "True" + name: fetch-cluster +resources: + - discovery: + #by_name: "cluster-{{ .clusterId }}-config" + #by_name: "manifestwork-symbol000" + by_selectors: + label_selector: + maestro.io/resource-type: manifestwork + namespace: '*' + manifest: + apiVersion: work.open-cluster-management.io/v1 + kind: ManifestWork + metadata: + # Annotations for metadata and operational information + annotations: + # Operational annotations + deployment.hyperfleet.io/strategy: rolling + deployment.hyperfleet.io/timeout: 300s + # Documentation + description: Complete cluster setup including namespace, configuration, and RBAC + documentation: https://docs.hyperfleet.io/adapters/aro-hcp + hyperfleet.io/cluster-name: '{{ .clusterId }}' + # Tracking and lifecycle + hyperfleet.io/created-by: hyperfleet-adapter-framework + hyperfleet.io/deployment-time: '{{ .timestamp }}' + hyperfleet.io/generation: '{{ .generation }}' + hyperfleet.io/managed-by: '{{ .adapterName }}' + # Maestro-specific annotations + maestro.io/applied-time: '{{ .timestamp }}' + maestro.io/source-adapter: '{{ .adapterName }}' + monitoring.hyperfleet.io/enabled: "true" + # Labels for identification, filtering, and management + labels: + app.kubernetes.io/component: infrastructure + app.kubernetes.io/created-by: '{{ .adapterName }}' + app.kubernetes.io/instance: '{{ .clusterId }}' + app.kubernetes.io/managed-by: hyperfleet-adapter + # Standard Kubernetes application labels + app.kubernetes.io/name: aro-hcp-cluster + app.kubernetes.io/part-of: hyperfleet + app.kubernetes.io/version: v1.0.0 + hyperfleet.io/adapter: '{{ .adapterName }}' + # HyperFleet tracking labels + hyperfleet.io/cluster-id: '{{ 
.clusterId }}' + hyperfleet.io/component: infrastructure + hyperfleet.io/generation: '{{ .generation }}' + hyperfleet.io/resource-group: cluster-setup + maestro.io/priority: normal + maestro.io/resource-type: manifestwork + # Maestro-specific labels + maestro.io/source-id: '{{ .adapterName }}' + # ManifestWork name - must be unique within consumer namespace + #name: "manifestwork-{{ .clusterId }}" + name: manifestwork-symbol000 + # ManifestWork specification + spec: + # ============================================================================ + # Delete Options - How resources should be removed + # ============================================================================ + deleteOption: + # Grace period for graceful deletion (seconds) + gracePeriodSeconds: 30 + # Propagation policy for resource deletion + # - "Foreground": Wait for dependent resources to be deleted first + # - "Background": Delete immediately, let cluster handle dependents + # - "Orphan": Leave resources on cluster when ManifestWork is deleted + propagationPolicy: Foreground + # ============================================================================ + # Manifest Configurations - Per-resource settings for update and feedback + # ============================================================================ + manifestConfigs: + # ======================================================================== + # Configuration for Namespace resources + # ======================================================================== + - feedbackRules: + - jsonPaths: + - name: phase # Namespace phase (Active, Terminating) + path: .status.phase + type: JSONPaths # Use JSON path expressions for status feedback + resourceIdentifier: + group: "" # Core API group (empty for v1 resources) + name: '{{ .clusterId }}' # Specific resource name + resource: namespaces # Resource type + updateStrategy: + type: ServerSideApply # Use server-side apply for namespaces + - resourceIdentifier: + group: "" + name: '{{ 
.clusterId }}-config-symbol2222' + namespace: '{{ .clusterId }}' + resource: configmaps + updateStrategy: + type: ServerSideApply + # ============================================================================ + # Workload - Contains the Kubernetes manifests to deploy + # ============================================================================ + workload: + # Kubernetes manifests array - injected by framework from business logic config + manifests: - apiVersion: v1 kind: Namespace metadata: - name: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - hyperfleet.io/resource-type: "namespace" - hyperfleet.io/label-for-discovery: "namespace-symbol111" annotations: - hyperfleet.io/created-by: "hyperfleet-adapter" - hyperfleet.io/generation: "{{ .generation }}" + hyperfleet.io/created-by: hyperfleet-adapter + hyperfleet.io/generation: '{{ .generation }}' + labels: + hyperfleet.io/cluster-id: '{{ .clusterId }}' + hyperfleet.io/label-for-discovery: namespace-symbol111 + hyperfleet.io/managed-by: '{{ .adapter.name }}' + hyperfleet.io/resource-type: namespace + name: '{{ .clusterId }}' - apiVersion: v1 + data: + cluster_id: '{{ .clusterId }}' + cluster_name: '{{ .clusterName }}' kind: ConfigMap metadata: - name: "{{ .clusterId }}-config-symbol2222" - namespace: "{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" annotations: - hyperfleet.io/generation: "{{ .generation }}" - data: - cluster_id: "{{ .clusterId }}" - cluster_name: "{{ .clusterName }}" - - # ============================================================================ - # Delete Options - How resources should be removed - # ============================================================================ - deleteOption: - # Propagation policy for resource deletion - # - "Foreground": Wait for dependent resources to be deleted first - # - "Background": Delete immediately, let cluster handle dependents - # - 
"Orphan": Leave resources on cluster when ManifestWork is deleted - propagationPolicy: "Foreground" - - # Grace period for graceful deletion (seconds) - gracePeriodSeconds: 30 - - # ============================================================================ - # Manifest Configurations - Per-resource settings for update and feedback - # ============================================================================ - manifestConfigs: - # ======================================================================== - # Configuration for Namespace resources - # ======================================================================== - - resourceIdentifier: - group: "" # Core API group (empty for v1 resources) - resource: "namespaces" # Resource type - name: "{{ .clusterId | lower }}" # Specific resource name - updateStrategy: - type: "ServerSideApply" # Use server-side apply for namespaces - feedbackRules: - - type: "JSONPaths" # Use JSON path expressions for status feedback - jsonPaths: - - name: "phase" # Namespace phase (Active, Terminating) - path: ".status.phase" - - resourceIdentifier: - group: "" - resource: "configmaps" - namespace: "{{ .clusterId | lower }}" - name: "{{ .clusterId | lower }}-config-symbol2222" - updateStrategy: - type: "ServerSideApply" - - discovery: - #byName: "cluster-{{ .clusterId }}-config" - #byName: "manifestwork-symbol000" - bySelectors: - labelSelector: - maestro.io/resource-type: manifestwork - - # Discover sub-resources within the manifestWork - # This approach can be used to use the discovery name to parameter level - # This can support jsonPath to dig into the resource status. 
like discoveryNamespace.status.conditions[?(@.type=="Ready")].status - nestedDiscoveries: - - name: "namespace0" - discovery: - bySelectors: - labelSelector: - hyperfleet.io/label-for-discovery: "namespace-symbol111" - - name: "configmap0" - discovery: - byName: "{{ .clusterId }}-config-symbol2222" - - post: - payloads: - - name: "statusPayload" - build: - adapter: "dryrun-maestro" - conditions: - # Applied: Was the ManifestWork applied by Maestro? - - type: "Applied" - status: - expression: | - has(resources.resource0) - && has(resources.resource0.status) - && has(resources.resource0.status.conditions) - && resources.resource0.status.conditions.filter(c, c.type == "Applied").size() > 0 - ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].status - : "False" - reason: - expression: | - has(resources.resource0) - && has(resources.resource0.status) - && has(resources.resource0.status.conditions) - && resources.resource0.status.conditions.filter(c, c.type == "Applied").size() > 0 - ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].reason - : "ManifestWorkNotDiscovered" - message: - expression: | - has(resources.resource0) - && has(resources.resource0.status) - && has(resources.resource0.status.conditions) - && resources.resource0.status.conditions.filter(c, c.type == "Applied").size() > 0 - ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].message - : "ManifestWork not yet discovered or status unavailable" - - # Available: Are all sub-resources available on the target cluster? - - type: "Available" - status: - expression: | - has(resources.resource0) - && has(resources.resource0.status) - && has(resources.resource0.status.conditions) - && resources.resource0.status.conditions.filter(c, c.type == "Available").size() > 0 - ? 
resources.resource0.status.conditions.filter(c, c.type == "Available")[0].status - : "False" - reason: - expression: | - has(resources.resource0) - && has(resources.resource0.status) - && has(resources.resource0.status.conditions) - && resources.resource0.status.conditions.filter(c, c.type == "Available").size() > 0 - ? resources.resource0.status.conditions.filter(c, c.type == "Available")[0].reason - : "ResourcesNotYetAvailable" - message: - expression: | - has(resources.resource0) - && has(resources.resource0.status) - && has(resources.resource0.status.conditions) - && resources.resource0.status.conditions.filter(c, c.type == "Available").size() > 0 - ? resources.resource0.status.conditions.filter(c, c.type == "Available")[0].message - : "ManifestWork not yet discovered or resources not available" - - # Health: Adapter execution status — surfaces errors from any phase - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" - && !adapter.?resourcesSkipped.orValue(false) - ? "True" - : "False" - reason: - expression: | - adapter.?executionStatus.orValue("") != "success" - ? "ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") - : adapter.?resourcesSkipped.orValue(false) - ? "ResourcesSkipped" - : "Healthy" - message: - expression: | - adapter.?executionStatus.orValue("") != "success" - ? "Adapter failed at phase [" - + adapter.?executionError.?phase.orValue("unknown") - + "] step [" - + adapter.?executionError.?step.orValue("unknown") - + "]: " - + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) - : adapter.?resourcesSkipped.orValue(false) - ? 
"Resources skipped: " + adapter.?skipReason.orValue("unknown reason") - : "Adapter execution completed successfully" - - observed_generation: - expression: "generation" - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - - # Data extracted from discovered resources (nested discoveries are top-level keys) - data: - configmap: - name: - expression: | - resources.?configmap0.?metadata.?name.orValue("") - clusterId: - expression: | - resources.?configmap0.?data.?cluster_id.orValue("") - namespace: - name: - expression: | - resources.?namespace0.?metadata.?name.orValue("") - # Namespace phase from statusFeedback merged onto the nested discovery object - phase: - expression: | - has(resources.namespace0) - && has(resources.namespace0.statusFeedback) - && has(resources.namespace0.statusFeedback.values) - && resources.namespace0.statusFeedback.values - .filter(v, v.name == "phase").size() > 0 - ? resources.namespace0.statusFeedback.values - .filter(v, v.name == "phase")[0].fieldValue.string - : "" - manifestwork: - name: - expression: | - resources.?resource0.?metadata.?name.orValue("") - appliedAt: - expression: | - resources.?resource0.?metadata.?creationTimestamp.orValue("") - debug: + hyperfleet.io/generation: '{{ .generation }}' + labels: + hyperfleet.io/cluster-id: '{{ .clusterId }}' + name: '{{ .clusterId }}-config-symbol2222' + namespace: '{{ .clusterId }}' + name: resource0 + # Discover sub-resources within the manifestWork + # This approach can be used to use the discovery name to parameter level + # This can support jsonPath to dig into the resource status. 
like discoveryNamespace.status.conditions[?(@.type=="Ready")].status + nested_discoveries: + - discovery: + by_selectors: + label_selector: + hyperfleet.io/label-for-discovery: namespace-symbol111 + name: namespace0 + - discovery: + by_name: '{{ .clusterId }}-config-symbol2222' + name: configmap0 + transport: + client: maestro + maestro: + target_cluster: cluster1 +post: + payloads: + - build: + adapter: dryrun-maestro + conditions: + # Applied: Was the ManifestWork applied by Maestro? + - message: + expression: | + has(resources.resource0) + && has(resources.resource0.status) + && has(resources.resource0.status.conditions) + && resources.resource0.status.conditions.filter(c, c.type == "Applied").size() > 0 + ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].message + : "ManifestWork not yet discovered or status unavailable" + reason: + expression: | + has(resources.resource0) + && has(resources.resource0.status) + && has(resources.resource0.status.conditions) + && resources.resource0.status.conditions.filter(c, c.type == "Applied").size() > 0 + ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].reason + : "ManifestWorkNotDiscovered" + status: + expression: | + has(resources.resource0) + && has(resources.resource0.status) + && has(resources.resource0.status.conditions) + && resources.resource0.status.conditions.filter(c, c.type == "Applied").size() > 0 + ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].status + : "False" + type: Applied + # Available: Are all sub-resources available on the target cluster? + - message: + expression: | + has(resources.resource0) + && has(resources.resource0.status) + && has(resources.resource0.status.conditions) + && resources.resource0.status.conditions.filter(c, c.type == "Available").size() > 0 + ? 
resources.resource0.status.conditions.filter(c, c.type == "Available")[0].message + : "ManifestWork not yet discovered or resources not available" + reason: + expression: | + has(resources.resource0) + && has(resources.resource0.status) + && has(resources.resource0.status.conditions) + && resources.resource0.status.conditions.filter(c, c.type == "Available").size() > 0 + ? resources.resource0.status.conditions.filter(c, c.type == "Available")[0].reason + : "ResourcesNotYetAvailable" + status: + expression: | + has(resources.resource0) + && has(resources.resource0.status) + && has(resources.resource0.status.conditions) + && resources.resource0.status.conditions.filter(c, c.type == "Available").size() > 0 + ? resources.resource0.status.conditions.filter(c, c.type == "Available")[0].status + : "False" + type: Available + # Health: Adapter execution status — surfaces errors from any phase + - message: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "Adapter failed at phase [" + + adapter.?executionError.?phase.orValue("unknown") + + "] step [" + + adapter.?executionError.?step.orValue("unknown") + + "]: " + + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) + : adapter.?resourcesSkipped.orValue(false) + ? "Resources skipped: " + adapter.?skipReason.orValue("unknown reason") + : "Adapter execution completed successfully" + reason: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") + : adapter.?resourcesSkipped.orValue(false) + ? "ResourcesSkipped" + : "Healthy" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" + && !adapter.?resourcesSkipped.orValue(false) + ? 
"True" + : "False" + type: Health + # Data extracted from discovered resources (nested discoveries are top-level keys) + data: + configmap: + clusterId: + expression: | + resources.?configmap0.?data.?cluster_id.orValue("") + name: + expression: | + resources.?configmap0.?metadata.?name.orValue("") + debug: + expression: | + toJson(resources.resource0) + manifestwork: + appliedAt: + expression: | + resources.?resource0.?metadata.?creationTimestamp.orValue("") + name: + expression: | + resources.?resource0.?metadata.?name.orValue("") + namespace: + name: + expression: | + resources.?namespace0.?metadata.?name.orValue("") + # Namespace phase from statusFeedback merged onto the nested discovery object + phase: expression: | - toJson(resources.resource0) - - postActions: - - name: "update-status" - apiCall: - method: "POST" - url: "/api/hyperfleet/v1/clusters/{{ .clusterId }}/statuses" - body: "{{ .statusPayload }}" + has(resources.namespace0) + && has(resources.namespace0.statusFeedback) + && has(resources.namespace0.statusFeedback.values) + && resources.namespace0.statusFeedback.values + .filter(v, v.name == "phase").size() > 0 + ? 
resources.namespace0.statusFeedback.values + .filter(v, v.name == "phase")[0].fieldValue.string + : "" + observed_generation: + expression: generation + observed_time: '{{ now | date "2006-01-02T15:04:05Z07:00" }}' + name: statusPayload + post_actions: + - api_call: + body: '{{ .statusPayload }}' + method: POST + url: /api/hyperfleet/v1/clusters/{{ .clusterId }}/statuses + name: update-status diff --git a/test/testdata/task-config.yaml b/test/testdata/task-config.yaml index fbd9ecb..e404dbd 100644 --- a/test/testdata/task-config.yaml +++ b/test/testdata/task-config.yaml @@ -1,143 +1,116 @@ # HyperFleet Adapter Task Configuration for testing -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: example-adapter - labels: - hyperfleet.io/adapter-type: example - hyperfleet.io/component: adapter - -spec: - # Parameters with all required variables - params: - - name: "hyperfleetApiBaseUrl" - source: "env.HYPERFLEET_API_BASE_URL" - type: "string" - - - name: "hyperfleetApiVersion" - source: "env.HYPERFLEET_API_VERSION" - type: "string" - default: "v1" - - - name: "clusterId" - source: "event.id" - type: "string" - required: true - - # Preconditions with valid operators and CEL expressions - preconditions: - - name: "clusterStatus" - apiCall: - method: "GET" - url: "/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "clusterName" - field: "metadata.name" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? 
status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - name: "generation" - field: "generation" - # Structured conditions with valid operators - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "False" - - # Resources - resources: - - name: "clusterNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "hyperfleet-adapter" - annotations: - hyperfleet.io/generation: "{{ .generation }}" - discovery: - namespace: "*" - byName: "{{ .clusterId | lower }}" - - - name: "clusterConfigMap" - manifest: - apiVersion: v1 - kind: ConfigMap - metadata: - name: "cluster-{{ .clusterId }}-config" - namespace: "{{ .clusterId | lower }}" - annotations: - hyperfleet.io/generation: "{{ .generation }}" - data: - cluster_id: "{{ .clusterId }}" - discovery: - namespace: "{{ .clusterId | lower }}" - byName: "cluster-{{ .clusterId }}-config" - - # Post configuration - post: - payloads: - - name: "clusterStatusPayload" - build: - adapter: "{{ .metadata.name }}" - conditions: - # Applied: Job successfully created - - type: "Applied" - status: - expression: | - has(resources.clusterConfigMap.spec) ? "True" : "False" - reason: - expression: | - has(resources.clusterConfigMap.spec) - ? "JobApplied" - : "JobPending" - message: - expression: | - has(resources.clusterConfigMap) - ? "clusterConfigMap manifest applied successfully" - : "clusterConfigMap is pending to be applied" - # Available: Check job status conditions - - type: "Available" - status: - expression: | - has(resources.clusterConfigMap.metadata.resourceVersion) ? "True" : "False" - reason: - expression: | - has(resources.clusterConfigMap.metadata.resourceVersion) ? 
"ConfigMapAvailable" : "ConfigMapPending" - message: - expression: | +# Parameters with all required variables +params: + - name: clusterId + required: true + source: event.id + type: string +# Preconditions with valid operators and CEL expressions +preconditions: + - api_call: + method: GET + retry_attempts: 3 + retry_backoff: exponential + timeout: 10s + url: /clusters/{{ .clusterId }} + capture: + - field: name + name: clusterName + - expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + name: readyConditionStatus + - field: generation + name: generation + # Structured conditions with valid operators + conditions: + - field: readyConditionStatus + operator: equals + value: "False" + name: clusterStatus +# Resources +resources: + - discovery: + by_name: '{{ .clusterId | lower }}' + manifest: + apiVersion: v1 + kind: Namespace + metadata: + annotations: + hyperfleet.io/generation: '{{ .generation }}' + labels: + hyperfleet.io/cluster-id: '{{ .clusterId }}' + hyperfleet.io/managed-by: hyperfleet-adapter + name: '{{ .clusterId | lower }}' + name: clusterNamespace + - discovery: + by_name: cluster-{{ .clusterId }}-config + namespace: '{{ .clusterId | lower }}' + manifest: + apiVersion: v1 + data: + cluster_id: '{{ .clusterId }}' + kind: ConfigMap + metadata: + annotations: + hyperfleet.io/generation: '{{ .generation }}' + name: cluster-{{ .clusterId }}-config + namespace: '{{ .clusterId | lower }}' + name: clusterConfigMap +# Post configuration +post: + payloads: + - build: + adapter: '{{ .adapter.name }}' + conditions: + # Applied: Job successfully created + - type: Applied + message: + expression: | + has(resources.clusterConfigMap) + ? "clusterConfigMap manifest applied successfully" + : "clusterConfigMap is pending to be applied" + reason: + expression: | has(resources.clusterConfigMap.metadata.resourceVersion) - ? 
"clusterConfigMap has been created" - : "clusterConfigMap is not yet available" - # Health: Adapter execution status (runtime) - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" ? "True" : "False" - reason: - expression: | - adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" - message: - expression: | - adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations in progress or completed successfully" - # Event generation ID metadata field needs to use expression to avoid interpolation issues - observed_generation: - expression: "generation" - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - - postActions: - - name: "updateStatus" - when: - expression: | - adapter.executionStatus == "success" - apiCall: - method: "POST" - url: "/clusters/{{ .clusterId }}/statuses" - body: "{{ .clusterStatusPayload }}" + ? "ConfigMapApplied" + : "ConfigMapPending" + status: + expression: | + has(resources.clusterConfigMap.metadata.resourceVersion) ? "True" : "False" + # Available: Check job status conditions + - type: Available + message: + expression: | + has(resources.clusterConfigMap.metadata.resourceVersion) + ? "clusterConfigMap has been created" + : "clusterConfigMap is not yet available" + reason: + expression: | + has(resources.clusterConfigMap.metadata.resourceVersion) ? "ConfigMapAvailable" : "ConfigMapPending" + status: + expression: | + has(resources.clusterConfigMap.metadata.resourceVersion) ? "True" : "False" + # Health: Adapter execution status (runtime) + - type: Health + message: + expression: | + adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations in progress or completed successfully" + reason: + expression: | + adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" ? 
"True" : "False" + # Event generation ID metadata field needs to use expression to avoid interpolation issues + observed_generation: + expression: generation + observed_time: '{{ now | date "2006-01-02T15:04:05Z07:00" }}' + name: clusterStatusPayload + post_actions: + - api_call: + body: '{{ .clusterStatusPayload }}' + method: POST + url: /clusters/{{ .clusterId }}/statuses + name: updateStatus diff --git a/test/testdata/templates/deployment.yaml b/test/testdata/templates/deployment.yaml index c41d312..b6844d9 100644 --- a/test/testdata/templates/deployment.yaml +++ b/test/testdata/templates/deployment.yaml @@ -6,7 +6,7 @@ metadata: namespace: "cluster-{{ .clusterId }}" labels: hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" + hyperfleet.io/managed-by: "{{ .adapter.name }}" spec: replicas: 1 selector: