From 338294204e7ddac932e6952426229321afe6d02a Mon Sep 17 00:00:00 2001 From: Angel Marin Date: Fri, 6 Mar 2026 12:09:02 +0100 Subject: [PATCH] HYPERFLEET-722 - fix: update adapter configs to follow new standard --- helm/adapter1/adapter-config.yaml | 47 +- helm/adapter1/adapter-task-config.yaml | 283 ++++++----- helm/adapter2/adapter-config.yaml | 120 +++-- helm/adapter2/adapter-task-config.yaml | 640 ++++++++++++------------- helm/adapter3/adapter-config.yaml | 47 +- helm/adapter3/adapter-task-config.yaml | 306 ++++++------ helm/sentinel-nodepools/values.yaml | 9 + 7 files changed, 700 insertions(+), 752 deletions(-) diff --git a/helm/adapter1/adapter-config.yaml b/helm/adapter1/adapter-config.yaml index 933d28b..7bdf7bc 100644 --- a/helm/adapter1/adapter-config.yaml +++ b/helm/adapter1/adapter-config.yaml @@ -1,33 +1,26 @@ # Example HyperFleet Adapter deployment configuration -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: adapter1 - labels: - hyperfleet.io/adapter-type: kubernetes - hyperfleet.io/component: adapter -spec: - adapter: - version: "0.1.0" + version: "0.2.0" - # Log the full merged configuration after load (default: false) - debugConfig: true - log: - level: debug +# Log the full merged configuration after load (default: false) +debug_config: true +log: + level: debug - clients: - hyperfleetApi: - baseUrl: http://hyperfleet-api:8000 - version: v1 - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential +clients: + hyperfleet_api: + base_url: http://hyperfleet-api:8000 + version: v1 + timeout: 2s + retry_attempts: 3 + retry_backoff: exponential - broker: - # These values are overridden at deploy time via env vars from Helm values - subscriptionId: "placeholder" - topic: "placeholder" + broker: + # These values are overridden at deploy time via env vars from Helm values + subscription_id: "placeholder" + topic: "placeholder" - kubernetes: - apiVersion: "v1" - #kubeConfigPath: PATH_TO_KUBECONFIG # for 
local development + kubernetes: + api_version: "v1" + #kube_config_path: PATH_TO_KUBECONFIG # for local development diff --git a/helm/adapter1/adapter-task-config.yaml b/helm/adapter1/adapter-task-config.yaml index be9e619..f992869 100644 --- a/helm/adapter1/adapter-task-config.yaml +++ b/helm/adapter1/adapter-task-config.yaml @@ -1,157 +1,146 @@ # Example HyperFleet Adapter task configuration -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: adapter1 - labels: - hyperfleet.io/adapter-type: adapter1 - hyperfleet.io/component: adapter -spec: - # Parameters with all required variables - params: +params: + - name: "clusterId" + source: "event.id" + type: "string" + required: true - - name: "clusterId" - source: "event.id" - type: "string" - required: true + - name: "generation" + source: "event.generation" + type: "int" + required: true - - name: "generation" - source: "event.generation" - type: "int" - required: true + - name: "namespace" + source: "env.NAMESPACE" + type: "string" - - name: "namespace" - source: "env.NAMESPACE" - type: "string" +# Preconditions with valid operators and CEL expressions +preconditions: + - name: "clusterStatus" + api_call: + method: "GET" + url: "/clusters/{{ .clusterId }}" + timeout: 10s + retry_attempts: 3 + retry_backoff: "exponential" + capture: + - name: "clusterName" + field: "name" + - name: "generation" + field: "generation" + - name: "readyConditionStatus" + expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? 
status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + # Structured conditions with valid operators + conditions: + - field: "readyConditionStatus" + operator: "equals" + value: "False" - # Preconditions with valid operators and CEL expressions - preconditions: - - name: "clusterStatus" - apiCall: - method: "GET" - url: "/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "clusterName" - field: "name" - - name: "generation" - field: "generation" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - # Structured conditions with valid operators - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "False" + - name: "validationCheck" + # Valid CEL expression + expression: | + readyConditionStatus == "False" - - name: "validationCheck" - # Valid CEL expression - expression: | - readyConditionStatus == "False" - - # Resources with valid K8s manifests - resources: - - name: "resource0" - transport: - client: "kubernetes" - manifest: - apiVersion: v1 - kind: ConfigMap - data: - cluster_id: "{{ .clusterId }}" - cluster_name: "{{ .clusterName }}" - metadata: - name: "{{ .clusterId }}-adapter1-configmap" - namespace: "{{ .namespace }}" - labels: - app.kubernetes.io/component: adapter - app.kubernetes.io/instance: adapter1 - app.kubernetes.io/name: hyperfleet-adapter - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/transport: kubernetes - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/cluster-name: "{{ .clusterName }}" - discovery: +# Resources with valid K8s manifests +resources: + - name: "resource0" + transport: + client: "kubernetes" + manifest: + apiVersion: v1 + kind: ConfigMap + data: + cluster_id: "{{ .clusterId }}" + cluster_name: "{{ .clusterName }}" + metadata: + name: "{{ .clusterId }}-adapter1-configmap" namespace: 
"{{ .namespace }}" - bySelectors: - labelSelector: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/cluster-name: "{{ .clusterName }}" + labels: + app.kubernetes.io/component: adapter + app.kubernetes.io/instance: adapter1 + app.kubernetes.io/name: hyperfleet-adapter + app.kubernetes.io/version: 1.0.0 + app.kubernetes.io/transport: kubernetes + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" + discovery: + namespace: "{{ .namespace }}" + by_selectors: + label_selector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" - # Post-processing with valid CEL expressions - # This example contains multiple resources, we will only report on the conditions of the jobNamespace not to overcomplicate the example - post: - payloads: - - name: "statusPayload" - build: - # TODO: this should come from config.adapter.metadata.name - adapter: "adapter1" - conditions: - # Applied: Job successfully created - - type: "Applied" - status: - expression: | - has(resources.resource0.metadata.creationTimestamp) ? "True" : "False" - reason: - expression: | - has(resources.resource0.metadata.creationTimestamp) ? "ConfigMapApplied" : "ConfigMapPending" - message: - expression: | - has(resources.resource0.metadata.creationTimestamp) - ? "ConfigMap has been applied correctly" - : "ConfigMap is pending to be applied" - # Available: Check job status conditions - - type: "Available" - status: - expression: | - has(resources.resource0.data.cluster_id) ? "True" : "False" - reason: - expression: | - has(resources.resource0.data.cluster_id) - ? "ConfigMap data available" - : "ConfigMap data not yet available" - message: - expression: | - has(resources.resource0.data.cluster_id) - ? "ConfigMap data available" - : "ConfigMap data not yet available" - # Health: Adapter execution status (runtime) - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" ? 
"True" : (adapter.?executionStatus.orValue("") == "failed" ? "False" : "Unknown") - reason: - expression: | - adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" - message: - expression: | - adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations completed successfully" - # Event generation ID metadata field needs to use expression to avoid interpolation issues - observed_generation: - expression: "generation" - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - # Optional data field for adapter-specific metrics extracted from resources - data: - namespace: - name: - expression: | - resources.?resources.resource0.?metadata.?name.orValue("") - creationTimestamp: - expression: | - resources.?resources.resource0.?metadata.?creationTimestamp.orValue("") +# Post-processing with valid CEL expressions +# This example contains multiple resources, we will only report on the conditions of the jobNamespace not to overcomplicate the example +post: + payloads: + - name: "statusPayload" + build: + adapter: "{{ .adapter.name }}" + conditions: + # Applied: Job successfully created + - type: "Applied" + status: + expression: | + has(resources.resource0.metadata.creationTimestamp) ? "True" : "False" + reason: + expression: | + has(resources.resource0.metadata.creationTimestamp) ? "ConfigMapApplied" : "ConfigMapPending" + message: + expression: | + has(resources.resource0.metadata.creationTimestamp) + ? "ConfigMap has been applied correctly" + : "ConfigMap is pending to be applied" + # Available: Check job status conditions + - type: "Available" + status: + expression: | + has(resources.resource0.data.cluster_id) ? "True" : "False" + reason: + expression: | + has(resources.resource0.data.cluster_id) + ? "ConfigMap data available" + : "ConfigMap data not yet available" + message: + expression: | + has(resources.resource0.data.cluster_id) + ? 
"ConfigMap data available" + : "ConfigMap data not yet available" + # Health: Adapter execution status (runtime) + - type: "Health" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" ? "True" : (adapter.?executionStatus.orValue("") == "failed" ? "False" : "Unknown") + reason: + expression: | + adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" + message: + expression: | + adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations completed successfully" + # Event generation ID metadata field needs to use expression to avoid interpolation issues + observed_generation: + expression: "generation" + observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + # Optional data field for adapter-specific metrics extracted from resources + data: + namespace: + name: + expression: | + resources.?resource0.?metadata.?name.orValue("") + creationTimestamp: + expression: | + resources.?resource0.?metadata.?creationTimestamp.orValue("") - postActions: - - name: "reportClusterStatus" - apiCall: - method: "POST" - url: "/clusters/{{ .clusterId }}/statuses" - headers: - - name: "Content-Type" - value: "application/json" - body: "{{ .statusPayload }}" + post_actions: + - name: "reportClusterStatus" + api_call: + method: "POST" + url: "/clusters/{{ .clusterId }}/statuses" + headers: + - name: "Content-Type" + value: "application/json" + body: "{{ .statusPayload }}" diff --git a/helm/adapter2/adapter-config.yaml b/helm/adapter2/adapter-config.yaml index 2239b1e..61e4a00 100644 --- a/helm/adapter2/adapter-config.yaml +++ b/helm/adapter2/adapter-config.yaml @@ -1,71 +1,63 @@ # Example HyperFleet Adapter deployment configuration -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: adapter2 - labels: - hyperfleet.io/adapter-type: adapter2 - hyperfleet.io/component: adapter -spec: - adapter: - version: "0.1.0" + version: 
"0.2.0" - # Log the full merged configuration after load (default: false) - debugConfig: true - log: - level: debug +# Log the full merged configuration after load (default: false) +debug_config: true +log: + level: debug - clients: - hyperfleetApi: - baseUrl: http://hyperfleet-api:8000 - version: v1 - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential +clients: + hyperfleet_api: + base_url: http://hyperfleet-api:8000 + version: v1 + timeout: 2s + retry_attempts: 3 + retry_backoff: exponential - broker: - # These values are overridden at deploy time via env vars from Helm values - subscriptionId: "placeholder" - topic: "placeholder" + broker: + # These values are overridden at deploy time via env vars from Helm values + subscription_id: "placeholder" + topic: "placeholder" - maestro: - grpcServerAddress: "maestro-grpc.maestro.svc.cluster.local:8090" - - # HTTPS server address for REST API operations (optional) - # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS - httpServerAddress: "http://maestro.maestro.svc.cluster.local:8000" - - # Source identifier for CloudEvents routing (must be unique across adapters) - # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID - sourceId: "hyperfleet-adapter" - - # Client identifier (defaults to sourceId if not specified) - # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID - clientId: "hyperfleet-adapter-client" - insecure: true - - # Authentication configuration - #auth: - # type: "tls" # TLS certificate-based mTLS - # - # tlsConfig: - # # gRPC TLS configuration - # # Certificate paths (mounted from Kubernetes secrets) - # # Environment variable: HYPERFLEET_MAESTRO_CA_FILE - # caFile: "/etc/maestro/certs/grpc/ca.crt" - # - # # Environment variable: HYPERFLEET_MAESTRO_CERT_FILE - # certFile: "/etc/maestro/certs/grpc/client.crt" - # - # # Environment variable: HYPERFLEET_MAESTRO_KEY_FILE - # keyFile: "/etc/maestro/certs/grpc/client.key" - # - # # Server name for TLS verification - # # Environment variable: 
HYPERFLEET_MAESTRO_SERVER_NAME - # serverName: "maestro-grpc.maestro.svc.cluster.local" - # - # # HTTP API TLS configuration (may use different CA than gRPC) - # # If not set, falls back to caFile for backwards compatibility - # # Environment variable: HYPERFLEET_MAESTRO_HTTP_CA_FILE - # httpCaFile: "/etc/maestro/certs/https/ca.crt" + maestro: + grpc_server_address: "maestro-grpc.maestro.svc.cluster.local:8090" + # HTTPS server address for REST API operations (optional) + # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS + http_server_address: "http://maestro.maestro.svc.cluster.local:8000" + + # Source identifier for CloudEvents routing (must be unique across adapters) + # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID + source_id: "hyperfleet-adapter" + + # Client identifier (defaults to source_id if not specified) + # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID + client_id: "hyperfleet-adapter-client" + insecure: true + + # Authentication configuration + #auth: + # type: "tls" # TLS certificate-based mTLS + # + # tls_config: + # # gRPC TLS configuration + # # Certificate paths (mounted from Kubernetes secrets) + # # Environment variable: HYPERFLEET_MAESTRO_CA_FILE + # ca_file: "/etc/maestro/certs/grpc/ca.crt" + # + # # Environment variable: HYPERFLEET_MAESTRO_CERT_FILE + # cert_file: "/etc/maestro/certs/grpc/client.crt" + # + # # Environment variable: HYPERFLEET_MAESTRO_KEY_FILE + # key_file: "/etc/maestro/certs/grpc/client.key" + # + # # Server name for TLS verification + # # Environment variable: HYPERFLEET_MAESTRO_SERVER_NAME + # server_name: "maestro-grpc.maestro.svc.cluster.local" + # + # # HTTP API TLS configuration (may use different CA than gRPC) + # # If not set, falls back to ca_file for backwards compatibility + # # Environment variable: HYPERFLEET_MAESTRO_HTTP_CA_FILE + # http_ca_file: "/etc/maestro/certs/https/ca.crt" diff --git a/helm/adapter2/adapter-task-config.yaml b/helm/adapter2/adapter-task-config.yaml index 
57dc24d..653b15d 100644 --- a/helm/adapter2/adapter-task-config.yaml +++ b/helm/adapter2/adapter-task-config.yaml @@ -1,356 +1,340 @@ # Example HyperFleet Adapter task configuration -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: adapter2 - labels: - hyperfleet.io/adapter-type: maestro - hyperfleet.io/component: adapter -spec: - # Parameters with all required variables - params: +params: - - name: "clusterId" - source: "event.id" - type: "string" - required: true + - name: "clusterId" + source: "event.id" + type: "string" + required: true - - name: "generation" - source: "event.generation" - type: "int" - required: true + - name: "generation" + source: "event.generation" + type: "int" + required: true - - name: "namespace" - source: "env.NAMESPACE" - type: "string" + - name: "namespace" + source: "env.NAMESPACE" + type: "string" - # Preconditions with valid operators and CEL expressions - preconditions: - - name: "clusterStatus" - apiCall: - method: "GET" - url: "/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "clusterName" - field: "name" - - name: "generation" - field: "generation" - - name: "timestamp" - field: "created_time" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "False" +# Preconditions with valid operators and CEL expressions +preconditions: + - name: "clusterStatus" + api_call: + method: "GET" + url: "/clusters/{{ .clusterId }}" + timeout: 10s + retry_attempts: 3 + retry_backoff: "exponential" + capture: + - name: "clusterName" + field: "name" + - name: "generation" + field: "generation" + - name: "timestamp" + field: "created_time" + - name: "readyConditionStatus" + expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? 
status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + - name: "placementClusterName" + expression: "\"cluster1\"" # TBC coming from placement adapter - - name: "placementClusterName" - expression: "\"cluster1\"" # TBC coming from placement adapter - description: "Unique identifier for the target maestro" + # Structured conditions with valid operators + conditions: + - field: "readyConditionStatus" + operator: "equals" + value: "False" + - name: "validationCheck" + # Valid CEL expression + expression: | + readyConditionStatus == "False" - # Structured conditions with valid operators - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "False" +# Resources with valid K8s manifests +resources: + - name: "resource0" + transport: + client: "maestro" + maestro: + target_cluster: "{{ .placementClusterName }}" - - name: "validationCheck" - # Valid CEL expression - expression: | - readyConditionStatus == "False" + # ManifestWork is a kind of manifest that can be used to create resources on the cluster. + # It is a collection of resources that are created together. + manifest: + apiVersion: work.open-cluster-management.io/v1 + kind: ManifestWork + metadata: + # ManifestWork name - must be unique within consumer namespace + name: "{{ .clusterId }}-{{ .adapter.name }}" - # Resources with valid K8s manifests - resources: - - name: "resource0" - transport: - client: "maestro" - maestro: - targetCluster: "{{ .placementClusterName }}" - - # ManifestWork is a kind of manifest that can be used to create resources on the cluster. - # It is a collection of resources that are created together. - # It is created by the adapter and can be used to create resources on the cluster. - # It is created by the adapter and can be used to create resources on the cluster. 
- manifest: - apiVersion: work.open-cluster-management.io/v1 - kind: ManifestWork - metadata: - # ManifestWork name - must be unique within consumer namespace - name: "{{ .clusterId }}-{{ .metadata.name }}" + # Labels for identification, filtering, and management + labels: + # HyperFleet tracking labels + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/adapter: "{{ .adapter.name }}" + hyperfleet.io/component: "infrastructure" + hyperfleet.io/generation: "{{ .generation }}" + hyperfleet.io/resource-group: "cluster-setup" - # Labels for identification, filtering, and management - labels: - # HyperFleet tracking labels - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/adapter: "{{ .metadata.name }}" - hyperfleet.io/component: "infrastructure" - hyperfleet.io/generation: "{{ .generation }}" - hyperfleet.io/resource-group: "cluster-setup" + # Maestro-specific labels + maestro.io/source-id: "{{ .adapter.name }}" + maestro.io/resource-type: "manifestwork" + maestro.io/priority: "normal" - # Maestro-specific labels - maestro.io/source-id: "{{ .metadata.name }}" - maestro.io/resource-type: "manifestwork" - maestro.io/priority: "normal" + # Standard Kubernetes application labels + app.kubernetes.io/name: "aro-hcp-cluster" + app.kubernetes.io/instance: "{{ .clusterId }}" + app.kubernetes.io/version: "v1.0.0" + app.kubernetes.io/component: "infrastructure" + app.kubernetes.io/part-of: "hyperfleet" + app.kubernetes.io/managed-by: "hyperfleet-adapter" + app.kubernetes.io/created-by: "{{ .adapter.name }}" - # Standard Kubernetes application labels - app.kubernetes.io/name: "aro-hcp-cluster" - app.kubernetes.io/instance: "{{ .clusterId }}" - app.kubernetes.io/version: "v1.0.0" - app.kubernetes.io/component: "infrastructure" - app.kubernetes.io/part-of: "hyperfleet" - app.kubernetes.io/managed-by: "hyperfleet-adapter" - app.kubernetes.io/created-by: "{{ .metadata.name }}" + # Annotations for metadata and operational information + annotations: + # Tracking 
and lifecycle + hyperfleet.io/created-by: "hyperfleet-adapter-framework" + hyperfleet.io/managed-by: "{{ .adapter.name }}" + hyperfleet.io/generation: "{{ .generation }}" + hyperfleet.io/cluster-name: "{{ .clusterId }}" + hyperfleet.io/deployment-time: "{{ .timestamp }}" - # Annotations for metadata and operational information - annotations: - # Tracking and lifecycle - hyperfleet.io/created-by: "hyperfleet-adapter-framework" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - hyperfleet.io/generation: "{{ .generation }}" - hyperfleet.io/cluster-name: "{{ .clusterId }}" - hyperfleet.io/deployment-time: "{{ .timestamp }}" + # Maestro-specific annotations + maestro.io/applied-time: "{{ .timestamp }}" + maestro.io/source-adapter: "{{ .adapter.name }}" - # Maestro-specific annotations - maestro.io/applied-time: "{{ .timestamp }}" - maestro.io/source-adapter: "{{ .metadata.name }}" + # Operational annotations + deployment.hyperfleet.io/strategy: "rolling" + deployment.hyperfleet.io/timeout: "300s" + monitoring.hyperfleet.io/enabled: "true" - # Operational annotations - deployment.hyperfleet.io/strategy: "rolling" - deployment.hyperfleet.io/timeout: "300s" - monitoring.hyperfleet.io/enabled: "true" + # Documentation + description: "Complete cluster setup including namespace, configuration, and RBAC" + documentation: "https://docs.hyperfleet.io/adapters/aro-hcp" - # Documentation - description: "Complete cluster setup including namespace, configuration, and RBAC" - documentation: "https://docs.hyperfleet.io/adapters/aro-hcp" + # ManifestWork specification + spec: + # ============================================================================ + # Workload - Contains the Kubernetes manifests to deploy + # ============================================================================ + workload: + # Kubernetes manifests array - injected by framework from business logic config + manifests: + - apiVersion: v1 + kind: Namespace + metadata: + name: "{{ .clusterId | lower }}-{{ 
.adapter.name }}-namespace" + labels: + app.kubernetes.io/component: adapter-task-config + app.kubernetes.io/instance: "{{ .adapter.name }}" + app.kubernetes.io/name: hyperfleet-adapter + app.kubernetes.io/version: 1.0.0 + app.kubernetes.io/transport: maestro + annotations: + hyperfleet.io/generation: "{{ .generation }}" + - apiVersion: v1 + kind: ConfigMap + data: + cluster_id: "{{ .clusterId }}" + cluster_name: "{{ .clusterId }}" + metadata: + name: "{{ .clusterId | lower }}-{{ .adapter.name }}-configmap" + namespace: "{{ .clusterId | lower }}-{{ .adapter.name }}-namespace" + labels: + app.kubernetes.io/component: adapter-task-config + app.kubernetes.io/instance: "{{ .adapter.name }}" + app.kubernetes.io/name: hyperfleet-adapter + app.kubernetes.io/version: 1.0.0 + app.kubernetes.io/transport: maestro + annotations: + hyperfleet.io/generation: "{{ .generation }}" - # ManifestWork specification - spec: - # ============================================================================ - # Workload - Contains the Kubernetes manifests to deploy - # ============================================================================ - workload: - # Kubernetes manifests array - injected by framework from business logic config - manifests: - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ .clusterId | lower }}-{{ .metadata.name }}-namespace" - labels: - app.kubernetes.io/component: adapter-task-config - app.kubernetes.io/instance: "{{ .metadata.name }}" - app.kubernetes.io/name: hyperfleet-adapter - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/transport: maestro - annotations: - hyperfleet.io/generation: "{{ .generation }}" - - apiVersion: v1 - kind: ConfigMap - data: - cluster_id: "{{ .clusterId }}" - cluster_name: "{{ .clusterId }}" - metadata: - name: "{{ .clusterId | lower }}-{{ .metadata.name }}-configmap" - namespace: "{{ .clusterId | lower }}-{{ .metadata.name }}-namespace" - labels: - app.kubernetes.io/component: adapter-task-config - 
app.kubernetes.io/instance: "{{ .metadata.name }}" - app.kubernetes.io/name: hyperfleet-adapter - app.kubernetes.io/version: 1.0.0 - app.kubernetes.io/transport: maestro - annotations: - hyperfleet.io/generation: "{{ .generation }}" + # ============================================================================ + # Delete Options - How resources should be removed + # ============================================================================ + deleteOption: + # Propagation policy for resource deletion + propagationPolicy: "Foreground" - # ============================================================================ - # Delete Options - How resources should be removed - # ============================================================================ - deleteOption: - # Propagation policy for resource deletion - # - "Foreground": Wait for dependent resources to be deleted first - # - "Background": Delete immediately, let cluster handle dependents - # - "Orphan": Leave resources on cluster when ManifestWork is deleted - propagationPolicy: "Foreground" + # Grace period for graceful deletion (seconds) + gracePeriodSeconds: 30 - # Grace period for graceful deletion (seconds) - gracePeriodSeconds: 30 + # ============================================================================ + # Manifest Configurations - Per-resource settings for update and feedback + # ============================================================================ + manifestConfigs: + - resourceIdentifier: + group: "" # Core API group (empty for v1 resources) + resource: "namespaces" # Resource type + name: "{{ .clusterId | lower }}-{{ .adapter.name }}-namespace" + updateStrategy: + type: "ServerSideApply" # Use server-side apply for namespaces + feedbackRules: + - type: "JSONPaths" # Use JSON path expressions for status feedback + jsonPaths: + - name: "phase" + path: ".status.phase" + # ======================================================================== + # Configuration for ConfigMap resources + 
# ======================================================================== + - resourceIdentifier: + group: "" # Core API group (empty for v1 resources) + resource: "configmaps" # Resource type + name: "{{ .clusterId | lower }}-{{ .adapter.name }}-configmap" + namespace: "{{ .clusterId | lower }}-{{ .adapter.name }}-namespace" + updateStrategy: + type: "ServerSideApply" # Use server-side apply for configmaps + serverSideApply: + fieldManager: "hyperfleet-adapter" # Field manager name for conflict resolution + force: false # Don't force conflicts (fail on conflicts) + feedbackRules: + - type: "JSONPaths" # Use JSON path expressions for status feedback + jsonPaths: + - name: "data" + path: ".data" + - name: "resourceVersion" + path: ".metadata.resourceVersion" - # ============================================================================ - # Manifest Configurations - Per-resource settings for update and feedback - # ============================================================================ - manifestConfigs: - - resourceIdentifier: - group: "" # Core API group (empty for v1 resources) - resource: "namespaces" # Resource type - name: "{{ .clusterId | lower }}-{{ .metadata.name }}-namespace" # Specific resource name - updateStrategy: - type: "ServerSideApply" # Use server-side apply for namespaces - feedbackRules: - - type: "JSONPaths" # Use JSON path expressions for status feedback - jsonPaths: - - name: "phase" - path: ".status.phase" - # ======================================================================== - # Configuration for Namespace resources - # ======================================================================== - - resourceIdentifier: - group: "" # Core API group (empty for v1 resources) - resource: "configmaps" # Resource type - name: "{{ .clusterId | lower }}-{{ .metadata.name }}-configmap" # Specific resource name - namespace: "{{ .clusterId | lower }}-{{ .metadata.name }}-namespace" - updateStrategy: - type: "ServerSideApply" # Use server-side 
apply for namespaces - serverSideApply: - fieldManager: "hyperfleet-adapter" # Field manager name for conflict resolution - force: false # Don't force conflicts (fail on conflicts) - feedbackRules: - - type: "JSONPaths" # Use JSON path expressions for status feedback - jsonPaths: - - name: "data" - path: ".data" - - name: "resourceVersion" - path: ".metadata.resourceVersion" - # Discover the ResourceBundle (ManifestWork) by name from Maestro - discovery: - byName: "{{ .clusterId }}-{{ .metadata.name }}" + # Discover the ResourceBundle (ManifestWork) by name from Maestro + discovery: + by_name: "{{ .clusterId }}-{{ .adapter.name }}" - # Discover nested resources deployed by the ManifestWork - nestedDiscoveries: - - name: "namespace0" - discovery: - byName: "{{ .clusterId | lower }}-{{ .metadata.name }}-namespace" - - name: "configmap0" - discovery: - byName: "{{ .clusterId | lower }}-{{ .metadata.name }}-configmap" + # Discover nested resources deployed by the ManifestWork + nested_discoveries: + - name: "namespace0" + discovery: + by_name: "{{ .clusterId | lower }}-{{ .adapter.name }}-namespace" + - name: "configmap0" + discovery: + by_name: "{{ .clusterId | lower }}-{{ .adapter.name }}-configmap" - post: - payloads: - - name: "statusPayload" - build: - adapter: "{{ .metadata.name }}" - conditions: - # Applied: Check if ManifestWork exists and has type="Applied", status="True" - - type: "Applied" - status: - expression: | - has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].status : "False" - reason: - expression: | - has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? 
resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].reason : "ManifestWorkNotDiscovered" - message: - expression: | - has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].message : "ManifestWork not discovered from Maestro or no Applied condition" +post: + payloads: + - name: "statusPayload" + build: + adapter: "{{ .adapter.name }}" + conditions: + # Applied: Check if ManifestWork exists and has type="Applied", status="True" + - type: "Applied" + status: + expression: | + has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].status : "False" + reason: + expression: | + has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].reason : "ManifestWorkNotDiscovered" + message: + expression: | + has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? 
resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].message : "ManifestWork not discovered from Maestro or no Applied condition" - # Available: Check if nested discovered manifests are available on the spoke cluster - # Each nested discovery is enriched with top-level "conditions" from status.resourceStatus.manifests[] - - type: "Available" - status: - expression: | - has(resources.namespace0) && has(resources.namespace0.conditions) - && resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - && has(resources.configmap0) && has(resources.configmap0.conditions) - && resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "True" - : "False" - reason: - expression: | - !(has(resources.namespace0) && has(resources.namespace0.conditions)) - ? "NamespaceNotDiscovered" - : !resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "NamespaceNotAvailable" - : !(has(resources.configmap0) && has(resources.configmap0.conditions)) - ? "ConfigMapNotDiscovered" - : !resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "ConfigMapNotAvailable" - : "AllResourcesAvailable" - message: - expression: | - !(has(resources.namespace0) && has(resources.namespace0.conditions)) - ? "Namespace not discovered from ManifestWork" - : !resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "Namespace not yet available on spoke cluster" - : !(has(resources.configmap0) && has(resources.configmap0.conditions)) - ? "ConfigMap not discovered from ManifestWork" - : !resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? 
"ConfigMap not yet available on spoke cluster" - : "All manifests (namespace, configmap) are available on spoke cluster" + # Available: Check if nested discovered manifests are available on the spoke cluster + # Each nested discovery is enriched with top-level "conditions" from status.resourceStatus.manifests[] + - type: "Available" + status: + expression: | + has(resources.namespace0) && has(resources.namespace0.conditions) + && resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + && has(resources.configmap0) && has(resources.configmap0.conditions) + && resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "True" + : "False" + reason: + expression: | + !(has(resources.namespace0) && has(resources.namespace0.conditions)) + ? "NamespaceNotDiscovered" + : !resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "NamespaceNotAvailable" + : !(has(resources.configmap0) && has(resources.configmap0.conditions)) + ? "ConfigMapNotDiscovered" + : !resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "ConfigMapNotAvailable" + : "AllResourcesAvailable" + message: + expression: | + !(has(resources.namespace0) && has(resources.namespace0.conditions)) + ? "Namespace not discovered from ManifestWork" + : !resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "Namespace not yet available on spoke cluster" + : !(has(resources.configmap0) && has(resources.configmap0.conditions)) + ? "ConfigMap not discovered from ManifestWork" + : !resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? 
"ConfigMap not yet available on spoke cluster" + : "All manifests (namespace, configmap) are available on spoke cluster" - # Health: Adapter execution status — surfaces errors from any phase - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" - && !adapter.?resourcesSkipped.orValue(false) - ? "True" - : "False" - reason: - expression: | - adapter.?executionStatus.orValue("") != "success" - ? "ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") - : adapter.?resourcesSkipped.orValue(false) - ? "ResourcesSkipped" - : "Healthy" - message: - expression: | - adapter.?executionStatus.orValue("") != "success" - ? "Adapter failed at phase [" - + adapter.?executionError.?phase.orValue("unknown") - + "] step [" - + adapter.?executionError.?step.orValue("unknown") - + "]: " - + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) - : adapter.?resourcesSkipped.orValue(false) - ? "Resources skipped: " + adapter.?skipReason.orValue("unknown reason") - : "Adapter execution completed successfully" + # Health: Adapter execution status — surfaces errors from any phase + - type: "Health" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" + && !adapter.?resourcesSkipped.orValue(false) + ? "True" + : "False" + reason: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown") + : adapter.?resourcesSkipped.orValue(false) + ? "ResourcesSkipped" + : "Healthy" + message: + expression: | + adapter.?executionStatus.orValue("") != "success" + ? "Adapter failed at phase [" + + adapter.?executionError.?phase.orValue("unknown") + + "] step [" + + adapter.?executionError.?step.orValue("unknown") + + "]: " + + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details")) + : adapter.?resourcesSkipped.orValue(false) + ? 
"Resources skipped: " + adapter.?skipReason.orValue("unknown reason") + : "Adapter execution completed successfully" - observed_generation: - expression: "generation" - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + observed_generation: + expression: "generation" + observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - # Extract data from discovered ManifestWork from Maestro - data: - manifestwork: - name: - expression: | - has(resources.resource0) && has(resources.resource0.metadata) - ? resources.resource0.metadata.name - : "" - consumer: - expression: | - has(resources.resource0) && has(resources.resource0.metadata) - ? resources.resource0.metadata.namespace - : placementClusterName - configmap: - name: - expression: | - has(resources.configmap0) && has(resources.configmap0.metadata) - ? resources.configmap0.metadata.name - : "" - clusterId: - expression: | - has(resources.configmap0) && has(resources.configmap0.data) && has(resources.configmap0.data.cluster_id) - ? resources.configmap0.data.cluster_id - : clusterId - namespace: - name: - expression: | - has(resources.namespace0) && has(resources.namespace0.metadata) - ? resources.namespace0.metadata.name - : "" - phase: - expression: | - has(resources.namespace0) && has(resources.namespace0.statusFeedback) && has(resources.namespace0.statusFeedback.values) - && resources.namespace0.statusFeedback.values.exists(v, has(v.name) && v.name == "phase" && has(v.fieldValue)) - ? resources.namespace0.statusFeedback.values.filter(v, v.name == "phase")[0].fieldValue.string - : "Unknown" + # Extract data from discovered ManifestWork from Maestro + data: + manifestwork: + name: + expression: | + has(resources.resource0) && has(resources.resource0.metadata) + ? resources.resource0.metadata.name + : "" + consumer: + expression: | + has(resources.resource0) && has(resources.resource0.metadata) + ? 
resources.resource0.metadata.namespace + : placementClusterName + configmap: + name: + expression: | + has(resources.configmap0) && has(resources.configmap0.metadata) + ? resources.configmap0.metadata.name + : "" + clusterId: + expression: | + has(resources.configmap0) && has(resources.configmap0.data) && has(resources.configmap0.data.cluster_id) + ? resources.configmap0.data.cluster_id + : clusterId + namespace: + name: + expression: | + has(resources.namespace0) && has(resources.namespace0.metadata) + ? resources.namespace0.metadata.name + : "" + phase: + expression: | + has(resources.namespace0) && has(resources.namespace0.statusFeedback) && has(resources.namespace0.statusFeedback.values) + && resources.namespace0.statusFeedback.values.exists(v, has(v.name) && v.name == "phase" && has(v.fieldValue)) + ? resources.namespace0.statusFeedback.values.filter(v, v.name == "phase")[0].fieldValue.string + : "Unknown" - postActions: - - name: "reportClusterStatus" - apiCall: - method: "POST" - url: "/clusters/{{ .clusterId }}/statuses" - headers: - - name: "Content-Type" - value: "application/json" - body: "{{ .statusPayload }}" + post_actions: + - name: "reportClusterStatus" + api_call: + method: "POST" + url: "/clusters/{{ .clusterId }}/statuses" + headers: + - name: "Content-Type" + value: "application/json" + body: "{{ .statusPayload }}" diff --git a/helm/adapter3/adapter-config.yaml b/helm/adapter3/adapter-config.yaml index 4cb965a..c39813f 100644 --- a/helm/adapter3/adapter-config.yaml +++ b/helm/adapter3/adapter-config.yaml @@ -1,33 +1,26 @@ # HyperFleet Adapter deployment configuration for adapter3 -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: +adapter: name: adapter3 - labels: - hyperfleet.io/adapter-type: kubernetes - hyperfleet.io/component: adapter -spec: - adapter: - version: "0.1.0" + version: "0.2.0" - # Log the full merged configuration after load (default: false) - debugConfig: true - log: - level: debug +# Log the full 
merged configuration after load (default: false) +debug_config: true +log: + level: debug - clients: - hyperfleetApi: - baseUrl: http://hyperfleet-api:8000 - version: v1 - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential +clients: + hyperfleet_api: + base_url: http://hyperfleet-api:8000 + version: v1 + timeout: 2s + retry_attempts: 3 + retry_backoff: exponential - broker: - # These values are overridden at deploy time via env vars from Helm values - subscriptionId: "placeholder" - topic: "placeholder" + broker: + # These values are overridden at deploy time via env vars from Helm values + subscription_id: "placeholder" + topic: "placeholder" - kubernetes: - apiVersion: "v1" - #kubeConfigPath: PATH_TO_KUBECONFIG # for local development + kubernetes: + api_version: "v1" + #kube_config_path: PATH_TO_KUBECONFIG # for local development diff --git a/helm/adapter3/adapter-task-config.yaml b/helm/adapter3/adapter-task-config.yaml index 9746668..69e762a 100644 --- a/helm/adapter3/adapter-task-config.yaml +++ b/helm/adapter3/adapter-task-config.yaml @@ -1,170 +1,158 @@ # HyperFleet Adapter task configuration for adapter3 -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - name: adapter3 - labels: - hyperfleet.io/adapter-type: adapter3 - hyperfleet.io/component: adapter -spec: - # Parameters with all required variables - params: +params: - - name: "nodepoolId" - source: "event.id" - type: "string" - required: true + - name: "nodepoolId" + source: "event.id" + type: "string" + required: true + + - name: "clusterId" + source: "event.owner_references.id" + type: "string" + required: true - - name: "generation" - source: "event.generation" - type: "int" - required: true + - name: "generation" + source: "event.generation" + type: "int" + required: true - - name: "nodepoolHref" - source: "event.href" - type: "string" - required: true - description: "Full API href for the nodepool, e.g. 
/api/hyperfleet/v1/clusters/{cid}/nodepools/{npid}" + - name: "namespace" + source: "env.NAMESPACE" + type: "string" - - name: "namespace" - source: "env.NAMESPACE" - type: "string" +# Preconditions with valid operators and CEL expressions +preconditions: + - name: "nodepoolStatus" + api_call: + method: "GET" + url: "/clusters/{{ .clusterId }}/nodepools/{{ .nodepoolId }}" + timeout: 10s + retry_attempts: 3 + retry_backoff: "exponential" + capture: + - name: "nodepoolName" + field: "name" + - name: "generation" + field: "generation" + - name: "readyConditionStatus" + expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + # Structured conditions with valid operators + conditions: + - field: "readyConditionStatus" + operator: "equals" + value: "False" - # Preconditions with valid operators and CEL expressions - preconditions: - - name: "nodepoolStatus" - apiCall: - method: "GET" - url: "{{ slice .nodepoolHref 18 }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - capture: - - name: "nodepoolName" - field: "name" - - name: "generation" - field: "generation" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? 
status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - # Structured conditions with valid operators - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "False" + - name: "validationCheck" + # Valid CEL expression + expression: | + readyConditionStatus == "False" - - name: "validationCheck" - # Valid CEL expression - expression: | - readyConditionStatus == "False" - - # Resources with valid K8s manifests - resources: - - name: "resource0" - transport: - client: "kubernetes" - manifest: - apiVersion: v1 - kind: ConfigMap - data: - nodepoolId: "{{ .nodepoolId }}" - - metadata: - name: "{{ .nodepoolId }}-adapter3-configmap" - namespace: "{{ .namespace }}" - labels: - app.kubernetes.io/component: adapter-task-config - app.kubernetes.io/instance: adapter3 - app.kubernetes.io/name: hyperfleet-adapter - app.kubernetes.io/version: 1.0.0 - hyperfleet.io/nodepool-id: "{{ .nodepoolId }}" - hyperfleet.io/nodepool-name: "{{ .nodepoolName }}" - discovery: +# Resources with valid K8s manifests +resources: + - name: "resource0" + transport: + client: "kubernetes" + manifest: + apiVersion: v1 + kind: ConfigMap + data: + nodepoolId: "{{ .nodepoolId }}" + metadata: + name: "{{ .nodepoolId }}-adapter3-configmap" namespace: "{{ .namespace }}" - bySelectors: - labelSelector: - hyperfleet.io/nodepool-id: "{{ .nodepoolId }}" - hyperfleet.io/nodepool-name: "{{ .nodepoolName }}" + labels: + app.kubernetes.io/component: adapter-task-config + app.kubernetes.io/instance: adapter3 + app.kubernetes.io/name: hyperfleet-adapter + app.kubernetes.io/version: 1.0.0 + hyperfleet.io/nodepool-id: "{{ .nodepoolId }}" + hyperfleet.io/nodepool-name: "{{ .nodepoolName }}" + discovery: + namespace: "{{ .namespace }}" + by_selectors: + label_selector: + hyperfleet.io/nodepool-id: "{{ .nodepoolId }}" + hyperfleet.io/nodepool-name: "{{ .nodepoolName }}" - # Post-processing with valid CEL expressions - post: - payloads: - - name: "statusPayload" - build: - # TODO: this 
should come from config.adapter.metadata.name - adapter: "adapter3" - conditions: - # Applied: Job successfully created - - type: "Applied" - status: - expression: | - has(resources.resource0.metadata.creationTimestamp) - ? "True" +# Post-processing with valid CEL expressions +post: + payloads: + - name: "statusPayload" + build: + adapter: "{{ .adapter.name }}" + conditions: + # Applied: Job successfully created + - type: "Applied" + status: + expression: | + has(resources.resource0.metadata.creationTimestamp) + ? "True" + : "False" + reason: + expression: | + has(resources.resource0.metadata.creationTimestamp) + ? "ConfigMap manifest applied successfully" + : "ConfigMap is pending to be applied" + message: + expression: | + has(resources.resource0.metadata.creationTimestamp) + ? "ConfigMap manifest applied successfully" + : "ConfigMap is pending to be applied" + # Available: Check job status conditions + - type: "Available" + status: + expression: | + has(resources.resource0.data.nodepoolId) + ? "True" : "False" - reason: - expression: | - has(resources.resource0.metadata.creationTimestamp) - ? "ConfigMap manifest applied successfully" - : "ConfigMap is pending to be applied" - message: - expression: | - has(resources.resource0.metadata.creationTimestamp) - ? "ConfigMap manifest applied successfully" - : "ConfigMap is pending to be applied" - # Available: Check job status conditions - - type: "Available" - status: - expression: | - has(resources.resource0.data.nodepoolId) - ? "True" - : "False" - reason: - expression: | - has(resources.resource0.data.nodepoolId) - ? "ConfigMap data available" - : "ConfigMap data not yet available" - message: - expression: | - has(resources.resource0.data.nodepoolId) - ? "ConfigMap data available" - : "ConfigMap data not yet available" - # Health: Adapter execution status (runtime) - - type: "Health" - status: - expression: | - has(resources.resource0.data.nodepoolId) - ? 
"True" - : "False" - reason: - expression: | - has(resources.resource0.data.nodepoolId) - ? "ConfigMap data available" - : "ConfigMap data not yet available" - message: - expression: | - toJson(resources.resource0) - # Event generation ID metadata field needs to use expression to avoid interpolation issues - observed_generation: - expression: "generation" - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - # Optional data field for adapter-specific metrics extracted from resources - data: - namespace: - name: - expression: | - resources.?resource0.?metadata.?name.orValue("") - creationTimestamp: - expression: | - resources.?resource0.?metadata.?creationTimestamp.orValue("") + reason: + expression: | + has(resources.resource0.data.nodepoolId) + ? "ConfigMap data available" + : "ConfigMap data not yet available" + message: + expression: | + has(resources.resource0.data.nodepoolId) + ? "ConfigMap data available" + : "ConfigMap data not yet available" + # Health: Adapter execution status (runtime) + - type: "Health" + status: + expression: | + has(resources.resource0.data.nodepoolId) + ? "True" + : "False" + reason: + expression: | + has(resources.resource0.data.nodepoolId) + ? 
"ConfigMap data available" + : "ConfigMap data not yet available" + message: + expression: | + toJson(resources.resource0) + # Event generation ID metadata field needs to use expression to avoid interpolation issues + observed_generation: + expression: "generation" + observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + # Optional data field for adapter-specific metrics extracted from resources + data: + namespace: + name: + expression: | + resources.?resource0.?metadata.?name.orValue("") + creationTimestamp: + expression: | + resources.?resource0.?metadata.?creationTimestamp.orValue("") - postActions: - - name: "reportNodepoolStatus" - apiCall: - method: "POST" - url: "{{ slice .nodepoolHref 18 }}/statuses" - headers: - - name: "Content-Type" - value: "application/json" - body: "{{ .statusPayload }}" + post_actions: + - name: "reportNodepoolStatus" + api_call: + method: "POST" + url: "/clusters/{{ .clusterId }}/nodepools/{{ .nodepoolId }}/statuses" + headers: + - name: "Content-Type" + value: "application/json" + body: "{{ .statusPayload }}" diff --git a/helm/sentinel-nodepools/values.yaml b/helm/sentinel-nodepools/values.yaml index 65e2285..9bf88ca 100644 --- a/helm/sentinel-nodepools/values.yaml +++ b/helm/sentinel-nodepools/values.yaml @@ -10,6 +10,15 @@ sentinel: resourceSelector: [] hyperfleetApi: baseUrl: http://hyperfleet-api:8000 + messageData: + id: "resource.id" + kind: "resource.kind" + href: "resource.href" + generation: "resource.generation" + owner_references: + id: "resource.owner_references.id" + href: "resource.owner_references.href" + kind: "resource.owner_references.kind" broker: type: googlepubsub