diff --git a/v1/providers/launchpad/instance_create.go b/v1/providers/launchpad/instance_create.go index 229dabdd..1034ab1e 100644 --- a/v1/providers/launchpad/instance_create.go +++ b/v1/providers/launchpad/instance_create.go @@ -132,7 +132,7 @@ func (l launchpadCreateAttrs) generateTags(cloudCredRefID string) v1.Tags { func (c *LaunchpadClient) getLaunchpadIPAllowlist(ctx context.Context, firewallRules v1.FirewallRules) []string { if len(firewallRules.EgressRules) > 0 { - c.logger.Info(ctx, "cloud/launchpad egress rules not supported", v1.LogField("egressRules", firewallRules.EgressRules)) + c.logger.Debug(ctx, "cloud/launchpad egress rules not supported", v1.LogField("egressRules", firewallRules.EgressRules)) } ips := []string{} for _, rule := range firewallRules.IngressRules { diff --git a/v1/providers/launchpad/utils.go b/v1/providers/launchpad/utils.go index 607f4d25..7f4468bd 100644 --- a/v1/providers/launchpad/utils.go +++ b/v1/providers/launchpad/utils.go @@ -12,7 +12,6 @@ import ( ) func (c *LaunchpadClient) handleLaunchpadAPIErr(ctx context.Context, resp *http.Response, err error) error { - c.logger.Info(ctx, "Launchpad Error", v1.LogField("status", resp.Status)) body := "" defer errors.HandleErrDefer(resp.Body.Close) if apiErr, ok := err.(openapi.GenericOpenAPIError); ok { diff --git a/v1/providers/nebius/instance.go b/v1/providers/nebius/instance.go index bef3e6ea..7b126ff7 100644 --- a/v1/providers/nebius/instance.go +++ b/v1/providers/nebius/instance.go @@ -25,7 +25,7 @@ func (c *NebiusClient) CreateInstance(ctx context.Context, attrs v1.CreateInstan cleanupOnError := true defer func() { if cleanupOnError { - c.logger.Info(ctx, "cleaning up resources after instance creation failure", + c.logger.Debug(ctx, "cleaning up resources after instance creation failure", v1.LogField("refID", attrs.RefID), v1.LogField("instanceID", instanceID), v1.LogField("networkID", networkID), @@ -122,7 +122,7 @@ func (c *NebiusClient) CreateInstance(ctx context.Context, 
attrs v1.CreateInstan // Add labels/tags to metadata (always create labels for resource tracking) createReq.Metadata.Labels = make(map[string]string) - c.logger.Info(ctx, "Setting instance tags during CreateInstance", + c.logger.Debug(ctx, "Setting instance tags during CreateInstance", v1.LogField("providedTagsCount", len(attrs.Tags)), v1.LogField("providedTags", fmt.Sprintf("%+v", attrs.Tags)), v1.LogField("refID", attrs.RefID)) @@ -166,7 +166,7 @@ func (c *NebiusClient) CreateInstance(ctx context.Context, attrs v1.CreateInstan // Wait for instance to reach a stable state (RUNNING or terminal failure) // This prevents leaving orphaned resources if the instance fails after creation - c.logger.Info(ctx, "waiting for instance to reach RUNNING state", + c.logger.Debug(ctx, "waiting for instance to reach RUNNING state", v1.LogField("instanceID", instanceID), v1.LogField("refID", attrs.RefID)) @@ -178,7 +178,7 @@ func (c *NebiusClient) CreateInstance(ctx context.Context, attrs v1.CreateInstan return nil, fmt.Errorf("instance failed to reach RUNNING state: %w", err) } - // Return the full instance details with IP addresses and SSH info + // Return the full instance details with IP addresses and SSH info createdInstance.RefID = attrs.RefID createdInstance.CloudCredRefID = c.refID createdInstance.Tags = attrs.Tags @@ -379,7 +379,7 @@ func (c *NebiusClient) waitForInstanceRunning(ctx context.Context, instanceID v1 deadline := time.Now().Add(timeout) pollInterval := 10 * time.Second - c.logger.Info(ctx, "polling instance state until RUNNING or terminal failure", + c.logger.Debug(ctx, "polling instance state until RUNNING or terminal failure", v1.LogField("instanceID", instanceID), v1.LogField("refID", refID), v1.LogField("timeout", timeout.String())) @@ -405,13 +405,13 @@ func (c *NebiusClient) waitForInstanceRunning(ctx context.Context, instanceID v1 continue } - c.logger.Info(ctx, "instance state check", + c.logger.Debug(ctx, "instance state check",
v1.LogField("instanceID", instanceID), v1.LogField("status", instance.Status.LifecycleStatus)) // Check for success: RUNNING state if instance.Status.LifecycleStatus == v1.LifecycleStatusRunning { - c.logger.Info(ctx, "instance reached RUNNING state", + c.logger.Debug(ctx, "instance reached RUNNING state", v1.LogField("instanceID", instanceID), v1.LogField("refID", refID)) return instance, nil @@ -425,7 +425,7 @@ func (c *NebiusClient) waitForInstanceRunning(ctx context.Context, instanceID v1 // Instance is still in transitional state (PENDING, STARTING, etc.) // Wait and poll again - c.logger.Info(ctx, "instance still transitioning, waiting...", + c.logger.Debug(ctx, "instance still transitioning, waiting...", v1.LogField("instanceID", instanceID), v1.LogField("currentStatus", instance.Status.LifecycleStatus), v1.LogField("pollInterval", pollInterval.String())) @@ -439,7 +439,7 @@ func (c *NebiusClient) waitForInstanceState(ctx context.Context, instanceID v1.C deadline := time.Now().Add(timeout) pollInterval := 5 * time.Second - c.logger.Info(ctx, "waiting for instance to reach target state", + c.logger.Debug(ctx, "waiting for instance to reach target state", v1.LogField("instanceID", instanceID), v1.LogField("targetState", targetState), v1.LogField("timeout", timeout.String())) @@ -465,14 +465,14 @@ func (c *NebiusClient) waitForInstanceState(ctx context.Context, instanceID v1.C continue } - c.logger.Info(ctx, "instance state check", + c.logger.Debug(ctx, "instance state check", v1.LogField("instanceID", instanceID), v1.LogField("currentState", instance.Status.LifecycleStatus), v1.LogField("targetState", targetState)) // Check if we've reached the target state if instance.Status.LifecycleStatus == targetState { - c.logger.Info(ctx, "instance reached target state", + c.logger.Debug(ctx, "instance reached target state", v1.LogField("instanceID", instanceID), v1.LogField("state", targetState)) return nil @@ -488,7 +488,7 @@ func (c *NebiusClient) 
waitForInstanceState(ctx context.Context, instanceID v1.C } // Instance is still transitioning, wait and poll again - c.logger.Info(ctx, "instance still transitioning, waiting...", + c.logger.Debug(ctx, "instance still transitioning, waiting...", v1.LogField("instanceID", instanceID), v1.LogField("currentState", instance.Status.LifecycleStatus), v1.LogField("targetState", targetState), @@ -503,7 +503,7 @@ func (c *NebiusClient) waitForInstanceDeleted(ctx context.Context, instanceID v1 deadline := time.Now().Add(timeout) pollInterval := 5 * time.Second - c.logger.Info(ctx, "waiting for instance to be fully deleted", + c.logger.Debug(ctx, "waiting for instance to be fully deleted", v1.LogField("instanceID", instanceID), v1.LogField("timeout", timeout.String())) @@ -523,7 +523,7 @@ func (c *NebiusClient) waitForInstanceDeleted(ctx context.Context, instanceID v1 if err != nil { // Check if it's a NotFound error - that means the instance is fully deleted if isNotFoundError(err) { - c.logger.Info(ctx, "instance successfully deleted (NotFound)", + c.logger.Debug(ctx, "instance successfully deleted (NotFound)", v1.LogField("instanceID", instanceID)) return nil } @@ -535,19 +535,19 @@ func (c *NebiusClient) waitForInstanceDeleted(ctx context.Context, instanceID v1 } // Instance still exists - check its state - c.logger.Info(ctx, "instance still exists, checking state", + c.logger.Debug(ctx, "instance still exists, checking state", v1.LogField("instanceID", instanceID), v1.LogField("state", instance.Status.LifecycleStatus)) // If instance is in TERMINATED state, consider it deleted if instance.Status.LifecycleStatus == v1.LifecycleStatusTerminated { - c.logger.Info(ctx, "instance reached TERMINATED state", + c.logger.Debug(ctx, "instance reached TERMINATED state", v1.LogField("instanceID", instanceID)) return nil } // Instance still in DELETING or other transitional state, wait and poll again - c.logger.Info(ctx, "instance still deleting, waiting...", + c.logger.Debug(ctx, 
"instance still deleting, waiting...", v1.LogField("instanceID", instanceID), v1.LogField("currentState", instance.Status.LifecycleStatus), v1.LogField("pollInterval", pollInterval.String())) @@ -584,7 +584,7 @@ func extractImageFamily(bootDisk *compute.AttachedDiskSpec) string { } func (c *NebiusClient) TerminateInstance(ctx context.Context, instanceID v1.CloudProviderInstanceID) error { - c.logger.Info(ctx, "initiating instance termination", + c.logger.Debug(ctx, "initiating instance termination", v1.LogField("instanceID", instanceID)) // Get instance details to retrieve associated resource IDs @@ -621,7 +621,7 @@ func (c *NebiusClient) TerminateInstance(ctx context.Context, instanceID v1.Clou return fmt.Errorf("instance termination failed: %v", finalOp.Status()) } - c.logger.Info(ctx, "delete operation completed, waiting for instance to be fully deleted", + c.logger.Debug(ctx, "delete operation completed, waiting for instance to be fully deleted", v1.LogField("instanceID", instanceID)) // Step 2: Wait for instance to be actually deleted (not just "DELETING") @@ -631,7 +631,7 @@ func (c *NebiusClient) TerminateInstance(ctx context.Context, instanceID v1.Clou return fmt.Errorf("instance failed to complete deletion: %w", err) } - c.logger.Info(ctx, "instance fully deleted, proceeding with resource cleanup", + c.logger.Debug(ctx, "instance fully deleted, proceeding with resource cleanup", v1.LogField("instanceID", instanceID)) // Step 3: Delete boot disk if it exists and wasn't auto-deleted @@ -651,7 +651,7 @@ func (c *NebiusClient) TerminateInstance(ctx context.Context, instanceID v1.Clou v1.LogField("subnetID", subnetID)) } - c.logger.Info(ctx, "instance successfully terminated and cleaned up", + c.logger.Debug(ctx, "instance successfully terminated and cleaned up", v1.LogField("instanceID", instanceID)) return nil @@ -669,21 +669,21 @@ func (c *NebiusClient) deleteInstanceIfExists(ctx context.Context, instanceID v1 if err != nil { // Ignore NotFound errors - 
instance may have already been deleted if isNotFoundError(err) { - c.logger.Info(ctx, "instance already deleted or not found", + c.logger.Debug(ctx, "instance already deleted or not found", v1.LogField("instanceID", instanceID)) return nil } return fmt.Errorf("failed to delete instance: %w", err) } - c.logger.Info(ctx, "successfully deleted instance", + c.logger.Debug(ctx, "successfully deleted instance", v1.LogField("instanceID", instanceID)) return nil } //nolint:gocognit,gocyclo,funlen // Complex function listing instances across multiple projects with filtering func (c *NebiusClient) ListInstances(ctx context.Context, args v1.ListInstancesArgs) ([]v1.Instance, error) { - c.logger.Info(ctx, "listing nebius instances", + c.logger.Debug(ctx, "listing nebius instances", v1.LogField("primaryProjectID", c.projectID), v1.LogField("location", c.location), v1.LogField("tagFilters", fmt.Sprintf("%+v", args.TagFilters)), @@ -700,7 +700,7 @@ func (c *NebiusClient) ListInstances(ctx context.Context, args v1.ListInstancesA projectToRegion = map[string]string{c.projectID: c.location} } - c.logger.Info(ctx, "querying instances across all projects", + c.logger.Debug(ctx, "querying instances across all projects", v1.LogField("projectCount", len(projectToRegion)), v1.LogField("projects", fmt.Sprintf("%v", projectToRegion))) @@ -727,7 +727,7 @@ func (c *NebiusClient) ListInstances(ctx context.Context, args v1.ListInstancesA } if len(response.Items) > 0 { - c.logger.Info(ctx, "found instances in project", + c.logger.Debug(ctx, "found instances in project", v1.LogField("projectID", projectID), v1.LogField("region", projectToRegion[projectID]), v1.LogField("count", len(response.Items)), @@ -743,11 +743,11 @@ func (c *NebiusClient) ListInstances(ctx context.Context, args v1.ListInstancesA } if len(allNebiusInstances) == 0 { - c.logger.Info(ctx, "no instances found across all projects") + c.logger.Debug(ctx, "no instances found across all projects") return []v1.Instance{}, nil } - 
c.logger.Info(ctx, "found raw instances from Nebius API across all projects", + c.logger.Debug(ctx, "found raw instances from Nebius API across all projects", v1.LogField("totalCount", len(allNebiusInstances))) // Convert and filter each Nebius instance to v1.Instance @@ -759,7 +759,7 @@ func (c *NebiusClient) ListInstances(ctx context.Context, args v1.ListInstancesA continue } - c.logger.Info(ctx, "Processing instance from Nebius API", + c.logger.Debug(ctx, "Processing instance from Nebius API", v1.LogField("instanceID", nebiusInstance.Metadata.Id), v1.LogField("instanceName", nebiusInstance.Metadata.Name), v1.LogField("rawLabelsCount", len(nebiusInstance.Metadata.Labels)), @@ -774,27 +774,27 @@ func (c *NebiusClient) ListInstances(ctx context.Context, args v1.ListInstancesA continue } - c.logger.Info(ctx, "Instance after conversion", + c.logger.Debug(ctx, "Instance after conversion", v1.LogField("instanceID", instance.CloudID), v1.LogField("convertedTagsCount", len(instance.Tags)), v1.LogField("convertedTags", fmt.Sprintf("%+v", instance.Tags))) // Apply tag filtering if TagFilters are provided if len(args.TagFilters) > 0 { - c.logger.Info(ctx, "🔎 Checking tag filters", + c.logger.Debug(ctx, "Checking tag filters", v1.LogField("instanceID", instance.CloudID), v1.LogField("requiredFilters", fmt.Sprintf("%+v", args.TagFilters)), v1.LogField("instanceTags", fmt.Sprintf("%+v", instance.Tags))) if !matchesTagFilters(instance.Tags, args.TagFilters) { - c.logger.Warn(ctx, "❌ Instance FILTERED OUT by tag filters", + c.logger.Warn(ctx, "Instance FILTERED OUT by tag filters", v1.LogField("instanceID", instance.CloudID), v1.LogField("instanceTags", fmt.Sprintf("%+v", instance.Tags)), v1.LogField("requiredFilters", fmt.Sprintf("%+v", args.TagFilters))) continue } - c.logger.Info(ctx, "✅ Instance PASSED tag filters", + c.logger.Debug(ctx, "Instance PASSED tag filters", v1.LogField("instanceID", instance.CloudID)) } @@ -829,7 +829,7 @@ func (c *NebiusClient) ListInstances(ctx 
context.Context, args v1.ListInstancesA instances = append(instances, *instance) } - c.logger.Info(ctx, "successfully listed and filtered instances", + c.logger.Debug(ctx, "successfully listed and filtered instances", v1.LogField("totalFromAPI", len(allNebiusInstances)), v1.LogField("afterFiltering", len(instances))) @@ -868,7 +868,7 @@ func matchesTagFilters(instanceTags map[string]string, tagFilters map[string][]s //nolint:dupl // StopInstance and StartInstance have similar structure but different operations func (c *NebiusClient) StopInstance(ctx context.Context, instanceID v1.CloudProviderInstanceID) error { - c.logger.Info(ctx, "initiating instance stop operation", + c.logger.Debug(ctx, "initiating instance stop operation", v1.LogField("instanceID", instanceID)) // Initiate instance stop operation @@ -889,7 +889,7 @@ func (c *NebiusClient) StopInstance(ctx context.Context, instanceID v1.CloudProv return fmt.Errorf("instance stop failed: %v", finalOp.Status()) } - c.logger.Info(ctx, "stop operation completed, waiting for instance to reach STOPPED state", + c.logger.Debug(ctx, "stop operation completed, waiting for instance to reach STOPPED state", v1.LogField("instanceID", instanceID)) // Wait for instance to actually reach STOPPED state @@ -898,7 +898,7 @@ func (c *NebiusClient) StopInstance(ctx context.Context, instanceID v1.CloudProv return fmt.Errorf("instance failed to reach STOPPED state: %w", err) } - c.logger.Info(ctx, "instance successfully stopped", + c.logger.Debug(ctx, "instance successfully stopped", v1.LogField("instanceID", instanceID)) return nil @@ -906,7 +906,7 @@ func (c *NebiusClient) StopInstance(ctx context.Context, instanceID v1.CloudProv //nolint:dupl // StartInstance and StopInstance have similar structure but different operations func (c *NebiusClient) StartInstance(ctx context.Context, instanceID v1.CloudProviderInstanceID) error { - c.logger.Info(ctx, "initiating instance start operation", + c.logger.Debug(ctx, "initiating instance 
start operation", v1.LogField("instanceID", instanceID)) // Initiate instance start operation @@ -927,7 +927,7 @@ func (c *NebiusClient) StartInstance(ctx context.Context, instanceID v1.CloudPro return fmt.Errorf("instance start failed: %v", finalOp.Status()) } - c.logger.Info(ctx, "start operation completed, waiting for instance to reach RUNNING state", + c.logger.Debug(ctx, "start operation completed, waiting for instance to reach RUNNING state", v1.LogField("instanceID", instanceID)) // Wait for instance to actually reach RUNNING state @@ -936,7 +936,7 @@ func (c *NebiusClient) StartInstance(ctx context.Context, instanceID v1.CloudPro return fmt.Errorf("instance failed to reach RUNNING state: %w", err) } - c.logger.Info(ctx, "instance successfully started", + c.logger.Debug(ctx, "instance successfully started", v1.LogField("instanceID", instanceID)) return nil @@ -1364,7 +1364,7 @@ func (c *NebiusClient) getPublicImagesParent() string { // //nolint:gocognit,gocyclo,funlen // Complex function with multiple fallback strategies for parsing instance types func (c *NebiusClient) parseInstanceType(ctx context.Context, instanceTypeID string) (platform string, preset string, err error) { - c.logger.Info(ctx, "parsing instance type", + c.logger.Debug(ctx, "parsing instance type", v1.LogField("instanceTypeID", instanceTypeID), v1.LogField("projectID", c.projectID)) @@ -1376,7 +1376,7 @@ func (c *NebiusClient) parseInstanceType(ctx context.Context, instanceTypeID str return "", "", errors.WrapAndTrace(err) } - c.logger.Info(ctx, "listed platforms", + c.logger.Debug(ctx, "listed platforms", v1.LogField("platformCount", len(platformsResp.GetItems()))) // DOT Format: {platform-name}.{preset-name} @@ -1387,7 +1387,7 @@ func (c *NebiusClient) parseInstanceType(ctx context.Context, instanceTypeID str platformName := dotParts[0] presetName := dotParts[1] - c.logger.Info(ctx, "parsed DOT format instance type", + c.logger.Debug(ctx, "parsed DOT format instance type", 
v1.LogField("platformName", platformName), v1.LogField("presetName", presetName)) @@ -1401,7 +1401,7 @@ func (c *NebiusClient) parseInstanceType(ctx context.Context, instanceTypeID str // Verify the preset exists for _, preset := range p.Spec.Presets { if preset != nil && preset.Name == presetName { - c.logger.Info(ctx, "✓ DOT format EXACT MATCH", + c.logger.Debug(ctx, "DOT format EXACT MATCH", v1.LogField("platformName", p.Metadata.Name), v1.LogField("presetName", preset.Name)) return p.Metadata.Name, preset.Name, nil @@ -1410,7 +1410,7 @@ func (c *NebiusClient) parseInstanceType(ctx context.Context, instanceTypeID str // If preset not found but platform matches, use first preset if len(p.Spec.Presets) > 0 && p.Spec.Presets[0] != nil { - c.logger.Warn(ctx, "✗ DOT format - preset not found, using first preset", + c.logger.Warn(ctx, "DOT format - preset not found, using first preset", v1.LogField("requestedPreset", presetName), v1.LogField("fallbackPreset", p.Spec.Presets[0].Name)) return p.Metadata.Name, p.Spec.Presets[0].Name, nil @@ -1450,7 +1450,7 @@ func (c *NebiusClient) parseInstanceType(ctx context.Context, instanceTypeID str // Reconstruct the preset name from remaining parts presetName := strings.Join(parts[presetStartIdx:], "-") - c.logger.Info(ctx, "parsed NEW format instance type", + c.logger.Debug(ctx, "parsed NEW format instance type", v1.LogField("gpuType", gpuType), v1.LogField("presetName", presetName), v1.LogField("presetStartIdx", presetStartIdx)) @@ -1474,7 +1474,7 @@ func (c *NebiusClient) parseInstanceType(ctx context.Context, instanceTypeID str } } - c.logger.Info(ctx, "found matching platform", + c.logger.Debug(ctx, "found matching platform", v1.LogField("platformName", p.Metadata.Name), v1.LogField("platformID", p.Metadata.Id), v1.LogField("presetCount", len(p.Spec.Presets)), @@ -1484,7 +1484,7 @@ func (c *NebiusClient) parseInstanceType(ctx context.Context, instanceTypeID str // Verify the preset exists in this platform for _, preset := 
range p.Spec.Presets { if preset != nil && preset.Name == presetName { - c.logger.Info(ctx, "✓ EXACT MATCH - using requested preset", + c.logger.Debug(ctx, "EXACT MATCH - using requested preset", v1.LogField("platformName", p.Metadata.Name), v1.LogField("presetName", preset.Name)) return p.Metadata.Name, preset.Name, nil @@ -1493,7 +1493,7 @@ func (c *NebiusClient) parseInstanceType(ctx context.Context, instanceTypeID str // If preset not found, use first preset as fallback if len(p.Spec.Presets) > 0 && p.Spec.Presets[0] != nil { - c.logger.Warn(ctx, "✗ MISMATCH - preset not found, using FIRST preset as fallback", + c.logger.Warn(ctx, "MISMATCH - preset not found, using FIRST preset as fallback", v1.LogField("requestedPreset", presetName), v1.LogField("fallbackPreset", p.Spec.Presets[0].Name), v1.LogField("platformName", p.Metadata.Name), diff --git a/v1/providers/nebius/integration_test.go b/v1/providers/nebius/integration_test.go index 6283368c..91d5cd91 100644 --- a/v1/providers/nebius/integration_test.go +++ b/v1/providers/nebius/integration_test.go @@ -101,7 +101,7 @@ func waitForSSH(t *testing.T, publicIP, privateKey, sshUser string, timeout time conn, err := ssh.Dial("tcp", fmt.Sprintf("%s:22", publicIP), config) if err == nil { _ = conn.Close() // Explicitly ignore close error in test connectivity check - t.Logf("✓ SSH is ready on %s after %d attempts", publicIP, attempt) + t.Logf("SSH is ready on %s after %d attempts", publicIP, attempt) return nil } @@ -134,7 +134,7 @@ func testSSHConnectivity(t *testing.T, publicIP, privateKey, sshUser string) { client, err := ssh.Dial("tcp", fmt.Sprintf("%s:22", publicIP), config) require.NoError(t, err, "SSH connection should succeed") defer func() { _ = client.Close() }() - t.Log("✓ SSH connection established successfully") + t.Log("SSH connection established successfully") // Run a test command to verify functionality session, err := client.NewSession() @@ -149,7 +149,7 @@ func testSSHConnectivity(t *testing.T, 
publicIP, privateKey, sshUser string) { assert.Contains(t, outputStr, "SSH connectivity test successful", "Command output should contain test message") assert.NotEmpty(t, outputStr, "Command output should not be empty") - t.Logf("✓ SSH command execution successful") + t.Logf("SSH command execution successful") t.Logf(" Output: %s", outputStr) } @@ -247,7 +247,7 @@ func TestIntegration_InstanceLifecycle(t *testing.T) { // Step 0.5: Generate SSH key pair for testing (inspired by Shadeform's SSH key handling) t.Log("Generating SSH key pair for instance access...") privateKey, publicKey := generateTestSSHKeyPair(t) - t.Log("✓ SSH key pair generated successfully") + t.Log("SSH key pair generated successfully") // Step 1: Create instance with SSH key instanceRefID := "integration-test-" + time.Now().Format("20060102-150405") @@ -311,7 +311,7 @@ func TestIntegration_InstanceLifecycle(t *testing.T) { assert.NotEmpty(t, retrievedInstance.SSHUser, "SSH user should be set") assert.Equal(t, 22, retrievedInstance.SSHPort, "SSH port should be 22") assert.NotEmpty(t, retrievedInstance.Hostname, "Hostname should be set") - t.Logf("✓ SSH connectivity fields populated: IP=%s, User=%s, Port=%d", + t.Logf("SSH connectivity fields populated: IP=%s, User=%s, Port=%d", retrievedInstance.PublicIP, retrievedInstance.SSHUser, retrievedInstance.SSHPort) // Step 2.5: Wait for SSH to be ready (instances need time to boot and run cloud-init) @@ -326,7 +326,7 @@ func TestIntegration_InstanceLifecycle(t *testing.T) { // Step 2.6: Test actual SSH connectivity t.Log("Testing SSH connectivity and command execution...") testSSHConnectivity(t, retrievedInstance.PublicIP, privateKey, retrievedInstance.SSHUser) - t.Log("✓ SSH connectivity validated successfully") + t.Log("SSH connectivity validated successfully") } } else { t.Log("WARNING: No public IP available, skipping SSH connectivity test") @@ -343,7 +343,7 @@ func TestIntegration_InstanceLifecycle(t *testing.T) { for _, inst := range instances { 
if inst.CloudID == instanceCloudID { foundCreatedInstance = true - t.Logf("✓ Found created instance %s in list", instanceCloudID) + t.Logf("Found created instance %s in list", instanceCloudID) break } } @@ -353,25 +353,25 @@ func TestIntegration_InstanceLifecycle(t *testing.T) { t.Logf("Stopping instance: %s", instanceCloudID) err = client.StopInstance(ctx, instanceCloudID) require.NoError(t, err, "StopInstance should succeed") - t.Logf("✓ Successfully stopped instance %s", instanceCloudID) + t.Logf("Successfully stopped instance %s", instanceCloudID) // Verify instance is stopped stoppedInstance, err := client.GetInstance(ctx, instanceCloudID) require.NoError(t, err, "Should be able to get stopped instance") assert.Equal(t, v1.LifecycleStatusStopped, stoppedInstance.Status.LifecycleStatus, "Instance should be stopped") - t.Logf("✓ Verified instance status: %s", stoppedInstance.Status.LifecycleStatus) + t.Logf("Verified instance status: %s", stoppedInstance.Status.LifecycleStatus) // Step 5: Start instance t.Logf("Starting instance: %s", instanceCloudID) err = client.StartInstance(ctx, instanceCloudID) require.NoError(t, err, "StartInstance should succeed") - t.Logf("✓ Successfully started instance %s", instanceCloudID) + t.Logf("Successfully started instance %s", instanceCloudID) // Verify instance is running again startedInstance, err := client.GetInstance(ctx, instanceCloudID) require.NoError(t, err, "Should be able to get started instance") assert.Equal(t, v1.LifecycleStatusRunning, startedInstance.Status.LifecycleStatus, "Instance should be running") - t.Logf("✓ Verified instance status: %s", startedInstance.Status.LifecycleStatus) + t.Logf("Verified instance status: %s", startedInstance.Status.LifecycleStatus) // Step 6: Terminate instance // Note: Cleanup is registered via t.Cleanup() above to ensure deletion even on test failure diff --git a/v1/providers/nebius/smoke_test.go b/v1/providers/nebius/smoke_test.go index 8840d999..11403854 100644 --- 
a/v1/providers/nebius/smoke_test.go +++ b/v1/providers/nebius/smoke_test.go @@ -174,7 +174,7 @@ func createTestInstance(ctx context.Context, t *testing.T, client *NebiusClient, InstanceType: string(firstInstance.ID), }) if err == nil { - t.Logf("📊 Quota for %s: %d/%d %s (Available: %t)", + t.Logf("Quota for %s: %d/%d %s (Available: %t)", firstInstance.ID, quota.Current, quota.Maximum, quota.Unit, firstInstance.IsAvailable) } } @@ -232,7 +232,7 @@ func createTestInstance(ctx context.Context, t *testing.T, client *NebiusClient, if strings.Contains(strings.ToLower(it.Type), strings.ToLower(targetPlatform)) || strings.Contains(strings.ToLower(string(it.ID)), strings.ToLower(targetPlatform)) { selectedInstanceType = it - t.Logf("🎯 Found target platform: %s", targetPlatform) + t.Logf("Found target platform: %s", targetPlatform) break } } @@ -243,7 +243,7 @@ func createTestInstance(ctx context.Context, t *testing.T, client *NebiusClient, for _, it := range availableInstanceTypes { if strings.Contains(strings.ToLower(it.Type), "l40s") { selectedInstanceType = it - t.Logf("🎮 Found L40S GPU configuration") + t.Logf("Found L40S GPU configuration") break } } @@ -252,7 +252,7 @@ func createTestInstance(ctx context.Context, t *testing.T, client *NebiusClient, // Fallback to first available instance type if selectedInstanceType.ID == "" { selectedInstanceType = availableInstanceTypes[0] - t.Logf("⚡ Using fallback instance type") + t.Logf("Using fallback instance type") } instanceType := string(selectedInstanceType.ID) @@ -261,7 +261,7 @@ func createTestInstance(ctx context.Context, t *testing.T, client *NebiusClient, // Use an actual available x86_64 image family for platform compatibility imageFamily := "ubuntu22.04-cuda12" // Known working x86_64 family with CUDA support for L40S - t.Logf("🐧 Using working x86_64 image family: %s", imageFamily) + t.Logf("Using working x86_64 image family: %s", imageFamily) if len(images) > 0 { t.Logf("Available images: %d (showing architecture 
diversity)", len(images)) @@ -278,7 +278,7 @@ func createTestInstance(ctx context.Context, t *testing.T, client *NebiusClient, if customDiskSize := os.Getenv("NEBIUS_DISK_SIZE_GB"); customDiskSize != "" { if size, err := strconv.Atoi(customDiskSize); err == nil && size >= 50 { diskSize = units.Base2Bytes(int64(size) * int64(units.Gibibyte)) - t.Logf("💾 Using custom disk size: %dGB", size) + t.Logf("Using custom disk size: %dGB", size) } }