diff --git a/packages/opencode/test/altimate/connections.test.ts b/packages/opencode/test/altimate/connections.test.ts index e5facc6db0..d4baede6ca 100644 --- a/packages/opencode/test/altimate/connections.test.ts +++ b/packages/opencode/test/altimate/connections.test.ts @@ -1,4 +1,4 @@ -import { describe, expect, test, beforeEach, beforeAll, afterAll } from "bun:test" +import { describe, expect, test, beforeEach, afterEach, beforeAll, afterAll } from "bun:test" import * as Dispatcher from "../../src/altimate/native/dispatcher" // Disable telemetry via env var instead of mock.module @@ -10,9 +10,10 @@ afterAll(() => { delete process.env.ALTIMATE_TELEMETRY_DISABLED }) // --------------------------------------------------------------------------- import * as Registry from "../../src/altimate/native/connections/registry" +import { detectAuthMethod } from "../../src/altimate/native/connections/registry" import * as CredentialStore from "../../src/altimate/native/connections/credential-store" import { parseDbtProfiles } from "../../src/altimate/native/connections/dbt-profiles" -import { discoverContainers } from "../../src/altimate/native/connections/docker-discovery" +import { discoverContainers, containerToConfig } from "../../src/altimate/native/connections/docker-discovery" import { registerAll } from "../../src/altimate/native/connections/register" // --------------------------------------------------------------------------- @@ -74,6 +75,132 @@ describe("ConnectionRegistry", () => { }) }) +// --------------------------------------------------------------------------- +// loadFromEnv — env-var-based connection config loading +// --------------------------------------------------------------------------- + +describe("loadFromEnv via Registry.load()", () => { + const saved: Record<string, string | undefined> = {} + + function setEnv(key: string, value: string) { + saved[key] = process.env[key] + process.env[key] = value + } + + beforeEach(() => { + Registry.reset() + }) + + afterEach(() => { + 
for (const [key, orig] of Object.entries(saved)) { + if (orig === undefined) delete process.env[key] + else process.env[key] = orig + } + for (const key of Object.keys(saved)) delete saved[key] + }) + + test("parses valid JSON from ALTIMATE_CODE_CONN_* env vars", () => { + setEnv("ALTIMATE_CODE_CONN_MYDB", JSON.stringify({ type: "postgres", host: "localhost", port: 5432 })) + Registry.load() + const config = Registry.getConfig("mydb") + expect(config).toBeDefined() + expect(config?.type).toBe("postgres") + expect(config?.host).toBe("localhost") + }) + + test("lowercases connection name from env var suffix", () => { + setEnv("ALTIMATE_CODE_CONN_PROD_DB", JSON.stringify({ type: "snowflake", account: "abc" })) + Registry.load() + expect(Registry.getConfig("prod_db")).toBeDefined() + expect(Registry.getConfig("PROD_DB")).toBeUndefined() + }) + + test("ignores env var with invalid JSON", () => { + setEnv("ALTIMATE_CODE_CONN_BAD", "not-valid-json{") + Registry.load() + expect(Registry.getConfig("bad")).toBeUndefined() + }) + + test("ignores env var config without type field", () => { + setEnv("ALTIMATE_CODE_CONN_NOTYPE", JSON.stringify({ host: "localhost", port: 5432 })) + Registry.load() + expect(Registry.getConfig("notype")).toBeUndefined() + }) + + test("ignores non-object JSON values (string, number, array)", () => { + setEnv("ALTIMATE_CODE_CONN_STR", JSON.stringify("just a string")) + setEnv("ALTIMATE_CODE_CONN_NUM", JSON.stringify(42)) + setEnv("ALTIMATE_CODE_CONN_ARR", JSON.stringify([1, 2, 3])) + Registry.load() + expect(Registry.getConfig("str")).toBeUndefined() + expect(Registry.getConfig("num")).toBeUndefined() + expect(Registry.getConfig("arr")).toBeUndefined() + }) +}) + +// --------------------------------------------------------------------------- +// detectAuthMethod +// --------------------------------------------------------------------------- + +describe("detectAuthMethod", () => { + test("returns 'connection_string' for config with connection_string", 
() => { + expect(detectAuthMethod({ type: "postgres", connection_string: "postgresql://..." } as any)).toBe("connection_string") + }) + + test("returns 'key_pair' for Snowflake private_key_path", () => { + expect(detectAuthMethod({ type: "snowflake", private_key_path: "/path/to/key.p8" } as any)).toBe("key_pair") + }) + + test("returns 'key_pair' for camelCase privateKeyPath", () => { + expect(detectAuthMethod({ type: "snowflake", privateKeyPath: "/path/to/key.p8" } as any)).toBe("key_pair") + }) + + test("returns 'sso' for Snowflake externalbrowser", () => { + expect(detectAuthMethod({ type: "snowflake", authenticator: "EXTERNALBROWSER" } as any)).toBe("sso") + }) + + test("returns 'sso' for Okta URL authenticator", () => { + expect(detectAuthMethod({ type: "snowflake", authenticator: "https://myorg.okta.com" } as any)).toBe("sso") + }) + + test("returns 'oauth' for OAuth authenticator", () => { + expect(detectAuthMethod({ type: "snowflake", authenticator: "OAUTH" } as any)).toBe("oauth") + }) + + test("returns 'token' for access_token", () => { + expect(detectAuthMethod({ type: "databricks", access_token: "dapi..." 
} as any)).toBe("token") + }) + + test("returns 'password' for config with password", () => { + expect(detectAuthMethod({ type: "postgres", password: "secret" } as any)).toBe("password") + }) + + test("returns 'file' for duckdb", () => { + expect(detectAuthMethod({ type: "duckdb", path: "/data/my.db" } as any)).toBe("file") + }) + + test("returns 'file' for sqlite", () => { + expect(detectAuthMethod({ type: "sqlite", path: "/data/my.sqlite" } as any)).toBe("file") + }) + + test("returns 'connection_string' for mongodb without password", () => { + expect(detectAuthMethod({ type: "mongodb" } as any)).toBe("connection_string") + }) + + test("returns 'password' for mongo with password", () => { + expect(detectAuthMethod({ type: "mongo", password: "secret" } as any)).toBe("password") + }) + + test("returns 'unknown' for null/undefined", () => { + expect(detectAuthMethod(null)).toBe("unknown") + expect(detectAuthMethod(undefined)).toBe("unknown") + }) + + test("returns 'unknown' for empty config with no identifiable auth", () => { + expect(detectAuthMethod({ type: "postgres" } as any)).toBe("unknown") + }) +}) + // --------------------------------------------------------------------------- // CredentialStore (keytar not available in test environment) // --------------------------------------------------------------------------- @@ -135,6 +262,36 @@ describe("CredentialStore", () => { expect(sanitized.oauth_client_secret).toBeUndefined() expect(sanitized.authenticator).toBe("oauth") }) + + test("saveConnection strips all sensitive fields from complex config", async () => { + const config = { + type: "snowflake", + account: "abc123", + user: "svc_user", + password: "pw123", + private_key: "-----BEGIN PRIVATE KEY-----", + private_key_passphrase: "passphrase", + token: "oauth-token", + oauth_client_secret: "client-secret", + ssh_password: "ssh-pw", + connection_string: "mongodb://...", + } as any + const { sanitized, warnings } = await 
CredentialStore.saveConnection("complex", config) + + expect(sanitized.password).toBeUndefined() + expect(sanitized.private_key).toBeUndefined() + expect(sanitized.private_key_passphrase).toBeUndefined() + expect(sanitized.token).toBeUndefined() + expect(sanitized.oauth_client_secret).toBeUndefined() + expect(sanitized.ssh_password).toBeUndefined() + expect(sanitized.connection_string).toBeUndefined() + + expect(sanitized.type).toBe("snowflake") + expect(sanitized.account).toBe("abc123") + expect(sanitized.user).toBe("svc_user") + + expect(warnings).toHaveLength(7) + }) }) // --------------------------------------------------------------------------- @@ -261,6 +418,122 @@ snow: fs.rmSync(tmpDir, { recursive: true }) } }) + + // altimate_change start — tests for untested dbt profiles parser edge cases + test("resolves env_var with default fallback when env var is missing", async () => { + const fs = await import("fs") + const os = await import("os") + const path = await import("path") + + delete process.env.__TEST_DBT_MISSING_VAR_12345 + + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "dbt-test-")) + const profilesPath = path.join(tmpDir, "profiles.yml") + + fs.writeFileSync( + profilesPath, + ` +myproject: + outputs: + dev: + type: postgres + host: "{{ env_var('__TEST_DBT_MISSING_VAR_12345', 'localhost') }}" + port: 5432 + user: "{{ env_var('__TEST_DBT_MISSING_USER_12345', 'default_user') }}" + password: secret + dbname: mydb +`, + ) + + try { + const connections = await parseDbtProfiles(profilesPath) + expect(connections).toHaveLength(1) + expect(connections[0].config.host).toBe("localhost") + expect(connections[0].config.user).toBe("default_user") + } finally { + fs.rmSync(tmpDir, { recursive: true }) + } + }) + + test("skips 'config' top-level key (dbt global settings)", async () => { + const fs = await import("fs") + const os = await import("os") + const path = await import("path") + + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "dbt-test-")) + 
const profilesPath = path.join(tmpDir, "profiles.yml") + + fs.writeFileSync( + profilesPath, + ` +config: + send_anonymous_usage_stats: false + use_colors: true + +real_project: + outputs: + dev: + type: postgres + host: localhost + dbname: analytics +`, + ) + + try { + const connections = await parseDbtProfiles(profilesPath) + expect(connections).toHaveLength(1) + expect(connections[0].name).toBe("real_project_dev") + expect(connections.find((c) => c.name.startsWith("config"))).toBeUndefined() + } finally { + fs.rmSync(tmpDir, { recursive: true }) + } + }) + + test("handles multiple profiles with multiple outputs", async () => { + const fs = await import("fs") + const os = await import("os") + const path = await import("path") + + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "dbt-test-")) + const profilesPath = path.join(tmpDir, "profiles.yml") + + fs.writeFileSync( + profilesPath, + ` +warehouse_a: + outputs: + dev: + type: postgres + host: localhost + dbname: dev_db + prod: + type: postgres + host: prod.example.com + dbname: prod_db + +warehouse_b: + outputs: + staging: + type: snowflake + account: abc123 + user: admin + password: pw + database: STAGING + warehouse: COMPUTE_WH + schema: PUBLIC +`, + ) + + try { + const connections = await parseDbtProfiles(profilesPath) + expect(connections).toHaveLength(3) + const names = connections.map((c) => c.name).sort() + expect(names).toEqual(["warehouse_a_dev", "warehouse_a_prod", "warehouse_b_staging"]) + } finally { + fs.rmSync(tmpDir, { recursive: true }) + } + }) + // altimate_change end }) // --------------------------------------------------------------------------- @@ -272,6 +545,28 @@ describe("Docker discovery", () => { const containers = await discoverContainers() expect(containers).toEqual([]) }) + + test("containerToConfig omits undefined optional fields", () => { + const container = { + container_id: "def456", + name: "mysql_dev", + image: "mysql:8", + db_type: "mysql", + host: "127.0.0.1", + port: 
3306, + user: undefined as string | undefined, + password: undefined as string | undefined, + database: undefined as string | undefined, + status: "running", + } + const config = containerToConfig(container as any) + expect(config.type).toBe("mysql") + expect(config.host).toBe("127.0.0.1") + expect(config.port).toBe(3306) + expect(config.user).toBeUndefined() + expect(config.password).toBeUndefined() + expect(config.database).toBeUndefined() + }) }) // --------------------------------------------------------------------------- diff --git a/packages/opencode/test/altimate/dbt-lineage-helpers.test.ts b/packages/opencode/test/altimate/dbt-lineage-helpers.test.ts new file mode 100644 index 0000000000..31bfcd5ec0 --- /dev/null +++ b/packages/opencode/test/altimate/dbt-lineage-helpers.test.ts @@ -0,0 +1,330 @@ +/** + * Tests for dbt lineage helper functions: findModel, detectDialect, + * buildSchemaContext, and the top-level dbtLineage() error paths. + * + * These pure functions parse manifest data and build schema contexts + * for column-level lineage analysis. Zero tests existed previously. + * A bug in findModel or buildSchemaContext causes lineage to silently + * return empty results, which users see as "no lineage available". 
 + */ + +import { describe, test, expect, afterEach } from "bun:test" +import { dbtLineage } from "../../src/altimate/native/dbt/lineage" +import { writeFileSync, mkdtempSync, rmSync } from "fs" +import { tmpdir } from "os" +import { join } from "path" + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +let tmpDirs: string[] = [] + +function makeTmpDir(): string { + const dir = mkdtempSync(join(tmpdir(), "dbt-lineage-test-")) + tmpDirs.push(dir) + return dir +} + +function writeManifest(dir: string, manifest: Record<string, unknown>): string { + const manifestPath = join(dir, "manifest.json") + writeFileSync(manifestPath, JSON.stringify(manifest)) + return manifestPath +} + +afterEach(() => { + for (const dir of tmpDirs) { + rmSync(dir, { recursive: true, force: true }) + } + tmpDirs = [] +}) + +// --------------------------------------------------------------------------- +// Minimal manifest fixtures +// --------------------------------------------------------------------------- + +const BASE_MANIFEST = { + metadata: { adapter_type: "snowflake" }, + nodes: { + "model.proj.orders": { + resource_type: "model", + name: "orders", + schema: "public", + database: "analytics", + config: { materialized: "table" }, + compiled_code: "SELECT c.id, c.name FROM customers c", + depends_on: { nodes: ["source.proj.raw.customers"] }, + columns: { + id: { name: "id", data_type: "INTEGER" }, + name: { name: "name", data_type: "VARCHAR" }, + }, + }, + "model.proj.revenue": { + resource_type: "model", + name: "revenue", + compiled_code: "SELECT SUM(amount) AS total FROM orders", + depends_on: { nodes: ["model.proj.orders"] }, + columns: {}, + }, + "test.proj.not_null": { + resource_type: "test", + name: "not_null", + }, + }, + sources: { + "source.proj.raw.customers": { + name: "customers", + source_name: "raw", + schema: "raw_data", + database: "analytics", + columns: { + id: { 
name: "id", data_type: "INTEGER" }, + name: { name: "name", data_type: "VARCHAR" }, + email: { name: "email", data_type: "VARCHAR" }, + }, + }, + }, +} + +// --------------------------------------------------------------------------- +// 1. Model lookup (findModel) +// --------------------------------------------------------------------------- + +describe("dbtLineage: model lookup", () => { + test("finds model by unique_id", () => { + const dir = makeTmpDir() + const manifestPath = writeManifest(dir, BASE_MANIFEST) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "model.proj.orders", + }) + + expect(result.model_name).toBe("orders") + expect(result.model_unique_id).toBe("model.proj.orders") + expect(result.compiled_sql).toContain("SELECT") + }) + + test("finds model by short name", () => { + const dir = makeTmpDir() + const manifestPath = writeManifest(dir, BASE_MANIFEST) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "orders", + }) + + expect(result.model_name).toBe("orders") + expect(result.model_unique_id).toBe("model.proj.orders") + }) + + test("returns low confidence when model not found", () => { + const dir = makeTmpDir() + const manifestPath = writeManifest(dir, BASE_MANIFEST) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "nonexistent_model", + }) + + expect(result.confidence).toBe("low") + expect(result.confidence_factors).toContain("Model 'nonexistent_model' not found in manifest") + }) + + test("does not match test or seed nodes by name", () => { + const dir = makeTmpDir() + const manifestPath = writeManifest(dir, BASE_MANIFEST) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "not_null", + }) + + // "not_null" is a test node, not a model — should not be found + expect(result.confidence).toBe("low") + expect(result.confidence_factors[0]).toContain("not found in manifest") + }) +}) + +// 
--------------------------------------------------------------------------- +// 2. Dialect detection (detectDialect) +// --------------------------------------------------------------------------- + +describe("dbtLineage: dialect detection", () => { + test("detects dialect from manifest metadata.adapter_type", () => { + const dir = makeTmpDir() + const manifest = { + ...BASE_MANIFEST, + metadata: { adapter_type: "bigquery" }, + } + const manifestPath = writeManifest(dir, manifest) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "orders", + }) + + // We can't directly check dialect, but the result shouldn't error + // due to dialect mismatch. The model has compiled_code, so confidence + // should be high if lineage succeeds or reflect the actual error. + expect(result.model_name).toBe("orders") + }) + + test("explicit dialect param overrides auto-detection", () => { + const dir = makeTmpDir() + const manifestPath = writeManifest(dir, BASE_MANIFEST) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "orders", + dialect: "postgres", + }) + + // Should not throw regardless of dialect choice + expect(result.model_name).toBe("orders") + }) + + test("defaults to snowflake when adapter_type is missing", () => { + const dir = makeTmpDir() + const manifest = { + ...BASE_MANIFEST, + metadata: {}, + } + const manifestPath = writeManifest(dir, manifest) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "orders", + }) + + // Should not throw — defaults to snowflake + expect(result.model_name).toBe("orders") + }) +}) + +// --------------------------------------------------------------------------- +// 3. 
Schema context building (buildSchemaContext) +// --------------------------------------------------------------------------- + +describe("dbtLineage: schema context from upstream deps", () => { + test("builds context from source with columns", () => { + const dir = makeTmpDir() + const manifestPath = writeManifest(dir, BASE_MANIFEST) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "orders", + }) + + // The orders model depends on source.proj.raw.customers which has columns. + // If schema context was built correctly, lineage should have non-empty output. + expect(result.model_name).toBe("orders") + // compiled_sql should be present + expect(result.compiled_sql).toBeDefined() + expect(result.compiled_sql).toContain("SELECT") + }) + + test("handles model with no upstream columns gracefully", () => { + const dir = makeTmpDir() + // Revenue depends on orders, but orders has columns — so context should build. + // Create a model that depends on a node with no columns. + const manifest = { + ...BASE_MANIFEST, + nodes: { + ...BASE_MANIFEST.nodes, + "model.proj.bare": { + resource_type: "model", + name: "bare", + compiled_code: "SELECT 1 AS val", + depends_on: { nodes: ["model.proj.no_cols"] }, + columns: {}, + }, + "model.proj.no_cols": { + resource_type: "model", + name: "no_cols", + compiled_code: "SELECT 1", + depends_on: { nodes: [] }, + columns: {}, + }, + }, + } + const manifestPath = writeManifest(dir, manifest) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "bare", + }) + + // Should not crash — just returns with whatever lineage can determine + expect(result.model_name).toBe("bare") + expect(result.compiled_sql).toBe("SELECT 1 AS val") + }) +}) + +// --------------------------------------------------------------------------- +// 4. 
Error paths +// --------------------------------------------------------------------------- + +describe("dbtLineage: error handling", () => { + test("returns low confidence for non-existent manifest", () => { + const result = dbtLineage({ + manifest_path: "/tmp/definitely-not-a-manifest.json", + model: "orders", + }) + + expect(result.confidence).toBe("low") + expect(result.confidence_factors).toContain("Manifest file not found") + expect(result.raw_lineage).toEqual({}) + }) + + test("returns low confidence for invalid JSON manifest", () => { + const dir = makeTmpDir() + const manifestPath = join(dir, "manifest.json") + writeFileSync(manifestPath, "not valid json {{{") + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "orders", + }) + + expect(result.confidence).toBe("low") + expect(result.confidence_factors[0]).toContain("Failed to parse manifest") + }) + + test("returns low confidence when model has no compiled SQL", () => { + const dir = makeTmpDir() + const manifest = { + nodes: { + "model.proj.uncompiled": { + resource_type: "model", + name: "uncompiled", + // No compiled_code or compiled_sql + depends_on: { nodes: [] }, + columns: {}, + }, + }, + sources: {}, + } + const manifestPath = writeManifest(dir, manifest) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "uncompiled", + }) + + expect(result.confidence).toBe("low") + expect(result.confidence_factors).toContain("No compiled SQL found — run `dbt compile` first") + }) + + test("handles manifest with no nodes key at all", () => { + const dir = makeTmpDir() + const manifestPath = writeManifest(dir, { metadata: {} }) + + const result = dbtLineage({ + manifest_path: manifestPath, + model: "orders", + }) + + expect(result.confidence).toBe("low") + }) +}) diff --git a/packages/opencode/test/altimate/driver-normalize.test.ts b/packages/opencode/test/altimate/driver-normalize.test.ts index f437e1187c..e1f2d4d002 100644 --- 
a/packages/opencode/test/altimate/driver-normalize.test.ts +++ b/packages/opencode/test/altimate/driver-normalize.test.ts @@ -663,3 +663,133 @@ describe("normalizeConfig — Snowflake private_key edge cases", () => { expect(result.private_key_path).toBeUndefined() }) }) + +// --------------------------------------------------------------------------- +// normalizeConfig — MongoDB aliases +// --------------------------------------------------------------------------- + +describe("normalizeConfig — MongoDB", () => { + test("canonical mongodb config passes through unchanged", () => { + const config = { + type: "mongodb", + host: "localhost", + port: 27017, + database: "mydb", + user: "admin", + password: "secret", + } + expect(normalizeConfig(config)).toEqual(config) + }) + + test("connectionString → connection_string", () => { + const result = normalizeConfig({ + type: "mongodb", + connectionString: "mongodb://admin:secret@localhost:27017/mydb", + }) + expect(result.connection_string).toBe("mongodb://admin:secret@localhost:27017/mydb") + expect(result.connectionString).toBeUndefined() + }) + + test("uri → connection_string", () => { + const result = normalizeConfig({ + type: "mongodb", + uri: "mongodb+srv://admin:secret@cluster0.example.net/mydb", + }) + expect(result.connection_string).toBe("mongodb+srv://admin:secret@cluster0.example.net/mydb") + expect(result.uri).toBeUndefined() + }) + + test("url → connection_string", () => { + const result = normalizeConfig({ + type: "mongodb", + url: "mongodb://localhost:27017/mydb", + }) + expect(result.connection_string).toBe("mongodb://localhost:27017/mydb") + expect(result.url).toBeUndefined() + }) + + test("connection_string takes precedence over uri alias", () => { + const result = normalizeConfig({ + type: "mongodb", + connection_string: "mongodb://correct:27017/db", + uri: "mongodb://wrong:27017/db", + }) + expect(result.connection_string).toBe("mongodb://correct:27017/db") + expect(result.uri).toBeUndefined() + }) + + 
test("authSource → auth_source", () => { + const result = normalizeConfig({ + type: "mongodb", + authSource: "admin", + }) + expect(result.auth_source).toBe("admin") + expect(result.authSource).toBeUndefined() + }) + + test("replicaSet → replica_set", () => { + const result = normalizeConfig({ + type: "mongodb", + replicaSet: "rs0", + }) + expect(result.replica_set).toBe("rs0") + expect(result.replicaSet).toBeUndefined() + }) + + test("directConnection → direct_connection", () => { + const result = normalizeConfig({ + type: "mongodb", + directConnection: true, + }) + expect(result.direct_connection).toBe(true) + expect(result.directConnection).toBeUndefined() + }) + + test("connectTimeoutMS → connect_timeout", () => { + const result = normalizeConfig({ + type: "mongodb", + connectTimeoutMS: 5000, + }) + expect(result.connect_timeout).toBe(5000) + expect(result.connectTimeoutMS).toBeUndefined() + }) + + test("serverSelectionTimeoutMS → server_selection_timeout", () => { + const result = normalizeConfig({ + type: "mongodb", + serverSelectionTimeoutMS: 10000, + }) + expect(result.server_selection_timeout).toBe(10000) + expect(result.serverSelectionTimeoutMS).toBeUndefined() + }) + + test("username → user for mongodb", () => { + const result = normalizeConfig({ + type: "mongodb", + username: "admin", + }) + expect(result.user).toBe("admin") + expect(result.username).toBeUndefined() + }) + + test("dbname → database for mongodb", () => { + const result = normalizeConfig({ + type: "mongodb", + dbname: "mydb", + }) + expect(result.database).toBe("mydb") + expect(result.dbname).toBeUndefined() + }) + + test("mongo type alias works", () => { + const result = normalizeConfig({ + type: "mongo", + username: "admin", + connectionString: "mongodb://localhost:27017/mydb", + authSource: "admin", + }) + expect(result.user).toBe("admin") + expect(result.connection_string).toBe("mongodb://localhost:27017/mydb") + expect(result.auth_source).toBe("admin") + }) +}) diff --git 
a/packages/opencode/test/altimate/fingerprint-detect.test.ts b/packages/opencode/test/altimate/fingerprint-detect.test.ts new file mode 100644 index 0000000000..70677147bf --- /dev/null +++ b/packages/opencode/test/altimate/fingerprint-detect.test.ts @@ -0,0 +1,110 @@ +import { describe, test, expect, beforeEach } from "bun:test" +import { Fingerprint } from "../../src/altimate/fingerprint" +import { tmpdir } from "../fixture/fixture" +import * as fs from "fs/promises" +import path from "path" + +beforeEach(() => { + Fingerprint.reset() +}) + +describe("Fingerprint.detect: file-based project detection", () => { + test("detects dbt project from dbt_project.yml", async () => { + await using tmp = await tmpdir() + await fs.writeFile(path.join(tmp.path, "dbt_project.yml"), "name: my_project\nversion: 1.0.0\n") + const result = await Fingerprint.detect(tmp.path) + expect(result.tags).toContain("dbt") + expect(result.tags).toContain("data-engineering") + }) + + test("detects adapter type from profiles.yml", async () => { + await using tmp = await tmpdir() + await fs.writeFile( + path.join(tmp.path, "profiles.yml"), + "my_profile:\n target: dev\n outputs:\n dev:\n type: snowflake\n", + ) + const result = await Fingerprint.detect(tmp.path) + expect(result.tags).toContain("snowflake") + }) + + test("detects sql from .sqlfluff file", async () => { + await using tmp = await tmpdir() + await fs.writeFile(path.join(tmp.path, ".sqlfluff"), "[sqlfluff]\ndialect = bigquery\n") + const result = await Fingerprint.detect(tmp.path) + expect(result.tags).toContain("sql") + }) + + test("detects sql from .sql files when no .sqlfluff", async () => { + await using tmp = await tmpdir() + await fs.writeFile(path.join(tmp.path, "query.sql"), "SELECT 1") + const result = await Fingerprint.detect(tmp.path) + expect(result.tags).toContain("sql") + }) + + test("detects airflow from airflow.cfg", async () => { + await using tmp = await tmpdir() + await fs.writeFile(path.join(tmp.path, 
"airflow.cfg"), "[core]\nexecutor = LocalExecutor\n") + const result = await Fingerprint.detect(tmp.path) + expect(result.tags).toContain("airflow") + }) + + test("detects airflow from dags directory", async () => { + await using tmp = await tmpdir() + await fs.mkdir(path.join(tmp.path, "dags")) + const result = await Fingerprint.detect(tmp.path) + expect(result.tags).toContain("airflow") + }) + + test("detects databricks from databricks.yml", async () => { + await using tmp = await tmpdir() + await fs.writeFile(path.join(tmp.path, "databricks.yml"), "bundle:\n name: my_bundle\n") + const result = await Fingerprint.detect(tmp.path) + expect(result.tags).toContain("databricks") + }) + + test("returns empty tags for vanilla project", async () => { + await using tmp = await tmpdir() + const result = await Fingerprint.detect(tmp.path) + expect(result.tags).toEqual([]) + }) + + test("caches result for same cwd", async () => { + await using tmp = await tmpdir() + await fs.writeFile(path.join(tmp.path, "dbt_project.yml"), "name: test\n") + const r1 = await Fingerprint.detect(tmp.path) + // Remove the file — cached result should still have dbt + await fs.rm(path.join(tmp.path, "dbt_project.yml")) + const r2 = await Fingerprint.detect(tmp.path) + expect(r1).toBe(r2) // Same reference (cached) + }) + + test("deduplicates tags from cwd and root scanning same markers", async () => { + // When cwd !== root, both directories are scanned. If both contain + // dbt_project.yml, the "dbt" and "data-engineering" tags should appear + // only once each (the source deduplicates via Set). 
+ await using tmp = await tmpdir() + const subdir = path.join(tmp.path, "models") + await fs.mkdir(subdir) + // Place dbt_project.yml in BOTH root and subdir + await fs.writeFile(path.join(tmp.path, "dbt_project.yml"), "name: root\n") + await fs.writeFile(path.join(subdir, "dbt_project.yml"), "name: sub\n") + const result = await Fingerprint.detect(subdir, tmp.path) + const dbtCount = result.tags.filter((t) => t === "dbt").length + expect(dbtCount).toBe(1) + const deCount = result.tags.filter((t) => t === "data-engineering").length + expect(deCount).toBe(1) + }) + + test("scans both cwd and root when different", async () => { + await using tmp = await tmpdir() + const subdir = path.join(tmp.path, "models") + await fs.mkdir(subdir) + // dbt_project.yml only in root + await fs.writeFile(path.join(tmp.path, "dbt_project.yml"), "name: test\n") + // .sql file only in subdir + await fs.writeFile(path.join(subdir, "model.sql"), "SELECT 1") + const result = await Fingerprint.detect(subdir, tmp.path) + expect(result.tags).toContain("dbt") + expect(result.tags).toContain("sql") + }) +}) diff --git a/packages/opencode/test/altimate/registry-env-loading.test.ts b/packages/opencode/test/altimate/registry-env-loading.test.ts new file mode 100644 index 0000000000..ac2e961318 --- /dev/null +++ b/packages/opencode/test/altimate/registry-env-loading.test.ts @@ -0,0 +1,169 @@ +/** + * Tests for ConnectionRegistry's load() function — env var parsing, + * file loading, and merge precedence (global < local < env). + * + * The existing connections.test.ts only uses setConfigs(), which bypasses + * the entire loadFromFile/loadFromEnv pipeline. These tests verify that + * CI/CD users who set ALTIMATE_CODE_CONN_* env vars get correct configs. 
+ */ + +import { describe, test, expect, beforeEach, afterEach } from "bun:test" +import * as Registry from "../../src/altimate/native/connections/registry" + +// --------------------------------------------------------------------------- +// Env var cleanup helper +// --------------------------------------------------------------------------- + +const ENV_PREFIX = "ALTIMATE_CODE_CONN_" +const envVarsToClean: string[] = [] + +function setEnvVar(name: string, value: string): void { + const key = `${ENV_PREFIX}${name}` + process.env[key] = value + envVarsToClean.push(key) +} + +function cleanEnvVars(): void { + for (const key of envVarsToClean) { + delete process.env[key] + } + envVarsToClean.length = 0 +} + +// --------------------------------------------------------------------------- +// Setup / Teardown +// --------------------------------------------------------------------------- + +beforeEach(() => { + Registry.reset() + cleanEnvVars() +}) + +afterEach(() => { + Registry.reset() + cleanEnvVars() +}) + +// --------------------------------------------------------------------------- +// 1. 
Env var loading +// --------------------------------------------------------------------------- + +describe("ConnectionRegistry: env var loading", () => { + test("loads connection from ALTIMATE_CODE_CONN_* env var", () => { + setEnvVar("MYDB", JSON.stringify({ type: "postgres", host: "localhost", port: 5432 })) + + Registry.load() + + const config = Registry.getConfig("mydb") + expect(config).toBeDefined() + expect(config?.type).toBe("postgres") + expect(config?.host).toBe("localhost") + }) + + test("env var name is lowercased to connection name", () => { + setEnvVar("MY_PROD_DB", JSON.stringify({ type: "snowflake", account: "abc123" })) + + Registry.load() + + expect(Registry.getConfig("my_prod_db")).toBeDefined() + expect(Registry.getConfig("MY_PROD_DB")).toBeUndefined() + }) + + test("ignores env vars with empty value", () => { + process.env[`${ENV_PREFIX}EMPTY`] = "" + envVarsToClean.push(`${ENV_PREFIX}EMPTY`) + + Registry.load() + + expect(Registry.getConfig("empty")).toBeUndefined() + }) + + test("ignores env vars with invalid JSON", () => { + setEnvVar("BAD_JSON", "not valid json {{{") + + Registry.load() + + expect(Registry.getConfig("bad_json")).toBeUndefined() + // Should not throw — graceful handling + }) + + test("ignores env vars where parsed value is not an object or has no type", () => { + setEnvVar("STRING_VAL", JSON.stringify("just a string")) + setEnvVar("NUMBER_VAL", JSON.stringify(42)) + setEnvVar("NULL_VAL", JSON.stringify(null)) + setEnvVar("NO_TYPE", JSON.stringify({ host: "localhost", port: 5432 })) + + Registry.load() + + expect(Registry.getConfig("string_val")).toBeUndefined() + expect(Registry.getConfig("number_val")).toBeUndefined() + expect(Registry.getConfig("null_val")).toBeUndefined() + expect(Registry.getConfig("no_type")).toBeUndefined() + }) + + test("loads multiple connections from env vars", () => { + setEnvVar("PG", JSON.stringify({ type: "postgres", host: "pg.local" })) + setEnvVar("SF", JSON.stringify({ type: "snowflake", 
account: "xyz" })) + setEnvVar("DDB", JSON.stringify({ type: "duckdb", path: ":memory:" })) + + Registry.load() + + // Verify all 3 env-var connections were loaded (other env vars or file + // configs may also contribute, so check by name rather than total count) + expect(Registry.getConfig("pg")).toBeDefined() + expect(Registry.getConfig("pg")?.type).toBe("postgres") + expect(Registry.getConfig("sf")).toBeDefined() + expect(Registry.getConfig("sf")?.type).toBe("snowflake") + expect(Registry.getConfig("ddb")).toBeDefined() + expect(Registry.getConfig("ddb")?.type).toBe("duckdb") + }) + +}) + +// --------------------------------------------------------------------------- +// 2. Merge precedence: env overrides file configs +// --------------------------------------------------------------------------- + +describe("ConnectionRegistry: load() replaces prior state", () => { + test("load() replaces setConfigs state entirely with fresh file+env data", () => { + // setConfigs() populates in-memory state without going through load() + Registry.setConfigs({ + mydb: { type: "postgres", host: "file-host", port: 5432 }, + }) + expect(Registry.getConfig("mydb")?.host).toBe("file-host") + + // load() clears configs and rebuilds from files + env. + // Since no connections.json exists at globalConfigPath()/localConfigPath() + // in this test environment, only env vars contribute. + setEnvVar("MYDB", JSON.stringify({ type: "postgres", host: "env-host", port: 5433 })) + Registry.load() + + const config = Registry.getConfig("mydb") + expect(config?.host).toBe("env-host") + expect(config?.port).toBe(5433) + }) +}) + +// --------------------------------------------------------------------------- +// 3. 
list() reflects env-loaded connections +// --------------------------------------------------------------------------- + +describe("ConnectionRegistry: list() with env-loaded connections", () => { + test("list returns warehouses loaded from env vars", () => { + setEnvVar("CI_WAREHOUSE", JSON.stringify({ + type: "bigquery", + project: "my-project", + database: "analytics", + })) + + Registry.load() + + const { warehouses } = Registry.list() + // Use .some() instead of index-based access to avoid flakiness if the + // host filesystem has a connections.json that also contributes entries. + const ciWarehouse = warehouses.find((w) => w.name === "ci_warehouse") + expect(ciWarehouse).toBeDefined() + expect(ciWarehouse?.type).toBe("bigquery") + expect(ciWarehouse?.database).toBe("analytics") + }) +}) diff --git a/packages/opencode/test/altimate/sql-analyze-format.test.ts b/packages/opencode/test/altimate/sql-analyze-format.test.ts new file mode 100644 index 0000000000..c8ff912dc1 --- /dev/null +++ b/packages/opencode/test/altimate/sql-analyze-format.test.ts @@ -0,0 +1,84 @@ +import { describe, test, expect } from "bun:test" +import { Dispatcher } from "../../src/altimate/native" +import { registerAllSql } from "../../src/altimate/native/sql/register" +import type { SqlAnalyzeResult } from "../../src/altimate/native/types" + +// Ensure sql.analyze is registered +registerAllSql() + +// --------------------------------------------------------------------------- +// sql.analyze Dispatcher — success semantics and result shape +// +// The AI-5975 fix changed success semantics: finding issues IS a successful +// analysis (success:true). Previously it returned success:false when issues +// were found, causing ~4,000 false "unknown error" telemetry entries per day. 
+// --------------------------------------------------------------------------- + +describe("sql.analyze: success semantics (AI-5975 regression)", () => { + test("query with lint issues still returns success:true", async () => { + // SELECT * is a known lint trigger — must still be a successful analysis + const result = await Dispatcher.call("sql.analyze", { + sql: "SELECT * FROM users", + dialect: "snowflake", + }) as SqlAnalyzeResult + // KEY INVARIANT: finding issues is a SUCCESSFUL analysis + expect(result.success).toBe(true) + // Verify issues were actually found (not a vacuous pass) + expect(result.issue_count).toBeGreaterThan(0) + expect(result.confidence).toBe("high") + }) + + test("issue_count matches issues array length", async () => { + const result = await Dispatcher.call("sql.analyze", { + sql: "SELECT * FROM orders JOIN customers", + dialect: "snowflake", + }) as SqlAnalyzeResult + expect(result.issues.length).toBeGreaterThan(0) + expect(result.issue_count).toBe(result.issues.length) + }) +}) + +describe("sql.analyze: issue structure", () => { + test("lint issues have required fields", async () => { + const result = await Dispatcher.call("sql.analyze", { + sql: "SELECT * FROM users", + dialect: "snowflake", + }) as SqlAnalyzeResult + const lintIssues = result.issues.filter((i) => i.type === "lint") + // Guard against vacuous pass — SELECT * must produce lint findings + expect(lintIssues.length).toBeGreaterThan(0) + for (const issue of lintIssues) { + expect(issue.severity).toBeDefined() + expect(issue.message).toBeDefined() + expect(typeof issue.recommendation).toBe("string") + expect(issue.confidence).toBe("high") + } + }) + + test("issue types are limited to lint, semantic, safety", async () => { + const result = await Dispatcher.call("sql.analyze", { + sql: "SELECT * FROM users WHERE 1=1", + dialect: "snowflake", + }) as SqlAnalyzeResult + expect(result.issues.length).toBeGreaterThan(0) + const validTypes = ["lint", "semantic", "safety"] + for 
(const issue of result.issues) { + expect(validTypes).toContain(issue.type) + } + }) +}) + +describe("sql.analyze: result shape", () => { + test("successful result has all required properties", async () => { + const result = await Dispatcher.call("sql.analyze", { + sql: "SELECT 1 LIMIT 1", + }) as SqlAnalyzeResult + expect(result).toHaveProperty("success") + expect(result).toHaveProperty("issues") + expect(result).toHaveProperty("issue_count") + expect(result).toHaveProperty("confidence") + expect(result).toHaveProperty("confidence_factors") + expect(Array.isArray(result.issues)).toBe(true) + expect(Array.isArray(result.confidence_factors)).toBe(true) + }) +}) diff --git a/packages/opencode/test/altimate/sql-analyze-tool.test.ts b/packages/opencode/test/altimate/sql-analyze-tool.test.ts new file mode 100644 index 0000000000..21afddf46e --- /dev/null +++ b/packages/opencode/test/altimate/sql-analyze-tool.test.ts @@ -0,0 +1,172 @@ +/** + * Tests for SqlAnalyzeTool title construction and formatAnalysis output. + * + * These test the formatting logic patterns used in sql-analyze.ts. + * Since formatAnalysis is not exported, we replicate its logic here — + * same approach as tool-formatters.test.ts. This means these tests + * will not catch changes to the real function unless the test copy + * is also updated. This is an accepted tradeoff in this codebase. + */ + +import { describe, test, expect } from "bun:test" + +describe("SqlAnalyzeTool: title construction", () => { + // Replicates the title template from sql-analyze.ts execute() line 25 + function buildTitle(result: { error?: string; issue_count: number; confidence: string }) { + return `Analyze: ${result.error ? "PARSE ERROR" : `${result.issue_count} issue${result.issue_count !== 1 ? 
"s" : ""}`} [${result.confidence}]` + } + + test("zero issues shows '0 issues'", () => { + expect(buildTitle({ issue_count: 0, confidence: "high" })).toBe("Analyze: 0 issues [high]") + }) + + test("one issue shows singular '1 issue'", () => { + expect(buildTitle({ issue_count: 1, confidence: "high" })).toBe("Analyze: 1 issue [high]") + }) + + test("multiple issues shows plural", () => { + expect(buildTitle({ issue_count: 5, confidence: "medium" })).toBe("Analyze: 5 issues [medium]") + }) + + test("error present shows PARSE ERROR", () => { + expect(buildTitle({ error: "syntax error", issue_count: 0, confidence: "low" })).toBe( + "Analyze: PARSE ERROR [low]", + ) + }) +}) + +describe("SqlAnalyzeTool: formatAnalysis output", () => { + // Replicates formatAnalysis() from sql-analyze.ts lines 45-70 + function formatAnalysis(result: { + error?: string + issues: Array<{ + type: string + severity: string + message: string + recommendation: string + location?: string + confidence: string + }> + issue_count: number + confidence: string + confidence_factors: string[] + }): string { + if (result.error) return `Analysis failed: ${result.error}` + if (result.issues.length === 0) return "No anti-patterns or issues detected." + + const lines: string[] = [ + `Found ${result.issue_count} issue${result.issue_count !== 1 ? "s" : ""} (confidence: ${result.confidence}):`, + ] + if (result.confidence_factors.length > 0) { + lines.push(` Note: ${result.confidence_factors.join("; ")}`) + } + lines.push("") + + for (const issue of result.issues) { + const loc = issue.location ? ` \u2014 ${issue.location}` : "" + const conf = issue.confidence !== "high" ? 
` [${issue.confidence} confidence]` : "" + lines.push(` [${issue.severity.toUpperCase()}] ${issue.type}${conf}`) + lines.push(` ${issue.message}${loc}`) + lines.push(` \u2192 ${issue.recommendation}`) + lines.push("") + } + + return lines.join("\n") + } + + test("error result returns failure message", () => { + const output = formatAnalysis({ + error: "parse error at line 5", + issues: [], + issue_count: 0, + confidence: "low", + confidence_factors: [], + }) + expect(output).toBe("Analysis failed: parse error at line 5") + }) + + test("zero issues returns clean message", () => { + const output = formatAnalysis({ + issues: [], + issue_count: 0, + confidence: "high", + confidence_factors: [], + }) + expect(output).toBe("No anti-patterns or issues detected.") + }) + + test("issues are formatted with severity, type, location", () => { + const output = formatAnalysis({ + issues: [ + { + type: "lint", + severity: "warning", + message: "SELECT * detected", + recommendation: "Use explicit columns", + location: "line 1", + confidence: "high", + }, + ], + issue_count: 1, + confidence: "high", + confidence_factors: ["lint"], + }) + expect(output).toContain("[WARNING] lint") + expect(output).toContain("SELECT * detected \u2014 line 1") + expect(output).toContain("\u2192 Use explicit columns") + }) + + test("non-high confidence issues show confidence tag", () => { + const output = formatAnalysis({ + issues: [ + { + type: "semantic", + severity: "info", + message: "Possible unused join", + recommendation: "Review join necessity", + confidence: "medium", + }, + ], + issue_count: 1, + confidence: "medium", + confidence_factors: ["semantics"], + }) + expect(output).toContain("[medium confidence]") + }) + + test("high confidence issues omit confidence tag", () => { + const output = formatAnalysis({ + issues: [ + { + type: "safety", + severity: "high", + message: "SQL injection risk", + recommendation: "Use parameterized queries", + confidence: "high", + }, + ], + issue_count: 1, + 
confidence: "high", + confidence_factors: ["safety"], + }) + expect(output).not.toContain("[high confidence]") + }) + + test("confidence factors are listed in Note line", () => { + const output = formatAnalysis({ + issues: [ + { + type: "lint", + severity: "warning", + message: "Missing LIMIT", + recommendation: "Add LIMIT clause", + confidence: "high", + }, + ], + issue_count: 1, + confidence: "high", + confidence_factors: ["lint", "semantics", "safety"], + }) + expect(output).toContain("Note: lint; semantics; safety") + }) +}) diff --git a/packages/opencode/test/altimate/warehouse-telemetry.test.ts b/packages/opencode/test/altimate/warehouse-telemetry.test.ts index ccc7a4c13d..6c566c5f6b 100644 --- a/packages/opencode/test/altimate/warehouse-telemetry.test.ts +++ b/packages/opencode/test/altimate/warehouse-telemetry.test.ts @@ -169,6 +169,23 @@ describe("warehouse telemetry: detectAuthMethod", () => { expect(connectEvent).toBeDefined() expect(connectEvent.auth_method).toBe("unknown") }) + + // MongoDB-specific auth detection (added with MongoDB driver support #482) + test("detects connection_string auth for mongodb without password", () => { + expect(Registry.detectAuthMethod({ type: "mongodb", host: "localhost" } as any)).toBe("connection_string") + }) + + test("detects password auth for mongodb with password", () => { + expect(Registry.detectAuthMethod({ type: "mongodb", host: "localhost", password: "secret" } as any)).toBe("password") + }) + + test("detects connection_string auth for mongo alias without password", () => { + expect(Registry.detectAuthMethod({ type: "mongo", host: "localhost" } as any)).toBe("connection_string") + }) + + test("prefers explicit connection_string field over mongodb type fallback", () => { + expect(Registry.detectAuthMethod({ type: "mongodb", connection_string: "mongodb://localhost/test" } as any)).toBe("connection_string") + }) }) // --------------------------------------------------------------------------- diff --git 
a/packages/opencode/test/bus/bus-event.test.ts b/packages/opencode/test/bus/bus-event.test.ts new file mode 100644 index 0000000000..b0683985b8 --- /dev/null +++ b/packages/opencode/test/bus/bus-event.test.ts @@ -0,0 +1,75 @@ +// altimate_change start — tests for BusEvent registry and payloads +import { describe, test, expect } from "bun:test" +import z from "zod" +import { BusEvent } from "../../src/bus/bus-event" + +// Use unique type strings prefixed with __test_ to avoid colliding with +// production events already registered in the global BusEvent registry. + +describe("BusEvent.define", () => { + test("returns an object with type string and zod schema", () => { + const schema = z.object({ count: z.number() }) + const def = BusEvent.define("__test_define_shape", schema) + + expect(def.type).toBe("__test_define_shape") + expect(def.properties).toBe(schema) + }) +}) + +describe("BusEvent.payloads", () => { + test("includes a registered event in the discriminated union", () => { + const testSchema = z.object({ value: z.string() }) + BusEvent.define("__test_payloads_registered", testSchema) + const union = BusEvent.payloads() + const result = union.safeParse({ + type: "__test_payloads_registered", + properties: { value: "hello" }, + }) + expect(result.success).toBe(true) + }) + + test("rejects event with unregistered type", () => { + const union = BusEvent.payloads() + const result = union.safeParse({ + type: "__test_payloads_NONEXISTENT_999", + properties: {}, + }) + expect(result.success).toBe(false) + }) + + test("rejects event with wrong properties shape", () => { + BusEvent.define("__test_payloads_registered", z.object({ value: z.string() })) + const union = BusEvent.payloads() + const result = union.safeParse({ + type: "__test_payloads_registered", + properties: { value: 42 }, // should be string, not number + }) + expect(result.success).toBe(false) + }) +}) + +describe("BusEvent.define duplicate handling", () => { + test("last define() wins when same type 
is registered twice", () => {
+    // First definition: requires { a: string }
+    BusEvent.define("__test_duplicate_overwrite", z.object({ a: z.string() }))
+    // Second definition: requires { b: number }
+    BusEvent.define("__test_duplicate_overwrite", z.object({ b: z.number() }))
+
+    const union = BusEvent.payloads()
+
+    // Payload matching second schema should succeed
+    const valid = union.safeParse({
+      type: "__test_duplicate_overwrite",
+      properties: { b: 42 },
+    })
+    expect(valid.success).toBe(true)
+
+    // Payload matching ONLY first schema (missing b) should fail
+    const invalid = union.safeParse({
+      type: "__test_duplicate_overwrite",
+      properties: { a: "hello" },
+    })
+    expect(invalid.success).toBe(false)
+  })
+})
+// altimate_change end
diff --git a/packages/opencode/test/command/builtin-commands.test.ts b/packages/opencode/test/command/builtin-commands.test.ts
new file mode 100644
index 0000000000..b99ea2795f
--- /dev/null
+++ b/packages/opencode/test/command/builtin-commands.test.ts
@@ -0,0 +1,114 @@
+import { describe, test, expect } from "bun:test"
+import { Command } from "../../src/command/index"
+import { Instance } from "../../src/project/instance"
+import { tmpdir } from "../fixture/fixture"
+
+async function withInstance(fn: () => Promise<void>) {
+  await using tmp = await tmpdir({ git: true })
+  await Instance.provide({ directory: tmp.path, fn })
+}
+
+describe("altimate builtin commands", () => {
+  describe("discover-and-add-mcps", () => {
+    test("is registered as a default command", async () => {
+      await withInstance(async () => {
+        const cmd = await Command.get("discover-and-add-mcps")
+        expect(cmd).toBeDefined()
+        expect(cmd.name).toBe("discover-and-add-mcps")
+        expect(cmd.source).toBe("command")
+      })
+    })
+
+    test("has correct description", async () => {
+      await withInstance(async () => {
+        const cmd = await Command.get("discover-and-add-mcps")
+        expect(cmd.description).toBe("discover MCP servers from external AI tool configs and add them")
+      })
+    })
+ + test("template references MCP discovery workflow", async () => { + await withInstance(async () => { + const cmd = await Command.get("discover-and-add-mcps") + const template = await cmd.template + expect(typeof template).toBe("string") + expect(template.length).toBeGreaterThan(0) + // The template should reference MCP-related concepts + expect(template.toLowerCase()).toContain("mcp") + }) + }) + + test("is present in Command.Default constants", () => { + expect(Command.Default.DISCOVER_MCPS).toBe("discover-and-add-mcps") + }) + }) + + describe("configure-claude", () => { + test("is registered as a default command", async () => { + await withInstance(async () => { + const cmd = await Command.get("configure-claude") + expect(cmd).toBeDefined() + expect(cmd.name).toBe("configure-claude") + expect(cmd.source).toBe("command") + }) + }) + + test("has correct description", async () => { + await withInstance(async () => { + const cmd = await Command.get("configure-claude") + expect(cmd.description).toBe("configure /altimate command in Claude Code") + }) + }) + + test("is present in Command.Default constants", () => { + expect(Command.Default.CONFIGURE_CLAUDE).toBe("configure-claude") + }) + }) + + describe("configure-codex", () => { + test("is registered as a default command", async () => { + await withInstance(async () => { + const cmd = await Command.get("configure-codex") + expect(cmd).toBeDefined() + expect(cmd.name).toBe("configure-codex") + expect(cmd.source).toBe("command") + }) + }) + + test("has correct description", async () => { + await withInstance(async () => { + const cmd = await Command.get("configure-codex") + expect(cmd.description).toBe("configure altimate skill in Codex CLI") + }) + }) + + test("is present in Command.Default constants", () => { + expect(Command.Default.CONFIGURE_CODEX).toBe("configure-codex") + }) + }) +}) + +describe("Command.hints()", () => { + test("extracts numbered placeholders in order", () => { + expect(Command.hints("Do $1 
then $2")).toEqual(["$1", "$2"])
+  })
+
+  test("extracts $ARGUMENTS", () => {
+    expect(Command.hints("Run with $ARGUMENTS")).toEqual(["$ARGUMENTS"])
+  })
+
+  test("extracts both numbered and $ARGUMENTS", () => {
+    expect(Command.hints("Do $1 with $ARGUMENTS")).toEqual(["$1", "$ARGUMENTS"])
+  })
+
+  test("deduplicates repeated placeholders", () => {
+    expect(Command.hints("$1 and $1 again")).toEqual(["$1"])
+  })
+
+  test("returns empty array for no placeholders", () => {
+    expect(Command.hints("plain text")).toEqual([])
+  })
+
+  test("sorts numbered placeholders numerically", () => {
+    expect(Command.hints("$3 then $1 then $2")).toEqual(["$1", "$2", "$3"])
+  })
+})
diff --git a/packages/opencode/test/command/hints-discover-mcps.test.ts b/packages/opencode/test/command/hints-discover-mcps.test.ts
new file mode 100644
index 0000000000..745488b12e
--- /dev/null
+++ b/packages/opencode/test/command/hints-discover-mcps.test.ts
@@ -0,0 +1,92 @@
+import { describe, test, expect } from "bun:test"
+import { Command } from "../../src/command/index"
+import { Instance } from "../../src/project/instance"
+import { tmpdir } from "../fixture/fixture"
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+async function withInstance(fn: () => Promise<void>) {
+  await using tmp = await tmpdir({ git: true })
+  await Instance.provide({ directory: tmp.path, fn })
+}
+
+// ---------------------------------------------------------------------------
+// Command.hints() — template hint extraction
+// ---------------------------------------------------------------------------
+
+describe("Command.hints()", () => {
+  test("extracts and sorts placeholders", () => {
+    const result = Command.hints("Use $10 then $2 and $1")
+    expect(result).toEqual(["$1", "$2", "$10"])
+  })
+
+  test("extracts $ARGUMENTS placeholder", () => {
+    const result = Command.hints("Run with $ARGUMENTS")
+    
expect(result).toEqual(["$ARGUMENTS"]) + }) + + test("extracts both numbered and $ARGUMENTS", () => { + const result = Command.hints("Use $1 and $ARGUMENTS") + expect(result).toEqual(["$1", "$ARGUMENTS"]) + }) + + test("deduplicates repeated numbered placeholders", () => { + const result = Command.hints("$1 and $1 then $2") + expect(result).toEqual(["$1", "$2"]) + }) + + test("returns empty array for template with no placeholders", () => { + const result = Command.hints("No placeholders here") + expect(result).toEqual([]) + }) + + test("includes $0 as a valid placeholder", () => { + const result = Command.hints("$0 $1") + expect(result).toEqual(["$0", "$1"]) + }) +}) + +// --------------------------------------------------------------------------- +// discover-and-add-mcps builtin command (#409) +// --------------------------------------------------------------------------- + +describe("discover-and-add-mcps builtin command", () => { + test("is registered in Command.Default constants", () => { + expect(Command.Default.DISCOVER_MCPS).toBe("discover-and-add-mcps") + }) + + test("is present in command list", async () => { + await withInstance(async () => { + const commands = await Command.list() + const names = commands.map((c) => c.name) + expect(names).toContain("discover-and-add-mcps") + }) + }) + + test("has correct metadata", async () => { + await withInstance(async () => { + const cmd = await Command.get("discover-and-add-mcps") + expect(cmd).toBeDefined() + expect(cmd.name).toBe("discover-and-add-mcps") + expect(cmd.source).toBe("command") + expect(cmd.description).toContain("MCP") + }) + }) + + test("template references mcp_discover tool", async () => { + await withInstance(async () => { + const cmd = await Command.get("discover-and-add-mcps") + const template = await cmd.template + expect(template).toContain("mcp_discover") + }) + }) + + test("is not a subtask", async () => { + await withInstance(async () => { + const cmd = await 
Command.get("discover-and-add-mcps") + expect(cmd.subtask).toBeUndefined() + }) + }) +}) diff --git a/packages/opencode/test/fixture/fixture.ts b/packages/opencode/test/fixture/fixture.ts index c7bb9658c9..8fccf2085e 100644 --- a/packages/opencode/test/fixture/fixture.ts +++ b/packages/opencode/test/fixture/fixture.ts @@ -44,6 +44,7 @@ export async function tmpdir(options?: TmpDirOptions) { await $`git config core.fsmonitor false`.cwd(dirpath).quiet() await $`git config user.email "test@opencode.test"`.cwd(dirpath).quiet() await $`git config user.name "Test"`.cwd(dirpath).quiet() + await $`git config commit.gpgsign false`.cwd(dirpath).quiet() await $`git commit --allow-empty -m "root commit ${dirpath}"`.cwd(dirpath).quiet() } if (options?.config) { diff --git a/packages/opencode/test/util/git.test.ts b/packages/opencode/test/util/git.test.ts new file mode 100644 index 0000000000..02d1b7ef86 --- /dev/null +++ b/packages/opencode/test/util/git.test.ts @@ -0,0 +1,51 @@ +import { describe, test, expect } from "bun:test" +import { tmpdir } from "../fixture/fixture" +import { git } from "../../src/util/git" + +describe("git() utility", () => { + test("runs a simple git command and returns stdout", async () => { + await using tmp = await tmpdir({ git: true }) + + const result = await git(["rev-parse", "--is-inside-work-tree"], { cwd: tmp.path }) + expect(result.exitCode).toBe(0) + expect(result.text().trim()).toBe("true") + }) + + test("returns non-zero exit code for unknown git subcommand", async () => { + await using tmp = await tmpdir({ git: true }) + + const result = await git(["not-a-real-subcommand"], { cwd: tmp.path }) + expect(result.exitCode).not.toBe(0) + }) + + test("stderr is populated on error", async () => { + await using tmp = await tmpdir({ git: true }) + + const result = await git(["checkout", "nonexistent-branch-xyz"], { cwd: tmp.path }) + expect(result.exitCode).not.toBe(0) + expect(result.stderr.length).toBeGreaterThan(0) + }) + + test("passes custom 
env vars through to git process", async () => { + await using tmp = await tmpdir({ git: true }) + + // Use GIT_CONFIG_COUNT to inject a config value that only exists via env + const result = await git(["config", "--get", "test.injected"], { + cwd: tmp.path, + env: { + ...process.env, + GIT_CONFIG_COUNT: "1", + GIT_CONFIG_KEY_0: "test.injected", + GIT_CONFIG_VALUE_0: "from-env", + }, + }) + expect(result.exitCode).toBe(0) + expect(result.text().trim()).toBe("from-env") + }) + + test("returns exitCode 1 and empty stdout when cwd does not exist", async () => { + const result = await git(["status"], { cwd: "/tmp/nonexistent-dir-" + Math.random().toString(36) }) + expect(result.exitCode).not.toBe(0) + expect(result.stdout.length).toBe(0) + }) +}) diff --git a/packages/opencode/test/util/keybind.test.ts b/packages/opencode/test/util/keybind.test.ts new file mode 100644 index 0000000000..6fd9c386c9 --- /dev/null +++ b/packages/opencode/test/util/keybind.test.ts @@ -0,0 +1,232 @@ +import { describe, test, expect } from "bun:test" +import { Keybind } from "../../src/util/keybind" + +// --------------------------------------------------------------------------- +// Keybind.parse +// --------------------------------------------------------------------------- + +describe("Keybind.parse", () => { + test("parses simple key", () => { + const result = Keybind.parse("a") + expect(result).toHaveLength(1) + expect(result[0]).toMatchObject({ + ctrl: false, + meta: false, + shift: false, + leader: false, + name: "a", + }) + }) + + test("parses ctrl modifier", () => { + const [key] = Keybind.parse("ctrl+a") + expect(key.ctrl).toBe(true) + expect(key.name).toBe("a") + }) + + test("parses alt/meta/option as meta", () => { + expect(Keybind.parse("alt+x")[0].meta).toBe(true) + expect(Keybind.parse("meta+x")[0].meta).toBe(true) + expect(Keybind.parse("option+x")[0].meta).toBe(true) + }) + + test("parses multiple modifiers", () => { + const [key] = Keybind.parse("ctrl+shift+a") + 
expect(key.ctrl).toBe(true)
+    expect(key.shift).toBe(true)
+    expect(key.name).toBe("a")
+  })
+
+  test("parses super modifier", () => {
+    const [key] = Keybind.parse("super+a")
+    expect(key.super).toBe(true)
+    expect(key.name).toBe("a")
+  })
+
+  test("parses leader key", () => {
+    const [key] = Keybind.parse("<leader>a")
+    expect(key.leader).toBe(true)
+    expect(key.name).toBe("a")
+  })
+
+  test("parses comma-separated multiple bindings", () => {
+    const result = Keybind.parse("ctrl+a,ctrl+b")
+    expect(result).toHaveLength(2)
+    expect(result[0].name).toBe("a")
+    expect(result[1].name).toBe("b")
+  })
+
+  test("normalizes esc to escape", () => {
+    const [key] = Keybind.parse("esc")
+    expect(key.name).toBe("escape")
+  })
+
+  test("returns empty array for 'none'", () => {
+    expect(Keybind.parse("none")).toEqual([])
+  })
+})
+
+// ---------------------------------------------------------------------------
+// Keybind.match
+// ---------------------------------------------------------------------------
+
+describe("Keybind.match", () => {
+  test("matches identical keys", () => {
+    const key: Keybind.Info = {
+      ctrl: true,
+      meta: false,
+      shift: false,
+      leader: false,
+      name: "a",
+      super: false,
+    }
+    expect(Keybind.match(key, key)).toBe(true)
+  })
+
+  test("returns false for undefined first arg", () => {
+    const key: Keybind.Info = {
+      ctrl: false,
+      meta: false,
+      shift: false,
+      leader: false,
+      name: "a",
+      super: false,
+    }
+    expect(Keybind.match(undefined, key)).toBe(false)
+  })
+
+  test("normalizes missing super field to false", () => {
+    const a = { ctrl: false, meta: false, shift: false, leader: false, name: "x" } as Keybind.Info
+    const b: Keybind.Info = {
+      ctrl: false,
+      meta: false,
+      shift: false,
+      leader: false,
+      name: "x",
+      super: false,
+    }
+    expect(Keybind.match(a, b)).toBe(true)
+  })
+
+  test("super: true vs super: false don't match", () => {
+    const a: Keybind.Info = {
+      ctrl: false,
+      meta: false,
+      shift: false,
+      leader: false,
+      name: "a",
+      super: 
true,
+    }
+    const b: Keybind.Info = {
+      ctrl: false,
+      meta: false,
+      shift: false,
+      leader: false,
+      name: "a",
+      super: false,
+    }
+    expect(Keybind.match(a, b)).toBe(false)
+  })
+
+  test("different modifiers don't match", () => {
+    const a: Keybind.Info = {
+      ctrl: true,
+      meta: false,
+      shift: false,
+      leader: false,
+      name: "a",
+      super: false,
+    }
+    const b: Keybind.Info = {
+      ctrl: false,
+      meta: false,
+      shift: false,
+      leader: false,
+      name: "a",
+      super: false,
+    }
+    expect(Keybind.match(a, b)).toBe(false)
+  })
+})
+
+// ---------------------------------------------------------------------------
+// Keybind.toString
+// ---------------------------------------------------------------------------
+
+describe("Keybind.toString", () => {
+  test("returns empty string for undefined", () => {
+    expect(Keybind.toString(undefined)).toBe("")
+  })
+
+  test("formats simple key", () => {
+    const key: Keybind.Info = {
+      ctrl: false,
+      meta: false,
+      shift: false,
+      leader: false,
+      name: "a",
+      super: false,
+    }
+    expect(Keybind.toString(key)).toBe("a")
+  })
+
+  test("formats modifiers in order: ctrl+alt+super+shift", () => {
+    const key: Keybind.Info = {
+      ctrl: true,
+      meta: true,
+      shift: true,
+      leader: false,
+      name: "a",
+      super: true,
+    }
+    expect(Keybind.toString(key)).toBe("ctrl+alt+super+shift+a")
+  })
+
+  test("formats leader prefix", () => {
+    const key: Keybind.Info = {
+      ctrl: false,
+      meta: false,
+      shift: false,
+      leader: true,
+      name: "a",
+      super: false,
+    }
+    expect(Keybind.toString(key)).toBe("<leader> a")
+  })
+
+  test("maps delete to del", () => {
+    const key: Keybind.Info = {
+      ctrl: false,
+      meta: false,
+      shift: false,
+      leader: false,
+      name: "delete",
+      super: false,
+    }
+    expect(Keybind.toString(key)).toBe("del")
+  })
+})
+
+// ---------------------------------------------------------------------------
+// Keybind.fromParsedKey
+// ---------------------------------------------------------------------------
+
+describe("Keybind.fromParsedKey", () => 
{ + test("normalizes space to 'space'", () => { + const parsed = { name: " ", ctrl: false, meta: false, shift: false, super: false } + const result = Keybind.fromParsedKey(parsed as any) + expect(result.name).toBe("space") + }) + + test("sets leader flag when passed", () => { + const parsed = { name: "a", ctrl: false, meta: false, shift: false, super: false } + const result = Keybind.fromParsedKey(parsed as any, true) + expect(result.leader).toBe(true) + }) + + test("defaults leader to false", () => { + const parsed = { name: "a", ctrl: false, meta: false, shift: false, super: false } + const result = Keybind.fromParsedKey(parsed as any) + expect(result.leader).toBe(false) + }) +})