From e8fa61b680ff4f6ff5d21187def05df9f7264c3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= Date: Wed, 8 Apr 2026 14:27:40 +0200 Subject: [PATCH] Add codeset mod files Co-Authored-By: Claude Sonnet 4.6 --- .claude/docs/files.json | 37425 ++++++++++++++++++++++++++++++++ .claude/docs/folders.json | 5603 +++++ .claude/docs/general_index.md | 515 + .claude/docs/get_context.py | 530 + .codex/docs/files.json | 37425 ++++++++++++++++++++++++++++++++ .codex/docs/folders.json | 5603 +++++ .codex/docs/general_index.md | 515 + .codex/docs/get_context.py | 530 + .codex/rules/default.rules | 5 + AGENTS.md | 255 +- CLAUDE.md | 433 + 11 files changed, 88805 insertions(+), 34 deletions(-) create mode 100644 .claude/docs/files.json create mode 100644 .claude/docs/folders.json create mode 100644 .claude/docs/general_index.md create mode 100644 .claude/docs/get_context.py create mode 100644 .codex/docs/files.json create mode 100644 .codex/docs/folders.json create mode 100644 .codex/docs/general_index.md create mode 100644 .codex/docs/get_context.py create mode 100644 .codex/rules/default.rules create mode 100644 CLAUDE.md diff --git a/.claude/docs/files.json b/.claude/docs/files.json new file mode 100644 index 0000000000..4ffe129451 --- /dev/null +++ b/.claude/docs/files.json @@ -0,0 +1,37425 @@ +{ + "files": { + "crates/forge_domain/src/policies/operation.rs": { + "short_description": "Defines permission operations (Read, Write, Execute, Fetch) for policy checks.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "PermissionOperation", + "type": "enum" + }, + { + "name": "Write", + "type": "variant" + }, + { + "name": "Read", + "type": "variant" + }, + { + "name": "Execute", + "type": "variant" + }, + { + "name": "Fetch", + "type": "variant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/policies/rule.rs": { + "short_description": "Policy rule types that match operations with glob patterns 
and optional dirs.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "WriteRule", + "type": "struct" + }, + { + "name": "ReadRule", + "type": "struct" + }, + { + "name": "ExecuteRule", + "type": "struct" + }, + { + "name": "Fetch", + "type": "struct" + }, + { + "name": "Rule", + "type": "enum" + }, + { + "name": "match_pattern", + "type": "function" + }, + { + "name": "Display for WriteRule", + "type": "function" + }, + { + "name": "Display for ReadRule", + "type": "function" + }, + { + "name": "Display for ExecuteRule", + "type": "function" + }, + { + "name": "Display for Fetch", + "type": "function" + }, + { + "name": "Display for Rule", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/result.rs": { + "short_description": "ToolResult and ToolOutput types with formatting and error handling for tools.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ToolResult", + "type": "struct" + }, + { + "name": "ToolOutput", + "type": "struct" + }, + { + "name": "ToolValue", + "type": "enum" + }, + { + "name": "ToolResult::new", + "type": "function" + }, + { + "name": "ToolResult::success", + "type": "function" + }, + { + "name": "ToolResult::failure", + "type": "function" + }, + { + "name": "ToolOutput::text", + "type": "function" + }, + { + "name": "ToolOutput::ai", + "type": "function" + }, + { + "name": "ToolOutput::image", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/transformer/normalize_tool_args.rs": { + "short_description": "Transformer that converts stringified tool call args to parsed JSON.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "NormalizeToolCallArguments", + "type": "struct" + }, + { + "name": "Transformer", + "type": "trait" + }, + { + "name": "Context", + "type": "type" + }, + { + "name": "ContextMessage", + "type": "type" + }, + 
{ + "name": "ToolCallFull", + "type": "type" + }, + { + "name": "ToolCallArguments", + "type": "type" + } + ], + "insights": [ + { + "type": "bug_fix", + "category": "State Management", + "title": "Normalize stringified tool call arguments when resuming persisted conversations", + "problem": "Resumed/persisted assistant messages could contain tool call arguments stored as Unparsed strings; provider DTO builders expected JSON objects and would emit invalid requests.", + "root_cause": "No transformation was applied during context load to convert Unparsed string arguments to structured JSON for resumed sessions.", + "solution": "Introduce NormalizeToolCallArguments transformer that walks Context.messages and runs arguments.normalize() on each ToolCallFull to convert Unparsed -> Parsed (or fallback object). The transformer is inserted into orchestrator transformer pipeline.", + "commits": [ + "3253412" + ], + "constructs": [ + "NormalizeToolCallArguments", + "NormalizeToolCallArguments::transform", + "NormalizeToolCallArguments::new" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_domain/tests/test_stringified_tool_calls.rs", + "crates/forge_domain/src/transformer/normalize_tool_args.rs (inline tests)" + ], + "test_functions": [ + "test_normalize_stringified_tool_call_arguments", + "test_parsed_arguments_unchanged", + "test_no_tool_calls_unchanged" + ], + "source_commits": [ + "3253412" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/transformer/sort_tools.rs": { + "short_description": "Transformer to sort tools in a context by a given order.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "SortTools", + "type": "struct" + }, + { + "name": "Transformer", + "type": "trait" + }, + { + "name": "Context", + "type": "type" + }, + { + "name": "ToolOrder", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_infra/src/auth/util.rs": { + "short_description": "OAuth and HTTP helper utilities including token handling and HTTP client setup.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "calculate_token_expiry", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 676, + "context": "let expiry = calculate_token_expiry(None, expiry_duration);" + } + ] + }, + { + "name": "into_domain", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/http/standard.rs", + "line": 81, + "context": "Ok(into_domain(token_result))" + } + ] + }, + { + "name": "build_http_client", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/http/standard.rs", + "line": 86, + "context": "build_http_client(config.custom_headers.as_ref())" + }, + { + "file": "crates/forge_infra/src/auth/http/anthropic.rs", + "line": 6, + "context": "use crate::auth::util::build_http_client;" + }, + { + "file": "crates/forge_infra/src/auth/http/anthropic.rs", + "line": 109, + "context": "build_http_client(config.custom_headers.as_ref())" + }, + { + "file": "crates/forge_infra/src/auth/http/github.rs", + "line": 5, + "context": "use crate::auth::util::build_http_client;" + }, + { + "file": "crates/forge_infra/src/auth/http/github.rs", + "line": 33, + "context": "build_http_client(config.custom_headers.as_ref())" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 236, + "context": "let http_client = build_http_client(self.config.custom_headers.as_ref()).map_err(|e| {" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 339, + "context": "build_http_client(self.oauth_config.custom_headers.as_ref()).map_err(|e| {" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 556, + "context": "let http_client = build_http_client(self.config.custom_headers.as_ref()).map_err(|e| {" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", 
+ "line": 705, + "context": "let http_client = build_http_client(config.custom_headers.as_ref())" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 830, + "context": "let http_client = build_http_client(config.custom_headers.as_ref())" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 955, + "context": "let response = build_http_client(config.custom_headers.as_ref())" + } + ] + }, + { + "name": "build_oauth_credential", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 174, + "context": "let mut credential = build_oauth_credential(" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 279, + "context": "build_oauth_credential(" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 611, + "context": "let mut credential = build_oauth_credential(" + } + ] + }, + { + "name": "build_token_response", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 777, + "context": "return Ok(build_token_response(" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 787, + "context": "return Ok(build_token_response(" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 919, + "context": "return Ok(build_token_response(" + } + ] + }, + { + "name": "extract_oauth_tokens", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 646, + "context": "let tokens = extract_oauth_tokens(credential)?;" + } + ] + }, + { + "name": "refresh_access_token", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 653, + "context": "let token_response = refresh_access_token(config, refresh_token.as_str()).await?;" + } + ] + }, + { + "name": "github_compliant_http_request", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 240, + 
"context": "let http_fn = |req| github_compliant_http_request(http_client.clone(), req);" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 343, + "context": "let http_fn = |req| github_compliant_http_request(http_client.clone(), req);" + } + ] + }, + { + "name": "inject_custom_headers", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 743, + "context": "inject_custom_headers(&mut headers, &config.custom_headers);" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 953, + "context": "inject_custom_headers(&mut headers, &config.custom_headers);" + } + ] + }, + { + "name": "handle_oauth_error", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 766, + "context": "if handle_oauth_error(error).is_ok() {" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 771, + "context": "return Err(handle_oauth_error(error).unwrap_err().into());" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 799, + "context": "if handle_oauth_error(error).is_ok() {" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 810, + "context": "return Err(handle_oauth_error(error).unwrap_err().into());" + } + ] + }, + { + "name": "parse_token_response", + "type": "function", + "callers": [ + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 775, + "context": "let (access_token, refresh_token, expires_in) = parse_token_response(&body_text)?;" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 786, + "context": "let (access_token, refresh_token, expires_in) = parse_token_response(&body_text)?;" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 915, + "context": "parse_token_response(&token_response.text().await.map_err(|e| {" + } + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_services/src/mcp/service.rs": { + "short_description": "Forge MCP service: manages MCP tool provisioning and remote server integration.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ForgeMcpService", + "type": "struct" + }, + { + "name": "ToolHolder", + "type": "struct" + }, + { + "name": "McpService", + "type": "trait" + }, + { + "name": "McpConfigManager", + "type": "trait" + }, + { + "name": "McpServerInfra", + "type": "trait" + }, + { + "name": "McpClientInfra", + "type": "trait" + }, + { + "name": "McpServerConfig", + "type": "type" + }, + { + "name": "ToolCallFull", + "type": "type" + }, + { + "name": "ToolDefinition", + "type": "type" + }, + { + "name": "ToolName", + "type": "type" + }, + { + "name": "ToolOutput", + "type": "type" + }, + { + "name": "McpExecutor", + "type": "type" + }, + { + "name": "McpServers", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/fetch.rs": { + "short_description": "Implementation of ForgeFetch service: fetch URL content with HTML->Markdown handling.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ForgeFetch", + "type": "struct" + }, + { + "name": "NetFetchService", + "type": "trait" + }, + { + "name": "HttpResponse", + "type": "type" + }, + { + "name": "ResponseContext", + "type": "type" + }, + { + "name": "ForgeFetch::new", + "type": "function" + }, + { + "name": "ForgeFetch::fetch", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/plan_create.rs": { + "short_description": "Tool to create a dated plan Markdown file with versioning.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ForgePlanCreate", + "type": "struct" + }, + { + "name": "PlanCreateService", + "type": "trait" + }, + { + "name": "PlanCreateOutput", + "type": "type" + }, + { + "name": 
"ForgePlanCreate::new", + "type": "function" + }, + { + "name": "ForgePlanCreate::create_plan", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/skill.rs": { + "short_description": "Loads and caches domain skills from a repository for tool selection.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ForgeSkillFetch", + "type": "struct" + }, + { + "name": "SkillFetchService", + "type": "trait" + }, + { + "name": "SkillRepository", + "type": "trait" + }, + { + "name": "ForgeSkillFetch::new", + "type": "function" + }, + { + "name": "ForgeSkillFetch::fetch_skill", + "type": "function" + }, + { + "name": "ForgeSkillFetch::list_skills", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/compact/summary.rs": { + "short_description": "Context summary structures to compactly represent messages and tool calls.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ContextSummary", + "type": "struct" + }, + { + "name": "SummaryBlock", + "type": "struct" + }, + { + "name": "SummaryMessage", + "type": "enum" + }, + { + "name": "SummaryToolCall", + "type": "struct" + }, + { + "name": "SummaryTool", + "type": "enum" + }, + { + "name": "TodoChange", + "type": "struct" + }, + { + "name": "TodoChangeKind", + "type": "enum" + } + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Apply content-keyed todo updates when summarizing tool calls", + "problem": "Summarizer previously matched todo updates by ID and replaced the whole list, which would break when the model sent content-keyed incremental updates.", + "root_cause": "Summary extraction used id-based maps (todo.id) and removed items by comparing after-ids, incompatible with content-keyed incoming items.", + "solution": "Updated summarizer to iterate input.todos items, treat Cancelled by content 
(retain removal), update existing items by content, or append new items with empty id (server-generated). Also updated extraction to compute TodoChange entries by matching content.", + "lesson_learned": "Summary codepaths must reflect the canonical tool contract; when the tool switches to content-keyed diffs, all places consuming tool input must be updated accordingly.", + "commits": [ + "e84bc7f" + ], + "constructs": [ + "From<&Context> for ContextSummary", + "extract_tool_info" + ] + } + ], + "related_files": [ + { + "path": "crates/forge_domain/src/tools/catalog.rs", + "relationship": "frequently changed together (from commit history)", + "likely_co_change": true, + "reason_to_check": "historically modified together with this file" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/openai_responses/codex_transformer.rs": { + "short_description": "Transformer adjusting Codex/Responses API requests to Codex backend quirks.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "CodexTransformer", + "type": "struct" + }, + { + "name": "Transformer", + "type": "trait" + }, + { + "name": "CreateResponse", + "type": "type" + } + ], + "insights": [ + { + "type": "bug_fix", + "category": "Behavior", + "title": "Stop forcing low text verbosity for codex transformer", + "problem": "Transformer forcibly set response text verbosity to Low, overwriting user-specified verbosity and preventing richer outputs when needed.", + "root_cause": "Code always inserted/overrode a TextResponseParam.verbosity = Low regardless of existing request settings.", + "solution": "Removed code that forced text.verbosity = Some(Verbosity::Low); leave the request's text verbosity untouched while still configuring other fields (reasoning.effort = High). 
Adjusted/removed tests accordingly.", + "lesson_learned": "Don't hard-enforce provider-level parameters that override user/agent intent unless absolutely necessary; if you must, make it configurable and document the reason.", + "commits": [ + "340a752" + ], + "constructs": [ + "CodexTransformer::transform" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/transforms/auth_system_message.rs": { + "short_description": "Anthropic DTO transform: prepends OAuth auth system message when enabled.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "AuthSystemMessage", + "type": "struct" + }, + { + "name": "Request", + "type": "type" + }, + { + "name": "SystemMessage", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/transforms/mod.rs": { + "short_description": "Module exports for Anthropic transform pipeline components.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "AuthSystemMessage", + "type": "type" + }, + { + "name": "CapitalizeToolNames", + "type": "type" + }, + { + "name": "DropInvalidToolUse", + "type": "type" + }, + { + "name": "EnforceStrictObjectSchema", + "type": "type" + }, + { + "name": "ReasoningTransform", + "type": "type" + }, + { + "name": "RemoveOutputFormat", + "type": "type" + }, + { + "name": "SanitizeToolIds", + "type": "type" + }, + { + "name": "SetCache", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/transforms/set_cache.rs": { + "short_description": "Anthropic SetCache transformer: caches first/last messages and flips second-to-last.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "SetCache", + "type": "struct" + }, + { + "name": "Transformer", + "type": "trait" + }, + { + "name": "Request", + "type": "type" + } + ], + "generated_at_commit": 
"7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/set_cache.rs": { + "short_description": "OpenAI SetCache transformer: applies two-breakpoint caching on messages.", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "SetCache", + "type": "struct" + }, + { + "name": "Transformer", + "type": "trait" + }, + { + "name": "Request", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/github-pr-comments/scripts/pr-comments.sh": { + "short_description": "GitHub PR comments extraction script for active review threads.", + "category": "SCRIPTING", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/resolve-conflicts/scripts/handle-deleted-modified.sh": { + "short_description": "Git conflict helper: backups and analysis for deleted-modified file conflicts.", + "category": "SCRIPTING", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/resolve-conflicts/scripts/validate-conflicts.sh": { + "short_description": "Validation script to ensure no unresolved Git conflicts remain.", + "category": "SCRIPTING", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/resolve-fixme/scripts/find-fixme.sh": { + "short_description": "Script to locate FIXME comments with surrounding context.", + "category": "SCRIPTING", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/test-reasoning/scripts/test-reasoning.sh": { + "short_description": "Bash test script validating reasoning parameter serialization across providers", + "category": "TEST", + "key_constructs": [ + { + "name": "json_get", + "type": "function" + }, + { + "name": "assert_field", + "type": "function" + }, + { + "name": "run_test", + "type": "function" + }, + { + 
"name": "run_test_expect_failure", + "type": "function" + }, + { + "name": "next_result_file", + "type": "function" + }, + { + "name": "cleanup", + "type": "function" + }, + { + "name": "BINARY", + "type": "variable" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/write-release-notes/scripts/fetch-release-data.sh": { + "short_description": "Fetches release metadata and PR details from GitHub for a version", + "category": "SCRIPT", + "key_constructs": [ + { + "name": "VERSION", + "type": "variable" + }, + { + "name": "REPO", + "type": "variable" + }, + { + "name": "gh api", + "type": "command" + }, + { + "name": "jq", + "type": "command" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/write-release-notes/scripts/validate-release-notes.sh": { + "short_description": "Validates release notes length against a 2000 char limit", + "category": "SCRIPT", + "key_constructs": [ + { + "name": "MAX_CHARS", + "type": "variable" + }, + { + "name": "content", + "type": "variable" + }, + { + "name": "char_count", + "type": "variable" + }, + { + "name": "exit", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/scripts/bounty/src/api.ts": { + "short_description": "GitHub REST API wrapper for bounty syncing (TypeScript)", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "GitHubApi", + "type": "interface" + }, + { + "name": "GitHubRestApi", + "type": "class" + }, + { + "name": "request", + "type": "function" + }, + { + "name": "getIssue", + "type": "function" + }, + { + "name": "getPullRequest", + "type": "function" + }, + { + "name": "listIssuesWithLabelPrefix", + "type": "function" + }, + { + "name": "addLabels", + "type": "function" + }, + { + "name": "removeLabel", + "type": "function" + }, + { + "name": "addComment", + "type": "function" + } + ], + "generated_at_commit": 
"7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/scripts/bounty/src/rules.ts": { + "short_description": "Rules engine computing label patches for bounty syncing", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "BOUNTY_GENERIC", + "type": "constant" + }, + { + "name": "BOUNTY_CLAIMED", + "type": "constant" + }, + { + "name": "BOUNTY_REWARDED", + "type": "constant" + }, + { + "name": "VALUE_LABEL_RE", + "type": "constant" + }, + { + "name": "isBountyValue", + "type": "function" + }, + { + "name": "linkedIssueNumbers", + "type": "function" + }, + { + "name": "computeIssuePatch", + "type": "function" + }, + { + "name": "computePrPatch", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/scripts/bounty/src/sync-all-issues.ts": { + "short_description": "CLI tool to plan/apply bounty patches for all issues", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "planAllIssues", + "type": "function" + }, + { + "name": "syncAllIssues", + "type": "function" + }, + { + "name": "GitHubRestApi", + "type": "class" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/scripts/bounty/src/sync-issue.ts": { + "short_description": "CLI tool to plan/apply bounty patch for a single issue", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "resolveToken", + "type": "function" + }, + { + "name": "PlanIssueInput", + "type": "interface" + }, + { + "name": "planIssue", + "type": "function" + }, + { + "name": "syncIssue", + "type": "function" + }, + { + "name": "applyPatch", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/scripts/bounty/src/types.ts": { + "short_description": "Core domain types for v2 bounty sync (GitHub objects and patches)", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Label", + "type": "interface" + }, + { + "name": "User", + "type": 
"interface" + }, + { + "name": "Issue", + "type": "interface" + }, + { + "name": "PullRequest", + "type": "interface" + }, + { + "name": "IssueState", + "type": "interface" + }, + { + "name": "PrState", + "type": "interface" + }, + { + "name": "LabelOp", + "type": "interface" + }, + { + "name": "Patch", + "type": "interface" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/tools_overview.rs": { + "short_description": "DTO for tools overview categorized by source (system/agents/MCP)", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ToolsOverview", + "type": "struct" + }, + { + "name": "From", + "type": "function" + }, + { + "name": "impl ToolsOverview", + "type": "impl" + }, + { + "name": "Default", + "type": "trait" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/fmt/content.rs": { + "short_description": "Formatting trait for chat content to render in UI", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "FormatContent", + "type": "trait" + }, + { + "name": "ChatResponseContent", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/transformers/compaction.rs": { + "short_description": "Composes a pipeline of context compaction transforms", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "SummaryTransformer", + "type": "struct" + }, + { + "name": "new", + "type": "function" + }, + { + "name": "transform", + "type": "function", + "callers": [ + { + "file": "crates/forge_app/src/compact.rs", + "line": 35, + "context": "SummaryTransformer::new(&self.environment.cwd).transform(context_summary)" + } + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/transformers/dedupe_role.rs": { + "short_description": "Transformer deduplicates consecutive messages of a role", + "category": 
"SOURCE_CODE", + "key_constructs": [ + { + "name": "DedupeRole", + "type": "struct" + }, + { + "name": "new", + "type": "function", + "callers": [ + { + "file": "crates/forge_app/src/transformers/compaction.rs", + "line": 43, + "context": ".pipe(DedupeRole::new(Role::User))" + } + ] + }, + { + "name": "transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/transformers/drop_role.rs": { + "short_description": "Transformer drops all messages of a specific role", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "DropRole", + "type": "struct" + }, + { + "name": "new", + "type": "function", + "callers": [ + { + "file": "crates/forge_app/src/transformers/compaction.rs", + "line": 42, + "context": "DropRole::new(Role::System)" + } + ] + }, + { + "name": "transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/transformers/mod.rs": { + "short_description": "Module re-exports for transformer components", + "category": "SOURCE_CODE", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/truncation/mod.rs": { + "short_description": "Truncation submodules for content and search", + "category": "SOURCE_CODE", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/truncation/truncate_fetch.rs": { + "short_description": "Truncates fetch content to a character limit", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "TruncatedFetchOutput", + "type": "struct" + }, + { + "name": "truncate_fetch_content", + "type": "function", + "callers": [ + { + "file": "crates/forge_app/src/operation.rs", + "line": 16, + "context": "Stderr, Stdout, TruncationMode, truncate_fetch_content, truncate_search_output," + }, + { + "file": "crates/forge_app/src/operation.rs", + 
"line": 567, + "context": "truncate_fetch_content(&output.content, config.max_fetch_chars);" + } + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/truncation/truncate_search.rs": { + "short_description": "Truncation logic for search results by lines/bytes", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "TruncationMode", + "type": "enum" + }, + { + "name": "TruncatedSearchOutput", + "type": "struct" + }, + { + "name": "truncate_search_output", + "type": "function", + "callers": [ + { + "file": "crates/forge_app/src/operation.rs", + "line": 16, + "context": "Stderr, Stdout, TruncationMode, truncate_fetch_content, truncate_search_output," + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 352, + "context": "let truncated_output = truncate_search_output(" + } + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/truncation/truncate_shell.rs": { + "short_description": "Clips and truncates shell stdout/stderr with metadata", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "truncate_shell_output", + "type": "function", + "callers": [ + { + "file": "crates/forge_app/src/operation.rs", + "line": 17, + "context": "truncate_shell_output," + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 600, + "context": "let truncated_output = truncate_shell_output(" + } + ] + }, + { + "name": "Stdout", + "type": "struct" + }, + { + "name": "Stderr", + "type": "struct" + }, + { + "name": "TruncatedShellOutput", + "type": "struct" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/build.rs": { + "short_description": "CI helper for creating a release build job", + "category": "BUILD", + "key_constructs": [ + { + "name": "create_build_release_job_for_publishing", + "type": "function" + }, + { + "name": "release_build_job", + "type": "function" + } + ], + 
"generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/label_sync_job.rs": { + "short_description": "CI job to synchronize GitHub labels using a workflow step", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "label_sync_job", + "type": "function", + "callers": [ + { + "file": "crates/forge_ci/src/workflows/labels.rs", + "line": 4, + "context": "use crate::jobs::label_sync_job;" + }, + { + "file": "crates/forge_ci/src/workflows/labels.rs", + "line": 15, + "context": ".add_job(\"label-sync\", label_sync_job());" + } + ] + }, + { + "name": "Job", + "type": "type" + }, + { + "name": "Step", + "type": "type" + }, + { + "name": "Permissions", + "type": "type" + }, + { + "name": "Expression", + "type": "type" + }, + { + "name": "Event", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/lint.rs": { + "short_description": "CI lint helpers to build fmt and clippy commands", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "cargo_cmd", + "type": "function" + }, + { + "name": "fmt_base", + "type": "function" + }, + { + "name": "clippy_base", + "type": "function" + }, + { + "name": "fmt_cmd", + "type": "function", + "callers": [ + { + "file": "crates/forge_ci/src/workflows/autofix.rs", + "line": 21, + "context": ".add_step(Step::new(\"Cargo Fmt\").run(jobs::fmt_cmd(true)))" + } + ] + }, + { + "name": "clippy_cmd", + "type": "function", + "callers": [ + { + "file": "crates/forge_ci/src/workflows/autofix.rs", + "line": 22, + "context": ".add_step(Step::new(\"Cargo Clippy\").run(jobs::clippy_cmd(true)))" + } + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/release_draft_pr.rs": { + "short_description": "CI job that creates a draft release for PRs", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "create_draft_release_pr_job", + "type": "function", 
+ "callers": [ + { + "file": "crates/forge_ci/src/workflows/ci.rs", + "line": 34, + "context": "let draft_release_pr_job = jobs::create_draft_release_pr_job();" + } + ] + }, + { + "name": "Job", + "type": "type" + }, + { + "name": "Step", + "type": "type" + }, + { + "name": "Expression", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/release_homebrew.rs": { + "short_description": "CI job to update Homebrew formula on release", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "release_homebrew_job", + "type": "function", + "callers": [ + { + "file": "crates/forge_ci/src/workflows/release_publish.rs", + "line": 4, + "context": "use crate::jobs::{ReleaseBuilderJob, release_homebrew_job, release_npm_job};" + }, + { + "file": "crates/forge_ci/src/workflows/release_publish.rs", + "line": 11, + "context": "let homebrew_release_job = release_homebrew_job().add_needs(\"build_release\");" + } + ] + }, + { + "name": "Step", + "type": "type" + }, + { + "name": "Job", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/release_npm.rs": { + "short_description": "CI release job using matrix for multiple NPM repos", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "release_npm_job", + "type": "function", + "callers": [ + { + "file": "crates/forge_ci/src/workflows/release_publish.rs", + "line": 4, + "context": "use crate::jobs::{ReleaseBuilderJob, release_homebrew_job, release_npm_job};" + }, + { + "file": "crates/forge_ci/src/workflows/release_publish.rs", + "line": 10, + "context": "let npm_release_job = release_npm_job().add_needs(\"build_release\");" + } + ] + }, + { + "name": "create_npm_matrix", + "type": "function" + }, + { + "name": "Value", + "type": "type" + }, + { + "name": "serde_json", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, 
+ "crates/forge_ci/src/workflows/labels.rs": { + "short_description": "Workflow generator for GitHub Labels Sync", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "generate_labels_workflow", + "type": "function", + "callers": [ + { + "file": "crates/forge_ci/tests/ci.rs", + "line": 20, + "context": "workflow::generate_labels_workflow();" + } + ] + }, + { + "name": "Workflow", + "type": "type" + }, + { + "name": "Event", + "type": "type" + }, + { + "name": "Push", + "type": "type" + }, + { + "name": "Step", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/workflows/release_drafter.rs": { + "short_description": "Workflow generator for Release Drafter", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "generate_release_drafter_workflow", + "type": "function", + "callers": [ + { + "file": "crates/forge_ci/tests/ci.rs", + "line": 10, + "context": "workflow::generate_release_drafter_workflow();" + } + ] + }, + { + "name": "Workflow", + "type": "type" + }, + { + "name": "PullRequestTarget", + "type": "type" + }, + { + "name": "Event", + "type": "type" + }, + { + "name": "Schedule", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/workflows/release_publish.rs": { + "short_description": "Workflow generator for publishing releases (npm/homebrew)", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "release_publish", + "type": "function", + "callers": [ + { + "file": "crates/forge_ci/tests/ci.rs", + "line": 15, + "context": "workflow::release_publish();" + } + ] + }, + { + "name": "ReleaseBuilderJob", + "type": "type" + }, + { + "name": "release_homebrew_job", + "type": "function" + }, + { + "name": "release_npm_job", + "type": "function" + }, + { + "name": "Workflow", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_ci/src/workflows/stale.rs": { + "short_description": "Workflow generator for closing stale issues/PRs", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "generate_stale_workflow", + "type": "function", + "callers": [ + { + "file": "crates/forge_ci/tests/ci.rs", + "line": 25, + "context": "workflow::generate_stale_workflow();" + } + ] + }, + { + "name": "Workflow", + "type": "type" + }, + { + "name": "Env", + "type": "type" + }, + { + "name": "Schedule", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/auth/auth_params.rs": { + "short_description": "Auth code parameters for OAuth flows", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "AuthCodeParams", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/auth/mod.rs": { + "short_description": "Auth module re-exports for auth submodules", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "auth_context", + "type": "module" + }, + { + "name": "auth_method", + "type": "module" + }, + { + "name": "auth_params", + "type": "module" + }, + { + "name": "auth_token_response", + "type": "module" + }, + { + "name": "credentials", + "type": "module" + }, + { + "name": "new_types", + "type": "module" + }, + { + "name": "oauth_config", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/auth/oauth_config.rs": { + "short_description": "OAuth configuration data structure", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ClientId", + "type": "type" + }, + { + "name": "OAuthConfig", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/compact/mod.rs": { + "short_description": "Compact module public API re-exports", + "category": "SOURCE_CODE", + "key_constructs": 
[ + { + "name": "compact_config", + "type": "module" + }, + { + "name": "result", + "type": "module" + }, + { + "name": "strategy", + "type": "module" + }, + { + "name": "summary", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/compact/result.rs": { + "short_description": "Represents compaction metrics for context data", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "CompactionResult", + "type": "type" + }, + { + "name": "CompactionResult::new", + "type": "function" + }, + { + "name": "CompactionResult::token_reduction_percentage", + "type": "function" + }, + { + "name": "CompactionResult::message_reduction_percentage", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/policies/config.rs": { + "short_description": "Policy configuration and evaluation utilities", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "PolicyConfig", + "type": "type" + }, + { + "name": "PolicyConfig::new", + "type": "function" + }, + { + "name": "PolicyConfig::add_policy", + "type": "function" + }, + { + "name": "PolicyConfig::eval", + "type": "function" + }, + { + "name": "PolicyConfig::find_rules", + "type": "function" + }, + { + "name": "Display for PolicyConfig", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/policies/mod.rs": { + "short_description": "Exports for policies submodules", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "config", + "type": "module" + }, + { + "name": "engine", + "type": "module" + }, + { + "name": "operation", + "type": "module" + }, + { + "name": "policy", + "type": "module" + }, + { + "name": "rule", + "type": "module" + }, + { + "name": "types", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_domain/src/policies/policy.rs": { + "short_description": "Policy enum for access control rules with logic", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Policy", + "type": "type" + }, + { + "name": "Policy::eval", + "type": "function" + }, + { + "name": "Policy::find_rules", + "type": "function" + }, + { + "name": "Policy::permission", + "type": "function" + }, + { + "name": "Display for Policy", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/policies/types.rs": { + "short_description": "Permission enum for access control decisions", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Permission", + "type": "type" + }, + { + "name": "Display for Permission", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/mod.rs": { + "short_description": "Tools module exports for call/definition", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "call", + "type": "module" + }, + { + "name": "definition", + "type": "module" + }, + { + "name": "catalog", + "type": "module" + }, + { + "name": "result", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/auth/error.rs": { + "short_description": "Error enum for authentication flows", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Error", + "type": "type" + }, + { + "name": "Error::InitiationFailed", + "type": "variant" + }, + { + "name": "Error::Timeout", + "type": "variant" + }, + { + "name": "Error::Expired", + "type": "variant" + }, + { + "name": "Error::Denied", + "type": "variant" + }, + { + "name": "Error::PollFailed", + "type": "variant" + }, + { + "name": "Error::CompletionFailed", + "type": "variant" + }, + { + "name": "Error::RefreshFailed", + "type": "variant" + }, + { + "name": 
"Error::InvalidContext", + "type": "variant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/completer/mod.rs": { + "short_description": "Module re-exports for command and input completers", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "CommandCompleter", + "type": "type" + }, + { + "name": "InputCompleter", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/completer/search_term.rs": { + "short_description": "Search term parsing logic for waterfall '@' markers", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "SearchTerm", + "type": "type" + }, + { + "name": "TermResult", + "type": "type" + }, + { + "name": "SearchTerm::new", + "type": "function" + }, + { + "name": "SearchTerm::process", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/mcp/manager.rs": { + "short_description": "Manager for reading/writing MCP config with infra", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ForgeMcpManager", + "type": "type" + }, + { + "name": "ForgeMcpManager::new", + "type": "function" + }, + { + "name": "ForgeMcpManager::read_config", + "type": "function" + }, + { + "name": "ForgeMcpManager::config_path", + "type": "function" + }, + { + "name": "McpConfigManager", + "type": "trait" + }, + { + "name": "McpConfigManager for ForgeMcpManager", + "type": "trait_impl" + }, + { + "name": "McpConfig", + "type": "type" + }, + { + "name": "Scope", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/mcp/tool.rs": { + "short_description": "Executor for MCP tools invoking via client infra", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "McpExecutor", + "type": "type" + }, + { + "name": "McpExecutor::new", + "type": "function" + }, 
+ { + "name": "McpExecutor::call_tool", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/followup.rs": { + "short_description": "Follow-up tool to clarify ambiguities with user infra", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ForgeFollowup", + "type": "type" + }, + { + "name": "ForgeFollowup::new", + "type": "function" + }, + { + "name": "FollowUpService for ForgeFollowup", + "type": "trait_impl" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/fs_remove.rs": { + "short_description": "File removal service with snapshot coordination", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ForgeFsRemove", + "type": "type" + }, + { + "name": "ForgeFsRemove::new", + "type": "function" + }, + { + "name": "FsRemoveService for ForgeFsRemove", + "type": "trait_impl" + }, + { + "name": "ForgeFsRemove::remove", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/fs_undo.rs": { + "short_description": "Undo service for file operations with snapshot restore", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ForgeFsUndo", + "type": "type" + }, + { + "name": "ForgeFsUndo::new", + "type": "function" + }, + { + "name": "FsUndoService for ForgeFsUndo", + "type": "trait_impl" + }, + { + "name": "ForgeFsUndo::undo", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/utils/path.rs": { + "short_description": "Utility to assert absolute paths with tests", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "assert_absolute_path", + "type": "function", + "callers": [ + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 11, + "context": "use 
crate::utils::assert_absolute_path;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 343, + "context": "assert_absolute_path(path)?;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 421, + "context": "assert_absolute_path(path)?;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_read.rs", + "line": 12, + "context": "use crate::utils::assert_absolute_path;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_read.rs", + "line": 117, + "context": "assert_absolute_path(path)?;" + }, + { + "file": "crates/forge_services/src/tool_services/image_read.rs", + "line": 9, + "context": "use crate::utils::assert_absolute_path;" + }, + { + "file": "crates/forge_services/src/tool_services/image_read.rs", + "line": 58, + "context": "assert_absolute_path(path)?;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_write.rs", + "line": 12, + "context": "use crate::utils::assert_absolute_path;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_write.rs", + "line": 53, + "context": "assert_absolute_path(path)?;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_undo.rs", + "line": 7, + "context": "use crate::utils::assert_absolute_path;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_undo.rs", + "line": 28, + "context": "assert_absolute_path(path)?;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_remove.rs", + "line": 7, + "context": "use crate::utils::assert_absolute_path;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_remove.rs", + "line": 30, + "context": "assert_absolute_path(path)?;" + } + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/utils/temp_dir.rs": { + "short_description": "Temporary directory helper with markers", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "TempDir", + "type": "type" + }, + { + "name": 
"TempDir::new", + "type": "function" + }, + { + "name": "TempDir::path", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/client_id/android.rs": { + "short_description": "Android-specific persistent client ID manager", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "get_or_create_client_id", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/client_id/generic.rs": { + "short_description": "Non-Android client ID via hardware identifiers", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "get_or_create_client_id", + "type": "function", + "callers": [ + { + "file": "crates/forge_tracker/src/dispatch.rs", + "line": 34, + "context": "client_id::get_or_create_client_id()" + }, + { + "file": "crates/forge_tracker/src/client_id/mod.rs", + "line": 9, + "context": "pub use generic::get_or_create_client_id;" + } + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/client_id/mod.rs": { + "short_description": "Platform-specific client_id module selector", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "get_or_create_client_id", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/collect/mod.rs": { + "short_description": "Event collection trait and posthog integration", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Collect", + "type": "trait" + }, + { + "name": "posthog", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/collect/posthog.rs": { + "short_description": "PostHog tracker implementation of Collect", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Tracker", + "type": "type" + }, + { + "name": 
"Tracker::new", + "type": "function" + }, + { + "name": "Payload", + "type": "type" + }, + { + "name": "Collect for Tracker", + "type": "trait_impl" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/google/response.rs": { + "short_description": "Google model/response DTO to domain conversion", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Model", + "type": "type" + }, + { + "name": "From for forge_domain::Model", + "type": "function" + }, + { + "name": "EventData", + "type": "type" + }, + { + "name": "ChatCompletionMessage", + "type": "type" + } + ], + "insights": [ + { + "type": "feature", + "category": "Parsing", + "title": "Support Ping/Cost events with numeric-or-string cost fields", + "problem": "Some proxies send heartbeat 'ping' events carrying cost (sometimes as string). Previously these were unrecognized.", + "root_cause": "EventData enum didn't include a Ping variant with tolerant number/string cost.", + "solution": "Added StringOrF64 untagged enum and PingEvent struct; updated TryFrom to map Ping to ChatCompletionMessage with Usage containing cost. 
Added tests for numeric and string costs and unknown events.", + "lesson_learned": "Add tolerant parsing for proxy-specific heartbeat events to capture usage/cost information that may arrive outside normal message chunks.", + "commits": [ + "40cfcc8" + ], + "constructs": [ + "StringOrF64", + "PingEvent", + "TryFrom for ChatCompletionMessage (ping handling)" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/model.rs": { + "short_description": "OpenAI model DTOs with pricing and conversion", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Model", + "type": "type" + }, + { + "name": "Architecture", + "type": "type" + }, + { + "name": "Pricing", + "type": "type" + }, + { + "name": "ListModelResponse", + "type": "type" + }, + { + "name": "From for forge_domain::Model", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/call/args.rs": { + "short_description": "Serialization helpers for tool call arguments", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ToolCallArguments", + "type": "type" + }, + { + "name": "ToolCallArguments::from_json", + "type": "function" + }, + { + "name": "ToolCallArguments::from_parameters", + "type": "function" + }, + { + "name": "ToolCallArguments::parse", + "type": "function" + }, + { + "name": "ToolCallArguments::normalize", + "type": "function" + }, + { + "name": "ToolCallArguments::into_string", + "type": "function" + } + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Handle stringified/double-encoded tool call arguments on deserialize and normalize", + "problem": "Tool call arguments persisted by some providers (e.g., kimi-k2p5-turbo) arrive as a JSON string (double-encoded) or as malformed JSON strings. 
Previously these were stored as raw Unparsed strings and sent back to APIs as strings, causing 400 errors.", + "root_cause": "Deserialize implementation didn't attempt to repair or unwrap Value::String containing JSON; persisted conversations could therefore carry Unparsed string payloads.", + "solution": "Deserialize now checks for Value::String, attempts json_repair to parse/repair it and returns ToolCallArguments::Parsed when possible; added normalize() that promotes Unparsed -> Parsed or builds a fallback object {\"_raw_content\": \"...\"} so downstream code always receives structured JSON.", + "commits": [ + "3253412" + ], + "constructs": [ + "Deserialize for ToolCallArguments", + "ToolCallArguments::normalize", + "ToolCallArguments::parse", + "ToolCallArguments::from_json" + ] + }, + { + "type": "refactoring", + "category": "Error Handling", + "title": "Preserve raw content with fallback when repair fails", + "problem": "If repair of malformed JSON fails we previously risked losing information or returning a plain Unparsed string to parts of the pipeline expecting a JSON object.", + "root_cause": "Downstream request builders required structured JSON; an Unparsed string could still be serialized as a JSON string and break provider APIs.", + "solution": "On failed repair, normalize() creates a Parsed Value::Object with key _raw_content holding the original string so callers always get an object.", + "commits": [ + "3253412" + ], + "constructs": [ + "ToolCallArguments::normalize" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_domain/src/tools/call/args.rs (inline unit tests)", + "crates/forge_domain/tests/test_stringified_tool_calls.rs" + ], + "test_functions": [ + "test_normalize_unparsed_json_string", + "test_roundtrip_stringified_json", + "test_deserialize_stringified_json_object", + "test_normalize_malformed_json_from_dump", + "test_normalize_real_kimi_string" + ], + "source_commits": [ + "3253412" + ] + }, + "generated_at_commit": 
"7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/call/parser.rs": { + "short_description": "Parser for XML-like tool call definitions", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ToolCallParsed", + "type": "type" + }, + { + "name": "parse", + "type": "function" + }, + { + "name": "ToolCallFull", + "type": "type" + }, + { + "name": "From for ToolCallFull", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/openai_responses/mod.rs": { + "short_description": "OpenAI Responses provider module wiring", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "codex_transformer", + "type": "module" + }, + { + "name": "repository", + "type": "module" + }, + { + "name": "request", + "type": "module" + }, + { + "name": "response", + "type": "module" + }, + { + "name": "OpenAIResponsesResponseRepository", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/openai_responses/repository.rs": { + "short_description": "OpenAI Responses repository for Codex/OpenAI providers", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "OpenAIResponsesProvider", + "type": "type" + }, + { + "name": "OpenAIResponsesProvider::new", + "type": "function" + }, + { + "name": "chat", + "type": "function" + }, + { + "name": "models", + "type": "function", + "callers": [ + { + "file": "crates/forge_repo/src/provider/chat.rs", + "line": 183, + "context": "Some(ProviderResponse::OpenAIResponses) => self.codex_repo.models(provider).await," + } + ] + }, + { + "name": "OpenAIResponsesResponseRepository", + "type": "type" + } + ], + "insights": [ + { + "type": "bug_fix", + "category": "API", + "title": "Preserve full responses endpoint for 'compatible' providers and support conversation headers for Codex", + "problem": "Providers that configure a full Responses 
endpoint were being rewritten to base/v1/responses causing wrong endpoint. For Codex style providers conversation continuity headers were not included when a conversation id was available.", + "root_cause": "Special-case list of providers omitted OPENAI_RESPONSES_COMPATIBLE; get_headers lacked an API for conversation-scoped headers.", + "solution": "Treat OPENAI_RESPONSES_COMPATIBLE like CODEX/OPENCODE_ZEN and preserve configured path. Add get_headers_for_conversation(conversation_id: Option<&str>) that injects x-client-request-id and session_id for Codex when conversation id provided.", + "lesson_learned": "When supporting multiple similar provider types, centralize provider-family behavior checks; when adding context-specific headers, expose a variant of header builder that accepts the context (conversation id).", + "commits": [ + "3ea8f23", + "5b18cce" + ], + "constructs": [ + "OpenAIResponsesProvider::new", + "OpenAIResponsesProvider::get_headers_for_conversation", + "create_headers usage in request pipeline" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_repo/src/provider/openai_responses/repository.rs tests added in commits" + ], + "test_functions": [ + "test_openai_responses_provider_new_preserves_existing_base_path_for_compatible_provider", + "test_get_headers_codex_with_conversation_id_includes_conversation_headers", + "test_get_headers_non_codex_with_conversation_id_omits_conversation_headers" + ], + "source_commits": [ + "3ea8f23", + "5b18cce" + ] + } + }, + { + "type": "bug_fix", + "category": "Parsing", + "title": "Mark SSE transport parse errors as retryable for OpenAI Responses", + "problem": "SSE parse errors coming from transport layer should be retryable, while UTF8 parse errors are not.", + "root_cause": "eventsource_stream's EventStreamError variants mix transport-level errors and non-retryable parse errors. 
Previously all SSE parse errors were returned as non-retryable.", + "solution": "Wrapped SSE parse error into into_sse_parse_error which wraps transport errors into forge_domain::Error::Retryable and preserves other errors as non-retryable. Updated tests to assert behavior.", + "lesson_learned": "Differentiate retryability based on SSE error kind; treat transport-level failures as retryable.", + "commits": [ + "7d63501" + ], + "constructs": [ + "into_sse_parse_error" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_repo/src/provider/openai_responses/repository.rs (inline tests)" + ], + "source_commits": [ + "7d63501" + ] + } + }, + { + "type": "feature", + "category": "API", + "title": "Support model listing for OpenAI Responses-compatible provider", + "problem": "Earlier code returned an empty model list for codex/OpenAI Responses provider.", + "root_cause": "OpenAI Responses repository lacked logic to fetch model lists configured via provider.models() URL or hardcoded list.", + "solution": "Implement models() to check provider.models(): if Hardcoded return the models, if Url then perform http_get, check status, deserialize to ListModelResponse, and convert to internal Model list. Added detailed error context and deserialization error messages.", + "lesson_learned": "Provider backends must respect provider configuration (models url or hardcoded). 
When performing HTTP fetches, include contextual error messages and validate HTTP status before parsing JSON.", + "commits": [ + "58827bd" + ], + "constructs": [ + "OpenAIResponsesResponseRepository::models" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/github_copilot_reasoning.rs": { + "short_description": "Transformer converting reasoning_details to GitHub Copilot flat format", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "GitHubCopilotReasoning", + "type": "struct" + }, + { + "name": "Transformer", + "type": "trait" + }, + { + "name": "transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/trim_tool_call_ids.rs": { + "short_description": "Transformer trimming tool_call_ids to 40 chars for OpenAI", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "TrimToolCallIds", + "type": "struct" + }, + { + "name": "transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/google/mod.rs": { + "short_description": "Google DTO module with request/response types", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Request", + "type": "type" + }, + { + "name": "EventData", + "type": "type" + }, + { + "name": "Model", + "type": "type" + }, + { + "name": "Response", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/google/request.rs": { + "short_description": "Google OpenAI-like request structure for Gemini/Google API", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Request", + "type": "struct" + }, + { + "name": "GenerationConfig", + "type": "struct" + }, + { + "name": "ThinkingConfig", + "type": "struct" + }, + { + "name": "Content", + "type": "struct" + }, + 
{ + "name": "Part", + "type": "enum" + }, + { + "name": "ImageSource", + "type": "struct" + }, + { + "name": "FunctionCallData", + "type": "struct" + }, + { + "name": "FunctionResponseData", + "type": "struct" + }, + { + "name": "FileDataInfo", + "type": "struct" + }, + { + "name": "CacheControl", + "type": "enum" + }, + { + "name": "Role", + "type": "enum" + }, + { + "name": "Tool", + "type": "enum" + }, + { + "name": "ToolConfig", + "type": "struct" + }, + { + "name": "Tool", + "type": "variant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/reasoning.rs": { + "short_description": "Reasoning detail data structures for OpenAI DTOs", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ReasoningDetail", + "type": "struct" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/definition/tool_definition.rs": { + "short_description": "ToolDefinition data structure for domain tools", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ToolDefinition", + "type": "struct" + }, + { + "name": "ToolDescription", + "type": "trait" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/definition/usage.rs": { + "short_description": "ToolUsagePrompt and formatting for tool schemas", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ToolUsagePrompt", + "type": "struct" + }, + { + "name": "Parameter", + "type": "struct" + }, + { + "name": "Schema", + "type": "struct" + }, + { + "name": "Display", + "type": "trait" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/result_stream_ext.rs": { + "short_description": "ResultStream extension trait for aggregating streaming results", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ResultStreamExt", + "type": "trait" + }, + { + 
"name": "into_full", + "type": "function" + }, + { + "name": "into_full_streaming", + "type": "function", + "callers": [ + { + "file": "crates/forge_app/src/orch.rs", + "line": 223, + "context": ".into_full_streaming(!tool_supported, self.sender.clone())" + } + ] + } + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Apply merge semantics when assembling streaming messages", + "problem": "ResultStreamExt was accumulating partial usage events, which double-counted tokens for providers that reported cumulative values in deltas.", + "root_cause": "Code assumed partial usage values were incremental deltas and used accumulate instead of merge.", + "solution": "When processing streaming events, detect 'complete' usage (prompt+completion in a message) vs cost-only vs partial: use replacement for full usage, sum costs when only cost arrives, and use Usage::merge (max) for partial cumulative updates. Tests updated to reflect merged totals.", + "lesson_learned": "Streaming aggregator must be provider-aware: prefer idempotent merge rules (max) for cumulative counters to avoid double-counting.", + "commits": [ + "caf374e" + ], + "constructs": [ + "ResultStreamExt::into_full (usage handling)" + ] + }, + { + "type": "bug_fix", + "category": "Parsing", + "title": "Handle mixed usage reporting and cost-only events robustly when combining message streams", + "problem": "Accumulating usage across streaming chunks could double-count in GLM-style streams (complete usage in each chunk) or lose cost when cost-only events arrived before complete usage.", + "root_cause": "Single accumulation logic replaced usage with latest regardless of semantics; cost-only events had zero tokens and needed special handling.", + "solution": "Detect complete usage (prompt_tokens > 0 && completion_tokens > 0) to replace existing usage, detect cost-only events (0 tokens + cost present) to set cost on usage without touching tokens, otherwise accumulate partial usage. 
Added tests: OpenAI-style final-chunk usage, GLM replacement, cost-only add, cost preserved when cost arrives before complete usage.", + "lesson_learned": "Streaming token/usage semantics vary: implement heuristics to decide when to accumulate vs replace, and ensure cost attribution is preserved across event ordering.", + "commits": [ + "40cfcc8" + ], + "constructs": [ + "into_full (stream aggregation)", + "is_complete_usage determination" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_domain/src/result_stream_ext.rs (multiple unit tests added)" + ], + "source_commits": [ + "40cfcc8" + ] + } + } + ], + "tests": { + "exercised_by": [ + "crates/forge_domain/src/result_stream_ext.rs::test_into_full_anthropic_streaming_usage_merge_zero_output", + "crates/forge_domain/src/result_stream_ext.rs::test_into_full_anthropic_streaming_usage_merge" + ], + "test_functions": [], + "source_commits": [ + "caf374e" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transforms/capitalize_tool_names.rs": { + "short_description": "Transformer capitalizing tool names for Anthropic compatibility", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "CapitalizeToolNames", + "type": "struct" + }, + { + "name": "transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/normalize_tool_schema.rs": { + "short_description": "Transformations to normalize tool schemas for OpenAI compatibility", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "NormalizeToolSchema", + "type": "struct" + }, + { + "name": "EnforceStrictToolSchema", + "type": "struct" + }, + { + "name": "EnforceStrictResponseFormatSchema", + "type": "struct" + }, + { + "name": "transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_app/src/dto/openai/transformers/set_reasoning_effort.rs": { + "short_description": "Transformer mapping reasoning config to reasoning_effort field", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "SetReasoningEffort", + "type": "struct" + }, + { + "name": "transform", + "type": "function" + } + ], + "insights": [ + { + "type": "feature", + "category": "Parsing", + "title": "Convert internal reasoning config to provider-specific reasoning_effort parameter", + "problem": "Some provider APIs (Requesty/OpenAI-compatible) expect reasoning_effort strings rather than a structured ReasoningConfig.", + "root_cause": "Mismatch between internal reasoning representation and external provider parameter shapes.", + "solution": "Added transformer SetReasoningEffort which maps ReasoningConfig to reasoning_effort string according to rules (enabled false -> none, effort precedence, budget->effort buckets, enabled true default medium) and removes original reasoning field.", + "lesson_learned": "When targeting multiple provider APIs, add an explicit transformation layer that maps internal strongly-typed config objects to provider-specific primitives. 
Include tests for mapping rules and edge cases.", + "commits": [ + "3444116" + ], + "constructs": [ + "SetReasoningEffort::transform" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_app/src/dto/openai/transformers/set_reasoning_effort.rs (many unit tests)" + ], + "test_functions": [], + "source_commits": [ + "3444116" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/strip_thought_signature.rs": { + "short_description": "Transformer stripping thought signatures from messages", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "StripThoughtSignature", + "type": "struct" + }, + { + "name": "transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/error.rs": { + "short_description": "Anthropic error type definitions", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Error", + "type": "enum" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/mod.rs": { + "short_description": "Anthropic DTO module exporting error, request, response, transforms", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "error", + "type": "module" + }, + { + "name": "request", + "type": "module" + }, + { + "name": "response", + "type": "module" + }, + { + "name": "transforms", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/error.rs": { + "short_description": "OpenAI error types and error handling", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Error", + "type": "enum" + }, + { + "name": "ErrorCode", + "type": "enum" + }, + { + "name": "ErrorResponse", + "type": "struct" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_app/src/dto/openai/mod.rs": { + "short_description": "OpenAI DTO root module exporting submodules", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ProviderPipeline", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/tool_choice.rs": { + "short_description": "ToolChoice representation for OpenAI tooling", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ToolChoice", + "type": "enum" + }, + { + "name": "FunctionName", + "type": "struct" + }, + { + "name": "FunctionType", + "type": "struct" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/call/mod.rs": { + "short_description": "Module imports for tool call domain features", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "args", + "type": "module" + }, + { + "name": "context", + "type": "module" + }, + { + "name": "parser", + "type": "module" + }, + { + "name": "tool_call", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/definition/choice.rs": { + "short_description": "ToolChoice enum for tool definitions", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ToolChoice", + "type": "enum" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/definition/mod.rs": { + "short_description": "Public re-exports for tool definition domain", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "choice", + "type": "module" + }, + { + "name": "name", + "type": "module" + }, + { + "name": "tool_definition", + "type": "module" + }, + { + "name": "usage", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/definition/name.rs": { + "short_description": "ToolName type 
with sanitization and helpers for tool names", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ToolName", + "type": "struct" + }, + { + "name": "ToolName::new", + "type": "function" + }, + { + "name": "ToolName::sanitized", + "type": "function" + }, + { + "name": "ToolName::into_string", + "type": "function" + }, + { + "name": "ToolName::as_str", + "type": "function" + }, + { + "name": "ToolName::into_sanitized", + "type": "function" + }, + { + "name": "ToolName::From", + "type": "impl" + }, + { + "name": "ToolName::From<&str>", + "type": "impl" + }, + { + "name": "ToolName::Display", + "type": "impl" + }, + { + "name": "NamedTool", + "type": "trait" + }, + { + "name": "ToolName::new", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/auth/http/anthropic.rs": { + "short_description": "Anthropic OAuth HTTP provider with non-standard PKCE state handling", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "AnthropicHttpProvider", + "type": "struct" + }, + { + "name": "AnthropicTokenRequest", + "type": "struct" + }, + { + "name": "AnthropicHttpProvider::build_auth_url", + "type": "function" + }, + { + "name": "AnthropicHttpProvider::exchange_code", + "type": "function" + }, + { + "name": "AnthropicHttpProvider::build_http_client", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/auth/http/github.rs": { + "short_description": "GitHub OAuth HTTP provider wrapper around standard provider", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "GithubHttpProvider", + "type": "struct" + }, + { + "name": "GithubHttpProvider::build_auth_url", + "type": "function" + }, + { + "name": "GithubHttpProvider::exchange_code", + "type": "function" + }, + { + "name": "GithubHttpProvider::build_http_client", + "type": "function" + } + ], + "generated_at_commit": 
"7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/auth/http/mod.rs": { + "short_description": "Open OAuth HTTP provider module exports", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "AnthropicHttpProvider", + "type": "item" + }, + { + "name": "GithubHttpProvider", + "type": "item" + }, + { + "name": "StandardHttpProvider", + "type": "item" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/auth/http/standard.rs": { + "short_description": "Standard OAuth2 RFC-compliant HTTP provider", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "StandardHttpProvider", + "type": "struct" + }, + { + "name": "StandardHttpProvider::build_auth_url", + "type": "function" + }, + { + "name": "StandardHttpProvider::exchange_code", + "type": "function" + }, + { + "name": "StandardHttpProvider::build_http_client", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/transforms/drop_invalid_toolcalls.rs": { + "short_description": "Transformer normalizing ToolUse inputs to objects for Anthropic", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "DropInvalidToolUse", + "type": "struct" + }, + { + "name": "DropInvalidToolUse::transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/transforms/enforce_schema.rs": { + "short_description": "Transformer enforcing strict object schemas for Anthropic API", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "EnforceStrictObjectSchema", + "type": "struct" + }, + { + "name": "EnforceStrictObjectSchema::transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/transforms/remove_output_format.rs": { + "short_description": "Transformer 
removing output_format for Vertex Anthropic", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "RemoveOutputFormat", + "type": "struct" + }, + { + "name": "RemoveOutputFormat::transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/transforms/sanitize_tool_ids.rs": { + "short_description": "Transformer sanitizing tool call IDs for Anthropic compatibility", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "SanitizeToolIds", + "type": "struct" + }, + { + "name": "SanitizeToolIds::transform", + "type": "function" + } + ], + "insights": [ + { + "type": "bug_fix", + "category": "API", + "title": "Sanitize tool/call IDs to conform to Anthropic/Vertex Anthropic requirements", + "problem": "Anthropic (Vertex Anthropic) requires tool call IDs to match ^[a-zA-Z0-9_-]+$; IDs containing other characters caused 400 Bad Request errors.", + "root_cause": "Tool call IDs were generated or forwarded verbatim, possibly containing punctuation and other invalid characters.", + "solution": "Add SanitizeToolIds transformer which replaces invalid characters with underscores for both ToolUse.id and ToolResult.tool_use_id. 
Add tests covering various cases and integrate transformer into anthropic provider pipeline.", + "commits": [ + "23096da" + ], + "constructs": [ + "SanitizeToolIds::transform" + ] + } + ], + "tests": { + "exercised_by": [], + "test_functions": [ + "test_sanitizes_tool_use_id_with_invalid_chars", + "test_handles_multiple_tool_calls_and_results", + "test_sanitizes_tool_result_id_with_invalid_chars", + "test_handles_empty_messages", + "test_leaves_valid_tool_ids_unchanged" + ], + "source_commits": [ + "23096da" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/kimi_k2_reasoning.rs": { + "short_description": "Reasoning transformer for Kimi K2 format", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "KimiK2Reasoning", + "type": "struct" + }, + { + "name": "KimiK2Reasoning::transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/minimax.rs": { + "short_description": "Minimax parameter transformer for minimax-m2 models", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "SetMinimaxParams", + "type": "struct" + }, + { + "name": "SetMinimaxParams::transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/tool_choice.rs": { + "short_description": "Transformer setting tool_choice when tools exist", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "SetToolChoice", + "type": "struct" + }, + { + "name": "SetToolChoice::new", + "type": "function" + }, + { + "name": "SetToolChoice::transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/when_model.rs": { + "short_description": "Conditional transformer based on model name regex", + "category": 
"SOURCE_CODE", + "key_constructs": [ + { + "name": "when_model", + "type": "function", + "callers": [ + { + "file": "crates/forge_app/src/dto/openai/transformers/pipeline.rs", + "line": 18, + "context": "use super::when_model::when_model;" + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/pipeline.rs", + "line": 45, + "context": ".pipe(SetMinimaxParams.when(when_model(\"minimax\")))" + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/pipeline.rs", + "line": 46, + "context": ".pipe(DropToolCalls.when(when_model(\"mistral\")))" + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/pipeline.rs", + "line": 47, + "context": ".pipe(SetToolChoice::new(ToolChoice::Auto).when(when_model(\"gemini\")))" + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/pipeline.rs", + "line": 48, + "context": ".pipe(SetCache.when(when_model(\"gemini|anthropic|minimax\")))" + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/pipeline.rs", + "line": 65, + "context": "provider.id == ProviderId::FIREWORKS_AI || when_model(\"kimi\")(request)" + } + ] + }, + { + "name": "WhenModelTests", + "type": "class" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/zai_reasoning.rs": { + "short_description": "Transformer mapping reasoning to Zai thinking format", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "SetZaiThinking", + "type": "struct" + }, + { + "name": "SetZaiThinking::transform", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/openai_responses/response.rs": { + "short_description": "OpenAI response parser converting SSE to domain types", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ResponsesStreamEvent", + "type": "enum" + }, + { + "name": "StreamItem", + "type": "enum" + }, + { + "name": "FromDomain for oai::Response", + 
"type": "impl" + }, + { + "name": "IntoDomain for oai::ResponseUsage", + "type": "impl" + } + ], + "insights": [ + { + "type": "bug_fix", + "category": "State Management", + "title": "Preserve message phase and avoid duplicating reasoning text during stream-to-domain conversion", + "problem": "Phase labels from model responses and reasoning encrypted content were not preserved correctly in domain messages; streaming delta events caused duplication of reasoning text and final event needed to carry encrypted content.", + "root_cause": "Response conversion didn't map oai::MessagePhase into domain MessagePhase, and streaming conversion cleared reasoning_details entirely, losing encrypted content. Reasoning deltas already provided text/summary, so final event should preserve only encrypted content.", + "solution": "Implement IntoDomain for oai::MessagePhase -> MessagePhase mapping; preserve phase on Message when present. Implement retain_encrypted_reasoning_details(details) helper that filters reasoning details to keep only 'reasoning.encrypted' entries during streaming finalization. Ensure reasoning delta parts include item_id so encrypted parts can be correlated. 
Add tests validating phase preservation and reasoning filtering.", + "lesson_learned": "Streaming protocols often emit both delta content and final metadata; conversion must avoid duplicating emitted content while preserving metadata only available in final event (like encrypted blobs).", + "commits": [ + "5b18cce" + ], + "constructs": [ + "IntoDomain for oai::MessagePhase", + "retain_encrypted_reasoning_details", + "stream -> IntoDomain conversion logic" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_repo/src/provider/openai_responses/response.rs tests" + ], + "test_functions": [ + "test_response_into_domain_preserves_commentary_phase", + "test_response_into_domain_preserves_final_answer_phase", + "test_response_into_domain_no_phase_when_absent" + ], + "source_commits": [ + "5b18cce" + ] + } + }, + { + "type": "bug_fix", + "category": "Parsing", + "title": "Handle tool call arguments that arrive only in done event (Spark / GPT-5.3 Codex)", + "problem": "Some providers (Spark/gpt-5.3-codex-spark) send function arguments only in a single 'done' event without deltas. Previously code discarded done arguments if deltas had been seen or didn't emit done arguments when no deltas were present.", + "root_cause": "Logic assumed arguments would always stream via deltas and skipped done events unconditionally.", + "solution": "Track which output indices have received deltas (received_toolcall_deltas HashSet). When done arrives, emit arguments only if no deltas were seen for that output index. Introduced ToolCallIndex newtype to use as HashMap/HashSet key. Added tests to cover both scenarios and a Spark-style integration test.", + "lesson_learned": "Streaming protocols differ across providers; implement robust state machines that handle both incremental deltas and single-complete events. 
Record which inputs were partially/fully provided to avoid duplication.", + "commits": [ + "d0dd26e" + ], + "constructs": [ + "CodexStreamState", + "ToolCallIndex", + "ResponseFunctionCallArgumentsDone handling" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_repo/src/provider/openai_responses/response.rs (unit tests added)" + ], + "source_commits": [ + "d0dd26e" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_api/Cargo.toml": { + "short_description": "Cargo manifest for forge_api crate, dependency graph for API layer", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/Cargo.toml": { + "short_description": "Cargo manifest for forge_app crate, app layer with runtime deps", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/Cargo.toml": { + "short_description": "Cargo manifest for CI tooling integration", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/Cargo.toml": { + "short_description": "Cargo manifest for configuration management crate", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_display/Cargo.toml": { + "short_description": "Cargo manifest for display utilities (UI rendering)", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/Cargo.toml": { + "short_description": "Cargo manifest for domain primitives and core types", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_embed/Cargo.toml": { + "short_description": "Cargo manifest for embedding/semantic search 
features", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_fs/Cargo.toml": { + "short_description": "Cargo manifest for safe filesystem helpers and restricted shell mode", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/Cargo.toml": { + "short_description": "Cargo manifest for infrastructure services and integrations", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/Cargo.toml": { + "short_description": "Cargo manifest for JSON repair utilities", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/Cargo.toml": { + "short_description": "Cargo manifest and binary config for forge_main", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/Cargo.toml": { + "short_description": "Cargo manifest for markdown streaming component", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/Cargo.toml": { + "short_description": "Cargo manifest for repository/conversation persistence layer", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_select/Cargo.toml": { + "short_description": "Cargo manifest for selection/fzf integration", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/Cargo.toml": { + "short_description": "Cargo manifest for network/service integrations", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": 
"7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_snaps/Cargo.toml": { + "short_description": "Cargo manifest for UI snaps and snapshot utilities", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_spinner/Cargo.toml": { + "short_description": "Cargo manifest for spinner/utilities", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_stream/Cargo.toml": { + "short_description": "Cargo manifest for streaming primitives", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_template/Cargo.toml": { + "short_description": "Cargo manifest for templating utilities", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tool_macros/Cargo.toml": { + "short_description": "Cargo manifest for procedural Macros used across crates", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/Cargo.toml": { + "short_description": "Workspace crate for telemetry/analytics tracking", + "category": "SOURCE_CODE", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_walker/Cargo.toml": { + "short_description": "Workspace crate for filesystem walking utilities", + "category": "SOURCE_CODE", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/orch_spec/orch_runner.rs": { + "short_description": "Test harness runner for orchestration spec (mock agent/tests)", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "Runner", + "type": "class", + "callers": [ + { + "file": "crates/forge_app/src/orch_spec/orch_setup.rs", + "line": 
14, + "context": "use crate::orch_spec::orch_runner::Runner;" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_setup.rs", + "line": 90, + "context": "Runner::run(self, event.into()).await" + } + ] + }, + { + "name": "run", + "type": "function", + "callers": [ + { + "file": "crates/forge_app/src/orch_spec/orch_setup.rs", + "line": 90, + "context": "Runner::run(self, event.into()).await" + } + ] + }, + { + "name": "chat_agent", + "type": "function" + }, + { + "name": "TemplateService", + "type": "trait" + }, + { + "name": "AttachmentService", + "type": "trait" + } + ], + "insights": [ + { + "type": "testing", + "category": "Concurrency", + "title": "Simulate UI notifier acknowledgement in orchestrator spec runner", + "problem": "New notifier-based coordination required tests to simulate UI acknowledging ToolCallStart, otherwise orchestrator would deadlock waiting on notify.", + "root_cause": "Behavioral change in orchestrator required tests to mimic UI notification.", + "solution": "Updated test Runner loop to notify the notifier when it receives a ChatResponse::ToolCallStart, allowing orchestrator to proceed in tests.", + "lesson_learned": "When adding synchronization primitives, update test harness to emulate the other party's acknowledgement; otherwise tests will hang or fail.", + "commits": [ + "c1c0506" + ], + "constructs": [ + "Runner::run loop (test harness)" + ] + }, + { + "type": "refactoring", + "category": "Dependency Management", + "title": "Switch test runner to expose ShellService mock outputs", + "problem": "Orchestrator tests needed to run shell commands but runner lacked ShellService mock plumbing.", + "root_cause": "Runner previously didn't implement ShellService for test harness.", + "solution": "Added ShellOutput queue to Runner, implemented ShellService for Runner returning queued outputs, and wired mock_shell_outputs into TestContext defaults.", + "lesson_learned": "When adding features that depend on external commands, augment test 
harness to supply deterministic mock outputs and include them in TestContext so tests remain hermetic.", + "commits": [ + "e587cb5" + ], + "constructs": [ + "Runner struct fields: test_shell_outputs", + "impl ShellService for Runner::execute" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_app/src/orch_spec/orch_system_spec.rs (system prompt tests using mock shell output)" + ], + "test_functions": [], + "source_commits": [ + "e587cb5" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/orch_spec/orch_spec.rs": { + "short_description": "Tests for orchestration spec and history behavior", + "category": "TEST", + "key_constructs": [ + { + "name": "test_history_is_saved", + "type": "function" + }, + { + "name": "test_simple_conversation_no_errors", + "type": "function" + } + ], + "insights": [ + { + "type": "testing", + "category": "Other", + "title": "Add integration test to verify doom loop detection behavior", + "problem": "Need confidence that doom loop detector appends reminder after repeated tool calls and after conversation state persisted.", + "root_cause": "New feature requires regression/integration tests.", + "solution": "Added async test that simulates four identical tool calls and asserts that a system reminder user message appears after the third call and before completion (ensuring ordering relative to persisted tool-call messages).", + "commits": [ + "d1e0547" + ], + "constructs": [ + "test_doom_loop_detection_adds_user_reminder_after_repeated_calls_on_next_request" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/tests/ci.rs": { + "short_description": "CI workflow tests invoking workflow generators", + "category": "TEST", + "key_constructs": [ + { + "name": "generate", + "type": "function" + }, + { + "name": "test_release_workflow", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + 
}, + "crates/forge_config/tests/schema.rs": { + "short_description": "Schema test for ForgeConfig generation", + "category": "TEST", + "key_constructs": [ + { + "name": "generate_workflow_schema", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/tests/test_stringified_tool_calls.rs": { + "short_description": "Test suite for stringified tool call arguments", + "category": "TEST", + "key_constructs": [ + { + "name": "test_stringified_tool_call_arguments_roundtrip", + "type": "function" + } + ], + "insights": [ + { + "type": "testing", + "category": "Parsing", + "title": "Integration tests for stringified tool call arguments roundtrip", + "problem": "Regression that went unnoticed: stringified arguments from providers must be sent back as objects to avoid API 400s.", + "root_cause": "Lack of integration tests covering the resume/serialize roundtrip for stringified tool args.", + "solution": "Add several integration tests simulating provider payloads (read, patch, multiple tool calls) that assert serialized conversation contains arguments as objects.", + "commits": [ + "3253412" + ], + "constructs": [ + "test_stringified_tool_call_arguments_roundtrip", + "test_kimi_k2p5_turbo_patch_tool_scenario", + "test_multiple_stringified_tool_calls", + "test_regular_json_objects_unchanged" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_domain/tests/test_stringified_tool_calls.rs" + ], + "test_functions": [ + "test_multiple_stringified_tool_calls", + "test_regular_json_objects_unchanged", + "test_kimi_k2p5_turbo_patch_tool_scenario", + "test_stringified_tool_call_arguments_roundtrip" + ], + "source_commits": [ + "3253412" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/orch_spec/orch_system_spec.rs": { + "short_description": "Snapshot/tests for system prompt and extensions", + "category": "TEST", + "key_constructs": [ + { + "name": 
"test_system_prompt", + "type": "function" + }, + { + "name": "test_system_prompt_tool_supported", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/evals/semantic_search_quality/test_queries.ts": { + "short_description": "Manual TypeScript evaluator for semantic search queries", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "evaluateQueries", + "type": "function" + }, + { + "name": "formatEvaluation", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/evals/semantic_search_quality/test_validation.sh": { + "short_description": "Shell-based validation for semantic eval workflows", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/error_cases.rs": { + "short_description": "Tests for error cases in json_repair repair logic", + "category": "TEST", + "key_constructs": [ + { + "name": "test_error_cases", + "type": "function" + } + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Add regression tests for multibyte UTF-8 string repair paths", + "problem": "Previous implementation panicked for inputs with multibyte characters when repairing truncated/malformed JSON strings.", + "root_cause": "Missing coverage for Unicode/multibyte edge cases in parser repair code.", + "solution": "Added tests that exercise the repaired UTF-8 handling paths to prevent regressions.", + "commits": [ + "98bffff" + ], + "constructs": [ + "test_multibyte_unicode_missing_end_quote", + "test_multibyte_unicode_missing_comma_in_object", + "test_multibyte_unicode_missing_closing_brace" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/comments_unicode.rs": { + "short_description": "Tests for comments removal and unicode handling", + "category": 
"TEST", + "key_constructs": [ + { + "name": "test_remove_comments", + "type": "function" + }, + { + "name": "test_unicode_support", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/complex_repairs.rs": { + "short_description": "Complex repair scenarios for json_repair", + "category": "TEST", + "key_constructs": [ + { + "name": "test_newline_separated_json", + "type": "function" + }, + { + "name": "test_comma_separated_lists", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/escaping.rs": { + "short_description": "Tests for escape character handling in json_repair", + "category": "TEST", + "key_constructs": [ + { + "name": "test_escape_characters", + "type": "function" + }, + { + "name": "test_escape_control_characters", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/numbers.rs": { + "short_description": "Tests numeric edge cases in json_repair", + "category": "TEST", + "key_constructs": [ + { + "name": "test_invalid_numbers_to_strings", + "type": "function" + }, + { + "name": "test_numbers_with_leading_zeros", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/punctuation.rs": { + "short_description": "Tests for punctuation repairs in json_repair", + "category": "TEST", + "key_constructs": [ + { + "name": "test_repair_missing_commas", + "type": "function" + }, + { + "name": "test_repair_missing_colons", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/quotes.rs": { + "short_description": "Tests for quote repairs in json_repair", + "category": "TEST", + "key_constructs": [ + { + "name": "test_add_missing_quotes", + "type": "function" 
+ }, + { + "name": "test_add_missing_end_quote", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/special_quotes.rs": { + "short_description": "Tests for special quote handling", + "category": "TEST", + "key_constructs": [ + { + "name": "test_escaped_string_contents", + "type": "function" + }, + { + "name": "test_special_quote_characters", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/special_values.rs": { + "short_description": "Tests for Python-like constants and undefineds", + "category": "TEST", + "key_constructs": [ + { + "name": "test_python_constants", + "type": "function" + }, + { + "name": "test_repair_undefined_values", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/strings.rs": { + "short_description": "Tests for string handling in json_repair", + "category": "TEST", + "key_constructs": [ + { + "name": "test_parse_unquoted_strings", + "type": "function" + }, + { + "name": "test_turn_symbols_into_strings", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/truncation.rs": { + "short_description": "Unit tests for json_repair truncation/ellipsis handling", + "category": "TEST", + "key_constructs": [ + { + "name": "test_add_missing_closing_brackets", + "type": "function" + }, + { + "name": "test_repair_truncated_json", + "type": "function" + }, + { + "name": "test_repair_ellipsis", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/tests/valid_json.rs": { + "short_description": "Unit tests validating correct JSON repair of valid inputs", + "category": "TEST", + "key_constructs": [ + { + "name": "test_parse_valid_json", + "type": 
"function" + }, + { + "name": "assert_repair", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_test_kit/src/lib.rs": { + "short_description": "Test utilities for Forge tests including fixture loading", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "fixture", + "type": "function", + "callers": [ + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 562, + "context": "let message = forge_test_kit::fixture!(\"/src/fixtures/tool_call_01.md\").await;" + }, + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 572, + "context": "let message = forge_test_kit::fixture!(\"/src/fixtures/tool_call_01.md\").await;" + }, + { + "file": "crates/forge_domain/src/policies/config.rs", + "line": 104, + "context": "let yaml_content = forge_test_kit::fixture!(\"/src/fixtures/policies_test.yml\").await;" + } + ] + }, + { + "name": "fixture", + "type": "macro", + "callers": [ + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 562, + "context": "let message = forge_test_kit::fixture!(\"/src/fixtures/tool_call_01.md\").await;" + }, + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 572, + "context": "let message = forge_test_kit::fixture!(\"/src/fixtures/tool_call_01.md\").await;" + }, + { + "file": "crates/forge_domain/src/policies/config.rs", + "line": 104, + "context": "let yaml_content = forge_test_kit::fixture!(\"/src/fixtures/policies_test.yml\").await;" + } + ] + }, + { + "name": "json_fixture", + "type": "function", + "callers": [ + { + "file": "crates/forge_domain/src/conversation_html.rs", + "line": 495, + "context": "use forge_test_kit::json_fixture;" + }, + { + "file": "crates/forge_domain/src/conversation_html.rs", + "line": 495, + "context": "use forge_test_kit::json_fixture;" + } + ] + }, + { + "name": "json_fixture", + "type": "macro", + "callers": [ + { + "file": 
"crates/forge_domain/src/conversation_html.rs", + "line": 495, + "context": "use forge_test_kit::json_fixture;" + }, + { + "file": "crates/forge_domain/src/conversation_html.rs", + "line": 495, + "context": "use forge_test_kit::json_fixture;" + } + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/debug-cli/scripts/test_cli.sh": { + "short_description": "Smoke test script for Forge CLI behavior", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/scripts/bounty/tests/rules.test.ts": { + "short_description": "Tests for bounty rule computations and patch generation", + "category": "TEST", + "key_constructs": [ + { + "name": "describe", + "type": "function" + }, + { + "name": "it", + "type": "function" + }, + { + "name": "computeIssuePatch", + "type": "function" + }, + { + "name": "computePrPatch", + "type": "function" + }, + { + "name": "linkedIssueNumbers", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/scripts/bounty/tests/sync-all-issues.test.ts": { + "short_description": "Tests for planning and syncing all issues' bounty labels", + "category": "TEST", + "key_constructs": [ + { + "name": "planAllIssues", + "type": "function" + }, + { + "name": "syncAllIssues", + "type": "function" + }, + { + "name": "makeMockApi", + "type": "function" + }, + { + "name": "makeIssue", + "type": "function" + }, + { + "name": "labelNames", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/scripts/bounty/tests/sync-issue.test.ts": { + "short_description": "Tests for per-issue bounty synchronization logic", + "category": "TEST", + "key_constructs": [ + { + "name": "syncIssue", + "type": "function" + }, + { + "name": "makeMockApi", + "type": "function" + }, + { + "name": "makeIssue", + "type": "function" + }, + { + "name": 
"labelNames", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/scripts/bounty/tests/sync-pr.test.ts": { + "short_description": "Tests for PR-label propagation and rewards workflow", + "category": "TEST", + "key_constructs": [ + { + "name": "describe", + "type": "function" + }, + { + "name": "it", + "type": "function" + }, + { + "name": "syncPr", + "type": "function" + }, + { + "name": "makeMockApi", + "type": "function" + }, + { + "name": "makePr", + "type": "function" + }, + { + "name": "makeIssue", + "type": "function" + }, + { + "name": "labelNames", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/orch_spec/mod.rs": { + "short_description": "Module exposing test orchestration specs for Forge app", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "orch_runner", + "type": "module" + }, + { + "name": "orch_setup", + "type": "module" + }, + { + "name": "orch_spec", + "type": "module" + }, + { + "name": "orch_system_spec", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/lib.rs": { + "short_description": "Forge CI crate exporting build/test workflow components", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "jobs", + "type": "module" + }, + { + "name": "release_matrix", + "type": "module" + }, + { + "name": "steps", + "type": "module" + }, + { + "name": "workflows", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_stream/src/lib.rs": { + "short_description": "Re-exports streaming primitives for Forge UI", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "mpsc_stream", + "type": "module" + }, + { + "name": "pub use", + "type": "keyword" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_template/src/lib.rs": { + "short_description": "Template utilities exporting Element type", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "element", + "type": "module" + }, + { + "name": "Element", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_walker/src/lib.rs": { + "short_description": "Walker abstraction re-export for file system traversal", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "walker", + "type": "module" + }, + { + "name": "File", + "type": "type" + }, + { + "name": "Walker", + "type": "type" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".rustfmt.toml": { + "short_description": "Rustfmt configuration for code formatting", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "Cross.toml": { + "short_description": "Cross toolchain config for multi-target Rust builds", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "diesel.toml": { + "short_description": "Diesel ORM schema/migrations configuration", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "forge.default.yaml": { + "short_description": "Default Forge configuration (YAML) for agents/models", + "category": "CONFIG", + "key_constructs": [ + { + "name": "variables", + "type": "constant" + }, + { + "name": "max_requests_per_turn", + "type": "constant" + }, + { + "name": "compact", + "type": "constant" + }, + { + "name": "updates", + "type": "constant" + }, + { + "name": "model", + "type": "constant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "forge.schema.json": { + "short_description": "JSON Schema for Forge configuration", + "category": "CONFIG", + "key_constructs": [ + { + "name": 
"$schema", + "type": "constant" + }, + { + "name": "title", + "type": "constant" + }, + { + "name": "description", + "type": "constant" + } + ], + "insights": [ + { + "type": "refactoring", + "category": "Configuration", + "title": "Improve schema descriptions and add double type def", + "problem": "Schema descriptions were terse and some numeric fields used generic number types; TOML/JSON expectations were noisy.", + "root_cause": "Schema needed refinement to match recent representation changes (Decimal/Percentage) and to provide consistent descriptions.", + "solution": "Update property descriptions for clarity and add $defs/double to represent double-precision floats used by Decimal types.", + "lesson_learned": "When domain types change (newtypes for decimals etc.), update public schema and descriptions to keep editor validation useful.", + "commits": [ + "e40da50", + "209cd61" + ], + "constructs": [] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "renovate.json": { + "short_description": "Renovate config for dependency updates", + "category": "CONFIG", + "key_constructs": [ + { + "name": "extends", + "type": "constant" + }, + { + "name": "automerge", + "type": "constant" + }, + { + "name": "platformAutomerge", + "type": "constant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "rust-analyzer.toml": { + "short_description": "Rust Analyzer config for IDE tooling", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "Cross.toml (duplicate)": { + "short_description": "Cross toolchain config for multi-target Rust builds", + "category": "BUILD", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "vertex.json": { + "short_description": "Data file listing AI models and capabilities", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": 
"7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/fmt/mod.rs": { + "short_description": "Rust module declaring formatting utilities for forge_app", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "content", + "type": "module" + }, + { + "name": "fmt_input", + "type": "module" + }, + { + "name": "fmt_output", + "type": "module" + }, + { + "name": "todo_fmt", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/steps/mod.rs": { + "short_description": "CI steps module re-exporting setup_protoc step", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "setup_protoc", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/conversation/mod.rs": { + "short_description": "Conversation subsystem module exposing repo types", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "conversation_record", + "type": "module" + }, + { + "name": "conversation_repo", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/auth/mod.rs": { + "short_description": "Authentication layer module aggregating auth helpers", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "error", + "type": "module" + }, + { + "name": "http", + "type": "module" + }, + { + "name": "strategy", + "type": "module" + }, + { + "name": "util", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/mod.rs": { + "short_description": "Database access module for Forge (pool/schema)", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "pool", + "type": "module" + }, + { + "name": "schema", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/mcp/mod.rs": { + 
"short_description": "MCP integration layer with manager/service/tool", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "manager", + "type": "module" + }, + { + "name": "service", + "type": "module" + }, + { + "name": "tool", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/utils/mod.rs": { + "short_description": "Utility helpers for forge_services, including path utils", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "path", + "type": "module" + }, + { + "name": "temp_dir", + "type": "module" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_test_kit/Cargo.toml": { + "short_description": "Cargo manifest for test utilities crate", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/syn/mod.rs": { + "short_description": "Syntax/validation shim for tool service registry", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "validate", + "type": "function" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".gitignore": { + "short_description": "Ignored files configuration for Git", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".config/nextest.toml": { + "short_description": "Nextest test profile configuration", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".devcontainer/devcontainer.json": { + "short_description": "Dev container config for IDE/dev container", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/dependabot.yml": { + "short_description": "Dependabot configuration for dependency updates", + "category": 
"CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/labels.json": { + "short_description": "GitHub issue labels definitions", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/tsconfig.json": { + "short_description": "TypeScript compiler options for benchmarks", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/ISSUE_TEMPLATE/bug_report.yml": { + "short_description": "Bug report issue template for GitHub", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/ISSUE_TEMPLATE/config.yml": { + "short_description": "Issue template for configuration questions", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/ISSUE_TEMPLATE/documentation.yml": { + "short_description": "Documentation issue issue-template", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/ISSUE_TEMPLATE/feature_request.yml": { + "short_description": "Feature request issue template", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/ISSUE_TEMPLATE": { + "short_description": "Directory for issue templates", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/ISSUE_TEMPLATE/": { + "short_description": "Directory placeholder", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/ISSUE_TEMPLATE/performance_issue.yml": { + "short_description": "GitHub issue template for reporting performance problems", + "category": 
"DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/ISSUE_TEMPLATE/provider_integration.yml": { + "short_description": "GitHub issue template for requesting provider integrations", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/.forge.toml": { + "short_description": "Forge runtime configuration defaults", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "AGENTS.md": { + "short_description": "Project agent guidelines and development conventions", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "README.md": { + "short_description": "Forge project overview, Quickstart, and usage", + "category": "DOCS", + "key_constructs": [], + "insights": [ + { + "type": "refactoring", + "category": "Documentation", + "title": "Document increased FORGE_MAX_IMAGE_SIZE default", + "problem": "README env example was out-of-date relative to code default", + "root_cause": "When defaults changed in code, docs were not updated.", + "solution": "Updated README .env example to show new 10 MB default for FORGE_MAX_IMAGE_SIZE.", + "commits": [ + "c78894a" + ], + "constructs": [] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/evals/semantic_search_quality/task.yml": { + "short_description": "Evaluation task for semantic search quality", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-04-02-system-context-rendering-final.md": { + "short_description": "Plan for implementing dynamic system context rendering (final)", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"plans/2025-04-02-system-context-rendering-v1.md": { + "short_description": "Plan for dynamic system context rendering (v1)", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-04-02-system-context-rendering-v2.md": { + "short_description": "Plan for dynamic system context rendering (v2)", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-04-06-retry-config-migration.md": { + "short_description": "Migration plan: move retry config to environment", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-04-11-tool-call-context-implementation.md": { + "short_description": "Plan: add ToolCallContext to ExecutableTool call", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-04-16-model-selection-command.md": { + "short_description": "Plan: add /model CLI command with model picker", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-04-26-large-file-read-range-support-v3.md": { + "short_description": "Plan: range-based large file read with infer (v3)", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-04-27-large-file-read-range-support-v4.md": { + "short_description": "Plan: range-based large file read with infer (v4)", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-06-07-tool-service-migration-v1.md": { + "short_description": "Migration plan: migrate tools to service layer (v1)", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"plans/2025-09-07-shell-env-variable-support-v1.md": { + "short_description": "Plan: env var support for shell tool (v1)", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/built_in_commands.json": { + "short_description": "JSON definitions of built-in Forge CLI commands", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/permissions.default.yaml": { + "short_description": "Default permission policies for services", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".github/contribution.md": { + "short_description": "Contribution guide for Forge project", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/README.md": { + "short_description": "Overview and usage of the benchmarks framework", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-09-08-agent-loader-cwd-extension-v1.md": { + "short_description": "Plan to extend agent loading to CWD directory in three-source loading", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-09-12-forge-dump-autoopen-env-var-v2.md": { + "short_description": "Plan to integrate FORGE_DUMP_AUTO_OPEN env var (duplicate entry)", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-09-13-dynamic-agent-slash-commands-v3.md": { + "short_description": "Plan to dynamically register agent-specific slash commands", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"plans/2025-09-13-forge-history-file-env-var-v2.md": { + "short_description": "Plan to support FORGE_HISTORY_FILE env var for history path", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-09-14-explicit-conversation-id-generation-v1.md": { + "short_description": "Plan to implement explicit conversation ID generation and resume", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2025-09-27-AppConfigRepository_Implementation-v1.md": { + "short_description": "Plan to implement AppConfigRepository pattern and migrate usage", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2026-01-22-Fix Auto-Sync Workspace Registration Issue-v1.md": { + "short_description": "Plan to fix zsh auto-sync registering unintended workspaces", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2026-01-22-Fix Auto-Sync Workspace Registration Issue-v2.md": { + "short_description": "Refined plan adding porcelain/is-indexed checks for auto-sync", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2026-03-31-slim-environment-and-add-get-config-v1.md": { + "short_description": "Plan to slim Environment and add get_config to EnvironmentInfra", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/2026-04-05-config-init-at-startup-v1.md": { + "short_description": "Plan to init config at startup, remove get_config from EnvironmentInfra", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/agent-context-compaction-2025-03-24-concise.md": { + "short_description": 
"Plan for agent context compaction feature and deprecation path", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/agent-context-compaction-2025-03-25-concise.md": { + "short_description": "Revised plan: single compressible sequence in compaction", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "plans/agent-context-compaction-2025-03-25.md": { + "short_description": "Plan to update compaction to multi-sequence handling and tests", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/README.md": { + "short_description": "Documentation for the Forge ZSH plugin features and usage", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "templates/forge-command-generator-prompt.md": { + "short_description": "Template for generating shell commands from NL intents", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "templates/forge-commit-message-prompt.md": { + "short_description": "Template to generate conventional commit messages from diffs", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "templates/forge-custom-agent-template.md": { + "short_description": "Template for custom agent behavior and integration", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "templates/forge-partial-summary-frame.md": { + "short_description": "Template for a multi-message summary frame guiding tool usage", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "templates/forge-partial-system-info.md": { + 
"short_description": "Template providing system/environment info for context", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "templates/forge-partial-tool-error-reflection.md": { + "short_description": "Template prompting deep reflection on tool call errors", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "templates/forge-partial-tool-use-example.md": { + "short_description": "Template showing correct tool call usage with JSON in tags", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "templates/forge-system-prompt-title-generation.md": { + "short_description": "Template to generate concise technical titles from tasks", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "templates/forge-tool-retry-message.md": { + "short_description": "Template for tool retry guidance after failure", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/conversation_style.css": { + "short_description": "CSS styling for domain UI components in conversations", + "category": "SOURCE_CODE", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/banner": { + "short_description": "ASCII art banner for Forge CLI", + "category": "SOURCE_CODE", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/proto/forge.proto": { + "short_description": "Protobuf API definitions for Forge repository service", + "category": "SOURCE_CODE", + "key_constructs": [ + { + "name": "ForgeService", + "type": "class" + }, + { + "name": "SearchRequest", + "type": "class" + }, + { + "name": "SearchResponse", + 
"type": "class" + }, + { + "name": "UploadFilesRequest", + "type": "class" + }, + { + "name": "UploadFilesResponse", + "type": "class" + }, + { + "name": "DeleteFilesRequest", + "type": "class" + }, + { + "name": "DeleteFilesResponse", + "type": "class" + }, + { + "name": "ListFilesRequest", + "type": "class" + }, + { + "name": "ListFilesResponse", + "type": "class" + }, + { + "name": "ChunkFilesRequest", + "type": "class" + }, + { + "name": "ChunkFilesResponse", + "type": "class" + }, + { + "name": "HealthCheckRequest", + "type": "class" + }, + { + "name": "HealthCheckResponse", + "type": "class" + }, + { + "name": "CreateWorkspaceRequest", + "type": "class" + }, + { + "name": "CreateWorkspaceResponse", + "type": "class" + }, + { + "name": "ListWorkspacesRequest", + "type": "class" + }, + { + "name": "ListWorkspacesResponse", + "type": "class" + }, + { + "name": "GetWorkspaceInfoRequest", + "type": "class" + }, + { + "name": "GetWorkspaceInfoResponse", + "type": "class" + }, + { + "name": "DeleteWorkspaceRequest", + "type": "class" + }, + { + "name": "DeleteWorkspaceResponse", + "type": "class" + }, + { + "name": "CreateApiKeyRequest", + "type": "class" + }, + { + "name": "CreateApiKeyResponse", + "type": "class" + }, + { + "name": "ValidateFilesRequest", + "type": "class" + }, + { + "name": "ValidateFilesResponse", + "type": "class" + }, + { + "name": "Skill", + "type": "class" + }, + { + "name": "SelectedSkill", + "type": "class" + }, + { + "name": "SelectSkillRequest", + "type": "class" + }, + { + "name": "SelectSkillResponse", + "type": "class" + }, + { + "name": "FuzzySearchRequest", + "type": "class" + }, + { + "name": "FuzzySearchResponse", + "type": "class" + }, + { + "name": "SearchRequest", + "type": "class" + }, + { + "name": "Query", + "type": "class" + }, + { + "name": "QueryResult", + "type": "class" + }, + { + "name": "Workspace", + "type": "class" + }, + { + "name": "WorkspaceDefinition", + "type": "class" + } + ], + "generated_at_commit": 
"7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "insta.yaml": { + "short_description": "Test config enabling auto-accept for tests", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "rust-toolchain.toml": { + "short_description": "Rust toolchain specification for project", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/evals/semantic_search_quality/test_context.json": { + "short_description": "JSON test context for semantic search quality benchmarks", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_select/README.md": { + "short_description": "Documentation for forge_select prompts interface", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/fixtures/policies_test.yml": { + "short_description": "YAML fixture defining simple access policies tests", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-09-12-065405_create_conversations_table/up.sql": { + "short_description": "SQL to create conversations table and fields", + "category": "DATA", + "key_constructs": [ + { + "name": "conversations", + "type": "constant" + }, + { + "name": "conversation_id", + "type": "constant" + }, + { + "name": "title", + "type": "constant" + }, + { + "name": "workspace_id", + "type": "constant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-09-12-065740_add_conversations_indexes/down.sql": { + "short_description": "SQL to drop indexing on conversations table", + "category": "DATA", + "key_constructs": [ + { + "name": 
"idx_conversations_active_workspace_updated", + "type": "constant" + }, + { + "name": "idx_conversations_workspace_created", + "type": "constant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-09-12-065740_add_conversations_indexes/up.sql": { + "short_description": "SQL to create indexing on conversations table", + "category": "DATA", + "key_constructs": [ + { + "name": "idx_conversations_workspace_created", + "type": "constant" + }, + { + "name": "idx_conversations_active_workspace_updated", + "type": "constant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-11-13-054241_create_workspace_table/down.sql": { + "short_description": "SQL to drop workspace table and related indexes", + "category": "DATA", + "key_constructs": [ + { + "name": "idx_workspace_path", + "type": "constant" + }, + { + "name": "idx_workspace_user_id", + "type": "constant" + }, + { + "name": "workspace", + "type": "constant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-11-13-054241_create_workspace_table/up.sql": { + "short_description": "SQL to create workspace table and indexes", + "category": "DATA", + "key_constructs": [ + { + "name": "workspace", + "type": "constant" + }, + { + "name": "idx_workspace_path", + "type": "constant" + }, + { + "name": "idx_workspace_user_id", + "type": "constant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-11-15-000000_create_indexing_auth_table/up.sql": { + "short_description": "SQL to create indexing_auth table for indexing service", + "category": "DATA", + "key_constructs": [ + { + "name": "indexing_auth", + "type": "constant" + }, + { + "name": "user_id", + "type": "constant" + }, + { + "name": "token", + "type": 
"constant" + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-11-22-061212-0000_drop_indexing_auth_table/down.sql": { + "short_description": "SQL rollback for dropping indexing_auth table", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-11-22-061212-0000_drop_indexing_auth_table/up.sql": { + "short_description": "SQL migration to drop indexing_auth table", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2026-02-16-130933-0000_drop_workspace_table/down.sql": { + "short_description": "SQL rollback for recreating workspace table", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2026-02-16-130933-0000_drop_workspace_table/up.sql": { + "short_description": "SQL migration to drop workspace table and its indexes", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/create-github-issue/SKILL.md": { + "short_description": "Skill to create GitHub issues via gh CLI with templates", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/create-plan/README.md": { + "short_description": "README for create-plan skill, plan validation usage", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/debug-cli/SKILL.md": { + "short_description": "Skill for debugging and validating forge CLI", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
".forge/skills/github-pr-comments/SKILL.md": { + "short_description": "Skill to resolve GitHub PR inline comments", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/evals/sem_search/README.md": { + "short_description": "Evaluation guide for semantic code search usage", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/evals/semantic_search_quality/README.md": { + "short_description": "Evaluation guide for semantic search quality metric", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/allowed_extensions.txt": { + "short_description": "List of allowed file extensions for services", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".gitattributes": { + "short_description": "Git attributes file with line-ending hint", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/create-plan/references/example-plan.md": { + "short_description": "Example plan template showing plan structure", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/fixtures/tool_call_01.md": { + "short_description": "Fixture describing manual tool call analysis", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/agents/forge.md": { + "short_description": "Agent overview for Forge execution tasks", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/agents/muse.md": { + "short_description": "Agent definition 
for strategic planning without changes", + "category": "CONFIG", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/agents/sage.md": { + "short_description": "Sage agent description and prompts for read-only codebase research", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/provider.json": { + "short_description": "Configuration for multiple code-generation providers and authentication", + "category": "CONFIG", + "key_constructs": [], + "insights": [ + { + "type": "chore", + "category": "Configuration", + "title": "Provider catalog updates (model ids and params)", + "problem": "Provider model IDs sometimes incorrect or missing; some provider URL param names needed rename/normalization.", + "root_cause": "Upstream provider naming/host param conventions changed; provider catalog needed updates to remain accurate.", + "solution": "Fix Claude Opus 4.6 model id, add GLM model entries, rename minimax host param to HOSTNAME and normalize url_param_vars layout.", + "lesson_learned": "Provider metadata must be kept in sync with external provider naming; design provider.json parsing to be tolerant to small renames to make migrations easier.", + "commits": [ + "4052064", + "483bd44", + "51a730f" + ], + "constructs": [] + }, + { + "type": "feature", + "category": "Configuration", + "title": "Add new providers and update provider metadata (OpenCode Zen, Minimax, Kimi Coding, context lengths)", + "problem": "New backends and updated context lengths need to be reflected in provider metadata for selection and pipeline routing.", + "root_cause": "Provider ecosystem changes require central provider registration.", + "solution": "Added entries for opencode_zen, minimax, kimi_coding and adjusted context_length for some Claude variants and gpt-5.3-codex-spark. 
Also added custom_headers for providers where required.", + "lesson_learned": "Keep provider metadata authoritative and ensure new providers include required info (auth env var, url, models, custom headers). When adding providers, also update ProviderId enum, pipeline routing and any transformer logic that depends on provider IDs.", + "commits": [ + "40cfcc8", + "fe68905", + "050ab29", + "5a800dc" + ], + "constructs": [ + "provider JSON entries" + ] + }, + { + "type": "feature", + "category": "Configuration", + "title": "Add openai_responses_compatible provider and update model catalog entries", + "problem": "Need to support a Responses-compatible provider and manage evolving model IDs/metadata.", + "root_cause": "New API variants and model consolidations require provider registry updates.", + "solution": "Added a provider entry openai_responses_compatible with response_type OpenAIResponses and models URL; also added and later consolidated/updated GPT-5.x model ids and context_length changes across commits (gpt-5.4 additions, then consolidation to gpt-5.4 and context_length updates to 272000).", + "lesson_learned": "Provider registry is a central authoritative source; changes to model IDs or provider response types must be coordinated with repo code that routes and lists models. 
"Keep provider.json changes backward compatible where possible and include tests verifying the config.", + "commits": [ + "58827bd", + "85773b4", + "a5eab21", + "0577b3a" + ], + "constructs": [ + "provider.json entries" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_repo/src/provider/provider_repo.rs::tests" + ], + "source_commits": [ + "58827bd", + "85773b4" + ] + } + }, + { + "type": "feature", + "category": "Configuration", + "title": "Add gpt-5.3-codex provider model to provider catalog", + "problem": "Catalog lacked new GPT-5.3 Codex entry resulting in unrecognized model IDs.", + "root_cause": "New model introduced upstream not added to embedded provider.json.", + "solution": "Inserted gpt-5.3-codex entry with metadata (context_length, tools_supported etc.).", + "lesson_learned": "Keep provider catalog up-to-date when adding new model families; provider.json is source-of-truth for supported model features.", + "commits": [ + "cee8777" + ], + "constructs": [] + }, + { + "type": "other", + "category": "Configuration", + "title": "Add new provider model entry (gemini-3.1-pro-preview)", + "problem": "New provider model needed to be exposed to internal provider catalog.", + "root_cause": "Catalog lacked the new 'gemini-3.1-pro-preview' model metadata.", + "solution": "Added a new JSON entry describing the Gemini model with context_length and capability flags.", + "commits": [ + "8a05393" + ], + "constructs": [] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/followup.md": { + "short_description": "Tool description for asking clarifying follow-up questions", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/fs_multi_patch.md": { + "short_description": "Documentation for performing multiple file edits in one operation", + "category": "DOCS", + "key_constructs": 
[], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/fs_read.md": { + "short_description": "Documentation for reading local filesystem files", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/fs_remove.md": { + "short_description": "Documentation for removing a file on the local filesystem", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/fs_search.md": { + "short_description": "Documentation for ripgrep-based filesystem search tool", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/fs_undo.md": { + "short_description": "Documentation for reverting the most recent file operation", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/fs_write.md": { + "short_description": "Documentation for writing/replacing files on disk", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/net_fetch.md": { + "short_description": "Documentation for fetching content from URLs as markdown/text", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/plan_create.md": { + "short_description": "Documentation for creating plan files with versions and content", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/semantic_search.md": { + 
"short_description": "Documentation for AI-powered semantic code search tool", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/shell.md": { + "short_description": "Documentation for executing shell commands with safety rules", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/skill_fetch.md": { + "short_description": "Documentation for fetching detailed information about a skill", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/task.md": { + "short_description": "Documentation for launching autonomous agent tasks", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/todo_read.md": { + "short_description": "Documentation for reading the current todo list", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/descriptions/todo_write.md": { + "short_description": "Documentation for creating/updating todo items", + "category": "DOCS", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fixtures/agents/advanced.md": { + "short_description": "Advanced test agent fixture with full configuration", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fixtures/agents/basic.md": { + "short_description": "Basic test agent fixture for fundamental functionality", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": 
"7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fixtures/agents/no_id.md": { + "short_description": "Fixture agent with no ID in frontmatter to test filename-based ID assignment", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fixtures/skills/with_name_and_description.md": { + "short_description": "Skill fixture with name and description frontmatter", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/skills/create-skill/SKILL.md": { + "short_description": "Skill definition for creating/updating skills with frontmatter and guidance", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/skills/execute-plan/SKILL.md": { + "short_description": "Skill definition for executing plan files with status tracking guidelines", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/skills/github-pr-description/SKILL.md": { + "short_description": "Skill for generating PR descriptions and creating PRs via GitHub CLI", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/fixtures/commands/basic.md": { + "short_description": "Fixture for a basic test command with frontmatter", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/fixtures/commands/invalid.md": { + "short_description": "Fixture with invalid frontmatter to test parse failure", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_services/src/fixtures/commands/multiline.md": { + "short_description": "Fixture for a multiline prompt command", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fixtures/skills_with_resources/test-skill/SKILL.md": { + "short_description": "Test skill with resources fixture", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-09-12-065405_create_conversations_table/down.sql": { + "short_description": "SQL migration down script to drop conversations table", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-10-16-000000_add_metrics_to_conversations/down.sql": { + "short_description": "SQL migration down script removing metrics column", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-10-16-000000_add_metrics_to_conversations/up.sql": { + "short_description": "SQL migration up script adding metrics column", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/migrations/2025-11-15-000000_create_indexing_auth_table/down.sql": { + "short_description": "SQL migration down script dropping indexing_auth table", + "category": "DATA", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fixtures/agents/invalid.md": { + "short_description": "Agent fixture with invalid frontmatter to test parsing failure", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_repo/src/fixtures/skills/no_front_matter.md": { + "short_description": "Skill fixture without frontmatter", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fixtures/skills/with_description_only.md": { + "short_description": "Skill fixture with only description frontmatter", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fixtures/skills/with_name_only.md": { + "short_description": "Skill fixture with only name frontmatter", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fixtures/skills_with_resources/minimal-skill/SKILL.md": { + "short_description": "Minimal skill fixture with name and description", + "category": "TEST", + "key_constructs": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "scripts/list-all-porcelain.sh": { + "short_description": "Runs all `forge list ... --porcelain` commands and prints results with timing", + "category": "SOURCE_CODE", + "description": "Shell helper that runs a collection of `forge list` subcommands with the --porcelain flag, prints colored section headers and sample output, and measures/runtime for each command. 
It also includes a final hard-coded summary describing which list types include a $ID column to help developers inspect CLI output formats.", + "key_constructs": [ + { + "name": "FORGE_BIN", + "type": "constant", + "purpose": "Path to the forge binary (first script argument or default)" + }, + { + "name": "print_section", + "type": "function", + "purpose": "Prints a colored section header" + }, + { + "name": "print_command", + "type": "function", + "purpose": "Prints the command being executed" + }, + { + "name": "print_runtime", + "type": "function", + "purpose": "Computes and prints runtime in ms or s" + } + ], + "semantic_tags": [ + "shell", + "cli-integration", + "diagnostics", + "timing", + "formatting" + ], + "handles_entities": [ + "Agent", + "Provider", + "Model", + "Config", + "MCP server", + "Conversation", + "Custom Command", + "Skill" + ], + "key_behaviors": [ + "executes multiple 'forge list' commands with --porcelain", + "prints colored sections and sample results", + "measures and reports runtime per command", + "summarizes which lists contain $ID column" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/doctor.zsh": { + "short_description": "Zsh environment diagnostic tool for Forge shell integration", + "category": "SOURCE_CODE", + "description": "A comprehensive zsh diagnostic script that checks zsh version, terminal, Forge installation, plugin loading, theme, dependencies (fzf, fd, bat), and keyboard/terminal settings across platforms. 
It prints pass/fail/warn messages with remediation instructions and is intended to help users debug common shell-plugin installation and configuration problems.", + "key_constructs": [ + { + "name": "print_section", + "type": "function", + "purpose": "Prints formatted section headers" + }, + { + "name": "print_result", + "type": "function", + "purpose": "Prints pass/fail/warn/info results and updates counters" + }, + { + "name": "version_gte", + "type": "function", + "purpose": "Compares semantic version strings" + }, + { + "name": "plugins, _FORGE_PLUGIN_LOADED, _FORGE_THEME_LOADED", + "type": "constant", + "purpose": "Environment variables and arrays inspected to determine plugin/theme state" + } + ], + "semantic_tags": [ + "diagnostics", + "zsh", + "environment", + "plugins", + "platform-checks" + ], + "handles_entities": [ + "User .zshrc", + "Forge binary", + "zsh plugins (zsh-autosuggestions, zsh-syntax-highlighting)", + "Dependencies (fzf, fd/fdfind, bat)" + ], + "key_behaviors": [ + "validates zsh and plugin configuration", + "reports missing dependencies and suggests installation steps", + "checks plugin loading order and theme integration", + "provides keyboard/terminal configuration guidance" + ], + "insights": [ + { + "type": "refactoring", + "category": "Other", + "title": "Adjust severity levels in doctor script", + "problem": "Some environment checks reported as failure when they should be warnings/informational (e.g., missing Oh My Zsh or fonts).", + "root_cause": "Doctor script used 'fail' for items that are non-fatal recommendations.", + "solution": "Downgraded several 'fail' results to 'warn' or 'instruction' messages and clarified prompts.", + "lesson_learned": "Health-check scripts should differentiate between fatal and advisory checks \u2014 avoid alarming users for non-fatal issues.", + "commits": [ + "d4574a2" + ], + "constructs": [ + "print_result", + "print_section" + ] + }, + { + "type": "fix", + "category": "Documentation", + "title": 
"Update installation hint in doctor script", + "problem": "Doctor script suggested npm i -g forgecode@latest which is outdated.", + "root_cause": "Installer moved to curl URL and docs/diagnostic script wasn't updated.", + "solution": "Changed displayed install instruction to use curl -fsSL https://forgecode.dev/cli | sh.", + "lesson_learned": "Keep diagnostics and displayed install instructions in sync with official install method.", + "commits": [ + "0dffe3e" + ], + "constructs": [ + "print_result usage" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/forge.plugin.zsh": { + "short_description": "Main zsh plugin loader that sources modular plugin components", + "category": "SOURCE_CODE", + "description": "Top-level zsh plugin entrypoint that sources the plugin's modular files (config, highlight, helpers, completions, action handlers, dispatcher and bindings) to assemble the Forge zsh integration at runtime. It centralizes plugin composition so other code can generate a single plugin blob for users to eval.", + "key_constructs": [ + { + "name": "source \"${0:A:h}/lib/config.zsh\"", + "type": "function", + "purpose": "Loads configuration helpers" + }, + { + "name": "source \"${0:A:h}/lib/dispatcher.zsh\"", + "type": "function", + "purpose": "Loads main dispatcher and widget registration" + }, + { + "name": "source \"${0:A:h}/lib/bindings.zsh\"", + "type": "function", + "purpose": "Loads key bindings and widget registration" + } + ], + "semantic_tags": [ + "zsh", + "plugin", + "sourcing", + "modular" + ], + "handles_entities": [ + "plugin modules", + "completions", + "action handlers" + ], + "key_behaviors": [ + "initializes Forge zsh plugin by sourcing component scripts", + "registers completions, actions and key bindings for interactive shell" + ], + "insights": [ + { + "type": "refactoring", + "category": "State Management", + "title": "Keep session-scoped active agent and conversation ID in shell plugin", + 
"problem": "Shell plugin relied on environment variables and global state (FORGE_CONVERSATION_ID, FORGE_ACTIVE_AGENT) exported to the environment; handling of agent/conversation was brittle and inconsistent across sessions.", + "root_cause": "Global exported env vars were modified by plugin in ways that clashed with session expectations and required the CLI to accept --aid/--cid flags.", + "solution": "Use internal plugin hashed variables (_FORGE_ACTIVE_AGENT and _FORGE_CONVERSATION_ID) to keep session-scoped state, default to 'forge', pass the agent to CLI via --agent flag in _forge_exec, and improve fzf selection outputs with delimiters and preview calls. Also updated behaviours for session info/show and conversation selection to set the plugin-local conversation id and print selection info. This avoids exporting env vars and makes session state local to the shell plugin.", + "commits": [ + "94ac901", + "a90be20", + "fc3dedd" + ], + "constructs": [ + "_forge_exec", + "_forge_action_new", + "_forge_action_conversation", + "_forge_action_model", + "_forge_action_default", + "_forge_print_agent_message" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/forge.setup.zsh": { + "short_description": "Managed .zshrc block inserted by `forge zsh setup`", + "category": "CONFIG", + "description": "Auto-generated snippet that the setup command injects into a user's .zshrc; ensures required plugins are present and evaluates the Forge plugin and theme if not already loaded. 
This block is intended to be machine-managed and warns users not to edit it manually.", + "key_constructs": [], + "semantic_tags": [ + "zsh", + "setup", + "configuration", + "plugins" + ], + "handles_entities": [ + ".zshrc configuration", + "zsh-autosuggestions", + "zsh-syntax-highlighting", + "Forge plugin", + "Forge theme" + ], + "key_behaviors": [ + "ensures required zsh plugins are added to plugins array", + "loads Forge plugin and theme by evaluating generated outputs" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/forge.theme.zsh": { + "short_description": "Zsh theme snippet that sets RPROMPT using Forge CLI output", + "category": "SOURCE_CODE", + "description": "Defines a function that calls the Forge binary to retrieve a formatted right-prompt string (agent/model/token info) and appends it to RPROMPT when the theme is loaded. It exports flags/env to reflect per-session overrides so the prompt reflects the active session state.", + "key_constructs": [ + { + "name": "_forge_prompt_info", + "type": "function", + "purpose": "Calls `forge zsh rprompt` with session env overrides to get RPROMPT content" + }, + { + "name": "RPROMPT", + "type": "constant", + "purpose": "Appends Forge-provided right prompt to the shell RPROMPT variable" + } + ], + "semantic_tags": [ + "zsh", + "prompt", + "theme", + "rprompt", + "cli-integration" + ], + "handles_entities": [ + "RPROMPT", + "session model/provider environment variables" + ], + "key_behaviors": [ + "displays model/agent info and token counts in the zsh right prompt", + "invokes Forge CLI to render prompt content" + ], + "insights": [ + { + "type": "refactoring", + "category": "Configuration", + "title": "RPROMPT and PROMPT_SUBST handling across lazy-loaded contexts", + "problem": "When sourced from within a function (lazy-load), setopt/prompts may not apply globally, and RPROMPT set inside function can be local unless typeset -g is used.", + "root_cause": "Zsh scoping and 
lazy-loading interactions (plugin managers that eval within function scope).", + "solution": "Attempts to use emulate -R zsh -c 'setopt PROMPT_SUBST' and typeset -g for RPROMPT were added, but later reverted in some commits due to regressions; project added normalization and careful handling instead.", + "commits": [ + "e69703b", + "fba93e8", + "3428478", + "4add7e1" + ], + "constructs": [ + "_forge_prompt_info", + "RPROMPT assignment" + ], + "lesson_learned": "Small shell-scoped changes can have surprising side effects across plugin managers; test in multiple lazy-loaders and provide a conservative default." + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/keyboard.zsh": { + "short_description": "Displays platform- and mode-specific ZLE keyboard shortcuts", + "category": "SOURCE_CODE", + "description": "Script that prints helpful keyboard shortcut documentation tailored to the user's platform (macOS/Linux/Windows) and current ZSH keymap (emacs or vi). 
It formats and aligns key descriptions for readability and offers guidance to run the doctor script for troubleshooting meta/Alt key behavior.", + "key_constructs": [ + { + "name": "print_shortcut", + "type": "function", + "purpose": "Prints a key and description with aligned padding" + }, + { + "name": "print_section", + "type": "function", + "purpose": "Prints a section header" + }, + { + "name": "platform detection", + "type": "constant", + "purpose": "Detects OS to tailor displayed shortcuts" + } + ], + "semantic_tags": [ + "keyboard", + "zsh", + "help", + "shortcuts", + "platform" + ], + "handles_entities": [], + "key_behaviors": [ + "prints ZLE keyboard shortcuts for the detected platform and keymap", + "advises how to enable meta/option behavior and Vi mode" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/zsh/plugin.rs": { + "short_description": "Generates zsh plugin/theme content and installs plugin into .zshrc", + "category": "SOURCE_CODE", + "description": "Rust helpers that embed the shell-plugin files, generate a single consolidated zsh plugin (including Clap completions), produce the theme string, execute zsh helper scripts with streaming output, and manage insertion/update of a managed forge block inside a user's .zshrc with backup and marker validation. 
It abstracts cross-platform execution details (temp file on Windows) and ensures safe updates with backups and marker parsing.", + "key_constructs": [ + { + "name": "ZSH_PLUGIN_LIB", + "type": "constant", + "purpose": "Embedded directory of shell-plugin/lib files via include_dir" + }, + { + "name": "generate_zsh_plugin", + "type": "function", + "purpose": "Concatenates embedded zsh files, strips comments/empties, appends Clap completions, and returns plugin text" + }, + { + "name": "generate_zsh_theme", + "type": "function", + "purpose": "Returns normalized theme script content with loaded marker", + "callers": [ + { + "file": "crates/forge_main/src/zsh/mod.rs", + "line": 24, + "context": "generate_zsh_plugin, generate_zsh_theme, run_zsh_doctor, run_zsh_keyboard," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1578, + "context": "let theme = crate::zsh::generate_zsh_theme()?;" + } + ] + }, + { + "name": "execute_zsh_script_with_streaming", + "type": "function", + "purpose": "Runs a zsh script (via -c or temp file on Windows) and streams stdout/stderr linewise" + }, + { + "name": "setup_zsh_integration", + "type": "function", + "purpose": "Injects or updates the managed forge block in ~/.zshrc, creating backups and validating markers", + "callers": [ + { + "file": "crates/forge_main/src/zsh/mod.rs", + "line": 25, + "context": "setup_zsh_integration," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1711, + "context": "let result = crate::zsh::setup_zsh_integration(disable_nerd_font, forge_editor)?;" + } + ] + }, + { + "name": "MarkerState", + "type": "enum", + "purpose": "Represents parsed state of start/end markers in .zshrc (NotFound/Valid/Invalid)" + }, + { + "name": "ZshSetupResult", + "type": "struct", + "purpose": "Result returned by setup_zsh_integration with message and optional backup path" + } + ], + "semantic_tags": [ + "zsh", + "plugin-generation", + "shell-integration", + "file-io", + "streaming" + ], + "handles_entities": [ + 
".zshrc file", + "temporary script files", + "plugin and theme content" + ], + "key_behaviors": [ + "generates a consolidated zsh plugin including completions", + "generates theme script content for RPROMPT", + "executes embedded zsh helper scripts and streams output", + "safely inserts or updates a managed block in user's .zshrc with backups" + ], + "insights": [ + { + "type": "other", + "category": "Configuration", + "title": "Zsh plugin script generation: typeset scoping changes and normalization", + "problem": "There were multiple attempts to change the `typeset` scope to global (-g) to support lazy-loading plugin managers. These changes were reverted in places because they caused regressions and exposed implementation tradeoffs.", + "root_cause": "Using typeset -g ensures variables are global even when eval'd inside functions (helps lazy-loading), but changes the visibility and can conflict with user expectations or shell plugin semantics; toggling between -g, -gh, and -h reflects attempts to balance persistence vs hiding variables.", + "solution": "Project experimented with typeset -g and -gh then reverted to safer non-global hidden declarations in later commits. The codebase also added normalization of embedded script content to remove CRLFs for Windows and created a path to write temp zsh scripts for Windows execution.", + "commits": [ + "e69703b", + "fba93e8", + "3428478", + "4add7e1" + ], + "constructs": [ + "generate_zsh_plugin", + "generate_zsh_theme", + "execute_zsh_script_with_streaming" + ], + "lesson_learned": "Shell plugin code needs cross-platform handling (CRLF vs LF), and changing global scoping for convenience (lazy-loading) can have regressions \u2014 prefer explicit handling (hidden, localized vars) and test in multiple shell manager scenarios." + }, + { + "type": "bug_fix", + "category": "Platform/Other", + "title": "Normalize embedded shell scripts for Windows and use temp script for execution", + "problem": "include_str! 
embedded .zsh files on Windows may contain CRLF causing zsh parsing failures; passing long scripts via -c can mangle quotes on Windows/CreateProcess.", + "root_cause": "CRLF line endings and CreateProcess quoting rules on Windows.", + "solution": "Add normalize_script to strip CRLFs and create_temp_zsh_script to write and run a temporary file on Windows (use zsh -f ), keeping temp dir alive until child completes.", + "commits": [ + "fd60dc7" + ], + "constructs": [ + "normalize_script", + "create_temp_zsh_script", + "execute_zsh_script_with_streaming (Windows path)" + ] + }, + { + "type": "refactoring", + "category": "Build/Packaging", + "title": "Embed zsh plugin via include_dir and use forge_embed helper", + "problem": "rust-embed based iteration and API usage differed from include_dir; zsh generation needed to read embedded files and strip comments.", + "root_cause": "Transition from rust-embed to include_dir required changes to how files are enumerated and content is accessed.", + "solution": "Replaced RustEmbed-derived ZshPluginLib with static ZSH_PLUGIN_LIB include_dir Dir, used forge_embed::files iterator to traverse files, and updated test Mutex initialization to use LazyLock.", + "lesson_learned": "When replacing an embedding library adjust file iteration and access APIs; ensure tests using environment locking are updated to new LazyLock-based patterns.", + "commits": [ + "6b9cb31" + ], + "constructs": [ + "generate_zsh_plugin", + "ZSH_PLUGIN_LIB usage" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/chat.rs": { + "short_description": "Routes chat and model requests to provider-specific repositories and caches models", + "category": "SOURCE_CODE", + "description": "High-level chat repository that selects the correct provider backend (OpenAI, Codex/Responses, Anthropic, Bedrock, Google, OpenCode) based on provider configuration and response type. 
It also caches model lists on disk (with background refresh tasks) and aborts background refresh tasks on drop.", + "key_constructs": [ + { + "name": "ForgeChatRepository", + "type": "struct", + "purpose": "Top-level chat repository that holds router, model cache and background refresh guard" + }, + { + "name": "ProviderRouter", + "type": "struct", + "purpose": "Contains concrete provider repositories and dispatches chat/models requests" + }, + { + "name": "ForgeChatRepository::models", + "type": "function", + "purpose": "Returns cached model list if available and triggers background refresh otherwise" + }, + { + "name": "ProviderRouter::chat", + "type": "function", + "purpose": "Dispatches chat requests to the correct provider implementation based on ProviderResponse" + }, + { + "name": "BgRefresh", + "type": "struct", + "purpose": "Holds and aborts background refresh tasks (AbortHandle) when dropped" + } + ], + "semantic_tags": [ + "providers", + "routing", + "caching", + "models", + "background-tasks" + ], + "handles_entities": [ + "Model", + "ChatCompletionMessage", + "Provider" + ], + "key_behaviors": [ + "routes chat requests to the appropriate provider implementation", + "fetches and caches provider model lists with background refresh", + "cancels background refreshes when repository is dropped" + ], + "insights": [ + { + "type": "refactoring", + "category": "Coupling", + "title": "Introduce ProviderRouter and model caching background refresh", + "problem": "Chat repository mixed routing logic, model caching, and background refresh duties in a brittle way.", + "root_cause": "Originally multiple repos were fields on the top-level repository with ad-hoc routing; model list caching was simplistic or missing.", + "solution": "Introduce ProviderRouter to centralize routing (chat/models). Add a model_cache (CacacheStorage) used by models() to return cached values fast and trigger a background refresh task. 
Implement BgRefresh guard to cancel background tasks on drop. models() now returns cached list when available and refreshes in background; otherwise fetches and caches synchronously.", + "lesson_learned": "Separate routing concerns into a small router struct and keep caching isolated. When doing background refreshes, track abort handles and ensure they are cancelled on drop to avoid runaway tasks. Be careful to avoid silent failures: log background refresh errors with context.", + "commits": [ + "0577b3a", + "58827bd" + ], + "constructs": [ + "ForgeChatRepository::models", + "ProviderRouter", + "BgRefresh" + ] + }, + { + "type": "bug_fix", + "category": "Routing", + "title": "Route OpenAIResponses providers to codex_repo for chat and models", + "problem": "OpenAI Responses-compatible providers need to use the codex repo for chat/models; earlier routing did not include the responses variant.", + "root_cause": "ProviderResponse variants added required routing to map OpenAIResponses to existing codex repo implementation.", + "solution": "Add match arms mapping ProviderResponse::OpenAIResponses to codex_repo for chat() and models().", + "lesson_learned": "When adding provider response variants, ensure the central router maps them to the appropriate implementation.", + "commits": [ + "58827bd", + "0577b3a" + ], + "constructs": [ + "chat", + "models" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/agent_executor.rs": { + "short_description": "Executes agents as tools and provides agent tool definitions", + "category": "SOURCE_CODE", + "description": "AgentExecutor provides functions to list agent tool definitions and to execute an agent as a tool call by creating (or reusing) a conversation, invoking the Forge application chat workflow, streaming responses, and converting the final output into a ToolOutput element. 
It also caches agent definitions and exposes a contains_tool helper.", + "key_constructs": [ + { + "name": "AgentExecutor", + "type": "struct", + "purpose": "Encapsulates services and cached tool agent definitions" + }, + { + "name": "AgentExecutor::agent_definitions", + "type": "function", + "purpose": "Returns cached or freshly loaded ToolDefinition list for available agents" + }, + { + "name": "AgentExecutor::execute", + "type": "function", + "purpose": "Runs an agent as a tool call, streams chat responses, and returns ToolOutput" + }, + { + "name": "AgentExecutor::contains_tool", + "type": "function", + "purpose": "Checks whether a given tool name is provided by any agent" + } + ], + "semantic_tags": [ + "agent", + "tools", + "conversation", + "execution", + "streaming" + ], + "handles_entities": [ + "Agent", + "ToolDefinition", + "Conversation", + "ToolOutput" + ], + "key_behaviors": [ + "exposes agent tool definitions for tool registry", + "executes an agent as an LLM-powered tool and collects output", + "reuses or creates conversations for agent-invoked tasks" + ], + "insights": [ + { + "type": "feature", + "category": "State Management", + "title": "AgentExecutor supports conversation reuse (session_id) for subagents", + "problem": "Subagents needed to either resume an existing agent conversation or start a fresh one; previously agent execution always created a new conversation.", + "root_cause": "AgentExecutor.execute created a new conversation unconditionally.", + "solution": "Add optional ConversationId parameter to execute. If provided, find_conversation and reuse it (error if missing); otherwise create a conversation with context.initiator(\"agent\") and upsert it. 
This enables resumption of prior agent sessions and proper billing/metadata.", + "lesson_learned": "Allow agent tool calls to optionally reuse conversation context to preserve continuity; always mark agent-initiated conversations appropriately.", + "commits": [ + "9d5094f" + ], + "constructs": [ + "AgentExecutor::execute", + "AgentExecutor::new" + ] + }, + { + "type": "bug_fix", + "category": "Parsing", + "title": "Pattern-match ChatResponse::ToolCallStart variant fields correctly", + "problem": "Code matched deprecated tuple-style enum variant patterns leading to compile-time mismatches after ChatResponse variant changed to struct-like.", + "root_cause": "ChatResponse ToolCallStart changed shape earlier; agent_executor wasn't updated to struct-style match patterns.", + "solution": "Updated matches to use ChatResponse::ToolCallStart { .. } pattern and similar for send/clear behavior.", + "lesson_learned": "When changing enum variant representation (tuple -> struct-like), update all pattern matches. Prefer using { .. 
} when only presence matters.", + "commits": [ + "c1c0506" + ], + "constructs": [ + "AgentExecutor::handle_message_match" + ] + }, + { + "type": "breaking_change", + "category": "Typing", + "title": "AgentExecutor.execute now takes AgentId instead of String", + "problem": "Multiple callsites expected the old signature; sub-agent calls required strong AgentId typing.", + "root_cause": "Refactor to strengthen types and avoid passing raw strings as agent identifiers.", + "solution": "Change execute signature to accept AgentId; update internal chat call to pass agent id in ChatRequest and ForgeApp.chat.", + "commit": [ + "b22ee2e" + ], + "constructs": [ + "AgentExecutor::execute" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/user_prompt.rs": { + "short_description": "Generates and injects user prompts and attachments into conversations", + "category": "SOURCE_CODE", + "description": "UserPromptGenerator renders the user's prompt (using templates and event context), injects it into the conversation context, optionally adds droppable 'piped' context and todos when resuming, parses attachments found in the prompt and records file-read metrics. 
It's responsible for preparing the conversation state that will be sent to the chat model.", + "key_constructs": [ + { + "name": "UserPromptGenerator", + "type": "struct", + "purpose": "Service struct that holds services, agent, event and current time for prompt generation" + }, + { + "name": "UserPromptGenerator::add_user_prompt", + "type": "function", + "purpose": "Main entrypoint that adds rendered user prompt, todos, additional context and attachments" + }, + { + "name": "UserPromptGenerator::add_rendered_message", + "type": "function", + "purpose": "Renders the user message using templates and returns any rendered content for attachments" + }, + { + "name": "UserPromptGenerator::add_attachments", + "type": "function", + "purpose": "Parses attachments from rendered content, adds them to context, and records file read metrics" + }, + { + "name": "UserPromptGenerator::add_todos_on_resume", + "type": "function", + "purpose": "Injects existing todos as a droppable user message when resuming a conversation" + } + ], + "semantic_tags": [ + "prompts", + "templating", + "attachments", + "conversation", + "metrics" + ], + "handles_entities": [ + "Conversation", + "ContextMessage", + "Attachment", + "Todo", + "TextMessage" + ], + "key_behaviors": [ + "renders and injects user prompts into conversation context", + "adds piped input and todos as droppable messages when appropriate", + "parses attachments and records file-read operations in metrics" + ], + "insights": [ + { + "type": "refactoring", + "category": "State Management", + "title": "Use FileInfo for attachments and account for cancelled todos", + "problem": "Attachment handling and metrics used per-range fields and content_hash; with new FileInfo shape and content-keyed todos the code needed to adapt.", + "root_cause": "File read output shape changed (now ReadOutput.info: FileInfo). 
Also todo updates are content-keyed so metrics updates need to refer to FileInfo.content_hash.", + "solution": "Updated code that constructs metrics to read FileInfo from AttachmentContent::FileContent and store FileOperation.content_hash from info.content_hash. Also added display string for TodoStatus::Cancelled in prompt generator.", + "lesson_learned": "When internal representations change (consolidated metadata structs), update all callers. Keep prompt generation consistent with domain enum additions.", + "commits": [ + "e84bc7f", + "29db91a" + ], + "constructs": [ + "UserPromptGenerator::generate_prompt_fragment", + "fixture_todo_write_output_raw (test helper)" + ] + }, + { + "type": "bug_fix", + "category": "State Management", + "title": "Use raw content_hash from attachment rather than recomputing from displayed content", + "problem": "UserPromptGenerator previously recomputed content-based metric hashes from displayed (line-numbered) content, causing mismatch with raw-file hash used by external-change detector.", + "root_cause": "The metrics tracked file content hash using formatted/display content instead of a raw content hash present in attachment metadata.", + "solution": "Use AttachmentContent::FileContent.content_hash (raw hash) when populating conversation metrics; updated tests to include content_hash fields.", + "commits": [ + "70cba43" + ], + "constructs": [ + "UserPromptGenerator::generate_metrics" + ] + }, + { + "type": "feature", + "category": "State Management", + "title": "Inject existing todos into resumed conversations", + "problem": "When resuming a conversation, the assistant lacked immediate visibility of existing todos in the prompt which could cause context loss.", + "root_cause": "Resumption flow added messages but didn't re-inject todos as droppable user messages.", + "solution": "On resume, detect prior user messages and inject a droppable user message containing the current todo list formatted as markdown; added add_todos_on_resume 
and format_todos_as_markdown helper functions. Added tests verifying injection on resume and no injection on new conversations.", + "lesson_learned": "Long-lived session state (todos) should be surfaced into resumed contexts in a droppable/non-authoritative way to preserve context without polluting canonical history. Tests should cover both resume and new-conversation flows.", + "commits": [ + "4f1ad6b" + ], + "constructs": [ + "add_todos_on_resume", + "format_todos_as_markdown", + "add_user_prompt" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/user_prompt.rs::tests" + ], + "source_commits": [ + "4f1ad6b" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/lib.rs": { + "short_description": "Top-level forge_main crate module and public exports", + "category": "SOURCE_CODE", + "description": "Declares and imports the submodules used by the forge CLI/TUI crate, re-exports key types (Cli, Sandbox, UI, title display utilities) and initializes a global tracker LazyLock. 
Acts as the central module aggregator for the forge_main crate.", + "key_constructs": [ + { + "name": "TRACKER", + "type": "constant", + "purpose": "Global LazyLock-wrapped forge_tracker::Tracker used across the CLI", + "callers": [ + { + "file": "crates/forge_main/src/tracker.rs", + "line": 3, + "context": "use crate::TRACKER;" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 8, + "context": "tokio::spawn(TRACKER.dispatch(event));" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 16, + "context": "tokio::runtime::Handle::current().block_on(TRACKER.dispatch(event))" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 47, + "context": "tokio::spawn(TRACKER.set_model(model));" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 51, + "context": "tokio::spawn(TRACKER.login(login));" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 51, + "context": "use crate::{TRACKER, banner, tracker};" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 236, + "context": "_guard: forge_tracker::init_tracing(env.log_path(), TRACKER.clone())?," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 337, + "context": "TRACKER.set_conversation(conversation).await;" + } + ] + }, + { + "name": "pub use cli::{Cli, TopLevelCommand}", + "type": "reexport", + "purpose": "Re-exports CLI entry types for consumers" + } + ], + "semantic_tags": [ + "module-index", + "reexports", + "initialization", + "tracker" + ], + "handles_entities": [ + "Tracker" + ], + "key_behaviors": [ + "exposes CLI and UI types to consumers", + "initializes and provides a global tracker instance" + ], + "insights": [ + { + "type": "breaking_change", + "category": "API", + "title": "ConfigManager export removed; config module deleted", + "problem": "Previously public ConfigManager was exported from lib.rs and consumed elsewhere", + "root_cause": "Config logic was inlined into UI and config module files were deleted", + "solution": "Remove pub 
use config::ConfigManager; update callers to use UI::handle_config_command", + "commits": [ + "cbe2825" + ], + "constructs": [ + "lib exports" + ], + "lesson_learned": "Deleting a module and its public types is a breaking change. Ensure dependent crates or integration points are updated simultaneously; consider a deprecation bridge where needed." + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/model.rs": { + "short_description": "Manages slash/forge commands, registers agent/workflow commands, and parses input", + "category": "SOURCE_CODE", + "description": "Provides ForgeCommand and ForgeCommandManager for registering default, workflow, and agent-derived commands, sanitizing agent IDs for command names, detecting conflicts, and parsing user input into a SlashCommand enum. The file also defines the SlashCommand enum which models all supported slash commands and their usage strings used by the interactive CLI.", + "key_constructs": [ + { + "name": "ForgeCommandManager", + "type": "struct", + "purpose": "Manages a thread-safe list of forge commands and handles registration/parsing" + }, + { + "name": "ForgeCommandManager::register_agent_commands", + "type": "function", + "purpose": "Registers agent-derived commands, skipping reserved name conflicts" + }, + { + "name": "ForgeCommandManager::parse", + "type": "function", + "purpose": "Parses raw user input into a SlashCommand (commands, custom, shell, messages)" + }, + { + "name": "SlashCommand", + "type": "enum", + "purpose": "Represents all types of user inputs/commands recognized by the CLI" + }, + { + "name": "ForgeCommand", + "type": "struct", + "purpose": "Represents a registered command with name, description and optional default value" + } + ], + "semantic_tags": [ + "command-parsing", + "completion", + "agent-commands", + "cli", + "templating" + ], + "handles_entities": [ + "ForgeCommand", + "UserCommand", + "Agent" + ], + "key_behaviors": [ + 
"registers agent and workflow commands for tab-completion", + "parses slash-style commands into structured SlashCommand values", + "sanitizes agent IDs into safe command names and avoids reserved conflicts" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/lib.rs": { + "short_description": "Forge repository crate root exposing repo modules and proto bindings", + "category": "SOURCE_CODE", + "description": "Declares internal modules for repository functionality (agents, conversations, provider adapters, fuzzy search, proto bindings, etc.) and exposes the main forge_repo container publicly. Also includes the compiled tonic protobuf generated module for interop.", + "key_constructs": [ + { + "name": "proto_generated", + "type": "module", + "purpose": "Includes tonic-generated protobuf types for forge.v1" + }, + { + "name": "pub use forge_repo::*", + "type": "reexport", + "purpose": "Re-exports the main forge_repo module to consumers" + } + ], + "semantic_tags": [ + "repository", + "proto", + "persistence", + "module-index" + ], + "handles_entities": [], + "key_behaviors": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/openai.rs": { + "short_description": "OpenAI-compatible provider client handling chat streaming and model listing", + "category": "SOURCE_CODE", + "description": "Implements an HTTP-backed provider client for OpenAI-compatible APIs (OpenAI, Azure, OpenRouter, Vertex, etc.), handling header composition (including provider-specific optimizations like Copilot headers), request pipeline transformation, eventsource streaming of chat responses, and fetching model lists (including static Vertex list). 
It wraps provider responses with retry/enhanced error context and implements the ChatRepository interface.", + "key_constructs": [ + { + "name": "OpenAIProvider", + "type": "struct", + "purpose": "Low-level client that prepares headers and issues eventsource chat requests to provider URL" + }, + { + "name": "OpenAIProvider::get_headers_with_request", + "type": "function", + "purpose": "Assembles request headers including session id and provider-specific optimization headers" + }, + { + "name": "OpenAIProvider::inner_chat", + "type": "function", + "purpose": "Posts an eventsource chat request and converts responses into ChatCompletionMessage stream" + }, + { + "name": "OpenAIResponseRepository", + "type": "struct", + "purpose": "Repository adapter implementing ChatRepository for OpenAI-style providers with retry handling" + }, + { + "name": "enhance_error", + "type": "function", + "purpose": "Adds provider-specific diagnostic hints to errors (e.g., GitHub Copilot model access)" + } + ], + "semantic_tags": [ + "providers", + "http", + "eventsource", + "models", + "headers" + ], + "handles_entities": [ + "Provider", + "Model", + "ChatCompletionMessage" + ], + "key_behaviors": [ + "sends chat requests to OpenAI-compatible providers and streams responses", + "fetches provider model lists (including special-case Vertex static list)", + "injects provider-specific headers and enriches errors with helpful context" + ], + "insights": [ + { + "type": "feature", + "category": "Configuration", + "title": "Support provider-level custom_headers and include them in requests", + "problem": "Some providers require additional headers (e.g., custom User-Agent) at provider level; current provider template didn't carry custom headers.", + "root_cause": "provider.json and Provider template lacked a custom_headers field.", + "solution": "Added custom_headers Option> to Provider template, propagated through provider repo and services. 
OpenAI provider get_headers appends provider.custom_headers entries to request headers. Tests added to verify header injection.", + "lesson_learned": "Allow per-provider custom headers to handle proxies or vendor-specific requirements. Ensure default header insertion logic respects provider-provided values (e.g., don't override a provider User-Agent).", + "commits": [ + "fe68905" + ], + "constructs": [ + "Provider.custom_headers", + "OpenAIProvider::get_headers" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_repo/src/provider/openai.rs (unit tests)" + ], + "source_commits": [ + "fe68905" + ] + } + }, + { + "type": "bug_fix", + "category": "Error Handling", + "title": "Enhance GitHub Copilot model-not-supported error message", + "problem": "Errors from GitHub Copilot about disabled models returned cryptic provider errors; users couldn't easily find remediation steps.", + "root_cause": "Generic error messages were propagated without provider-specific context.", + "solution": "Added enhance_error() which inspects error messages for model_not_supported patterns for ProviderId::GITHUB_COPILOT and appends helpful context including a link. Wrapped http_eventsource error mapping with map_err to call enhance_error. Also applied enhance_error when mapping stream items with into_retry.", + "lesson_learned": "Provider-specific errors should be enriched with actionable guidance; wrap map_err at call sites and include provider id context. 
Add snapshot tests to ensure message formatting.", + "commits": [ + "a7684e8" + ], + "constructs": [ + "enhance_error", + "OpenAIProvider::new", + "OpenAIProvider::http_eventsource", + "ChatRepository::chat_completion_stream" + ] + } + ], + "tests": { + "exercised_by": [ + "inline unit test test_enhance_error_github_copilot_model_not_supported in openai.rs", + "crates/forge_repo/src/provider/snapshots/forge_repo__provider__openai__tests__enhance_error_github_copilot_model_not_supported.snap" + ], + "test_functions": [ + "test_enhance_error_github_copilot_model_not_supported" + ], + "source_commits": [ + "a7684e8" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/agent.rs": { + "short_description": "Domain model definitions for Agent and related reasoning configuration", + "category": "SOURCE_CODE", + "description": "Defines the AgentId wrapper, the Agent struct representing runtime agent configuration (provider, model, prompts, tools, compaction and reasoning options), ReasoningConfig and Effort enum, and utility functions such as estimate_token_count. 
It also provides conversion to ToolDefinition and helper methods for compaction and tool ordering.", + "key_constructs": [ + { + "name": "AgentId", + "type": "struct", + "purpose": "Typed wrapper for agent identifiers with convenience constructors" + }, + { + "name": "Agent", + "type": "struct", + "purpose": "Represents an agent's runtime configuration including model, provider, prompts and tools" + }, + { + "name": "ReasoningConfig", + "type": "struct", + "purpose": "Configures reasoning effort, max_tokens and exclusion flags for agent reasoning" + }, + { + "name": "Effort", + "type": "enum", + "purpose": "Enumerates reasoning effort levels (None..Max) supported by providers" + }, + { + "name": "estimate_token_count", + "type": "function", + "purpose": "Provides a rough token estimate for reporting/compaction", + "callers": [ + { + "file": "crates/forge_domain/src/context.rs", + "line": 810, + "context": "use crate::{DirectoryEntry, FileInfo, estimate_token_count};" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 862, + "context": "let token_count = estimate_token_count(context.to_text().len());" + } + ] + } + ], + "semantic_tags": [ + "domain-model", + "agent", + "configuration", + "reasoning", + "templating" + ], + "handles_entities": [ + "Agent", + "AgentId", + "ToolDefinition" + ], + "key_behaviors": [ + "models agent configuration and reasoning preferences", + "creates ToolDefinition from agent metadata", + "estimates token counts for compaction/reporting" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/conversation.rs": { + "short_description": "Domain Conversation model, IDs and helpers", + "category": "SOURCE_CODE", + "description": "Defines the Conversation domain model, a strongly-typed ConversationId wrapper, metadata, and helper methods for usage/cost aggregation and HTML rendering. 
It exposes utilities to generate IDs, compute costs/token counts, and extract related agent conversation IDs from tool results.", + "key_constructs": [ + { + "name": "ConversationId", + "type": "struct", + "purpose": "UUID wrapper for conversation identities with parsing/generation helpers" + }, + { + "name": "Conversation", + "type": "struct", + "purpose": "Main conversation aggregate storing id, title, context, metrics, and metadata with helper methods" + }, + { + "name": "MetaData", + "type": "struct", + "purpose": "Stores creation and optional update timestamps for a conversation" + }, + { + "name": "Conversation::to_html", + "type": "function", + "purpose": "Renders the conversation to HTML using the conversation_html renderer" + }, + { + "name": "Conversation::related_conversation_ids", + "type": "function", + "purpose": "Extracts conversation IDs referenced by AI tool values in tool results" + }, + { + "name": "Conversation::total_cost", + "type": "function", + "purpose": "Sums accumulated costs across a slice of conversations" + } + ], + "semantic_tags": [ + "conversation", + "domain-model", + "usage", + "html-rendering", + "ids" + ], + "handles_entities": [ + "Conversation", + "ConversationId", + "Context", + "ToolValue", + "Usage", + "Metrics" + ], + "key_behaviors": [ + "generates new conversation IDs and conversations", + "renders conversations to HTML", + "computes accumulated tokens and cost summaries", + "extracts related agent conversation IDs from tool results" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/http.rs": { + "short_description": "HTTP client infra with debug request dumping and TLS config", + "category": "SOURCE_CODE", + "description": "Provides ForgeHttpInfra, a configured reqwest client wrapper implementing HttpInfra with centralized header sanitation, TLS configuration, eventsource support and optional debug request body persistence. 
It standardizes GET/POST/DELETE/eventsource flows and writes request payloads to disk when debug_paths are configured.", + "key_constructs": [ + { + "name": "ForgeHttpInfra", + "type": "struct", + "purpose": "Configurable HTTP infrastructure wrapper around reqwest that implements HttpInfra" + }, + { + "name": "to_reqwest_tls", + "type": "function", + "purpose": "Maps Forge TlsVersion enum to reqwest TLS version" + }, + { + "name": "sanitize_headers", + "type": "function", + "purpose": "Redacts sensitive header values for logging", + "callers": [ + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 11, + "context": "use forge_infra::sanitize_headers;" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 202, + "context": "headers = ?sanitize_headers(&headers)," + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 261, + "context": "info!(method = \"GET\", url = %url, headers = ?sanitize_headers(&headers), \"Fetching Models\");" + }, + { + "file": "crates/forge_infra/src/lib.rs", + "line": 26, + "context": "pub use http::sanitize_headers;" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 11, + "context": "use forge_infra::sanitize_headers;" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 166, + "context": "headers = ?sanitize_headers(&headers)," + } + ] + }, + { + "name": "ForgeHttpInfra::execute_request", + "type": "function", + "purpose": "Generic request executor with consistent error handling and context formatting" + }, + { + "name": "ForgeHttpInfra::write_debug_request", + "type": "function", + "purpose": "Asynchronously writes request bodies to configured debug path for offline inspection" + }, + { + "name": "VERSION", + "type": "constant", + "purpose": "Application version used in headers" + } + ], + "semantic_tags": [ + "http-client", + "tls", + "eventsource", + "debugging", + "headers" + ], + "handles_entities": [ 
+ "HTTP requests", + "HTTP responses", + "debug request files" + ], + "key_behaviors": [ + "performs GET/POST/DELETE and eventsource HTTP requests", + "configures TLS and root certificates from config", + "optionally writes request payloads to a debug file", + "sanitizes headers for logging" + ], + "insights": [ + { + "type": "security", + "category": "Security", + "title": "Consolidate header sanitization and expand sensitive list", + "problem": "Multiple providers had ad-hoc sanitize_headers implementations and the set of sensitive headers was incomplete.", + "root_cause": "Utility duplication across crates and incomplete list (only Authorization previously).", + "solution": "Moved sanitize_headers into forge_infra::http module (public re-export) and expanded sensitive headers to include x-api-key, x-goog-api-key, api-key, etc. Added unit test test_sanitize_headers_redacts_sensitive_values.", + "lesson_learned": "Centralize logging/PII redaction utilities in infra layer, keep the sensitive header list broad (authorization, API keys), and add tests to prevent regressions.", + "commits": [ + "54b2ccc" + ], + "constructs": [ + "sanitize_headers" + ] + }, + { + "type": "bug_fix", + "category": "IO", + "title": "Append debug request body chunks instead of overwriting", + "problem": "When streaming request bodies were logged to debug files, later chunks overwrote earlier ones causing truncated logs.", + "root_cause": "The debug path writer used write (overwrite) instead of append.", + "solution": "Switch to file_writer.append(...) 
when writing debug request body chunks; implement FileWriterInfra::append and underlying impls in ForgeFileWriteService and ForgeFS.", + "lesson_learned": "For streaming debug captures, append semantics are required; ensure underlying FS service exposes append and tests simulate append behavior.", + "commits": [ + "27124d7" + ], + "constructs": [ + "record_debug_request_body (internal)", + "sanitize_headers" + ] + }, + { + "type": "feature", + "category": "Observability", + "title": "Write debug POST/requests bodies to file when debug_requests configured", + "problem": "Debugging remote provider interactions required capturing raw request bodies. Existing code duplicated logic for POST/eventsource handling.", + "root_cause": "No single helper to conditionally write debug request bodies and repeated code paths.", + "solution": "Introduced write_debug_request helper and invoked it in post() and eventsource() paths. Spawn write task to file writer. Added tests to assert file writes occur when debug_requests path present.", + "lesson_learned": "Centralize debug/log writing logic to avoid duplication and ensure uniform behavior across HTTP modes.", + "commits": [ + "d0dd26e", + "50ab299" + ], + "constructs": [ + "write_debug_request", + "eventsource", + "post" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_infra/src/http.rs (unit tests)" + ], + "source_commits": [ + "d0dd26e" + ] + } + } + ], + "tests": { + "exercised_by": [ + "crates/forge_infra/src/http.rs::test_sanitize_headers_redacts_sensitive_values" + ], + "test_functions": [], + "source_commits": [ + "54b2ccc", + "27124d7" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_select/src/select.rs": { + "short_description": "Interactive fuzzy select builder using fzf", + "category": "SOURCE_CODE", + "description": "Implements a SelectBuilder that wraps fzf to present interactive fuzzy-search selection and confirm prompts on the terminal. 
It formats items with an index prefix to avoid ambiguities, strips ANSI sequences for matching, and supports confirm prompts mapped to generic types.", + "key_constructs": [ + { + "name": "SelectBuilder", + "type": "struct", + "purpose": "Builder for constructing interactive fuzzy selection prompts with options and UI hints" + }, + { + "name": "build_fzf", + "type": "function", + "purpose": "Configures an Fzf instance with flags and optional header/query/cursor" + }, + { + "name": "indexed_items", + "type": "function", + "purpose": "Formats display items as \"{idx}\\t{display}\" so selection can map back to original index", + "callers": [ + { + "file": "crates/forge_select/src/multi.rs", + "line": 7, + "context": "use crate::select::{indexed_items, parse_fzf_index};" + }, + { + "file": "crates/forge_select/src/multi.rs", + "line": 51, + "context": "fzf.add_items(indexed_items(&display_options))" + } + ] + }, + { + "name": "parse_fzf_index", + "type": "function", + "purpose": "Parses the index prefix from fzf output to recover selected option", + "callers": [ + { + "file": "crates/forge_select/src/multi.rs", + "line": 7, + "context": "use crate::select::{indexed_items, parse_fzf_index};" + }, + { + "file": "crates/forge_select/src/multi.rs", + "line": 63, + "context": "parse_fzf_index(line).and_then(|index| self.options.get(index).cloned())" + } + ] + }, + { + "name": "SelectBuilder::prompt", + "type": "function", + "purpose": "Runs the fzf prompt and returns the selected option or None if cancelled" + } + ], + "semantic_tags": [ + "fzf", + "interactive", + "cli-ui", + "fuzzy-search", + "ansi-stripping" + ], + "handles_entities": [ + "Selection Options" + ], + "key_behaviors": [ + "presents fuzzy-search selection lists inline using fzf", + "provides yes/no confirmation prompts via fzf", + "maps fzf output back to original option indices" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Security", + "title": "Replace unsafe transmute_copy with safe Any 
downcast for confirm prompt", + "problem": "Code used unsafe std::mem::transmute_copy to convert a bool result to generic type T, which is undefined behavior / memory-unsafe.", + "root_cause": "Unsafe mem transmute used to coerce types instead of safe runtime type check and downcasting.", + "solution": "Introduce prompt_confirm returning Option<bool> and prompt_confirm_as wrapper which verifies TypeId and safely downcasts via Box<dyn Any>. Eliminates transmute_copy usage.", + "commits": [ + "c4543aa" + ], + "constructs": [ + "prompt_confirm", + "prompt_confirm_as", + "SelectBuilder::build (TypeId check)" + ] + }, + { + "type": "refactoring", + "category": "Other", + "title": "Replace dialoguer-based selection UI with fzf-wrapped integration", + "problem": "dialoguer's fuzzy select had limitations/maintenance issues; desire to match shell plugin/fzf behavior and support header-lines/starting cursor.", + "root_cause": "UI consistency across CLI and shell plugin required fzf-like behavior (header rows, starting cursor, column display).", + "solution": "Introduce fzf-wrapped selection builder API (via fzf-wrapped crate), remove terminal.rs and crossterm dependencies, adapt select internals to support starting_cursor, header_lines and more porcelain-friendly display. Many callers updated to provide header and row semantics.", + "commits": [ + "7fc0c5e" + ], + "constructs": [ + "ForgeSelect", + "SelectBuilder", + "SelectBuilderOwned", + "InputBuilder", + "MultiSelectBuilder" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/lib.rs": { + "short_description": "Service crate exports and conversion traits", + "category": "SOURCE_CODE", + "description": "Top-level module for forge_services that declares submodules and re-exports common service types.
It also defines generic traits IntoDomain and FromDomain used across services to convert between external representations and domain models.", + "key_constructs": [ + { + "name": "IntoDomain", + "type": "trait", + "purpose": "Trait to convert a type into its domain model representation" + }, + { + "name": "FromDomain", + "type": "trait", + "purpose": "Trait to construct an external type from a domain type with error handling" + }, + { + "name": "mod provider_service", + "type": "module", + "purpose": "Module placeholder exported via pub use (actual provider integrations)" + } + ], + "semantic_tags": [ + "service-layer", + "type-conversion", + "exports", + "abstraction" + ], + "handles_entities": [], + "key_behaviors": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/request.rs": { + "short_description": "DTOs and conversions for OpenAI-style request payloads", + "category": "SOURCE_CODE", + "description": "Defines the request-side DTOs (Message, Request, ContentPart, Tool, etc.) used to communicate with OpenAI/compatible providers and implements conversions from domain types (Context, ContextMessage, ToolCallFull). 
It also handles caching flags, response format mapping, and safe serialization/normalization of tool arguments.", + "key_constructs": [ + { + "name": "Request", + "type": "struct", + "purpose": "Represents the top-level request payload to OpenAI-like providers (messages, model, tools, streaming options, reasoning, etc.)" + }, + { + "name": "Message", + "type": "struct", + "purpose": "Represents a single chat message with role, content, tool metadata and reasoning details" + }, + { + "name": "ContentPart", + "type": "enum", + "purpose": "Represents parts of a message (text or image) with optional cache control" + }, + { + "name": "impl From for Request", + "type": "function", + "purpose": "Converts domain Context into a provider Request, mapping tools, streaming and reasoning options" + }, + { + "name": "serialize_tool_call_arguments", + "type": "function", + "purpose": "Serializes and normalizes tool call arguments against the tool catalog schema for provider compatibility" + }, + { + "name": "impl From for ToolCall", + "type": "function", + "purpose": "Converts internal ToolCallFull into the DTO format expected by providers" + } + ], + "semantic_tags": [ + "openai", + "dto", + "serialization", + "tool-calls", + "reasoning" + ], + "handles_entities": [ + "Context", + "ContextMessage", + "ToolCallFull", + "Tool", + "Message" + ], + "key_behaviors": [ + "converts domain chat contexts into provider-compatible request payloads", + "normalizes and serializes tool call arguments to JSON schema", + "supports message caching annotations and streaming options" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Handle double-encoded tool call arguments when serializing OpenAI-style requests", + "problem": "Tool call arguments could be double-encoded (string containing JSON) or not strictly conforming to a tool's input schema; naive serde_json::to_string(&value.arguments) would re-serialize the original string producing a JSON string payload 
for the API.", + "root_cause": "Serialization didn't attempt to parse/normalize argument strings or coerce them to the tool's declared schema.", + "solution": "Add serialize_tool_call_arguments(tool_call) which parses the arguments, attempts to coerce to the tool's input_schema via forge_json_repair::coerce_to_schema (ToolCatalog lookup), and serializes the normalized object. Fall back to original serialization when parsing/coercion fails.", + "commits": [ + "5a6f3c8" + ], + "constructs": [ + "serialize_tool_call_arguments", + "impl From for ToolCall" + ] + }, + { + "type": "feature", + "category": "Compatibility", + "title": "Add reasoning_content flat alias for Kimi K2 compatibility", + "problem": "Some backends like kimi_k2 expect a flat reasoning_content field rather than reasoning_details arrays.", + "root_cause": "Different model providers have distinct schema expectations.", + "solution": "Added reasoning_content Option alias to Message struct and ensured transformers clear/populate it as needed.", + "lesson_learned": "When supporting multiple provider backends, make DTOs flexible and add explicit fields or aliases used by transformers for interop.", + "commits": [ + "40cfcc8" + ], + "constructs": [ + "Message.reasoning_content" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_app/src/dto/openai/request.rs (inline tests)" + ], + "test_functions": [ + "test_assistant_message_with_dump_style_tool_call_arguments_conversion" + ], + "source_commits": [ + "5a6f3c8" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/conversation/conversation_record.rs": { + "short_description": "Repository DTOs mapping conversation domain types to storage records", + "category": "SOURCE_CODE", + "description": "Defines storage-focused record types for conversation persistence and conversions to/from forge_domain types, including legacy compatibility migrations for older record variants. 
This keeps the repository layer decoupled from domain changes while providing TryFrom/From implementations for safe serialization/deserialization.", + "key_constructs": [ + { + "name": "ModelIdRecord / ImageRecord / ToolCallIdRecord", + "type": "struct", + "purpose": "Primitive repository record wrappers mapping domain identifiers and images to storage-safe representations" + }, + { + "name": "ToolCallArgumentsRecord", + "type": "struct", + "purpose": "Stores tool call arguments as raw JSON to accommodate parsed and unparsed variants" + }, + { + "name": "TokenCountRecord / UsageRecord", + "type": "enum|struct", + "purpose": "Repository representations for token counts and usage that convert to domain Usage" + }, + { + "name": "ToolValueRecord", + "type": "enum", + "purpose": "Serialized tool value including legacy variants and migration logic back to domain ToolValue" + }, + { + "name": "TextMessageRecord / ToolResultRecord / ToolOutputRecord", + "type": "struct", + "purpose": "Record representations of text messages and tool results with conversions to domain types" + } + ], + "semantic_tags": [ + "persistence", + "dto", + "serialization", + "migration", + "conversation" + ], + "handles_entities": [ + "Conversation", + "TextMessage", + "ToolResult", + "ToolValue", + "Usage", + "TokenCount" + ], + "key_behaviors": [ + "serializes conversation components for storage", + "deserializes stored records into domain types with legacy migrations", + "encapsulates storage schema independent of domain model" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Add legacy ToolValueRecord variants and migration paths", + "problem": "Old persisted conversations used variants (Markdown, FileDiff, Pair) removed from current domain model; deserializing these caused failures.", + "root_cause": "Domain model evolved but stored data still contains older variants; no migration code existed in repository layer.", + "solution": "Added legacy variants 
Markdown, FileDiff, Pair and FileDiffRecord struct, and TryFrom impls to convert legacy variants into current forge_domain::ToolValue equivalents (e.g., Markdown -> Text, FileDiff -> Text summary, Pair -> use first element).", + "lesson_learned": "When evolving serialized domain enums keep repository-level compatibility code that recognizes legacy variants and converts them to current domain types; add unit tests exercising legacy JSON blobs.", + "commits": [ + "39977d3" + ], + "constructs": [ + "ToolValueRecord (enum)", + "FileDiffRecord", + "TryFrom<ToolValueRecord> for forge_domain::ToolValue" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/changed_files.rs": { + "short_description": "Detects externally modified files and injects notifications", + "category": "SOURCE_CODE", + "description": "Service that detects file changes external to the agent using a FileChangeDetector, updates conversation metrics to avoid duplicate notices, and inserts a droppable user message summarizing changed files.
It formats display paths relative to cwd and updates file hashes in the conversation metrics.", + "key_constructs": [ + { + "name": "ChangedFiles", + "type": "struct", + "purpose": "Service struct holding infra and agent info used to detect and notify about changed files" + }, + { + "name": "ChangedFiles::update_file_stats", + "type": "function", + "purpose": "Detects changed files, updates metrics hashes, and appends a droppable notification message to the conversation" + }, + { + "name": "FileChangeDetector", + "type": "module", + "purpose": "Used to detect file modifications by comparing recorded hashes to current file states (used via crate::file_tracking)" + } + ], + "semantic_tags": [ + "file-tracking", + "notifications", + "filesystem", + "conversation", + "metrics" + ], + "handles_entities": [ + "Conversation", + "Metrics", + "FileOperation", + "FileInfo" + ], + "key_behaviors": [ + "detects externally modified files against tracked hashes", + "updates conversation metrics with new file hashes", + "inserts a user-facing, droppable notification listing modified files" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Configuration", + "title": "Use dynamic config for parallel file reads", + "problem": "The number of parallel file reads was previously passed in from callers or read from a cached config; when config changed the detectors might still use the old value.", + "root_cause": "Parallel read config was threaded instead of being fetched where needed.", + "solution": "Changed update_file_stats to call services.get_config().map(|c| c.max_parallel_file_reads).unwrap_or(4) and FileChangeDetector.detect now accepts parallel_file_reads parameter. 
Tests updated to call new signature.", + "lesson_learned": "Small concurrency/throughput knobs should be fetched dynamically; update tests when signatures change.", + "commits": [ + "5bd0b94", + "7e8a51d" + ], + "constructs": [ + "ChangedFiles::update_file_stats" + ] + }, + { + "type": "refactoring", + "category": "API", + "title": "Use FileInfo for ReadOutput in changed files tests", + "problem": "Tests constructing ReadOutput used separate fields; code now expects info: FileInfo.", + "root_cause": "Migration to FileInfo required test updates.", + "solution": "Updated tests to populate ReadOutput.info with FileInfo::new(..., content_hash).", + "lesson_learned": "When changing public DTO shapes, update unit tests to validate the new shape; snapshots must be consistent with new fields.", + "commits": [ + "29db91a" + ], + "constructs": [ + "ReadOutput test fixtures" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/fmt/snapshots/*" + ], + "source_commits": [ + "29db91a" + ] + } + }, + { + "type": "performance", + "category": "Concurrency", + "title": "Pass parallel_file_reads configuration into FileChangeDetector", + "problem": "ChangedFiles triggered file detection without controlling concurrency.", + "root_cause": "FileChangeDetector gained a new constructor but callers were not updated to pass concurrency.", + "solution": "Changed update_file_stats to pass self.services.get_environment().parallel_file_reads into FileChangeDetector::new.", + "lesson_learned": "When introducing a concurrency tuning parameter, update all construction sites and surface it through environment/service layers.", + "commits": [ + "e25c1c0" + ], + "constructs": [ + "update_file_stats" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_app/src/changed_files.rs (mod tests adjusted to new signature)" + ], + "test_functions": [], + "source_commits": [ + "5bd0b94", + "7e8a51d" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_app/src/system_prompt.rs": { + "short_description": "Builds system prompt blocks using templates, files, tools and git stats", + "category": "SOURCE_CODE", + "description": "Constructs the system-level prompt content for an Agent by rendering templates with environment, tool and file context, and optional custom rules/skills. It can fetch git-based extension statistics, prepare tool information dependent on model capabilities, and append system messages to conversations.", + "key_constructs": [ + { + "name": "SystemPrompt", + "type": "struct", + "purpose": "Holds services, environment, agent, tool and template configuration used to generate the system message" + }, + { + "name": "SystemPrompt::add_system_message", + "type": "function", + "purpose": "Renders static and non-static template blocks to populate system messages for a conversation" + }, + { + "name": "SystemPrompt::fetch_extensions", + "type": "function", + "purpose": "Runs git ls-files via shell service to compute extension statistics for workspace summary" + }, + { + "name": "parse_extensions", + "type": "function", + "purpose": "Parses git ls-files output into Extension/ExtensionStat summaries" + }, + { + "name": "SystemPrompt::is_tool_supported", + "type": "function", + "purpose": "Determines whether tools are supported by checking agent and model capabilities" + } + ], + "semantic_tags": [ + "system-prompt", + "templates", + "git", + "tools", + "workspace-summary" + ], + "handles_entities": [ + "SystemContext", + "Extension", + "ToolDefinition", + "Skill", + "Agent" + ], + "key_behaviors": [ + "renders the system prompt template with environment and tool info", + "fetches git extension statistics for workspace summaries", + "decides and injects tool information based on model/agent capabilities" + ], + "insights": [ + { + "type": "feature", + "category": "Other", + "title": "Add file extension statistics to system prompt using git ls-files", + "problem": "System prompt lacked quick 
summary of repository file-type composition which is useful for system-level context.", + "root_cause": "SystemContext didn't include extension statistics and there was no service hook to run git to gather stats.", + "solution": "Added ShellService dependency to SystemPrompt, implemented fetch_extensions that runs git ls-files via ShellService, added parse_extensions to summarize extension counts/percentages, extended SystemContext to include Extension data, updated templates to render workspace_extensions partial, and added tests plus fixtures to validate behavior and truncation to top-N extensions.", + "lesson_learned": "Adding environment context via shell commands is useful but must handle failures (non-git repos) gracefully; keep summary sizes bounded (max_extensions) and ensure templates and tests reflect the new context.", + "commits": [ + "e587cb5" + ], + "constructs": [ + "SystemPrompt::fetch_extensions", + "SystemPrompt::add_system_message", + "parse_extensions", + "Extension/ExtensionStat structs" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_app/src/orch_spec/orch_system_spec.rs (new tests)", + "fixtures: git_ls_files_mixed.txt and git_ls_files_many_extensions.txt" + ], + "test_functions": [], + "source_commits": [ + "e587cb5" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/conversation_html.rs": { + "short_description": "HTML renderer for Conversation objects", + "category": "SOURCE_CODE", + "description": "Provides functions to render a Conversation (and related conversations) into a complete HTML document using an Element DSL, including info tables, message context, tools, usage and reasoning sections. 
It formats tool outputs, agent conversation anchors, and includes CSS for styling.", + "key_constructs": [ + { + "name": "render_conversation_html", + "type": "function", + "purpose": "Renders a single Conversation into a full HTML document string", + "callers": [ + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 96, + "context": "crate::conversation_html::render_conversation_html(self)" + } + ] + }, + { + "name": "render_conversation_html_with_related", + "type": "function", + "purpose": "Renders a main conversation plus related agent conversations with anchor navigation", + "callers": [ + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 104, + "context": "crate::conversation_html::render_conversation_html_with_related(self, related)" + } + ] + }, + { + "name": "create_info_table", + "type": "function", + "purpose": "Builds an information table with metadata, reasoning and usage stats" + }, + { + "name": "create_conversation_context_section", + "type": "function", + "purpose": "Renders messages, tool calls, reasoning details and attachments into HTML" + }, + { + "name": "create_tools_section", + "type": "function", + "purpose": "Displays available tools and their schemas" + } + ], + "semantic_tags": [ + "html-rendering", + "conversation", + "tools", + "usage", + "reasoning" + ], + "handles_entities": [ + "Conversation", + "ContextMessage", + "ToolValue", + "ToolDefinition" + ], + "key_behaviors": [ + "renders conversations and related agent conversations as navigable HTML", + "displays tool definitions, usage stats and reasoning details in HTML", + "applies CSS and structured elements to present messages and errors" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/system_context.rs": { + "short_description": "Structures for system prompt context and extension stats", + "category": "SOURCE_CODE", + "description": "Defines SystemContext and supporting structs (Extension, 
ExtensionStat, TemplateConfig) used by templates to render system prompts. It encapsulates environment, files, tool information, skills, and configuration values passed into templates.", + "key_constructs": [ + { + "name": "ExtensionStat", + "type": "struct", + "purpose": "Holds count and percentage stats for a single file extension" + }, + { + "name": "Extension", + "type": "struct", + "purpose": "Aggregated extension statistics for the repository (top N plus remainder)" + }, + { + "name": "TemplateConfig", + "type": "struct", + "purpose": "Holds configuration values exposed to tool description templates" + }, + { + "name": "SystemContext", + "type": "struct", + "purpose": "Primary structure passed to templates containing env, files, tools, skills and config" + } + ], + "semantic_tags": [ + "system-context", + "templates", + "extension-stats", + "environment", + "tool-info" + ], + "handles_entities": [ + "Extension", + "ExtensionStat", + "SystemContext", + "TemplateConfig" + ], + "key_behaviors": [ + "provides structured context used by system prompt templates", + "represents file extension and template-related configuration" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/main.rs": { + "short_description": "CLI/TUI entrypoint that initializes UI and runtime", + "category": "CLI", + "description": "Application entrypoint that parses CLI args, detects piped stdin, reads configuration, sets up platform-specific ANSI support and panic hooks, and initializes the UI with ForgeAPI. 
It centralizes startup validation (config and services URL) and sandbox/worktree selection before handing control to the interactive or one-shot UI.", + "key_constructs": [ + { + "name": "main", + "type": "function", + "purpose": "Tokio async main that calls run and prints errors on failure" + }, + { + "name": "run", + "type": "function", + "purpose": "Performs initialization: ANSI setup, panic hook, CLI parse, config read, services URL validation and UI bootstrap" + }, + { + "name": "enable_stdout_vt_processing", + "type": "function", + "purpose": "Windows-specific helper to ensure VT processing on STDOUT for ANSI escapes" + }, + { + "name": "Cli parsing and piped input detection", + "type": "logic", + "purpose": "Detects piped stdin and merges into Cli.piped_input; handles sandbox/directory selection" + } + ], + "semantic_tags": [ + "cli", + "startup", + "ui-init", + "configuration", + "panic-handling" + ], + "handles_entities": [ + "Cli", + "ForgeConfig", + "UI", + "ForgeAPI", + "Sandbox" + ], + "key_behaviors": [ + "parses CLI args and detects piped input", + "validates and loads Forge configuration", + "initializes application UI and ForgeAPI", + "sets up platform-specific terminal ANSI behavior and panic reporting" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Platform/Other", + "title": "Enable VT processing on STD_OUTPUT_HANDLE on Windows for ANSI rendering", + "problem": "ANSI escape sequences were shown as raw bytes on Windows in some console configurations because VT processing was not enabled on the STD_OUTPUT_HANDLE; the enable_ansi_support crate sets CONOUT$ but flags are per-handle and may not propagate.", + "root_cause": "Console mode flags are per-handle on Windows; enabling VT on the CONOUT$ screen buffer isn't always sufficient for the standard stdout handle.", + "solution": "Add enable_stdout_vt_processing() that calls GetStdHandle(STD_OUTPUT_HANDLE), GetConsoleMode, and SetConsoleMode to OR in ENABLE_VIRTUAL_TERMINAL_PROCESSING 
(only on Windows). Keep stderr unchanged to avoid breaking console crate spinner behavior.", + "commits": [ + "d9c5f24" + ], + "constructs": [ + "enable_stdout_vt_processing", + "main (Windows VT initialization block)" + ] + }, + { + "type": "refactoring", + "category": "State Management", + "title": "Thread CLI model/provider flags into ForgeAPI init", + "problem": "New CLI flags needed to be passed into API construction.", + "root_cause": "ForgeAPI::init signature didn't accept runtime overrides, so flags couldn't be used at session start.", + "solution": "Modified ForgeAPI::init to accept override_model and override_provider and capture cli.model/cli.provider into the closure passed to UI::init.", + "lesson_learned": "When adding global CLI-level overrides, ensure initialization closures capture and forward values to downstream dependency constructors.", + "commits": [ + "0328695" + ], + "constructs": [ + "main", + "ForgeAPI::init" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/skill.rs": { + "short_description": "Skill repository loading built-in, global and project skills", + "category": "SOURCE_CODE", + "description": "Implements ForgeSkillRepository which loads builtin embedded skills, then global and project-local skills from disk, resolves conflicts by precedence (CWD > Global > Built-in), and renders skill commands with environment paths. 
It reads SKILL.md front matter (YAML) to extract metadata and gathers resource files in each skill directory.", + "key_constructs": [ + { + "name": "ForgeSkillRepository", + "type": "struct", + "purpose": "Repository implementation that loads skills from builtin, global and local directories via infra" + }, + { + "name": "ForgeSkillRepository::load_builtin_skills", + "type": "function", + "purpose": "Returns a list of embedded built-in skill definitions" + }, + { + "name": "ForgeSkillRepository::load_skills_from_dir", + "type": "function", + "purpose": "Asynchronously enumerates subdirectories, reads SKILL.md and collects resources for each skill" + }, + { + "name": "extract_skill", + "type": "function", + "purpose": "Parses YAML front matter from SKILL.md to extract skill name and description" + }, + { + "name": "resolve_skill_conflicts", + "type": "function", + "purpose": "Resolves duplicate skill names keeping the last occurrence (preference ordering)" + } + ], + "semantic_tags": [ + "skills", + "repository", + "front-matter", + "walk", + "conflict-resolution" + ], + "handles_entities": [ + "Skill", + "Skill resources", + "Environment" + ], + "key_behaviors": [ + "loads skills from built-in, global and project-local locations", + "parses SKILL.md front matter to extract metadata", + "resolves duplicate skills giving CWD precedence", + "renders skill commands with environment paths" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Avoid treating skill content as full template", + "problem": "Skill command content was being rendered through a full template engine causing unintended template evaluation and errors when the skill text contained template-like sequences.", + "root_cause": "TemplateEngine::render_template was used on arbitrary skill.command content, but skills are expected to support only a couple of simple substitutions.", + "solution": "Replace template engine rendering with simple .replace of known placeholders 
({{global_skills_path}} and {{local_skills_path}}) preserving literal skill content elsewhere.", + "lesson_learned": "Avoid over-eager templating on free-form user content \u2014 prefer explicit, minimal substitutions to avoid accidental interpretation of template syntax.", + "commits": [ + "394f7bd" + ], + "constructs": [ + "render_skill" + ] + }, + { + "type": "refactoring", + "category": "Other", + "title": "Rename skill path and name from pr-description to github-pr-description", + "problem": "Skill naming not explicit about GitHub context.", + "root_cause": "Skill name 'pr-description' ambiguous; renaming to github-pr-description clarifies provider target.", + "solution": "Rename skill file path and name constant; update references and tests.", + "lesson_learned": "Rename resources to be explicit about ecosystem (GitHub) to reduce ambiguity. Update fixtures and tests referencing the path.", + "commits": [ + "9704bba" + ], + "constructs": [ + "skill registry include entries" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/fixtures/git_ls_files_many_extensions.txt", + "crates/forge_repo/src/skill.rs::tests" + ], + "source_commits": [ + "9704bba" + ] + } + }, + { + "type": "refactoring", + "category": "State Management", + "title": "Embed new built-in PR description skill", + "problem": "Built-in skills list didn't include the new pr-description skill and tests expected the list size to increase.", + "root_cause": "New skill files were added/renamed (.forge -> commands and skills embedded in repo) but the skill registry wasn't updated.", + "solution": "Added an entry for forge://skills/pr-description/SKILL.md into the embedded skills array and extended tests to assert presence.", + "lesson_learned": "When adding/renaming embedded skill files update the skill registry and corresponding tests; keep skill path constants consistent (forge:// prefix).", + "commits": [ + "4d11b0c" + ], + "constructs": [ + "ForgeSkillRepository::new", + 
"ForgeSkillRepository::list_skills" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_repo/src/skill.rs (inline tests updated in same file)" + ], + "test_functions": [], + "source_commits": [ + "4d11b0c" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/auth.rs": { + "short_description": "Auth service that fetches user info and usage from services API", + "category": "SOURCE_CODE", + "description": "Provides ForgeAuthService, which queries the configured services_url endpoints to fetch authenticated user information and usage statistics using an API key. It builds authorization headers and deserializes JSON responses into User and UserUsage domain DTOs.", + "key_constructs": [ + { + "name": "ForgeAuthService", + "type": "struct", + "purpose": "Service wrapper that uses HttpInfra and EnvironmentInfra to call auth endpoints" + }, + { + "name": "ForgeAuthService::user_info", + "type": "function", + "purpose": "Calls the auth/user endpoint with bearer token and parses User" + }, + { + "name": "ForgeAuthService::user_usage", + "type": "function", + "purpose": "Calls the auth/usage endpoint with bearer token and parses UserUsage" + } + ], + "semantic_tags": [ + "authentication", + "http", + "user-info", + "usage", + "services-api" + ], + "handles_entities": [ + "User", + "UserUsage", + "API key" + ], + "key_behaviors": [ + "fetches user profile from services API using bearer token", + "fetches user usage metrics from services API" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/provider_auth.rs": { + "short_description": "Provider authentication flows and credential refresh logic", + "category": "SOURCE_CODE", + "description": "Implements ForgeProviderAuthService which orchestrates provider-specific authentication flows using StrategyFactory and ProviderRepository: initializing auth contexts, completing auth flows to persist credentials, and 
refreshing provider credentials when near expiry. It handles API key, OAuth, device-code, and Google ADC variants and writes refreshed credentials back to storage.", + "key_constructs": [ + { + "name": "ForgeProviderAuthService", + "type": "struct", + "purpose": "Service orchestrating provider auth initialization, completion and refresh using infra strategies" + }, + { + "name": "init_provider_auth", + "type": "function", + "purpose": "Creates an auth strategy, initializes the flow and pre-fills existing credential params where applicable" + }, + { + "name": "complete_provider_auth", + "type": "function", + "purpose": "Completes an auth flow via strategy, converts response to credential and upserts it" + }, + { + "name": "refresh_provider_credential", + "type": "function", + "purpose": "Attempts credential refresh for providers nearing expiry by trying supported auth methods and updating stored credential" + } + ], + "semantic_tags": [ + "provider-auth", + "credentials", + "strategy", + "refresh", + "oauth" + ], + "handles_entities": [ + "Provider", + "Credential", + "AuthContextRequest", + "AuthContextResponse" + ], + "key_behaviors": [ + "initializes provider authentication flows and pre-fills existing params", + "completes authentication and persists credentials", + "refreshes credentials before expiry using provider-supported methods" + ], + "insights": [ + { + "type": "bug_fix", + "category": "State Management", + "title": "Treat Google ADC marker across Vertex-like providers", + "problem": "Code only treated VERTEX_AI provider specially for Google ADC marker; other Vertex variants (e.g., VERTEX_AI_ANTHROPIC) were not recognized and would attempt incorrect auth flows.", + "root_cause": "Conditional check compared provider_id == VERTEX_AI only; a second provider identifier representing Vertex Anthropic existed but wasn't included.", + "solution": "Define is_vertex_provider boolean checking both VERTEX_AI and VERTEX_AI_ANTHROPIC then use it in the condition to 
detect the 'google_adc_marker'. Expand match arms to include AuthMethod::GoogleAdc in places that previously only recognized CodexDevice etc.", + "lesson_learned": "When multiple provider identifiers represent similar auth flows, centralize detection logic (is_vertex_provider) to avoid missing variants. Also include new AuthMethod variants into match arms consistently.", + "commits": [ + "c0d8f9f" + ], + "constructs": [ + "provider_auth detection logic" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/openai_responses/request.rs": { + "short_description": "Converts domain chat context to OpenAI Responses API requests", + "category": "SOURCE_CODE", + "description": "Implements FromDomain conversions to create async-openai Responses API requests from Forge domain ChatContext, mapping messages, tool calls/results, images, reasoning config and tool schemas. It groups reasoning details for replay, normalizes schemars JSON schemas to OpenAI-compatible tool parameters, and ensures streaming/encrypted reasoning content is requested when necessary.", + "key_constructs": [ + { + "name": "map_reasoning_details_to_input_items", + "type": "function", + "purpose": "Groups reasoning detail fragments by id and constructs OpenAI ReasoningItem input objects" + }, + { + "name": "FromDomain for oai::Reasoning", + "type": "function", + "purpose": "Converts domain reasoning config into OpenAI Responses API ReasoningArgs" + }, + { + "name": "codex_tool_parameters", + "type": "function", + "purpose": "Normalizes schemars schema into strict JSON Schema suitable for the Responses API" + }, + { + "name": "FromDomain for oai::CreateResponse", + "type": "function", + "purpose": "Transforms domain chat context (messages, tools, tool_choice, reasoning) into a Responses API CreateResponse builder" + }, + { + "name": "FromDomain for oai::ToolChoiceParam", + "type": "function", + "purpose": "Maps domain tool choice options into 
provider-specific tool choice param" + } + ], + "semantic_tags": [ + "openai", + "responses-api", + "conversion", + "reasoning", + "tool-schema" + ], + "handles_entities": [ + "ChatContext", + "ContextMessage", + "ToolCall", + "Tool", + "ReasoningConfig" + ], + "key_behaviors": [ + "builds provider-compatible Responses API requests from domain chat context", + "includes reasoning encrypted content when reasoning is enabled", + "converts tool schemas into strict JSON Schema accepted by the Responses API" + ], + "insights": [ + { + "type": "feature", + "category": "State Management", + "title": "Preserve message phase and request encrypted reasoning content when needed", + "problem": "Phase labels from messages (commentary vs final_answer) were not preserved when building requests; reasoning encrypted content was not requested leading to missing data for stateless reasoning replay.", + "root_cause": "Mapping to OpenAI request types omitted message.phase and didn't include ReasoningEncryptedContent include when reasoning was configured.", + "solution": "Add mapping from domain MessagePhase to oai::MessagePhase; set phase on assistant EasyMessage items when present; when response.reasoning.is_some() ensure include contains ReasoningEncryptedContent to receive encrypted reasoning data for replay.", + "lesson_learned": "When supporting advanced response metadata (phases, encrypted blobs) ensure both outgoing requests ask for the content and incoming response conversion preserves the metadata.", + "commits": [ + "5b18cce" + ], + "constructs": [ + "to_oai_phase", + "FromDomain for oai::CreateResponse", + "include ReasoningEncryptedContent logic" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_repo/src/provider/openai_responses/request.rs tests" + ], + "test_functions": [ + "test_codex_request_with_reasoning_includes_encrypted_content", + "test_codex_request_preserves_phase_on_assistant_message", + "test_codex_request_preserves_final_answer_phase" + ], + 
"source_commits": [ + "5b18cce" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/workflows/ci.rs": { + "short_description": "Generates the project's GitHub Actions CI workflow", + "category": "BUILD", + "description": "Programmatically constructs the repository's GitHub Actions 'ci.yml' workflow, defining jobs like build, performance tests, and release builders. It emits the generated YAML file using a workflow generation library to keep CI declarative and maintainable from Rust code.", + "key_constructs": [], + "semantic_tags": [ + "ci", + "github-actions", + "workflow-generation", + "release", + "automation" + ], + "handles_entities": [], + "key_behaviors": [ + "generates GitHub Actions workflow YAML", + "defines CI jobs for build, coverage and performance", + "wires conditional release build jobs" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/lib.rs": { + "short_description": "Top-level reexports and module declarations for the application crate", + "category": "SOURCE_CODE", + "description": "Declares internal modules and reexports the most important types and modules of the forge_app crate for convenient external consumption. 
Acts as the public entrypoint to the app-layer APIs (agents, services, tools, utils, workspace helpers).", + "key_constructs": [ + { + "name": "agent", + "type": "module", + "purpose": "Agent-related services and traits (chat, tool calls, agent lifecycle)" + }, + { + "name": "services", + "type": "module", + "purpose": "Application service abstractions and implementations" + }, + { + "name": "utils::compute_hash", + "type": "function", + "purpose": "Utility to compute SHA-256 content hashes exposed by the crate" + }, + { + "name": "domain", + "type": "module", + "purpose": "Re-export of forge_domain types used across the application" + } + ], + "semantic_tags": [ + "module", + "reexports", + "application-layer", + "api" + ], + "handles_entities": [ + "Agent", + "Conversation", + "Tool", + "Workspace" + ], + "key_behaviors": [ + "exposes application-level modules and utilities", + "centralizes public API reexports for downstream crates" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/utils.rs": { + "short_description": "General-purpose utilities: path formatting, hashing and schema normalization", + "category": "SOURCE_CODE", + "description": "Provides helper functions for formatting display paths, rendering search match results, computing SHA-256 hashes, enforcing strict JSON schema shapes for LLM providers, and detecting binary Content-Types.
These utilities are used widely across the app to ensure consistent presentation and provider-compatible schema shapes.", + "key_constructs": [ + { + "name": "format_display_path", + "type": "function", + "purpose": "Turn absolute paths into relative display paths where possible", + "callers": [ + { + "file": "crates/forge_app/src/fmt/fmt_input.rs", + "line": 6, + "context": "use crate::utils::format_display_path;" + }, + { + "file": "crates/forge_app/src/fmt/fmt_input.rs", + "line": 10, + "context": "let display_path_for = |path: &str| format_display_path(Path::new(path), env.cwd.as_path());" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 78, + "context": "use crate::utils::format_display_path;" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 82, + "context": ".sub_title(format_display_path(policy_path.as_path(), &cwd))," + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 19, + "context": "use crate::utils::{compute_hash, format_display_path};" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 337, + "context": "let display_path = format_display_path(Path::new(&input.path), env.cwd.as_path());" + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 6, + "context": "use crate::utils::format_display_path;" + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 58, + "context": "let display_path = format_display_path(&change.path, &cwd);" + }, + { + "file": "crates/forge_services/src/attachment.rs", + "line": 6, + "context": "use forge_app::utils::format_display_path;" + }, + { + "file": "crates/forge_services/src/attachment.rs", + "line": 54, + "context": "let normalized_path = format_display_path(&entry_path, &path);" + }, + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "line": 7, + "context": "use crate::utils::format_display_path;" + }, + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "line": 37, + "context": 
"format_display_path(&output.path, &env.cwd)" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 17, + "context": "use forge_app::utils::{format_display_path, truncate_key};" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1307, + "context": "info = info.add_key_value(\"path\", format_display_path(&path, &env.cwd));" + } + ] + }, + { + "name": "format_match", + "type": "function", + "purpose": "Render a Match (search result) into a human-readable string with context", + "callers": [ + { + "file": "crates/forge_app/src/truncation/truncate_search.rs", + "line": 4, + "context": "use crate::utils::format_match;" + }, + { + "file": "crates/forge_app/src/truncation/truncate_search.rs", + "line": 92, + "context": ".map(|v| format_match(v, search_dir))" + } + ] + }, + { + "name": "compute_hash", + "type": "function", + "purpose": "Compute SHA-256 hex digest for content strings", + "callers": [ + { + "file": "crates/forge_app/src/lib.rs", + "line": 53, + "context": "pub use utils::{compute_hash, is_binary_content_type};" + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 96, + "context": "use crate::{FsReadService, ReadOutput, compute_hash};" + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 115, + "context": "let hash = compute_hash(content);" + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 195, + "context": "let hash = crate::compute_hash(content);" + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 215, + "context": "let old_hash = crate::compute_hash(\"old content\");" + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 234, + "context": "let old_hash = crate::compute_hash(\"old content\");" + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 236, + "context": "let new_hash = crate::compute_hash(new_content);" + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 263, + "context": "(\"/test/file1.txt\".into(), 
Some(crate::compute_hash(\"old 1\")))," + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 264, + "context": "(\"/test/file2.txt\".into(), Some(crate::compute_hash(\"old 2\")))," + }, + { + "file": "crates/forge_app/src/changed_files.rs", + "line": 281, + "context": "let old_hash = crate::compute_hash(\"old content\");" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 19, + "context": "use crate::utils::{compute_hash, format_display_path};" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 514, + "context": "let content_hash = output.after_undo.as_ref().map(|s| compute_hash(s));" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 860, + "context": "let hash = crate::compute_hash(content);" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 891, + "context": "let hash = crate::compute_hash(content);" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 921, + "context": "let hash = crate::compute_hash(content);" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 952, + "context": "let hash = crate::compute_hash(content);" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 995, + "context": "content_hash: compute_hash(content)," + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 1026, + "context": "content_hash: compute_hash(content)," + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 1531, + "context": "content_hash: compute_hash(content)," + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 1565, + "context": "content_hash: compute_hash(content)," + } + ] + }, + { + "name": "enforce_strict_schema", + "type": "function", + "purpose": "Normalize JSON schemas to add additionalProperties=false and OpenAI-compatible transformations", + "callers": [ + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 6, + "context": "use forge_app::utils::enforce_strict_schema;" + }, + { 
+ "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 167, + "context": "enforce_strict_schema(&mut params, true);" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 381, + "context": "use forge_app::utils::enforce_strict_schema;" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 765, + "context": "enforce_strict_schema(&mut schema, true);" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 782, + "context": "enforce_strict_schema(&mut schema, true);" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 797, + "context": "enforce_strict_schema(&mut schema, true);" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 824, + "context": "enforce_strict_schema(&mut schema, true);" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 856, + "context": "enforce_strict_schema(&mut schema, true);" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 872, + "context": "enforce_strict_schema(&mut schema, true);" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 889, + "context": "enforce_strict_schema(&mut schema, true);" + }, + { + "file": "crates/forge_app/src/dto/anthropic/transforms/enforce_schema.rs", + "line": 4, + "context": "use crate::utils::enforce_strict_schema;" + } + ] + }, + { + "name": "is_binary_content_type", + "type": "function", + "purpose": "Detect whether an HTTP Content-Type should be treated as binary", + "callers": [ + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 2, + "context": "use forge_app::{HttpResponse, NetFetchService, ResponseContext, is_binary_content_type};" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 92, + "context": "if 
is_binary_content_type(&content_type) {" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 151, + "context": "assert!(!is_binary_content_type(\"text/html\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 152, + "context": "assert!(!is_binary_content_type(\"text/plain\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 153, + "context": "assert!(!is_binary_content_type(\"text/css\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 154, + "context": "assert!(!is_binary_content_type(\"application/json\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 155, + "context": "assert!(!is_binary_content_type(\"application/xml\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 156, + "context": "assert!(!is_binary_content_type(\"application/javascript\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 157, + "context": "assert!(!is_binary_content_type(\"application/yaml\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 158, + "context": "assert!(!is_binary_content_type(\"image/svg+xml\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 159, + "context": "assert!(!is_binary_content_type(\"text/csv\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 160, + "context": "assert!(!is_binary_content_type(\"text/markdown\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 161, + "context": "assert!(!is_binary_content_type(\"\")); // empty = unknown, allow" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 166, + "context": "assert!(is_binary_content_type(\"application/gzip\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 167, + 
"context": "assert!(is_binary_content_type(\"application/x-gzip\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 168, + "context": "assert!(is_binary_content_type(\"application/octet-stream\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 169, + "context": "assert!(is_binary_content_type(\"application/zip\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 170, + "context": "assert!(is_binary_content_type(\"application/x-tar\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 171, + "context": "assert!(is_binary_content_type(\"application/pdf\"));" + }, + { + "file": "crates/forge_services/src/tool_services/fetch.rs", + "line": 172, + "context": "assert!(is_binary_content_type(\"image/png\"));" + } + ] + } + ], + "semantic_tags": [ + "utilities", + "hashing", + "path-formatting", + "json-schema", + "content-detection" + ], + "handles_entities": [ + "Match", + "MatchResult" + ], + "key_behaviors": [ + "format paths for UI display", + "normalize JSON schemas for LLM provider compatibility", + "detect binary versus text content types", + "compute content hashes for change detection" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/workspace_status.rs": { + "short_description": "Compute file sync status and sync operation paths against remote hashes", + "category": "SOURCE_CODE", + "description": "Compares local file hashes with remote server-provided file hashes to derive per-file SyncStatus, produce upload/delete path lists, and track sync progress. 
It normalizes relative remote paths against a base directory and provides utilities to drive two-phase sync workflows.", + "key_constructs": [ + { + "name": "WorkspaceStatus", + "type": "struct", + "purpose": "Holds base_dir and remote file hashes, computes statuses and sync paths" + }, + { + "name": "WorkspaceStatus::file_statuses", + "type": "function", + "purpose": "Compare local and remote FileHash vectors to produce FileStatus for each path" + }, + { + "name": "WorkspaceStatus::get_sync_paths", + "type": "function", + "purpose": "Return lists of absolute paths to upload or delete based on file statuses" + }, + { + "name": "SyncProgressCounter", + "type": "struct", + "purpose": "Track completed operations and emit SyncProgress estimates" + }, + { + "name": "absolutize", + "type": "function", + "purpose": "Resolve relative server paths against base_dir to absolute strings" + } + ], + "semantic_tags": [ + "sync", + "workspace", + "file-hashes", + "change-detection", + "progress-tracking" + ], + "handles_entities": [ + "FileHash", + "FileStatus", + "SyncPaths", + "SyncProgress" + ], + "key_behaviors": [ + "compute sync status for workspace files", + "generate lists of files to upload or delete", + "report sync progress based on operations" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/chat_response.rs": { + "short_description": "Domain types for agent chat responses and tool call events", + "category": "SOURCE_CODE", + "description": "Defines enums and structs representing streamed agent responses, tool call lifecycle events, reasoning and interruption reasons, and title formatting used by the UI and orchestrator. 
These types standardize how agents emit messages, tool call start/end signals, retries, and interruptions across the system.", + "key_constructs": [ + { + "name": "ChatResponseContent", + "type": "enum", + "purpose": "Represents message payloads: tool input, tool output, or markdown (possibly partial)" + }, + { + "name": "ChatResponse", + "type": "enum", + "purpose": "Top-level events emitted by agents, including TaskMessage, ToolCallStart/End, RetryAttempt, and Interrupt" + }, + { + "name": "TitleFormat", + "type": "struct", + "purpose": "Structured title/subtitle/category/timestamp for UI status messages" + }, + { + "name": "InterruptionReason", + "type": "enum", + "purpose": "Captures reasons for interrupting an agent turn (limits reached)" + }, + { + "name": "Cause", + "type": "struct", + "purpose": "Wraps an error cause into a string for retry/error reporting" + } + ], + "semantic_tags": [ + "streaming", + "events", + "tool-calls", + "agent", + "ui" + ], + "handles_entities": [ + "ToolCallFull", + "ToolResult", + "ChatCompletionMessage", + "TitleFormat" + ], + "key_behaviors": [ + "model agent events and tool call lifecycle", + "signal retries and interruptions", + "format status/title messages for display" + ], + "insights": [ + { + "type": "refactoring", + "category": "API", + "title": "ToolCallStart now carries notifier; enum style normalized", + "problem": "ToolCallStart previously carried ToolCallFull directly; needed to carry notifier to coordinate UI/tool execution.", + "root_cause": "UI/tool stdout interleaving fix needed a way to notify orchestration when UI header is rendered.", + "solution": "Replaced ChatResponse::ToolCallStart(ToolCallFull) with ChatResponse::ToolCallStart { tool_call: ToolCallFull, notifier: Arc }. 
Also normalized enum variant formatting and imports.", + "lesson_learned": "When changing message types used across async tasks, prefer explicit structured variants so additional coordination fields can be added without awkward API ripples.", + "commits": [ + "c1c0506" + ], + "constructs": [ + "ChatResponse::ToolCallStart", + "ChatResponseContent variants" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/validation.rs": { + "short_description": "gRPC-backed implementation of file syntax validation", + "category": "SOURCE_CODE", + "description": "Implements ValidationRepository by sending file contents to a remote Forge gRPC validation service and translating the proto response into domain SyntaxError objects. It handles valid, error and unsupported-language responses and logs details for failing validations.", + "key_constructs": [ + { + "name": "ForgeValidationRepository", + "type": "struct", + "purpose": "Holds an Arc to infrastructural gRPC provider and implements ValidationRepository" + }, + { + "name": "validate_file", + "type": "function", + "purpose": "Send a ValidateFilesRequest over gRPC and convert proto ValidationStatus to SyntaxError vector", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 614, + "context": ".validate_file(path, content)" + } + ] + } + ], + "semantic_tags": [ + "validation", + "grpc", + "syntax-check", + "remote-service", + "error-mapping" + ], + "handles_entities": [ + "SyntaxError", + "ValidateFilesRequest", + "File" + ], + "key_behaviors": [ + "send file content to validation service", + "convert proto errors to domain SyntaxError", + "log validation failures and skip unsupported languages" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/discovery.rs": { + "short_description": "File discovery service adapter for environment and walker infra", + "category": "SOURCE_CODE", + 
"description": "Wraps infra implementations to provide FileDiscoveryService: walking a workspace using a Walker config and listing the current directory with sorted entries. It normalizes results into domain File records and ensures directories are listed before files alphabetically.", + "key_constructs": [ + { + "name": "ForgeDiscoveryService", + "type": "struct", + "purpose": "Adapter holding infra to implement FileDiscoveryService" + }, + { + "name": "discover_with_config", + "type": "function", + "purpose": "Invoke Walker infra and map WalkedFile results into domain File list" + }, + { + "name": "list_current_directory", + "type": "function", + "purpose": "List entries for current cwd, convert to File, and sort directories before files" + } + ], + "semantic_tags": [ + "discovery", + "filesystem", + "directory-listing", + "walker", + "adapter" + ], + "handles_entities": [ + "File", + "WalkedFile", + "Environment" + ], + "key_behaviors": [ + "collect files by walker configuration", + "list and sort current directory entries for UI" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/agent.rs": { + "short_description": "Agent service trait and configuration application utilities", + "category": "SOURCE_CODE", + "description": "Declares the AgentService trait (chat, tool calls, update) and provides a blanket implementation for any Services+EnvironmentInfra type. 
Also provides AgentExt to apply workflow-level ForgeConfig overrides to domain Agent objects, merging settings like reasoning and compact policies.", + "key_constructs": [ + { + "name": "AgentService", + "type": "trait", + "purpose": "Abstracts chat_agent, call, and conversation update operations used by orchestrator" + }, + { + "name": "impl AgentService for T", + "type": "implementation", + "purpose": "Blanket impl that routes chat and calls through the services and tool registry" + }, + { + "name": "AgentExt", + "type": "trait", + "purpose": "Extension trait defining apply_config to apply top-level ForgeConfig to Agent" + }, + { + "name": "AgentExt::apply_config", + "type": "function", + "purpose": "Merge workflow settings (temperature, compact, reasoning) into Agent defaults" + } + ], + "semantic_tags": [ + "agent", + "configuration", + "trait", + "service-abstraction", + "tooling" + ], + "handles_entities": [ + "Agent", + "ForgeConfig", + "ReasoningConfig", + "Compact" + ], + "key_behaviors": [ + "execute chat completions via configured provider", + "dispatch tool calls via ToolRegistry", + "apply workflow-level configuration to agents" + ], + "insights": [ + { + "type": "feature", + "category": "Configuration", + "title": "Allow agent to define provider (per-agent provider)", + "problem": "Agents could not specify their own provider; the system assumed a single default provider.", + "root_cause": "Agent struct didn't have an optional provider field to override the default.", + "solution": "Add provider: Option to Agent struct and merge/serde support. AgentService chat methods now accept provider_id Option and resolve to per-agent provider or default provider internally. 
This enables per-agent provider resolution throughout ForgeApp.", + "commits": [ + "d9207f" + ], + "constructs": [ + "Agent struct field provider", + "AgentService::chat signature change (provider_id)" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/file_tracking.rs": { + "short_description": "Detects external file changes by comparing stored and current hashes", + "category": "SOURCE_CODE", + "description": "Provides a FileChangeDetector that reads current file hashes using an FsReadService and compares them with tracked hashes in Metrics to detect modifications, deletions, or unreadable files. The detector parallelizes reads, returns sorted change lists, and includes comprehensive tests with a mock FS service to avoid false positives from truncated display content.", + "key_constructs": [ + { + "name": "FileChange", + "type": "struct", + "purpose": "Represents a detected change with path and optional new content_hash" + }, + { + "name": "FileChangeDetector", + "type": "struct", + "purpose": "Detects file changes by reading current content hashes and comparing with tracked Metrics" + }, + { + "name": "FileChangeDetector::detect", + "type": "function", + "purpose": "Asynchronously check files in Metrics, parallelized, and return sorted changes" + }, + { + "name": "MockFsReadService", + "type": "struct (test)", + "purpose": "Test double that simulates reading files and producing raw/display content/hash" + } + ], + "semantic_tags": [ + "file-watching", + "change-detection", + "metrics", + "fs-read", + "testing" + ], + "handles_entities": [ + "Metrics", + "FileOperation", + "FileChange" + ], + "key_behaviors": [ + "detect external modifications to files", + "avoid false positives when displayed content is truncated", + "return deterministic sorted change lists" + ], + "insights": [ + { + "type": "refactoring", + "category": "Performance", + "title": "FileChangeDetector parallelism parameter made explicit", 
+ "problem": "FileChangeDetector stored a parallelism field and was constructed with that value; after moving config resolution into services the signature needed to accept the concurrency value at detect time.", + "root_cause": "Configuration movement/refactor required API signature changes to avoid holding stale config in the detector.", + "solution": "Removed parallel_file_reads from struct, changed new() and detect() to accept parallel_file_reads on detect, and tests updated accordingly.", + "lesson_learned": "When a service depends on dynamic config, avoid embedding config into long-lived structs unless it's immutable; prefer passing runtime parameters at call time.", + "commits": [ + "7e8a51d", + "5bd0b94" + ], + "constructs": [ + "FileChangeDetector::new", + "FileChangeDetector::detect" + ] + }, + { + "type": "bug_fix", + "category": "State Management", + "title": "Compare stored file hashes to FileInfo.content_hash from range_read", + "problem": "External change detector read and compared content hashes that didn't match stored hashes because stored hash might be from different content computation.", + "root_cause": "ReadOutput.content_hash field was replaced by ReadOutput.info.content_hash; callers still expected previous field.", + "solution": "Updated FileChangeDetector to use output.info.content_hash. 
Tests updated to assert FileInfo in ReadOutput.", + "lesson_learned": "When consolidating data into a nested struct, update all callsites and tests; this avoids silent mismatches.", + "commits": [ + "29db91a" + ], + "constructs": [ + "FileChangeDetector::detect", + "ReadOutput usage" + ] + }, + { + "type": "performance", + "category": "Concurrency", + "title": "Cap parallel file reads to avoid EMFILE (too many open files)", + "problem": "Unbounded parallel reads could exhaust file descriptors (EMFILE) when hashing many files concurrently.", + "root_cause": "detect() created an unbounded set of futures and awaited join_all, causing many simultaneous file opens.", + "solution": "Switch to streaming over entries and use buffer_unordered(n) with a configured parallel_file_reads cap; collect with filter_map and preserve deterministic sorting. Accept parallel_file_reads in FileChangeDetector::new and propagate via environment.", + "lesson_learned": "I/O-heavy parallel operations must be bounded; expose a configurable concurrency cap via environment and use StreamExt::buffer_unordered to limit concurrency and avoid system resource exhaustion.", + "commits": [ + "e25c1c0" + ], + "constructs": [ + "FileChangeDetector::new", + "FileChangeDetector::detect" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/file_tracking.rs::tests" + ], + "source_commits": [ + "e25c1c0" + ] + } + } + ], + "tests": { + "exercised_by": [ + "crates/forge_app/src/file_tracking.rs (unit tests updated to pass parallel_file_reads)" + ], + "test_functions": [], + "source_commits": [ + "7e8a51d" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/reader.rs": { + "short_description": "Layered Forge configuration reader with env and legacy support", + "category": "SOURCE_CODE", + "description": "Builds ForgeConfig by layering embedded defaults, global user files, legacy JSON, provided TOML strings, and FORGE_ environment 
variables, while loading .env files from the working directory chain once per process. It centralizes configuration resolution and exposes helper methods for config path discovery.", + "key_constructs": [ + { + "name": "LOAD_DOT_ENV", + "type": "constant", + "purpose": "Static LazyLock that loads .env files walking up from cwd only once" + }, + { + "name": "ConfigReader", + "type": "struct", + "purpose": "Builder that accumulates config sources and produces a ForgeConfig" + }, + { + "name": "ConfigReader::build", + "type": "function", + "purpose": "Finalize builder, trigger .env loading, and deserialize ForgeConfig" + }, + { + "name": "ConfigReader::read_env", + "type": "function", + "purpose": "Add FORGE_ prefixed environment variables as a config source" + } + ], + "semantic_tags": [ + "configuration", + "env", + "toml", + "legacy", + "loader" + ], + "handles_entities": [ + "ForgeConfig", + "LegacyConfig", + "ModelConfig" + ], + "key_behaviors": [ + "load layered configuration from defaults, files, legacy JSON and env vars", + "discover config file paths and respect FORGE_CONFIG override", + "load .env files found up the directory tree once" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/file.rs": { + "short_description": "Domain models for file metadata, hashes and sync statuses", + "category": "SOURCE_CODE", + "description": "Defines domain structs representing files and file read metadata (File, FileInfo), server file hashes (FileHash), and synchronization enums/structs (SyncStatus, FileStatus). 
These models are used to coordinate workspace syncing, change detection and reporting across services.", + "key_constructs": [ + { + "name": "File", + "type": "struct", + "purpose": "Represents a path and whether it's a directory" + }, + { + "name": "FileInfo", + "type": "struct", + "purpose": "Describes a read operation, lines range, total lines, and full content hash" + }, + { + "name": "FileHash", + "type": "struct", + "purpose": "Represents server file path and SHA-256 hash" + }, + { + "name": "SyncStatus", + "type": "enum", + "purpose": "Possible sync states: InSync, Modified, New, Deleted, Failed" + }, + { + "name": "FileStatus", + "type": "struct", + "purpose": "A path with its computed SyncStatus" + } + ], + "semantic_tags": [ + "domain", + "file-metadata", + "sync", + "hashing" + ], + "handles_entities": [ + "File", + "FileInfo", + "FileHash", + "FileStatus", + "SyncStatus" + ], + "key_behaviors": [ + "model file metadata and sync states", + "store full-file content hash for reliable change detection" + ], + "insights": [ + { + "type": "refactoring", + "category": "Typing", + "title": "FileInfo extended with content_hash (full-file SHA-256)", + "problem": "External-change detector hashes full file content; range readers used partial/truncated content hashes which could cause false positives.", + "root_cause": "Inconsistent hashing: callers sometimes hashed the returned range content leading to mismatches with external detectors that hash the full file", + "solution": "Added content_hash: String to FileInfo and updated constructor to accept it. 
FileInfo is now Serialize/Deserialize/Eq to support storage and comparisons.", + "lesson_learned": "When external systems perform equality checks, ensure your metadata represents the same canonical substrate (full-file hash) and make that explicit in types.", + "commits": [ + "29db91a" + ], + "constructs": [ + "FileInfo::new" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/session_metrics.rs": { + "short_description": "Session metrics storage for file operations, todos and timing", + "category": "SOURCE_CODE", + "description": "Holds metrics collected during a session including last file operations, accessed files, todos and start time. Provides helper methods to record file operations (tracking reads separately), compute session duration, and apply incoming todo changes while validating constraints.", + "key_constructs": [ + { + "name": "Metrics", + "type": "struct", + "purpose": "Primary session metrics container for file_operations, files_accessed, todos and start time" + }, + { + "name": "Metrics::insert", + "type": "function", + "purpose": "Record a file operation and add reads to files_accessed set" + }, + { + "name": "Metrics::apply_todo_changes", + "type": "function", + "purpose": "Merge incoming TodoItem changes into stored todos with validation and history rules" + }, + { + "name": "Metrics::get_active_todos", + "type": "function", + "purpose": "Return todos with Pending or InProgress status" + } + ], + "semantic_tags": [ + "metrics", + "session", + "file-operations", + "todos", + "validation" + ], + "handles_entities": [ + "FileOperation", + "Todo", + "TodoItem", + "TodoStatus" + ], + "key_behaviors": [ + "record file operations and mark files accessed", + "manage todo lifecycle and validate incoming changes", + "compute session duration" + ], + "insights": [ + { + "type": "refactoring", + "category": "State Management", + "title": "Apply content-keyed todo changes and introduce TodoItem 
type", + "problem": "Old update_todos relied on IDs being provided by the client and validated uniqueness; this conflicted with the new model where server manages IDs and updates are content-keyed.", + "root_cause": "Protocol changed: model now sends only changed items keyed by 'content' and can send 'cancelled' status to indicate removal. Server-side API still expected client-provided IDs.", + "solution": "Replaced update_todos(Vec) with apply_todo_changes(Vec) that validates content, treats 'cancelled' as removal by content, updates existing items by matching content, and generates server UUIDs for new items. Ensured completed items not mentioned remain in history and only active items are returned.", + "lesson_learned": "When the wire format changes (content-keyed diffs), adapt server-side data structures and validation accordingly; avoid requiring client-managed IDs for server-managed entities. Provide clear validation errors for empty/oversized content.", + "commits": [ + "e84bc7f" + ], + "constructs": [ + "apply_todo_changes", + "get_active_todos", + "todo_item" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_domain/src/session_metrics.rs (unit tests added in same file)" + ], + "test_functions": [ + "test_apply_todo_changes_adds_new_items", + "test_apply_todo_changes_updates_by_content_key", + "test_apply_todo_changes_cancelled_removes_item", + "test_apply_todo_changes_preserves_untouched_items", + "test_apply_todo_changes_completed_stays_in_history" + ], + "source_commits": [ + "e84bc7f" + ] + } + }, + { + "type": "refactoring", + "category": "State Management", + "title": "Preserve historical completed todos when updating session todos", + "problem": "When update_todos replaced the session todos vector, completed todos that had been removed from the active list were lost from metrics (no historical trace). 
Also callers expected update_todos to return active todos.", + "root_cause": "update_todos blindly replaced the internal vector; there was no distinction between active todos and historical completed todos.", + "solution": "Introduce merging logic: keep incoming active todos and re-append any previously-known completed todos that are not present in the new active list; sort by id for deterministic ordering. Add get_active_todos() to return only pending/in-progress items while get_todos() returns all known todos.", + "lesson_learned": "Metrics that are append-only or partially historical must be explicit about what 'replace' semantics mean. Provide separate accessors for active vs historical items and ensure mutating methods preserve history where required.", + "commits": [ + "970a75f", + "4f1ad6b" + ], + "constructs": [ + "Metrics::get_active_todos", + "Metrics::get_todos", + "Metrics::update_todos" + ] + }, + { + "type": "testing", + "category": "Testing", + "title": "Add unit test verifying removed completed todos are preserved", + "problem": "Lack of tests to capture history-preservation expectations caused earlier regressions.", + "root_cause": "No test coverage for update_todos history behaviour.", + "solution": "Added test test_update_todos_keeps_removed_completed_in_history confirming completed historical items are retained in metrics and that update_todos returns active items.", + "lesson_learned": "When changing state model (active vs historical) add tests asserting both return values and persisted state.", + "commits": [ + "970a75f" + ], + "constructs": [ + "Metrics::update_todos", + "Metrics::get_todos", + "Metrics::get_active_todos" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_domain/src/session_metrics.rs::tests" + ], + "source_commits": [ + "970a75f" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/src/schema_coercion.rs": { + "short_description": "Coerce JSON values 
to match JSON Schema types (used to repair/convert LLM outputs)", + "category": "SOURCE_CODE", + "description": "Recursively coerces serde_json::Value instances to conform to schemars::Schema definitions, handling $ref resolution, nullable conversion, anyOf/oneOf/allOf, arrays, objects and string coercions to numbers/booleans/arrays. It includes permissive parsing (JSON, JSON5, and repair) to extract arrays or objects from messy LLM outputs and make function arguments schema-safe.", + "key_constructs": [ + { + "name": "coerce_to_schema", + "type": "function", + "purpose": "Public entry to coerce a JSON Value into the expected Schema shape", + "callers": [ + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 5, + "context": "use forge_json_repair::coerce_to_schema;" + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 425, + "context": ".map(|tool| coerce_to_schema(parsed_arguments.clone(), &tool.definition().input_schema))" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 1162, + "context": "forge_json_repair::coerce_to_schema(parsed_args.clone(), &schema)" + } + ] + }, + { + "name": "coerce_value_with_schema", + "type": "function", + "purpose": "Recursive worker that dispatches coercion logic depending on schema shape" + }, + { + "name": "try_coerce_string", + "type": "function", + "purpose": "Attempt to coerce a string into integer, number, boolean, null, object or array" + }, + { + "name": "try_parse_json_string", + "type": "function", + "purpose": "Try parsing a string as JSON/JSON5 or apply json_repair fallback" + }, + { + "name": "extract_array_from_string", + "type": "function", + "purpose": "Permissively extract an array substring from noisy strings" + } + ], + "semantic_tags": [ + "json", + "schema", + "coercion", + "llm-output", + "robust-parsing" + ], + "handles_entities": [], + "key_behaviors": [ + "coerce and repair JSON outputs to match declared schemas", + "try multiple parsing strategies 
(JSON, JSON5, repair) for nested strings", + "handle nullable and combined-schema constructs" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Unwrap nested/double-encoded JSON strings and support JSON5/repair fallback", + "problem": "Strings containing JSON (and even double-encoded JSON) were not being recursively parsed; coercion to schema could miss the intended object.", + "root_cause": "try_parse_json_string only attempted serde_json then JSON5 previously; it did not unwrap nested JSON strings nor use json_repair fallback comprehensively.", + "solution": "Introduce parse_json_like_value which tries serde_json, then JSON5, then json_repair. try_parse_json_string now repeatedly unwraps up to 4 nested string layers (common double-encoding depth) to return the innermost structured value.", + "commits": [ + "5a6f3c8" + ], + "constructs": [ + "try_parse_json_string", + "parse_json_like_value" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_json_repair/src/schema_coercion.rs (inline tests)" + ], + "test_functions": [ + "test_repairs_invalid_json_string_when_schema_expects_array", + "test_coerce_double_encoded_string_to_object" + ], + "source_commits": [ + "5a6f3c8" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/porcelain.rs": { + "short_description": "Convert Info into a tabular, machine-friendly Porcelain representation", + "category": "SOURCE_CODE", + "description": "Implements Porcelain, a flattened tabular representation of Info sections for machine-readable output and formatted display. 
It supports transformations like truncation, sorting, case conversion, long-form unpivoting, setting headers, and a Display impl to render aligned columns.", + "key_constructs": [ + { + "name": "Porcelain", + "type": "struct", + "purpose": "Core container for 2D optional string cells used to represent tabular info" + }, + { + "name": "Porcelain::from(&Info)", + "type": "function", + "purpose": "Convert Info/Section structures into the Porcelain table format" + }, + { + "name": "Porcelain::sort_by", + "type": "function", + "purpose": "Sort rows by multiple columns preserving header row" + }, + { + "name": "Porcelain::into_long", + "type": "function", + "purpose": "Unpivot wide table into long format with $ID, field and value" + }, + { + "name": "impl Display for Porcelain", + "type": "implementation", + "purpose": "Render the Porcelain table as aligned text with column widths" + } + ], + "semantic_tags": [ + "output-format", + "tabular", + "ui", + "serialization", + "info" + ], + "handles_entities": [ + "Info", + "Section" + ], + "key_behaviors": [ + "convert Info into machine-friendly tabular form", + "transform and format tabular output for CLI and scripting", + "provide sorting, truncation and case conversions for columns" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Use char count (not byte len) for truncation to support multibyte Unicode", + "problem": "Previous truncation used byte-based slicing causing broken output or panics for multibyte characters (e.g., accented letters, emoji).", + "root_cause": "value.len() returned byte length; slicing assumed byte index equals character index.", + "solution": "Use value.chars().count() for length and value.chars().take(max_len).collect() to build truncated string and then append '...'. 
Updated tests for Unicode cases.", + "lesson_learned": "Text truncation must be character-aware (not byte-aware) in Rust when dealing with Unicode; tests for emojis and non-ASCII characters are critical.", + "commits": [ + "019a6bd" + ], + "constructs": [ + "Porcelain::truncate" + ] + }, + { + "type": "refactoring", + "category": "Formatting", + "title": "New Porcelain module centralizes machine-friendly output", + "problem": "Porcelain formatting was previously done ad-hoc in multiple UI locations", + "root_cause": "Multiple places implemented column formatting/skip/drop logic leading to duplication and inconsistent behavior", + "solution": "Add porcelain.rs providing Porcelain::from(&Info) and chaining API (.into_long(), .skip(n), .drop_col(n), .map_col) consumed by UI", + "commits": [ + "9b3b618" + ], + "constructs": [ + "Porcelain::from", + "Porcelain::into_long", + "Porcelain::skip", + "Porcelain::drop_col", + "Porcelain::map_col" + ], + "lesson_learned": "Encapsulate machine-readable formatting in a dedicated module to keep UI logic declarative and consistent; it makes it easier to manage row/column transformations and skip semantics." + } + ], + "tests": { + "exercised_by": [ + "inline tests added in porcelain.rs (unicode truncation tests)" + ], + "test_functions": [], + "source_commits": [ + "019a6bd" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/update.rs": { + "short_description": "Check for Forge updates and optionally execute the official updater", + "category": "SOURCE_CODE", + "description": "Integrates with update_informer to check GitHub for newer releases and, if approved or auto_update is enabled, runs the official install script to update the CLI. 
It prompts the user when appropriate, executes the curl|sh updater asynchronously, and logs update failures silently to tracking.", + "key_constructs": [ + { + "name": "execute_update_command", + "type": "function", + "purpose": "Run the official CLI install script and optionally exit the process after success" + }, + { + "name": "confirm_update", + "type": "function", + "purpose": "Prompt the user to confirm an upgrade between versions" + }, + { + "name": "on_update", + "type": "function", + "purpose": "Public entry that checks for updates and triggers confirm/auto-update flow", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 48, + "context": "use crate::update::on_update;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 689, + "context": "on_update(self.api.clone(), Some(&update)).await;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1899, + "context": "on_update(self.api.clone(), None).await;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3038, + "context": "on_update(self.api.clone(), self.config.updates.as_ref()).await;" + } + ] + } + ], + "semantic_tags": [ + "self-update", + "cli", + "updater", + "github", + "prompt" + ], + "handles_entities": [ + "Update", + "API" + ], + "key_behaviors": [ + "check for newer releases on GitHub", + "prompt user and run installer to update CLI", + "report update failures to telemetry" + ], + "insights": [ + { + "type": "feature", + "category": "UX", + "title": "Update command: run installer in background and support auto-update no-confirm", + "problem": "Users wanted background update flow and ability to auto-apply updates without interactive confirmation.", + "root_cause": "execute_update_command previously always prompted to close the app after update.", + "solution": "execute_update_command now accepts auto_update bool to immediately exit after successful update when requested. UI routes TopLevelCommand::Update to on_update with Update.auto_update flag. 
Tests added for CLI parsing of --no-confirm flag.", + "lesson_learned": "Background update flows need to respect non-interactive contexts; expose a no-confirm flag and ensure update task spawns without blocking UI.", + "commits": [ + "f7ebfd6" + ], + "constructs": [ + "execute_update_command", + "on_update" + ] + }, + { + "type": "bug_fix", + "category": "Configuration", + "title": "Use new install URL in update command", + "problem": "Installer URL moved; update command referenced old install.sh path.", + "root_cause": "Hard-coded install URL became stale.", + "solution": "Replaced curl -fsSL https://forgecode.dev/install.sh | sh with https://forgecode.dev/cli | sh.", + "lesson_learned": "Keep external URLs centralized (README and update command) and update across code/docs together.", + "commits": [ + "9dc272a" + ], + "constructs": [ + "execute_update_command" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/agent_registry.rs": { + "short_description": "In-memory agent registry with lazy loading and active-agent management", + "category": "SOURCE_CODE", + "description": "Provides an in-memory registry for runtime Agent instances, lazily loading agents from an AgentRepository and exposing methods to get, set and reload the active agent ID. 
It caches agents in a DashMap behind RwLocks and translates repository/config defaults into loaded Agent entries.", + "key_constructs": [ + { + "name": "ForgeAgentRegistryService", + "type": "class", + "purpose": "Main service struct that holds repository, cached agents map, and active_agent_id.", + "callers": [ + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 14, + "context": "use crate::agent_registry::ForgeAgentRegistryService;" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 79, + "context": "agent_registry_service: Arc>," + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 133, + "context": "let agent_registry_service = Arc::new(ForgeAgentRegistryService::new(infra.clone()));" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 231, + "context": "type AgentRegistry = ForgeAgentRegistryService;" + } + ] + }, + { + "name": "ensure_agents_loaded", + "type": "function", + "purpose": "Lazily initializes and returns the cached agents map, loading from repository if needed." + }, + { + "name": "load_agents", + "type": "function", + "purpose": "Loads agents from AgentRepository using session defaults from config and populates DashMap." + }, + { + "name": "impl forge_app::AgentRegistry for ForgeAgentRegistryService", + "type": "implementation", + "purpose": "Trait implementation exposing get/set active agent id, get_agents/get_agent, and reload_agents." 
+ } + ], + "semantic_tags": [ + "agent-management", + "caching", + "lazy-loading", + "repository", + "concurrency" + ], + "handles_entities": [ + "Agent", + "AgentId" + ], + "key_behaviors": [ + "loads and caches agent definitions from repository", + "gets and sets the active agent id", + "reloads the agent cache on demand" + ], + "insights": [ + { + "type": "refactoring", + "category": "State Management", + "title": "Switch active agent to in-memory session state", + "problem": "Previously setting the active agent updated persistent app config which made the 'active agent' global across sessions and required disk writes for what should be a session-level choice.", + "root_cause": "Design stored active agent in app_config and persisted it; switching agents required writing config frequently.", + "solution": "Add active_agent_id: RwLock<Option<AgentId>> to AgentLoaderService/AgentRegistry to keep active agent in-memory (session-scoped). Update get_active_agent/get_active_agent_id/set_active_agent to read/write the RwLock. Remove reliance on AppConfig for immediate active agent resolution. This avoids constant config writes and isolates sessions.", + "commits": [ + "94ac901" + ], + "constructs": [ + "AgentLoaderService::new", + "get_active_agent", + "get_active_agent_id", + "set_active_agent" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/dispatch.rs": { + "short_description": "Telemetry tracker that rate-limits and dispatches events to collectors", + "category": "SOURCE_CODE", + "description": "Implements a Tracker that collects system and conversation metadata and dispatches telemetry events to collectors (e.g., PostHog) with rate limiting and opt-out support. 
It gathers cached environment info, extracts possible emails from git/ssh/defaults, and supports queuing conversation payloads for event dispatch.", + "key_constructs": [ + { + "name": "Tracker", + "type": "class", + "purpose": "Main telemetry client that holds collectors, rate limiter, system metadata and dispatch logic.", + "callers": [ + { + "file": "crates/forge_main/src/lib.rs", + "line": 34, + "context": "pub static TRACKER: LazyLock =" + }, + { + "file": "crates/forge_main/src/lib.rs", + "line": 35, + "context": "LazyLock::new(forge_tracker::Tracker::default);" + }, + { + "file": "crates/forge_tracker/src/log.rs", + "line": 8, + "context": "use crate::Tracker;" + }, + { + "file": "crates/forge_tracker/src/log.rs", + "line": 11, + "context": "pub fn init_tracing(log_path: PathBuf, tracker: Tracker) -> anyhow::Result {" + }, + { + "file": "crates/forge_tracker/src/log.rs", + "line": 41, + "context": "tracker: Tracker," + }, + { + "file": "crates/forge_tracker/src/log.rs", + "line": 66, + "context": "tracker: Tracker," + }, + { + "file": "crates/forge_tracker/src/log.rs", + "line": 71, + "context": "pub fn new(tracker: Tracker) -> Self {" + }, + { + "file": "crates/forge_tracker/src/lib.rs", + "line": 10, + "context": "pub use dispatch::Tracker;" + } + ] + }, + { + "name": "tracking_enabled", + "type": "function", + "purpose": "Checks FORGE_TRACKER env var to determine whether tracking is enabled." + }, + { + "name": "system_info", + "type": "function", + "purpose": "Asynchronously attempts to discover user email addresses from git, SSH keys, and system defaults." 
+ }, + { + "name": "dispatch", + "type": "function", + "purpose": "Builds an Event from EventKind and system metadata and sends it to all configured collectors (honors rate limiting).", + "callers": [ + { + "file": "crates/forge_main/src/tracker.rs", + "line": 8, + "context": "tokio::spawn(TRACKER.dispatch(event));" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 16, + "context": "tokio::runtime::Handle::current().block_on(TRACKER.dispatch(event))" + }, + { + "file": "crates/forge_tracker/src/log.rs", + "line": 86, + "context": "let _ = tracker.dispatch(event_kind).await;" + } + ] + } + ], + "semantic_tags": [ + "telemetry", + "analytics", + "rate-limiting", + "system-info", + "posthog" + ], + "handles_entities": [ + "Conversation", + "Event" + ], + "key_behaviors": [ + "dispatches telemetry events to collectors", + "rate-limits events to avoid runaway dispatch", + "collects environment and identity metadata for events", + "buffers conversation payloads for inclusion in events" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Concurrency", + "title": "Add and integrate rate limiter to prevent event dispatch loops", + "problem": "Unbounded event dispatch during failures could cause runaway loops (e.g., write errors triggering events that cause more errors).", + "root_cause": "No upper bound on event emission; errors in the event path could cause flood of events.", + "solution": "Add RateLimiter and use it in Tracker::dispatch to drop events when rate exceeded. 
Later commits adjusted ownership and synchronization of the limiter (Arc> and use of inc_and_check) to avoid races.", + "commits": [ + "884fe78", + "7e077ca" + ], + "constructs": [ + "Tracker::dispatch", + "Tracker::default" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/build.rs": { + "short_description": "Protobuf compilation build script", + "category": "BUILD", + "description": "Build script invoked by Cargo to compile protobuf definitions located at proto/forge.proto using tonic_prost_build. Ensures generated Rust prost types are produced before building the crate.", + "key_constructs": [], + "semantic_tags": [ + "build", + "protobuf", + "codegen" + ], + "handles_entities": [], + "key_behaviors": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/pipeline.rs": { + "short_description": "Provider-specific request transformation pipeline", + "category": "SOURCE_CODE", + "description": "Defines a transformation pipeline that adapts outbound Request DTOs per provider and model characteristics (OpenRouter, Z.ai, Gemini, Anthropic, etc.). 
It composes many smaller transformers conditionally to normalize tool schemas, set provider-specific params, strip thought signatures, and apply compatibility shims.", + "key_constructs": [ + { + "name": "ProviderPipeline", + "type": "class", + "purpose": "Pipeline struct that holds a Provider reference and implements Transformer to apply provider/model-specific transformations.", + "callers": [ + { + "file": "crates/forge_app/src/dto/openai/mod.rs", + "line": 15, + "context": "pub use transformers::ProviderPipeline;" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 8, + "context": "use forge_app::dto::openai::{ListModelResponse, ProviderPipeline, Request, Response};" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 193, + "context": "let mut pipeline = ProviderPipeline::new(&self.provider);" + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/mod.rs", + "line": 17, + "context": "pub use pipeline::ProviderPipeline;" + } + ] + }, + { + "name": "is_zai_provider", + "type": "function", + "purpose": "Helper to detect Z.ai provider ids so that Z.ai specific transformations can be applied." + }, + { + "name": "is_gemini3_model", + "type": "function", + "purpose": "Checks whether a Request targets a gemini-3 model to control thought-signature preservation." + }, + { + "name": "supports_open_router_params", + "type": "function", + "purpose": "Determines if a provider accepts open-router style parameters to branch pipeline behavior." 
+ } + ], + "semantic_tags": [ + "provider-adaptation", + "request-transformation", + "compatibility", + "model-heuristics", + "tool-schema" + ], + "handles_entities": [ + "Request", + "Provider", + "ModelId" + ], + "key_behaviors": [ + "applies provider-specific transformations to requests", + "normalizes tool schema and response formatting", + "enables/disables provider compatibility shims based on model" + ], + "insights": [ + { + "type": "feature", + "category": "Compatibility", + "title": "Add provider-specific transformers: Kimi K2 reasoning flat-field, strict tool schema enforcement for OpenCode Zen", + "problem": "Different model backends expect different request shapes (kimi_k2 expects reasoning_content flat string; opencode_zen via OpenAI backend needs nullable enum converted to anyOf with null).", + "root_cause": "Heterogeneous provider expectations for reasoning and JSON schema encoding.", + "solution": "Added KimiK2Reasoning transformer, EnforceStrictToolSchema transformer and integrated them into ProviderPipeline for provider-specific routing (when model/provider match). Added tests. Also wired reasoning_content field into request DTO.", + "lesson_learned": "Maintain a pipeline of pluggable transformers to adapt to provider idiosyncrasies. 
Keep mapping decisions centralized in the pipeline.", + "commits": [ + "40cfcc8", + "fe68905" + ], + "constructs": [ + "KimiK2Reasoning", + "EnforceStrictToolSchema", + "ProviderPipeline::transform" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/dto/openai/transformers/pipeline.rs (unit tests)" + ] + } + }, + { + "type": "bug_fix", + "category": "Performance", + "title": "Enable caching for minimax models via open_router pipeline", + "problem": "Minimax models proxied through open_router didn't receive caching transformer (SetCache) causing potential redundant calls and performance loss.", + "root_cause": "SetCache.when predicate didn't include minimax in the when_model regex.", + "solution": "Updated pipeline to include 'minimax' in when_model list and added tests verifying cache flag application for minimax models when provider supports open_router params.", + "lesson_learned": "When using regex-based model matching for transformer application, explicitly include all vendor/model name variants (minimax) and add pipeline unit tests to prevent regressions.", + "commits": [ + "fdf7472" + ], + "constructs": [ + "ProviderPipeline::new", + "SetCache.when" + ] + } + ], + "tests": { + "exercised_by": [ + "inline pipeline tests test_minimax_model_applies_cache_via_open_router and test_non_minimax_model_does_not_apply_cache_via_open_router" + ], + "test_functions": [], + "source_commits": [ + "fdf7472" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/transformers/trim_context_summary.rs": { + "short_description": "Transformer that deduplicates redundant assistant operations in summaries", + "category": "SOURCE_CODE", + "description": "Implements TrimContextSummary which prunes consecutive redundant tool call operations in assistant messages by keeping the latest operation per resource (file, command, search, etc.). 
This reduces context size while preserving the final state of operations for compaction/summarization.", + "key_constructs": [ + { + "name": "TrimContextSummary", + "type": "class", + "purpose": "Transformer that removes duplicate consecutive operations in assistant summary blocks.", + "callers": [ + { + "file": "crates/forge_app/src/transformers/compaction.rs", + "line": 8, + "context": "use crate::transformers::trim_context_summary::TrimContextSummary;" + }, + { + "file": "crates/forge_app/src/transformers/compaction.rs", + "line": 44, + "context": ".pipe(TrimContextSummary)" + } + ] + }, + { + "name": "Operation", + "type": "class", + "purpose": "Enum representing different operation targets (File, Shell, Search, etc.) used for equality/deduplication." + }, + { + "name": "to_op", + "type": "function", + "purpose": "Converts a SummaryTool into an Operation variant for comparison." + } + ], + "semantic_tags": [ + "context-compaction", + "summarization", + "deduplication", + "assistant-messages" + ], + "handles_entities": [ + "ContextSummary", + "SummaryMessage", + "SummaryTool" + ], + "key_behaviors": [ + "deduplicates redundant assistant tool calls in context summaries", + "preserves the most recent operation per resource" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/fs_patch.rs": { + "short_description": "File patching service with snapshot coordination and fuzzy fallback", + "category": "SOURCE_CODE", + "description": "Provides ForgeFsPatch which applies text patch operations (replace, replace_all, append, prepend, swap) to files while creating snapshots and validating syntax. 
It computes match ranges, normalizes line endings, supports fuzzy-search fallback when exact matches fail, and writes final content via the infra with content hashing.", + "key_constructs": [ + { + "name": "Range", + "type": "class", + "purpose": "Represents a byte range in file content and provides helpers for locating and normalizing matches." + }, + { + "name": "Error", + "type": "constant", + "purpose": "Local error enum describing file operation, no-match, multiple matches, and range out-of-bounds conditions." + }, + { + "name": "compute_range", + "type": "function", + "purpose": "Determines a match range from search text and handles operation-specific missing-match semantics." + }, + { + "name": "apply_replacement", + "type": "function", + "purpose": "Applies a patch operation to the provided content and returns patched content, handling append/prepend/replace/swap." + }, + { + "name": "ForgeFsPatch", + "type": "class", + "purpose": "Service struct implementing FsPatchService coordinating snapshots, writes, validation and hash computation.", + "callers": [ + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 28, + "context": "ForgeFetch, ForgeFollowup, ForgeFsPatch, ForgeFsRead, ForgeFsRemove, ForgeFsSearch," + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 71, + "context": "file_patch_service: Arc>," + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 126, + "context": "let file_patch_service = Arc::new(ForgeFsPatch::new(infra.clone()));" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 220, + "context": "type FsPatchService = ForgeFsPatch;" + } + ] + } + ], + "semantic_tags": [ + "file-editing", + "patching", + "snapshots", + "fuzzy-search", + "validation" + ], + "handles_entities": [ + "File content (path)", + "Snapshot", + "PatchOutput", + "PatchEdit" + ], + "key_behaviors": [ + "apply text patches with replace/append/prepend/swap semantics", + "create 
snapshots before modifying files", + "validate file syntax after writes and compute content hash", + "fallback to fuzzy search when exact matches fail" + ], + "insights": [ + { + "type": "feature", + "category": "Other", + "title": "Add multi_patch tool for batch edits", + "problem": "Users often need to make multiple related edits to the same file atomically; previous API only supported single patch edits.", + "root_cause": "Single edit tool required multiple sequential calls, risking partial application or conflicting state.", + "solution": "Implemented multi_patch that reads the file once, applies edits sequentially on the in-memory content (supporting replace/replace_all), computes final content hash, validates syntax via infra.validate_file (graceful), inserts snapshot before write, writes final content once, and returns PatchOutput. Added tool definition, tool descriptions, UI/formatting and tests.", + "lesson_learned": "When implementing batch edits: read once, apply sequential edits in-memory, validate prior to committing, snapshot before mutation, and write once for atomicity. 
Explicitly document atomicity expectations in tool description.", + "commits": [ + "9873264" + ], + "constructs": [ + "multi_patch" + ] + }, + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Handle CRLF (Windows) line endings in patch range computation and search normalization", + "problem": "Patch operations miscomputed byte positions and newlines when source used CRLF, leading to incorrect replacements/matches.", + "root_cause": "Existing logic assumed LF newlines and added +1 per newline using byte counts; didn't account for CRLF two-byte newline length and mismatched search strings.", + "solution": "Added detect_line_ending, normalize_search_line_endings, used line_ending_len instead of +1 when summing line lengths, normalized search strings to source line endings before find_exact, and adjusted various patch formatting points to insert platform-appropriate line endings. Added many tests to cover CRLF/LF scenarios.", + "lesson_learned": "Text-processing and range math must operate on character-line abstractions and be aware of different newline encodings; normalize search content to the target file's newline style before computing ranges. 
Add tests for CRLF cases.", + "commits": [ + "0943a9f" + ], + "constructs": [ + "Range::detect_line_ending", + "Range::normalize_search_line_endings", + "Range::from_search_match", + "compute_range", + "apply_replacement" + ] + }, + { + "type": "performance", + "category": "Other", + "title": "Simplify fs_patch by dropping Delete operation", + "problem": "The patch implementation had complex handling for delete and many tests/edge cases; delete semantics problematic and added complexity", + "root_cause": "Delete handling involved newline boundary logic and many corner cases; maintaining it caused code complexity and performance impacts", + "solution": "Remove PatchOperation::Delete handling and associated tests; keep prepend/append/replace/replace_all/swap operations and simplify apply_replacement logic to improve maintainability and performance", + "commits": [ + "5966509" + ], + "constructs": [ + "apply_replacement", + "PatchOperation enum (Delete removed)" + ], + "lesson_learned": "If an operation adds disproportionate complexity and is rarely needed, consider removing it; prefer simpler and well-defined operations to improve correctness and maintainability. When removing features, update schemas/documentation accordingly." + } + ], + "tests": { + "exercised_by": [ + "inline tests at end of fs_patch.rs (CRLF/LF tests)", + "crates/forge_services/src/tool_services/fs_patch.rs (new multi_patch unit behavior)" + ], + "test_functions": [], + "source_commits": [ + "9873264", + "0943a9f" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/shell.rs": { + "short_description": "Shell command execution wrapper that validates and strips ANSI", + "category": "SOURCE_CODE", + "description": "ForgeShell wraps infra execution of shell commands, validates command strings, optionally strips ANSI escape sequences from stdout/stderr, and returns structured ShellOutput including exit code and environment shell. 
It ensures environment configuration is captured from EnvironmentInfra and forwards env-vars and description metadata.", + "key_constructs": [ + { + "name": "ForgeShell", + "type": "class", + "purpose": "Shell service that holds environment config and infra to execute commands safely.", + "callers": [ + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 29, + "context": "ForgeFsUndo, ForgeFsWrite, ForgeImageRead, ForgePlanCreate, ForgeShell, ForgeSkillFetch," + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 73, + "context": "shell_service: Arc>," + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 128, + "context": "let shell_service = Arc::new(ForgeShell::new(infra.clone()));" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 228, + "context": "type ShellService = ForgeShell;" + } + ] + }, + { + "name": "strip_ansi", + "type": "function", + "purpose": "Utility to remove ANSI escape sequences from command output." + }, + { + "name": "validate_command", + "type": "function", + "purpose": "Ensures command string is not empty or whitespace before execution." + }, + { + "name": "execute", + "type": "function", + "purpose": "Implements ShellService: executes a command via infra, strips ANSI if requested and returns ShellOutput." 
+ } + ], + "semantic_tags": [ + "shell-execution", + "command-runner", + "ansi-stripping", + "environment" + ], + "handles_entities": [ + "ShellOutput", + "CommandOutput", + "Environment" + ], + "key_behaviors": [ + "executes shell commands through infra", + "strips ANSI codes from stdout/stderr when requested", + "forwards environment variables and command description" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/data_gen.rs": { + "short_description": "Data generation app that streams model-driven JSON outputs from schema and templates", + "category": "SOURCE_CODE", + "description": "DataGenerationApp reads a JSON schema, optional system/user prompts, and an input-lines file, then invokes the chat provider to generate structured outputs per input using a templated prompt. It runs requests concurrently, collects tool outputs into JSON objects and returns a streaming BoxStream of generated items.", + "key_constructs": [ + { + "name": "DataGenerationApp", + "type": "class", + "purpose": "Main struct that holds Services and coordinates data generation runs.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 9, + "context": "CommandLoaderService, ConversationService, DataGenerationApp, EnvironmentInfra," + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 398, + "context": "let app = DataGenerationApp::new(self.services.clone());" + } + ] + }, + { + "name": "read_file", + "type": "function", + "purpose": "Resolves and reads files relative to CWD via FsReadService and returns file contents." + }, + { + "name": "load_parameters", + "type": "function", + "purpose": "Loads schema, optional system/user prompts, and parses input file lines into JSON values." 
+ }, + { + "name": "execute", + "type": "function", + "purpose": "Starts the concurrent streaming data generation run, sends prompts to provider and yields JSON outputs.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 399, + "context": "app.execute(data_parameters).await" + } + ] + } + ], + "semantic_tags": [ + "data-generation", + "templating", + "streaming", + "provider-integration", + "schema-validation" + ], + "handles_entities": [ + "Context", + "Template", + "ToolDefinition", + "generated JSON outputs" + ], + "key_behaviors": [ + "generates JSON outputs from model responses using a schema", + "renders prompts using template engine", + "streams results with configurable concurrency" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/template_engine.rs": { + "short_description": "Handlebars-based template engine with custom helpers and embedded templates", + "category": "SOURCE_CODE", + "description": "Sets up a globally available Handlebars instance with custom helpers (inc, json, contains), strict mode and no escaping, and registers embedded templates from the templates directory. Provides TemplateEngine wrapper to render templates or template strings consistently across the app.", + "key_constructs": [ + { + "name": "create_handlebar", + "type": "function", + "purpose": "Creates and configures a Handlebars instance with helpers and registers embedded templates." + }, + { + "name": "HANDLEBARS", + "type": "constant", + "purpose": "Lazily initialized global Handlebars instance used by TemplateEngine." 
+ }, + { + "name": "TemplateEngine", + "type": "class", + "purpose": "Wrapper providing render and render_template methods using the configured Handlebars instance.", + "callers": [ + { + "file": "crates/forge_app/src/compact.rs", + "line": 7, + "context": "use crate::TemplateEngine;" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 90, + "context": "let summary = TemplateEngine::default().render(" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 341, + "context": "TemplateEngine::default()" + }, + { + "file": "crates/forge_services/src/template.rs", + "line": 26, + "context": ".get_or_init(|| async { RwLock::new(forge_app::TemplateEngine::handlebar_instance()) })" + }, + { + "file": "crates/forge_app/src/orch.rs", + "line": 14, + "context": "use crate::{EnvironmentInfra, TemplateEngine};" + }, + { + "file": "crates/forge_app/src/orch.rs", + "line": 343, + "context": "let text = TemplateEngine::default()" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 291, + "context": "use crate::TemplateEngine;" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 293, + "context": "let handlebars = TemplateEngine::handlebar_instance();" + }, + { + "file": "crates/forge_app/src/hooks/doom_loop.rs", + "line": 10, + "context": "use crate::TemplateEngine;" + }, + { + "file": "crates/forge_app/src/hooks/doom_loop.rs", + "line": 237, + "context": "let reminder = TemplateEngine::default().render(" + }, + { + "file": "crates/forge_app/src/title_generator.rs", + "line": 11, + "context": "use crate::TemplateEngine;" + }, + { + "file": "crates/forge_app/src/title_generator.rs", + "line": 55, + "context": "let template = TemplateEngine::default().render(" + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 9, + "context": "AppConfigService, EnvironmentInfra, FileDiscoveryService, ProviderService, TemplateEngine," + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 42, + 
"context": "let rendered_system_prompt = TemplateEngine::default().render(" + }, + { + "file": "crates/forge_services/src/provider_service.rs", + "line": 37, + "context": "let handlebars = forge_app::TemplateEngine::handlebar_instance();" + }, + { + "file": "crates/forge_app/src/user_prompt.rs", + "line": 8, + "context": "use crate::{AttachmentService, TemplateEngine};" + } + ] + } + ], + "semantic_tags": [ + "templating", + "handlebars", + "helpers", + "embedded-templates" + ], + "handles_entities": [ + "Template" + ], + "key_behaviors": [ + "renders templates with custom helpers", + "provides a reused global Handlebars instance for consistent rendering" + ], + "insights": [ + { + "type": "refactoring", + "category": "Build/Packaging", + "title": "Replace rust-embed with include_dir for templates and zsh embedding", + "problem": "rust-embed usage and embedded folder interpolation caused complexity; include_dir is preferred and more deterministic for compile-time embedding.", + "root_cause": "rust-embed dependency and macros were being used in multiple places; consolidating to include_dir reduces feature flags and allows simpler registration functions.", + "solution": "Introduced forge_embed crate that uses include_dir; replaced rust_embed registration with forge_embed::register_templates using include_dir::include_dir static directories; converted global template engine to LazyLock rather than lazy_static. Adjusted tests accordingly.", + "lesson_learned": "Use include_dir for predictable compile-time embedding; provide a small helper crate to consistently register embedded templates across modules. 
Replace lazy_static with std::sync::LazyLock for modern lazy initialization.", + "commits": [ + "6b9cb31" + ], + "constructs": [ + "create_handlebar", + "forge_embed::register_templates", + "HANDLEBARS static LazyLock" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/tool_resolver.rs": { + "short_description": "Resolves and filters available tool definitions for agents (supports globs & aliases)", + "category": "SOURCE_CODE", + "description": "ToolResolver matches agent-declared tool patterns (including glob patterns) against all available ToolDefinition entries, supports deprecated aliases for backward compatibility, deduplicates results and orders them according to agent tool_order. It exposes helpers to check whether a tool is allowed for an agent.", + "key_constructs": [ + { + "name": "ToolResolver", + "type": "class", + "purpose": "Service storing all tool definitions and resolving the subset applicable to a given Agent." + }, + { + "name": "deprecated_tool_aliases", + "type": "function", + "purpose": "Provides mappings from deprecated/capitalized tool names to canonical ToolName values." 
+ }, + { + "name": "resolve", + "type": "function", + "purpose": "Main resolution method that builds patterns, matches tools, deduplicates and sorts them.", + "callers": [ + { + "file": "crates/forge_app/src/app.rs", + "line": 105, + "context": "tool_resolver.resolve(&agent).into_iter().cloned().collect();" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1403, + "context": ".resolve(&agent)" + } + ] + }, + { + "name": "is_allowed", + "type": "function", + "purpose": "Static helper to check whether a particular tool name is permitted for an agent (honoring aliases).", + "callers": [ + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 347, + "context": "let matches = ToolResolver::is_allowed(agent, tool_name);" + } + ] + } + ], + "semantic_tags": [ + "tool-resolution", + "glob-matching", + "backward-compatibility", + "deduplication" + ], + "handles_entities": [ + "ToolDefinition", + "ToolName", + "Agent" + ], + "key_behaviors": [ + "filters available tools for an agent based on patterns", + "supports glob patterns and deprecated name aliases", + "deduplicates and orders resolved tools according to agent preferences" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/compact.rs": { + "short_description": "Configuration structures for compaction and update frequency", + "category": "SOURCE_CODE", + "description": "Defines config types used to control automatic updates and conversation context compaction (Compact and Update), including thresholds, retention windows and model overrides. It includes serde/JSON schema annotations, defaults, and validation via tests to ensure eviction_window serializes/deserializes correctly.", + "key_constructs": [ + { + "name": "UpdateFrequency", + "type": "class", + "purpose": "Enum describing how often updates are checked (Daily, Weekly, Always)." 
+ }, + { + "name": "Update", + "type": "class", + "purpose": "Config struct for automatic update behavior and auto_install preference.", + "callers": [ + { + "file": "crates/forge_main/src/update.rs", + "line": 5, + "context": "use forge_config::Update;" + }, + { + "file": "crates/forge_main/src/update.rs", + "line": 69, + "context": "pub async fn on_update(api: Arc, update: Option<&Update>) {" + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 12, + "context": "AutoDumpFormat, Compact, Decimal, HttpConfig, ModelConfig, ReasoningConfig, RetryConfig, Update," + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 210, + "context": "pub updates: Option," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 688, + "context": "let update = forge_config::Update::default().auto_update(args.no_confirm);" + } + ] + }, + { + "name": "Compact", + "type": "class", + "purpose": "Config struct controlling context compaction parameters like retention_window and eviction_window.", + "callers": [ + { + "file": "crates/forge_config/src/config.rs", + "line": 12, + "context": "AutoDumpFormat, Compact, Decimal, HttpConfig, ModelConfig, ReasoningConfig, RetryConfig, Update," + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 244, + "context": "pub compact: Option," + } + ] + } + ], + "semantic_tags": [ + "configuration", + "compaction", + "updates", + "serialization" + ], + "handles_entities": [ + "Compact config" + ], + "key_behaviors": [ + "configures compaction thresholds and retention behavior", + "configures update check frequency and auto-update preference" + ], + "insights": [ + { + "type": "refactoring", + "category": "Typing", + "title": "Use Percentage type for eviction_window and update tests", + "problem": "eviction_window was a raw f64 with custom deserializer; tests and TOML output exhibited floating noise.", + "root_cause": "Raw floats were used for percent semantics causing validation & formatting issues.", + "solution": 
"Switch eviction_window to crate::Percentage (newtype) and update default/tests to use Percentage::new/.value. Add unit tests verifying TOML round-trip and invalid-range rejection.", + "lesson_learned": "Switching to validated newtypes centralizes validation and formatting responsibilities and simplifies callers.", + "commits": [ + "209cd61", + "46c936c" + ], + "constructs": [ + "Compact", + "Compact::new", + "Dummy impl for Compact (tests)" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_config/src/compact.rs tests" + ], + "source_commits": [ + "209cd61" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/hook.rs": { + "short_description": "Lifecycle hook/event system for conversation processing", + "category": "SOURCE_CODE", + "description": "Defines lifecycle events, payload types and an extensible EventHandle trait to attach handlers that run on conversation lifecycle events (start, end, request, response, toolcall start/end). It provides Hook composition, a default NoOpHandler, and helpers to combine handlers so callers can customize behavior during conversation processing.", + "key_constructs": [ + { + "name": "EventData", + "type": "class", + "purpose": "Generic container holding agent, model_id and event payload for lifecycle events." + }, + { + "name": "LifecycleEvent", + "type": "class", + "purpose": "Enum enumerating Start, End, Request, Response, ToolcallStart and ToolcallEnd events with typed payloads." + }, + { + "name": "EventHandle", + "type": "function", + "purpose": "Async trait for handling lifecycle events and mutating Conversation state." + }, + { + "name": "Hook", + "type": "class", + "purpose": "Container of typed event handlers with builder-style setters and zip composition to combine hooks." 
+ } + ], + "semantic_tags": [ + "hooks", + "event-handling", + "lifecycle", + "conversation" + ], + "handles_entities": [ + "Conversation", + "Agent", + "ModelId", + "ToolCallFull", + "ToolResult" + ], + "key_behaviors": [ + "attach handlers to conversation lifecycle events", + "compose multiple handlers/hooks into a chain", + "invoke handlers on lifecycle transitions" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/message.rs": { + "short_description": "Message and usage domain models for LLM chat responses", + "category": "SOURCE_CODE", + "description": "Defines message-related domain types used for chat responses, including ChatCompletionMessage, Content (part/full), Usage with accumulate/merge semantics, message phases and finish reasons. These types standardize streaming and full message representations and provide utilities to combine usage metrics safely across streaming events.", + "key_constructs": [ + { + "name": "MessagePhase", + "type": "class", + "purpose": "Enum marking assistant message phase (Commentary or FinalAnswer).", + "callers": [ + { + "file": "crates/forge_domain/src/context.rs", + "line": 21, + "context": "Attachment, AttachmentContent, ConversationId, EventValue, Image, MessagePhase, ModelId," + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 321, + "context": "pub phase: Option," + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 572, + "context": "phase: Option," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 5, + "context": "use forge_app::domain::{Context as ChatContext, ContextMessage, MessagePhase, Role, ToolChoice};" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 12, + "context": "fn to_oai_phase(phase: MessagePhase) -> oai::MessagePhase {" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 14, + "context": 
"MessagePhase::Commentary => oai::MessagePhase::Commentary," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 15, + "context": "MessagePhase::FinalAnswer => oai::MessagePhase::FinalAnswer," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 1296, + "context": "use forge_app::domain::{MessagePhase, TextMessage};" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 1300, + "context": "assistant_msg.phase = Some(MessagePhase::Commentary);" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 1337, + "context": "use forge_app::domain::{MessagePhase, TextMessage};" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/request.rs", + "line": 1341, + "context": "assistant_msg.phase = Some(MessagePhase::FinalAnswer);" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/response.rs", + "line": 5, + "context": "ChatCompletionMessage, Content, FinishReason, MessagePhase, TokenCount, ToolCall," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/response.rs", + "line": 105, + "context": "type Domain = MessagePhase;" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/response.rs", + "line": 109, + "context": "oai::MessagePhase::Commentary => MessagePhase::Commentary," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/response.rs", + "line": 110, + "context": "oai::MessagePhase::FinalAnswer => MessagePhase::FinalAnswer," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/response.rs", + "line": 876, + "context": "Some(forge_app::domain::MessagePhase::Commentary)" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/response.rs", + "line": 911, + "context": "Some(forge_app::domain::MessagePhase::FinalAnswer)" + } + ] + }, + { + "name": "Usage", + "type": "class", + "purpose": "Tracks token/cost usage and provides 
accumulate and merge strategies for different reporting patterns.", + "callers": [ + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 120, + "context": "pub fn accumulated_usage(&self) -> Option {" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 124, + "context": "pub fn usage(&self) -> Option {" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 271, + "context": "use crate::{MessageEntry, Usage};" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 275, + "context": "let main_usage = Usage { cost: Some(0.01), ..Usage::default() };" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 275, + "context": "let main_usage = Usage { cost: Some(0.01), ..Usage::default() };" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 285, + "context": "let related_usage_1 = Usage { cost: Some(0.02), ..Usage::default() };" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 285, + "context": "let related_usage_1 = Usage { cost: Some(0.02), ..Usage::default() };" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 294, + "context": "let related_usage_2 = Usage { cost: Some(0.03), ..Usage::default() };" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 294, + "context": "let related_usage_2 = Usage { cost: Some(0.03), ..Usage::default() };" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 323, + "context": "use crate::{MessageEntry, Usage};" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 329, + "context": "let related_usage = Usage { cost: Some(0.05), ..Usage::default() };" + }, + { + "file": "crates/forge_domain/src/conversation.rs", + "line": 329, + "context": "let related_usage = Usage { cost: Some(0.05), ..Usage::default() };" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 556, + "context": "use forge_domain::{TokenCount, Usage};" + }, 
+ { + "file": "crates/forge_app/src/compact.rs", + "line": 562, + "context": "let inside_usage = Usage {" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 571, + "context": "let inside_usage2 = Usage {" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 580, + "context": "let outside_usage = Usage {" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 620, + "context": "let expected_compacted_usage = Usage {" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 636, + "context": "let expected_total_usage = Usage {" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 7, + "context": "ToolCallFull, ToolCallPart, Usage," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 62, + "context": "let mut usage: Usage = Default::default();" + } + ] + }, + { + "name": "ChatCompletionMessage", + "type": "class", + "purpose": "Represents a possibly-partial message from provider including content, reasoning, tool_calls and usage.", + "callers": [ + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 6, + "context": "ArcSender, ChatCompletionMessage, ChatCompletionMessageFull, ChatResponse, ChatResponseContent," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 47, + "context": "impl ResultStreamExt for crate::BoxStream {" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 312, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 365, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 417, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 470, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 528, + "context": "let result_stream: 
BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 591, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 626, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 671, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 725, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 762, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 792, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 839, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 879, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 921, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 961, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 1005, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 1051, + "context": "let result_stream: BoxStream =" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 1082, + "context": "let result_stream: BoxStream =" + } + ] + }, + { + "name": "Content", + "type": "class", + "purpose": "Enum wrapper for partial (streamed) or full message content.", + "callers": [ + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 214, + "context": "ChatCompletionMessage::assistant(forge_domain::Content::part(" 
+ }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 283, + "context": "ChatCompletionMessage::assistant(forge_domain::Content::part(" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 433, + "context": "Content, Context as ChatContext, ContextMessage, FinishReason, ModelId, Provider," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1317, + "context": "assert_eq!(first.content, Some(Content::part(\"hello\")));" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1393, + "context": "assert_eq!(first.content, Some(Content::part(\"hello from codex\")));" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1475, + "context": "assert_eq!(first.content, Some(Content::part(\"hello\")));" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 291, + "context": "BoxStream, Content, FinishReason, TokenCount, ToolCall, ToolCallArguments, ToolCallId," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 300, + "context": "Ok(ChatCompletionMessage::default().content(Content::part(\"Hello \")))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 302, + "context": ".content(Content::part(\"world!\"))" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 346, + "context": ".content(Content::part(\"Hello \"))" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 355, + "context": ".content(Content::part(\"world!\"))" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 399, + "context": ".content(Content::part(\"Hello world!\"))" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 460, + "context": ".content(Content::part(\"Hello world!\"))" + }, + { + "file": 
"crates/forge_domain/src/result_stream_ext.rs", + "line": 514, + "context": "Ok(ChatCompletionMessage::default().content(Content::part(\"Hello \")))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 515, + "context": "Ok(ChatCompletionMessage::default().content(Content::part(\"world!\")))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 577, + "context": "Ok(ChatCompletionMessage::default().content(Content::part(\"Hello \")))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 578, + "context": "Ok(ChatCompletionMessage::default().content(Content::part(\"world!\")))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 622, + "context": "Ok(ChatCompletionMessage::default().content(Content::part(\"Hello \")))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 623, + "context": "Ok(ChatCompletionMessage::default().content(Content::part(\"world!\")))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 664, + "context": ".content(Content::part(\"Answer: \"))" + } + ] + } + ], + "semantic_tags": [ + "messaging", + "usage-tracking", + "streaming", + "content", + "finish-reason" + ], + "handles_entities": [ + "ChatCompletionMessage", + "ChatCompletionMessageFull", + "Usage", + "Content" + ], + "key_behaviors": [ + "represent LLM messages and content variants", + "accumulate and merge usage metrics appropriately", + "preserve message phase and finish reasons" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Prevent token double-counting for streaming providers (Anthropic/Bedrock/Google)", + "problem": "Streaming providers sometimes emit cumulative token counts across multiple events (message_start + message_delta), and code was summing these values leading to inflated token totals.", + "root_cause": "Usage.accumulate() summed token counters which is correct for independent 
requests but incorrect when events are cumulative deltas; earlier logic used accumulate for partial streaming updates.", + "solution": "Add Usage::merge that takes per-field max (TokenCount::max) for token counts and sums cost fields. Add TokenCount::max helper. Change streaming handling to use merge for cumulative partial events. Add unit tests illustrating correct semantics.", + "lesson_learned": "Different providers use different semantics for partial usage reporting (deltas vs cumulative totals). When combining partial cumulative events, use max/merge semantics for token counts and additive semantics for cost.", + "commits": [ + "caf374e" + ], + "constructs": [ + "Usage::accumulate", + "Usage::merge", + "TokenCount::max" + ] + }, + { + "type": "feature", + "category": "State Management", + "title": "Add MessagePhase enum to preserve assistant phase labels", + "problem": "Assistant messages could contain an implicit phase (intermediate commentary vs final answer) emitted by some model families; this metadata was being dropped and could degrade multi-turn reasoning and context replay.", + "root_cause": "Domain model did not include a phase field for messages.", + "solution": "Add MessagePhase enum (Commentary, FinalAnswer) and include phase in ChatCompletionMessage and ChatCompletionMessageFull. 
Wire the field through request/response/path where needed.", + "lesson_learned": "When provider responses carry meta-labels that affect subsequent reasoning, capture them explicitly in domain types so they can be re-sent and preserved across turns.", + "commits": [ + "5b18cce" + ], + "constructs": [ + "MessagePhase", + "ChatCompletionMessage.phase", + "ChatCompletionMessageFull.phase" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_domain/src/message.rs::test_usage_merge_preserves_costs", + "crates/forge_domain/src/message.rs::test_usage_merge_anthropic_cumulative" + ], + "test_functions": [], + "source_commits": [ + "caf374e" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/lib.rs": { + "short_description": "Infrastructure crate re-exports and module declarations", + "category": "SOURCE_CODE", + "description": "Top-level lib that declares and exposes various infrastructure modules (console, env, executor, grpc, http, fs I/O, storage, mcp, walker etc.) and re-exports key implementations for use across the workspace. It centralizes infra utilities and concrete types so other crates can depend on stable exported APIs.", + "key_constructs": [ + { + "name": "StdConsoleWriter", + "type": "constant", + "purpose": "Exported console writer implementation for standard console output." + }, + { + "name": "ForgeEnvironmentInfra", + "type": "constant", + "purpose": "Exported environment infra implementation that provides environment/config access." + }, + { + "name": "ForgeCommandExecutorService", + "type": "constant", + "purpose": "Exported command executor implementation for running commands." + }, + { + "name": "sanitize_headers", + "type": "function", + "purpose": "HTTP helper used to sanitize headers for outbound requests." + }, + { + "name": "CacacheStorage", + "type": "constant", + "purpose": "Exported KV storage implementation backed by cacache." 
+ } + ], + "semantic_tags": [ + "infrastructure", + "reexports", + "io", + "grpc", + "fs" + ], + "handles_entities": [ + "environment", + "console", + "storage" + ], + "key_behaviors": [ + "provide infrastructure implementations and utilities", + "expose executor, environment and storage helpers to other crates" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/sync_display.rs": { + "short_description": "Human-friendly display messages for sync progress events", + "category": "SOURCE_CODE", + "description": "Provides SyncProgressDisplay trait and an implementation for SyncProgress that formats sync lifecycle events into human-readable UI messages. Includes helper pluralize and unit tests to verify message output for different sync states.", + "key_constructs": [ + { + "name": "SyncProgressDisplay", + "type": "class", + "purpose": "Trait defining message() for converting SyncProgress to an optional human-readable string." + }, + { + "name": "impl SyncProgressDisplay for SyncProgress", + "type": "implementation", + "purpose": "Implements message formatting for various SyncProgress variants (Starting, DiffComputed, Syncing, Completed, etc.)." + }, + { + "name": "pluralize", + "type": "function", + "purpose": "Helper returning 'file' or 'files' based on count for display text." 
+ } + ], + "semantic_tags": [ + "ui", + "display", + "sync", + "formatting" + ], + "handles_entities": [ + "SyncProgress", + "WorkspaceId" + ], + "key_behaviors": [ + "format sync progress events into human-readable messages", + "provide pluralization for file counts" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/agent.rs": { + "short_description": "Load and parse agent definition files from built-in and custom directories", + "category": "SOURCE_CODE", + "description": "Provides an infrastructure-backed repository that discovers, parses, and deduplicates agent definitions from built-in, global, and project-local locations, applying precedence rules. It parses Markdown files with YAML frontmatter into AgentDefinition instances and exposes a load_agents entry point used by the application to enumerate available agents.", + "key_constructs": [ + { + "name": "ForgeAgentRepository", + "type": "class", + "purpose": "Repository wrapper that loads agent definitions via provided infra (file, env, directory readers).", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 25, + "context": "use crate::agent::ForgeAgentRepository;" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 48, + "context": "agent_repository: Arc>," + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 81, + "context": "let agent_repository = Arc::new(ForgeAgentRepository::new(infra.clone()));" + } + ] + }, + { + "name": "load_agents", + "type": "function", + "purpose": "Public async entry that loads agents from built-ins, global dir, and CWD and resolves conflicts.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 497, + "context": "let agent_defs = self.agent_repository.load_agents().await?;" + } + ] + }, + { + "name": "parse_agent_file", + "type": "function", + "purpose": "Parses a Markdown file with YAML frontmatter into an AgentDefinition using 
gray_matter." + }, + { + "name": "resolve_agent_conflicts", + "type": "function", + "purpose": "Deduplicates agents by ID keeping the last occurrence to implement precedence rules." + } + ], + "semantic_tags": [ + "agent-management", + "file-parsing", + "yaml-frontmatter", + "repository", + "configuration" + ], + "handles_entities": [ + "AgentDefinition" + ], + "key_behaviors": [ + "discovers built-in and custom agent definitions", + "parses markdown+frontmatter into typed agent objects", + "resolves agent ID conflicts with defined precedence" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/provider_service.rs": { + "short_description": "Service wrapper that renders provider templates and delegates provider/model calls", + "category": "SOURCE_CODE", + "description": "Wraps a ProviderRepository and renders template-based provider configurations (URLs and model sources) into fully resolved Provider instances, then delegates chat and model listing to the underlying repository. 
It centralizes template rendering logic and credential handling before callers consume provider clients.", + "key_constructs": [ + { + "name": "ForgeProviderService", + "type": "class", + "purpose": "Service struct that adapts a provider repository and performs template rendering for provider configs.", + "callers": [ + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 25, + "context": "use crate::provider_service::ForgeProviderService;" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 58, + "context": "chat_service: Arc>," + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 118, + "context": "let chat_service = Arc::new(ForgeProviderService::new(infra.clone()));" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 234, + "context": "type ProviderService = ForgeProviderService;" + } + ] + }, + { + "name": "render_url_template", + "type": "function", + "purpose": "Renders a URL template with parameters using the template engine and returns a parsed Url." + }, + { + "name": "render_provider", + "type": "function", + "purpose": "Converts a ProviderTemplate into a Provider by rendering main and model URLs and preserving credential info." + }, + { + "name": "ProviderService impl for ForgeProviderService", + "type": "function", + "purpose": "Implements ProviderService methods to delegate chat, models, provider retrieval, and credential ops to the repository." 
+ } + ], + "semantic_tags": [ + "providers", + "template-rendering", + "authentication", + "service-layer", + "network" + ], + "handles_entities": [ + "Provider", + "ProviderTemplate", + "Model", + "AuthCredential" + ], + "key_behaviors": [ + "renders provider URL templates into usable URLs", + "delegates chat and model requests to repository", + "exposes provider and credential management functions" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/build.rs": { + "short_description": "Build script setting package version and name from env", + "category": "BUILD", + "description": "A small build.rs that normalizes an APP_VERSION environment variable (strips leading 'v') and emits cargo:rustc-env directives to set CARGO_PKG_VERSION and CARGO_PKG_NAME for the build. It also instructs Cargo to rerun if APP_VERSION changes so the binary embeds the intended version.", + "key_constructs": [], + "semantic_tags": [ + "build", + "versioning", + "ci", + "env" + ], + "handles_entities": [], + "key_behaviors": [ + "injects runtime package version/name into the compiled binary" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/bindings.zsh": { + "short_description": "ZLE widget registrations and key bindings for the forge plugin", + "category": "SOURCE_CODE", + "description": "Registers ZLE widgets and custom keybindings for the shell plugin, including a custom bracketed-paste handler to fix syntax highlighting and bindings for Enter and Tab to wire into the plugin's widgets. These bindings ensure the interactive :command experience and paste behavior behave reliably across ZSH sessions.", + "key_constructs": [ + { + "name": "forge-bracketed-paste", + "type": "function", + "purpose": "Custom bracketed-paste handler that calls the built-in widget and forces redisplay/reset-prompt to fix highlighting." 
+ }, + { + "name": "forge-accept-line", + "type": "function", + "purpose": "ZLE widget name registered for accepting lines and transforming :commands (registered, implementation in dispatcher.zsh)." + }, + { + "name": "forge-completion", + "type": "function", + "purpose": "ZLE widget name registered for completion (implementation in completion.zsh)." + } + ], + "semantic_tags": [ + "zsh", + "keybindings", + "ui-integration", + "shell-plugin", + "input-handling" + ], + "handles_entities": [], + "key_behaviors": [ + "registers widgets and keybindings for plugin interactions", + "ensures bracketed paste refreshes highlighting", + "rebinds Enter and Tab to plugin handlers" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/completion.zsh": { + "short_description": "Custom tab-completion widget handling :commands and @ file picks", + "category": "SOURCE_CODE", + "description": "Provides a ZLE completion widget that supports two modes: @-prefixed file/directory selection (uses fd + fzf preview) and colon-prefixed :command completion (shows interactive fzf selection of commands). It updates the ZSH buffer with the selected item and resets the prompt to maintain UX consistency.", + "key_constructs": [ + { + "name": "forge-completion", + "type": "function", + "purpose": "Main completion widget that resolves current word and performs file or command selection using fzf and plugin helper commands." 
+ } + ], + "semantic_tags": [ + "zsh", + "completion", + "fzf", + "file-selection", + "interactive" + ], + "handles_entities": [], + "key_behaviors": [ + "completes @file references via fzf with previews", + "provides interactive selection of :commands with filtering" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/config.zsh": { + "short_description": "Configuration variables and environment detection for the shell plugin", + "category": "CONFIG", + "description": "Defines plugin-scoped configuration variables (paths, command detection, preview/window options, session overrides) using typeset to avoid polluting the global environment. It detects utilities like fd and bat, caches commands lazily, and exposes session-scoped overrides for model/provider and reasoning effort.", + "key_constructs": [], + "semantic_tags": [ + "configuration", + "zsh", + "environment", + "session", + "plugin" + ], + "handles_entities": [], + "key_behaviors": [ + "provides runtime configuration and utility command detection for plugin", + "stores session-scoped model/provider overrides" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/dispatcher.zsh": { + "short_description": "Main dispatcher for :commands and the accept-line widget logic", + "category": "SOURCE_CODE", + "description": "Contains the core command dispatch logic transforming colon-prefixed lines into plugin actions: switching agents, executing commands, generating conversations, and delegating to the CLI. It implements the forge-accept-line ZLE widget which parses buffer contents, handles aliases, delegates to specific _forge_action_* handlers, and centrally resets the prompt after actions.", + "key_constructs": [ + { + "name": "_forge_action_default", + "type": "function", + "purpose": "Primary handler for executing commands or setting active agents when no specialized action handler matches." 
+ }, + { + "name": "forge-accept-line", + "type": "function", + "purpose": "ZLE widget that parses :command input from BUFFER, manages history and cursor, maps aliases, dispatches to action handlers, and resets the prompt." + } + ], + "semantic_tags": [ + "zsh", + "command-dispatch", + "cli-integration", + "user-input", + "agents" + ], + "handles_entities": [ + "Conversation", + "Agent" + ], + "key_behaviors": [ + "parses and dispatches :commands from the shell prompt", + "sets active agent or executes commands with conversation context", + "generates conversations and starts background sync/update" + ], + "insights": [ + { + "type": "refactoring", + "category": "UX", + "title": "Rename login to provider-login (keep login alias) and add suggest-model/ccm aliases", + "problem": "Command naming inconsistency for provider login and new config actions.", + "root_cause": "New commands introduced and plugin naming conventions moved to Object-Action pattern.", + "solution": "Dispatch provider-login|login to login handler, add suggest-model|sm and config-commit-model|ccm dispatch branches. 
Update README to document Object-Action style and alias behavior.", + "lesson_learned": "When renaming user-facing commands, keep backwards-compatible aliases to avoid breaking users, and document naming conventions in plugin README.", + "commits": [ + "9a6008e", + "da37b43", + "f8a260e" + ], + "constructs": [ + "forge-accept-line (case dispatch)" + ] + }, + { + "type": "feature", + "category": "UX", + "title": "Dispatch new copy and start background update from dispatcher", + "problem": "Dispatcher lacked routing for 'copy' and background update start.", + "root_cause": "New shell features required dispatch entries.", + "solution": "Added 'copy' case to forge-accept-line and calls to _forge_start_background_update in default action triggers.", + "lesson_learned": "Keep plugin dispatcher and action scripts synchronized when adding new actions; unit tests for plugin are hard but manual validation is necessary.", + "commits": [ + "a8d3acc", + "f7ebfd6" + ], + "constructs": [ + "forge-accept-line", + "_forge_action_default" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/helpers.zsh": { + "short_description": "Utility helpers for executing forge CLI calls and UI helpers", + "category": "SOURCE_CODE", + "description": "Provides helper functions used by the ZSH plugin including lazy command caching, fzf wrapper, consistent CLI execution wrappers (interactive and non-interactive), prompt reset, logging helpers, and background sync/update starters. These utilities standardize how the plugin invokes the forge binary and manage background activities without disrupting the interactive shell.", + "key_constructs": [ + { + "name": "_forge_get_commands", + "type": "function", + "purpose": "Lazily loads and caches the output of `forge list commands --porcelain` for completions and dispatch." 
+ }, + { + "name": "_forge_exec_interactive", + "type": "function", + "purpose": "Runs the forge binary connected to /dev/tty so interactive child processes (readline, fzf) behave correctly from ZLE." + }, + { + "name": "_forge_exec", + "type": "function", + "purpose": "Executes forge with appropriate agent and session environment variables for non-interactive uses." + }, + { + "name": "_forge_log", + "type": "function", + "purpose": "Consistent timestamped logging helper with colored output for levels: error, info, success, warning, debug." + }, + { + "name": "_forge_start_background_sync", + "type": "function", + "purpose": "Starts a background job to sync the workspace if indexing is enabled and not already running." + } + ], + "semantic_tags": [ + "zsh", + "helpers", + "cli-exec", + "background-jobs", + "logging" + ], + "handles_entities": [ + "Conversation", + "Workspace" + ], + "key_behaviors": [ + "executes forge CLI consistently from ZLE widgets", + "provides logging and utility wrappers for plugin actions", + "starts background sync and update checks without blocking shell" + ], + "insights": [ + { + "type": "refactoring", + "category": "Usability", + "title": "Add _forge_exec_interactive helper to attach child commands to /dev/tty", + "problem": "Interactive commands run from ZLE widgets did not have TTY attached and rustyline/fzf saw EOF.", + "root_cause": "ZLE spawns child processes with pipes; interactive libraries require the real TTY.", + "solution": "Added _forge_exec_interactive which runs the CLI with /dev/tty so interactive prompts work from ZLE. Documented not to use inside command substitution.", + "lesson_learned": "Shell widgets invoking interactive binaries must forward the real TTY into child processes. 
Provide a distinct helper to avoid misuse.", + "commits": [ + "ca0fac8" + ], + "constructs": [ + "_forge_exec_interactive" + ] + }, + { + "type": "refactoring", + "category": "UX", + "title": "Change fzf layout to reverse and preview bottom for better UX", + "problem": "fzf preview at top and full height made selection awkward in many terminals.", + "root_cause": "Default fzf options used earlier were not optimal for modern terminal layouts.", + "solution": "Change _FORGE_PREVIEW_WINDOW to bottom:75% and _forge_fzf to use --reverse and --height 80% for a reversed layout with preview at bottom.", + "lesson_learned": "Small UX improvements in shell pickers significantly improve discoverability and navigation; pick sensible defaults for preview placement and layout.", + "commits": [ + "3e04e22" + ], + "constructs": [ + "_forge_fzf" + ] + }, + { + "type": "feature", + "category": "UX", + "title": "Start background update check from zsh plugin", + "problem": "Users needed automatic periodic update checks from shell plugin.", + "root_cause": "Shell plugin only started background sync previously.", + "solution": "Added _forge_start_background_update that executes $_FORGE_BIN update --no-confirm in background (redirecting IO and closing stdin/out to avoid terminal flash). Hooked into action startup paths.", + "lesson_learned": "Background jobs invoked from shell plugins must close stdio and avoid prompting for input; use --no-confirm for non-interactive runs.", + "commits": [ + "f7ebfd6" + ], + "constructs": [ + "_forge_start_background_update" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/highlight.zsh": { + "short_description": "Syntax highlighting patterns for forge conversation syntax", + "category": "CONFIG", + "description": "Configures zsh-syntax-highlighting patterns for tagged file tokens, colon-prefixed commands, and the post-command text to style them with colors and emphasis. 
It improves visual differentiation for plugin-specific syntax in the shell prompt.", + "key_constructs": [], + "semantic_tags": [ + "zsh", + "syntax-highlighting", + "ui", + "configuration" + ], + "handles_entities": [], + "key_behaviors": [ + "applies highlighting to @[...] tokens and :commands in the shell" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/context.rs": { + "short_description": "Domain types representing messages, contexts, and related utilities", + "category": "SOURCE_CODE", + "description": "Defines core domain types for conversation messaging: ContextMessage enum, TextMessage structure, MessageEntry wrapper, Role enum, and the Context request object, plus helper functions for token estimation, serialization to text, and context manipulation. These types represent messages sent to providers and are central to building requests, storing contexts, and compacting conversation state across the system.", + "key_constructs": [ + { + "name": "ContextMessage", + "type": "enum", + "purpose": "Represents message variants sent to the provider: Text, Tool (ToolResult), or Image." + }, + { + "name": "TextMessage", + "type": "struct", + "purpose": "Represents a textual message with role, content, optional tool calls, reasoning details, and other metadata." + }, + { + "name": "Context", + "type": "struct", + "purpose": "Encapsulates a request to an LLM provider, including messages, tools, generation parameters, and streaming options." + }, + { + "name": "token_count_approx", + "type": "function", + "purpose": "Estimates token usage from message content via character-based approximation to help budgeting and prompt sizing." + }, + { + "name": "MessageEntry", + "type": "struct", + "purpose": "Wrapper around a ContextMessage that includes optional usage metrics and implements Deref for convenience." 
+ } + ], + "semantic_tags": [ + "conversation", + "messages", + "context", + "token-estimation", + "serialization" + ], + "handles_entities": [ + "Context", + "Message", + "ToolResult", + "Image", + "Usage" + ], + "key_behaviors": [ + "models messages and conversation contexts for provider requests", + "estimates token usage and serializes messages for storage or transport", + "manages adding attachments, tools, and metadata to contexts" + ], + "insights": [ + { + "type": "bug_fix", + "category": "API / Edge Case", + "title": "Avoid duplicating reasoning block when both raw reasoning text and reasoning_details are present", + "problem": "append_message would convert a raw reasoning string into a reasoning_details entry even when structured reasoning_details (with signatures) already existed. This duplicated a thinking block with a null signature which Anthropic rejects.", + "root_cause": "Unconditional conversion of flat reasoning text into a new reasoning_details entry without checking for existing structured details.", + "solution": "Change merge logic to prefer existing reasoning_details when present; only convert raw reasoning string into reasoning_details when structured details are absent. Added regression test.", + "commits": [ + "9510ab5" + ], + "constructs": [ + "Context::append_message (merged_reasoning_details logic)" + ] + }, + { + "type": "bug_fix", + "category": "State Management", + "title": "Detect model changes to avoid reusing reasoning across models", + "problem": "Reasoning signatures are model-specific; reusing reasoning when switching models can be invalid and misleading.", + "root_cause": "Normalization logic previously only considered whether reasoning was enabled, not whether the model changed since the last assistant message.", + "solution": "Add Context::has_model_changed(current_model) which inspects the last assistant message with a model and returns true if it differs (or absent). 
Use this to drop/normalize reasoning only when model has changed. Add tests covering multiple scenarios.", + "lesson_learned": "When context metadata is model-scoped (e.g., signatures), changing the model invalidates previously stored or serialized reasoning \u2014 check model continuity before re-using reasoning.", + "commits": [ + "2991aec" + ], + "constructs": [ + "Context::has_model_changed" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_domain/src/context.rs tests (has_model_changed tests)" + ], + "source_commits": [ + "2991aec" + ] + } + }, + { + "type": "feature", + "category": "Edge Case", + "title": "Store raw user content in TextMessage.raw_content", + "problem": "Original user message raw content (pre-template/rendering) was not preserved, making it difficult to display the user's raw multi-line input in summaries and for diagnostics.", + "root_cause": "ContextMessage TextMessage lacked a raw_content field to persist the literal event value.", + "solution": "Add raw_content: Option to TextMessage and provide ContextMessage::raw_content() accessor. Various transformers and DTO tests were updated to populate raw_content = None where appropriate. 
 UserPromptBuilder stores the original event value as raw_content when constructing user messages.", + "commits": [ + "c796876" + ], + "constructs": [ + "TextMessage::raw_content", + "ContextMessage::raw_content", + "ContextMessage::user", + "UserPromptBuilder logic" + ] + }, + { + "type": "bug_fix", + "category": "Typing", + "title": "TokenCount became Copy and context token access simplified", + "problem": "TokenCount and total_tokens handling used clones and Option mapping which was verbose and error-prone", + "root_cause": "TokenCount was non-Copy leading to unnecessary clones; Usage.total_tokens stored TokenCount previously as cloneable values", + "solution": "Make TokenCount Copy, adjust methods to return TokenCount by value and use map(|u| u.total_tokens) instead of cloning", + "commits": [ + "b0ba8c2" + ], + "constructs": [ + "TokenCount (derive Copy)", + "Context::token_count() usage changes" + ], + "lesson_learned": "Prefer simple Copy types for small numeric wrappers to reduce clone noise and accidental ownership complexity." + } + ], + "tests": { + "exercised_by": [ + "crates/forge_domain/src/context.rs (inline tests)" + ], + "test_functions": [ + "test_append_message_does_not_duplicate_reasoning_when_details_present" + ], + "source_commits": [ + "9510ab5" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/zsh/rprompt.rs": { + "short_description": "Renderer for the ZSH right prompt showing agent, model, tokens, and cost", + "category": "SOURCE_CODE", + "description": "Implements a ZshRPrompt struct with Display to render RPROMPT content (agent name, token count, cost, and model) with styling and optional nerd font symbols. 
It formats active vs inactive states, supports currency conversion and tests ensure expected output strings.", + "key_constructs": [ + { + "name": "ZshRPrompt", + "type": "class", + "purpose": "Configuration holder for agent/model/token/cost display and rendering options for the ZSH right prompt.", + "callers": [ + { + "file": "crates/forge_main/src/zsh/mod.rs", + "line": 27, + "context": "pub use rprompt::ZshRPrompt;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 50, + "context": "use crate::zsh::ZshRPrompt;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3712, + "context": "let rprompt = ZshRPrompt::default()" + } + ] + }, + { + "name": "Display for ZshRPrompt", + "type": "function", + "purpose": "Formats and styles the right prompt string depending on active state and provided fields." + }, + { + "name": "AGENT_SYMBOL", + "type": "constant", + "purpose": "Unicode symbol used to prefix the agent name when nerd fonts are enabled." + }, + { + "name": "MODEL_SYMBOL", + "type": "constant", + "purpose": "Unicode symbol used to prefix the model id when nerd fonts are enabled." + } + ], + "semantic_tags": [ + "zsh", + "ui", + "prompt", + "formatting", + "display" + ], + "handles_entities": [ + "Agent", + "Model", + "TokenCount" + ], + "key_behaviors": [ + "renders a styled right-hand shell prompt showing agent, tokens, cost and model", + "switches styling based on active token usage" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/conversation/conversation_repo.rs": { + "short_description": "Diesel-backed repository implementation for persisting conversations", + "category": "SOURCE_CODE", + "description": "Implements ConversationRepository backed by a Diesel DatabasePool, providing CRUD operations such as upsert_conversation, get_conversation, get_all_conversations, get_last_conversation, and delete_conversation scoped to a workspace. 
It maps between Conversation domain objects and ConversationRecord DB representations and includes comprehensive tests for persistence behavior.", + "key_constructs": [ + { + "name": "ConversationRepositoryImpl", + "type": "class", + "purpose": "Concrete repository storing and retrieving Conversation entities from a SQL database via Diesel.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 27, + "context": "use crate::conversation::ConversationRepositoryImpl;" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 43, + "context": "conversation_repository: Arc," + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 67, + "context": "let conversation_repository = Arc::new(ConversationRepositoryImpl::new(" + } + ] + }, + { + "name": "upsert_conversation", + "type": "function", + "purpose": "Inserts or updates a conversation record within the workspace using Diesel's on_conflict.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 116, + "context": ".upsert_conversation(conversation)" + } + ] + }, + { + "name": "get_all_conversations", + "type": "function", + "purpose": "Retrieves recent conversations for the workspace with optional limit and excludes empty contexts.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 134, + "context": ".get_all_conversations(limit)" + } + ] + }, + { + "name": "delete_conversation", + "type": "function", + "purpose": "Removes a conversation row scoped to the current workspace id for security.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 144, + "context": ".delete_conversation(conversation_id)" + } + ] + } + ], + "semantic_tags": [ + "database", + "persistence", + "conversations", + "diesel", + "repository" + ], + "handles_entities": [ + "Conversation", + "ConversationRecord", + "Workspace" + ], + "key_behaviors": [ + "persists and updates conversations in the workspace database", + 
"fetches conversations, last active conversation, and deletes by id", + "filters out empty contexts when listing or getting last conversation" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Tests for legacy conversation deserialization", + "problem": "No tests verified migration from legacy tool value formats.", + "root_cause": "Backward compatibility code lacked explicit coverage.", + "solution": "Added unit tests that deserialize JSON with Pair, Markdown, and FileDiff variants and assert converted domain ToolOutput values match expectations (e.g., Pair => first element).", + "lesson_learned": "Whenever adding migration/parsing code for legacy serialized formats, add representative tests with actual legacy JSON examples to avoid regressions.", + "commits": [ + "39977d3" + ], + "constructs": [ + "tests::test_legacy_tool_value_pair_deserialization", + "tests::test_legacy_tool_value_markdown_deserialization", + "tests::test_legacy_tool_value_file_diff_deserialization" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_repo/src/conversation/conversation_repo.rs (three legacy deserialization tests)" + ], + "test_functions": [], + "source_commits": [ + "39977d3" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/retry.rs": { + "short_description": "Classifies errors to decide whether they are retryable based on heuristics", + "category": "SOURCE_CODE", + "description": "Contains logic to map various HTTP, SSE, and provider-specific errors into retryable domain errors based on configured status codes and transport-error heuristics (e.g., ECONNRESET, ETIMEDOUT, Anthropic overloaded). 
The core into_retry function inspects many error types and wraps retryable ones as DomainError::Retryable for upstream retry handling.", + "key_constructs": [ + { + "name": "into_retry", + "type": "function", + "purpose": "Main entry that inspects an anyhow::Error and the RetryConfig to return a Retryable-wrapped error when applicable.", + "callers": [ + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 17, + "context": "use crate::provider::retry::into_retry;" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 946, + "context": "BedrockProvider::new(provider).map_err(|e| into_retry(e, &retry_config))?;" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 951, + "context": ".map_err(|e| into_retry(e, &retry_config))?;" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 954, + "context": "item.map_err(|e| into_retry(e, &retry_config))" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 964, + "context": ".map_err(|e| into_retry(e, &retry_config))" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 21, + "context": "use crate::provider::retry::into_retry;" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 345, + "context": ".map_err(|e| into_retry(e, &retry_config))?;" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 348, + "context": "item.map_err(|e| into_retry(e, &retry_config))" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 359, + "context": ".map_err(|e| into_retry(e, &retry_config))" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 18, + "context": "use crate::provider::retry::into_retry;" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 347, + "context": ".map_err(|e| into_retry(e, &retry_config))?;" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 350, + "context": 
"item.map_err(|e| enhance_error(into_retry(e, &retry_config), &provider_id))" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 360, + "context": ".map_err(|e| into_retry(e, &retry_config))" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 13, + "context": "use crate::provider::retry::into_retry;" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 204, + "context": ".map_err(|e| into_retry(e, &retry_config))?;" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 207, + "context": "item.map_err(|e| into_retry(e, &retry_config))" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 218, + "context": ".map_err(|e| into_retry(e, &retry_config))" + } + ] + }, + { + "name": "is_api_transport_error", + "type": "function", + "purpose": "Detects transport-like codes embedded in provider ErrorResponse payloads (e.g., ECONNRESET)." + }, + { + "name": "get_api_status_code", + "type": "function", + "purpose": "Extracts status codes from openai::Error responses, including nested structures." + }, + { + "name": "is_anthropic_overloaded_error", + "type": "function", + "purpose": "Detects Anthropic SSE overloaded errors reported as event payloads that should be retried." 
+ } + ], + "semantic_tags": [ + "retry", + "error-handling", + "providers", + "transport", + "heuristics" + ], + "handles_entities": [ + "ErrorResponse", + "DomainError" + ], + "key_behaviors": [ + "classifies errors as retryable based on status codes and transport heuristics", + "wraps retryable errors for upstream retry logic" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Treat Anthropic overloaded SSE event as retryable", + "problem": "Anthropic SSE overloaded errors arrived as an SSE payload rather than HTTP status, previously not recognized as retryable.", + "root_cause": "into_retry only inspected HTTP status codes and generic SSE parse errors; Anthropic's overloaded error is an SSE domain error type.", + "solution": "Added is_anthropic_overloaded_error detection checking downcast to AnthropicError::OverloadedError and treat it as retryable. Added unit tests.", + "lesson_learned": "Domain-specific SSE/streaming errors may bypass HTTP status handling; map stream-level domain errors into retry semantics.", + "commits": [ + "304e4e7" + ], + "constructs": [ + "into_retry", + "is_anthropic_overloaded_error" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_repo/src/provider/retry.rs (unit tests added inline)" + ], + "source_commits": [ + "304e4e7" + ] + } + }, + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Treat request-level reqwest errors as retryable", + "problem": "Some transient errors from reqwest were not considered retryable (e.g., incomplete response/incomplete message errors), causing immediate failures instead of retries.", + "root_cause": "Predicate only checked is_timeout() || is_connect() but not is_request() on reqwest::Error.", + "solution": "Include e.is_request() in the transport error predicate, and added a test that triggers incomplete TCP response to verify it's retryable.", + "commits": [ + "3ce465e" + ], + "constructs": [ + "is_req_transport_error", + "is_retryable", + 
"into_retry" + ] + } + ], + "tests": { + "exercised_by": [], + "test_functions": [ + "test_incomplete_message_is_retryable" + ], + "source_commits": [ + "3ce465e" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/fs_read.rs": { + "short_description": "Filesystem read tool service with MIME detection, size checks, and line truncation", + "category": "SOURCE_CODE", + "description": "Implements ForgeFsRead which reads files via an infra, enforces size and line limits, detects MIME types (using content magic or extension), handles visual files by returning base64 images, and truncates long lines. It ensures safe absolute path usage, computes content hashes, and exposes assert_file_size and other helpers; tests cover detection, truncation, and size validation.", + "key_constructs": [ + { + "name": "ForgeFsRead", + "type": "class", + "purpose": "Service that implements FsReadService to read files with validation, truncation, and MIME handling.", + "callers": [ + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 28, + "context": "ForgeFetch, ForgeFollowup, ForgeFsPatch, ForgeFsRead, ForgeFsRemove, ForgeFsSearch," + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 67, + "context": "file_read_service: Arc>," + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 122, + "context": "let file_read_service = Arc::new(ForgeFsRead::new(infra.clone()));" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 221, + "context": "type FsReadService = ForgeFsRead;" + } + ] + }, + { + "name": "assert_file_size", + "type": "function", + "purpose": "Validates that a file's byte size does not exceed a configured maximum, returning an error otherwise.", + "callers": [ + { + "file": "crates/forge_services/src/tool_services/image_read.rs", + "line": 64, + "context": "crate::tool_services::fs_read::assert_file_size(&*self.infra, path, 
max_image_size_bytes)" + } + ] + }, + { + "name": "detect_mime_type", + "type": "function", + "purpose": "Detects MIME type from file content (magic numbers via infer) or falls back to extension mapping." + }, + { + "name": "truncate_line", + "type": "function", + "purpose": "Truncates lines safely at character boundaries and appends a truncation notice when exceeding max chars." + } + ], + "semantic_tags": [ + "filesystem", + "mime-detection", + "file-reading", + "validation", + "tools" + ], + "handles_entities": [ + "File", + "Image", + "FileInfo", + "ReadOutput" + ], + "key_behaviors": [ + "reads and validates file contents with size and line/char limits", + "detects visual files and returns base64 image payloads", + "truncates overly long lines and computes content hashes" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/error.rs": { + "short_description": "Application-level error enum for tool and agent runtime errors", + "category": "SOURCE_CODE", + "description": "Defines the forge_app::Error enum representing domain and runtime error conditions (invalid tool args, not found, timeouts, auth states, missing providers/models, interrupted tool executions). This centralized error type is used across the app layer to provide consistent error messages and classifications for callers.", + "key_constructs": [ + { + "name": "Error", + "type": "class", + "purpose": "An enum of application-level errors with variants for argument errors, tool not found, timeouts, auth, agent/conversation not found, and missing provider/model.", + "callers": [ + { + "file": "crates/forge_app/src/app.rs", + "line": 86, + "context": ".ok_or(crate::Error::AgentNotFound(agent_id.clone()))?" 
+ }, + { + "file": "crates/forge_app/src/agent_executor.rs", + "line": 13, + "context": "use crate::error::Error;" + }, + { + "file": "crates/forge_app/src/agent_executor.rs", + "line": 64, + "context": ".ok_or(Error::ConversationNotFound { id: conversation_id })?" + }, + { + "file": "crates/forge_app/src/agent_executor.rs", + "line": 115, + "context": "return Err(Error::AgentToolInterrupted(reason))" + }, + { + "file": "crates/forge_app/src/agent_executor.rs", + "line": 135, + "context": "Err(Error::EmptyToolResponse.into())" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 19, + "context": "use crate::error::Error;" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 57, + "context": ".context(Error::CallTimeout {" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 209, + "context": "Err(Error::NotFound(input.name).into())" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 345, + "context": "fn validate_tool_call(agent: &Agent, tool_name: &ToolName) -> Result<(), Error> {" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 357, + "context": "return Err(Error::NotAllowed { name: tool_name.clone(), supported_tools });" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 383, + "context": ") -> Result<(), Error> {" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 414, + "context": "return Err(Error::UnsupportedModality {" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 434, + "context": "use crate::error::Error;" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 512, + "context": "let expected = Error::NotAllowed {" + } + ] + } + ], + "semantic_tags": [ + "errors", + "application-layer", + "tools", + "authentication", + "domain" + ], + "handles_entities": [], + "key_behaviors": [ + "represents and formats application-level error conditions" + ], + "generated_at_commit": 
"7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/search_dedup.rs": { + "short_description": "Deduplicate semantic code search results across queries", + "category": "SOURCE_CODE", + "description": "Provides logic to deduplicate nodes returned by multiple semantic search queries, keeping each node only in the query where it has the best score. It defines a Score ordering that prioritizes relevance, then distance, then query index and uses that to prune duplicated nodes across query result buckets.", + "key_constructs": [ + { + "name": "Score", + "type": "class", + "purpose": "Tracks a node's score (relevance, distance) and query index and implements ordering to pick the best occurrence." + }, + { + "name": "Score::new", + "type": "function", + "purpose": "Constructs a Score from a query index and a Node result." + }, + { + "name": "deduplicate_results", + "type": "function", + "purpose": "Mutates a slice of per-query Node vectors to keep each node only in the query where it scored best.", + "callers": [ + { + "file": "crates/forge_app/src/tool_executor.rs", + "line": 213, + "context": "crate::search_dedup::deduplicate_results(&mut results);" + } + ] + } + ], + "semantic_tags": [ + "semantic-search", + "deduplication", + "ranking", + "results", + "node" + ], + "handles_entities": [ + "Node", + "NodeId" + ], + "key_behaviors": [ + "deduplicates search results across multiple queries", + "retains each node in the query with the best score" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/lib.rs": { + "short_description": "Top-level re-exports and Result alias for config crate", + "category": "SOURCE_CODE", + "description": "Re-exports config-related modules and types from the crate and exposes a Result alias using the crate's Error type. 
Acts as the public entry point for forge_config functionality so other crates import config types from a single place.", + "key_constructs": [ + { + "name": "Result", + "type": "constant", + "purpose": "Type alias for std::result::Result used across the crate." + }, + { + "name": "pub use config::*", + "type": "function", + "purpose": "Re-exports the main config types and helpers for external use." + } + ], + "semantic_tags": [ + "configuration", + "re-exports", + "schema", + "serialization", + "errors" + ], + "handles_entities": [], + "key_behaviors": [ + "exposes configuration modules and a unified Result alias for consumers" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/retry.rs": { + "short_description": "Retry/backoff configuration struct", + "category": "SOURCE_CODE", + "description": "Defines RetryConfig which captures retry/backoff policy parameters (initial delay, factor, status codes, max attempts, etc.). It is a serializable configuration type used to control retry behavior for network or transient operations.", + "key_constructs": [ + { + "name": "RetryConfig", + "type": "class", + "purpose": "Holds configuration fields controlling retry delays, backoff factor, max attempts and status codes to retry.", + "callers": [ + { + "file": "crates/forge_app/src/retry.rs", + "line": 4, + "context": "use forge_config::RetryConfig;" + }, + { + "file": "crates/forge_app/src/retry.rs", + "line": 8, + "context": "config: &RetryConfig," + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 12, + "context": "AutoDumpFormat, Compact, Decimal, HttpConfig, ModelConfig, ReasoningConfig, RetryConfig, Update," + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 109, + "context": "pub retry: Option," + }, + { + "file": "crates/forge_repo/src/provider/retry.rs", + "line": 4, + "context": "use forge_config::RetryConfig;" + }, + { + "file": "crates/forge_repo/src/provider/retry.rs", + "line": 8, 
+ "context": "pub fn into_retry(error: anyhow::Error, retry_config: &RetryConfig) -> anyhow::Error {" + }, + { + "file": "crates/forge_repo/src/provider/retry.rs", + "line": 135, + "context": "fn fixture_retry_config(codes: Vec) -> RetryConfig {" + }, + { + "file": "crates/forge_repo/src/provider/retry.rs", + "line": 136, + "context": "RetryConfig::default().status_codes(codes)" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 110, + "context": "ctx.config.retry = Some(forge_config::RetryConfig {" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 6, + "context": "use forge_config::RetryConfig;" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 927, + "context": "retry_config: Arc," + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 931, + "context": "pub fn new(retry_config: Arc) -> Self {" + } + ] + } + ], + "semantic_tags": [ + "retry", + "backoff", + "configuration", + "resilience", + "error-handling" + ], + "handles_entities": [ + "RetryConfig" + ], + "key_behaviors": [ + "configures retry and backoff policies for infra operations" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_display/src/markdown.rs": { + "short_description": "Render markdown for terminal with syntax highlighting", + "category": "SOURCE_CODE", + "description": "Provides MarkdownFormat which formats markdown for terminal output using termimad and a code block parser with syntax highlighting. 
It also normalizes excessive newlines and restores highlighted code blocks into the rendered output.", + "key_constructs": [ + { + "name": "MarkdownFormat", + "type": "class", + "purpose": "Encapsulates a termimad skin, newline normalization settings, and a syntax highlighter for rendering markdown for terminals.", + "callers": [ + { + "file": "crates/forge_display/src/lib.rs", + "line": 8, + "context": "pub use markdown::MarkdownFormat;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 20, + "context": "use forge_display::MarkdownFormat;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 103, + "context": "markdown: MarkdownFormat," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 234, + "context": "markdown: MarkdownFormat::new()," + } + ] + }, + { + "name": "MarkdownFormat::render", + "type": "function", + "purpose": "Render markdown input to ANSI-formatted terminal text with code highlighting and newline trimming." + }, + { + "name": "MarkdownFormat::strip_excessive_newlines", + "type": "function", + "purpose": "Reduce runs of consecutive newlines to a configurable maximum to improve terminal output." 
+ } + ], + "semantic_tags": [ + "markdown", + "terminal-formatting", + "syntax-highlighting", + "ansi", + "code-blocks" + ], + "handles_entities": [], + "key_behaviors": [ + "renders markdown for terminal display with code highlighting", + "normalizes consecutive newlines in markdown content" + ], + "insights": [ + { + "type": "performance", + "category": "Performance", + "title": "Lazily initialize SyntaxHighlighter", + "problem": "SyntaxHighlighter was initialized eagerly leading to unnecessary startup cost even when highlighting wasn't used.", + "root_cause": "highlighter field held a full SyntaxHighlighter created at MarkdownFormat construction.", + "solution": "Replace eager SyntaxHighlighter with OnceLock and call get_or_init on demand.", + "lesson_learned": "For heavy initializations that may not be used in all runs, lazy statics or OnceLock can reduce startup latency.", + "commits": [ + "54fe7a4" + ], + "constructs": [ + "MarkdownFormat::new", + "MarkdownFormat::render/format" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/error.rs": { + "short_description": "Domain error enum and helpers", + "category": "SOURCE_CODE", + "description": "Defines a comprehensive Error enum representing domain-level failures across agents, tools, providers, and workspace operations, plus helper conversion methods. It also exposes Result and stream type aliases used throughout the domain crates.", + "key_constructs": [ + { + "name": "Error", + "type": "class", + "purpose": "Enumerates domain-specific error variants (tool call errors, provider issues, indexing errors, etc.) 
with Display messages.", + "callers": [ + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 221, + "context": ".map_err(crate::Error::Retryable)?;" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 268, + "context": "return Err(crate::Error::EmptyCompletion.into_retryable().into());" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 1170, + "context": "use crate::Error;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 125, + "context": ".ok_or_else(|| Error::provider_not_available(id.clone()))?)" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 33, + "context": ".ok_or_else(|| forge_domain::Error::NoDefaultProvider.into())" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 45, + "context": ".ok_or(forge_domain::Error::NoDefaultProvider)?;" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 56, + "context": ".ok_or(forge_domain::Error::NoDefaultProvider)?," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 66, + "context": ".ok_or_else(|| forge_domain::Error::no_default_model(provider_id.clone()).into())" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 68, + "context": "Err(forge_domain::Error::no_default_model(provider_id.clone()).into())" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 1144, + "context": "type Error = crate::Error;" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 1169, + "context": ".map_err(|error| crate::Error::AgentCallArgument { error })" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 1188, + "context": "type Error = crate::Error;" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 1191, + "context": "serde_json::from_value(value).map_err(|error| crate::Error::AgentCallArgument { error })" + }, + { + "file": 
"crates/forge_app/src/dto/openai/response.rs", + "line": 370, + "context": ".ok_or(forge_domain::Error::ToolCallMissingName)?," + }, + { + "file": "crates/forge_services/src/provider_auth.rs", + "line": 41, + "context": ".ok_or_else(|| forge_domain::Error::provider_not_available(provider_id.clone()))?;" + }, + { + "file": "crates/forge_services/src/provider_auth.rs", + "line": 119, + "context": ".ok_or_else(|| forge_domain::Error::provider_not_available(provider_id.clone()))?;" + }, + { + "file": "crates/forge_services/src/provider_auth.rs", + "line": 159, + "context": "|| forge_domain::Error::ProviderNotAvailable {" + }, + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 9, + "context": "use crate::{Error, Result, ToolCallArguments, ToolName, ToolResult};" + }, + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 206, + "context": "pub fn try_from_xml(input: &str) -> std::result::Result, Error> {" + }, + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 211, + "context": "json_repair_parse(content).map_err(|repair_error| Error::ToolCallArgument {" + } + ] + }, + { + "name": "Result", + "type": "constant", + "purpose": "Type alias for std::result::Result used in domain APIs." 
+ }, + { + "name": "ToolCallArgumentError", + "type": "class", + "purpose": "Wrapper for deserialization errors of tool call arguments with a custom Display implementation.", + "callers": [ + { + "file": "crates/forge_app/src/error.rs", + "line": 1, + "context": "use forge_domain::{ConversationId, InterruptionReason, ToolCallArgumentError, ToolName};" + }, + { + "file": "crates/forge_app/src/error.rs", + "line": 6, + "context": "CallArgument(ToolCallArgumentError)," + } + ] + } + ], + "semantic_tags": [ + "error-handling", + "domain-errors", + "providers", + "tools", + "workspace" + ], + "handles_entities": [ + "AgentId", + "ConversationId", + "ProviderId", + "WorkspaceId" + ], + "key_behaviors": [ + "represents and formats domain error cases", + "provides helpers to convert and manufacture specific error variants" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/line_numbers.rs": { + "short_description": "Utility to number lines of text for display", + "category": "SOURCE_CODE", + "description": "Implements LineNumbers trait and NumberedContent to produce right-aligned, numbered text suitable for display (e.g., file snippets). It calculates required width and formats each line with its line number, preserving blank lines and alignment across digit boundaries.", + "key_constructs": [ + { + "name": "NumberedContent", + "type": "class", + "purpose": "Holds start offset and raw content and implements Display to produce numbered lines." + }, + { + "name": "LineNumbers", + "type": "class", + "purpose": "Trait providing to_numbered and to_numbered_from helpers for types convertible to &str." 
+ } + ], + "semantic_tags": [ + "text-formatting", + "display", + "line-numbering", + "util" + ], + "handles_entities": [], + "key_behaviors": [ + "renders text with prefixed, right-aligned line numbers", + "supports custom starting line offsets" + ], + "insights": [ + { + "type": "refactoring", + "category": "API", + "title": "Change LineNumbers to return a Displayable NumberedContent instead of String", + "problem": "Previously to_numbered_from returned a String, but callers needed both the numbered string and access to raw content for hashing. Additionally, returning String prevented lazy formatting via Display.", + "root_cause": "Line-numbering was implemented as an owned String transformation; this made it easy to accidentally hash the transformed content instead of raw content and cost extra allocations.", + "solution": "Introduce NumberedContent<'a> that implements Display; LineNumbers::to_numbered/from return NumberedContent which can be converted to string when needed (.to_string()). Tests updated to call to_string() where needed.", + "commits": [ + "70cba43" + ], + "constructs": [ + "NumberedContent", + "LineNumbers::to_numbered", + "LineNumbers::to_numbered_from" + ] + } + ], + "tests": { + "exercised_by": [], + "test_functions": [ + "existing to_numbered / to_numbered_from tests updated to use .to_string()" + ], + "source_commits": [ + "70cba43" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/reasoning.rs": { + "short_description": "Aggregate streaming reasoning parts into full reasoning entries", + "category": "SOURCE_CODE", + "description": "Defines ReasoningDetail and Reasoning enums used to represent partial (streamed) and full reasoning results. 
Provides from_parts which merges streaming reasoning parts by type, concatenates text fields and picks first non-empty metadata values to produce consolidated reasoning entries.", + "key_constructs": [ + { + "name": "ReasoningDetail", + "type": "class", + "purpose": "Represents a single reasoning fragment or full entry with text, signature, data, id and type.", + "callers": [ + { + "file": "crates/forge_repo/src/conversation/conversation_record.rs", + "line": 178, + "context": "fn from(record: ReasoningFullRecord) -> Self {" + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 155, + "context": "impl From for forge_domain::ReasoningDetail {" + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 157, + "context": "forge_domain::ReasoningDetail {" + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 249, + "context": "fn into_reasoning_detail(self) -> forge_domain::ReasoningDetail {" + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 250, + "context": "forge_domain::ReasoningDetail {" + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 156, + "context": "fn from(detail: ReasoningDetail) -> Self {" + } + ] + }, + { + "name": "Reasoning", + "type": "class", + "purpose": "Enum representing either partial streaming parts or full consolidated reasoning entries.", + "callers": [ + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 4, + "context": "use crate::reasoning::{Reasoning, ReasoningFull};" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 244, + "context": ".chain(Reasoning::from_parts(partial_reasoning_details))" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 815, + "context": "use crate::reasoning::{Reasoning, ReasoningFull};" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 833, + "context": 
".add_reasoning_detail(Reasoning::Full(reasoning_full.clone())))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 836, + "context": ".add_reasoning_detail(Reasoning::Part(vec![reasoning_part])))," + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 342, + "context": "resp = resp.add_reasoning_detail(forge_domain::Reasoning::Full(" + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 349, + "context": "resp = resp.add_reasoning_detail(forge_domain::Reasoning::Full(" + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 409, + "context": "resp = resp.add_reasoning_detail(forge_domain::Reasoning::Part(" + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 416, + "context": "resp = resp.add_reasoning_detail(forge_domain::Reasoning::Part(" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 329, + "context": ".add_reasoning_detail(forge_domain::Reasoning::Part(vec![" + } + ] + }, + { + "name": "Reasoning::from_parts", + "type": "function", + "purpose": "Flatten and group streaming parts by type and produce merged full reasoning entries." + } + ], + "semantic_tags": [ + "reasoning", + "streaming", + "aggregation", + "metadata", + "text-merging" + ], + "handles_entities": [ + "ReasoningDetail" + ], + "key_behaviors": [ + "merge streamed reasoning fragments into consolidated reasoning objects", + "group reasoning content by type and preserve first non-empty metadata" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/workspace.rs": { + "short_description": "Workspace UUID identifier type", + "category": "SOURCE_CODE", + "description": "Defines WorkspaceId, a thin wrapper around a UUID used to identify workspaces both locally and on the workspace server. 
Provides generation, parsing from string, and access to the inner UUID to standardize workspace identifiers across the system.", + "key_constructs": [ + { + "name": "WorkspaceId", + "type": "class", + "purpose": "Wrapper type around uuid::Uuid representing a workspace identifier.", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 4065, + "context": "let parsed_ids: Vec = workspace_ids" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 4068, + "context": "forge_domain::WorkspaceId::from_string(id)" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 215, + "context": "async fn delete_workspaces(&self, workspace_ids: Vec) -> Result<()>;" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 230, + "context": "async fn init_workspace(&self, path: PathBuf) -> Result;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 370, + "context": "async fn delete_workspaces(&self, workspace_ids: Vec) -> Result<()> {" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 386, + "context": "async fn init_workspace(&self, path: PathBuf) -> Result {" + }, + { + "file": "crates/forge_services/src/fd.rs", + "line": 7, + "context": "use forge_domain::WorkspaceId;" + }, + { + "file": "crates/forge_services/src/fd.rs", + "line": 87, + "context": "workspace_id: &WorkspaceId," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 11, + "context": "ToolOutput, WorkspaceAuth, WorkspaceId, WorkspaceInfo," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 308, + "context": "async fn delete_workspace(&self, workspace_id: &WorkspaceId) -> anyhow::Result<()>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 311, + "context": "async fn delete_workspaces(&self, workspace_ids: &[WorkspaceId]) -> anyhow::Result<()>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 326, + "context": "async fn init_workspace(&self, path: PathBuf) -> anyhow::Result;" + }, + { + "file": 
"crates/forge_app/src/services.rs", + "line": 1048, + "context": "async fn delete_workspace(&self, workspace_id: &WorkspaceId) -> anyhow::Result<()> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 1054, + "context": "async fn delete_workspaces(&self, workspace_ids: &[WorkspaceId]) -> anyhow::Result<()> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 1076, + "context": "async fn init_workspace(&self, path: PathBuf) -> anyhow::Result {" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 538, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 569, + "context": "workspace_id: &forge_domain::WorkspaceId," + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 597, + "context": "workspace_id: &forge_domain::WorkspaceId," + }, + { + "file": "crates/forge_services/src/sync.rs", + "line": 6, + "context": "use forge_domain::{ApiKey, FileHash, SyncProgress, UserId, WorkspaceId, WorkspaceIndexRepository};" + }, + { + "file": "crates/forge_services/src/sync.rs", + "line": 54, + "context": "workspace_id: WorkspaceId," + } + ] + }, + { + "name": "WorkspaceId::generate", + "type": "function", + "purpose": "Creates a new random WorkspaceId." + }, + { + "name": "WorkspaceId::from_string", + "type": "function", + "purpose": "Parses a WorkspaceId from a string, returning an error for invalid UUIDs." 
+ } + ], + "semantic_tags": [ + "workspace", + "identifiers", + "uuid", + "serialization" + ], + "handles_entities": [ + "WorkspaceId" + ], + "key_behaviors": [ + "generate and parse stable workspace identifiers" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/executor.rs": { + "short_description": "Service to execute shell commands with streaming output", + "category": "SOURCE_CODE", + "description": "Implements ForgeCommandExecutorService which spawns shell commands, streams stdout/stderr to a synchronized console writer, captures output buffers and exit status, and supports environment variable propagation and silent mode. It serializes command execution with a mutex and offers a raw execution path that inherits IO.", + "key_constructs": [ + { + "name": "ForgeCommandExecutorService", + "type": "class", + "purpose": "Service providing methods to execute shell commands with streaming output capture and environment configuration.", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 22, + "context": "use crate::executor::ForgeCommandExecutorService;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 47, + "context": "command_executor_service: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 95, + "context": "command_executor_service: Arc::new(ForgeCommandExecutorService::new(" + }, + { + "file": "crates/forge_infra/src/lib.rs", + "line": 24, + "context": "pub use executor::ForgeCommandExecutorService;" + } + ] + }, + { + "name": "execute_command_internal", + "type": "function", + "purpose": "Internal async routine to prepare, spawn, stream outputs and collect CommandOutput for a command." + }, + { + "name": "OutputPrinterWriter", + "type": "class", + "purpose": "Adaptor implementing std::io::Write to forward bytes to the StdConsoleWriter for stdout or stderr." 
+ }, + { + "name": "stream", + "type": "function", + "purpose": "Async helper that reads from an AsyncRead and writes to a Write, returning the collected bytes." + } + ], + "semantic_tags": [ + "subprocess", + "streaming", + "console", + "environment", + "synchronization" + ], + "handles_entities": [ + "CommandOutput", + "Environment" + ], + "key_behaviors": [ + "execute shell commands and stream output to terminal", + "capture stdout, stderr and exit code for command results", + "support selective environment variable propagation and silent execution" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/src/parser.rs": { + "short_description": "Robust parser that repairs and deserializes malformed JSON-like text", + "category": "SOURCE_CODE", + "description": "Implements JsonRepairParser that incrementally parses and repairs common JSON problems (missing commas, quotes, trailing commas, comments, markdown code fences, etc.) and finally deserializes into a requested serde type. The parser contains many specialized parse_* helpers to tolerate and fix broken inputs often produced by LLMs or mixed content.", + "key_constructs": [ + { + "name": "JsonRepairParser", + "type": "class", + "purpose": "Stateful parser that walks input chars, repairs malformations and builds output JSON string for deserialization." + }, + { + "name": "JsonRepairParser::parse", + "type": "function", + "purpose": "Top-level entry that orchestrates reparsing and returns a deserialized type or a JsonRepairError." + }, + { + "name": "parse_value / parse_object / parse_array / parse_string", + "type": "function", + "purpose": "Core helper methods that handle parsing and repairing JSON values, objects, arrays and strings respectively." 
+ } + ], + "semantic_tags": [ + "json-repair", + "parser", + "error-recovery", + "deserialization", + "robustness" + ], + "handles_entities": [ + "JSON" + ], + "key_behaviors": [ + "repair malformed JSON-like inputs and deserialize into serde types", + "handle comments, markdown fences, missing delimiters and other common issues" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Prevent out-of-bounds panics when slicing strings with multi-byte UTF-8", + "problem": "Indexing into Vec using byte-length based indexes produced panics for strings containing multi-byte UTF-8 characters (out-of-bounds).", + "root_cause": "The code used byte-counts (String::len) to index a Vec and slice strings. Byte and char positions differ for multibyte characters, causing indexing errors.", + "solution": "Switch to character-based indexing (collect chars) to find the proper char index, then convert the char index back to a byte offset via char_indices() before slicing the underlying string. 
Updated both insert_before_last_whitespace and insert_before_last_whitespace_str accordingly.", + "commits": [ + "98bffff" + ], + "constructs": [ + "JsonRepairParser::insert_before_last_whitespace", + "JsonRepairParser::insert_before_last_whitespace_str", + "JsonRepairParser::is_whitespace" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_json_repair/tests/error_cases.rs" + ], + "test_functions": [ + "test_multibyte_unicode_missing_end_quote", + "test_multibyte_unicode_missing_comma_in_object", + "test_multibyte_unicode_missing_closing_brace" + ], + "source_commits": [ + "98bffff" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/conversation_selector.rs": { + "short_description": "TUI helper to list and pick a conversation", + "category": "SOURCE_CODE", + "description": "Builds a porcelain-style table of conversations (title and updated time), displays it via ForgeWidget selection UI and returns the chosen conversation. It filters conversations to those with titles and contexts, computes a human-friendly updated time, and prepositions the selector cursor to the current conversation when present.", + "key_constructs": [ + { + "name": "ConversationSelector", + "type": "class", + "purpose": "Container for selection logic exposing a select_conversation async method to pick a conversation from a list.", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 35, + "context": "use crate::conversation_selector::ConversationSelector;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1771, + "context": "ConversationSelector::select_conversation(&conversations, self.state.conversation_id)" + } + ] + }, + { + "name": "ConversationSelector::select_conversation", + "type": "function", + "purpose": "Formats conversations into an Info/Porcelain table and prompts the user to select one via ForgeWidget." 
+ }, + { + "name": "ConversationRow", + "type": "class", + "purpose": "Internal display row struct used to hold a conversation and its formatted display line for the selector." + } + ], + "semantic_tags": [ + "conversation", + "selection", + "tui", + "porcelain", + "ui" + ], + "handles_entities": [ + "Conversation", + "ConversationId" + ], + "key_behaviors": [ + "display conversations in a selectable tabular UI", + "return the conversation selected by the user" + ], + "insights": [ + { + "type": "refactoring", + "category": "UI", + "title": "Switch conversation selection to porcelain + fzf UI and support cursor preselection", + "problem": "Old selection UI used dialoguer; it also displayed and selected conversations in a way that didn't match shell plugin.", + "root_cause": "Different presentation expectations between CLI and shell plugin and dialoguer limitations.", + "solution": "Filter conversations for those with title and context, build an Info/Porcelain table, drop UUID column, truncate titles, then present rows to ForgeSelect (fzf-backed) with header_lines=1 and starting_cursor based on current conversation id. Returns selected Conversation instead of just id.", + "commits": [ + "7fc0c5e" + ], + "constructs": [ + "ConversationSelector::select_conversation" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_select/src/lib.rs": { + "short_description": "Public re-exports for selection/input widgets", + "category": "SOURCE_CODE", + "description": "Module root that re-exports selection and input builders (ForgeWidget, SelectBuilder, MultiSelectBuilder, InputBuilder) to provide a unified API surface for interactive prompts and selection widgets. It organizes the selection UI components used by the CLI and TUI.", + "key_constructs": [ + { + "name": "ForgeWidget", + "type": "class", + "purpose": "Primary interactive widget used to present selectable lists and prompts." 
+ }, + { + "name": "SelectBuilder", + "type": "class", + "purpose": "Builder for single-selection prompts." + }, + { + "name": "MultiSelectBuilder", + "type": "class", + "purpose": "Builder for multi-selection prompts." + }, + { + "name": "InputBuilder", + "type": "class", + "purpose": "Builder for typed input prompts." + } + ], + "semantic_tags": [ + "ui", + "selection", + "prompts", + "widgets", + "fzf" + ], + "handles_entities": [], + "key_behaviors": [ + "present interactive selection and input prompts to users" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/command.rs": { + "short_description": "Load and parse built-in and custom command definitions", + "category": "SOURCE_CODE", + "description": "Implements CommandLoaderService which loads built-in commands bundled in the binary and discovers custom commands from global and local directories, parsing YAML frontmatter via gray_matter into Command objects. It caches loaded commands and resolves name conflicts by keeping the last occurrence (CWD highest precedence).", + "key_constructs": [ + { + "name": "CommandLoaderService", + "type": "class", + "purpose": "Service that discovers, parses, caches and provides command definitions from multiple sources.", + "callers": [ + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 18, + "context": "use crate::command::CommandLoaderService as ForgeCommandLoaderService;" + } + ] + }, + { + "name": "parse_command_file", + "type": "function", + "purpose": "Parse a markdown file with YAML frontmatter into a typed Command using gray_matter." + }, + { + "name": "resolve_command_conflicts", + "type": "function", + "purpose": "Deduplicate commands by name keeping the last occurrence (implements precedence)." 
+ } + ], + "semantic_tags": [ + "command-loading", + "frontmatter", + "caching", + "file-discovery", + "parsing" + ], + "handles_entities": [ + "Command" + ], + "key_behaviors": [ + "load built-in and custom command definitions", + "parse YAML frontmatter into Command objects", + "resolve command name conflicts with defined precedence" + ], + "insights": [ + { + "type": "feature", + "category": "Configuration", + "title": "Load built-in commands via init_default", + "problem": "Built-in commands were not being loaded from embedded markdown; precedence ordering for built-in vs custom commands needed clarity.", + "root_cause": "init previously merged custom and built-in commands without a dedicated initializer for embedded defaults.", + "solution": "Added init_default() that parses embedded commands (github-pr-description) and calls it first in init() so built-ins have lowest precedence. Added unit test test_init_default_contains_builtin_commands.", + "lesson_learned": "Explicitly separate built-in embedded loading from user/custom loading and test presence of built-ins. When changing precedence, include tests.", + "commits": [ + "4d11b0c" + ], + "constructs": [ + "CommandLoaderService::init_default", + "CommandLoaderService::init", + "parse_command_iter" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_services/src/command.rs (unit test test_init_default_contains_builtin_commands)" + ], + "test_functions": [ + "test_init_default_contains_builtin_commands" + ], + "source_commits": [ + "4d11b0c" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_spinner/src/progress_bar.rs": { + "short_description": "Manage determinate progress bar using indicatif", + "category": "SOURCE_CODE", + "description": "ProgressBarManager wraps indicatif::ProgressBar providing start, update, message and stop functionality including a short sleep for smooth clearing. 
It supports querying active state and is used to surface progress for operations with known totals.", + "key_constructs": [ + { + "name": "ProgressBarManager", + "type": "class", + "purpose": "Manages the lifecycle and updates of a determinate indicatif progress bar.", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 3808, + "context": "use forge_spinner::ProgressBarManager;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3832, + "context": "let mut progress_bar = ProgressBarManager::default();" + } + ] + }, + { + "name": "ProgressBarManager::start", + "type": "function", + "purpose": "Initialize and display a progress bar with a total and message." + }, + { + "name": "ProgressBarManager::stop", + "type": "function", + "purpose": "Finish and clear the progress bar, optionally printing a message after a small delay." + } + ], + "semantic_tags": [ + "progress", + "ui", + "spinner", + "indicatif", + "feedback" + ], + "handles_entities": [], + "key_behaviors": [ + "display and update a determinate progress bar", + "stop and optionally print a completion message" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/rate_limit.rs": { + "short_description": "Simple fixed-window rate limiter for events", + "category": "SOURCE_CODE", + "description": "Implements a tiny fixed-window RateLimiter that counts events per 60-second window and blocks events when the configured maximum is reached, resetting at the next window. 
Intended for throttling telemetry or event dispatch to avoid excessive calls.", + "key_constructs": [ + { + "name": "RateLimiter", + "type": "class", + "purpose": "Tracks a per-minute window_start, count and maximum to permit or block events.", + "callers": [ + { + "file": "crates/forge_tracker/src/dispatch.rs", + "line": 16, + "context": "use crate::rate_limit::RateLimiter;" + }, + { + "file": "crates/forge_tracker/src/dispatch.rs", + "line": 69, + "context": "rate_limiter: Arc>," + }, + { + "file": "crates/forge_tracker/src/dispatch.rs", + "line": 85, + "context": "rate_limiter: Arc::new(Mutex::new(RateLimiter::new(MAX_EVENTS_PER_MINUTE)))," + } + ] + }, + { + "name": "RateLimiter::inc_and_check", + "type": "function", + "purpose": "Public helper that increments the count and returns whether the event is allowed in the current window." + }, + { + "name": "RateLimiter::check_at", + "type": "function", + "purpose": "Internal check that enforces window reset logic and maximum enforcement based on a provided timestamp." 
+ } + ], + "semantic_tags": [ + "rate-limiting", + "throttling", + "telemetry", + "fixed-window", + "events" + ], + "handles_entities": [], + "key_behaviors": [ + "limit events dispatched per minute", + "reset counts when a new time window begins" + ], + "insights": [ + { + "type": "refactoring", + "category": "Concurrency", + "title": "Multiple iterations of rate limiter implementation to fix race conditions", + "problem": "Initial atomic-based RateLimiter implementation had complex compare_exchange logic; later refactors introduced a Mutex-protected state to avoid races and simplify semantics.", + "root_cause": "Atomic-based reset logic can be subtle and vulnerable to races; tests and further usage uncovered race conditions when RateLimiter used concurrently.", + "solution": "Two-step evolution: (a) initial atomic implementation added (commit 884fe78), (b) later refactor replaced atomics with Mutex to make window/counter updates atomic and straightforward, and renamed API to inc_and_check for mutating check. Also updated tests to use check_at and verify behavior deterministically.", + "commits": [ + "884fe78", + "6884ef5", + "7e077ca" + ], + "constructs": [ + "RateLimiter::new", + "RateLimiter::check", + "RateLimiter::check_at", + "RateLimiter::inc_and_check", + "State" + ] + } + ], + "tests": { + "exercised_by": [], + "test_functions": [ + "test_rate_limiter_blocks_after_limit", + "test_rate_limiter_resets_on_new_window" + ], + "source_commits": [ + "6884ef5", + "7e077ca", + "884fe78" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_walker/src/walker.rs": { + "short_description": "Filesystem walker that enumerates files/dirs with limits and binary filtering", + "category": "SOURCE_CODE", + "description": "Provides a configurable directory walker that scans a workspace respecting depth, breadth, file size and total size limits, and optional binary-file skipping. 
Exposes synchronous and async entrypoints and includes tests/fixtures for behavior verification.", + "key_constructs": [ + { + "name": "File", + "type": "class", + "purpose": "Represents a discovered file or directory with path, optional file_name, and size", + "callers": [ + { + "file": "crates/forge_walker/src/lib.rs", + "line": 3, + "context": "pub use walker::{File, Walker};" + } + ] + }, + { + "name": "Walker", + "type": "class", + "purpose": "Configurable walker with settings (cwd, max_depth, max_breadth, size limits, skip_binary) and scanning methods" + }, + { + "name": "Walker::get", + "type": "function", + "purpose": "Async wrapper that spawns a blocking filesystem scan" + }, + { + "name": "Walker::get_blocking", + "type": "function", + "purpose": "Blocking implementation that enumerates files using ignore::WalkBuilder enforcing limits and ignore rules" + }, + { + "name": "Walker::is_likely_binary", + "type": "function", + "purpose": "Heuristic extension-based check to decide whether to skip binary files" + } + ], + "semantic_tags": [ + "filesystem", + "file-enumeration", + "filtering", + "concurrency", + "validation" + ], + "handles_entities": [ + "File", + "Directory" + ], + "key_behaviors": [ + "enumerates workspace files with configurable limits", + "skips binary files based on extension list", + "respects .ignore rules and stops when size/count limits are hit" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Exclude symlinked files/directories from discovery", + "problem": "Symlinked files (including dangling symlinks) were being included in discovery/walker results, causing duplicate or invalid file processing by indexing/sync pipelines.", + "root_cause": "Walker previously followed directory entries without skipping symlink entries.", + "solution": "Add entry.path_is_symlink() check in walker loop to skip symlinks. 
Add tests to ensure symlinks (real and dangling) are excluded.", + "lesson_learned": "Discovery should avoid symlinks to prevent walking external or circular file trees; explicitly test for dangling symlinks.", + "commits": [ + "09fbef3" + ], + "constructs": [ + "Walker::get (loop)", + "entry.path_is_symlink" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_walker/src/walker.rs::test_walker_excludes_dangling_symlinks", + "crates/forge_walker/src/walker.rs::test_walker_excludes_symlinks" + ], + "test_functions": [], + "source_commits": [ + "09fbef3" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "Cargo.toml": { + "short_description": "Workspace Cargo manifest with dependency and build configuration", + "category": "BUILD", + "description": "Top-level Cargo workspace manifest listing all internal crates, workspace-wide dependencies, and release profile settings. Centralizes versions, features and internal crate paths for building the multi-crate Forge project.", + "key_constructs": [], + "semantic_tags": [ + "rust", + "workspace", + "dependencies", + "build-config", + "profile" + ], + "handles_entities": [], + "key_behaviors": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "package.json": { + "short_description": "Node/TypeScript evals and benchmark scripts config", + "category": "CONFIG", + "description": "NPM package manifest for the small TypeScript evaluation/benchmarking tooling used by the repository, declaring scripts (eval, tests) and JS/TS dependencies. 
It's private and used for running local eval tasks and bounty sync scripts.", + "key_constructs": [], + "semantic_tags": [ + "node", + "scripts", + "benchmarks", + "typescript" + ], + "handles_entities": [], + "key_behaviors": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/mod.rs": { + "short_description": "Provider module aggregator and conversion traits for provider types", + "category": "SOURCE_CODE", + "description": "Re-exports provider-related submodules (OpenAI, Anthropic, Google, Bedrock etc.) and exposes common conversion traits used internally to map provider-specific types into domain types. Acts as the central module for provider implementations in the repo crate.", + "key_constructs": [ + { + "name": "pub use chat::*", + "type": "function", + "purpose": "Re-export chat-related items for external use" + }, + { + "name": "IntoDomain", + "type": "trait", + "purpose": "Trait for converting provider-specific types into domain types" + }, + { + "name": "FromDomain", + "type": "trait", + "purpose": "Trait for constructing types from domain values with error handling" + } + ], + "semantic_tags": [ + "providers", + "abstraction", + "conversion", + "integration" + ], + "handles_entities": [ + "Provider-specific responses", + "Domain types" + ], + "key_behaviors": [ + "aggregates provider implementations", + "standardizes conversion between provider and domain models" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/agent_provider_resolver.rs": { + "short_description": "Resolves provider and model selection for agents, with credential handling", + "category": "SOURCE_CODE", + "description": "Service that determines which provider and model to use for a given agent (or defaults), delegating to registry and provider services and handling missing agent fallbacks. 
Designed to centralize provider/model lookup logic used by agent execution flows.", + "key_constructs": [ + { + "name": "AgentProviderResolver", + "type": "class", + "purpose": "Resolver wrapper around shared services to obtain provider and model for an agent", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 8, + "context": "AgentProviderResolver, AgentRegistry, AppConfigService, AuthService, CommandInfra," + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 229, + "context": "let agent_provider_resolver = AgentProviderResolver::new(self.services.clone());" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 292, + "context": "let agent_provider_resolver = AgentProviderResolver::new(self.services.clone());" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 22, + "context": "AgentExt, AgentProviderResolver, ConversationService, EnvironmentInfra, FileDiscoveryService," + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 79, + "context": "let agent_provider_resolver = AgentProviderResolver::new(services.clone());" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 272, + "context": "let agent_provider_resolver = AgentProviderResolver::new(self.services.clone());" + }, + { + "file": "crates/forge_app/src/git_app.rs", + "line": 13, + "context": "use crate::{AgentProviderResolver, EnvironmentInfra, Services};" + }, + { + "file": "crates/forge_app/src/git_app.rs", + "line": 289, + "context": "resolver: &AgentProviderResolver," + }, + { + "file": "crates/forge_app/src/git_app.rs", + "line": 309, + "context": "let agent_provider_resolver = AgentProviderResolver::new(self.services.clone());" + } + ] + }, + { + "name": "AgentProviderResolver::get_provider", + "type": "function", + "purpose": "Return the Provider for a given agent id or default provider" + }, + { + "name": "AgentProviderResolver::get_model", + "type": "function", + "purpose": "Return the ModelId for a given agent id or provider 
default" + } + ], + "semantic_tags": [ + "provider-resolution", + "model-selection", + "credentials", + "agent-management" + ], + "handles_entities": [ + "Agent", + "Provider", + "ModelId" + ], + "key_behaviors": [ + "selects provider for an agent or default", + "selects model for an agent or provider default" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/title_generator.rs": { + "short_description": "Generates conversation titles using an LLM with structured JSON schema output", + "category": "SOURCE_CODE", + "description": "Wraps calls to the agent chat service to produce succinct titles for conversations. It prepares a system prompt template, requests structured JSON schema output and falls back to plain text parsing when providers don't support structured responses.", + "key_constructs": [ + { + "name": "TitleResponse", + "type": "class", + "purpose": "Deserialize target JSON schema containing the generated title" + }, + { + "name": "TitleGenerator", + "type": "class", + "purpose": "Holds services, prompt, model and provider info and performs title generation", + "callers": [ + { + "file": "crates/forge_app/src/hooks/title_generation.rs", + "line": 13, + "context": "use crate::title_generator::TitleGenerator;" + }, + { + "file": "crates/forge_app/src/hooks/title_generation.rs", + "line": 67, + "context": "let generator = TitleGenerator::new(" + } + ] + }, + { + "name": "TitleGenerator::generate", + "type": "function", + "purpose": "Invoke chat agent, parse structured JSON or fallback to plain text, and return the generated title" + } + ], + "semantic_tags": [ + "llm", + "templating", + "title-generation", + "structured-output" + ], + "handles_entities": [ + "Conversation", + "Title" + ], + "key_behaviors": [ + "generates a context-aware title for a conversation", + "uses JSON schema to request structured LLM output" + ], + "insights": [ + { + "type": "refactoring", + "category": "Feature", + "title": 
"TitleGenerator accepts optional provider_id to generate titles using agent's provider", + "problem": "Title generator implicitly used default provider; when agents specify provider it should use agent provider.", + "root_cause": "TitleGenerator previously only stored model id and services; per-agent provider support required passing provider id through.", + "solution": "Add provider_id: Option to TitleGenerator, propagate through TitleGenerationHandler and Orchestrator so title generation uses agent.provider when set.", + "commit": [ + "d9207f", + "0cf8736" + ], + "constructs": [ + "TitleGenerator::new", + "generate" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/auto_dump.rs": { + "short_description": "Defines conversation auto-dump output formats", + "category": "SOURCE_CODE", + "description": "Enum describing supported formats for automatically dumping conversations (JSON or HTML) and unit tests to validate the variants. Used when persisting or exporting conversation transcripts at task completion.", + "key_constructs": [ + { + "name": "AutoDumpFormat", + "type": "constant", + "purpose": "Enum enumerating supported dump formats (Json, Html)" + } + ], + "semantic_tags": [ + "configuration", + "serialization", + "export" + ], + "handles_entities": [ + "Conversation" + ], + "key_behaviors": [ + "specifies the format used when auto-dumping conversations" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/decimal.rs": { + "short_description": "Decimal newtype that serializes to two decimal places for clean TOML output", + "category": "SOURCE_CODE", + "description": "A floating-point newtype that ensures two-decimal-place serialization to avoid noisy float representations in toml_edit output. 
Implements serde, schemars, fake::Dummy and conversion helpers to integrate with configuration types.", + "key_constructs": [ + { + "name": "Decimal", + "type": "class", + "purpose": "Wraps an f64 and provides controlled serialization to two decimal places" + }, + { + "name": "impl serde::Serialize/Deserialize for Decimal", + "type": "function", + "purpose": "Custom (de)serialization that formats values to two decimal places" + }, + { + "name": "impl schemars::JsonSchema for Decimal", + "type": "function", + "purpose": "Expose JSON schema compatibility for Decimal" + } + ], + "semantic_tags": [ + "configuration", + "serialization", + "validation", + "formatting" + ], + "handles_entities": [], + "key_behaviors": [ + "ensures config floats serialize to clean two-decimal values", + "provides conversion helpers for config consumers" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Introduce Decimal newtype to prevent TOML float noise", + "problem": "toml_edit serialized f32/f64 values with long noisy bit-patterns (e.g., 0.10000000149), degrading readability and tests.", + "root_cause": "Direct floating-point serialization emitted binary-representation artifacts.", + "solution": "Introduce Decimal(f64) newtype that formats floats to two decimal places during serde::Serialize, preserving readable TOML output and stable round-trip behavior.", + "lesson_learned": "Use newtypes with custom serde when you need deterministic and human-friendly printed numeric formats in config files.", + "commits": [ + "209cd61" + ], + "constructs": [ + "Decimal", + "impl serde::Serialize for Decimal", + "impl serde::Deserialize for Decimal" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_config/src/compact.rs tests", + "crates/forge_config/src/config.rs tests" + ], + "source_commits": [ + "209cd61" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/http.rs": { + 
"short_description": "HTTP client configuration types (TLS versions/backends and timeouts)", + "category": "SOURCE_CODE", + "description": "Defines configuration structures for HTTP client behavior including TLS versioning, backend selection, timeouts, pool settings, and keep-alive options. Includes unit tests to assert basic construction and enum variants.", + "key_constructs": [ + { + "name": "TlsVersion", + "type": "constant", + "purpose": "Enum of supported TLS protocol versions" + }, + { + "name": "TlsBackend", + "type": "constant", + "purpose": "Enum for choosing TLS implementation backend" + }, + { + "name": "HttpConfig", + "type": "class", + "purpose": "Aggregates HTTP client configuration fields (timeouts, pooling, TLS, keep-alive)", + "callers": [ + { + "file": "crates/forge_config/src/config.rs", + "line": 12, + "context": "AutoDumpFormat, Compact, Decimal, HttpConfig, ModelConfig, ReasoningConfig, RetryConfig, Update," + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 141, + "context": "pub http: Option," + }, + { + "file": "crates/forge_infra/src/http.rs", + "line": 40, + "context": "let http = config.http.unwrap_or(forge_config::HttpConfig {" + } + ] + } + ], + "semantic_tags": [ + "http", + "configuration", + "tls", + "timeouts", + "network" + ], + "handles_entities": [], + "key_behaviors": [ + "captures HTTP client tuning and TLS options for the application" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/legacy.rs": { + "short_description": "Converts legacy JSON config to the new TOML ForgeConfig representation", + "category": "SOURCE_CODE", + "description": "Parses the old ~/forge/.config.json legacy format into an intermediate LegacyConfig, converts it into the newer ForgeConfig shape, and serializes it to TOML for migration. 
Ensures backwards compatibility when loading older user config files.", + "key_constructs": [ + { + "name": "LegacyConfig", + "type": "class", + "purpose": "Deserializable representation of the legacy JSON config layout", + "callers": [ + { + "file": "crates/forge_config/src/reader.rs", + "line": 8, + "context": "use crate::legacy::LegacyConfig;" + }, + { + "file": "crates/forge_config/src/reader.rs", + "line": 123, + "context": "let content = LegacyConfig::read(&Self::config_legacy_path());" + } + ] + }, + { + "name": "LegacyConfig::read", + "type": "function", + "purpose": "Reads and converts a legacy JSON file at a path into TOML string for ForgeConfig" + }, + { + "name": "LegacyConfig::into_forge_config", + "type": "function", + "purpose": "Transforms legacy fields into the partial ForgeConfig structure" + } + ], + "semantic_tags": [ + "migration", + "configuration", + "compatibility", + "serialization" + ], + "handles_entities": [ + "ForgeConfig", + "LegacyConfig" + ], + "key_behaviors": [ + "migrates legacy JSON config into current TOML format", + "preserves only covered fields so defaults are not overwritten" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/model.rs": { + "short_description": "Model and provider identifier types and ModelConfig pairing", + "category": "SOURCE_CODE", + "description": "Defines simple type aliases for ProviderId and ModelId and a ModelConfig struct that pairs optional provider and model selections for different operations. 
Used across configuration and service selection code to propagate model/provider choices.", + "key_constructs": [ + { + "name": "ProviderId", + "type": "constant", + "purpose": "Type alias for provider identifier string" + }, + { + "name": "ModelId", + "type": "constant", + "purpose": "Type alias for model identifier string" + }, + { + "name": "ModelConfig", + "type": "class", + "purpose": "Holds optional provider_id and model_id used for session/commit/suggest settings", + "callers": [ + { + "file": "crates/forge_config/src/config.rs", + "line": 12, + "context": "AutoDumpFormat, Compact, Decimal, HttpConfig, ModelConfig, ReasoningConfig, RetryConfig, Update," + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 194, + "context": "pub session: Option," + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 197, + "context": "pub commit: Option," + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 205, + "context": "pub suggest: Option," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 125, + "context": "use forge_config::{ForgeConfig, ModelConfig};" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 236, + "context": "_ => ModelConfig::default()" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 243, + "context": "ModelConfig::default()" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 250, + "context": "ModelConfig::default()" + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 6, + "context": "use forge_config::{ConfigReader, ForgeConfig, ModelConfig};" + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 38, + "context": "let session = fc.session.get_or_insert_with(ModelConfig::default);" + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 43, + "context": "Some(ModelConfig { provider_id: Some(pid_str), model_id: Some(mid_str) });" + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 47, + "context": 
"fc.commit = mc.map(|m| ModelConfig {" + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 53, + "context": "fc.suggest = Some(ModelConfig {" + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 251, + "context": "use forge_config::ModelConfig as ForgeCfgModelConfig;" + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 278, + "context": "use forge_config::ModelConfig as ForgeCfgModelConfig;" + }, + { + "file": "crates/forge_config/src/legacy.rs", + "line": 6, + "context": "use crate::{ForgeConfig, ModelConfig};" + }, + { + "file": "crates/forge_config/src/legacy.rs", + "line": 62, + "context": "ModelConfig { provider_id: Some(provider_id.to_string()), model_id }" + }, + { + "file": "crates/forge_config/src/legacy.rs", + "line": 67, + "context": ".map(|c| ModelConfig { provider_id: c.provider, model_id: c.model });" + }, + { + "file": "crates/forge_config/src/legacy.rs", + "line": 71, + "context": ".map(|s| ModelConfig { provider_id: s.provider, model_id: s.model });" + }, + { + "file": "crates/forge_config/src/reader.rs", + "line": 139, + "context": "use crate::ModelConfig;" + } + ] + } + ], + "semantic_tags": [ + "configuration", + "model-selection", + "providers" + ], + "handles_entities": [ + "ModelConfig" + ], + "key_behaviors": [ + "represents provider+model selection for operations" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/percentage.rs": { + "short_description": "Validated percentage type constrained to [0.0,1.0] with two-decimal serialization", + "category": "SOURCE_CODE", + "description": "Wraps Decimal to provide a percentage value validated at deserialization and construction time, ensuring config values are within [0,1]. 
Implements serde, schemars, fake::Dummy and includes tests for range validation and TOML formatting.", + "key_constructs": [ + { + "name": "Percentage", + "type": "class", + "purpose": "Validated percentage newtype that serializes via Decimal", + "callers": [ + { + "file": "crates/forge_config/src/compact.rs", + "line": 8, + "context": "use crate::Percentage;" + }, + { + "file": "crates/forge_config/src/compact.rs", + "line": 59, + "context": "pub eviction_window: Percentage," + }, + { + "file": "crates/forge_config/src/compact.rs", + "line": 103, + "context": "eviction_window: Percentage::new(0.2).unwrap()," + }, + { + "file": "crates/forge_config/src/compact.rs", + "line": 115, + "context": "eviction_window: Percentage::from((0.0f64..=1.0f64).fake_with_rng::(rng))," + }, + { + "file": "crates/forge_config/src/compact.rs", + "line": 136, + "context": "eviction_window: Percentage::new(0.2).unwrap()," + } + ] + }, + { + "name": "Percentage::new", + "type": "function", + "purpose": "Constructor enforcing value in [0.0, 1.0]" + }, + { + "name": "serde impl for Percentage", + "type": "function", + "purpose": "Custom (de)serialization that delegates to Decimal and enforces validation" + } + ], + "semantic_tags": [ + "configuration", + "validation", + "serialization", + "numeric" + ], + "handles_entities": [], + "key_behaviors": [ + "validates percentage fields in config", + "produces clean two-decimal TOML output for percentage values" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Validation", + "title": "Introduce Percentage newtype with validation and Decimal serialization", + "problem": "Previously percentage fields used raw floats leading to potential out-of-range values and noisy serialization.", + "root_cause": "No dedicated type-level validation for config percentages; serialization used underlying float formatting.", + "solution": "Add Percentage(Decimal) newtype with constructor enforcing [0.0,1.0] range at deserialization time and delegate 
formatting to Decimal.", + "lesson_learned": "Encapsulate domain constraints in dedicated types so deserialization can reject invalid config early and serializers can control textual form.", + "commits": [ + "209cd61" + ], + "constructs": [ + "Percentage", + "Percentage::new", + "impl serde::Serialize for Percentage", + "impl<'de> serde::Deserialize<'de> for Percentage" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_config/src/compact.rs tests", + "crates/forge_config/src/percentage.rs tests" + ], + "source_commits": [ + "209cd61" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/writer.rs": { + "short_description": "Writes ForgeConfig to disk with schema header for editor validation", + "category": "SOURCE_CODE", + "description": "Serializes a ForgeConfig to a TOML file and writes it to the given path, creating parent directories as needed and prepending a $schema pointer for editor tooling. Used to persist user configuration files in a predictable format.", + "key_constructs": [ + { + "name": "ConfigWriter", + "type": "class", + "purpose": "Encapsulates a ForgeConfig and provides a write method to persist it", + "callers": [ + { + "file": "crates/forge_config/src/config.rs", + "line": 10, + "context": "use crate::writer::ConfigWriter;" + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 295, + "context": "ConfigWriter::new(self.clone()).write(&path)" + } + ] + }, + { + "name": "ConfigWriter::write", + "type": "function", + "purpose": "Serializes the config to pretty TOML and writes it to disk including a $schema header" + } + ], + "semantic_tags": [ + "configuration", + "io", + "serialization", + "persistence" + ], + "handles_entities": [ + "ForgeConfig" + ], + "key_behaviors": [ + "persists user Forge configuration to disk with editor schema metadata" + ], + "insights": [ + { + "type": "refactoring", + "category": "Configuration", + "title": "Inject $schema key into 
generated config for editor validation", + "problem": "Generated forge config files did not include a $schema hint so editors couldn't provide validation/autocomplete.", + "root_cause": "toml output was written as-is without a $schema helper line.", + "solution": "Prefix generated TOML with a $schema = \"https://forgecode.dev/schema.json\" line so editors pick up JSON Schema-based validation.", + "lesson_learned": "Small metadata lines (like $schema) significantly improve UX for generated config files \u2014 include them at generation time.", + "commits": [ + "e40da50" + ], + "constructs": [ + "ConfigWriter::write" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_display/src/code.rs": { + "short_description": "Markdown code-block extraction and terminal syntax highlighting", + "category": "SOURCE_CODE", + "description": "Parses markdown to extract fenced code blocks (including indented ones), replaces them with placeholders, and later restores highlighted code using syntect. 
Provides a reusable SyntaxHighlighter and CodeBlockParser to enable syntax-colored rendering in terminal outputs.", + "key_constructs": [ + { + "name": "SyntaxHighlighter", + "type": "class", + "purpose": "Loads syntect resources and highlights code strings for terminal output", + "callers": [ + { + "file": "crates/forge_display/src/markdown.rs", + "line": 8, + "context": "use crate::code::{CodeBlockParser, SyntaxHighlighter};" + }, + { + "file": "crates/forge_display/src/markdown.rs", + "line": 18, + "context": "highlighter: OnceLock," + }, + { + "file": "crates/forge_display/src/markdown.rs", + "line": 60, + "context": "let highlighter = self.highlighter.get_or_init(SyntaxHighlighter::default);" + } + ] + }, + { + "name": "CodeBlock", + "type": "class", + "purpose": "Represents an extracted code block with code and language" + }, + { + "name": "CodeBlockParser", + "type": "class", + "purpose": "Extracts code blocks from markdown, stores placeholders, and restores highlighted blocks into rendered markdown", + "callers": [ + { + "file": "crates/forge_display/src/markdown.rs", + "line": 8, + "context": "use crate::code::{CodeBlockParser, SyntaxHighlighter};" + }, + { + "file": "crates/forge_display/src/markdown.rs", + "line": 56, + "context": "let processed = CodeBlockParser::new(&content);" + } + ] + }, + { + "name": "CodeBlockParser::new", + "type": "function", + "purpose": "Parses content into markdown with placeholders and collects code blocks" + }, + { + "name": "CodeBlockParser::restore", + "type": "function", + "purpose": "Replaces placeholders in rendered markdown with highlighted code blocks" + } + ], + "semantic_tags": [ + "markdown", + "syntax-highlighting", + "terminal", + "rendering" + ], + "handles_entities": [], + "key_behaviors": [ + "extracts and highlights code blocks from markdown for terminal display", + "preserves indentation and supports multiple languages" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_domain/src/attachment.rs": { + "short_description": "Attachment types and parser for file/directory references in chat messages", + "category": "SOURCE_CODE", + "description": "Defines Attachment and AttachmentContent variants (Image, FileContent, DirectoryListing) plus parsing utilities to extract file tags expressed as @[path:line:line#symbol] from text. Provides helpers to inspect attachments and a robust FileTag parser handling windows drives, line ranges, symbols and deduplication.", + "key_constructs": [ + { + "name": "Attachment", + "type": "class", + "purpose": "Represents an attachment with resolved content and original path reference", + "callers": [ + { + "file": "crates/forge_domain/src/context.rs", + "line": 21, + "context": "Attachment, AttachmentContent, ConversationId, EventValue, Image, MessagePhase, ModelId," + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 474, + "context": "pub fn add_attachments(self, attachments: Vec, model_id: Option) -> Self {" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 1198, + "context": "let fixture_attachments = vec![Attachment {" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 1226, + "context": "let fixture_attachments = vec![Attachment {" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 1248, + "context": "Attachment {" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 1255, + "context": "Attachment {" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 1280, + "context": "let fixture_attachments = vec![Attachment {" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 1374, + "context": "let fixture_attachments = vec![Attachment {" + }, + { + "file": "crates/forge_domain/src/event.rs", + "line": 8, + "context": "use crate::{Attachment, NamedTool, Template, ToolName};" + }, + { + "file": "crates/forge_domain/src/event.rs", + "line": 50, + "context": "pub attachments: Vec," + } + ] + 
}, + { + "name": "AttachmentContent", + "type": "constant", + "purpose": "Enum of possible resolved attachment contents (Image, FileContent, DirectoryListing)" + }, + { + "name": "Attachment::parse_all", + "type": "function", + "purpose": "Scans text for all file tags in the @[...] format and returns deduplicated FileTag instances" + }, + { + "name": "FileTag", + "type": "class", + "purpose": "Parsed representation of a referenced file with optional location and symbol" + }, + { + "name": "FileTag::parse", + "type": "function", + "purpose": "Nom-based parser that interprets paths, optional line ranges, and symbols inside @[...]" + } + ], + "semantic_tags": [ + "attachments", + "parsing", + "file-references", + "nom", + "chat" + ], + "handles_entities": [ + "Attachment", + "FileTag", + "DirectoryEntry", + "Image", + "FileInfo" + ], + "key_behaviors": [ + "parses inline file references from chat text", + "represents resolved file, image or directory attachments", + "supports line ranges and symbol anchors for file references" + ], + "insights": [ + { + "type": "refactoring", + "category": "API", + "title": "Consolidate file-range metadata into FileInfo", + "problem": "AttachmentContent::FileContent carried multiple separate fields (start_line, end_line, total_lines, content_hash) which resulted in repeated structs across codebase.", + "root_cause": "Duplication and inconsistent usage of range metadata across modules increased risk of mismatch.", + "solution": "Replaced per-field metadata with info: FileInfo. 
Updated range_info() to return values from info.", + "lesson_learned": "Use a single well-typed struct for related metadata to improve consistency and reduce refactor surface.", + "commits": [ + "29db91a" + ], + "constructs": [ + "AttachmentContent::FileContent", + "range_info" + ] + }, + { + "type": "refactoring", + "category": "State Management", + "title": "Add content_hash to FileContent attachment and richer attachment types", + "problem": "Attachment struct lacked raw content hash and more descriptive fields for file content and directory listings.", + "root_cause": "Earlier representation combined path and content without raw-hash, making external-change detection unreliable.", + "solution": "Expanded Attachment and AttachmentContent enums: FileContent now contains content (display text), start_line, end_line, total_lines, and content_hash; added DirectoryListing and DirectoryEntry types, improved doc comments.", + "commits": [ + "70cba43" + ], + "constructs": [ + "Attachment", + "AttachmentContent", + "DirectoryEntry" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/message_pattern.rs": { + "short_description": "Test helper that builds Context message sequences from compact patterns", + "category": "SOURCE_CODE", + "description": "Provides MessagePattern, a small utility used mainly in tests to construct forge_domain::Context instances from a compact string pattern. 
Each character maps to a role or tool-related message, enabling concise fixtures that include user, assistant, system messages and tool call/result examples.", + "key_constructs": [ + { + "name": "MessagePattern", + "type": "class", + "purpose": "Represents a compact pattern string and builds a Context with messages from it", + "callers": [ + { + "file": "crates/forge_app/src/compact.rs", + "line": 656, + "context": "forge_domain::MessagePattern::new(pattern).build()" + }, + { + "file": "crates/forge_domain/src/compact/strategy.rs", + "line": 159, + "context": "use crate::MessagePattern;" + }, + { + "file": "crates/forge_domain/src/compact/strategy.rs", + "line": 162, + "context": "MessagePattern::new(pattern.to_string()).build()" + }, + { + "file": "crates/forge_domain/src/compact/compact_config.rs", + "line": 166, + "context": "use crate::MessagePattern;" + }, + { + "file": "crates/forge_domain/src/compact/compact_config.rs", + "line": 174, + "context": "MessagePattern::new(pattern).build()" + } + ] + }, + { + "name": "MessagePattern::new", + "type": "function", + "purpose": "Creates a new MessagePattern from a string" + }, + { + "name": "MessagePattern::build", + "type": "function", + "purpose": "Converts the pattern into a Context filled with MessageEntry instances and sample tool call/result data" + }, + { + "name": "From<&str> for MessagePattern", + "type": "function", + "purpose": "Convenience conversion from &str to MessagePattern" + }, + { + "name": "tests", + "type": "module", + "purpose": "Unit tests validating pattern translation, content numbering, and tool-call/result behavior" + } + ], + "semantic_tags": [ + "testing-helpers", + "message-construction", + "context", + "tool-calls", + "fixtures" + ], + "handles_entities": [ + "Context", + "MessageEntry", + "ContextMessage", + "ToolCallFull", + "ToolResult", + "ModelId" + ], + "key_behaviors": [ + "constructs message contexts from pattern strings", + "provides reusable test fixtures including tool 
calls/results", + "validates pattern input and message contents in tests" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/model.rs": { + "short_description": "Model metadata types and model identifier wrapper", + "category": "SOURCE_CODE", + "description": "Defines model-related domain types including Model, ModelId, Parameters, and InputModality. It encodes model capabilities, input modalities with defaults, and a small Parameters struct used for tooling support flags, and provides serialization/schema helpers.", + "key_constructs": [ + { + "name": "InputModality", + "type": "class", + "purpose": "Enum of supported input modalities (Text, Image) for models", + "callers": [ + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 7, + "context": "Agent, AgentId, AgentInput, ChatResponse, ChatResponseContent, Environment, InputModality," + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 394, + "context": ".find(|im| matches!(im, InputModality::Image))" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 406, + "context": "InputModality::Text => \"text\".to_string()," + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 407, + "context": "InputModality::Image => \"image\".to_string()," + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 755, + "context": "modalities: Vec," + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 808, + "context": "use forge_domain::InputModality;" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 817, + "context": "let vision_model = create_test_model(\"gpt-4o\", vec![InputModality::Text, InputModality::Image]);" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 817, + "context": "let vision_model = create_test_model(\"gpt-4o\", vec![InputModality::Text, InputModality::Image]);" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + 
"line": 836, + "context": "use forge_domain::InputModality;" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 845, + "context": "let text_only_model = create_test_model(\"gpt-3.5-turbo\", vec![InputModality::Text]);" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 865, + "context": "use forge_domain::{InputModality, ToolCatalog};" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 867, + "context": "let vision_model = create_test_model(\"gpt-4o\", vec![InputModality::Text, InputModality::Image]);" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 867, + "context": "let vision_model = create_test_model(\"gpt-4o\", vec![InputModality::Text, InputModality::Image]);" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 879, + "context": "use forge_domain::{InputModality, ToolCatalog};" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 881, + "context": "let text_only_model = create_test_model(\"gpt-3.5-turbo\", vec![InputModality::Text]);" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 900, + "context": "use forge_domain::{InputModality, ToolCatalog};" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 902, + "context": "let text_only_model = create_test_model(\"gpt-3.5-turbo\", vec![InputModality::Text]);" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 931, + "context": "use forge_domain::{InputModality, ToolCatalog};" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 933, + "context": "let text_only_model = create_test_model(\"gpt-3.5-turbo\", vec![InputModality::Text]);" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 129, + "context": "AnyProvider, ChatRepository, ConfigOperation, Environment, InputModality, MigrationResult," + } + ] + }, + { + "name": "Model", + "type": "class", + "purpose": "Holds metadata about a model such as id, name, 
context length and capabilities", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 24, + "context": "async fn get_models(&self) -> Result>;" + }, + { + "file": "crates/forge_domain/src/system_context.rs", + "line": 5, + "context": "use crate::{Agent, Environment, File, Model, Skill};" + }, + { + "file": "crates/forge_domain/src/system_context.rs", + "line": 121, + "context": "pub model: Option," + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 83, + "context": "async fn get_models(&self) -> Result> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 9, + "context": "FileStatus, Image, McpConfig, McpServers, Model, ModelId, Node, Provider, ProviderId," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 166, + "context": "async fn models(&self, provider: Provider) -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 650, + "context": "async fn models(&self, provider: Provider) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 130, + "context": "Model, ModelId, ModelSource, Provider, ProviderId, ProviderResponse, ProviderTemplate," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 162, + "context": "models: Some(ModelSource::Hardcoded(vec![Model {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 188, + "context": "models: Some(ModelSource::Hardcoded(vec![Model {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 292, + "context": ") -> anyhow::Result> {" + }, + { + "file": "crates/forge_app/src/dto/openai/model.rs", + "line": 93, + "context": "impl From for forge_domain::Model {" + }, + { + "file": "crates/forge_app/src/dto/openai/model.rs", + "line": 119, + "context": "forge_domain::Model {" + }, + { + "file": "crates/forge_app/src/dto/openai/model.rs", + "line": 282, + "context": "let domain_model: forge_domain::Model = model.into();" + }, + { + "file": 
"crates/forge_app/src/dto/openai/model.rs", + "line": 309, + "context": "let domain_model: forge_domain::Model = model.into();" + }, + { + "file": "crates/forge_app/src/dto/openai/model.rs", + "line": 94, + "context": "fn from(value: Model) -> Self {" + }, + { + "file": "crates/forge_services/src/provider_service.rs", + "line": 7, + "context": "AnyProvider, ChatCompletionMessage, Model, ModelId, ProviderId, ResultStream," + }, + { + "file": "crates/forge_services/src/provider_service.rs", + "line": 91, + "context": "async fn models(&self, provider: Provider) -> Result> {" + }, + { + "file": "crates/forge_services/src/provider_service.rs", + "line": 149, + "context": "models: Vec," + }, + { + "file": "crates/forge_services/src/provider_service.rs", + "line": 154, + "context": "fn new(models: Vec) -> Self {" + } + ] + }, + { + "name": "Parameters", + "type": "class", + "purpose": "Small struct describing runtime parameters (e.g., tool_supported)" + }, + { + "name": "ModelId", + "type": "class", + "purpose": "Wrapper around a String for type-safe model identifiers with parsing and display traits", + "callers": [ + { + "file": "crates/forge_app/src/dto/openai/transformers/minimax.rs", + "line": 48, + "context": "use forge_domain::ModelId;" + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/minimax.rs", + "line": 55, + "context": ".model(ModelId::new(model))" + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/minimax.rs", + "line": 118, + "context": "let fixture = Request::default().model(ModelId::new(\"minimax-m2\"));" + }, + { + "file": "crates/forge_app/src/user_prompt.rs", + "line": 248, + "context": "AgentId, AttachmentContent, Context, ContextMessage, ConversationId, FileInfo, ModelId," + }, + { + "file": "crates/forge_app/src/user_prompt.rs", + "line": 268, + "context": "ModelId::from(\"test-model\")," + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 8, + "context": "AuthDetails, ChatCompletionMessage, 
ChatRepository, Context, Model, ModelId, Provider," + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 188, + "context": "model: &ModelId," + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 940, + "context": "model_id: &ModelId," + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 1256, + "context": "use forge_domain::{Model, ModelId, ModelSource};" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 1261, + "context": "id: ModelId::from(\"claude-3-opus\".to_string())," + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 1271, + "context": "id: ModelId::from(\"claude-3-sonnet\".to_string())," + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 90, + "context": "model: &ModelId," + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 199, + "context": "_id: &ModelId," + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 261, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 262, + "context": "Ok(ModelId::new(\"test-model\"))" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 6, + "context": "ChatCompletionMessage, Context, Model, ModelId, ResultStream, Transformer," + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 97, + "context": "model: &ModelId," + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 335, + "context": "model_id: &ModelId," + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 535, + "context": "let model_id = ModelId::new(\"gpt-4\");" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 826, + "context": ".add_message(ContextMessage::user(\"test\", ModelId::new(\"test\").into()))" + } + ] + } + ], + "semantic_tags": [ + "models", + "metadata", + "serialization", + "schema", + "identifiers" + ], + 
"handles_entities": [ + "Model", + "ModelId", + "Parameters" + ], + "key_behaviors": [ + "represents model capabilities and metadata", + "provides default input modality and id helpers" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/update.rs": { + "short_description": "Update scheduling settings and frequency conversion", + "category": "SOURCE_CODE", + "description": "Defines Update and UpdateFrequency types representing auto-update configuration and how often updates should occur. It includes conversion from UpdateFrequency to std::time::Duration and merge/serde support for configuration merging.", + "key_constructs": [ + { + "name": "UpdateFrequency", + "type": "class", + "purpose": "Enum indicating update cadence (Daily, Weekly, Always) with default" + }, + { + "name": "impl From for Duration", + "type": "function", + "purpose": "Converts UpdateFrequency to a Duration used to schedule checks" + }, + { + "name": "Update", + "type": "class", + "purpose": "Configuration struct for update frequency and auto-update toggle" + } + ], + "semantic_tags": [ + "configuration", + "scheduling", + "updates", + "serialization" + ], + "handles_entities": [ + "Update" + ], + "key_behaviors": [ + "represents auto-update configuration", + "converts frequency enums to time durations" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_fs/src/lib.rs": { + "short_description": "Filesystem abstraction and utilities for uniform error handling", + "category": "SOURCE_CODE", + "description": "Provides the ForgeFS filesystem abstraction, centralizing FS helpers, error types and a SHA-256 compute_hash helper used by other filesystem operations. 
It re-exports detection utilities and defines a consistent layer for file operations across the application.", + "key_constructs": [ + { + "name": "ForgeFS", + "type": "class", + "purpose": "Top-level FS abstraction used to implement file operations with standardized errors", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 24, + "context": "use forge_fs::ForgeFS;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 2958, + "context": "let content = ForgeFS::read_utf8(path).await?;" + }, + { + "file": "crates/forge_fs/src/is_binary.rs", + "line": 5, + "context": "impl crate::ForgeFS {" + }, + { + "file": "crates/forge_fs/src/is_binary.rs", + "line": 67, + "context": "let (is_text_or_doc, _) = crate::ForgeFS::is_binary_path(text_file.path()).await?;" + }, + { + "file": "crates/forge_fs/src/is_binary.rs", + "line": 74, + "context": "crate::ForgeFS::is_binary_path(binary_file.path()).await?;" + }, + { + "file": "crates/forge_fs/src/is_binary.rs", + "line": 90, + "context": "let (is_text_or_doc, file_type) = crate::ForgeFS::is_binary_path(png_file.path()).await?;" + }, + { + "file": "crates/forge_fs/src/is_binary.rs", + "line": 99, + "context": "let (is_text_or_doc, _) = crate::ForgeFS::is_binary_path(empty_file.path()).await?;" + }, + { + "file": "crates/forge_fs/src/is_binary.rs", + "line": 17, + "context": "Self::is_binary(&mut file).await" + }, + { + "file": "crates/forge_fs/src/read.rs", + "line": 5, + "context": "impl crate::ForgeFS {" + }, + { + "file": "crates/forge_fs/src/read.rs", + "line": 7, + "context": "Self::read(path)" + }, + { + "file": "crates/forge_fs/src/write.rs", + "line": 6, + "context": "impl crate::ForgeFS {" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 9, + "context": "impl crate::ForgeFS {" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 57, + "context": "let content_hash = crate::ForgeFS::compute_hash(content.as_ref());" + }, + { + "file": 
"crates/forge_fs/src/read_range.rs", + "line": 118, + "context": "let full_hash = crate::ForgeFS::compute_hash(content);" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 121, + "context": "let (result, info) = crate::ForgeFS::read_range_utf8(file.path(), 2, 5).await?;" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 129, + "context": "let (result, info) = crate::ForgeFS::read_range_utf8(file.path(), 1, 3).await?;" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 136, + "context": "let (result, info) = crate::ForgeFS::read_range_utf8(file.path(), 8, 10).await?;" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 143, + "context": "let (result, info) = crate::ForgeFS::read_range_utf8(file.path(), 1, 10).await?;" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 150, + "context": "let (result, info) = crate::ForgeFS::read_range_utf8(file.path(), 5, 5).await?;" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 157, + "context": "let (result, info) = crate::ForgeFS::read_range_utf8(file.path(), 1, 1).await?;" + } + ] + }, + { + "name": "ForgeFS::compute_hash", + "type": "function", + "purpose": "Computes SHA-256 hex hash for given string content used for file-change detection" + }, + { + "name": "is_binary", + "type": "function", + "purpose": "Re-exported helper to determine if a file is binary" + }, + { + "name": "Error", + "type": "class", + "purpose": "Re-exported filesystem error enum/type for consistent error handling" + } + ], + "semantic_tags": [ + "filesystem", + "error-handling", + "hashing", + "io-utilities" + ], + "handles_entities": [], + "key_behaviors": [ + "standardizes file operations and errors across the codebase", + "computes content hash for change detection" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_fs/src/read_range.rs": { + "short_description": "Read specific line ranges from files with UTF-8 
handling", + "category": "SOURCE_CODE", + "description": "Implements ForgeFS::read_range_utf8 to read inclusive line ranges from a file, validate inputs, detect binary files, and return FileInfo including a whole-file content hash. The module includes comprehensive tests for multi-byte UTF-8, invalid-UTF8 resilience, line capping, and large-file ranges.", + "key_constructs": [ + { + "name": "ForgeFS::read_range_utf8", + "type": "function", + "purpose": "Reads a 1-based inclusive line range from a file, returning the text slice and FileInfo with total lines and content hash" + }, + { + "name": "tests", + "type": "module", + "purpose": "Unit tests covering range reading, UTF-8 handling, invalid ranges, and performance scenarios" + } + ], + "semantic_tags": [ + "file-reading", + "line-ranges", + "utf8", + "validation", + "hashing" + ], + "handles_entities": [ + "FileInfo", + "file content" + ], + "key_behaviors": [ + "reads specific line ranges from files", + "validates start/end indices and caps end at file length", + "computes stable whole-file hash for change detection" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Hash entire file content for FileInfo rather than range content", + "problem": "Range reads returned content_hash computed from the truncated range which did not match the external-change detector's full-file hash.", + "root_cause": "Callers (external-change detector) compute full-file hashes for change detection; using range-based hash led to false positives.", + "solution": "read_range_utf8 now computes SHA-256 on the full file content and includes it in FileInfo.content_hash while still returning the requested range content. 
Added ForgeFS::compute_hash helper.", + "lesson_learned": "When consumers require a canonical representation (full-file hash), compute and expose that consistently even if you return partial data.", + "commits": [ + "29db91a" + ], + "constructs": [ + "read_range_utf8", + "ForgeFS::compute_hash" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/console.rs": { + "short_description": "Thread-safe console writer synchronizing stdout/stderr writes", + "category": "SOURCE_CODE", + "description": "Provides StdConsoleWriter which wraps stdout/stderr in mutexes to prevent interleaved writes across threads and supports injecting mock writers for tests. Implements the forge_domain::ConsoleWriter trait and contains tests that verify concurrent write behavior and mock writer support.", + "key_constructs": [ + { + "name": "StdConsoleWriter", + "type": "class", + "purpose": "Mutex-wrapped stdout/stderr writer that serializes concurrent terminal writes", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 20, + "context": "use crate::console::StdConsoleWriter;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 54, + "context": "output_printer: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 85, + "context": "let output_printer = Arc::new(StdConsoleWriter::default());" + }, + { + "file": "crates/forge_infra/src/lib.rs", + "line": 22, + "context": "pub use console::StdConsoleWriter;" + }, + { + "file": "crates/forge_infra/src/executor.rs", + "line": 11, + "context": "use crate::console::StdConsoleWriter;" + }, + { + "file": "crates/forge_infra/src/executor.rs", + "line": 17, + "context": "output_printer: Arc," + }, + { + "file": "crates/forge_infra/src/executor.rs", + "line": 24, + "context": "pub fn new(env: Environment, output_printer: Arc) -> Self {" + }, + { + "file": "crates/forge_infra/src/executor.rs", + "line": 143, + "context": "printer: 
Arc," + } + ] + }, + { + "name": "StdConsoleWriter::with_writers", + "type": "function", + "purpose": "Constructs a writer using custom stdout/stderr instances for testing" + }, + { + "name": "impl ConsoleWriter for StdConsoleWriter", + "type": "function", + "purpose": "Implements write/flush methods to the underlying synchronized writers" + }, + { + "name": "tests", + "type": "module", + "purpose": "Tests ensuring outputs don't interleave and that mock writers work" + } + ], + "semantic_tags": [ + "console", + "synchronization", + "thread-safety", + "io", + "testing" + ], + "handles_entities": [ + "Stdout", + "Stderr" + ], + "key_behaviors": [ + "prevents interleaved terminal output from concurrent threads", + "allows testing with mock writers" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/fs_read.rs": { + "short_description": "File reader infra bridging ForgeFS into application infra traits", + "category": "SOURCE_CODE", + "description": "Implements FileReaderInfra using ForgeFS to provide async file reads, range reads and a batched streaming reader that yields path/result pairs. 
The module adapts ForgeFS calls into a Stream for batched reading and includes unit tests validating batching behavior.", + "key_constructs": [ + { + "name": "ForgeFileReadService", + "type": "class", + "purpose": "Service implementing FileReaderInfra for single, range, and batched reads", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 25, + "context": "use crate::fs_read::ForgeFileReadService;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 40, + "context": "file_read_service: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 76, + "context": "let file_read_service = Arc::new(ForgeFileReadService::new());" + } + ] + }, + { + "name": "ForgeFileReadService::read_utf8", + "type": "function", + "purpose": "Reads whole file as UTF-8 via ForgeFS" + }, + { + "name": "ForgeFileReadService::read_batch_utf8", + "type": "function", + "purpose": "Returns a Stream of (PathBuf, Result) for batched file reading with concurrency" + }, + { + "name": "tests", + "type": "module", + "purpose": "Unit tests for batch reading behavior and edge cases" + } + ], + "semantic_tags": [ + "file-reading", + "streaming", + "batching", + "infra", + "async" + ], + "handles_entities": [ + "file content", + "PathBuf" + ], + "key_behaviors": [ + "reads files individually and in batches as a stream", + "exposes range reads and raw byte reads via infra trait" + ], + "insights": [ + { + "type": "refactoring", + "category": "API", + "title": "Adjust file read service to yield per-file results instead of batch Results", + "problem": "Previous implementation returned Result> per batch; needed per-file error reporting.", + "root_cause": "Batching API made propagating individual failures awkward and caused upstream changes.", + "solution": "Produce a Stream of batches, map each file to (path, Result), join and then flat_map to yield one item per file: (PathBuf, anyhow::Result). 
Tests updated to check item-wise results.", + "commits": [ + "1b114a4" + ], + "constructs": [ + "ForgeFileReadService::read_batch_utf8" + ] + } + ], + "tests": { + "exercised_by": [], + "test_functions": [ + "tests in this file adapted to new stream shape" + ], + "source_commits": [ + "1b114a4" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/fs_read_dir.rs": { + "short_description": "Directory reader service that lists and reads files with filtering", + "category": "SOURCE_CODE", + "description": "Provides ForgeDirectoryReaderService to list directory entries (respecting gitignore) and to asynchronously read files in a directory with optional glob filtering and concurrency caps. It leverages ForgeFS and forge_walker to skip binaries and respects ignore files, with tests covering listing, filtering, and ignore behavior.", + "key_constructs": [ + { + "name": "ForgeDirectoryReaderService", + "type": "class", + "purpose": "Service for listing directory entries and reading files with concurrency and filtering", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 26, + "context": "use crate::fs_read_dir::ForgeDirectoryReaderService;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 46, + "context": "directory_reader_service: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 78, + "context": "let directory_reader_service = Arc::new(ForgeDirectoryReaderService::new(" + } + ] + }, + { + "name": "ForgeDirectoryReaderService::list_directory_entries", + "type": "function", + "purpose": "Lists entries (path, is_directory) in a directory using Walker and ignore rules" + }, + { + "name": "ForgeDirectoryReaderService::read_directory_files", + "type": "function", + "purpose": "Reads files in a directory, optionally filtered by glob pattern, in parallel with a cap" + }, + { + "name": "tests", + "type": "module", + "purpose": "Unit tests validating 
listing, filtering, ignore handling, and read behavior" + } + ], + "semantic_tags": [ + "directory-walking", + "glob-filtering", + "parallel-io", + "gitignore", + "infra" + ], + "handles_entities": [ + "files", + "directory entries", + "PathBuf" + ], + "key_behaviors": [ + "lists directory entries respecting ignore rules", + "reads matching files in parallel with concurrency limits", + "filters files via glob patterns" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Performance", + "title": "Add concurrency cap to directory file reads to avoid EMFILE", + "problem": "Previous implementation used join_all to run arbitrary many async reads causing too many file descriptors open concurrently.", + "root_cause": "join_all over potentially large iterators with ForgeFS::read_to_string opens many files simultaneously.", + "solution": "Replace join_all with a stream using buffer_unordered(parallel_file_reads) and filter_map to only keep successful reads. Add new ForgeDirectoryReaderService::new(parallel_file_reads) to configure cap.", + "lesson_learned": "Don't use join_all for potentially large IO batches; prefer bounded concurrency via buffer_unordered and expose a tunable parameter.", + "commits": [ + "e25c1c0" + ], + "constructs": [ + "ForgeDirectoryReaderService::new", + "read_directory_files" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_infra/src/fs_read_dir.rs::tests" + ], + "source_commits": [ + "e25c1c0" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/mcp_client.rs": { + "short_description": "MCP client that connects to MCP servers (stdio/http/sse) and calls tools", + "category": "SOURCE_CODE", + "description": "Implements ForgeMcpClient providing connection management, tool listing and tool invocation against MCP servers using stdio or HTTP/SSE transports, with retry logic for transient transport errors. 
It also resolves HTTP header templates using a handlebars engine and translates RMCP raw responses into ToolOutput domain types, including tests for header templating.", + "key_constructs": [ + { + "name": "ForgeMcpClient", + "type": "class", + "purpose": "Main MCP client managing connections, retries, and API calls to MCP servers", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 32, + "context": "use crate::mcp_client::ForgeMcpClient;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 279, + "context": "type Client = ForgeMcpClient;" + }, + { + "file": "crates/forge_infra/src/mcp_server.rs", + "line": 6, + "context": "use crate::mcp_client::ForgeMcpClient;" + }, + { + "file": "crates/forge_infra/src/mcp_server.rs", + "line": 13, + "context": "type Client = ForgeMcpClient;" + }, + { + "file": "crates/forge_infra/src/mcp_server.rs", + "line": 20, + "context": "Ok(ForgeMcpClient::new(config, env_vars))" + } + ] + }, + { + "name": "ForgeMcpClient::create_connection", + "type": "function", + "purpose": "Creates a connection to an MCP server via Stdio, HTTP, or SSE transports" + }, + { + "name": "ForgeMcpClient::list", + "type": "function", + "purpose": "Lists tools exposed by the MCP server and converts them to ToolDefinition" + }, + { + "name": "ForgeMcpClient::call", + "type": "function", + "purpose": "Calls a tool by name with JSON input and converts the RMCP response to ToolOutput" + }, + { + "name": "resolve_http_templates", + "type": "function", + "purpose": "Resolves mustache/handlebars templates in MCP HTTP headers using provided env vars" + }, + { + "name": "tests", + "type": "module", + "purpose": "Unit tests for HTTP header template resolution behavior" + } + ], + "semantic_tags": [ + "mcp", + "rpc", + "transport", + "retry", + "templating" + ], + "handles_entities": [ + "McpServerConfig", + "ToolDefinition", + "ToolOutput", + "ToolName" + ], + "key_behaviors": [ + "connects to MCP servers over multiple 
transports", + "lists and invokes remote tools with retries", + "resolves templated headers using environment variables" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/banner.rs": { + "short_description": "Terminal banner renderer with version and command tips", + "category": "SOURCE_CODE", + "description": "Renders a stylized startup banner including version information, usage tips for CLI or interactive modes, and an encouragement block for zsh integration. It supports using a custom banner via FORGE_BANNER and aligns labels before printing a boxed tip section.", + "key_constructs": [ + { + "name": "DisplayBox", + "type": "class", + "purpose": "Formats messages into a bordered box for terminal display" + }, + { + "name": "display", + "type": "function", + "purpose": "Builds and prints the banner and tips based on cli_mode and environment", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 153, + "context": "banner::display(false)?;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 557, + "context": "banner::display(true)?;" + } + ] + }, + { + "name": "display_zsh_encouragement", + "type": "function", + "purpose": "Prints a boxed tip encouraging use of the zsh plugin" + } + ], + "semantic_tags": [ + "ui", + "banner", + "terminal", + "zsh", + "branding" + ], + "handles_entities": [], + "key_behaviors": [ + "prints startup banner and usage tips", + "formats boxed encouragement for zsh plugin" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/display_constants.rs": { + "short_description": "Centralized display constants and CommandType enum", + "category": "SOURCE_CODE", + "description": "Provides a single place for UI-related constants (status markers, headers, special markers) and defines CommandType as a type-safe discriminator for command listings. 
Includes tests to ensure consistent formatting conventions like bracketed markers and CommandType string values.", + "key_constructs": [ + { + "name": "status::YES / status::NO", + "type": "constant", + "purpose": "Standardized status markers for enabled/disabled values" + }, + { + "name": "markers::EMPTY / markers::BUILT_IN", + "type": "constant", + "purpose": "Special UI markers to indicate empty or built-in entities" + }, + { + "name": "CommandType", + "type": "class", + "purpose": "Enum that discriminates between Command, Agent, and Custom types for safe display", + "callers": [ + { + "file": "crates/forge_main/src/info.rs", + "line": 260, + "context": "impl IntoInfoValue for crate::display_constants::CommandType {" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 36, + "context": "use crate::display_constants::{CommandType, headers, markers, status};" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1202, + "context": ".add_key_value(\"type\", CommandType::Command)" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1209, + "context": ".add_key_value(\"type\", CommandType::Agent)" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1215, + "context": ".add_key_value(\"type\", CommandType::Agent)" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1229, + "context": ".add_key_value(\"type\", CommandType::Agent)" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1237, + "context": ".add_key_value(\"type\", CommandType::Custom)" + } + ] + }, + { + "name": "impl Display for CommandType", + "type": "function", + "purpose": "Provides string conversion for CommandType values" + } + ], + "semantic_tags": [ + "ui", + "constants", + "display", + "enums", + "presentation" + ], + "handles_entities": [], + "key_behaviors": [ + "centralizes display strings and markers", + "provides type-safe command type discrimination" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_main/src/editor.rs": { + "short_description": "Interactive line editor using reedline with completions and history", + "category": "SOURCE_CODE", + "description": "Wraps reedline to provide an interactive prompt with history, keybindings, completion menu, and pre-filled buffer support for the TUI. ForgeEditor configures custom keybindings (tab completion, clear screen, search), integrates an InputCompleter, and adapts reedline signals into a ReadResult enum for the application.", + "key_constructs": [ + { + "name": "ForgeEditor", + "type": "class", + "purpose": "Encapsulates Reedline editor with history, completer, and custom keybindings", + "callers": [ + { + "file": "crates/forge_main/src/input.rs", + "line": 6, + "context": "use crate::editor::{ForgeEditor, ReadResult};" + }, + { + "file": "crates/forge_main/src/input.rs", + "line": 14, + "context": "editor: Mutex," + }, + { + "file": "crates/forge_main/src/input.rs", + "line": 24, + "context": "let editor = Mutex::new(ForgeEditor::new(env, custom_history_path, command.clone()));" + } + ] + }, + { + "name": "ForgeEditor::new", + "type": "function", + "purpose": "Constructs the editor with environment-aware history file and completion menu" + }, + { + "name": "ForgeEditor::prompt", + "type": "function", + "purpose": "Reads a line from the user and maps reedline signals to ReadResult" + }, + { + "name": "ReadResult", + "type": "class", + "purpose": "Enum representing prompt outcomes (Success, Empty, Continue, Exit)", + "callers": [ + { + "file": "crates/forge_main/src/input.rs", + "line": 6, + "context": "use crate::editor::{ForgeEditor, ReadResult};" + }, + { + "file": "crates/forge_main/src/input.rs", + "line": 36, + "context": "ReadResult::Continue => continue," + }, + { + "file": "crates/forge_main/src/input.rs", + "line": 37, + "context": "ReadResult::Exit => return Ok(SlashCommand::Exit)," + }, + { + "file": "crates/forge_main/src/input.rs", + "line": 38, + "context": "ReadResult::Empty => 
continue," + }, + { + "file": "crates/forge_main/src/input.rs", + "line": 39, + "context": "ReadResult::Success(text) => {" + } + ] + }, + { + "name": "From for ReadResult", + "type": "function", + "purpose": "Converts reedline Signal variants into ReadResult values" + } + ], + "semantic_tags": [ + "interactive", + "editor", + "completions", + "history", + "reedline" + ], + "handles_entities": [ + "history file", + "input buffer" + ], + "key_behaviors": [ + "reads interactive user input with completions and history", + "maps editor signals to application-level results", + "pre-fills buffer for follow-up prompts" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/stream_renderer.rs": { + "short_description": "Streaming markdown renderer that coordinates terminal spinner and output", + "category": "SOURCE_CODE", + "description": "Implements StreamingWriter which renders markdown streams via StreamdownRenderer while managing a spinner so output doesn't overlap with an active spinner. 
It provides thread-safe SharedSpinner, styles for normal/dimmed text, and a StreamDirectWriter that pauses/resumes spinners and writes styled output to a ConsoleWriter.", + "key_constructs": [ + { + "name": "SharedSpinner", + "type": "class", + "purpose": "Arc/Mutex wrapper around SpinnerManager to allow safe concurrent spinner control", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 44, + "context": "use crate::stream_renderer::{SharedSpinner, StreamingWriter};" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 110, + "context": "spinner: SharedSpinner," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 221, + "context": "let spinner = SharedSpinner::new(SpinnerManager::new(api.clone()));" + } + ] + }, + { + "name": "StreamingWriter", + "type": "class", + "purpose": "Coordinates streaming markdown rendering and spinner visibility, creating renderers per style", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 44, + "context": "use crate::stream_renderer::{SharedSpinner, StreamingWriter};" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3118, + "context": "let mut writer = StreamingWriter::new(self.spinner.clone(), self.api.clone());" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3239, + "context": "writer: &mut StreamingWriter," + } + ] + }, + { + "name": "StreamDirectWriter", + "type": "class", + "purpose": "io::Write adapter used by the markdown renderer that pauses/resumes spinner and writes styled bytes to printer" + }, + { + "name": "Style", + "type": "class", + "purpose": "Enum to select Normal or Dimmed styling for streamed content" + } + ], + "semantic_tags": [ + "streaming", + "markdown", + "spinner", + "terminal", + "rendering" + ], + "handles_entities": [ + "spinner", + "markdown streams" + ], + "key_behaviors": [ + "streams markdown to terminal while managing spinner visibility", + "applies text styling and ensures proper flushing on drop" + ], + 
"generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/title_display.rs": { + "short_description": "Formats TitleFormat into colored or plain display strings", + "category": "SOURCE_CODE", + "description": "Provides TitleDisplay that implements Display for TitleFormat in the presentation layer, rendering colored icons, timestamps and optional subtitles according to Category. It offers a builder-style API to toggle colors and an extension trait to convert TitleFormat into a displayable form.", + "key_constructs": [ + { + "name": "TitleDisplay", + "type": "class", + "purpose": "Formats a TitleFormat into a colorized or plain display string with timestamp and icon" + }, + { + "name": "TitleDisplay::with_colors", + "type": "function", + "purpose": "Configures whether to include ANSI color styling in the output" + }, + { + "name": "TitleDisplayExt", + "type": "class", + "purpose": "Trait providing convenience methods to convert TitleFormat into TitleDisplay" + } + ], + "semantic_tags": [ + "ui", + "formatting", + "colored-output", + "timestamps", + "presentation" + ], + "handles_entities": [ + "TitleFormat", + "Category" + ], + "key_behaviors": [ + "renders titles with colored icons and timestamps", + "supports plain output for non-color environments" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Other", + "title": "Use bullet glyph for better Windows terminal compatibility", + "problem": "Originally used \u23fa glyph which rendered poorly on some Windows terminals.", + "root_cause": "Some Unicode glyphs are not consistently supported across terminal stacks.", + "solution": "Switch to a simpler '\u25cf' bullet glyph and keep warning glyph '\u26a0\ufe0f' for warnings.", + "lesson_learned": "Prefer broadly supported glyphs for terminal UI to avoid rendering issues across platforms.", + "commits": [ + "d02477e" + ], + "constructs": [ + "TitleDisplay::render (icon selection)" + ] + } + ], + "generated_at_commit": 
"7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/utils.rs": { + "short_description": "Small presentation utilities for human-friendly time and numbers", + "category": "SOURCE_CODE", + "description": "Contains helpers humanize_time and humanize_number used to present relative times and compact numeric strings (k/M/B). Includes unit tests verifying formatting behavior across ranges.", + "key_constructs": [ + { + "name": "humanize_time", + "type": "function", + "purpose": "Converts a chrono::DateTime into a human-friendly relative time string", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 49, + "context": "use crate::utils::humanize_time;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3924, + "context": ".map_or(\"NEVER\".to_string(), humanize_time);" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3937, + "context": ".add_key_value(\"Created At\", humanize_time(workspace.created_at))" + } + ] + }, + { + "name": "humanize_number", + "type": "function", + "purpose": "Formats integers with k/M/B suffixes for compact display", + "callers": [ + { + "file": "crates/forge_main/src/zsh/rprompt.rs", + "line": 13, + "context": "use crate::utils::humanize_number;" + }, + { + "file": "crates/forge_main/src/zsh/rprompt.rs", + "line": 77, + "context": "let num = humanize_number(*count);" + } + ] + }, + { + "name": "tests", + "type": "module", + "purpose": "Unit tests verifying number formatting edge cases" + } + ], + "semantic_tags": [ + "formatting", + "utilities", + "time", + "numbers", + "presentation" + ], + "handles_entities": [], + "key_behaviors": [ + "renders relative times as human-readable strings", + "compacts large numbers into readable suffix formats" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/heading.rs": { + "short_description": "Render markdown headings with theme-aware styling and wrapping", + "category": "SOURCE_CODE", 
+ "description": "Provides rendering for markdown heading elements (h1..h6), applying theme-based ANSI or test tags, handling uppercase H1 behavior and line wrapping. It converts inline markdown into styled text and prepends dimmed prefix markers for each heading level. Tests verify styling, wrapping, and inline element rendering.", + "key_constructs": [ + { + "name": "render_heading", + "type": "function", + "purpose": "Render a heading line(s) with level-aware styling, prefix, wrapping, and inline rendering.", + "callers": [ + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 9, + "context": "use crate::heading::render_heading;" + }, + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 183, + "context": "let lines = render_heading(*level, content, width, &margin, &self.theme);" + } + ] + }, + { + "name": "HeadingStyler", + "type": "trait", + "purpose": "Styler trait used as a generic parameter to apply heading styles (imported from style.rs)." + }, + { + "name": "InlineStyler", + "type": "trait", + "purpose": "Styler trait used to render inline elements within heading content." + } + ], + "semantic_tags": [ + "markdown", + "rendering", + "styling", + "wrapping", + "headings" + ], + "handles_entities": [ + "Heading" + ], + "key_behaviors": [ + "renders markdown headings with level-specific styles", + "wraps long heading text to fit a given width", + "applies inline element rendering within headings" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/list.rs": { + "short_description": "Render nested markdown lists with bullets, numbering, checkboxes and wrapping", + "category": "SOURCE_CODE", + "description": "Implements markdown list rendering with nested indentation, bullet cycling, ordered numbering state, checkbox handling and line wrapping. 
Provides a ListState to track nesting and numbering and render_list_item to produce styled, wrapped lines for various list bullet types. Tests cover bullet cycling, nested levels, checkboxes, wrapping and state resets.", + "key_constructs": [ + { + "name": "ListState", + "type": "struct", + "purpose": "Tracks nested list stack, numbering and pending reset state for ordered/unordered lists." + }, + { + "name": "render_list_item", + "type": "function", + "purpose": "Renders a single list item with appropriate bullet/number, checkbox handling, indentation and wrapped content.", + "callers": [ + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 11, + "context": "use crate::list::{ListState, render_list_item};" + }, + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 221, + "context": "let lines = render_list_item(" + } + ] + }, + { + "name": "strip_checkbox_prefix", + "type": "function", + "purpose": "Detects and extracts tasklist checkbox markers ([ ], [x], [X]) from item content." + }, + { + "name": "BULLETS_DASH", + "type": "constant", + "purpose": "Defines bullet glyphs used at different nesting levels for dash lists." + } + ], + "semantic_tags": [ + "markdown", + "lists", + "rendering", + "wrapping", + "checkboxes" + ], + "handles_entities": [ + "List items", + "Checkbox state" + ], + "key_behaviors": [ + "renders nested markdown lists with correct bullets and indentation", + "manages ordered list numbering across nested levels", + "detects and styles tasklist checkboxes", + "wraps long list item content with aligned prefixes" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/style.rs": { + "short_description": "Styler traits defining API for markdown element formatting", + "category": "SOURCE_CODE", + "description": "Declares trait interfaces for styling markdown inline elements, headings, lists, and tables. 
These traits (InlineStyler, HeadingStyler, ListStyler, TableStyler) are implemented by theme backends to produce ANSI colored output or test tag output. The file centralizes the contract used by the renderer.", + "key_constructs": [ + { + "name": "InlineStyler", + "type": "trait", + "purpose": "Defines methods for styling inline markdown constructs like bold, italic, code, links and dimmed text." + }, + { + "name": "HeadingStyler", + "type": "trait", + "purpose": "Defines methods for styling heading levels h1..h6." + }, + { + "name": "ListStyler", + "type": "trait", + "purpose": "Defines methods for styling list bullets, numbers and checkboxes." + }, + { + "name": "TableStyler", + "type": "trait", + "purpose": "Defines methods for styling table borders and headers." + } + ], + "semantic_tags": [ + "styling", + "traits", + "markdown", + "abstraction" + ], + "handles_entities": [], + "key_behaviors": [ + "provides a pluggable styling interface for markdown renderers", + "enables different backends (ANSI vs test tags) to be swapped interchangeably" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/theme.rs": { + "short_description": "Terminal theme and concrete styler implementations for markdown output", + "category": "SOURCE_CODE", + "description": "Defines Theme and Style structs to encapsulate color and text attributes and supplies concrete implementations of InlineStyler, HeadingStyler, ListStyler and TableStyler that emit ANSI (via colored) or readable tag output for tests. It detects light/dark terminal mode and exposes predefined light/dark themes. Test TagStyler provides HTML-like output used by unit snapshots.", + "key_constructs": [ + { + "name": "Style", + "type": "struct", + "purpose": "Encapsulates text attributes (fg/bg, bold, italic, underline, strikethrough, dimmed) and applies them to strings." 
+ }, + { + "name": "Theme", + "type": "struct", + "purpose": "Holds Style settings for all markdown elements and implements the styler traits to render ANSI or tag output." + }, + { + "name": "TagStyler", + "type": "struct", + "purpose": "Test-only styler that renders readable HTML-like tags for unit snapshots." + }, + { + "name": "Theme::detect", + "type": "function", + "purpose": "Determines terminal theme mode and selects light or dark Theme presets." + } + ], + "semantic_tags": [ + "theme", + "ansi", + "styling", + "terminal", + "markdown" + ], + "handles_entities": [], + "key_behaviors": [ + "applies color and text attributes to rendered markdown elements", + "chooses light or dark theme based on terminal detection", + "provides test-friendly tag styler for snapshot assertions" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_select/src/input.rs": { + "short_description": "Interactive single-line input builder using rustyline with paste handling", + "category": "SOURCE_CODE", + "description": "Implements InputBuilder that prompts the user for single-line input using rustyline, supports default values and re-prompting when empty is not allowed. It guards against non-tty stdin and strips bracketed-paste escape sequences to return clean values. Unit tests validate builder creation and the paste-stripping helper.", + "key_constructs": [ + { + "name": "InputBuilder", + "type": "struct", + "purpose": "Builder for interactive prompts: message, default option, and empty-input policy." + }, + { + "name": "InputBuilder::prompt", + "type": "function", + "purpose": "Uses rustyline to read user input, handles defaults, cancellation, and cleans bracketed-paste sequences." + }, + { + "name": "strip_bracketed_paste", + "type": "function", + "purpose": "Removes bracketed-paste escape markers from pasted input captured by the terminal." 
+ } + ], + "semantic_tags": [ + "interactive", + "tty", + "prompt", + "input-sanitization" + ], + "handles_entities": [], + "key_behaviors": [ + "prompts user for single-line input with editing support", + "honors default values and allow-empty policy", + "strips bracketed-paste escape sequences from input" + ], + "insights": [ + { + "type": "refactoring", + "category": "Usability", + "title": "New rustyline-based InputBuilder with bracketed paste stripping", + "problem": "Interactive input needs robust line editing and handling pasted input sequences (bracketed paste) especially when invoked from ZLE widget / direct TTY contexts.", + "root_cause": "Previous simple prompt didn't provide expected editing features and didn't strip bracketed paste markers which pollute captured values.", + "solution": "Introduced InputBuilder using rustyline::DefaultEditor with prompt formatting, initial value support, bracketed paste strip, Windows-specific prompt fallback, and standardized return semantics (Ok(Some), Ok(None) for cancel).", + "lesson_learned": "Use a proper line-editing library to handle common terminal interactions and sanitize bracketed paste escape sequences before treating input as authoritative.", + "commits": [ + "ca0fac8" + ], + "constructs": [ + "InputBuilder::prompt", + "strip_bracketed_paste" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_select/src/input.rs (unit tests added)" + ] + } + }, + { + "type": "bug_fix", + "category": "Environment", + "title": "Bail out of interactive prompts if stdin is not a terminal", + "problem": "When stdin is not a TTY (background jobs or CI), interactive prompts could block or cause the process to hang waiting for input.", + "root_cause": "Previous Select/Input builders did not check terminal status and attempted to read interactively.", + "solution": "Added check using std::io::IsTerminal to return Ok(None) immediately when stdin is not a terminal in Input.prompt(), MultiSelect, and Select builders.", + 
"lesson_learned": "Interactive libraries must detect non-interactive environments and fall back or skip prompts to avoid blocking background processes or CI.", + "commits": [ + "635ba87", + "ca0fac8" + ], + "constructs": [ + "InputBuilder::prompt (stdin.is_terminal check)", + "MultiSelectBuilder::prompt", + "SelectBuilder.prompt" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_select/src/multi.rs": { + "short_description": "Multi-select prompt backed by fzf with ANSI stripping and result parsing", + "category": "SOURCE_CODE", + "description": "Provides MultiSelectBuilder to present a multi-select prompt via fzf, sanitizing display strings by stripping ANSI codes and returning selected items by mapping fzf indices to original options. It configures fzf for multi-select, exact matching and custom layout and handles non-tty or empty options gracefully. A simple unit test verifies builder creation.", + "key_constructs": [ + { + "name": "MultiSelectBuilder", + "type": "struct", + "purpose": "Builder holding message and options, with a prompt() method to run fzf and return selections." + }, + { + "name": "MultiSelectBuilder::prompt", + "type": "function", + "purpose": "Runs fzf with configured options, feeds items, parses output and returns the selected items or None." + }, + { + "name": "build_multi_fzf", + "type": "function", + "purpose": "Constructs and configures an Fzf instance for multi-select interactions with desired flags." 
+ } + ], + "semantic_tags": [ + "fzf", + "interactive", + "selection", + "ansi" + ], + "handles_entities": [], + "key_behaviors": [ + "opens an fzf-based multi-select UI", + "returns selected options mapped back to original values", + "skips interaction on non-tty or when options are empty" + ], + "insights": [ + { + "type": "refactoring", + "category": "Testing", + "title": "New MultiSelectBuilder using fzf_wrapped and consistent output parsing", + "problem": "Multi-select functionality needed a consolidated builder and consistent parsing of fzf output indices.", + "root_cause": "Earlier code split across modules and had fragile parsing of fzf output.", + "solution": "Introduced MultiSelectBuilder that builds a configured Fzf instance, adds indexed items, reads raw output and maps lines back to original options using parse_fzf_index helper. Added tests.", + "lesson_learned": "Encapsulate fzf options and parsing so callers only interact with typed builders.", + "commits": [ + "ca0fac8" + ], + "constructs": [ + "MultiSelectBuilder::prompt", + "build_multi_fzf" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/fd.rs": { + "short_description": "Workspace file discovery and filtering utilities with git-fallback", + "category": "SOURCE_CODE", + "description": "Implements file discovery utilities and filtering for workspace synchronization, including a permissioned allowed-extensions list and symlink exclusion. Defines FileDiscovery trait and FdDefault routing discovery that tries git-based discovery then falls back to a walker; exposes filter_and_resolve to validate and resolve relative paths. Tests ensure symlinks/dangling links are excluded and allowed extensions are respected.", + "key_constructs": [ + { + "name": "ALLOWED_EXTENSIONS", + "type": "constant", + "purpose": "Embedded list of allowed file extensions used to filter indexable workspace files." 
+ }, + { + "name": "filter_and_resolve", + "type": "function", + "purpose": "Filters candidate relative paths by extension and symlink status, resolves them to absolute PathBufs, and errors if none remain.", + "callers": [ + { + "file": "crates/forge_services/src/fd_walker.rs", + "line": 9, + "context": "use crate::fd::{FileDiscovery, filter_and_resolve};" + }, + { + "file": "crates/forge_services/src/fd_walker.rs", + "line": 47, + "context": "filter_and_resolve(dir_path, paths)" + }, + { + "file": "crates/forge_services/src/fd_git.rs", + "line": 8, + "context": "use crate::fd::{FileDiscovery, filter_and_resolve};" + }, + { + "file": "crates/forge_services/src/fd_git.rs", + "line": 74, + "context": "filter_and_resolve(dir_path, paths)" + } + ] + }, + { + "name": "FileDiscovery", + "type": "trait", + "purpose": "Abstracts strategies to discover workspace files to be indexed (e.g., git or walker)." + }, + { + "name": "FdDefault", + "type": "struct", + "purpose": "Routing discovery implementation that attempts git-based discovery and falls back to a filesystem walker." 
+ }, + { + "name": "discover_sync_file_paths", + "type": "function", + "purpose": "Wrapper that logs and invokes a FileDiscovery to enumerate files for sync.", + "callers": [ + { + "file": "crates/forge_services/src/sync.rs", + "line": 10, + "context": "use crate::fd::{FileDiscovery, discover_sync_file_paths};" + }, + { + "file": "crates/forge_services/src/sync.rs", + "line": 329, + "context": "let file_paths: Vec = match discover_sync_file_paths(" + } + ] + } + ], + "semantic_tags": [ + "file-discovery", + "filesystem", + "git", + "filtering", + "workspace" + ], + "handles_entities": [ + "Workspace files", + "Paths" + ], + "key_behaviors": [ + "discovers files to index in a workspace", + "filters out symlinks and non-allowed extensions", + "falls back from git-based discovery to walker when git fails" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Exclude symlink paths in filter_and_resolve", + "problem": "filter_and_resolve resolved user-supplied relative paths and included symlink targets which are undesirable for indexing/sync.", + "root_cause": "No symlink exclusion existed in the path resolution pipeline.", + "solution": "Add helper is_symlink(path) and filter out symlinks before extension filtering. 
Added tests covering normal, dangling, and dir-symlink cases.", + "lesson_learned": "When resolving paths for indexing, explicitly exclude symlinks early to avoid following external roots.", + "commits": [ + "09fbef3" + ], + "constructs": [ + "is_symlink", + "filter_and_resolve" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_services/src/fd.rs::test_filter_and_resolve_excludes_dangling_symlinks", + "crates/forge_services/src/fd.rs::test_filter_and_resolve_excludes_symlinks" + ], + "test_functions": [], + "source_commits": [ + "09fbef3" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/instructions.rs": { + "short_description": "Service to discover and read AGENTS.md custom instruction files", + "category": "SOURCE_CODE", + "description": "Provides ForgeCustomInstructionsService to locate AGENTS.md files in prioritized locations (global base, git root, cwd), read their contents and cache them for use as custom agent instructions. It uses the environment and command infra to detect git root and reads files via FileReaderInfra; implements CustomInstructionsService to expose cached instructions. This ensures project and user-specific agent guidance is included in agent sessions.", + "key_constructs": [ + { + "name": "ForgeCustomInstructionsService", + "type": "struct", + "purpose": "Service that discovers AGENTS.md files, reads their content, caches results and exposes them via CustomInstructionsService." + }, + { + "name": "discover_agents_files", + "type": "function", + "purpose": "Finds candidate AGENTS.md paths in base path, git root, and current working directory with priority deduplication." + }, + { + "name": "get_git_root", + "type": "function", + "purpose": "Runs git rev-parse to determine repository root if available." 
+ }, + { + "name": "get_custom_instructions", + "type": "function", + "purpose": "Cached accessor that returns discovered AGENTS.md contents implementing CustomInstructionsService." + } + ], + "semantic_tags": [ + "agents", + "configuration", + "file-reading", + "caching", + "git" + ], + "handles_entities": [ + "Custom instruction files (AGENTS.md)", + "Conversation instructions" + ], + "key_behaviors": [ + "discovers AGENTS.md files from multiple locations", + "reads and caches custom agent instructions for sessions", + "falls back gracefully if git root is unavailable" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/policy.rs": { + "short_description": "Policy management and interactive permission decision service", + "category": "SOURCE_CODE", + "description": "Implements ForgePolicyService which loads default policies, reads/initializes policies YAML, evaluates permission operations with a PolicyEngine, and prompts the user for confirmation when a policy requires confirmation. It can persist policy changes, create default policy files, and add rules for specific operations (read/write/execute/fetch). Tests validate policy creation heuristics for various operations.", + "key_constructs": [ + { + "name": "ForgePolicyService", + "type": "struct", + "purpose": "Service implementing PolicyService to evaluate and manage permission policies and interactive confirmations." + }, + { + "name": "PolicyPermission", + "type": "enum", + "purpose": "Enum representing user choices when asked to confirm an operation (Accept, Reject, AcceptAndRemember)." + }, + { + "name": "DEFAULT_POLICIES", + "type": "constant", + "purpose": "Embedded default PolicyConfig loaded from YAML used to initialize policies file." + }, + { + "name": "create_policy_for_operation", + "type": "function", + "purpose": "Generates a Policy entry heuristically for a given PermissionOperation (reads extension/host/command patterns)." 
+ } + ], + "semantic_tags": [ + "policy", + "permissions", + "interactive", + "file-write", + "security" + ], + "handles_entities": [ + "PolicyConfig", + "Policy", + "PermissionOperation" + ], + "key_behaviors": [ + "evaluates whether an operation is allowed, denied, or requires confirmation", + "initializes default policies file if missing", + "prompts users to accept/reject and can persist remembered choices" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/template.rs": { + "short_description": "Template registration and rendering service using Handlebars", + "category": "SOURCE_CODE", + "description": "Provides ForgeTemplateService which lazily initializes a Handlebars engine, reads template files in parallel and registers them, and renders templates given a Template wrapper and data object. It compiles .md files as Handlebars templates and treats non-markdown files as raw templates to avoid substitution. Tests ensure compilation, raw-file behavior, parallel reads and rendering correctness.", + "key_constructs": [ + { + "name": "ForgeTemplateService", + "type": "struct", + "purpose": "Service that manages a lazily-initialized Handlebars instance, template registration and rendering." + }, + { + "name": "get_hb", + "type": "function", + "purpose": "Initializes and returns a shared RwLock-protected Handlebars instance on first use." + }, + { + "name": "read_all", + "type": "function", + "purpose": "Reads multiple template files concurrently and returns their filenames and contents." + }, + { + "name": "compile_template", + "type": "function", + "purpose": "Compiles .md Handlebars templates or wraps non-.md content as raw templates." 
+ } + ], + "semantic_tags": [ + "templating", + "handlebars", + "concurrency", + "rendering" + ], + "handles_entities": [ + "Template" + ], + "key_behaviors": [ + "registers templates from disk into Handlebars engine", + "renders templates with provided data", + "treats .md files as Handlebars and other files as raw content" + ], + "insights": [ + { + "type": "performance", + "category": "Performance", + "title": "Lazily initialize Handlebars instance", + "problem": "Handlebars template engine instantiation was done at service construction causing slower startup.", + "root_cause": "Handlebars instance was created eagerly and wrapped in RwLock.", + "solution": "Use tokio::sync::OnceCell to lazily initialize a RwLock on first use via get_hb().", + "lesson_learned": "Wrap heavy, shared resources in OnceCell/OnceLock and create them on first demand; expose accessor helpers to simplify usage.", + "commits": [ + "54fe7a4" + ], + "constructs": [ + "ForgeTemplateService::new", + "ForgeTemplateService::get_hb" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/provider_repo.rs": { + "short_description": "Provider registry and credential migration logic for model providers", + "category": "SOURCE_CODE", + "description": "Parses embedded and custom provider configurations into ProviderConfig structures, merges configurations, creates provider templates (configured or unconfigured), migrates environment-variable credentials to a credentials file, and refreshes Google ADC tokens when needed. It exposes ForgeProviderRepository with functions to get providers, create credentials from env, and provider lookup by id. The module contains conversion logic to/from forge_config and forge_domain types and handles special provider cases.", + "key_constructs": [ + { + "name": "ProviderConfig", + "type": "struct", + "purpose": "Internal representation of a provider entry parsed from configuration (embedded or custom)." 
+ }, + { + "name": "UrlParamVarConfig", + "type": "enum", + "purpose": "Represents URL parameter variable configuration, either plain env var name or with dropdown options." + }, + { + "name": "ProviderConfigs", + "type": "struct", + "purpose": "Transparent wrapper around Vec implementing custom merge logic for combining configs." + }, + { + "name": "ForgeProviderRepository", + "type": "struct", + "purpose": "Repository that exposes provider templates, handles migration of env credentials to file, and refreshes special credentials like Google ADC." + }, + { + "name": "migrate_env_to_file", + "type": "function", + "purpose": "One-time migration to convert environment-based provider credentials into a persisted credentials file.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 206, + "context": "self.provider_repository.migrate_env_to_file().await" + } + ] + } + ], + "semantic_tags": [ + "providers", + "credentials", + "migration", + "configuration", + "http" + ], + "handles_entities": [ + "Provider", + "ProviderConfig", + "AuthCredential", + "MigrationResult" + ], + "key_behaviors": [ + "merges embedded and custom provider configs", + "creates provider templates including credential lookup", + "migrates env-var credentials into a credentials file", + "refreshes Google ADC tokens when required" + ], + "insights": [ + { + "type": "refactoring", + "category": "Configuration", + "title": "Support URL parameter specs with preset options and dropdown UI", + "problem": "Provider url_param_vars were strings only, preventing providers from declaring a set of allowed template options that should appear as a dropdown in the UI.", + "root_cause": "url_param_vars was modeled as Vec so the UI could only render free-text input.", + "solution": "Introduce UrlParamVarConfig (untagged enum) to parse both plain string and {name, options} shapes from provider.json. 
Provide into_spec conversion to URLParamSpec (domain) and adapt provider template construction and environment variable lookup to use the param name. Update provider.json parsing & tests accordingly.", + "lesson_learned": "Data format evolution: allow both simple and richer declarative forms (plain string or object with options) in external catalogs and convert to a typed spec used by UI infra.", + "commits": [ + "430b0d1", + "51a730f" + ], + "constructs": [ + "UrlParamVarConfig", + "UrlParamVarConfig::into_spec" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_repo/src/provider/provider_repo.rs tests" + ], + "source_commits": [ + "430b0d1" + ] + } + }, + { + "type": "bug_fix", + "category": "Configuration", + "title": "Skip openai-compatible providers when OPENAI_URL not configured", + "problem": "Provider loader should skip OpenAI-compatible entries if the environment lacks OPENAI_URL, but new openai_responses_compatible was not considered.", + "root_cause": "Conditional logic only checked OPENAI_COMPATIBLE; added OPENAI_RESPONSES_COMPATIBLE should be treated similarly.", + "solution": "Update condition to skip when config.id is OPENAI_COMPATIBLE or OPENAI_RESPONSES_COMPATIBLE and OPENAI_URL is not configured.", + "lesson_learned": "When adding similar provider variants, ensure conditionals that gate them on environment variables are updated in tandem.", + "commits": [ + "58827bd" + ], + "constructs": [ + "get_provider_configs (loading logic)" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_repo/src/provider/provider_repo.rs::tests" + ], + "source_commits": [ + "58827bd" + ] + } + }, + { + "type": "refactoring", + "category": "Performance", + "title": "Remove unnecessary clone when returning providers", + "problem": "get_all_providers returned self.get_providers().await.clone() which clones the Vec unnecessarily.", + "root_cause": "An extra clone created needless allocation and potential performance impact.", + "solution": "Return the 
awaited value directly (Ok(self.get_providers().await)).", + "commits": [ + "0050ea3" + ], + "constructs": [ + "get_all_providers", + "get_providers" + ] + }, + { + "type": "refactoring", + "category": "State Management", + "title": "Centralize credentials file path and use it in migrations", + "problem": "Multiple places constructed the credentials file path by joining base_path with '.credentials.json', leading to duplication and fragile tests that relied on ends_with checks.", + "root_cause": "Path construction logic was duplicated rather than centralized.", + "solution": "Use Environment::credentials_path() helper (from forge_domain) across migrate_env_to_file, read_credentials, write_credentials and update tests to compare exact Path equality.", + "commits": [ + "a1e7f35" + ], + "constructs": [ + "migrate_env_to_file", + "read_credentials", + "write_credentials" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_repo/src/provider/provider_repo.rs (mod env_tests)" + ], + "test_functions": [], + "source_commits": [ + "a1e7f35" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/request.rs": { + "short_description": "DTOs and serialization for Anthropic request payloads", + "category": "SOURCE_CODE", + "description": "Defines Request, Message, Content, and related types used to serialize forge_domain Context into the Anthropic API request shape, handling reasoning/output_config differences and tool-call encoding. It includes TryFrom implementations to convert domain Context and tool calls into the appropriate Anthropic content structure and manages cache-control markers. This layer adapts domain messages and reasoning configuration into the external provider's expected JSON.", + "key_constructs": [ + { + "name": "Request", + "type": "struct", + "purpose": "Top-level DTO for Anthropic requests containing messages, tools, reasoning, and other model options." 
+ }, + { + "name": "TryFrom for Request", + "type": "function", + "purpose": "Conversion logic that maps domain Context messages, tools and reasoning into Anthropic Request fields and reasoning/output_config variants." + }, + { + "name": "Message", + "type": "struct", + "purpose": "Represents an Anthropic message composed of Content items and a role." + }, + { + "name": "Content", + "type": "enum", + "purpose": "Represents Anthropic content variants (Text, Image, ToolUse, ToolResult, Thinking) with cache-control support." + } + ], + "semantic_tags": [ + "dto", + "anthropic", + "serialization", + "provider-adapter", + "tool-calls" + ], + "handles_entities": [ + "Context", + "Message", + "ToolCall", + "Image" + ], + "key_behaviors": [ + "converts internal conversation Context into Anthropic request JSON", + "maps reasoning configuration to appropriate Anthropic fields", + "serializes tool calls and images into provider content" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/anthropic/response.rs": { + "short_description": "Parsing and mapping Anthropic streaming responses to domain messages", + "category": "SOURCE_CODE", + "description": "Defines response DTOs (Event, ContentBlock, Usage, etc.) for Anthropic streaming API and implements conversions into forge_domain types like ChatCompletionMessage and Model. It parses events, maps usage and stop reasons to domain equivalents, handles unknown events gracefully and extracts reasoning/tool-use details from content blocks. Tests validate event deserialization and mapping behavior.", + "key_constructs": [ + { + "name": "Event / EventData", + "type": "enum", + "purpose": "Represents the different streaming events returned by Anthropic and an untagged wrapper for unknown events." 
+ }, + { + "name": "ContentBlock", + "type": "enum", + "purpose": "Represents content block payloads (text, tool use, thinking deltas) that are converted to domain ChatCompletionMessage pieces." + }, + { + "name": "Usage", + "type": "struct", + "purpose": "Holds token counts from Anthropic response and maps to forge_domain::Usage." + }, + { + "name": "impl TryFrom for ChatCompletionMessage", + "type": "function", + "purpose": "Converts streaming events into domain ChatCompletionMessage, handling deltas, usage and errors." + }, + { + "name": "get_context_length", + "type": "function", + "purpose": "Heuristic mapping from Anthropic model id to supported context window sizes." + } + ], + "semantic_tags": [ + "dto", + "anthropic", + "streaming", + "parsing", + "usage" + ], + "handles_entities": [ + "ChatCompletionMessage", + "Model", + "Usage" + ], + "key_behaviors": [ + "parses Anthropic streaming events into domain-friendly messages", + "maps provider token usage into internal Usage objects", + "handles tool-use and reasoning details embedded in streamed content" + ], + "insights": [ + { + "type": "feature", + "category": "Parsing", + "title": "Parse Anthropic ping cost and accept numeric-or-string cost", + "problem": "OpenCode Zen / Anthropic streams may send a Ping with cost encoded as string or number.", + "root_cause": "Existing Event enum didn't model cost in Ping event and didn't accept both string and numeric forms.", + "solution": "Added StringOrF64 untagged enum and updated Event::Ping to include Option. TryFrom maps Ping with cost into ChatCompletionMessage with usage.cost. 
Tests added.", + "lesson_learned": "Streaming formats from different providers can be inconsistent; prefer flexible types (untagged) and map non-content events into usage records.", + "commits": [ + "40cfcc8" + ], + "constructs": [ + "StringOrF64", + "Event::Ping handling in TryFrom" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/fmt/todo_fmt.rs": { + "short_description": "Formats Todo lists/diffs into ANSI-styled checklist output", + "category": "SOURCE_CODE", + "description": "Renders Todo items and diffs as ANSI-styled checklist lines with status-specific icons, coloring, strikethrough for completed items, and preserved insertion order. Provides format_todos to render a list and format_todos_diff to render changes between before/after states including removed and newly-added items. Unit tests assert colorized snapshots and plain outputs.", + "key_constructs": [ + { + "name": "format_todo_line", + "type": "function", + "purpose": "Renders a single Todo line with appropriate icon and ANSI styling based on status and emphasis." 
+ }, + { + "name": "format_todos_diff", + "type": "function", + "purpose": "Produces a textual diff rendering showing surviving, removed and added todos with correct styling.", + "callers": [ + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "line": 5, + "context": "use crate::fmt::todo_fmt::{format_todos, format_todos_diff};" + }, + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "line": 42, + "context": "format_todos_diff(before, after)," + } + ] + }, + { + "name": "format_todos", + "type": "function", + "purpose": "Renders a list of todos into ANSI-styled checklist lines preserving insertion order.", + "callers": [ + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "line": 5, + "context": "use crate::fmt::todo_fmt::{format_todos, format_todos_diff};" + }, + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "line": 45, + "context": "Some(ChatResponseContent::ToolOutput(format_todos(output)))" + } + ] + } + ], + "semantic_tags": [ + "formatting", + "todos", + "ansi", + "diff" + ], + "handles_entities": [ + "Todo" + ], + "key_behaviors": [ + "renders todo lists with status-aware icons and colors", + "produces readable diffs showing changes between todo states", + "preserves insertion order when formatting" + ], + "insights": [ + { + "type": "refactoring", + "category": "State Management", + "title": "Switch todo diff rendering to content-keyed incremental updates and add Cancelled status", + "problem": "Previous todo diff logic keyed on stable IDs and sorted outputs by id which hid insertion order and made cancelled/removed items render incorrectly.", + "root_cause": "Tool protocol changed to send incremental changes keyed by content (not server-managed IDs). 
Rendering assumed ID-sorted outputs and used ID-based comparison; cancelled tasks weren't represented with a dedicated status or styling.", + "solution": "Rewrote format_todos_diff to iterate over before list preserving insertion order, match by content-keyed maps, render removed items with status-aware icons (including new Cancelled icon), and append newly-added items in insertion order. Added TodoStatus::Cancelled and new styling rules.", + "lesson_learned": "When protocol key changes (content-keyed incremental updates), update both domain models and UI renderers. Preserve insertion order coming from the source when it carries semantic meaning. Add explicit cancelled status handling and consistent styling for removed items.", + "commits": [ + "e84bc7f" + ], + "constructs": [ + "format_todo_line", + "format_todos_diff", + "format_todos", + "TodoLineStyle" + ] + }, + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Ensure ANSI color state is forced in tests to stabilize snapshots", + "problem": "ANSI colorization tests could vary depending on environment causing snapshot flakiness.", + "root_cause": "Tests relied on console color state; CI runner environments may disable colors.", + "solution": "Added an ANSI_STYLE_LOCK Mutex and ColorStateGuard to force-enable colors for the duration of raw snapshot tests.", + "lesson_learned": "Tests that depend on ANSI sequences should control color state and serialize access to global console settings to avoid flakiness across environments.", + "commits": [ + "e84bc7f" + ], + "constructs": [ + "ANSI_STYLE_LOCK", + "ColorStateGuard", + "fixture_todo_write_output_raw" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/fmt/snapshots/forge_app__fmt__todo_fmt__tests__todo_write_mixed_changes_snapshot.snap", + "crates/forge_app/src/fmt/snapshots/forge_app__fmt__todo_fmt__tests__todo_write_removed_in_progress_renders_with_in_progress_icon_in_raw_snapshot.snap", + 
"crates/forge_app/src/fmt/snapshots/forge_app__fmt__todo_fmt__tests__todo_write_removed_pending_renders_with_pending_icon_in_raw_snapshot.snap" + ], + "source_commits": [ + "e84bc7f" + ] + } + }, + { + "type": "bug_fix", + "category": "State Management", + "title": "Deterministic todo diff ordering and status-aware removed rendering", + "problem": "Todo diffs were rendered in an unstable order and removed todos were rendered with a single visual style regardless of prior status.", + "root_cause": "Previous implementation iterated 'after' then 'before' and pushed lines directly to the output; removed items were appended after current ones without a stable sort and removed completed todos were not dimmed.", + "solution": "Collect current and removed lines into an intermediate DiffLine enum vector, sort by todo.id, then render; render removed items with status-aware styling (dim white for previously completed, red strikethrough for removed pending). Also ensure format_todos sorts todos by id.", + "lesson_learned": "When producing human-visible diffs rely on deterministic ordering (IDs) and preserve semantic styling for removed/historical items. 
Use an intermediate representation and sort it before rendering to avoid non-determinism in tests and UI.", + "commits": [ + "970a75f", + "4f1ad6b" + ], + "constructs": [ + "TodoLineStyle", + "format_todo_line", + "format_todos_diff", + "format_todos", + "DiffLine" + ] + }, + { + "type": "testing", + "category": "Testing", + "title": "Added snapshot and unit tests for todo formatting", + "problem": "Formatting behaviour (order and removed-item styling) didn't have tests; regressions surfaced in snapshots.", + "root_cause": "No deterministic ordering enforced earlier; missing tests for removed completed todos and ordering.", + "solution": "Added multiple unit tests and snapshots covering mixed changes, removed completed todos, ordering by id, and multi-step flows to assert exact textual output.", + "lesson_learned": "When UI text is important for UX and tooling, add snapshot/unit tests verifying order and styling to catch regressions early.", + "commits": [ + "970a75f", + "4f1ad6b" + ], + "constructs": [ + "format_todos_diff", + "format_todos" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/fmt/snapshots/forge_app__fmt__todo_fmt__tests__todo_write_mixed_changes_snapshot.snap", + "crates/forge_app/src/fmt/todo_fmt.rs::tests" + ], + "source_commits": [ + "970a75f", + "4f1ad6b" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/hooks/title_generation.rs": { + "short_description": "Asynchronous per-conversation title generation hook using background tasks", + "category": "SOURCE_CODE", + "description": "Defines TitleGenerationHandler which spawns a single background title-generation task per conversation and safely coordinates StartPayload and EndPayload events via a DashMap of TitleTask sentries. It enqueues a generator task on Start events and awaits its oneshot result on End events, handling cancellation, deduplication, and caching of titles. 
Tests cover concurrency, task lifecycle, and correct transitions between task states.", + "key_constructs": [ + { + "name": "TitleGenerationHandler", + "type": "struct", + "purpose": "Hook handler that coordinates asynchronous title generation tasks per conversation and stores results." + }, + { + "name": "TitleTask", + "type": "enum", + "purpose": "Represents per-conversation task state: InProgress(oneshot Receiver), Awaiting sentinel, or Done(title)." + }, + { + "name": "impl EventHandle> for TitleGenerationHandler", + "type": "function", + "purpose": "Starts a background title generation task if no title exists and no task is in progress." + }, + { + "name": "impl EventHandle> for TitleGenerationHandler", + "type": "function", + "purpose": "Awaits the spawned oneshot result, sets conversation title on success, and manages map transitions." + } + ], + "semantic_tags": [ + "concurrency", + "title-generation", + "hooks", + "background-tasks", + "caching" + ], + "handles_entities": [ + "Conversation", + "Title" + ], + "key_behaviors": [ + "spawns a single background task to generate a conversation title", + "awaits and applies the generated title when conversation ends", + "prevents duplicate tasks and handles cancellation gracefully" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Concurrency", + "title": "Use oneshot channel for title generation background tasks instead of JoinHandle", + "problem": "JoinHandle-based per-conversation background title generation required aborts and could panic or complicate cancellation semantics.", + "root_cause": "JoinHandle abort semantics and panics are heavier; simpler oneshot channel suffices to send result back once.", + "solution": "Replaced TitleTask::InProgress(JoinHandle) with InProgress(oneshot::Receiver>). Spawned tasks send via oneshot::Sender. On drop, clearing receivers suffices; no explicit abort required. 
Updated EndPayload handling and tests.", + "lesson_learned": "For simple one-shot result delivery from background tasks, prefer oneshot channels over JoinHandle for simpler cancellation and no panics on poll-after-completion.", + "commits": [ + "7c9e325" + ], + "constructs": [ + "TitleTask enum", + "TitleGenerationHandler logic" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/hooks/title_generation.rs (unit tests adjusted)" + ] + } + }, + { + "type": "refactoring", + "category": "Concurrency", + "title": "Move title generation into its own hook with concurrency-safe semantics", + "problem": "Title generation logic was embedded in Orchestrator and risked being spawned multiple times for the same conversation when Start/End events raced; resource management and cancellation were ad-hoc.", + "root_cause": "Orchestrator spawned a background JoinHandle and awaited it at end; concurrent lifecycle events could spawn duplicates or race.", + "solution": "Introduce TitleGenerationHandler backed by DashMap of per-conversation TitleTask state (InProgress, Awaiting, Done). Use Entry API to ensure at-most-one task spawn, atomically transition InProgress\u2192Awaiting, await handle to extract result, and store Done or remove on failure. Drop impl aborts in-progress handles. 
Tests added to assert concurrency semantics.", + "commits": [ + "0cf8736" + ], + "constructs": [ + "TitleGenerationHandler", + "TitleTask", + "handle (StartPayload)", + "handle (EndPayload)", + "Drop::drop" + ] + } + ], + "tests": { + "exercised_by": [ + "inline tests in crates/forge_app/src/hooks/title_generation.rs" + ], + "test_functions": [ + "test_start_skips_if_done", + "test_end_handles_task_failure", + "test_start_skips_if_awaiting", + "test_start_skips_if_title_exists", + "test_concurrent_start_spawns_only_one_task", + "test_start_skips_if_task_already_in_progress", + "test_end_sets_title_from_completed_task" + ], + "source_commits": [ + "0cf8736" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/transformers/strip_working_dir.rs": { + "short_description": "Transformer that strips working-dir prefixes from file paths in summaries", + "category": "SOURCE_CODE", + "description": "Provides a Transformer that removes a configured working directory prefix from file paths embedded in ContextSummary tool calls so summaries are more portable and smaller. It operates on FileRead/FileUpdate/FileRemove/Undo tool call path fields and includes extensive unit tests for POSIX and Windows path behaviors. 
The implementation relies on std::path::Path::strip_prefix so behavior is platform-specific.", + "key_constructs": [ + { + "name": "StripWorkingDir", + "type": "class", + "purpose": "Transformer struct holding the working_dir prefix to strip from summary tool paths" + }, + { + "name": "StripWorkingDir::new", + "type": "function", + "purpose": "Constructor to create a StripWorkingDir with a specified working directory" + }, + { + "name": "StripWorkingDir::strip_prefix", + "type": "function", + "purpose": "Helper that attempts to strip the configured working_dir from a given path string" + }, + { + "name": "Transformer::transform for StripWorkingDir", + "type": "function", + "purpose": "Core transformation logic that walks ContextSummary messages and rewrites tool call paths" + } + ], + "semantic_tags": [ + "transformer", + "path-normalization", + "summaries", + "platform-specific", + "testing" + ], + "handles_entities": [ + "ContextSummary", + "SummaryMessage", + "SummaryTool (FileRead, FileUpdate, FileRemove, Undo)" + ], + "key_behaviors": [ + "removes configured working directory prefixes from file paths in summaries", + "preserves non-file tool calls and content blocks", + "maintains platform-specific path handling (POSIX/Windows) behavior" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/auth/auth_context.rs": { + "short_description": "Types for managing various OAuth and API key auth flows", + "category": "SOURCE_CODE", + "description": "Defines request/response structs and enums representing API key, authorization-code, and device-code authentication flows and a generic AuthContext pairing requests with responses. It also provides helper constructors to build AuthContextResponse variants used by provider authentication logic. 
This module centralizes typed containers used during interactive authentication flows.", + "key_constructs": [ + { + "name": "URLParameters", + "type": "class", + "purpose": "Wrapper around HashMap of URLParam to URLParamValue for auth redirect parameters", + "callers": [ + { + "file": "crates/forge_domain/src/provider.rs", + "line": 230, + "context": "pub type ProviderTemplate = Provider>;" + }, + { + "file": "crates/forge_services/src/provider_service.rs", + "line": 234, + "context": "url: Template::::new(" + }, + { + "file": "crates/forge_services/src/provider_service.rs", + "line": 247, + "context": "Template::::new(\"https://api.openai.com/v1/models\")," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 316, + "context": "url: forge_domain::Template::::new(p.url.as_str())," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 319, + "context": "forge_domain::URLParameters," + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 169, + "context": "forge_domain::Template::::new(model_url_template)," + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 180, + "context": "url: forge_domain::Template::::new(&config.url)," + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 404, + "context": "forge_domain::Template::::new(model_url_template)," + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 415, + "context": "url: forge_domain::Template::::new(&config.url)," + } + ] + }, + { + "name": "ApiKeyRequest", + "type": "class", + "purpose": "Parameters required to request API key credentials", + "callers": [ + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 5, + "context": "ApiKey, ApiKeyRequest, AuthContextRequest, AuthContextResponse, AuthCredential, CodeRequest," + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 33, + "context": "Ok(AuthContextRequest::ApiKey(ApiKeyRequest {" + }, + 
{ + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 440, + "context": "Ok(AuthContextRequest::ApiKey(ApiKeyRequest {" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 13, + "context": "API, AgentId, AnyProvider, ApiKeyRequest, AuthContextRequest, AuthContextResponse, ChatRequest," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 2241, + "context": "request: &ApiKeyRequest," + } + ] + }, + { + "name": "ApiKeyResponse", + "type": "class", + "purpose": "Holds obtained API key and URL parameters" + }, + { + "name": "CodeRequest / CodeResponse", + "type": "class", + "purpose": "Represents authorization-code flow request and response values" + }, + { + "name": "DeviceCodeRequest / DeviceCodeResponse", + "type": "class", + "purpose": "Represents device-code flow request and its response placeholder" + }, + { + "name": "AuthContext", + "type": "class", + "purpose": "Generic pairing of a request and its response used in AuthContextResponse variants" + }, + { + "name": "AuthContextResponse", + "type": "class", + "purpose": "Enum encapsulating completed auth flows with helper constructors (api_key, device_code, code)", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 185, + "context": "context: AuthContextResponse," + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 331, + "context": "context: AuthContextResponse," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 7, + "context": "AgentId, AnyProvider, Attachment, AuthContextRequest, AuthContextResponse, AuthMethod," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 527, + "context": "context: AuthContextResponse," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 1006, + "context": "context: AuthContextResponse," + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 5, + "context": "ApiKey, ApiKeyRequest, AuthContextRequest, AuthContextResponse, AuthCredential, CodeRequest," + }, + { + "file": 
"crates/forge_infra/src/auth/strategy.rs", + "line": 42, + "context": "context_response: AuthContextResponse," + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 45, + "context": "AuthContextResponse::ApiKey(ctx) => Ok(AuthCredential::new_api_key(" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 154, + "context": "context_response: AuthContextResponse," + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 157, + "context": "AuthContextResponse::Code(ctx) => {" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 267, + "context": "context_response: AuthContextResponse," + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 270, + "context": "AuthContextResponse::DeviceCode(ctx) => {" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 369, + "context": "context_response: AuthContextResponse," + } + ] + } + ], + "semantic_tags": [ + "authentication", + "oauth", + "api-key", + "device-code", + "authorization" + ], + "handles_entities": [ + "ApiKey", + "AuthorizationCode", + "DeviceCode", + "OAuthConfig", + "URL parameters" + ], + "key_behaviors": [ + "models authentication requests and responses for multiple auth flows", + "constructs typed auth context responses used by provider login flows" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/transformer/reasoning_normalizer.rs": { + "short_description": "Transformer that strips assistant reasoning when model mismatch occurs", + "category": "SOURCE_CODE", + "description": "Implements a Transformer that walks assistant messages backwards to preserve reasoning only for the contiguous tail produced by the current model, and strips reasoning details from messages at or before the first model mismatch. It ensures the reasoning configuration remains present so the new model can still reason despite previous reasoning being removed. 
The file includes comprehensive tests exercising different model-change scenarios.", + "key_constructs": [ + { + "name": "ReasoningNormalizer", + "type": "class", + "purpose": "Transformer struct capturing the current model_id used to decide which reasoning to keep", + "callers": [ + { + "file": "crates/forge_app/src/orch.rs", + "line": 211, + "context": ".pipe(ReasoningNormalizer::new(model_id.clone()));" + }, + { + "file": "crates/forge_domain/src/transformer/mod.rs", + "line": 87, + "context": "pub use reasoning_normalizer::ReasoningNormalizer;" + } + ] + }, + { + "name": "ReasoningNormalizer::new", + "type": "function", + "purpose": "Constructor to create a normalizer for a given ModelId" + }, + { + "name": "Transformer::transform for ReasoningNormalizer", + "type": "function", + "purpose": "Core algorithm that finds the cutoff and clears reasoning_details and thought_signature on assistant messages before or at cutoff" + } + ], + "semantic_tags": [ + "transformer", + "reasoning", + "model-management", + "context-pruning", + "testing" + ], + "handles_entities": [ + "Context", + "ContextMessage", + "ReasoningConfig", + "ModelId" + ], + "key_behaviors": [ + "removes outdated assistant reasoning when model changes", + "preserves contiguous assistant tail produced by current model", + "preserves reasoning config so new model can still enable reasoning" + ], + "insights": [ + { + "type": "bug_fix", + "category": "State Management", + "title": "Normalize reasoning behavior to drop reasoning when model changes and keep when unchanged", + "problem": "Reasoning details (including signatures) were being preserved across model changes causing invalid signatures being used with a different model.", + "root_cause": "Normalizer didn't check model continuity and had heuristics that could keep reasoning even when model changed.", + "solution": "Make ReasoningNormalizer aware of the current model and drop reasoning (including details) when the model changed. 
Update snapshot tests to cover both unchanged and changed scenarios.", + "lesson_learned": "Transformers that operate on contextual metadata must be model-aware when metadata is model-scoped (signatures). Add targeted tests validating both changed/unchanged flows.", + "commits": [ + "2991aec" + ], + "constructs": [ + "ReasoningNormalizer::new", + "ReasoningNormalizer::transform" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_domain/src/transformer/snapshots/..." + ], + "source_commits": [ + "2991aec" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/transformer/transform_tool_calls.rs": { + "short_description": "Transformer that flattens tool-supported messages into standard context messages", + "category": "SOURCE_CODE", + "description": "Transforms a Context containing assistant tool-calls and tool result messages into a non-tool-supported format by removing tool call metadata from assistant messages and converting tool result outputs into user messages or images. It also clears the tools field from the context so downstream providers that don't support tools can consume a plain-context. 
The module includes tests and snapshot assertions for different output shapes.", + "key_constructs": [ + { + "name": "TransformToolCalls", + "type": "class", + "purpose": "Transformer struct optionally holding a ModelId used when creating new user messages from tool outputs", + "callers": [ + { + "file": "crates/forge_app/src/orch.rs", + "line": 205, + "context": ".pipe(TransformToolCalls::new().when(|_| !tool_supported))" + }, + { + "file": "crates/forge_domain/src/transformer/mod.rs", + "line": 90, + "context": "pub use transform_tool_calls::TransformToolCalls;" + }, + { + "file": "crates/forge_domain/src/transformer/mod.rs", + "line": 153, + "context": "let transform_tool_calls = TransformToolCalls::new();" + } + ] + }, + { + "name": "TransformToolCalls::new", + "type": "function", + "purpose": "Constructor that sets model to None" + }, + { + "name": "Transformer::transform for TransformToolCalls", + "type": "function", + "purpose": "Main logic that strips tool_calls from assistant messages, converts ToolResult entries into ContextMessage::user or Image messages, and clears tools" + } + ], + "semantic_tags": [ + "transformer", + "tools", + "context-conversion", + "tool-output", + "testing" + ], + "handles_entities": [ + "Context", + "ContextMessage", + "ToolResult", + "ToolOutput", + "ToolCallFull" + ], + "key_behaviors": [ + "converts tool results into user-visible messages", + "removes tool call metadata from assistant messages", + "clears tool definitions from context when flattened" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/zsh/mod.rs": { + "short_description": "Zsh integration helpers and script normalization utility", + "category": "SOURCE_CODE", + "description": "Exports zsh-related submodules for plugin/theme generation, diagnostics, rprompt rendering and styling utilities and provides normalize_script to sanitize embedded shell scripts across platforms by stripping CR characters. 
This module centralizes ZSH integration plumbing used by the CLI installer and prompt rendering utilities. It re-exports plugin and rprompt functions for external use.", + "key_constructs": [ + { + "name": "normalize_script", + "type": "function", + "purpose": "Normalizes shell script content by converting CRLF and CR line endings to LF so zsh can parse embedded scripts cross-platform", + "callers": [ + { + "file": "crates/forge_main/src/zsh/plugin.rs", + "line": 25, + "context": "let content = super::normalize_script(std::str::from_utf8(file.contents())?);" + }, + { + "file": "crates/forge_main/src/zsh/plugin.rs", + "line": 55, + "context": "super::normalize_script(include_str!(\"../../../../shell-plugin/forge.theme.zsh\"));" + }, + { + "file": "crates/forge_main/src/zsh/plugin.rs", + "line": 88, + "context": "let script_content = super::normalize_script(script_content);" + }, + { + "file": "crates/forge_main/src/zsh/plugin.rs", + "line": 256, + "context": "let forge_init_config = super::normalize_script(FORGE_INIT_CONFIG_RAW);" + } + ] + }, + { + "name": "generate_zsh_plugin / generate_zsh_theme / run_zsh_doctor / run_zsh_keyboard / setup_zsh_integration", + "type": "function", + "purpose": "Re-exported plugin management and setup functions implemented in submodules" + }, + { + "name": "ZshRPrompt", + "type": "class", + "purpose": "Re-exported type providing rprompt rendering (from rprompt module)" + } + ], + "semantic_tags": [ + "shell-integration", + "zsh", + "script-normalization", + "cli-integration", + "ui" + ], + "handles_entities": [], + "key_behaviors": [ + "normalizes embedded zsh scripts for cross-platform usage", + "provides functions to generate and install zsh plugin and theme", + "exposes rprompt rendering utilities" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Platform/Other", + "title": "Normalize scripts to strip CRLF for Windows zsh support", + "problem": "Zsh scripts embedded via include_str! 
could include CRLF on Windows (core.autocrlf), breaking zsh parsing.", + "root_cause": "Line ending differences on Windows checkouts.", + "solution": "Add normalize_script(content: &str) -> String that replaces CRLF and lone CR with LF and apply to included plugin/theme/setup files.", + "commits": [ + "fd60dc7" + ], + "constructs": [ + "normalize_script" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/google.rs": { + "short_description": "Google provider client and repository for streaming chat and model listing", + "category": "SOURCE_CODE", + "description": "Implements a Google API client (generic over HttpInfra) to stream chat completions via SSE and to fetch available models, plus a GoogleResponseRepository that implements ChatRepository for use by the app. It handles header selection (API key header vs bearer), request serialization, SSE event handling, and integrates retry mapping. The file also contains comprehensive unit tests with a mock HTTP infra and mock server helpers.", + "key_constructs": [ + { + "name": "Google", + "type": "class", + "purpose": "HTTP client wrapper that knows how to call Google streaming chat endpoints and list models" + }, + { + "name": "Google::chat", + "type": "function", + "purpose": "Builds request URL for Google's streaming API, posts request as SSE and returns a stream of ChatCompletionMessage" + }, + { + "name": "Google::models", + "type": "function", + "purpose": "Fetches and parses model list from the configured models endpoint or returns hardcoded models" + }, + { + "name": "GoogleResponseRepository", + "type": "class", + "purpose": "Repository implementing ChatRepository by creating Google clients and delegating chat/models calls with retry mapping" + }, + { + "name": "GoogleResponseRepository::create_client", + "type": "function", + "purpose": "Constructs a Google client extracting credentials and deciding header strategy" + } + ], + 
"semantic_tags": [ + "provider", + "google", + "http", + "sse", + "models", + "retry" + ], + "handles_entities": [ + "Context", + "ChatCompletionMessage", + "Model", + "Provider", + "AuthDetails" + ], + "key_behaviors": [ + "streams chat completions from Google via SSE", + "fetches available models from Google provider endpoints", + "maps infra/HTTP errors into retryable domain errors" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/mock_server.rs": { + "short_description": "HTTP mock server helpers for provider unit tests", + "category": "SOURCE_CODE", + "description": "Provides a small MockServer wrapper around mockito async server to create canned responses for models and SSE response streams used in provider tests. It also supplies normalize_ports to sanitize dynamic localhost ports in logs and snapshots. This utility centralizes test server setup used by provider client tests.", + "key_constructs": [ + { + "name": "MockServer", + "type": "class", + "purpose": "Async mock server wrapper used to register model and SSE mocks for tests", + "callers": [ + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 380, + "context": "use crate::provider::mock_server::{MockServer, normalize_ports};" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 563, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 577, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 595, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 613, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 643, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": 
"crates/forge_repo/src/provider/google.rs", + "line": 237, + "context": "use crate::provider::mock_server::{MockServer, normalize_ports};" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 411, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 428, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 513, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 377, + "context": "use crate::provider::mock_server::{MockServer, normalize_ports};" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 571, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 588, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 606, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 625, + "context": "let mut fixture = MockServer::new().await;" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 440, + "context": "use crate::provider::mock_server::MockServer;" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1267, + "context": "let mut fixture = MockServer::new().await;" + } + ] + }, + { + "name": "MockServer::mock_models", + "type": "function", + "purpose": "Creates a mock GET /models endpoint returning provided JSON and status" + }, + { + "name": "MockServer::mock_responses_stream", + "type": "function", + "purpose": "Creates a mock SSE POST stream at /v1/responses with provided events and status" + }, + { + "name": "normalize_ports", + "type": "function", + "purpose": "Replaces 
dynamic localhost ports in strings with a stable placeholder for snapshot testing", + "callers": [ + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 377, + "context": "use crate::provider::mock_server::{MockServer, normalize_ports};" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 600, + "context": "insta::assert_snapshot!(normalize_ports(format!(\"{:#?}\", actual.unwrap_err())));" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 618, + "context": "insta::assert_snapshot!(normalize_ports(format!(\"{:#?}\", actual.unwrap_err())));" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 380, + "context": "use crate::provider::mock_server::{MockServer, normalize_ports};" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 589, + "context": "insta::assert_snapshot!(normalize_ports(format!(\"{:#?}\", actual.unwrap_err())));" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 607, + "context": "insta::assert_snapshot!(normalize_ports(format!(\"{:#?}\", actual.unwrap_err())));" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 655, + "context": "insta::assert_snapshot!(normalize_ports(format!(\"{:#?}\", actual.unwrap_err())));" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 237, + "context": "use crate::provider::mock_server::{MockServer, normalize_ports};" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 440, + "context": "insta::assert_snapshot!(normalize_ports(format!(\"{:#?}\", actual.unwrap_err())));" + } + ] + } + ], + "semantic_tags": [ + "testing", + "mock-server", + "http", + "sse", + "snapshots" + ], + "handles_entities": [], + "key_behaviors": [ + "creates mocked HTTP endpoints for provider tests", + "normalizes dynamic ports in test output for stable assertions" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_services/src/tool_services/fs_search.rs": { + "short_description": "Filesystem search service using grep crates with advanced options", + "category": "SOURCE_CODE", + "description": "Implements a FsSearchService backed by grep-regex and grep-searcher that supports regex, file-type filtering, globbing, output modes (files, content, count), context lines, and multiline matching. It orchestrates walking directories, filtering files, and producing Match/SearchResult structures and includes a custom ContextSink to capture match context lines. The module contains robust tests to validate behavior across filters, contexts, binary-skipping, and line numbering.", + "key_constructs": [ + { + "name": "ForgeFsSearch", + "type": "class", + "purpose": "Service wrapper that implements FsSearchService using provided infra for file IO and walking" + }, + { + "name": "ForgeFsSearch::search", + "type": "function", + "purpose": "Entrypoint that validates path, builds matcher, finds files, and routes to the selected output mode" + }, + { + "name": "ContextSink", + "type": "class", + "purpose": "Custom grep-searcher Sink implementation that accumulates before/after context and produces MatchResult::ContextMatch" + }, + { + "name": "build_matcher / get_matching_files / search_files_with_matches / search_count / search_content", + "type": "function", + "purpose": "Helper functions that construct the regex matcher, resolve files to scan, and perform different search output modes" + } + ], + "semantic_tags": [ + "search", + "filesystem", + "regex", + "grep", + "context-lines", + "file-walking" + ], + "handles_entities": [ + "FSSearch", + "SearchResult", + "Match", + "MatchResult", + "Walker" + ], + "key_behaviors": [ + "searches repository files with regex and glob filters", + "returns matches as file hits, counts, or contextual snippets", + "skips binary files and respects file-type filters" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_api/src/lib.rs": { + "short_description": "API crate re-exporting application DTOs and domain types", + "category": "SOURCE_CODE", + "description": "Small facade library that re-exports the internal API modules and commonly used types from forge_app, forge_config, and forge_domain to present a unified public surface for the HTTP/API layer. It centralizes imports so consumers can use forge_api::* to get DTOs, Plan/UsageInfo, and domain models. The file itself is primarily re-exports with minimal logic.", + "key_constructs": [ + { + "name": "pub use api::* / pub use forge_api::* / pub use forge_app::dto::*", + "type": "function", + "purpose": "Re-export statements exposing API modules and DTOs to consumers" + } + ], + "semantic_tags": [ + "api", + "re-exports", + "dto", + "facade" + ], + "handles_entities": [], + "key_behaviors": [ + "exposes API DTOs and domain types for external consumers", + "simplifies imports for API server code" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/apply_tunable_parameters.rs": { + "short_description": "Applies agent-configured tunable parameters into a conversation context", + "category": "SOURCE_CODE", + "description": "Contains ApplyTunableParameters which copies tunable settings from an Agent (temperature, top_p, top_k, max_tokens, reasoning) into a Conversation's Context and attaches tool definitions. This ensures conversations run with agent-specific defaults and tool schemas. 
The module includes tests verifying parameter propagation.", + "key_constructs": [ + { + "name": "ApplyTunableParameters", + "type": "class", + "purpose": "Struct that holds Agent and tool definitions and applies them to a Conversation", + "callers": [ + { + "file": "crates/forge_app/src/app.rs", + "line": 9, + "context": "use crate::apply_tunable_parameters::ApplyTunableParameters;" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 138, + "context": "let conversation = ApplyTunableParameters::new(agent.clone(), tool_definitions.clone())" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 14, + "context": "use crate::apply_tunable_parameters::ApplyTunableParameters;" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 123, + "context": "ApplyTunableParameters::new(agent.clone(), system_tools.clone()).apply(conversation);" + } + ] + }, + { + "name": "ApplyTunableParameters::new", + "type": "function", + "purpose": "Const constructor to build an applicator with agent and tool definitions" + }, + { + "name": "ApplyTunableParameters::apply", + "type": "function", + "purpose": "Copies tunable fields from Agent into Conversation Context and attaches tools" + } + ], + "semantic_tags": [ + "agent", + "configuration", + "conversation", + "context", + "parameters" + ], + "handles_entities": [ + "Agent", + "Conversation", + "Context", + "ToolDefinition" + ], + "key_behaviors": [ + "applies agent tuning (temperature, top_p, top_k, max_tokens, reasoning) to conversations", + "attaches tool definitions to conversation contexts" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/mcp_executor.rs": { + "short_description": "Executor that forwards MCP tool calls to registered MCP services", + "category": "SOURCE_CODE", + "description": "Provides McpExecutor which sends a formatted tool input notification to the UI/context and delegates execution to an McpService 
implementation. It also exposes a contains_tool helper that queries registered MCP servers to determine if a named tool exists. This isolates MCP execution concerns behind a simple API used by agents/tools.", + "key_constructs": [ + { + "name": "McpExecutor", + "type": "class", + "purpose": "Executor wrapper that holds services implementing McpService for executing MCP tool calls" + }, + { + "name": "McpExecutor::execute", + "type": "function", + "purpose": "Sends a UI title and delegates an MCP tool call to services.execute_mcp" + }, + { + "name": "McpExecutor::contains_tool", + "type": "function", + "purpose": "Checks registry of MCP servers to see if a tool with the given name exists" + } + ], + "semantic_tags": [ + "mcp", + "tool-execution", + "executor", + "services", + "integration" + ], + "handles_entities": [ + "ToolCallFull", + "ToolCallContext", + "ToolOutput", + "MCP servers / tool registry" + ], + "key_behaviors": [ + "executes MCP tool calls via configured services", + "reports whether a named MCP tool is available" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/retry.rs": { + "short_description": "Retry helper that applies backoff strategy for retryable errors", + "category": "SOURCE_CODE", + "description": "Provides retry_with_config which uses backon exponential backoff to retry async operations according to RetryConfig, optionally sending notifications on attempts. It filters retries to only Error::Retryable domain errors using should_retry. 
This centralizes retry logic and backoff parameters for network and provider calls.", + "key_constructs": [ + { + "name": "retry_with_config", + "type": "function", + "purpose": "Async helper that retries a provided operation using Exponential backoff configured by RetryConfig and optional notify callback", + "callers": [ + { + "file": "crates/forge_app/src/orch.rs", + "line": 271, + "context": "let message = crate::retry::retry_with_config(" + }, + { + "file": "crates/forge_app/src/git_app.rs", + "line": 216, + "context": "crate::retry::retry_with_config(" + } + ] + }, + { + "name": "should_retry", + "type": "function", + "purpose": "Predicate that permits retries only for domain Error::Retryable" + } + ], + "semantic_tags": [ + "retry", + "backoff", + "resilience", + "error-handling" + ], + "handles_entities": [ + "RetryConfig", + "Error (domain error)" + ], + "key_behaviors": [ + "retries asynchronous operations with exponential backoff", + "only retries when error is marked retryable" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/error.rs": { + "short_description": "Defines configuration-related error types", + "category": "SOURCE_CODE", + "description": "Declares the Error enum used by the forge_config crate to represent config parsing, serialization, I/O and JSON errors with conversions from underlying error types. This centralizes configuration error handling and provides descriptive messages for callers. 
It's a small error definition file relying on thiserror.", + "key_constructs": [ + { + "name": "Error", + "type": "class", + "purpose": "Enum with variants for Config, Serialization, Io, and Serde errors used across forge_config", + "callers": [ + { + "file": "crates/forge_config/src/lib.rs", + "line": 19, + "context": "pub use error::Error;" + }, + { + "file": "crates/forge_config/src/lib.rs", + "line": 29, + "context": "pub type Result = std::result::Result;" + } + ] + } + ], + "semantic_tags": [ + "error-handling", + "configuration", + "serialization", + "io" + ], + "handles_entities": [], + "key_behaviors": [ + "represents configuration load/serialize-related errors", + "converts underlying library errors into a unified type" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/reasoning.rs": { + "short_description": "Configuration types for model reasoning behavior", + "category": "SOURCE_CODE", + "description": "Defines ReasoningConfig (effort, max_tokens, exclude, enabled) and the Effort enum controlling model reasoning intensity and visibility. The types include serde/schemars derives, setters, and fake dummies for config generation and validation. 
This file encapsulates runtime configuration options for enabling and tuning model internal reasoning.", + "key_constructs": [ + { + "name": "ReasoningConfig", + "type": "class", + "purpose": "Holds reasoning configuration options such as effort, max_tokens, exclude and enabled", + "callers": [ + { + "file": "crates/forge_app/src/agent.rs", + "line": 176, + "context": "use forge_config::{Effort as ConfigEffort, ReasoningConfig as ConfigReasoningConfig};" + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 12, + "context": "AutoDumpFormat, Compact, Decimal, HttpConfig, ModelConfig, ReasoningConfig, RetryConfig, Update," + }, + { + "file": "crates/forge_config/src/config.rs", + "line": 259, + "context": "pub reasoning: Option," + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 70, + "context": ".get_or_insert_with(forge_config::ReasoningConfig::default);" + } + ] + }, + { + "name": "Effort", + "type": "class", + "purpose": "Enum enumerating effort levels (None, Minimal, Low, Medium, High, XHigh, Max)", + "callers": [ + { + "file": "crates/forge_app/src/agent.rs", + "line": 149, + "context": "use forge_config::Effort as ConfigEffort;" + }, + { + "file": "crates/forge_app/src/agent.rs", + "line": 176, + "context": "use forge_config::{Effort as ConfigEffort, ReasoningConfig as ConfigReasoningConfig};" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 103, + "context": "forge_config::Effort::None => Effort::None," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 104, + "context": "forge_config::Effort::Minimal => Effort::Minimal," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 105, + "context": "forge_config::Effort::Low => Effort::Low," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 106, + "context": "forge_config::Effort::Medium => Effort::Medium," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 107, + "context": 
"forge_config::Effort::High => Effort::High," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 108, + "context": "forge_config::Effort::XHigh => Effort::XHigh," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 109, + "context": "forge_config::Effort::Max => Effort::Max," + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 60, + "context": "forge_domain::Effort::None => forge_config::Effort::None," + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 61, + "context": "forge_domain::Effort::Minimal => forge_config::Effort::Minimal," + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 62, + "context": "forge_domain::Effort::Low => forge_config::Effort::Low," + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 63, + "context": "forge_domain::Effort::Medium => forge_config::Effort::Medium," + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 64, + "context": "forge_domain::Effort::High => forge_config::Effort::High," + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 65, + "context": "forge_domain::Effort::XHigh => forge_config::Effort::XHigh," + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 66, + "context": "forge_domain::Effort::Max => forge_config::Effort::Max," + } + ] + } + ], + "semantic_tags": [ + "configuration", + "reasoning", + "model-tuning", + "serde", + "schemars" + ], + "handles_entities": [ + "ReasoningConfig", + "Effort" + ], + "key_behaviors": [ + "expresses reasoning tuning options for models", + "provides serialized schema-compatible types for config files" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_display/src/lib.rs": { + "short_description": "Display formats re-exports for diff, grep and markdown", + "category": "SOURCE_CODE", + "description": "Top-level display crate module that organizes and re-exports formatting utilities for code diffs, grep output and markdown rendering used 
by the CLI/TUI. It exposes DiffFormat, GrepFormat and MarkdownFormat types for consumers. The module is a small aggregator to keep display-related types discoverable.", + "key_constructs": [ + { + "name": "DiffFormat", + "type": "function", + "purpose": "Re-exported type for formatting diffs (defined in diff module)" + }, + { + "name": "GrepFormat", + "type": "function", + "purpose": "Re-exported type for formatting grep/search results (defined in grep module)" + }, + { + "name": "MarkdownFormat", + "type": "function", + "purpose": "Re-exported type for rendering markdown (defined in markdown module)" + } + ], + "semantic_tags": [ + "display", + "formatting", + "diff", + "grep", + "markdown" + ], + "handles_entities": [], + "key_behaviors": [ + "provides formatting utilities for diffs, search output, and markdown rendering", + "re-exports display types for consumer crates" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/command.rs": { + "short_description": "Domain struct for user-defined markdown-backed commands", + "category": "SOURCE_CODE", + "description": "Defines a Command value object representing a user-defined slash command loaded from a Markdown file with YAML frontmatter. 
It carries name, description and optional prompt template used by the UI and agents to surface custom commands.", + "key_constructs": [ + { + "name": "Command", + "type": "class", + "purpose": "Holds command metadata (name, description) and optional prompt body parsed from Markdown frontmatter.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 166, + "context": "async fn get_commands(&self) -> Result>;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 303, + "context": "async fn get_commands(&self) -> Result> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 484, + "context": "async fn get_commands(&self) -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 934, + "context": "async fn get_commands(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 122, + "context": "pub fn register_all(&self, commands: Vec) {" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 5, + "context": "use forge_app::domain::Command;" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 18, + "context": "cache: tokio::sync::OnceCell>," + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 27, + "context": "fn init_default(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 43, + "context": "async fn get_commands(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 52, + "context": "async fn cache_or_init(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 56, + "context": "async fn init(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 76, + "context": "async fn init_command_dir(&self, dir: &std::path::Path) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 103, + "context": "fn 
resolve_command_conflicts(commands: Vec) -> Vec {" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 103, + "context": "fn resolve_command_conflicts(commands: Vec) -> Vec {" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 105, + "context": "let mut command_map: HashMap = HashMap::new();" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 118, + "context": ") -> anyhow::Result>" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 135, + "context": "fn parse_command_file(content: &str) -> Result {" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 138, + "context": "let result = gray_matter.parse::(content)?;" + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 231, + "context": "Command::default().name(\"command1\").description(\"Command 1\")," + }, + { + "file": "crates/forge_services/src/command.rs", + "line": 232, + "context": "Command::default().name(\"command2\").description(\"Command 2\")," + } + ] + } + ], + "semantic_tags": [ + "commands", + "markdown", + "serde", + "domain-model", + "templating" + ], + "handles_entities": [ + "Command" + ], + "key_behaviors": [ + "represents user-defined slash commands parsed from markdown", + "serializes/deserializes command metadata for discovery and UI" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/console.rs": { + "short_description": "Trait for synchronized console output writers", + "category": "SOURCE_CODE", + "description": "Declares the ConsoleWriter trait which provides thread-safe, synchronized write and flush operations for primary and error output channels. 
Implementors supply concrete I/O behavior used by CLI/TUI components to emit user and error streams safely across threads.", + "key_constructs": [ + { + "name": "ConsoleWriter", + "type": "class", + "purpose": "Trait specifying write/flush operations for primary and error outputs, requiring Send + Sync for thread-safe use." + } + ], + "semantic_tags": [ + "io", + "output", + "thread-safety", + "console", + "logging" + ], + "handles_entities": [], + "key_behaviors": [ + "provides synchronized writing and flushing of primary and error streams" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/data_gen.rs": { + "short_description": "Parameters for LLM-driven data generation jobs", + "category": "SOURCE_CODE", + "description": "Defines DataGenerationParameters which encapsulate configuration for bulk data generation tasks (input JSONL, schema, optional prompts, concurrency). It centralizes parameters passed into generation tooling and can be serialized/deserialized for tooling integrations.", + "key_constructs": [ + { + "name": "DataGenerationParameters", + "type": "class", + "purpose": "Holds file paths and runtime options (concurrency) used by data generation pipelines.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 239, + "context": "data_parameters: DataGenerationParameters," + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 396, + "context": "data_parameters: DataGenerationParameters," + }, + { + "file": "crates/forge_app/src/data_gen.rs", + "line": 6, + "context": "Context, ContextMessage, DataGenerationParameters, ResultStreamExt, Template, ToolDefinition," + }, + { + "file": "crates/forge_app/src/data_gen.rs", + "line": 59, + "context": "params: DataGenerationParameters," + }, + { + "file": "crates/forge_app/src/data_gen.rs", + "line": 85, + "context": "params: DataGenerationParameters," + }, + { + "file": "crates/forge_main/src/cli.rs", + "line": 759, + "context": 
"impl From for forge_domain::DataGenerationParameters {" + }, + { + "file": "crates/forge_main/src/cli.rs", + "line": 804, + "context": "let actual: forge_domain::DataGenerationParameters = fixture.into();" + }, + { + "file": "crates/forge_main/src/cli.rs", + "line": 805, + "context": "let expected = forge_domain::DataGenerationParameters {" + } + ] + } + ], + "semantic_tags": [ + "data-generation", + "concurrency", + "templates", + "schema", + "configuration" + ], + "handles_entities": [ + "DataGenerationParameters" + ], + "key_behaviors": [ + "captures configuration for generating dataset items via LLMs", + "serializes/deserializes generation job parameters" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/event.rs": { + "short_description": "Event and command structures used for CLI event dispatch", + "category": "SOURCE_CODE", + "description": "Implements Event-related domain types for representing user prompts and commands, their contextual metadata, attachments, and helpers to construct events with ids/timestamps. 
Includes conversion helpers and EventContext utilities to mark contexts as feedback or task; used for CLI dispatch and tool integration.", + "key_constructs": [ + { + "name": "Event", + "type": "class", + "purpose": "Primary event container holding id, optional value (text or command), timestamp, attachments and additional context.", + "callers": [ + { + "file": "crates/forge_domain/src/chat_request.rs", + "line": 4, + "context": "use crate::{ConversationId, Event};" + }, + { + "file": "crates/forge_domain/src/chat_request.rs", + "line": 9, + "context": "pub event: Event," + }, + { + "file": "crates/forge_domain/src/chat_request.rs", + "line": 14, + "context": "pub fn new(content: Event, conversation_id: ConversationId) -> Self {" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_setup.rs", + "line": 9, + "context": "Event, File, MessageEntry, ModelId, ProviderId, Role, Template, ToolCallFull, ToolDefinition," + }, + { + "file": "crates/forge_app/src/orch_spec/orch_setup.rs", + "line": 86, + "context": "self.run_event(Event::new(event.as_ref())).await" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_setup.rs", + "line": 89, + "context": "pub async fn run_event(&mut self, event: impl Into) -> anyhow::Result<()> {" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 6, + "context": "Event, Hook, ProviderId, ToolCallFull, ToolErrorTracker, ToolResult," + }, + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 76, + "context": "pub async fn run(setup: &mut TestContext, event: Event) -> anyhow::Result<()> {" + }, + { + "file": "crates/forge_app/src/user_prompt.rs", + "line": 15, + "context": "event: Event," + }, + { + "file": "crates/forge_app/src/user_prompt.rs", + "line": 24, + "context": "event: Event," + }, + { + "file": "crates/forge_app/src/user_prompt.rs", + "line": 276, + "context": "fn fixture_generator(agent: Agent, event: Event) -> UserPromptGenerator {" + } + ] + }, + { + "name": "EventValue", + 
"type": "class", + "purpose": "Enum describing the payload of an Event as either Text (UserPrompt) or Command (UserCommand).", + "callers": [ + { + "file": "crates/forge_domain/src/context.rs", + "line": 21, + "context": "Attachment, AttachmentContent, ConversationId, EventValue, Image, MessagePhase, ModelId," + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 80, + "context": "pub fn as_value(&self) -> Option<&EventValue> {" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 304, + "context": "pub raw_content: Option," + }, + { + "file": "crates/forge_app/src/hooks/title_generation.rs", + "line": 166, + "context": "Agent, ChatCompletionMessage, Context, ContextMessage, Conversation, EventValue, ModelId," + }, + { + "file": "crates/forge_app/src/hooks/title_generation.rs", + "line": 204, + "context": "TextMessage::new(Role::User, message).raw_content(EventValue::text(message))," + }, + { + "file": "crates/forge_main/src/info.rs", + "line": 761, + "context": "use forge_api::{Environment, EventValue};" + }, + { + "file": "crates/forge_main/src/info.rs", + "line": 1035, + "context": ".raw_content(EventValue::text(\"First user message\"))," + } + ] + }, + { + "name": "UserCommand", + "type": "class", + "purpose": "Represents a parsed CLI/user command with name, templated value and string parameters.", + "callers": [ + { + "file": "crates/forge_main/src/model.rs", + "line": 4, + "context": "use forge_domain::UserCommand;" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 321, + "context": "Ok(SlashCommand::Custom(UserCommand::new(" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 407, + "context": "Custom(UserCommand)," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 22, + "context": "AuthMethod, ChatResponseContent, ConsoleWriter, ContextMessage, Role, TitleFormat, UserCommand," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 2921, + "context": "let event: UserCommand = 
serde_json::from_str(&json)?;" + } + ] + }, + { + "name": "EventContext", + "type": "class", + "purpose": "Holds contextual metadata and variables related to an event and provides helpers to convert to feedback or task contexts.", + "callers": [ + { + "file": "crates/forge_app/src/user_prompt.rs", + "line": 153, + "context": "let mut event_context = EventContext::new(EventContextValue::new(user_input))" + }, + { + "file": "crates/forge_repo/src/agent_definition.rs", + "line": 3, + "context": "Agent, AgentId, Compact, EventContext, MaxTokens, ModelId, ProviderId, ReasoningConfig," + }, + { + "file": "crates/forge_repo/src/agent_definition.rs", + "line": 50, + "context": "pub user_prompt: Option>," + }, + { + "file": "crates/forge_domain/src/agent.rs", + "line": 11, + "context": "Compact, Error, EventContext, MaxTokens, ModelId, ProviderId, Result, SystemContext," + }, + { + "file": "crates/forge_domain/src/agent.rs", + "line": 134, + "context": "pub user_prompt: Option>," + } + ] + }, + { + "name": "UserPrompt", + "type": "class", + "purpose": "Transparent wrapper around a user prompt string used in EventValue::Text.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 172, + "context": "async fn generate_command(&self, prompt: UserPrompt) -> Result;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 311, + "context": "async fn generate_command(&self, prompt: UserPrompt) -> Result {" + }, + { + "file": "crates/forge_app/src/title_generator.rs", + "line": 6, + "context": "ReasoningConfig, ResponseFormat, ResultStreamExt, UserPrompt," + }, + { + "file": "crates/forge_app/src/title_generator.rs", + "line": 29, + "context": "user_prompt: UserPrompt," + }, + { + "file": "crates/forge_app/src/title_generator.rs", + "line": 41, + "context": "user_prompt: UserPrompt," + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 36, + "context": "pub async fn generate(&self, prompt: UserPrompt) -> Result {" + }, + { + "file": 
"crates/forge_app/src/command_generator.rs", + "line": 291, + "context": ".generate(UserPrompt::from(\"list all files\".to_string()))" + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 306, + "context": ".generate(UserPrompt::from(\"show current directory\".to_string()))" + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 321, + "context": ".generate(UserPrompt::from(\"do something\".to_string()))" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 15, + "context": "Event, InterruptionReason, ModelId, Provider, ProviderId, TextMessage, UserPrompt," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 574, + "context": "self.on_cmd(UserPrompt::from(prompt)).await?;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1741, + "context": "async fn on_cmd(&mut self, prompt: UserPrompt) -> anyhow::Result<()> {" + } + ] + } + ], + "semantic_tags": [ + "events", + "cli", + "serialization", + "context", + "templating" + ], + "handles_entities": [ + "Event", + "UserCommand", + "EventContext", + "UserPrompt" + ], + "key_behaviors": [ + "constructs timestamped events from user prompts and commands", + "converts contexts into feedback or task events", + "serializes/deserializes CLI events for dispatching" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/fuzzy_search.rs": { + "short_description": "Represents ranges matched by fuzzy search", + "category": "SOURCE_CODE", + "description": "Defines SearchMatch, a small struct recording start and end line numbers for results returned by fuzzy search. 
It provides a compact serializable representation of where matches occur in files.", + "key_constructs": [ + { + "name": "SearchMatch", + "type": "class", + "purpose": "Holds 0-based start and end line indices for a fuzzy search hit.", + "callers": [ + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 7, + "context": "use forge_domain::{FuzzySearchRepository, SearchMatch, SnapshotRepository, ValidationRepository};" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 62, + "context": "fn from_search_match(source: &str, search_match: &SearchMatch) -> Self {" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 499, + "context": "use forge_domain::SearchMatch;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 506, + "context": "let search_match = SearchMatch { start_line: 1, end_line: 1 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 519, + "context": "let search_match = SearchMatch { start_line: 1, end_line: 2 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 532, + "context": "let search_match = SearchMatch { start_line: 0, end_line: 0 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 545, + "context": "let search_match = SearchMatch { start_line: 2, end_line: 2 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 558, + "context": "let search_match = SearchMatch { start_line: 2, end_line: 2 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 571, + "context": "let search_match = SearchMatch { start_line: 0, end_line: 2 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 584, + "context": "let search_match = SearchMatch { start_line: 0, end_line: 0 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", 
+ "line": 597, + "context": "let search_match = SearchMatch { start_line: 0, end_line: 0 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 1043, + "context": "let search_match = SearchMatch { start_line: 1, end_line: 1 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 1056, + "context": "let search_match = SearchMatch { start_line: 1, end_line: 2 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 1069, + "context": "let search_match = SearchMatch { start_line: 0, end_line: 0 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 1082, + "context": "let search_match = SearchMatch { start_line: 0, end_line: 2 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 1150, + "context": "let search_match = SearchMatch { start_line: 100, end_line: 200 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 1161, + "context": "let search_match = SearchMatch { start_line: 1, end_line: 100 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 1174, + "context": "let search_match = SearchMatch { start_line: 2, end_line: 2 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 1185, + "context": "let search_match = SearchMatch { start_line: 1, end_line: 1 };" + }, + { + "file": "crates/forge_services/src/tool_services/fs_patch.rs", + "line": 1196, + "context": "let search_match = SearchMatch { start_line: 0, end_line: 2 };" + } + ] + } + ], + "semantic_tags": [ + "search", + "fuzzy-search", + "matching", + "lines", + "serialization" + ], + "handles_entities": [ + "SearchMatch" + ], + "key_behaviors": [ + "represents locations of fuzzy search hits in files" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/group_by_key.rs": { + "short_description": 
"Utility trait to group collections by a computed key", + "category": "SOURCE_CODE", + "description": "Provides a GroupByKey trait and a Vec implementation that groups elements by a key function into a HashMap. The implementation sorts grouped items and returns owned collections; it's a convenience utility used across domain code.", + "key_constructs": [ + { + "name": "GroupByKey", + "type": "class", + "purpose": "Trait declaring grouping behavior for collections by a computed key." + }, + { + "name": "impl GroupByKey for Vec::group_by_key", + "type": "function", + "purpose": "Groups a vector of items by applying a key function and returns a HashMap of key to sorted Vec." + } + ], + "semantic_tags": [ + "collection", + "grouping", + "utilities", + "sorting", + "hashmap" + ], + "handles_entities": [], + "key_behaviors": [ + "groups items by extracted keys and sorts groups" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/mcp.rs": { + "short_description": "Model Context Protocol (MCP) server configuration types", + "category": "SOURCE_CODE", + "description": "Implements types and helpers for configuring MCP servers (stdio or HTTP) and a top-level McpConfig mapping server names to configs. It includes constructors, helper methods (is_disabled, server_type) and a deterministic cache_key for config hashing, along with extensive tests validating deserialization and hashing behavior.", + "key_constructs": [ + { + "name": "McpServerConfig", + "type": "class", + "purpose": "Enum representing either a stdio-based or HTTP-based MCP server configuration." + }, + { + "name": "McpStdioServer", + "type": "class", + "purpose": "Configuration for a stdio-backed MCP server (command, args, env, timeout, disable)." 
+ }, + { + "name": "McpHttpServer", + "type": "class", + "purpose": "Configuration for an HTTP MCP server (url, headers, timeout, disable).", + "callers": [ + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 9, + "context": "use forge_domain::{Image, McpHttpServer, McpServerConfig, ToolDefinition, ToolName, ToolOutput};" + }, + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 154, + "context": "fn reqwest_client(&self, config: &McpHttpServer) -> anyhow::Result {" + }, + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 269, + "context": "mut http: McpHttpServer," + }, + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 271, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 301, + "context": "let http = McpHttpServer {" + }, + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 335, + "context": "let http = McpHttpServer {" + }, + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 358, + "context": "let http = McpHttpServer {" + } + ] + }, + { + "name": "McpConfig", + "type": "class", + "purpose": "Top-level container mapping ServerName to McpServerConfig and providing a cache_key() hash.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 110, + "context": "async fn read_mcp_config(&self, scope: Option<&Scope>) -> Result;" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 117, + "context": "async fn write_mcp_config(&self, scope: &Scope, config: &McpConfig) -> Result<()>;" + } + ] + }, + { + "name": "ServerName", + "type": "class", + "purpose": "Wrapper type for server identifier strings used as keys in the config map.", + "callers": [ + { + "file": "crates/forge_domain/src/mcp_servers.rs", + "line": 5, + "context": "use crate::{ServerName, ToolDefinition};" + }, + { + "file": "crates/forge_domain/src/mcp_servers.rs", + "line": 17, + "context": "servers: HashMap>," + }, + { + "file": 
"crates/forge_domain/src/mcp_servers.rs", + "line": 20, + "context": "failures: HashMap," + }, + { + "file": "crates/forge_domain/src/mcp_servers.rs", + "line": 26, + "context": "servers: HashMap>," + }, + { + "file": "crates/forge_domain/src/mcp_servers.rs", + "line": 27, + "context": "failures: HashMap," + }, + { + "file": "crates/forge_domain/src/mcp_servers.rs", + "line": 33, + "context": "pub fn get_servers(&self) -> &HashMap> {" + }, + { + "file": "crates/forge_domain/src/mcp_servers.rs", + "line": 38, + "context": "pub fn get_failures(&self) -> &HashMap {" + }, + { + "file": "crates/forge_domain/src/mcp_servers.rs", + "line": 44, + "context": "type Item = (ServerName, Vec);" + }, + { + "file": "crates/forge_domain/src/mcp_servers.rs", + "line": 45, + "context": "type IntoIter = hash_map::IntoIter>;" + }, + { + "file": "crates/forge_services/src/mcp/service.rs", + "line": 6, + "context": "McpConfig, McpServerConfig, McpServers, ServerName, ToolCallFull, ToolDefinition, ToolName," + }, + { + "file": "crates/forge_services/src/mcp/service.rs", + "line": 19, + "context": "failed_servers: Arc>>," + }, + { + "file": "crates/forge_services/src/mcp/service.rs", + "line": 53, + "context": "async fn insert_clients(&self, server_name: &ServerName, client: Arc) -> anyhow::Result<()> {" + }, + { + "file": "crates/forge_services/src/mcp/service.rs", + "line": 85, + "context": "server_name: &ServerName," + }, + { + "file": "crates/forge_services/src/mcp/service.rs", + "line": 158, + "context": ".entry(ServerName::from(tool.server_name.clone()))" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 500, + "context": "let name = forge_api::ServerName::from(rm.name);" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 515, + "context": "let name = forge_api::ServerName::from(val.name);" + } + ] + } + ], + "semantic_tags": [ + "mcp", + "configuration", + "http", + "stdio", + "serialization" + ], + "handles_entities": [ + "McpServerConfig", + "McpStdioServer", + 
"McpHttpServer", + "McpConfig", + "ServerName" + ], + "key_behaviors": [ + "models MCP server configuration for stdio and HTTP transports", + "provides deterministic hashing for config caching", + "validates and deserializes MCP config from JSON" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Tighten McpConfig deserialization: avoid accepting invalid JSON shapes", + "problem": "MCP config deserialization previously accepted invalid shapes (e.g. empty object or wrong field names) silently, leading to later runtime errors.", + "root_cause": "Struct annotations allowed defaulting/empty values; code didn't validate presence of required fields.", + "solution": "Removed serde default on top-level McpConfig to force required presence of mcpServers; added tests asserting errors for empty object, wrong field names, or null mcpServers and a valid deserialization test for expected shape.", + "commit": [ + "8e109e4" + ], + "constructs": [ + "McpConfig (serde deserialization)" + ] + } + ], + "tests": { + "exercised_by": [ + "inline tests in crates/forge_domain/src/mcp.rs (mod tests)" + ], + "test_functions": [ + "test_mcp_config_deserialization_null_mcp_servers", + "test_mcp_config_deserialization_valid", + "test_mcp_config_deserialization_wrong_field_name", + "test_mcp_config_deserialization_empty_object" + ], + "source_commits": [ + "8e109e4" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/model_config.rs": { + "short_description": "Pairs provider and model identifiers into a config object", + "category": "SOURCE_CODE", + "description": "Defines ModelConfig, a small domain struct that binds a ProviderId to a ModelId for model selection and session configuration. 
It's used as the payload for config operations that set provider/model pairs across the system.", + "key_constructs": [ + { + "name": "ModelConfig", + "type": "class", + "purpose": "Represents a provider + model pairing to configure which model to use for a session or operation.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 153, + "context": "async fn get_commit_config(&self) -> anyhow::Result>;" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 157, + "context": "async fn get_suggest_config(&self) -> anyhow::Result>;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 246, + "context": "async fn get_commit_config(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 250, + "context": "async fn get_suggest_config(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 203, + "context": "async fn get_commit_config(&self) -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 207, + "context": "async fn get_suggest_config(&self) -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 964, + "context": "async fn get_commit_config(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 968, + "context": "async fn get_suggest_config(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 4, + "context": "use forge_domain::{ConfigOperation, Effort, ModelConfig, ModelId, ProviderId, ProviderRepository};" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 72, + "context": "async fn get_commit_config(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 77, + "context": ".map(|(pid, mid)| ModelConfig {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 84, + "context": "async fn get_suggest_config(&self) -> 
anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 89, + "context": ".map(|(pid, mid)| ModelConfig {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 127, + "context": "use forge_domain::ModelConfig as DomainModelConfig;" + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 228, + "context": "use forge_domain::{ModelConfig as DomainModelConfig, ModelId, ProviderId};" + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 252, + "context": "use forge_domain::{ModelConfig as DomainModelConfig, ModelId, ProviderId};" + }, + { + "file": "crates/forge_infra/src/env.rs", + "line": 279, + "context": "use forge_domain::{ModelConfig as DomainModelConfig, ModelId, ProviderId};" + } + ] + }, + { + "name": "ModelConfig::new", + "type": "function", + "purpose": "Constructor helper to create a ModelConfig from provider and model identifiers." + } + ], + "semantic_tags": [ + "model", + "provider", + "configuration", + "schema", + "serialization" + ], + "handles_entities": [ + "ModelConfig" + ], + "key_behaviors": [ + "represents and serializes the provider-model pairing used by higher-level config operations" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/shell.rs": { + "short_description": "Represents command execution output", + "category": "SOURCE_CODE", + "description": "Provides CommandOutput which captures stdout, stderr, exit code and the original command string for executed shell commands. 
Includes a convenience success() method to check exit status, used by tooling that runs shell commands safely.", + "key_constructs": [ + { + "name": "CommandOutput", + "type": "class", + "purpose": "Container for command, stdout, stderr, and optional exit code produced by shell execution.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 101, + "context": ") -> Result;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 201, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_services/src/tool_services/shell.rs", + "line": 72, + "context": "use forge_app::domain::{CommandOutput, Environment};" + }, + { + "file": "crates/forge_services/src/tool_services/shell.rs", + "line": 91, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_services/src/tool_services/shell.rs", + "line": 95, + "context": "Ok(CommandOutput {" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 12, + "context": "AnyProvider, AuthCredential, ChatCompletionMessage, ChatRepository, CommandOutput, Context," + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 470, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 1047, + "context": "output: forge_domain::CommandOutput {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 1082, + "context": "output: forge_domain::CommandOutput {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 1119, + "context": "output: forge_domain::CommandOutput {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 1162, + "context": "output: forge_domain::CommandOutput {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 1200, + "context": "output: forge_domain::CommandOutput {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 1228, + "context": "output: forge_domain::CommandOutput {" + }, + { + "file": "crates/forge_app/src/operation.rs", + 
"line": 1256, + "context": "output: forge_domain::CommandOutput {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 1297, + "context": "output: forge_domain::CommandOutput {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 2385, + "context": "output: forge_domain::CommandOutput {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 2414, + "context": "output: forge_domain::CommandOutput {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 8, + "context": "ChatCompletionMessage, CommandOutput, Context, Conversation, ConversationId, File, FileInfo," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 23, + "context": "pub output: CommandOutput," + }, + { + "file": "crates/forge_app/src/infra.rs", + "line": 8, + "context": "AuthCodeParams, CommandOutput, ConfigOperation, Environment, FileInfo, McpServerConfig," + } + ] + }, + { + "name": "CommandOutput::success", + "type": "function", + "purpose": "Determines whether the command execution is considered successful based on exit_code semantics." + } + ], + "semantic_tags": [ + "shell", + "command-execution", + "process", + "output", + "diagnostics" + ], + "handles_entities": [ + "CommandOutput" + ], + "key_behaviors": [ + "captures and reports stdout/stderr and exit status of shell commands" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/template.rs": { + "short_description": "Generic template wrapper with JSON schema support", + "category": "SOURCE_CODE", + "description": "Defines a transparent Template wrapper around a template string used across the domain (e.g., prompts) and implements JsonSchema to map its schema to a plain string. Also provides convenience constructors and a From impl for Template.", + "key_constructs": [ + { + "name": "Template", + "type": "class", + "purpose": "Generic wrapper containing a template string and a phantom type marker for typed templating payloads." 
+ }, + { + "name": "Template::new", + "type": "function", + "purpose": "Constructor for creating a Template from any ToString value." + }, + { + "name": "impl JsonSchema for Template", + "type": "function", + "purpose": "Maps Template's JSON schema to a string schema for tooling interoperability." + }, + { + "name": "From for Template", + "type": "function", + "purpose": "Allows constructing a Template directly from a string-like type." + } + ], + "semantic_tags": [ + "templating", + "schema", + "serde", + "json-schema", + "prompts" + ], + "handles_entities": [ + "Template" + ], + "key_behaviors": [ + "wraps and types template strings for domain use", + "exposes JSON schema compatibility by mapping to string" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tool_order.rs": { + "short_description": "Ordering and pattern-based prioritization for tools", + "category": "SOURCE_CODE", + "description": "Implements ToolOrder which defines tool ordering via explicit lists and glob patterns, with optimized lookup structures for fast sorting. 
It supports alphabetical fallback, pattern matching, sorting of owned and referenced tool definitions, and provides tests covering ordering semantics.", + "key_constructs": [ + { + "name": "ToolOrder", + "type": "class", + "purpose": "Encapsulates a prioritized list of ToolName entries and precompiled weights/patterns for sorting tools.", + "callers": [ + { + "file": "crates/forge_domain/src/transformer/sort_tools.rs", + "line": 2, + "context": "use crate::{Context, ToolOrder};" + }, + { + "file": "crates/forge_domain/src/transformer/sort_tools.rs", + "line": 7, + "context": "order: ToolOrder," + }, + { + "file": "crates/forge_domain/src/transformer/sort_tools.rs", + "line": 11, + "context": "pub fn new(order: ToolOrder) -> Self {" + }, + { + "file": "crates/forge_domain/src/transformer/sort_tools.rs", + "line": 18, + "context": "Self::new(ToolOrder::default())" + }, + { + "file": "crates/forge_domain/src/transformer/sort_tools.rs", + "line": 50, + "context": "let mut transformer = SortTools::new(ToolOrder::new(vec![])); // Empty = alphabetical" + }, + { + "file": "crates/forge_domain/src/transformer/sort_tools.rs", + "line": 69, + "context": "let custom_order = ToolOrder::new(vec![" + } + ] + }, + { + "name": "ToolOrder::new", + "type": "function", + "purpose": "Constructs a ToolOrder preparing weight map and compiled glob patterns for efficient lookups." + }, + { + "name": "ToolOrder::sort", + "type": "function", + "purpose": "Sorts a mutable slice of ToolDefinition according to configured order or alphabetically when empty." + }, + { + "name": "ToolOrder::get_weight", + "type": "function", + "purpose": "Resolves a ToolName's weight via exact-name lookup or pattern matching." + }, + { + "name": "ToolOrder::compare_by_weight", + "type": "function", + "purpose": "Comparison helper used to order tools by weight with alphabetic tiebreaker." 
+ } + ], + "semantic_tags": [ + "ordering", + "glob-patterns", + "tools", + "sorting", + "optimization" + ], + "handles_entities": [ + "ToolOrder", + "ToolName", + "ToolDefinition" + ], + "key_behaviors": [ + "orders tool definitions by explicit priority or alphabetically", + "matches tool names against glob patterns to assign positions" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/validation.rs": { + "short_description": "Syntax error diagnostic struct", + "category": "SOURCE_CODE", + "description": "Defines SyntaxError, a simple diagnostic structure describing a syntax problem with line, column and message. It's used to report parsing/validation issues discovered in files.", + "key_constructs": [ + { + "name": "SyntaxError", + "type": "class", + "purpose": "Represents a single syntax error location and message for file diagnostics.", + "callers": [ + { + "file": "crates/forge_domain/src/repo.rs", + "line": 209, + "context": ") -> Result>;" + }, + { + "file": "crates/forge_repo/src/validation.rs", + "line": 7, + "context": "use forge_domain::{SyntaxError, ValidationRepository};" + }, + { + "file": "crates/forge_repo/src/validation.rs", + "line": 34, + "context": ") -> Result> {" + }, + { + "file": "crates/forge_repo/src/validation.rs", + "line": 93, + "context": "SyntaxError {" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 612, + "context": ") -> anyhow::Result> {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 207, + "context": "fn create_validation_warning(path: &str, errors: &[forge_domain::SyntaxError]) -> Element {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 793, + "context": "fn test_syntax_errors(errors: Vec<(u32, u32, &str)>) -> Vec {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 794, + "context": "use forge_domain::SyntaxError;" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 798, + "context": 
".map(|(line, column, message)| SyntaxError {" + }, + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "line": 195, + "context": "errors: vec![forge_domain::SyntaxError {" + }, + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "line": 335, + "context": "errors: vec![forge_domain::SyntaxError {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 10, + "context": "ResultStream, Scope, SearchParams, SyncProgress, SyntaxError, Template, ToolCallFull," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 30, + "context": "pub errors: Vec," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 122, + "context": "pub errors: Vec," + } + ] + } + ], + "semantic_tags": [ + "validation", + "diagnostics", + "syntax", + "errors" + ], + "handles_entities": [ + "SyntaxError" + ], + "key_behaviors": [ + "models syntax error locations and messages for reporting" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_embed/src/lib.rs": { + "short_description": "Helpers to register embedded templates with Handlebars", + "category": "SOURCE_CODE", + "description": "Provides utilities to iterate over files embedded via include_dir and register them as Handlebars templates. 
It ensures template names and contents are valid UTF-8 and fails early if registration errors occur, simplifying embedding prompt and system templates into the runtime.", + "key_constructs": [ + { + "name": "files", + "type": "function", + "purpose": "Returns an iterator over all embedded files in a static Dir, recursively.", + "callers": [ + { + "file": "crates/forge_main/src/zsh/plugin.rs", + "line": 24, + "context": "for file in forge_embed::files(&ZSH_PLUGIN_LIB) {" + } + ] + }, + { + "name": "register_templates", + "type": "function", + "purpose": "Registers all embedded files as Handlebars templates, validating UTF-8 and template parsing.", + "callers": [ + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 54, + "context": "forge_embed::register_templates(&mut hb, &TEMPLATE_DIR);" + }, + { + "file": "crates/forge_app/src/template_engine.rs", + "line": 102, + "context": "forge_embed::register_templates(&mut hb, &TEMPLATE_DIR);" + } + ] + } + ], + "semantic_tags": [ + "templates", + "embedding", + "handlebars", + "assets", + "utf8-validation" + ], + "handles_entities": [], + "key_behaviors": [ + "enumerates embedded resource files", + "registers embedded files as Handlebars templates for runtime use" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_fs/src/write.rs": { + "short_description": "Async file system write helpers on ForgeFS", + "category": "SOURCE_CODE", + "description": "Adds async write utilities to the ForgeFS abstraction: create_dir_all, write, append, and remove_file. These wrap tokio filesystem calls and attach context-rich error messages, used across services that need reliable async file operations.", + "key_constructs": [ + { + "name": "ForgeFS::create_dir_all", + "type": "function", + "purpose": "Creates directory trees asynchronously and provides contextual error information on failure." 
+ }, + { + "name": "ForgeFS::write", + "type": "function", + "purpose": "Writes bytes to a file asynchronously and returns contextual errors." + }, + { + "name": "ForgeFS::append", + "type": "function", + "purpose": "Appends bytes to an existing file or creates it, asynchronously." + }, + { + "name": "ForgeFS::remove_file", + "type": "function", + "purpose": "Removes a file asynchronously with contextual failure messages." + } + ], + "semantic_tags": [ + "filesystem", + "async", + "tokio", + "io", + "error-handling" + ], + "handles_entities": [ + "files", + "directories" + ], + "key_behaviors": [ + "creates directories and writes or appends file contents asynchronously", + "removes files with contextual errors" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/fs_write.rs": { + "short_description": "Infrastructure service for writing files with parent-dir handling", + "category": "SOURCE_CODE", + "description": "Implements ForgeFileWriteService that provides low-level file write operations implementing the FileWriterInfra trait, ensuring parent directories exist and supporting append and temporary file creation. 
It wraps forge_fs utilities and tempfile creation for infrastructure-level file writes used by services.", + "key_constructs": [ + { + "name": "ForgeFileWriteService", + "type": "class", + "purpose": "Service struct offering write/append/write_temp operations for files with parent directory creation.", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 28, + "context": "use crate::fs_write::ForgeFileWriteService;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 41, + "context": "file_write_service: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 51, + "context": "http_service: Arc>," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 71, + "context": "let file_write_service = Arc::new(ForgeFileWriteService::new());" + } + ] + }, + { + "name": "ForgeFileWriteService::create_parent_dirs", + "type": "function", + "purpose": "Ensures parent directories exist before file operations; invoked by write/append logic." + }, + { + "name": "impl FileWriterInfra for ForgeFileWriteService", + "type": "function", + "purpose": "Async trait implementation providing write, append and write_temp behaviors for external consumers." + } + ], + "semantic_tags": [ + "file-write", + "infra", + "tempfile", + "async", + "fs" + ], + "handles_entities": [ + "files", + "paths" + ], + "key_behaviors": [ + "writes and appends files ensuring parent directories exist", + "creates temporary files and writes content for infra consumers" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/grpc.rs": { + "short_description": "Lazily-initialized, shared gRPC Channel wrapper with optional TLS", + "category": "SOURCE_CODE", + "description": "Provides a thread-safe, cheaply clonable wrapper around a tonic gRPC Channel that is created on first use and cached. 
It configures concurrency limits and enables TLS for https URLs and allows forcing a reconnect (hydrate). This enables multiple clients to share one connection to the workspace server.", + "key_constructs": [ + { + "name": "ForgeGrpcClient", + "type": "class", + "purpose": "Struct that holds server URL and an optional cached Channel behind a Mutex for lazy, shared access.", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 29, + "context": "use crate::grpc::ForgeGrpcClient;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 53, + "context": "grpc_client: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 84, + "context": "let grpc_client = Arc::new(ForgeGrpcClient::new(services_url));" + } + ] + }, + { + "name": "ForgeGrpcClient::new", + "type": "function", + "purpose": "Constructor that initializes the client with a server URL and empty cached channel." + }, + { + "name": "ForgeGrpcClient::channel", + "type": "function", + "purpose": "Lazily creates and returns a connected tonic::transport::Channel, configuring TLS for https and caching it." + }, + { + "name": "ForgeGrpcClient::hydrate", + "type": "function", + "purpose": "Clears the cached channel to force reinitialization on next access." + } + ], + "semantic_tags": [ + "grpc", + "networking", + "tls", + "lazy-init", + "concurrency" + ], + "handles_entities": [ + "gRPC channel", + "server_url" + ], + "key_behaviors": [ + "creates and caches a gRPC channel on first use", + "configures TLS for secure endpoints", + "allows resetting the cached connection" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/inquire.rs": { + "short_description": "Async bridge to blocking terminal prompts and selections", + "category": "SOURCE_CODE", + "description": "Implements the UserInfra trait by delegating interactive prompts and selections to ForgeWidget running in spawn_blocking. 
It provides methods to prompt freeform questions, single selections, and multi-selections from async code safely. This ties the app's infra layer to the terminal selection/input widget.", + "key_constructs": [ + { + "name": "ForgeInquire", + "type": "class", + "purpose": "Thin service struct implementing UserInfra for prompting users.", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 31, + "context": "use crate::inquire::ForgeInquire;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 48, + "context": "inquire_service: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 99, + "context": "inquire_service: Arc::new(ForgeInquire::new())," + } + ] + }, + { + "name": "ForgeInquire::prompt", + "type": "function", + "purpose": "Helper that runs a blocking prompt closure on tokio's spawn_blocking and returns its result asynchronously." + }, + { + "name": "UserInfra::prompt_question", + "type": "function", + "purpose": "Prompts a freeform input string using ForgeWidget::input via prompt helper." + }, + { + "name": "UserInfra::select_one", + "type": "function", + "purpose": "Displays a single-selection prompt using ForgeWidget::select if options exist." + }, + { + "name": "UserInfra::select_many", + "type": "function", + "purpose": "Displays a multi-selection prompt using ForgeWidget::multi_select if options exist." 
+ } + ], + "semantic_tags": [ + "prompting", + "interactive", + "ui", + "blocking-to-async" + ], + "handles_entities": [], + "key_behaviors": [ + "prompts user for freeform input asynchronously", + "allows single and multiple option selection from async contexts" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/walker.rs": { + "short_description": "Filesystem walker service that adapts Walker config and returns WalkedFile list", + "category": "SOURCE_CODE", + "description": "Converts an application Walker config into a forge_walker configuration (with sensible max/min presets), executes an async filesystem traversal, and returns a vector of WalkedFile metadata. It centralizes traversal limits (depth, breadth, sizes) and exposes a simple async API for other services to index or inspect the workspace.", + "key_constructs": [ + { + "name": "ForgeWalkerService", + "type": "class", + "purpose": "Service struct providing filesystem walking operations.", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 34, + "context": "use crate::walker::ForgeWalkerService;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 50, + "context": "walker_service: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 101, + "context": "walker_service: Arc::new(ForgeWalkerService::new())," + } + ] + }, + { + "name": "ForgeWalkerService::new", + "type": "function", + "purpose": "Constructor for the walker service." + }, + { + "name": "ForgeWalkerService::walk", + "type": "function", + "purpose": "Async method converting Walker config to forge_walker, executing the walk, and returning WalkedFile results." 
+ } + ], + "semantic_tags": [ + "filesystem", + "traversal", + "workspace", + "limits", + "async" + ], + "handles_entities": [ + "WalkedFile", + "Walker" + ], + "key_behaviors": [ + "walks the workspace filesystem under configured limits", + "returns file metadata for indexing or scanning" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/src/lib.rs": { + "short_description": "Public API re-exports for JSON repair utilities", + "category": "SOURCE_CODE", + "description": "Re-exports the json repair functionality, error types, and schema coercion helpers from internal modules so other crates can call json_repair and coerce_to_schema. It centralizes the crate's public surface for repairing and coercing JSON to expected schemas.", + "key_constructs": [ + { + "name": "JsonRepairError", + "type": "class", + "purpose": "Error type representing failures during JSON repair/coercion." + }, + { + "name": "json_repair", + "type": "function", + "purpose": "Parser function that repairs malformed JSON-like input into valid JSON." + }, + { + "name": "coerce_to_schema", + "type": "function", + "purpose": "Attempts to coerce repaired JSON into a provided schema shape." + } + ], + "semantic_tags": [ + "json", + "repair", + "parsing", + "schema", + "validation" + ], + "handles_entities": [ + "JSON documents" + ], + "key_behaviors": [ + "exposes APIs to repair malformed JSON", + "coerces JSON to expected schema shapes" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/input.rs": { + "short_description": "Console wrapper that reads user input via ForgeEditor and parses commands", + "category": "SOURCE_CODE", + "description": "Encapsulates the interactive console logic: holds an editor instance and a command manager, loops prompting for user input, tracks telemetry, and returns parsed SlashCommand values. 
It also allows setting the editor buffer programmatically for the next prompt. This component bridges the line editor with the application's command parsing layer.", + "key_constructs": [ + { + "name": "Console", + "type": "class", + "purpose": "Holds the ForgeEditor and ForgeCommandManager and exposes prompt logic.", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 39, + "context": "use crate::input::Console;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 107, + "context": "console: Console," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 226, + "context": "console: Console::new(" + } + ] + }, + { + "name": "Console::new", + "type": "function", + "purpose": "Constructor that initializes the editor with environment and history path." + }, + { + "name": "Console::prompt", + "type": "function", + "purpose": "Main loop that prompts the user, handles editor outcomes, records telemetry, and parses into SlashCommand." + }, + { + "name": "Console::set_buffer", + "type": "function", + "purpose": "Sets the editor buffer contents for the next prompt." + } + ], + "semantic_tags": [ + "cli", + "input", + "editor", + "command-parsing", + "telemetry" + ], + "handles_entities": [ + "SlashCommand", + "ForgeEditor" + ], + "key_behaviors": [ + "prompts the user and returns parsed commands", + "records prompt telemetry", + "allows pre-filling the next input buffer" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/oauth_callback.rs": { + "short_description": "Localhost OAuth callback HTTP server for capturing authorization codes", + "category": "SOURCE_CODE", + "description": "Implements a small local HTTP listener that waits for OAuth redirect callbacks to capture authorization codes, validates state and method, and returns success/error HTML pages. 
It supports loopback hosts (IPv4/IPv6 and 'localhost'), enforces timeouts and shutdowns, and provides utilities to parse and classify incoming callback requests. This is used to complete browser-based OAuth flows when redirecting to a local port.", + "key_constructs": [ + { + "name": "LocalhostOAuthCallbackServer", + "type": "class", + "purpose": "Struct that starts a tiny-http Server, waits in background for the redirect, and returns the auth code.", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 2437, + "context": "match crate::oauth_callback::LocalhostOAuthCallbackServer::start(request) {" + } + ] + }, + { + "name": "LocalhostOAuthCallbackServer::start", + "type": "function", + "purpose": "Attempts to start the local listener if the request contains a supported localhost redirect URI." + }, + { + "name": "LocalhostOAuthCallbackServer::wait_for_code", + "type": "function", + "purpose": "Awaits the background task to complete and returns the captured authorization code." + }, + { + "name": "wait_for_localhost_oauth_callback", + "type": "function", + "purpose": "Blocking routine that receives requests, validates them, responds with HTML, and returns the code or error." + }, + { + "name": "parse_oauth_callback_target", + "type": "function", + "purpose": "Parses and validates the callback request target, checking path, state, error, and code parameters." 
+ } + ], + "semantic_tags": [ + "oauth", + "authentication", + "http", + "callback", + "localhost" + ], + "handles_entities": [ + "CodeRequest", + "OAuthCallbackPayload", + "authorization code" + ], + "key_behaviors": [ + "starts a localhost HTTP listener for OAuth redirects", + "validates state and method, responds with success/error pages", + "returns the OAuth authorization code to caller" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/prompt.rs": { + "short_description": "Specialized reedline Prompt implementation rendering agent, cwd, model, and usage", + "category": "SOURCE_CODE", + "description": "Defines ForgePrompt which implements reedline::Prompt to render a multi-line CLI prompt showing the active agent, current directory, optional git branch, model, and token usage. It also provides history-search and multiline indicators and includes tests for formatting behavior. This shapes the interactive prompt UI for the agent chat experience.", + "key_constructs": [ + { + "name": "ForgePrompt", + "type": "class", + "purpose": "Prompt struct storing cwd, usage, agent_id, and model for rendering the REPL prompt.", + "callers": [ + { + "file": "crates/forge_main/src/input.rs", + "line": 8, + "context": "use crate::prompt::ForgePrompt;" + }, + { + "file": "crates/forge_main/src/input.rs", + "line": 30, + "context": "pub async fn prompt(&self, prompt: ForgePrompt) -> anyhow::Result {" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 42, + "context": "use crate::prompt::ForgePrompt;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 258, + "context": "let forge_prompt = ForgePrompt { cwd: self.state.cwd.clone(), usage, model, agent_id };" + } + ] + }, + { + "name": "ForgePrompt::render_prompt_left", + "type": "function", + "purpose": "Renders the left-hand part of the prompt including agent mode, folder and optional git branch." 
+ }, + { + "name": "ForgePrompt::render_prompt_right", + "type": "function", + "purpose": "Renders the right-hand status area with version, model, and token usage." + }, + { + "name": "get_git_branch", + "type": "function", + "purpose": "Helper that invokes git to determine the current branch name if in a repo." + } + ], + "semantic_tags": [ + "prompt", + "terminal-ui", + "reedline", + "git", + "styling" + ], + "handles_entities": [ + "AgentId", + "ModelId", + "Usage" + ], + "key_behaviors": [ + "renders a styled, multi-line prompt showing agent, cwd, branch and usage", + "provides history-search and multiline indicators" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/tools_display.rs": { + "short_description": "Formats a ToolsOverview into an Info display organized by categories", + "category": "SOURCE_CODE", + "description": "Formats available tools and MCP server statuses into a human-friendly Info structure for display, showing availability checkboxes and truncating long errors. It groups system/agent/mcp tools and lists failed MCP servers separately to surface errors to users. 
This is used to present the tool inventory in the CLI/UI.", + "key_constructs": [ + { + "name": "format_tools", + "type": "function", + "purpose": "Builds an Info object listing tools by category with availability markers and failure summaries.", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 47, + "context": "use crate::tools_display::format_tools;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1411, + "context": "let info = format_tools(&agent_tools, &all_tools);" + } + ] + } + ], + "semantic_tags": [ + "tools", + "display", + "formatting", + "mcp", + "ui" + ], + "handles_entities": [ + "ToolName", + "ToolsOverview" + ], + "key_behaviors": [ + "renders tools organized by category with availability status", + "summarizes failed MCP servers with truncated errors" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/code.rs": { + "short_description": "Code block highlighter and line-wrapping renderer using syntect", + "category": "SOURCE_CODE", + "description": "Provides CodeHighlighter that uses syntect for syntax highlighting and streamdown_render::code::code_wrap for line wrapping, rendering code lines with margin and continuation indent handling. It supports theme selection based on detected terminal theme and returns ANSI escaped highlighted lines, used by the markdown renderer to print code blocks. 
Tests validate wrapping behavior.", + "key_constructs": [ + { + "name": "CodeHighlighter", + "type": "class", + "purpose": "Encapsulates syntax and theme sets and exposes methods to highlight and render wrapped code lines.", + "callers": [ + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 8, + "context": "use crate::code::CodeHighlighter;" + }, + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 22, + "context": "highlighter: CodeHighlighter," + }, + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 46, + "context": "highlighter: CodeHighlighter::default()," + } + ] + }, + { + "name": "CodeHighlighter::highlight_line", + "type": "function", + "purpose": "Highlights a single line with syntect given an optional language, falling back to plain text." + }, + { + "name": "CodeHighlighter::render_code_line", + "type": "function", + "purpose": "Wraps and highlights a code line, adding margins and continuation indentation, returning rendered lines." + } + ], + "semantic_tags": [ + "syntax-highlighting", + "code-rendering", + "terminal", + "wrapping", + "theme" + ], + "handles_entities": [ + "code lines", + "language identifiers" + ], + "key_behaviors": [ + "highlights code lines for terminal output", + "wraps long code lines and preserves indentation and margins" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/inline.rs": { + "short_description": "Renders inline markdown elements to styled strings via a styler", + "category": "SOURCE_CODE", + "description": "Parses inline markdown into InlineElement sequences and converts them to styled strings by invoking an InlineStyler implementation for each element type (bold, italic, link, image, code, etc.). It provides a small test suite validating formatting and HTML-like output used by the Streamdown renderer. 
This isolates inline formatting logic from block rendering.", + "key_constructs": [ + { + "name": "render_inline_content", + "type": "function", + "purpose": "Parses inline markdown content and renders it using the provided InlineStyler.", + "callers": [ + { + "file": "crates/forge_markdown_stream/src/heading.rs", + "line": 5, + "context": "use crate::inline::render_inline_content;" + }, + { + "file": "crates/forge_markdown_stream/src/heading.rs", + "line": 27, + "context": "let rendered_content = render_inline_content(&content_to_render, styler);" + }, + { + "file": "crates/forge_markdown_stream/src/list.rs", + "line": 7, + "context": "use crate::inline::render_inline_content;" + }, + { + "file": "crates/forge_markdown_stream/src/list.rs", + "line": 173, + "context": "let rendered_content = render_inline_content(actual_content, styler);" + }, + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 10, + "context": "use crate::inline::{render_inline_content, render_inline_elements};" + }, + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 259, + "context": "let rendered_content = render_inline_content(text, &self.theme);" + }, + { + "file": "crates/forge_markdown_stream/src/table.rs", + "line": 6, + "context": "use crate::inline::render_inline_content;" + }, + { + "file": "crates/forge_markdown_stream/src/table.rs", + "line": 21, + "context": ".map(|cell| render_inline_content(cell, styler))" + } + ] + }, + { + "name": "render_inline_elements", + "type": "function", + "purpose": "Renders a slice of InlineElement values into a single styled string by delegating to styler methods.", + "callers": [ + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 10, + "context": "use crate::inline::{render_inline_content, render_inline_elements};" + }, + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 302, + "context": "self.write(&render_inline_elements(elements, &self.theme))?;" + } + ] + } + ], + 
"semantic_tags": [ + "inline-rendering", + "markdown", + "styling", + "parser-integration" + ], + "handles_entities": [ + "InlineElement" + ], + "key_behaviors": [ + "renders inline markdown elements to styled output", + "supports links, images, code, emphasis, and footnotes" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/lib.rs": { + "short_description": "Streaming markdown renderer wrapper (StreamdownRenderer) for terminal output", + "category": "SOURCE_CODE", + "description": "Top-level crate API that composes a Parser and Renderer into StreamdownRenderer which buffers incoming tokens, repairs lines, parses them, and streams rendered events to a writer. It exposes convenience constructors, theming, push/finish semantics and re-exports key types. This provides a streaming interface for LLM output to be rendered as terminal-friendly markdown.", + "key_constructs": [ + { + "name": "StreamdownRenderer", + "type": "class", + "purpose": "Main streaming renderer that buffers tokens, parses lines, and delegates rendering to the internal Renderer.", + "callers": [ + { + "file": "crates/forge_main/src/stream_renderer.rs", + "line": 7, + "context": "use forge_markdown_stream::StreamdownRenderer;" + }, + { + "file": "crates/forge_main/src/stream_renderer.rs", + "line": 148, + "context": "let renderer = StreamdownRenderer::new(writer, term_width());" + }, + { + "file": "crates/forge_main/src/stream_renderer.rs", + "line": 157, + "context": "renderer: StreamdownRenderer>," + } + ] + }, + { + "name": "StreamdownRenderer::new", + "type": "function", + "purpose": "Creates a new renderer with default theme and specified width." + }, + { + "name": "StreamdownRenderer::push", + "type": "function", + "purpose": "Accepts incoming token fragments, buffers until full lines, repairs and renders them." 
+ }, + { + "name": "StreamdownRenderer::finish", + "type": "function", + "purpose": "Flushes any remaining buffered content and finalizes rendering." + } + ], + "semantic_tags": [ + "markdown", + "streaming", + "renderer", + "llm-output", + "terminal" + ], + "handles_entities": [ + "lines", + "tokens", + "events" + ], + "key_behaviors": [ + "buffers and renders streaming markdown tokens line-by-line", + "applies repair and parsing before rendering to terminal" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/renderer.rs": { + "short_description": "Core event-driven markdown renderer mapping parse events to terminal output", + "category": "SOURCE_CODE", + "description": "Implements Renderer which consumes ParseEvent values and outputs formatted lines to a writer handling headings, code blocks, lists, tables, blockquotes, think blocks, horizontal rules and inline content. It maintains state for code highlighting, table buffering, lists and blockquote depth and handles margins/widths for wrapping. 
This is the central component that turns parsed markdown events into terminal text.", + "key_constructs": [ + { + "name": "Renderer", + "type": "class", + "purpose": "Main struct that renders ParseEvent items to an io::Write, tracking state like width and current code language.", + "callers": [ + { + "file": "crates/forge_markdown_stream/src/lib.rs", + "line": 39, + "context": "pub use renderer::Renderer;" + }, + { + "file": "crates/forge_markdown_stream/src/lib.rs", + "line": 53, + "context": "renderer: Renderer," + }, + { + "file": "crates/forge_markdown_stream/src/lib.rs", + "line": 62, + "context": "renderer: Renderer::new(writer, width)," + }, + { + "file": "crates/forge_markdown_stream/src/lib.rs", + "line": 71, + "context": "renderer: Renderer::with_theme(writer, width, theme)," + } + ] + }, + { + "name": "Renderer::render_event", + "type": "function", + "purpose": "Handles a single ParseEvent, updating internal buffers/state and writing formatted output." + }, + { + "name": "Renderer::left_margin", + "type": "function", + "purpose": "Computes blockquote margin string based on depth and theme." + }, + { + "name": "Renderer::flush_table", + "type": "function", + "purpose": "Flushes buffered table rows by delegating to render_table and writing lines." 
+ } + ], + "semantic_tags": [ + "markdown-rendering", + "parse-events", + "tables", + "code-highlighting", + "wrapping" + ], + "handles_entities": [ + "ParseEvent", + "code blocks", + "tables", + "lists" + ], + "key_behaviors": [ + "renders parsed markdown events to terminal with styling and wrapping", + "buffers tables and code blocks for proper rendering" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/repair.rs": { + "short_description": "Repairs malformed markdown lines (embedded closing fences) before parsing", + "category": "SOURCE_CODE", + "description": "Provides repair_line that normalizes lines to work around parser limitations, particularly splitting embedded closing fences found at end of code lines when inside a code block. It prevents mis-parsing when closing fence tokens share a line with code content and includes tests to validate behavior. This preprocessing improves robustness of streaming parsing.", + "key_constructs": [ + { + "name": "repair_line", + "type": "function", + "purpose": "Repairs a line given current ParseState, splitting embedded closing fences when inside code blocks.", + "callers": [ + { + "file": "crates/forge_markdown_stream/src/lib.rs", + "line": 40, + "context": "pub use repair::repair_line;" + }, + { + "file": "crates/forge_markdown_stream/src/lib.rs", + "line": 85, + "context": "for repaired in repair_line(&line, self.parser.state()) {" + }, + { + "file": "crates/forge_markdown_stream/src/lib.rs", + "line": 100, + "context": "for repaired in repair_line(&self.line_buffer, self.parser.state()) {" + } + ] + }, + { + "name": "split_embedded_fence", + "type": "function", + "purpose": "Detects and splits lines that end with ``` or ~~~ preceded by non-whitespace so they become separate fence lines." 
+ } + ], + "semantic_tags": [ + "markdown-repair", + "preprocessing", + "code-fence", + "parser-compat" + ], + "handles_entities": [], + "key_behaviors": [ + "normalizes lines containing embedded code fences so parser can process them correctly" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/table.rs": { + "short_description": "Table renderer that computes column widths, wraps cells and preserves ANSI", + "category": "SOURCE_CODE", + "description": "Renders tables with box-drawing borders by computing visible widths, shrinking columns to fit max width, wrapping cell content while preserving ANSI escape sequences and handling Unicode widths. It exposes render_table and internal wrap/split_word_at_width utilities and contains extensive tests for wrapping, margins and unicode. This enables pretty terminal table output for markdown tables.", + "key_constructs": [ + { + "name": "render_table", + "type": "function", + "purpose": "Main function that formats rows into boxed table lines honoring margins, widths and styling.", + "callers": [ + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 13, + "context": "use crate::table::render_table;" + }, + { + "file": "crates/forge_markdown_stream/src/renderer.rs", + "line": 105, + "context": "let lines = render_table(&rows, &margin, &self.theme, self.width);" + } + ] + }, + { + "name": "wrap", + "type": "function", + "purpose": "Word-wraps a cell's rendered content into lines of a given visible width while preserving ANSI sequences." + }, + { + "name": "split_word_at_width", + "type": "function", + "purpose": "Splits a long word at a visible width boundary while preserving escape sequences." 
+ } + ], + "semantic_tags": [ + "table-rendering", + "wrapping", + "ansi", + "unicode-width", + "formatting" + ], + "handles_entities": [ + "table rows", + "table cells" + ], + "key_behaviors": [ + "renders markdown tables with borders and wrapped cells", + "preserves ANSI coloring and handles unicode widths" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_markdown_stream/src/utils.rs": { + "short_description": "Terminal theme detection utilities (dark or light)", + "category": "SOURCE_CODE", + "description": "Defines ThemeMode enum and detect_theme_mode which queries the terminal colorsaurus to determine whether the terminal is in light or dark mode, defaulting to dark on error. The result is used by theming code throughout the markdown renderer to pick color themes appropriately.", + "key_constructs": [ + { + "name": "ThemeMode", + "type": "class", + "purpose": "Enum representing Dark or Light terminal theme modes.", + "callers": [ + { + "file": "crates/forge_markdown_stream/src/code.rs", + "line": 9, + "context": "use crate::utils::{ThemeMode, detect_theme_mode};" + }, + { + "file": "crates/forge_markdown_stream/src/code.rs", + "line": 17, + "context": "theme_mode: ThemeMode," + }, + { + "file": "crates/forge_markdown_stream/src/code.rs", + "line": 38, + "context": "ThemeMode::Dark => \"base16-ocean.dark\"," + }, + { + "file": "crates/forge_markdown_stream/src/code.rs", + "line": 39, + "context": "ThemeMode::Light => \"InspiredGitHub\"," + }, + { + "file": "crates/forge_markdown_stream/src/theme.rs", + "line": 274, + "context": "use crate::utils::{ThemeMode, detect_theme_mode};" + }, + { + "file": "crates/forge_markdown_stream/src/theme.rs", + "line": 277, + "context": "ThemeMode::Light => Self::light()," + }, + { + "file": "crates/forge_markdown_stream/src/theme.rs", + "line": 278, + "context": "ThemeMode::Dark => Self::dark()," + } + ] + }, + { + "name": "detect_theme_mode", + "type": "function", + "purpose": 
"Queries terminal_colorsaurus for the current theme mode and maps it to ThemeMode.", + "callers": [ + { + "file": "crates/forge_markdown_stream/src/code.rs", + "line": 9, + "context": "use crate::utils::{ThemeMode, detect_theme_mode};" + }, + { + "file": "crates/forge_markdown_stream/src/code.rs", + "line": 25, + "context": "theme_mode: detect_theme_mode()," + }, + { + "file": "crates/forge_markdown_stream/src/theme.rs", + "line": 274, + "context": "use crate::utils::{ThemeMode, detect_theme_mode};" + }, + { + "file": "crates/forge_markdown_stream/src/theme.rs", + "line": 276, + "context": "match detect_theme_mode() {" + } + ] + } + ], + "semantic_tags": [ + "theme-detection", + "terminal", + "styling", + "colors" + ], + "handles_entities": [ + "theme mode" + ], + "key_behaviors": [ + "detects terminal theme mode (dark or light) for renderer theming" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/agent_definition.rs": { + "short_description": "Agent config deserialization, validation, and conversion to domain Agent", + "category": "SOURCE_CODE", + "description": "Defines AgentDefinition, the on-disk/config representation of an agent, with serde schema and setters. 
Provides into_agent to apply default provider/model and convert into the domain Agent, and includes unit tests validating numeric parameter deserialization (temperature, top_p, top_k, max_tokens).", + "key_constructs": [ + { + "name": "AgentDefinition", + "type": "class", + "purpose": "Struct modelling agent configuration stored on disk and deserialized from files", + "callers": [ + { + "file": "crates/forge_repo/src/agent.rs", + "line": 9, + "context": "use crate::agent_definition::AgentDefinition;" + }, + { + "file": "crates/forge_repo/src/agent.rs", + "line": 46, + "context": "pub(crate) async fn load_agents(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_repo/src/agent.rs", + "line": 51, + "context": "async fn load_all_agents(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_repo/src/agent.rs", + "line": 70, + "context": "async fn init_default(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_repo/src/agent.rs", + "line": 82, + "context": "async fn init_agent_dir(&self, dir: &std::path::Path) -> anyhow::Result> {" + }, + { + "file": "crates/forge_repo/src/agent.rs", + "line": 111, + "context": "fn resolve_agent_conflicts(agents: Vec) -> Vec {" + }, + { + "file": "crates/forge_repo/src/agent.rs", + "line": 111, + "context": "fn resolve_agent_conflicts(agents: Vec) -> Vec {" + }, + { + "file": "crates/forge_repo/src/agent.rs", + "line": 115, + "context": "let mut agent_map: HashMap = HashMap::new();" + }, + { + "file": "crates/forge_repo/src/agent.rs", + "line": 128, + "context": ") -> anyhow::Result>" + }, + { + "file": "crates/forge_repo/src/agent.rs", + "line": 145, + "context": "fn parse_agent_file(content: &str) -> Result {" + }, + { + "file": "crates/forge_repo/src/agent.rs", + "line": 148, + "context": "let result = gray_matter.parse::(content)?;" + } + ] + }, + { + "name": "AgentDefinition::into_agent", + "type": "function", + "purpose": "Converts AgentDefinition into a domain Agent, applying default provider and 
model" + }, + { + "name": "tests::test_temperature_validation", + "type": "function", + "purpose": "Unit test ensuring temperature values validate on deserialize" + }, + { + "name": "tests::test_top_p_validation", + "type": "function", + "purpose": "Unit test ensuring top_p values validate on deserialize" + }, + { + "name": "tests::test_top_k_validation", + "type": "function", + "purpose": "Unit test ensuring top_k values validate on deserialize" + }, + { + "name": "tests::test_max_tokens_validation", + "type": "function", + "purpose": "Unit test ensuring max_tokens values validate on deserialize" + } + ], + "semantic_tags": [ + "configuration", + "serialization", + "validation", + "agent", + "defaults" + ], + "handles_entities": [ + "Agent" + ], + "key_behaviors": [ + "deserializes agent configuration from files", + "validates numeric model parameters", + "converts config into runtime Agent applying defaults" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fuzzy_search.rs": { + "short_description": "gRPC-backed implementation of a fuzzy search repository", + "category": "SOURCE_CODE", + "description": "Implements ForgeFuzzySearchRepository which calls a remote gRPC service to perform fuzzy searches. 
It constructs a FuzzySearchRequest, calls the ForgeService gRPC API, and maps proto matches into domain SearchMatch results.", + "key_constructs": [ + { + "name": "ForgeFuzzySearchRepository", + "type": "class", + "purpose": "Repository implementation that delegates fuzzy searches to a gRPC service", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 30, + "context": "use crate::fuzzy_search::ForgeFuzzySearchRepository;" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 51, + "context": "fuzzy_search_repository: Arc>," + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 84, + "context": "let fuzzy_search_repository = Arc::new(ForgeFuzzySearchRepository::new(infra.clone()));" + } + ] + }, + { + "name": "ForgeFuzzySearchRepository::new", + "type": "function", + "purpose": "Constructor accepting infra that provides a gRPC channel" + }, + { + "name": "FuzzySearchRepository::fuzzy_search", + "type": "function", + "purpose": "Async method implementation that issues the gRPC request and converts proto results" + } + ], + "semantic_tags": [ + "grpc", + "search", + "fuzzy-search", + "networking", + "proto" + ], + "handles_entities": [ + "SearchMatch" + ], + "key_behaviors": [ + "performs fuzzy search via remote gRPC service", + "converts proto response into domain search matches" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_select/src/confirm.rs": { + "short_description": "Builder and prompt logic for yes/no confirmation prompts", + "category": "SOURCE_CODE", + "description": "Provides ConfirmBuilder which constructs and runs interactive yes/no prompts, supports setting a default, handles cancellation, and loops until a valid response is entered. 
It uses InputBuilder for text input and normalizes input into Option semantics for confirm/deny/cancel.", + "key_constructs": [ + { + "name": "ConfirmBuilder", + "type": "class", + "purpose": "Holds prompt message and optional default for confirmation prompts", + "callers": [ + { + "file": "crates/forge_select/src/widget.rs", + "line": 1, + "context": "use crate::confirm::ConfirmBuilder;" + }, + { + "file": "crates/forge_select/src/widget.rs", + "line": 27, + "context": "pub fn confirm(message: impl Into) -> ConfirmBuilder {" + }, + { + "file": "crates/forge_select/src/widget.rs", + "line": 28, + "context": "ConfirmBuilder { message: message.into(), default: None }" + } + ] + }, + { + "name": "ConfirmBuilder::with_default", + "type": "function", + "purpose": "Sets the default response used when the user submits empty input" + }, + { + "name": "ConfirmBuilder::prompt", + "type": "function", + "purpose": "Runs the interactive prompt loop, parses Y/N responses and returns Option" + } + ], + "semantic_tags": [ + "cli", + "prompts", + "input", + "confirmation", + "user-interaction" + ], + "handles_entities": [], + "key_behaviors": [ + "prompts for yes/no confirmation", + "parses user responses and returns confirmation or cancellation" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_select/src/widget.rs": { + "short_description": "Factory for selection and prompt builders (fzf-based)", + "category": "SOURCE_CODE", + "description": "Provides ForgeWidget as a central factory that creates SelectBuilder, ConfirmBuilder, InputBuilder, and MultiSelectBuilder instances with consistent defaults. 
It standardizes interactive selection UI usage (backed by fzf/external builders) across the codebase.", + "key_constructs": [ + { + "name": "ForgeWidget", + "type": "class", + "purpose": "Stateless entrypoint for creating various prompt/select builders", + "callers": [ + { + "file": "crates/forge_infra/src/inquire.rs", + "line": 3, + "context": "use forge_select::ForgeWidget;" + }, + { + "file": "crates/forge_infra/src/inquire.rs", + "line": 31, + "context": "self.prompt(move || ForgeWidget::input(&question).allow_empty(true).prompt())" + }, + { + "file": "crates/forge_infra/src/inquire.rs", + "line": 45, + "context": "self.prompt(move || ForgeWidget::select(&message, options).prompt())" + }, + { + "file": "crates/forge_infra/src/inquire.rs", + "line": 59, + "context": "self.prompt(move || ForgeWidget::multi_select(&message, options).prompt())" + }, + { + "file": "crates/forge_select/src/select.rs", + "line": 242, + "context": "use crate::ForgeWidget;" + }, + { + "file": "crates/forge_select/src/select.rs", + "line": 246, + "context": "let builder = ForgeWidget::select(\"Test\", vec![\"a\", \"b\", \"c\"]);" + }, + { + "file": "crates/forge_select/src/select.rs", + "line": 253, + "context": "let builder = ForgeWidget::confirm(\"Confirm?\");" + }, + { + "file": "crates/forge_select/src/select.rs", + "line": 260, + "context": "ForgeWidget::select(\"Test\", vec![\"apple\", \"banana\", \"cherry\"]).with_initial_text(\"app\");" + }, + { + "file": "crates/forge_select/src/select.rs", + "line": 267, + "context": "ForgeWidget::select(\"Test\", vec![\"apple\", \"banana\", \"cherry\"]).with_initial_text(\"ban\");" + }, + { + "file": "crates/forge_select/src/select.rs", + "line": 322, + "context": "let builder = ForgeWidget::select(\"Test\", vec![\"a\", \"b\", \"c\"]).with_starting_cursor(2);" + }, + { + "file": "crates/forge_select/src/lib.rs", + "line": 10, + "context": "pub use widget::ForgeWidget;" + }, + { + "file": "crates/forge_main/src/update.rs", + "line": 6, + 
"context": "use forge_select::ForgeWidget;" + }, + { + "file": "crates/forge_main/src/update.rs", + "line": 30, + "context": "let answer = forge_select::ForgeWidget::confirm(" + }, + { + "file": "crates/forge_main/src/update.rs", + "line": 53, + "context": "let answer = ForgeWidget::confirm(format!(" + }, + { + "file": "crates/forge_select/src/multi.rs", + "line": 102, + "context": "use crate::ForgeWidget;" + }, + { + "file": "crates/forge_select/src/multi.rs", + "line": 106, + "context": "let builder = ForgeWidget::multi_select(\"Select options:\", vec![\"a\", \"b\", \"c\"]);" + }, + { + "file": "crates/forge_select/src/input.rs", + "line": 111, + "context": "use crate::ForgeWidget;" + }, + { + "file": "crates/forge_select/src/input.rs", + "line": 115, + "context": "let builder = ForgeWidget::input(\"Enter name\");" + }, + { + "file": "crates/forge_select/src/input.rs", + "line": 122, + "context": "let builder = ForgeWidget::input(\"Enter key\").with_default(\"mykey\");" + }, + { + "file": "crates/forge_select/src/input.rs", + "line": 128, + "context": "let builder = ForgeWidget::input(\"Enter\").allow_empty(true);" + } + ] + }, + { + "name": "ForgeWidget::select", + "type": "function", + "purpose": "Creates a SelectBuilder for single-choice fuzzy select" + }, + { + "name": "ForgeWidget::confirm", + "type": "function", + "purpose": "Creates a ConfirmBuilder for yes/no prompts" + }, + { + "name": "ForgeWidget::input", + "type": "function", + "purpose": "Creates an InputBuilder for text input" + }, + { + "name": "ForgeWidget::multi_select", + "type": "function", + "purpose": "Creates a MultiSelectBuilder for multiple selections" + } + ], + "semantic_tags": [ + "cli", + "selection", + "fzf", + "prompts", + "ui" + ], + "handles_entities": [], + "key_behaviors": [ + "constructs prompt/select builders for interactive CLI", + "centralizes fzf-driven selection usage" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_services/src/conversation.rs": { + "short_description": "Service adapter implementing conversation management over a repository", + "category": "SOURCE_CODE", + "description": "ForgeConversationService wraps a ConversationRepository and implements the forge_app::ConversationService trait. It provides operations to modify, find, upsert, list, get last, and delete conversations while handling repository interactions and error mapping.", + "key_constructs": [ + { + "name": "ForgeConversationService", + "type": "class", + "purpose": "Service struct holding an Arc to a ConversationRepository", + "callers": [ + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 19, + "context": "use crate::conversation::ForgeConversationService;" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 60, + "context": "conversation_service: Arc>," + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 116, + "context": "let conversation_service = Arc::new(ForgeConversationService::new(infra.clone()));" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 207, + "context": "type ConversationService = ForgeConversationService;" + } + ] + }, + { + "name": "ForgeConversationService::new", + "type": "function", + "purpose": "Constructs the conversation service with a repository" + }, + { + "name": "ConversationService::modify_conversation", + "type": "function", + "purpose": "Loads a conversation, applies a closure mutation, and upserts it back" + }, + { + "name": "ConversationService::find_conversation", + "type": "function", + "purpose": "Retrieves a conversation by ID" + }, + { + "name": "ConversationService::upsert_conversation", + "type": "function", + "purpose": "Persists a conversation to the repository" + } + ], + "semantic_tags": [ + "conversations", + "persistence", + "service", + "repository", + "crud" + ], + "handles_entities": [ + "Conversation", + "ConversationId" + ], + 
"key_behaviors": [ + "modifies and persists conversations", + "retrieves and deletes conversations", + "lists and returns last conversation" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/error.rs": { + "short_description": "Error enum for authentication and provider flows", + "category": "SOURCE_CODE", + "description": "Defines an Error enum covering various authentication flow failures (initiation, timeout, denial, polling, refresh, validation, etc.) used throughout provider/auth code. It centralizes error messages and types for consistent error handling in auth flows.", + "key_constructs": [ + { + "name": "Error", + "type": "class", + "purpose": "Enum enumerating authentication- and provider-related error variants", + "callers": [ + { + "file": "crates/forge_services/src/fd.rs", + "line": 10, + "context": "use crate::error::Error as ServiceError;" + } + ] + } + ], + "semantic_tags": [ + "authentication", + "errors", + "provider", + "validation" + ], + "handles_entities": [], + "key_behaviors": [ + "represents authentication and provider flow errors for call sites" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/fd_git.rs": { + "short_description": "Git-backed file discovery using `git ls-files`", + "category": "SOURCE_CODE", + "description": "FsGit implements FileDiscovery by running `git ls-files` via the provided CommandInfra to enumerate repository-tracked files. 
It returns resolved PathBufs after filtering and errors when git is not available or returns no files.", + "key_constructs": [ + { + "name": "FsGit", + "type": "class", + "purpose": "Struct that holds infra to run git commands for discovery", + "callers": [ + { + "file": "crates/forge_services/src/fd.rs", + "line": 11, + "context": "use crate::fd_git::FsGit;" + }, + { + "file": "crates/forge_services/src/fd.rs", + "line": 106, + "context": "git: FsGit," + }, + { + "file": "crates/forge_services/src/fd.rs", + "line": 114, + "context": "Self { git: FsGit::new(infra.clone()), walker: FdWalker::new(infra) }" + } + ] + }, + { + "name": "FsGit::new", + "type": "function", + "purpose": "Constructor accepting infra" + }, + { + "name": "FsGit::git_ls_files", + "type": "function", + "purpose": "Executes 'git ls-files' in a directory and returns path lines" + }, + { + "name": "FileDiscovery::discover (impl for FsGit)", + "type": "function", + "purpose": "Implements discovery to return resolved file paths" + } + ], + "semantic_tags": [ + "file-discovery", + "git", + "workspace", + "command-execution" + ], + "handles_entities": [], + "key_behaviors": [ + "discovers repository files via git ls-files", + "resolves and filters discovered file paths" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/fd_walker.rs": { + "short_description": "Filesystem walker fallback for file discovery", + "category": "SOURCE_CODE", + "description": "FdWalker uses a WalkerInfra implementation to walk a directory tree and collect non-directory file paths as a fallback when git-based discovery is unavailable. 
It configures the walker, gathers file paths and delegates resolution/filtering.", + "key_constructs": [ + { + "name": "FdWalker", + "type": "class", + "purpose": "Struct that holds a WalkerInfra for filesystem walking", + "callers": [ + { + "file": "crates/forge_services/src/fd.rs", + "line": 12, + "context": "use crate::fd_walker::FdWalker;" + }, + { + "file": "crates/forge_services/src/fd.rs", + "line": 107, + "context": "walker: FdWalker," + }, + { + "file": "crates/forge_services/src/fd.rs", + "line": 114, + "context": "Self { git: FsGit::new(infra.clone()), walker: FdWalker::new(infra) }" + } + ] + }, + { + "name": "FdWalker::new", + "type": "function", + "purpose": "Constructor accepting a WalkerInfra" + }, + { + "name": "FileDiscovery::discover (impl for FdWalker)", + "type": "function", + "purpose": "Performs directory walk and returns resolved file paths" + } + ], + "semantic_tags": [ + "file-discovery", + "filesystem", + "walker", + "workspace" + ], + "handles_entities": [], + "key_behaviors": [ + "walks workspace directories to discover files", + "returns filtered and resolved file paths for sync/indexing" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/sync.rs": { + "short_description": "Engine to sync workspace files with remote index (hash/compare/upload/delete)", + "category": "SOURCE_CODE", + "description": "WorkspaceSyncEngine orchestrates discovery, hashing, diffing with remote indexes, deletion of stale files and batched uploads of new/modified files while emitting SyncProgress events. 
It streams file reads and uploads to keep memory bounded, aggregates failures, and returns a final status or error if sync failed.", + "key_constructs": [ + { + "name": "FileReadError", + "type": "class", + "purpose": "Error wrapper carrying a file path and source error for read failures" + }, + { + "name": "canonicalize_path", + "type": "function", + "purpose": "Canonicalizes a path and attaches context on failure", + "callers": [ + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 17, + "context": "use crate::sync::{WorkspaceSyncEngine, canonicalize_path};" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 68, + "context": "let path = canonicalize_path(path)?;" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 138, + "context": "let canonical_path = canonicalize_path(path)?;" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 187, + "context": "let path = canonicalize_path(path)?;" + } + ] + }, + { + "name": "extract_failed_statuses", + "type": "function", + "purpose": "Converts file read errors into FileStatus entries with Failed status" + }, + { + "name": "WorkspaceSyncEngine", + "type": "class", + "purpose": "Primary engine struct coordinating sync operations", + "callers": [ + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 17, + "context": "use crate::sync::{WorkspaceSyncEngine, canonicalize_path};" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 79, + "context": "WorkspaceSyncEngine::new(" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 364, + "context": "WorkspaceSyncEngine::new(" + } + ] + }, + { + "name": "WorkspaceSyncEngine::run", + "type": "function", + "purpose": "Performs full sync flow: discover, hash, compare, delete, upload, and emit progress" + }, + { + "name": "WorkspaceSyncEngine::compute_status", + "type": "function", + "purpose": "Computes per-file sync status by hashing 
local files and comparing with remote" + }, + { + "name": "WorkspaceSyncEngine::upload_files", + "type": "function", + "purpose": "Returns a stream that reads file contents on-demand and uploads them in parallel" + }, + { + "name": "WorkspaceSyncEngine::read_hashes", + "type": "function", + "purpose": "Streams file hashes by reading files in batches while dropping content after hashing" + } + ], + "semantic_tags": [ + "sync", + "workspace", + "upload", + "hashing", + "streaming" + ], + "handles_entities": [ + "FileHash", + "FileStatus", + "SyncProgress", + "WorkspaceId", + "CodeBase" + ], + "key_behaviors": [ + "synchronizes local workspace state with remote index", + "uploads new/modified files and deletes stale ones", + "emits incremental sync progress and reports failures" + ], + "insights": [ + { + "type": "performance", + "category": "Performance", + "title": "Two-pass workspace sync to reduce memory and improve throughput", + "problem": "Previous workspace sync read all file contents into memory to compute hashes and build upload batches, causing high memory usage and poor performance on large repositories.", + "root_cause": "Single-pass read that retained all file content until upload; lack of streaming hash-first approach.", + "solution": "Introduce WorkspaceSyncEngine with a two-pass strategy: pass 1 streams file contents and yields FileHash values (hashes only), compute plan against remote; pass 2 uploads files by reading content on-demand immediately before upload in bounded batches. 
Also changed delete/upload path representations to PathBuf and used streaming/buffer_unordered to limit concurrency footprint.", + "commits": [ + "b924d21" + ], + "constructs": [ + "WorkspaceSyncEngine", + "WorkspaceSyncEngine::run", + "WorkspaceSyncEngine::read_hashes", + "WorkspaceSyncEngine::upload_files", + "WorkspaceSyncEngine::delete_files", + "read_hashes", + "upload_files", + "fetch_remote_hashes" + ] + }, + { + "type": "refactoring", + "category": "Coupling", + "title": "Split sync logic out of context_engine and expose compute_status/run", + "problem": "context_engine previously contained large sync logic making it monolithic and hard to test/optimize.", + "root_cause": "Single-file sync implementation was tightly coupled with file read/discovery and infra.", + "solution": "Extracted sync engine into its own module (crates/forge_services/src/sync.rs). context_engine delegates to WorkspaceSyncEngine::run and compute_status. This separation enables targeted perf improvements and easier unit testing.", + "commits": [ + "b924d21" + ], + "constructs": [ + "WorkspaceSyncEngine", + "context_engine::sync_codebase_internal (delegation)" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tool_macros/src/lib.rs": { + "short_description": "Proc-macros to derive ToolDescription from docs or external file", + "category": "SOURCE_CODE", + "description": "Provides a procedural macro derive (ToolDescription) and an attribute (tool_description_file) that embed doc comments or an external file's contents at compile time to implement a ToolDescription trait returning a description string. 
It reads either an annotated file path or doc attributes and generates an impl returning the description.", + "key_constructs": [ + { + "name": "tool_description_file", + "type": "function", + "purpose": "Attribute macro marker to point to an external description file" + }, + { + "name": "derive_description", + "type": "function", + "purpose": "Proc-macro derive that generates a ToolDescription impl using doc comments or an external file", + "callers": [ + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 10, + "context": "use forge_tool_macros::ToolDescription;" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 76, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 191, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 214, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 232, + "context": "#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 430, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 442, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 529, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 565, + "context": 
"#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 575, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 582, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 620, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 632, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 664, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 678, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 700, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 711, + "context": "#[derive(Default, Debug, Clone, Serialize, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 727, + "context": "#[derive(Default, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": "crates/forge_domain/src/tools/catalog.rs", + "line": 741, + "context": "#[derive(Default, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + }, + { + "file": 
"crates/forge_domain/src/tools/catalog.rs", + "line": 755, + "context": "#[derive(Default, Deserialize, JsonSchema, ToolDescription, PartialEq)]" + } + ] + } + ], + "semantic_tags": [ + "procedural-macro", + "codegen", + "compile-time", + "tools", + "doc-embedding" + ], + "handles_entities": [], + "key_behaviors": [ + "generates ToolDescription implementations at compile time", + "embeds documentation or file contents into generated code" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/lib.rs": { + "short_description": "Tracker crate public exports and module wiring", + "category": "SOURCE_CODE", + "description": "Re-exports key tracker modules and types (VERSION, Tracker, Event, Guard, init_tracing) and exposes the crate's Result alias. Acts as the public interface and entrypoint for the telemetry/tracking functionality used by other crates.", + "key_constructs": [ + { + "name": "VERSION", + "type": "constant", + "purpose": "Exposed crate version/can-track version identifier" + }, + { + "name": "Tracker", + "type": "class", + "purpose": "Primary tracker dispatcher type re-exported for clients" + }, + { + "name": "Result", + "type": "constant", + "purpose": "Alias for crate error Result type" + }, + { + "name": "Event", + "type": "class", + "purpose": "Telemetry event type exported for use" + }, + { + "name": "Guard", + "type": "class", + "purpose": "Logging guard exported for lifecycle management" + }, + { + "name": "init_tracing", + "type": "function", + "purpose": "Function re-export used to initialize tracing/logging" + } + ], + "semantic_tags": [ + "telemetry", + "tracking", + "logging", + "exports", + "api" + ], + "handles_entities": [ + "Event" + ], + "key_behaviors": [ + "exposes tracking API and utilities to other crates", + "initializes and configures telemetry logging" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/log.rs": { + 
"short_description": "Tracing/logging initialization with optional PostHog writer", + "category": "SOURCE_CODE", + "description": "Sets up a JSON-formatted tracing_subscriber that either writes to a daily rolling file or forwards logs asynchronously to PostHog via a custom PostHogWriter when tracking is enabled. Provides init_tracing to configure filters, layers and returns a Guard to keep writer worker alive.", + "key_constructs": [ + { + "name": "init_tracing", + "type": "function", + "purpose": "Initializes tracing subscriber with JSON formatter and chosen writer", + "callers": [ + { + "file": "crates/forge_tracker/src/lib.rs", + "line": 13, + "context": "pub use log::{Guard, init_tracing};" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 236, + "context": "_guard: forge_tracker::init_tracing(env.log_path(), TRACKER.clone())?," + } + ] + }, + { + "name": "prepare_writer", + "type": "function", + "purpose": "Selects between PostHogWriter or rolling file appender and returns writer, guard, and env filter" + }, + { + "name": "Guard", + "type": "class", + "purpose": "Wrapper around WorkerGuard to hold appender lifetime", + "callers": [ + { + "file": "crates/forge_tracker/src/lib.rs", + "line": 13, + "context": "pub use log::{Guard, init_tracing};" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 113, + "context": "_guard: forge_tracker::Guard," + } + ] + }, + { + "name": "PostHogWriter", + "type": "class", + "purpose": "Implements std::io::Write to asynchronously dispatch log payloads to PostHog via Tracker" + } + ], + "semantic_tags": [ + "logging", + "tracing", + "posthog", + "telemetry", + "async" + ], + "handles_entities": [ + "Event" + ], + "key_behaviors": [ + "initializes JSON structured logging", + "forwards traces to PostHog when allowed", + "maintains writer lifecycle via Guard" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/hooks/mod.rs": { + "short_description": "Re-exports 
hook handlers used by the application", + "category": "SOURCE_CODE", + "description": "Module that groups and re-exports various hook handlers (compaction, doom loop detection, title generation, tracing) so other crates can import them from a single location. It organizes lifecycle and background handlers used by the application logic.", + "key_constructs": [ + { + "name": "CompactionHandler", + "type": "class", + "purpose": "Re-exported handler for automatic context compaction" + }, + { + "name": "DoomLoopDetector", + "type": "class", + "purpose": "Re-exported handler to detect runaway agent loops" + }, + { + "name": "TitleGenerationHandler", + "type": "class", + "purpose": "Re-exported handler for generating conversation titles" + }, + { + "name": "TracingHandler", + "type": "class", + "purpose": "Re-exported handler for tracing integration" + } + ], + "semantic_tags": [ + "hooks", + "handlers", + "lifecycle", + "monitoring", + "tracing" + ], + "handles_entities": [], + "key_behaviors": [ + "provides common hook handlers for agent lifecycle and background tasks" + ], + "insights": [ + { + "type": "refactoring", + "category": "Code Organization", + "title": "Expose TitleGenerationHandler from hooks module", + "problem": "Title generation was relocated to its own file but not exported from hooks mod.", + "root_cause": "Refactor split title generation into separate module; hooks/mod.rs needed to export the type.", + "solution": "Added mod title_generation and pub use TitleGenerationHandler in hooks/mod.rs so app.rs and Hook builder can compose the handler.", + "commit": [ + "0cf8736" + ], + "constructs": [ + "TitleGenerationHandler (export)" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/draft_release_update_job.rs": { + "short_description": "GitHub Actions job to update the release draft via release-drafter", + "category": "BUILD", + "description": "Constructs a gh_workflow::Job that runs the Auto 
Labeler and Release Drafter actions to update a release draft, conditioned on pull_request_target events. Encapsulates the workflow steps and required environment setup for release draft automation.", + "key_constructs": [], + "semantic_tags": [ + "ci", + "github-actions", + "release", + "automation" + ], + "handles_entities": [ + "Release draft" + ], + "key_behaviors": [ + "updates release draft using release-drafter and autolabeler" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/release_build_job.rs": { + "short_description": "Builder for matrix release build job (cross/target builds)", + "category": "BUILD", + "description": "Defines ReleaseBuilderJob holding version/release_id and converts it into a gh_workflow::Job that sets up protoc, cross toolchains, rust targets, and performs matrixed builds. Optionally copies and uploads built binaries to a GitHub Release when release_id is provided.", + "key_constructs": [], + "semantic_tags": [ + "ci", + "release", + "build-matrix", + "cross-compilation", + "workflow" + ], + "handles_entities": [ + "Release artifacts", + "Built binaries" + ], + "key_behaviors": [ + "generates matrix job to build release binaries", + "optionally uploads binaries to GitHub Release" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/workflows/autofix.rs": { + "short_description": "Generates GitHub Actions workflow to autofix formatting and lint issues", + "category": "SOURCE_CODE", + "description": "Builds and emits an autofix GitHub Actions workflow (autofix.yml) that runs rustfmt, clippy and an autofix action on pushes and PRs to main. 
It composes job/step definitions, events and concurrency settings and writes the generated workflow file.", + "key_constructs": [ + { + "name": "generate_autofix_workflow", + "type": "function", + "purpose": "Constructs jobs, events and workflow metadata then generates autofix.yml", + "callers": [ + { + "file": "crates/forge_ci/tests/ci.rs", + "line": 30, + "context": "workflow::generate_autofix_workflow();" + } + ] + }, + { + "name": "lint_fix_job", + "type": "constant", + "purpose": "Defines the job that checks out code, installs deps, runs toolchain/components, formatting and clippy, and invokes autofix action" + } + ], + "semantic_tags": [ + "ci", + "github-actions", + "workflow-generation", + "linting", + "automation" + ], + "handles_entities": [ + "workflow file" + ], + "key_behaviors": [ + "generates an autofix CI workflow file", + "configures linting and formatting CI job", + "sets workflow trigger events and concurrency" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/auth/credentials.rs": { + "short_description": "Defines credential types and utilities for provider authentication", + "category": "SOURCE_CODE", + "description": "Models authentication credentials (API keys, OAuth tokens, Google ADC) and provides constructors and helpers to inspect token expiry and OAuth config. 
It serializes/deserializes credential details and centralizes refresh logic for different auth types.", + "key_constructs": [ + { + "name": "AuthCredential", + "type": "struct", + "purpose": "Represents a provider credential with id, auth details and optional URL params, and provides constructors for API key, OAuth and Google ADC" + }, + { + "name": "AuthDetails", + "type": "enum", + "purpose": "Encodes the underlying auth mechanism (ApiKey, GoogleAdc, OAuth, OAuthWithApiKey)" + }, + { + "name": "OAuthTokens", + "type": "struct", + "purpose": "Holds access/refresh tokens and expiration time with helpers to check expiry/refresh needs" + } + ], + "semantic_tags": [ + "authentication", + "oauth", + "api-keys", + "token-management", + "serialization" + ], + "handles_entities": [ + "AccessToken", + "RefreshToken", + "ApiKey", + "OAuthConfig", + "AuthCredential" + ], + "key_behaviors": [ + "create provider credentials from API keys or OAuth tokens", + "determine whether credentials need refresh", + "expose OAuth configuration for credentials" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/transformer/drop_reasoning_details.rs": { + "short_description": "Transformer that strips reasoning details and reasoning config from context", + "category": "SOURCE_CODE", + "description": "Implements a Transformer that removes reasoning_details from all text messages and clears the overall reasoning configuration on a Context. 
Includes tests and snapshots to assert that reasoning data is removed while preserving other message types.", + "key_constructs": [ + { + "name": "DropReasoningDetails", + "type": "struct", + "purpose": "Transformer type that removes per-message reasoning details and clears reasoning config" + }, + { + "name": "transform", + "type": "function", + "purpose": "Performs the in-place mutation of Context to drop reasoning information" + } + ], + "semantic_tags": [ + "transformer", + "context", + "privacy", + "message-processing", + "testing" + ], + "handles_entities": [ + "Context", + "ContextMessage", + "TextMessage", + "ReasoningConfig" + ], + "key_behaviors": [ + "remove internal reasoning details from messages", + "clear reasoning configuration from a context" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/transformer/image_handling.rs": { + "short_description": "Transformer that extracts images from tool outputs into separate image messages", + "category": "SOURCE_CODE", + "description": "Scans tool result values for image payloads, replaces inline image values with placeholder text and appends corresponding user/image message pairs at the end of the Context. 
The module ensures image attachments are surfaced as standalone messages and includes tests covering multiple scenarios.", + "key_constructs": [ + { + "name": "ImageHandling", + "type": "struct", + "purpose": "Transformer that extracts ToolValue::Image into standalone Context messages and placeholders" + }, + { + "name": "transform", + "type": "function", + "purpose": "Iterates tool results, moves images out of outputs and appends image messages to the context", + "callers": [ + { + "file": "crates/forge_domain/src/context.rs", + "line": 873, + "context": "let actual = transformer.transform(fixture);" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 890, + "context": "let actual = transformer.transform(fixture);" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 916, + "context": "let actual = transformer.transform(fixture);" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 933, + "context": "let actual = transformer.transform(fixture);" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 957, + "context": "let actual = transformer.transform(fixture);" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 987, + "context": "let actual = transformer.transform(fixture);" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 1019, + "context": "let actual = transformer.transform(fixture);" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 1037, + "context": "let actual = transformer.transform(fixture);" + } + ] + } + ], + "semantic_tags": [ + "images", + "transformer", + "tool-results", + "attachments", + "message-normalization" + ], + "handles_entities": [ + "Image", + "ToolResult", + "ToolOutput", + "Context" + ], + "key_behaviors": [ + "convert inline tool image outputs into separate attachment messages", + "insert placeholder text where images were removed" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_domain/src/transformer/set_model.rs": { + "short_description": "Transformer that assigns a default model to text messages lacking one", + "category": "SOURCE_CODE", + "description": "Walks the Context's text messages and sets a provided ModelId on any TextMessage that doesn't already specify a model, ensuring a consistent model selection for downstream provider calls. Tests verify preservation of existing models and application to all text message roles.", + "key_constructs": [ + { + "name": "SetModel", + "type": "struct", + "purpose": "Holds the ModelId to apply to text messages" + }, + { + "name": "transform", + "type": "function", + "purpose": "Sets the stored model on each TextMessage that has no model", + "callers": [ + { + "file": "crates/forge_app/src/orch.rs", + "line": 374, + "context": "context = SetModel::new(model_id.clone()).transform(context);" + } + ] + } + ], + "semantic_tags": [ + "model-selection", + "transformer", + "context", + "message-processing" + ], + "handles_entities": [ + "Context", + "TextMessage", + "ModelId" + ], + "key_behaviors": [ + "assign a default model id to text messages missing a model" + ], + "insights": [ + { + "type": "refactoring", + "category": "State Management", + "title": "Set model for all text messages (not only user messages)", + "problem": "Transformer previously only applied model to user messages, leaving assistant/system messages without a model in some flows. That caused model-mismatch when replaying or further processing messages.", + "root_cause": "SetModel transformer had a guard limiting application to Role::User only.", + "solution": "Broaden transformer so it sets model on all text messages that lack a model (user, assistant, system). 
Update tests and snapshots accordingly.", + "lesson_learned": "Model identification should be present on all text messages in a conversation to allow deterministic downstream behavior (reasoning, signature validation, phase tracking).", + "commits": [ + "2991aec" + ], + "constructs": [ + "SetModel::transform" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_domain/src/transformer/snapshots tests" + ], + "source_commits": [ + "2991aec" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/completer/input_completer.rs": { + "short_description": "Provides fuzzy file and command completion for the interactive input", + "category": "SOURCE_CODE", + "description": "Implements a reedline Completer that suggests file path completions using a Walker and fuzzy matching (nucleo) and delegates slash-prefixed commands to a CommandCompleter. It escapes special characters for pattern parsing, scores matches and returns sorted suggestions with formatted values.", + "key_constructs": [ + { + "name": "InputCompleter", + "type": "struct", + "purpose": "Holds Walker, CommandCompleter and a fuzzy Matcher to provide completions" + }, + { + "name": "complete", + "type": "function", + "purpose": "Generates suggestions by delegating to command completer or performing fuzzy file matching and scoring" + }, + { + "name": "escape_for_pattern_parse", + "type": "function", + "purpose": "Escapes special characters that would break pattern parsing for fuzzy matching" + } + ], + "semantic_tags": [ + "completion", + "fuzzy-matching", + "cli", + "file-walking", + "suggestions" + ], + "handles_entities": [ + "file paths" + ], + "key_behaviors": [ + "suggest file path completions with fuzzy ranking", + "delegate command completions when input looks like a command", + "handle special-character escaping in queries" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/zsh/style.rs": 
{ + "short_description": "Helpers to format ZSH prompt escapes with colors and bolding", + "category": "SOURCE_CODE", + "description": "Provides ZshColor constants and ZshStyled wrapper to render strings with ZSH-native prompt escape sequences (e.g., %F{n}, %B/%b). Includes a trait to conveniently create styled wrappers from &str/String and tests for formatting correctness.", + "key_constructs": [ + { + "name": "ZshColor", + "type": "struct", + "purpose": "Represents a 256-color palette value for ZSH prompts and exposes common constants" + }, + { + "name": "ZshStyled", + "type": "struct", + "purpose": "Wraps text with optional color and bold flags and formats ZSH prompt escapes" + }, + { + "name": "ZshStyle", + "type": "trait", + "purpose": "Extension trait to create ZshStyled from &str or String" + } + ], + "semantic_tags": [ + "zsh", + "prompt", + "styling", + "formatting", + "terminal" + ], + "handles_entities": [], + "key_behaviors": [ + "format strings for inclusion in ZSH PROMPT/RPROMPT with colors and bold" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/bedrock_cache.rs": { + "short_description": "Adds Bedrock cache point blocks to converse stream requests", + "category": "SOURCE_CODE", + "description": "Transforms an AWS Bedrock ConverseStreamInput by inserting CachePoint blocks after the first system message (if present) and at the end of the last user/assistant message to enable Bedrock prompt caching. 
Includes tests that validate cache point placement for various conversation shapes.", + "key_constructs": [ + { + "name": "SetCache", + "type": "struct", + "purpose": "Transformer that implements the two-breakpoint caching strategy for Bedrock requests" + }, + { + "name": "transform", + "type": "function", + "purpose": "Mutates ConverseStreamInput to insert SystemContentBlock::CachePoint and ContentBlock::CachePoint at strategic positions" + } + ], + "semantic_tags": [ + "caching", + "aws-bedrock", + "request-adapter", + "transformer" + ], + "handles_entities": [ + "ConverseStreamInput", + "CachePointBlock", + "SystemContentBlock", + "ContentBlock" + ], + "key_behaviors": [ + "insert cache breakpoints into Bedrock request payloads" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/opencode_zen.rs": { + "short_description": "Routes OpenCode Zen calls to appropriate backend per model prefix", + "category": "SOURCE_CODE", + "description": "Provides an OpenCodeZenResponseRepository that selects a backend (Anthropic, OpenAIResponses, Google, or OpenAI) based on model id prefixes and adapts Provider URLs and response types before delegating chat and models calls. 
This acts as an adapter to unify multiple provider-specific implementations under one endpoint.", + "key_constructs": [ + { + "name": "OpenCodeZenResponseRepository", + "type": "struct", + "purpose": "Holds per-backend repositories and exposes chat/models methods that route requests" + }, + { + "name": "get_backend", + "type": "function", + "purpose": "Determines the backend enum based on ModelId prefix" + }, + { + "name": "build_provider", + "type": "function", + "purpose": "Adjusts a Provider (url and response type) to match the chosen backend's expected endpoint" + }, + { + "name": "OpenCodeBackend", + "type": "enum", + "purpose": "Enum enumerating target backends (OpenAI, OpenAIResponses, Anthropic, Google)" + } + ], + "semantic_tags": [ + "provider-routing", + "adapter", + "model-routing", + "http", + "backend-selection" + ], + "handles_entities": [ + "Provider", + "Model", + "ChatContext", + "ModelId" + ], + "key_behaviors": [ + "select backend based on model id", + "adapt provider configuration and delegate chat requests", + "return hardcoded models when present" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/image_read.rs": { + "short_description": "Service to read and validate image files into Image domain objects", + "category": "SOURCE_CODE", + "description": "Implements ImageReadService to read binary image files from disk, enforce absolute paths and size limits, validate file extension to supported image formats, and return a forge_app::domain::Image with appropriate MIME type. 
It uses infra interfaces for file reading and configuration and returns contextual errors for invalid inputs.", + "key_constructs": [ + { + "name": "ForgeImageRead", + "type": "struct", + "purpose": "Service implementation that reads image bytes via infra and converts them to domain Image" + }, + { + "name": "ImageFormat", + "type": "enum", + "purpose": "Enumerates supported image formats and provides MIME type mapping" + }, + { + "name": "read_image", + "type": "function", + "purpose": "Validates path and size, checks extension, reads bytes and returns Image" + } + ], + "semantic_tags": [ + "images", + "file-io", + "validation", + "service", + "mime" + ], + "handles_entities": [ + "Image", + "file path" + ], + "key_behaviors": [ + "read image bytes from disk with validation", + "ensure format is supported and below size limits", + "return Image with correct MIME type" + ], + "insights": [ + { + "type": "feature", + "category": "Security", + "title": "Add image read tool with validation and size limits", + "problem": "No safe mechanism to read image binaries for vision-capable models", + "root_cause": "Existing read tool was text-oriented and rejected binary files", + "solution": "Introduce ForgeImageRead service that validates absolute path, checks env.max_image_size, determines supported format by extension, reads binary via infra, and returns forge_app::domain::Image with MIME type. ImageFormat enum restricts supported formats.", + "commits": [ + "5c86244" + ], + "constructs": [ + "ForgeImageRead::read_image", + "ImageFormat" + ], + "lesson_learned": "Binary tools need explicit validation (absolute path, configured size limits) and format whitelisting to avoid accidental large binary transfers or unsupported formats. Centralize size limit via environment config (FORGE_MAX_IMAGE_SIZE)." 
+ } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/create-plan/validate-all-plans.sh": { + "short_description": "Bash script to run plan validator across all plan files and summarize results", + "category": "CLI", + "description": "Finds all markdown plan files in a plans directory, runs validate-plan.sh on each, tallies pass/fail counts and prints a summary with colored output. This automates bulk validation of plan documents in the repository.", + "key_constructs": [], + "semantic_tags": [ + "validation", + "plans", + "bash", + "automation", + "reporting" + ], + "handles_entities": [ + "plan files" + ], + "key_behaviors": [ + "iterate and validate all plan markdown files", + "summarize validation results with counts" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + ".forge/skills/create-plan/validate-plan.sh": { + "short_description": "Validates a single plan markdown file for structure and content quality", + "category": "CLI", + "description": "Performs detailed checks on a plan file including filename pattern, required sections, checkbox usage in Implementation Plan, absence of code blocks, task quality heuristics and warnings, outputting errors/warnings and appropriate exit codes. 
It enforces formatting and quality guidelines for project plans.", + "key_constructs": [], + "semantic_tags": [ + "validation", + "markdown", + "quality", + "heuristics", + "bash" + ], + "handles_entities": [ + "plan file" + ], + "key_behaviors": [ + "validate plan filename and date components", + "ensure required markdown sections exist", + "check implementation plan uses checkboxes and task quality" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/evals/semantic_search_quality/llm_judge.ts": { + "short_description": "TS script that uses Gemini to judge semantic search query and result quality", + "category": "CLI", + "description": "Uses a Vertex AI model (Gemini) to evaluate semantic-search queries and returned results against a detailed Zod schema, producing structured scores and feedback. It reads a context JSON, extracts sem_search calls, builds a prompt, invokes the LLM to generate a validated Evaluation object and formats the results for CLI output.", + "key_constructs": [], + "semantic_tags": [ + "evaluation", + "llm", + "semantic-search", + "vertex-ai", + "zod" + ], + "handles_entities": [ + "semantic search queries", + "evaluation result" + ], + "key_behaviors": [ + "invoke an LLM to evaluate semantic search quality", + "parse and format structured evaluation results" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/evals/semantic_search_quality/run_eval.sh": { + "short_description": "Shell orchestration to verify sem_search usage and optionally run LLM judge", + "category": "CLI", + "description": "Checks a context JSON for sem_search tool usage, extracts intent and expected/avoid file types, verifies Google Cloud authentication and conditionally runs the tsx llm_judge.ts script to perform the semantic search quality evaluation. 
It returns appropriate exit codes and messages depending on findings and authentication state.", + "key_constructs": [], + "semantic_tags": [ + "orchestration", + "evaluation", + "authentication", + "shell-script", + "semantic-search" + ], + "handles_entities": [ + "context file" + ], + "key_behaviors": [ + "validate presence of sem_search calls", + "run LLM-based judge when cloud auth exists", + "report pass/fail status of evaluation" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/evals/semantic_search_quality/run_tests.sh": { + "short_description": "Test harness that runs multiple semantic search eval scenarios using run_eval.sh", + "category": "CLI", + "description": "Executes a suite of evaluation scenarios (good/bad/missing sem_search) using run_eval.sh with predetermined context files and tallies pass/fail/skip counts to assert expected outcomes. It's a convenience harness for validating the evaluation tooling and conditional logic.", + "key_constructs": [], + "semantic_tags": [ + "testing", + "harness", + "evaluation", + "shell-script", + "semantic-search" + ], + "handles_entities": [ + "evaluation context files" + ], + "key_behaviors": [ + "run multiple evaluation scenarios and summarize results", + "assert expected pass/fail behavior for eval cases" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/init_conversation_metrics.rs": { + "short_description": "Initialize conversation metrics with a start timestamp", + "category": "SOURCE_CODE", + "description": "Provides a small transformer that stamps a Conversation's metrics.started_at with the current time (converted to UTC). 
Includes a unit test verifying timestamp assignment within a tolerance.", + "key_constructs": [ + { + "name": "InitConversationMetrics", + "type": "struct", + "purpose": "Holds current local time and applies it to a Conversation.metrics.started_at" + }, + { + "name": "InitConversationMetrics::apply", + "type": "function", + "purpose": "Sets the conversation.metrics.started_at to the stored time converted to UTC" + }, + { + "name": "test_sets_started_at", + "type": "function", + "purpose": "Unit test ensuring started_at is populated and time conversion is correct" + } + ], + "semantic_tags": [ + "conversation", + "metrics", + "time", + "initialization", + "telemetry" + ], + "handles_entities": [ + "Conversation" + ], + "key_behaviors": [ + "sets conversation start timestamp when a conversation is initialized" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/set_conversation_id.rs": { + "short_description": "Populate conversation context with its own ID", + "category": "SOURCE_CODE", + "description": "Contains a small utility that ensures a Conversation's context has its conversation_id set to the conversation's id. 
Includes a unit test validating the context mutation.", + "key_constructs": [ + { + "name": "SetConversationId", + "type": "struct", + "purpose": "Marker/transform type used to set the conversation_id inside a Conversation's context" + }, + { + "name": "SetConversationId::apply", + "type": "function", + "purpose": "Takes a Conversation, ensures it has a Context, and sets Context.conversation_id to conversation.id" + }, + { + "name": "test_sets_conversation_id", + "type": "function", + "purpose": "Unit test confirming conversation.context.conversation_id is populated" + } + ], + "semantic_tags": [ + "conversation", + "context", + "identity", + "state-mutation" + ], + "handles_entities": [ + "Conversation", + "Context" + ], + "key_behaviors": [ + "writes the conversation's id into its context for downstream consumers" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/user.rs": { + "short_description": "User, plan and usage domain types for auth/plan data", + "category": "SOURCE_CODE", + "description": "Defines lightweight serializable domain types representing an authenticated user (AuthProviderId, User), subscription plan (Plan) and usage accounting (UsageInfo, UserUsage). 
Includes helper methods like Plan::is_upgradeable and simple conversions for AuthProviderId.", + "key_constructs": [ + { + "name": "AuthProviderId", + "type": "struct", + "purpose": "Newtype wrapper for a provider identifier string with constructors" + }, + { + "name": "User", + "type": "struct", + "purpose": "Represents a user record referencing an AuthProviderId" + }, + { + "name": "Plan::is_upgradeable", + "type": "function", + "purpose": "Determines whether a plan type string is considered upgradeable (free or pro)" + }, + { + "name": "UsageInfo", + "type": "struct", + "purpose": "Holds current, limit and remaining usage counters and optional reset interval" + } + ], + "semantic_tags": [ + "authentication", + "billing", + "usage", + "serialization", + "user" + ], + "handles_entities": [ + "User", + "Plan", + "UsageInfo", + "UserUsage" + ], + "key_behaviors": [ + "models user identity and plan/usage information for auth and billing display" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/walker.rs": { + "short_description": "Filesystem walker configuration and walked file representation", + "category": "SOURCE_CODE", + "description": "Defines Walker configuration controls for traversing a filesystem (depth, breadth, file size/total limits and binary skipping) along with conservative and unlimited presets. 
Also defines WalkedFile, a simple file/dir descriptor with helpers such as is_dir().", + "key_constructs": [ + { + "name": "Walker", + "type": "struct", + "purpose": "Configuration for directory walking operations with setters and default presets" + }, + { + "name": "Walker::conservative", + "type": "function", + "purpose": "Returns a Walker with conservative limits suitable for safe indexing" + }, + { + "name": "WalkedFile", + "type": "struct", + "purpose": "Represents a discovered file or directory, including relative path and size" + }, + { + "name": "WalkedFile::is_dir", + "type": "function", + "purpose": "Determines whether a WalkedFile represents a directory" + } + ], + "semantic_tags": [ + "filesystem", + "traversal", + "configuration", + "limits", + "io" + ], + "handles_entities": [ + "WalkedFile" + ], + "key_behaviors": [ + "configures and represents filesystem traversal parameters and results" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/release_matrix.rs": { + "short_description": "Defines CI release build matrix entries and JSON conversion", + "category": "SOURCE_CODE", + "description": "Provides MatrixEntry and ReleaseMatrix types that enumerate target triples, binary names/paths and cross flags used by CI release jobs. 
Implements a conversion to a serde_json::Value to embed the matrix in CI workflows.", + "key_constructs": [ + { + "name": "MatrixEntry", + "type": "struct", + "purpose": "Describes a single CI build matrix row (os, target, binary info, cross flag)" + }, + { + "name": "ReleaseMatrix", + "type": "struct", + "purpose": "Collection wrapper for MatrixEntry items with convenience constructors" + }, + { + "name": "impl From for Value", + "type": "function", + "purpose": "Serializes the release matrix into JSON with an include key for CI usage" + } + ], + "semantic_tags": [ + "ci", + "release", + "build-matrix", + "serialization", + "packaging" + ], + "handles_entities": [], + "key_behaviors": [ + "provides release build matrix data for CI job generation and serialization" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_display/src/diff.rs": { + "short_description": "Generate colored inline diffs with line numbers and counts", + "category": "SOURCE_CODE", + "description": "Uses the similar crate to compute inline diffs between two strings and formats a colored, line-numbered output while counting added/removed lines. 
Provides a DiffResult containing the formatted diff and metrics, plus unit tests and snapshots ensuring proper formatting and width calculation.", + "key_constructs": [ + { + "name": "DiffFormat::format", + "type": "function", + "purpose": "Computes and formats a colored inline diff between old and new text, producing a DiffResult" + }, + { + "name": "DiffResult", + "type": "struct", + "purpose": "Encapsulates the formatted diff string and counts of lines added/removed" + }, + { + "name": "Line", + "type": "struct", + "purpose": "Helper for printing line numbers with fixed width formatting" + } + ], + "semantic_tags": [ + "diff", + "text-formatting", + "terminal-output", + "color", + "comparison" + ], + "handles_entities": [], + "key_behaviors": [ + "renders human-friendly colored diffs with line numbers and change counts" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_display/src/grep.rs": { + "short_description": "Format ripgrep-style search results with optional regex highlighting", + "category": "SOURCE_CODE", + "description": "Parses ripgrep-like lines (path:line:content) and formats them grouped by file with aligned line numbers and optional regex-based highlighting. 
Falls back to printing raw file paths when input isn't in expected format and includes comprehensive tests and snapshots.", + "key_constructs": [ + { + "name": "GrepFormat", + "type": "struct", + "purpose": "Holds lines and optional Regex and exposes formatting methods to produce colorized output" + }, + { + "name": "ParsedLine::parse", + "type": "function", + "purpose": "Parses a single 'path:line:content' entry, validating numeric line numbers" + }, + { + "name": "GrepFormat::format", + "type": "function", + "purpose": "Groups parsed entries by path, computes padding, and returns formatted string with highlighting" + } + ], + "semantic_tags": [ + "search", + "formatting", + "regex", + "ripgrep", + "highlighting" + ], + "handles_entities": [], + "key_behaviors": [ + "formats and highlights grep-like search results for terminal display" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/chat_request.rs": { + "short_description": "ChatRequest wrapper for events tied to a conversation", + "category": "SOURCE_CODE", + "description": "Defines ChatRequest, a serializable container combining an Event with a ConversationId for sending chat events through the system. 
Includes a small constructor for ergonomic creation.", + "key_constructs": [ + { + "name": "ChatRequest", + "type": "struct", + "purpose": "Encapsulates an Event and the ConversationId that it belongs to" + }, + { + "name": "ChatRequest::new", + "type": "function", + "purpose": "Constructor creating a ChatRequest from an Event and ConversationId" + } + ], + "semantic_tags": [ + "messaging", + "conversation", + "serialization", + "events" + ], + "handles_entities": [ + "ChatRequest", + "Event", + "ConversationId" + ], + "key_behaviors": [ + "packages chat events together with their conversation identifier for transport" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/file_operation.rs": { + "short_description": "Record metrics for file operations performed by tools", + "category": "SOURCE_CODE", + "description": "Defines FileOperation which tracks lines added/removed, an optional content hash, and the ToolKind that performed the operation. 
Provides a constructor that initializes default metrics and setters-based mutability for callers to record file change details.", + "key_constructs": [ + { + "name": "FileOperation", + "type": "struct", + "purpose": "Holds per-file change metrics and metadata (lines added/removed, content hash, tool)" + }, + { + "name": "FileOperation::new", + "type": "function", + "purpose": "Creates a new FileOperation for a given ToolKind with zeroed counters" + } + ], + "semantic_tags": [ + "file-metrics", + "audit", + "tools", + "serialization" + ], + "handles_entities": [ + "FileOperation" + ], + "key_behaviors": [ + "captures and serializes per-file change statistics produced by tools" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/http_config.rs": { + "short_description": "HTTP client configuration including TLS and HTTP/2 options", + "category": "SOURCE_CODE", + "description": "Defines TlsVersion and TlsBackend enums and a comprehensive HttpConfig struct with defaults for timeouts, pooling, redirects, TLS, and HTTP/2 settings. 
Implements parsing/display semantics for TLS versions and includes tests ensuring defaults and serialization behavior.", + "key_constructs": [ + { + "name": "TlsVersion", + "type": "enum", + "purpose": "Represents supported TLS protocol versions with Display and FromStr implementations" + }, + { + "name": "TlsBackend", + "type": "enum", + "purpose": "Specifies TLS backend selection (Default or Rustls) with Display" + }, + { + "name": "HttpConfig", + "type": "struct", + "purpose": "Configuration holder for HTTP client timeouts, pooling, TLS, and HTTP/2 behavior with sensible defaults" + } + ], + "semantic_tags": [ + "http", + "tls", + "configuration", + "networking", + "security" + ], + "handles_entities": [ + "HttpConfig", + "TlsVersion", + "TlsBackend" + ], + "key_behaviors": [ + "provides configurable HTTP client settings including TLS constraints and HTTP/2 tuning" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/image.rs": { + "short_description": "Image data URL helper for base64-encoded images", + "category": "SOURCE_CODE", + "description": "Represents an Image as a data URL (data:{mime};base64,{data}) with helpers to construct from raw bytes or base64 strings and to extract the raw base64 payload. 
Useful for serializing images inline when transporting or storing image data.", + "key_constructs": [ + { + "name": "Image", + "type": "struct", + "purpose": "Holds a data URL and mime type for an image and provides construction helpers" + }, + { + "name": "Image::new_bytes", + "type": "function", + "purpose": "Encodes raw bytes to base64 and constructs a data URL Image" + }, + { + "name": "Image::data", + "type": "function", + "purpose": "Returns the base64 payload without the data URL prefix" + } + ], + "semantic_tags": [ + "image", + "base64", + "serialization", + "data-url" + ], + "handles_entities": [ + "Image" + ], + "key_behaviors": [ + "creates and reads base64 data-URL representations of images" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/max_tokens.rs": { + "short_description": "Validated max_tokens newtype with serde support", + "category": "SOURCE_CODE", + "description": "Provides MaxTokens, a newtype wrapping a u32 that enforces a valid range (1..=100000), implements serialization/deserialization, Display, Deref and tests to ensure correctness. 
Used to validate and transport model generation token limits safely across the system.", + "key_constructs": [ + { + "name": "MaxTokens", + "type": "struct", + "purpose": "Newtype enforcing validity rules for model max_tokens values" + }, + { + "name": "MaxTokens::new", + "type": "function", + "purpose": "Creates a MaxTokens instance performing range validation" + }, + { + "name": "impl Serialize/Deserialize for MaxTokens", + "type": "implementation", + "purpose": "Serializes as a u32 and validates on deserialization to prevent invalid values" + } + ], + "semantic_tags": [ + "validation", + "model-config", + "serialization", + "tokens" + ], + "handles_entities": [ + "MaxTokens" + ], + "key_behaviors": [ + "validates and (de)serializes model max_tokens configuration values" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/mcp_servers.rs": { + "short_description": "Cache structure for MCP servers and their tool definitions", + "category": "SOURCE_CODE", + "description": "Defines McpServers, a serializable cache mapping ServerName to a vector of ToolDefinition and tracking failures. 
Exposes getters, constructors and an IntoIterator implementation to iterate successful servers.", + "key_constructs": [ + { + "name": "McpServers", + "type": "struct", + "purpose": "Stores a map of loaded MCP servers to their tool definitions and a map of failures" + }, + { + "name": "McpServers::new", + "type": "function", + "purpose": "Constructs a McpServers instance from server and failure maps" + }, + { + "name": "impl IntoIterator for McpServers", + "type": "implementation", + "purpose": "Allows iterating over successful servers and their tool lists" + } + ], + "semantic_tags": [ + "mcp", + "caching", + "tools", + "serialization", + "failures" + ], + "handles_entities": [ + "McpServers", + "ToolDefinition", + "ServerName" + ], + "key_behaviors": [ + "stores and exposes MCP server tool definitions and records failures for lookup" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/merge.rs": { + "short_description": "Helpers for merging/overwriting collections and options", + "category": "SOURCE_CODE", + "description": "Provides small merge utility functions (overwrite, option replacement, hashmap insert merge) and a vec::unify_by_key function that merges items implementing Merge+Key by unique id. 
Also defines a Key trait used by unify_by_key.", + "key_constructs": [ + { + "name": "std::overwrite", + "type": "function", + "purpose": "Overwrites base value with other value" + }, + { + "name": "vec::unify_by_key", + "type": "function", + "purpose": "Merge two Vecs of items implementing Merge+Key by key, merging existing and appending new ones" + }, + { + "name": "option", + "type": "function", + "purpose": "Replace base Option with other if other is Some", + "callers": [ + { + "file": "crates/forge_domain/src/compact/compact_config.rs", + "line": 31, + "context": "#[merge(strategy = crate::merge::option)]" + }, + { + "file": "crates/forge_domain/src/compact/compact_config.rs", + "line": 36, + "context": "#[merge(strategy = crate::merge::option)]" + }, + { + "file": "crates/forge_domain/src/compact/compact_config.rs", + "line": 41, + "context": "#[merge(strategy = crate::merge::option)]" + }, + { + "file": "crates/forge_domain/src/compact/compact_config.rs", + "line": 46, + "context": "#[merge(strategy = crate::merge::option)]" + }, + { + "file": "crates/forge_domain/src/compact/compact_config.rs", + "line": 52, + "context": "#[merge(strategy = crate::merge::option)]" + }, + { + "file": "crates/forge_domain/src/compact/compact_config.rs", + "line": 57, + "context": "#[merge(strategy = crate::merge::option)]" + } + ] + }, + { + "name": "Key", + "type": "trait", + "purpose": "Trait requiring an identifying key used by vec::unify_by_key" + } + ], + "semantic_tags": [ + "merge", + "collections", + "utilities", + "deduplication" + ], + "handles_entities": [], + "key_behaviors": [ + "merges maps, options and vectors by configured strategies and keys" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/migration.rs": { + "short_description": "Result type for credential migration from env to file", + "category": "SOURCE_CODE", + "description": "Defines MigrationResult representing the outcome when migrating provider 
credentials from environment variables into a credentials file, including path and list of migrated providers. Includes a simple constructor and a unit test to validate the struct fields.", + "key_constructs": [ + { + "name": "MigrationResult", + "type": "struct", + "purpose": "Represents the path to credentials file and which providers were migrated" + }, + { + "name": "MigrationResult::new", + "type": "function", + "purpose": "Creates a new MigrationResult with provided path and migrated providers" + }, + { + "name": "test_migration_result", + "type": "function", + "purpose": "Unit test asserting MigrationResult fields are set correctly" + } + ], + "semantic_tags": [ + "migration", + "credentials", + "providers", + "io" + ], + "handles_entities": [ + "MigrationResult", + "ProviderId" + ], + "key_behaviors": [ + "represents and reports which provider credentials were migrated to a file" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/point.rs": { + "short_description": "Domain model for embedding points and search queries", + "category": "SOURCE_CODE", + "description": "Defines a Point container holding content, an embedding vector, and timestamps plus a lightweight PointId. Also provides a Query type used to request nearest neighbors and utility methods to create and map points. 
This module is used by embedding and semantic search components to represent indexed items and search requests.", + "key_constructs": [ + { + "name": "PointId", + "type": "class", + "purpose": "Newtype wrapping a UUID used as stable identifier for points" + }, + { + "name": "Point", + "type": "class", + "purpose": "Generic container for content, embedding vector, id, and created/updated timestamps" + }, + { + "name": "Point::new", + "type": "function", + "purpose": "Creates a new Point with current timestamps and generated id" + }, + { + "name": "Point::try_map", + "type": "function", + "purpose": "Transforms a Point's content with fallible mapping while preserving metadata" + }, + { + "name": "Query", + "type": "class", + "purpose": "Represents a nearest-neighbor query with embedding, optional limit and distance" + } + ], + "semantic_tags": [ + "embeddings", + "vector-search", + "identifiers", + "timestamps", + "domain-model" + ], + "handles_entities": [ + "Point" + ], + "key_behaviors": [ + "creates embedding points with metadata", + "maps point content safely", + "constructs search queries for nearest-neighbor lookup" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/skill.rs": { + "short_description": "Represents reusable skills/prompts and their metadata", + "category": "SOURCE_CODE", + "description": "Defines the Skill struct which models a named reusable prompt/skill with optional file path, command content, description, and resource files. Provides a constructor and derives setters/serialization for easy loading and modification. 
Skills are used by agents/tools to expose prewritten prompts and resources in the system.", + "key_constructs": [ + { + "name": "Skill", + "type": "class", + "purpose": "Model of a reusable skill with name, optional path, prompt command, description and resources", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 169, + "context": "async fn get_skills(&self) -> Result>;" + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 5, + "context": "use forge_domain::Skill;" + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 15, + "context": "cache: OnceCell>," + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 27, + "context": "async fn fetch_skill(&self, skill_name: String) -> anyhow::Result {" + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 41, + "context": "async fn list_skills(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 48, + "context": "async fn get_or_load_skills(&self) -> anyhow::Result<&Vec> {" + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 62, + "context": "use forge_domain::Skill;" + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 68, + "context": "skills: Vec," + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 73, + "context": "async fn load_skills(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 82, + "context": "Skill::new(\"pdf\", \"Handle PDF files\", \"PDF handling skill\").path(\"/skills/pdf.md\")," + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 83, + "context": "Skill::new(\"xlsx\", \"Handle Excel files\", \"Excel handling skill\")" + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 95, + "context": "Skill::new(\"pdf\", \"Handle PDF 
files\", \"PDF handling skill\").path(\"/skills/pdf.md\");" + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 103, + "context": "Skill::new(\"pdf\", \"Handle PDF files\", \"PDF handling skill\").path(\"/skills/pdf.md\")," + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 121, + "context": "Skill::new(\"pdf\", \"Handle PDF files\", \"PDF handling skill\").path(\"/skills/pdf.md\")," + }, + { + "file": "crates/forge_services/src/tool_services/skill.rs", + "line": 122, + "context": "Skill::new(\"xlsx\", \"Handle Excel files\", \"Excel handling skill\")" + }, + { + "file": "crates/forge_domain/src/system_context.rs", + "line": 5, + "context": "use crate::{Agent, Environment, File, Model, Skill};" + }, + { + "file": "crates/forge_domain/src/system_context.rs", + "line": 117, + "context": "pub skills: Vec," + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 58, + "context": "pub async fn get_skills_internal(&self) -> Result> {" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 307, + "context": "async fn get_skills(&self) -> Result> {" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 15, + "context": "ProviderRepository, ResultStream, SearchMatch, Skill, SkillRepository, Snapshot," + } + ] + }, + { + "name": "Skill::new", + "type": "function", + "purpose": "Constructs a Skill with required fields and default empty resources" + } + ], + "semantic_tags": [ + "skills", + "prompts", + "serialization", + "configuration", + "resources" + ], + "handles_entities": [ + "Skill" + ], + "key_behaviors": [ + "creates and modifies reusable skill definitions", + "serializes/deserializes skill metadata" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/snapshot.rs": { + "short_description": "File snapshot metadata and path generation utilities", + "category": "SOURCE_CODE", + "description": "Provides SnapshotId (UUID) and 
Snapshot structs to capture a file snapshot's id, timestamp and canonical path. Includes creation logic validating absolute paths, hashing the path for storage, and building a snapshot filename/path using the timestamp. This is used to persist or reference on-disk snapshots of files for indexing or history.", + "key_constructs": [ + { + "name": "SnapshotId", + "type": "class", + "purpose": "Newtype wrapper around a UUID for snapshot identity" + }, + { + "name": "Snapshot::create", + "type": "function", + "purpose": "Create a Snapshot from a PathBuf, canonicalizing path and recording timestamp" + }, + { + "name": "Snapshot::path_hash", + "type": "function", + "purpose": "Generate a stable hash string from snapshot path for storage partitioning" + }, + { + "name": "Snapshot::snapshot_path", + "type": "function", + "purpose": "Build a timestamped snapshot filename and optional cwd-rooted path" + }, + { + "name": "Snapshot", + "type": "class", + "purpose": "Holds id, timestamp and original file path metadata", + "callers": [ + { + "file": "crates/forge_repo/src/fs_snap.rs", + "line": 5, + "context": "use forge_domain::{Environment, Snapshot, SnapshotRepository};" + }, + { + "file": "crates/forge_repo/src/fs_snap.rs", + "line": 22, + "context": "async fn insert_snapshot(&self, file_path: &Path) -> Result {" + }, + { + "file": "crates/forge_snaps/src/service.rs", + "line": 4, + "context": "use forge_domain::Snapshot;" + }, + { + "file": "crates/forge_snaps/src/service.rs", + "line": 22, + "context": "pub async fn create_snapshot(&self, path: PathBuf) -> Result {" + }, + { + "file": "crates/forge_snaps/src/service.rs", + "line": 23, + "context": "let snapshot = Snapshot::create(path)?;" + }, + { + "file": "crates/forge_snaps/src/service.rs", + "line": 58, + "context": "let snapshot = Snapshot::create(path.clone())?;" + }, + { + "file": "crates/forge_snaps/src/service.rs", + "line": 127, + "context": "async fn create_snapshot(&self) -> Result {" + }, + { + "file": 
"crates/forge_repo/src/forge_repo.rs", + "line": 15, + "context": "ProviderRepository, ResultStream, SearchMatch, Skill, SkillRepository, Snapshot," + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 103, + "context": "async fn insert_snapshot(&self, file_path: &Path) -> anyhow::Result {" + }, + { + "file": "crates/forge_domain/src/repo.rs", + "line": 9, + "context": "SearchMatch, Skill, Snapshot, WorkspaceAuth, WorkspaceId," + }, + { + "file": "crates/forge_domain/src/repo.rs", + "line": 25, + "context": "async fn insert_snapshot(&self, file_path: &Path) -> Result;" + } + ] + } + ], + "semantic_tags": [ + "snapshot", + "filesystem", + "timestamps", + "hashing", + "metadata" + ], + "handles_entities": [ + "Snapshot", + "File" + ], + "key_behaviors": [ + "creates snapshot metadata from file paths", + "computes hashed storage paths and timestamped filenames" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/suggestion.rs": { + "short_description": "Simple struct modeling a usage suggestion", + "category": "SOURCE_CODE", + "description": "Defines the Suggestion struct with a use_case and suggestion text for serializable exchange. 
It's a lightweight data transfer object used to present recommended actions or examples to users or agents.", + "key_constructs": [ + { + "name": "Suggestion", + "type": "class", + "purpose": "Holds a use case label and suggestion text for consumption by UI or agents" + } + ], + "semantic_tags": [ + "suggestions", + "dto", + "serialization", + "ux" + ], + "handles_entities": [ + "Suggestion" + ], + "key_behaviors": [ + "represents suggested actions or examples for presentation" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/temperature.rs": { + "short_description": "Validated temperature newtype for model randomness configuration", + "category": "SOURCE_CODE", + "description": "Implements a Temperature newtype wrapping an f32 with validation (0.0..=2.0), Display, Deref and serde (de)serialization behavior. Includes safe and unchecked constructors plus unit tests to ensure validation and JSON behavior. This type centralizes model temperature validation and consistent serialization across the system.", + "key_constructs": [ + { + "name": "Temperature", + "type": "class", + "purpose": "Newtype wrapping an f32 with validation for allowed temperature range", + "callers": [ + { + "file": "crates/forge_app/src/apply_tunable_parameters.rs", + "line": 42, + "context": "Temperature, ToolDefinition, TopK, TopP," + }, + { + "file": "crates/forge_app/src/apply_tunable_parameters.rs", + "line": 60, + "context": ".temperature(Temperature::new(0.7).unwrap())" + }, + { + "file": "crates/forge_app/src/apply_tunable_parameters.rs", + "line": 76, + "context": "assert_eq!(ctx.temperature, Some(Temperature::new(0.7).unwrap()));" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 17, + "context": "use crate::temperature::Temperature;" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 418, + "context": "pub temperature: Option," + }, + { + "file": "crates/forge_repo/src/agent_definition.rs", + 
"line": 4, + "context": "SystemContext, Temperature, Template, ToolName, TopK, TopP," + }, + { + "file": "crates/forge_repo/src/agent_definition.rs", + "line": 80, + "context": "pub temperature: Option," + } + ] + }, + { + "name": "Temperature::new", + "type": "function", + "purpose": "Validated constructor returning Err on out-of-range values" + }, + { + "name": "Temperature::new_unchecked", + "type": "function", + "purpose": "Unchecked constructor for trusted values (debug-asserts correctness)" + }, + { + "name": "Serialize/Deserialize impls for Temperature", + "type": "function", + "purpose": "Custom (de)serialization to ensure precision and range validation" + } + ], + "semantic_tags": [ + "validation", + "model-parameters", + "serde", + "temperature", + "serialization" + ], + "handles_entities": [ + "Temperature" + ], + "key_behaviors": [ + "validates temperature values", + "provides consistent JSON (de)serialization for model config" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/top_k.rs": { + "short_description": "Validated top_k newtype for model token filtering", + "category": "SOURCE_CODE", + "description": "Defines TopK as a newtype around u32 with validation for allowed range (1..=1000), Display, Deref and serde implementations. Includes constructors, unchecked creation for trusted values and unit tests for validation/serialization. 
This ensures consistent handling of the top_k model parameter across the codebase.", + "key_constructs": [ + { + "name": "TopK", + "type": "class", + "purpose": "Newtype encapsulating a u32 with range validation for top_k", + "callers": [ + { + "file": "crates/forge_app/src/dto/anthropic/transforms/reasoning_transform.rs", + "line": 22, + "context": "use forge_domain::{Context, ReasoningConfig, TopK, TopP, Transformer};" + }, + { + "file": "crates/forge_app/src/dto/anthropic/transforms/reasoning_transform.rs", + "line": 29, + "context": ".top_k(TopK::new(50).unwrap())" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 18, + "context": "use crate::top_k::TopK;" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 422, + "context": "pub top_k: Option," + }, + { + "file": "crates/forge_repo/src/agent_definition.rs", + "line": 4, + "context": "SystemContext, Temperature, Template, ToolName, TopK, TopP," + }, + { + "file": "crates/forge_repo/src/agent_definition.rs", + "line": 103, + "context": "pub top_k: Option," + } + ] + }, + { + "name": "TopK::new", + "type": "function", + "purpose": "Create a validated TopK or return an error if out of range" + }, + { + "name": "Serialize/Deserialize impls for TopK", + "type": "function", + "purpose": "Custom serde handling to enforce validation during (de)serialization" + } + ], + "semantic_tags": [ + "model-parameters", + "validation", + "serde", + "top_k", + "token-filtering" + ], + "handles_entities": [ + "TopK" + ], + "key_behaviors": [ + "validates top_k values", + "ensures safe serde (de)serialization of top_k config" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/top_p.rs": { + "short_description": "Validated top_p newtype for nucleus sampling configuration", + "category": "SOURCE_CODE", + "description": "Provides TopP as a newtype over f32 with range validation (0.0..=1.0), Deref, Display and custom serde implementations. 
Offers safe and unchecked constructors and tests to verify validation and JSON behavior. TopP centralizes handling of the top_p model parameter to ensure correctness throughout the system.", + "key_constructs": [ + { + "name": "TopP", + "type": "class", + "purpose": "Newtype encapsulating an f32 with validation for top_p parameter", + "callers": [ + { + "file": "crates/forge_app/src/dto/anthropic/transforms/reasoning_transform.rs", + "line": 22, + "context": "use forge_domain::{Context, ReasoningConfig, TopK, TopP, Transformer};" + }, + { + "file": "crates/forge_app/src/dto/anthropic/transforms/reasoning_transform.rs", + "line": 30, + "context": ".top_p(TopP::new(0.8).unwrap())" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 19, + "context": "use crate::top_p::TopP;" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 420, + "context": "pub top_p: Option," + }, + { + "file": "crates/forge_repo/src/agent_definition.rs", + "line": 4, + "context": "SystemContext, Temperature, Template, ToolName, TopK, TopP," + }, + { + "file": "crates/forge_repo/src/agent_definition.rs", + "line": 92, + "context": "pub top_p: Option," + }, + { + "file": "crates/forge_domain/src/agent.rs", + "line": 12, + "context": "Temperature, Template, ToolDefinition, ToolName, TopK, TopP," + } + ] + }, + { + "name": "TopP::new", + "type": "function", + "purpose": "Validated constructor returning Err for out-of-range values" + }, + { + "name": "Serialize/Deserialize impls for TopP", + "type": "function", + "purpose": "Custom (de)serialization enforcing precision and validation" + } + ], + "semantic_tags": [ + "model-parameters", + "validation", + "serde", + "top_p", + "sampling" + ], + "handles_entities": [ + "TopP" + ], + "key_behaviors": [ + "validates top_p values", + "provides consistent JSON (de)serialization for nucleus sampling setting" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/xml.rs": { + 
"short_description": "Helpers to extract or remove XML-style tag content from text", + "category": "SOURCE_CODE", + "description": "Contains utility functions to extract inner content of a specific XML-like tag and to remove all tags whose names start with a given prefix (using regex). These helpers are used to sanitize or parse agent/tool messages and to strip tool markup from text before display or processing. Unit tests cover multiple tag/edge cases and nested scenarios.", + "key_constructs": [ + { + "name": "extract_tag_content", + "type": "function", + "purpose": "Finds and returns trimmed content between a matching opening and closing tag", + "callers": [ + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 8, + "context": "use crate::xml::extract_tag_content;" + }, + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 207, + "context": "match extract_tag_content(input, \"forge_tool_call\") {" + } + ] + }, + { + "name": "remove_tag_with_prefix", + "type": "function", + "purpose": "Removes complete tags (including their content) whose names start with a given prefix" + } + ], + "semantic_tags": [ + "xml-parsing", + "text-processing", + "regex", + "sanitization", + "markup-handling" + ], + "handles_entities": [], + "key_behaviors": [ + "extracts content from XML-style tags", + "removes tagged markup by prefix to sanitize text" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_fs/src/binary_detection.rs": { + "short_description": "Heuristic BOM and zero-byte based binary file detection", + "category": "SOURCE_CODE", + "description": "Provides functions to inspect a file's first bytes to detect BOM-based encodings (UTF-8/16) and heuristically determine whether a file is binary by scanning for zero bytes and endian patterns. Exposes an async is_binary(path) function for callers and an internal is_binary_internal buffer-based detector used in tests. 
This helps the system avoid treating binary files as text when reading or indexing workspace files.", + "key_constructs": [ + { + "name": "Encoding", + "type": "class", + "purpose": "Enum enumerating detected BOM encodings (Utf8WithBom, Utf16BE, Utf16LE)" + }, + { + "name": "Encoding::detect", + "type": "function", + "purpose": "Detects BOM signatures in a buffer to identify UTF encodings" + }, + { + "name": "is_binary", + "type": "function", + "purpose": "Async function that opens a file, reads up to 512 bytes and returns if it seems binary", + "callers": [ + { + "file": "crates/forge_fs/src/lib.rs", + "line": 20, + "context": "pub use crate::binary_detection::is_binary;" + }, + { + "file": "crates/forge_fs/src/meta.rs", + "line": 13, + "context": "is_binary(path).await" + } + ] + }, + { + "name": "is_binary_internal", + "type": "function", + "purpose": "Buffer-based heuristic that inspects zero bytes and possible UTF-16 patterns" + } + ], + "semantic_tags": [ + "binary-detection", + "bom", + "encoding", + "utf16", + "file-io" + ], + "handles_entities": [ + "File" + ], + "key_behaviors": [ + "detects if a file is binary vs text using BOM and zero-byte heuristics", + "identifies common UTF encodings to avoid false positives" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_fs/src/error.rs": { + "short_description": "Filesystem-related error types for ForgeFS", + "category": "SOURCE_CODE", + "description": "Defines a domain error enum for ForgeFS operations covering binary-file detection, range/index errors, UTF-8 validation and I/O errors. 
Uses thiserror for clear error messages and conversion from common error types to unify error handling across file operations.", + "key_constructs": [ + { + "name": "Error", + "type": "class", + "purpose": "Enum of file operation errors including BinaryFileNotSupported, Index errors, Utf8ValidationFailed and IoError", + "callers": [ + { + "file": "crates/forge_fs/src/lib.rs", + "line": 21, + "context": "pub use crate::error::Error;" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 7, + "context": "use crate::error::Error;" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 32, + "context": "return Err(Error::StartGreaterThanEnd { start: start_line, end: end_line }.into());" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 41, + "context": "return Err(Error::IndexStartingWithZero { start: start_line, end: end_line }.into());" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 46, + "context": "return Err(Error::BinaryFileNotSupported(file_type).into());" + }, + { + "file": "crates/forge_fs/src/read_range.rs", + "line": 77, + "context": "Error::StartBeyondFileSize { start: start_line, total: total_lines }.into()," + } + ] + } + ], + "semantic_tags": [ + "errors", + "file-io", + "validation", + "utf8" + ], + "handles_entities": [], + "key_behaviors": [ + "represents and formats filesystem operation errors consistently" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_fs/src/file_size.rs": { + "short_description": "Async helper to get file size from metadata", + "category": "SOURCE_CODE", + "description": "Adds an async file_size method to the ForgeFS impl that retrieves file metadata and returns the file length without reading its contents. 
It centralizes error context for metadata retrieval so callers can use a simple API to get file sizes efficiently.", + "key_constructs": [ + { + "name": "ForgeFS::file_size", + "type": "function", + "purpose": "Async function that returns the file length by reading filesystem metadata" + } + ], + "semantic_tags": [ + "file-io", + "metadata", + "async", + "filesystem" + ], + "handles_entities": [ + "File" + ], + "key_behaviors": [ + "retrieves file size efficiently via metadata" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_fs/src/is_binary.rs": { + "short_description": "File type detection using infer crate and sample reads", + "category": "SOURCE_CODE", + "description": "Implements ForgeFS methods to determine if a file is text/document or binary by sampling up to 8192 bytes and using the infer crate to get a matcher/type. Returns a boolean and a human-readable description. This variant is used within ForgeFS to classify files for reading/indexing and contains tests exercising different file types.", + "key_constructs": [ + { + "name": "ForgeFS::is_binary_path", + "type": "function", + "purpose": "Test-only helper that opens a path and delegates to is_binary to determine type" + }, + { + "name": "ForgeFS::is_binary", + "type": "function", + "purpose": "Crate-private async function that reads a file sample and uses infer to classify text vs binary" + } + ], + "semantic_tags": [ + "file-detection", + "infer", + "mime-detection", + "async", + "file-io" + ], + "handles_entities": [ + "File" + ], + "key_behaviors": [ + "classifies files as text/document or binary with a descriptive mime/type", + "reads a sample from file for type inference" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_fs/src/meta.rs": { + "short_description": "Filesystem metadata and simple helpers on ForgeFS", + "category": "SOURCE_CODE", + "description": "Adds simple metadata helpers to ForgeFS including 
exists, is_binary_file, is_file and an async read_dir wrapper with error context. These convenience wrappers centralize common filesystem checks and directory reading with consistent error messages for callers.", + "key_constructs": [ + { + "name": "ForgeFS::exists", + "type": "function", + "purpose": "Synchronous check for path existence" + }, + { + "name": "ForgeFS::is_binary_file", + "type": "function", + "purpose": "Async wrapper delegating to is_binary to determine binary status" + }, + { + "name": "ForgeFS::is_file", + "type": "function", + "purpose": "Synchronous check whether path is a file" + }, + { + "name": "ForgeFS::read_dir", + "type": "function", + "purpose": "Async wrapper around tokio::fs::read_dir adding contextual errors" + } + ], + "semantic_tags": [ + "filesystem", + "metadata", + "directory", + "helpers", + "async" + ], + "handles_entities": [ + "File", + "Directory" + ], + "key_behaviors": [ + "checks path existence and type", + "reads directories with contextual errors", + "delegates binary detection for files" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_fs/src/read.rs": { + "short_description": "File reading helpers (bytes and UTF-8 variants) for ForgeFS", + "category": "SOURCE_CODE", + "description": "Provides async methods on ForgeFS to read files as raw bytes, as UTF-8 lossy strings, or as owned strings using tokio helpers. 
Each function adds contextual error messages for easier debugging and consistent behavior across callers.", + "key_constructs": [ + { + "name": "ForgeFS::read", + "type": "function", + "purpose": "Reads file contents into a Vec with contextual error messages" + }, + { + "name": "ForgeFS::read_utf8", + "type": "function", + "purpose": "Reads a file and returns a UTF-8 lossy String" + }, + { + "name": "ForgeFS::read_to_string", + "type": "function", + "purpose": "Reads a file directly into a String using tokio::fs::read_to_string" + } + ], + "semantic_tags": [ + "file-read", + "utf8", + "async", + "io", + "helpers" + ], + "handles_entities": [ + "File" + ], + "key_behaviors": [ + "reads files as bytes or strings with contextual errors", + "provides UTF-8 aware reading utilities" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/error.rs": { + "short_description": "Infrastructure error types for MCP integration", + "category": "SOURCE_CODE", + "description": "Declares an Error enum used by infra layer to represent unsupported MCP responses and similar integration errors. 
It provides a typed error value for callers to match against when handling MCP protocol issues.", + "key_constructs": [ + { + "name": "Error", + "type": "class", + "purpose": "Enum containing infra-level errors like UnsupportedMcpResponse", + "callers": [ + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 22, + "context": "use crate::error::Error;" + }, + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 209, + "context": "Err(Error::UnsupportedMcpResponse(\"Resource\").into())" + }, + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 212, + "context": "Err(Error::UnsupportedMcpResponse(\"ResourceLink\").into())" + }, + { + "file": "crates/forge_infra/src/mcp_client.rs", + "line": 215, + "context": "Err(Error::UnsupportedMcpResponse(\"Audio\").into())" + } + ] + }, + { + "name": "UnsupportedMcpResponse", + "type": "constant", + "purpose": "Variant indicating an unexpected or unsupported MCP response type" + } + ], + "semantic_tags": [ + "errors", + "mcp", + "integration", + "infra" + ], + "handles_entities": [ + "MCP response" + ], + "key_behaviors": [ + "represents unsupported MCP responses as typed errors" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/fs_create_dirs.rs": { + "short_description": "Filesystem directory-creation infra implementation", + "category": "SOURCE_CODE", + "description": "Provides an implementation of the FileDirectoryInfra trait that creates directory trees using the forge_fs helper. 
It adapts forge_fs::ForgeFS::create_dir_all into an async, dependency-injectable service used by higher-level code.", + "key_constructs": [ + { + "name": "ForgeCreateDirsService", + "type": "class", + "purpose": "Service struct implementing directory creation infra", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 23, + "context": "use crate::fs_create_dirs::ForgeCreateDirsService;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 45, + "context": "create_dirs_service: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 93, + "context": "create_dirs_service: Arc::new(ForgeCreateDirsService)," + } + ] + }, + { + "name": "create_dirs", + "type": "function", + "purpose": "Implements FileDirectoryInfra to create directories asynchronously", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 224, + "context": "self.create_dirs_service.create_dirs(path).await" + } + ] + } + ], + "semantic_tags": [ + "filesystem", + "directories", + "infra", + "async", + "service" + ], + "handles_entities": [ + "filesystem paths", + "directories" + ], + "key_behaviors": [ + "creates directory trees on disk" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/fs_meta.rs": { + "short_description": "File metadata and existence checks service", + "category": "SOURCE_CODE", + "description": "Implements FileInfoInfra using forge_fs helpers to provide file metadata operations (existence, file detection, binary detection, size). 
This module centralizes simple file property checks for use by other services that need file metadata.", + "key_constructs": [ + { + "name": "ForgeFileMetaService", + "type": "class", + "purpose": "Service struct providing file metadata queries", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 24, + "context": "use crate::fs_meta::ForgeFileMetaService;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 44, + "context": "file_meta_service: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 77, + "context": "let file_meta_service = Arc::new(ForgeFileMetaService);" + } + ] + }, + { + "name": "is_file", + "type": "function", + "purpose": "Checks whether a path is a regular file", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 203, + "context": "self.file_meta_service.is_file(path).await" + } + ] + }, + { + "name": "is_binary", + "type": "function", + "purpose": "Asynchronously determines if a file is binary", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 199, + "context": "self.file_meta_service.is_binary(path).await" + } + ] + }, + { + "name": "exists", + "type": "function", + "purpose": "Checks whether a path exists", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 207, + "context": "self.file_meta_service.exists(path).await" + } + ] + }, + { + "name": "file_size", + "type": "function", + "purpose": "Returns the size of a file asynchronously", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 211, + "context": "self.file_meta_service.file_size(path).await" + } + ] + } + ], + "semantic_tags": [ + "filesystem", + "metadata", + "binary-detection", + "infra", + "file-io" + ], + "handles_entities": [ + "files", + "file metadata" + ], + "key_behaviors": [ + "checks if path is a file", + "detects binary files", + "reports file existence and size" + ], + 
"generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/fs_remove.rs": { + "short_description": "Low-level file removal infra service", + "category": "SOURCE_CODE", + "description": "Provides a primitive file removal implementation of FileRemoverInfra that calls forge_fs::ForgeFS::remove_file. It intentionally performs raw deletions without snapshot coordination so higher layers manage snapshots and safety.", + "key_constructs": [ + { + "name": "ForgeFileRemoveService", + "type": "class", + "purpose": "Service struct for file removal operations", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 27, + "context": "use crate::fs_remove::ForgeFileRemoveService;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 42, + "context": "file_remove_service: Arc," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 90, + "context": "file_remove_service: Arc::new(ForgeFileRemoveService::new())," + } + ] + }, + { + "name": "new", + "type": "function", + "purpose": "Constructs a new ForgeFileRemoveService", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 90, + "context": "file_remove_service: Arc::new(ForgeFileRemoveService::new())," + } + ] + }, + { + "name": "remove", + "type": "function", + "purpose": "Deletes the specified file asynchronously", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 217, + "context": "self.file_remove_service.remove(path).await" + } + ] + } + ], + "semantic_tags": [ + "filesystem", + "deletion", + "infra", + "safety", + "async" + ], + "handles_entities": [ + "files" + ], + "key_behaviors": [ + "removes files from disk without snapshot coordination" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/kv_storage.rs": { + "short_description": "cacache-backed generic key-value storage with TTL", + "category": "SOURCE_CODE", + 
"description": "Implements forge_app::KVStore using cacache to store JSON-serialized entries wrapped with timestamps for optional TTL expiration. It provides deterministic key hashing, get/set/clear operations, and compatibility-friendly handling of invalid cached entries.", + "key_constructs": [ + { + "name": "CachedEntry", + "type": "class", + "purpose": "Wrapper storing a value with a timestamp for TTL checks" + }, + { + "name": "CacacheStorage", + "type": "class", + "purpose": "Main storage struct holding cache directory and TTL configuration", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 19, + "context": "pub use forge_infra::CacacheStorage;" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 44, + "context": "mcp_cache_repository: Arc," + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 72, + "context": "let mcp_cache_repository = Arc::new(CacacheStorage::new(" + }, + { + "file": "crates/forge_repo/src/provider/chat.rs", + "line": 8, + "context": "use forge_infra::CacacheStorage;" + }, + { + "file": "crates/forge_repo/src/provider/chat.rs", + "line": 23, + "context": "model_cache: Arc," + }, + { + "file": "crates/forge_repo/src/provider/chat.rs", + "line": 46, + "context": "let model_cache = Arc::new(CacacheStorage::new(" + }, + { + "file": "crates/forge_infra/src/lib.rs", + "line": 27, + "context": "pub use kv_storage::CacacheStorage;" + } + ] + }, + { + "name": "key_to_string", + "type": "function", + "purpose": "Converts a Hash key to a deterministic string for cacache" + }, + { + "name": "cache_get", + "type": "function", + "purpose": "Reads and deserializes a cached value, honoring TTL and invalid entries", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 247, + "context": "self.mcp_cache_repository.cache_get(key).await" + }, + { + "file": "crates/forge_repo/src/provider/chat.rs", + "line": 86, + "context": ".cache_get::<_, Vec>(&cache_key)" + } + ] + }, + { + 
"name": "cache_set", + "type": "function", + "purpose": "Serializes and writes a value into the cache with timestamp", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 255, + "context": "self.mcp_cache_repository.cache_set(key, value).await" + }, + { + "file": "crates/forge_repo/src/provider/chat.rs", + "line": 99, + "context": "if let Err(err) = cache.cache_set(&key, &models).await {" + }, + { + "file": "crates/forge_repo/src/provider/chat.rs", + "line": 115, + "context": "if let Err(err) = self.model_cache.cache_set(&cache_key, &models).await {" + } + ] + }, + { + "name": "cache_clear", + "type": "function", + "purpose": "Clears the entire cacache directory", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 259, + "context": "self.mcp_cache_repository.cache_clear().await" + } + ] + } + ], + "semantic_tags": [ + "caching", + "storage", + "ttl", + "serialization", + "content-addressable" + ], + "handles_entities": [ + "cached entries", + "key-value pairs" + ], + "key_behaviors": [ + "stores and retrieves JSON-serializable values in cacache", + "expires entries by TTL", + "clears the cache" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/mcp_server.rs": { + "short_description": "MCP server connector producing MCP clients", + "category": "SOURCE_CODE", + "description": "Provides an implementation of McpServerInfra that constructs ForgeMcpClient instances from an McpServerConfig and environment variables. 
This adapter isolates MCP connection creation behind an infra trait so other code consumes a typed client.", + "key_constructs": [ + { + "name": "ForgeMcpServer", + "type": "class", + "purpose": "Connector implementing McpServerInfra", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 33, + "context": "use crate::mcp_server::ForgeMcpServer;" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 49, + "context": "mcp_server: ForgeMcpServer," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 100, + "context": "mcp_server: ForgeMcpServer," + } + ] + }, + { + "name": "connect", + "type": "function", + "purpose": "Creates a ForgeMcpClient given server config and env vars", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 286, + "context": "self.mcp_server.connect(config, env_vars).await" + } + ] + } + ], + "semantic_tags": [ + "mcp", + "rpc", + "client", + "infra", + "integration" + ], + "handles_entities": [ + "McpServerConfig", + "McpClient" + ], + "key_behaviors": [ + "connects to MCP server and returns a client instance" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_json_repair/src/error.rs": { + "short_description": "Error types for JSON repair/parsing", + "category": "SOURCE_CODE", + "description": "Defines a rich JsonRepairError enum covering character, structure, unicode, and serde_json parsing errors used by the JSON repair facility. 
It centralizes error variants and provides a Result alias for convenience across the json-repair module.", + "key_constructs": [ + { + "name": "JsonRepairError", + "type": "class", + "purpose": "Enum of possible JSON repair and parse error kinds", + "callers": [ + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 228, + "context": ") -> std::result::Result" + }, + { + "file": "crates/forge_domain/src/tools/call/tool_call.rs", + "line": 233, + "context": ".map_err(forge_json_repair::JsonRepairError::JsonError)" + }, + { + "file": "crates/forge_json_repair/src/parser.rs", + "line": 3, + "context": "use crate::error::{JsonRepairError, Result};" + }, + { + "file": "crates/forge_json_repair/src/parser.rs", + "line": 21, + "context": "return Err(JsonRepairError::UnexpectedEnd { position: self.chars.len() });" + }, + { + "file": "crates/forge_json_repair/src/parser.rs", + "line": 50, + "context": "Err(JsonRepairError::UnexpectedCharacter {" + }, + { + "file": "crates/forge_json_repair/src/parser.rs", + "line": 233, + "context": "return Err(JsonRepairError::ObjectKeyExpected { position: self.i });" + }, + { + "file": "crates/forge_json_repair/src/parser.rs", + "line": 247, + "context": "return Err(JsonRepairError::ColonExpected { position: self.i });" + }, + { + "file": "crates/forge_json_repair/src/parser.rs", + "line": 257, + "context": "return Err(JsonRepairError::ColonExpected { position: self.i });" + }, + { + "file": "crates/forge_json_repair/src/parser.rs", + "line": 502, + "context": "return Err(JsonRepairError::InvalidUnicodeCharacter {" + }, + { + "file": "crates/forge_json_repair/src/parser.rs", + "line": 529, + "context": "return Err(JsonRepairError::InvalidCharacter {" + }, + { + "file": "crates/forge_json_repair/src/lib.rs", + "line": 5, + "context": "pub use error::{JsonRepairError, Result};" + }, + { + "file": "crates/forge_domain/src/error.rs", + "line": 4, + "context": "use forge_json_repair::JsonRepairError;" + }, + { + "file": 
"crates/forge_domain/src/error.rs", + "line": 31, + "context": "error: JsonRepairError," + }, + { + "file": "crates/forge_domain/src/error.rs", + "line": 163, + "context": "use forge_json_repair::JsonRepairError;" + }, + { + "file": "crates/forge_domain/src/error.rs", + "line": 173, + "context": "error: JsonRepairError::from(serde_error)," + } + ] + }, + { + "name": "Result", + "type": "function", + "purpose": "Type alias for Result" + } + ], + "semantic_tags": [ + "error-handling", + "json", + "parsing", + "serde", + "validation" + ], + "handles_entities": [ + "JSON payloads", + "parsing errors" + ], + "key_behaviors": [ + "represents parsing and repair error kinds for JSON repairing" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/sandbox.rs": { + "short_description": "Git worktree sandbox creation helper", + "category": "SOURCE_CODE", + "description": "Manages creation and reuse of git worktrees to provide a sandbox directory for operations. 
It validates the current git repository, checks branch/worktree existence, creates a branch if needed, and returns the canonical path of the worktree while printing status messages.", + "key_constructs": [ + { + "name": "Sandbox", + "type": "class", + "purpose": "Struct encapsulating a sandbox/worktree name and operations", + "callers": [ + { + "file": "crates/forge_main/src/lib.rs", + "line": 30, + "context": "pub use sandbox::Sandbox;" + }, + { + "file": "crates/forge_main/src/main.rs", + "line": 10, + "context": "use forge_main::{Cli, Sandbox, TitleDisplayExt, UI, tracker};" + }, + { + "file": "crates/forge_main/src/main.rs", + "line": 119, + "context": "let mut sandbox = Sandbox::new(sandbox).create()?;" + }, + { + "file": "crates/forge_main/src/main.rs", + "line": 123, + "context": "(Some(sandbox), _) => Sandbox::new(sandbox).create()?," + } + ] + }, + { + "name": "new", + "type": "function", + "purpose": "Creates a new Sandbox instance for a directory name", + "callers": [ + { + "file": "crates/forge_main/src/main.rs", + "line": 119, + "context": "let mut sandbox = Sandbox::new(sandbox).create()?;" + }, + { + "file": "crates/forge_main/src/main.rs", + "line": 123, + "context": "(Some(sandbox), _) => Sandbox::new(sandbox).create()?," + } + ] + }, + { + "name": "create", + "type": "function", + "purpose": "Creates or reuses a git worktree and returns its canonical path", + "callers": [ + { + "file": "crates/forge_main/src/main.rs", + "line": 119, + "context": "let mut sandbox = Sandbox::new(sandbox).create()?;" + }, + { + "file": "crates/forge_main/src/main.rs", + "line": 123, + "context": "(Some(sandbox), _) => Sandbox::new(sandbox).create()?," + } + ] + } + ], + "semantic_tags": [ + "git", + "worktree", + "sandbox", + "cli", + "process" + ], + "handles_entities": [ + "git worktrees", + "directories", + "branches" + ], + "key_behaviors": [ + "creates or reuses git worktrees for a sandbox directory", + "validates git repository and branch state" + ], + 
"generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/state.rs": { + "short_description": "UI state container for CLI/TUI", + "category": "SOURCE_CODE", + "description": "Defines UIState which tracks UI-related information like the current working directory and active conversation id. It provides a constructor that initializes state from an Environment and setters via derive_setters for convenient updates.", + "key_constructs": [ + { + "name": "UIState", + "type": "class", + "purpose": "Struct holding UI state (cwd and conversation id)", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 43, + "context": "use crate::state::UIState;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 104, + "context": "state: UIState," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3067, + "context": "self.state = UIState::new(self.api.environment());" + } + ] + }, + { + "name": "new", + "type": "function", + "purpose": "Creates UIState from an Environment", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 3067, + "context": "self.state = UIState::new(self.api.environment());" + } + ] + } + ], + "semantic_tags": [ + "ui", + "state", + "conversation", + "environment", + "cli" + ], + "handles_entities": [ + "ConversationId", + "Environment" + ], + "key_behaviors": [ + "tracks current working directory and active conversation id for UI" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/tracker.rs": { + "short_description": "Async telemetry event dispatch helpers", + "category": "SOURCE_CODE", + "description": "Small helper wrappers that send telemetry events to the global TRACKER, choosing background spawning or blocking dispatch where appropriate. 
Exposes convenience functions for error, prompt, tool call, model setting, and login events to reduce boilerplate across the codebase.", + "key_constructs": [ + { + "name": "dispatch", + "type": "function", + "purpose": "Spawns async dispatch of an EventKind to TRACKER" + }, + { + "name": "dispatch_blocking", + "type": "function", + "purpose": "Dispatches an EventKind blocking the current thread" + }, + { + "name": "error", + "type": "function", + "purpose": "Sends an error event with Debug formatting", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 339, + "context": "tracker::error(&error);" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 351, + "context": "tracker::error(&error);" + } + ] + }, + { + "name": "tool_call", + "type": "function", + "purpose": "Sends a ToolCall telemetry event", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 3293, + "context": "tracker::tool_call(payload);" + } + ] + }, + { + "name": "set_model", + "type": "function", + "purpose": "Sets model on the tracker asynchronously", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 3479, + "context": "tracker::set_model(model.to_string());" + } + ] + } + ], + "semantic_tags": [ + "telemetry", + "async", + "events", + "tracking", + "analytics" + ], + "handles_entities": [ + "toolcall events", + "prompt events", + "error events", + "model settings", + "login events" + ], + "key_behaviors": [ + "dispatches telemetry events asynchronously and sometimes blocking", + "sends tool call and prompt events to tracker" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/vscode.rs": { + "short_description": "VS Code terminal detection and extension installer", + "category": "SOURCE_CODE", + "description": "Detects if the process is running inside the VS Code integrated terminal and provides utilities to check and install the Forge VS Code extension via the `code` CLI. 
It exposes a should_install_extension helper that combines detection and installation state to guide automated extension installation.", + "key_constructs": [ + { + "name": "is_vscode_terminal", + "type": "function", + "purpose": "Detects presence of VS Code integrated terminal via environment variables" + }, + { + "name": "is_extension_installed", + "type": "function", + "purpose": "Checks installed VS Code extensions using `code --list-extensions`" + }, + { + "name": "install_extension", + "type": "function", + "purpose": "Attempts to install the Forge VS Code extension using the `code` CLI", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 1610, + "context": "match crate::vscode::install_extension() {" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 4249, + "context": "let _ = crate::vscode::install_extension();" + } + ] + }, + { + "name": "should_install_extension", + "type": "function", + "purpose": "Determines whether installation should be attempted (in VS Code and not installed)", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 4248, + "context": "if crate::vscode::should_install_extension() {" + } + ] + } + ], + "semantic_tags": [ + "vscode", + "integration", + "extension", + "detection", + "cli" + ], + "handles_entities": [ + "VS Code extension (ForgeCode.forge-vscode)" + ], + "key_behaviors": [ + "detects VS Code integrated terminal", + "checks and installs the Forge VS Code extension" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/fs_snap.rs": { + "short_description": "Filesystem-backed snapshot repository adapter", + "category": "SOURCE_CODE", + "description": "Adapts forge_snaps::SnapshotService to the SnapshotRepository trait by delegating snapshot creation and undo operations to the snapshot service. 
It configures the snapshot service using the environment's snapshot path so snapshots persist to the expected location.", + "key_constructs": [ + { + "name": "ForgeFileSnapshotService", + "type": "class", + "purpose": "Adapter wrapping forge_snaps::SnapshotService for SnapshotRepository", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 29, + "context": "use crate::fs_snap::ForgeFileSnapshotService;" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 42, + "context": "file_snapshot_service: Arc," + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 64, + "context": "let file_snapshot_service = Arc::new(ForgeFileSnapshotService::new(env.clone()));" + } + ] + }, + { + "name": "new", + "type": "function", + "purpose": "Constructs the service using Environment snapshot path", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 64, + "context": "let file_snapshot_service = Arc::new(ForgeFileSnapshotService::new(env.clone()));" + } + ] + }, + { + "name": "insert_snapshot", + "type": "function", + "purpose": "Creates a snapshot for a file via the inner service", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 104, + "context": "self.file_snapshot_service.insert_snapshot(file_path).await" + } + ] + }, + { + "name": "undo_snapshot", + "type": "function", + "purpose": "Undoes a snapshot for a file via the inner service", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 108, + "context": "self.file_snapshot_service.undo_snapshot(file_path).await" + } + ] + } + ], + "semantic_tags": [ + "snapshots", + "filesystem", + "undo", + "persistence", + "adapter" + ], + "handles_entities": [ + "Snapshot", + "SnapshotRepository", + "file snapshots" + ], + "key_behaviors": [ + "creates file snapshots", + "performs undo operations via snapshot service" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_services/src/clipper.rs": { + "short_description": "Text clipping/truncation strategies and helpers", + "category": "SOURCE_CODE", + "description": "Provides the Clipper enum and ClipperResult struct to truncate text while preserving configurable prefix and/or suffix regions. It supports prefix-only, suffix-only, and prefix+suffix strategies and returns byte-range slices for the preserved portions, with unit tests validating behavior.", + "key_constructs": [ + { + "name": "ClipperResult", + "type": "class", + "purpose": "Holds original content and optional prefix/suffix ranges after clipping" + }, + { + "name": "Clipper", + "type": "class", + "purpose": "Enum describing truncation strategies (Prefix, Suffix, PrefixSuffix)" + }, + { + "name": "clip", + "type": "function", + "purpose": "Applies the selected Clipper strategy to content and returns ClipperResult" + }, + { + "name": "MAX_LIMIT", + "type": "constant", + "purpose": "Default maximum character limit used by the default Clipper" + } + ], + "semantic_tags": [ + "text-processing", + "clipping", + "truncation", + "utilities", + "testing" + ], + "handles_entities": [ + "textual content" + ], + "key_behaviors": [ + "truncates long text preserving prefix/suffix according to strategies", + "reports ranges for preserved content" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/metadata.rs": { + "short_description": "Simple metadata builder and display formatter", + "category": "SOURCE_CODE", + "description": "Implements a tiny Metadata container that accumulates key/value pairs and renders them as a simple YAML-like block via Display. 
It provides convenience methods to add values and conditionally add optional entries for inclusion in messages or metadata banners.", + "key_constructs": [ + { + "name": "Metadata", + "type": "class", + "purpose": "Container for key/value metadata entries with display formatting" + }, + { + "name": "add", + "type": "function", + "purpose": "Adds a key/value pair and returns self for chaining" + }, + { + "name": "add_optional", + "type": "function", + "purpose": "Adds a key/value pair only if the value is Some" + } + ], + "semantic_tags": [ + "metadata", + "formatting", + "display", + "utilities" + ], + "handles_entities": [ + "metadata key-value pairs" + ], + "key_behaviors": [ + "collects key-value metadata and renders it as a simple YAML-like block" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/range.rs": { + "short_description": "Line-range resolution and validation utility", + "category": "SOURCE_CODE", + "description": "Provides resolve_range which normalizes start/end line inputs, enforces minimums, swaps if reversed, and clamps the range to a maximum size. 
The function ensures callers always receive a valid (start, end) tuple and includes thorough unit tests for edge cases.", + "key_constructs": [ + { + "name": "resolve_range", + "type": "function", + "purpose": "Computes a normalized, clamped start/end line range given optional inputs and a max size", + "callers": [ + { + "file": "crates/forge_services/src/attachment.rs", + "line": 11, + "context": "use crate::range::resolve_range;" + }, + { + "file": "crates/forge_services/src/attachment.rs", + "line": 92, + "context": "let (start_line, end_line) = resolve_range(start, end, max_read_lines);" + }, + { + "file": "crates/forge_services/src/tool_services/fs_read.rs", + "line": 11, + "context": "use crate::range::resolve_range;" + }, + { + "file": "crates/forge_services/src/tool_services/fs_read.rs", + "line": 162, + "context": "let (start_line, end_line) = resolve_range(start_line, end_line, config.max_read_lines);" + } + ] + } + ], + "semantic_tags": [ + "range-validation", + "lines", + "validation", + "utilities", + "testing" + ], + "handles_entities": [ + "line ranges" + ], + "key_behaviors": [ + "computes valid start/end line ranges constrained by max size", + "normalizes and clamps user inputs" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_snaps/src/lib.rs": { + "short_description": "Snapshot crate public re-exports", + "category": "SOURCE_CODE", + "description": "Module root for the forge_snaps crate that declares the service module and re-exports its public types. 
It exposes snapshot-related types (e.g., SnapshotInfo, SnapshotId) to other crates without exposing internal module layout.", + "key_constructs": [ + { + "name": "service", + "type": "function", + "purpose": "Private module containing snapshot service implementation (re-exported)" + }, + { + "name": "pub use service::*", + "type": "function", + "purpose": "Re-exports public items from the service module" + } + ], + "semantic_tags": [ + "snapshots", + "reexport", + "library", + "module" + ], + "handles_entities": [ + "SnapshotInfo", + "SnapshotId", + "snapshot service types" + ], + "key_behaviors": [ + "exposes snapshot service types to other crates" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_snaps/src/service.rs": { + "short_description": "Filesystem snapshot service to create and undo file snapshots", + "category": "SOURCE_CODE", + "description": "Provides a SnapshotService that creates, stores and restores snapshots of files on disk under a configurable snapshots directory. 
It implements asynchronous create and undo operations, finds the most recent snapshot by filename, and includes comprehensive unit tests exercising snapshot creation and undo behaviors.", + "key_constructs": [ + { + "name": "SnapshotService", + "type": "class", + "purpose": "Service struct holding snapshots_directory and coordinating snapshot operations", + "callers": [ + { + "file": "crates/forge_repo/src/fs_snap.rs", + "line": 8, + "context": "inner: Arc," + }, + { + "file": "crates/forge_repo/src/fs_snap.rs", + "line": 14, + "context": "inner: Arc::new(forge_snaps::SnapshotService::new(env.snapshot_path()))," + } + ] + }, + { + "name": "SnapshotService::new", + "type": "function", + "purpose": "Constructor to create a SnapshotService with a specified base directory" + }, + { + "name": "SnapshotService::create_snapshot", + "type": "function", + "purpose": "Creates a Snapshot from a path, writes snapshot content into snapshots_directory" + }, + { + "name": "SnapshotService::find_recent_snapshot", + "type": "function", + "purpose": "Iterates a snapshot directory to locate the most recent .snap file by filename" + }, + { + "name": "SnapshotService::undo_snapshot", + "type": "function", + "purpose": "Restores the most recent snapshot to the original path and removes used snapshot file" + }, + { + "name": "TestContext", + "type": "class", + "purpose": "Test helper struct used by the module tests to set up temp dirs and operate the service" + } + ], + "semantic_tags": [ + "snapshots", + "filesystem", + "async", + "backup", + "restore" + ], + "handles_entities": [ + "Snapshot", + "file" + ], + "key_behaviors": [ + "creates a local snapshot of a file", + "restores a file from the most recent snapshot (undo)", + "manages snapshot files (write/remove) in a snapshots directory" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_stream/src/mpsc_stream.rs": { + "short_description": "Wraps a tokio mpsc producer as a futures::Stream 
and aborts task on drop", + "category": "SOURCE_CODE", + "description": "Defines MpscStream which spawns an asynchronous producer task that sends values over a tokio mpsc channel and exposes the receiver as a futures::Stream. The type polls the receiver for items and aborts the spawned task and closes the receiver when dropped; unit tests verify message delivery and abort-on-drop behavior.", + "key_constructs": [ + { + "name": "MpscStream", + "type": "class", + "purpose": "Struct that holds a JoinHandle and a Receiver and implements Stream", + "callers": [ + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 12, + "context": "use forge_stream::MpscStream;" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 229, + "context": "async fn sync_workspace(&self, path: PathBuf) -> Result>> {" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 232, + "context": "let stream = MpscStream::spawn(move |tx| async move {" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 7, + "context": "use forge_stream::MpscStream;" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 39, + "context": "async fn chat(&self, chat: ChatRequest) -> Result>>;" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 196, + "context": ") -> Result>>;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 18, + "context": "use forge_stream::MpscStream;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 131, + "context": ") -> anyhow::Result>> {" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 347, + "context": ") -> Result>> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 292, + "context": ") -> anyhow::Result>>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 1028, + "context": ") -> anyhow::Result>> {" + } + ] + }, + { + "name": "MpscStream::spawn", + "type": "function", + "purpose": "Spawns a background async producer that 
receives a Sender and returns an MpscStream" + }, + { + "name": "Stream for MpscStream::poll_next", + "type": "function", + "purpose": "Poll implementation to receive the next item from the internal receiver" + }, + { + "name": "Drop for MpscStream", + "type": "function", + "purpose": "Closes the receiver and aborts the spawned task when the stream is dropped" + }, + { + "name": "test_stream_receives_messages", + "type": "function", + "purpose": "Unit test verifying a sent message is received by the stream" + }, + { + "name": "test_drop_aborts_task", + "type": "function", + "purpose": "Unit test ensuring drop aborts the background task and prevents completion" + } + ], + "semantic_tags": [ + "async", + "stream", + "mpsc", + "tokio", + "concurrency" + ], + "handles_entities": [ + "message" + ], + "key_behaviors": [ + "exposes a background producer as a Stream", + "ensures background tasks are aborted when the stream is dropped" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_template/src/element.rs": { + "short_description": "Lightweight HTML/XML element builder with rendering and escaping", + "category": "SOURCE_CODE", + "description": "Implements an Element builder type for composing elements with attributes, classes, children and text (with HTML escaping), plus rendering into a textual markup representation. 
It provides helper constructors, append behavior via a CanAppend trait, Display impl, and many unit tests covering rendering and attribute/class handling.", + "key_constructs": [ + { + "name": "Element", + "type": "class", + "purpose": "Core struct representing an element with name, attributes, children, and optional text", + "callers": [ + { + "file": "crates/forge_app/src/orch.rs", + "line": 8, + "context": "use forge_template::Element;" + }, + { + "file": "crates/forge_app/src/orch.rs", + "line": 345, + "context": "let message = Element::new(\"retry\").text(text);" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 6, + "context": "use forge_template::Element;" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 116, + "context": "let mut message_element = Element::new(\"message\").attr(\"role\", message.role);" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 119, + "context": "message_element.append(Element::new(\"content\").text(&message.content));" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 124, + "context": "Element::new(\"forge_tool_call\")" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 133, + "context": ".append(Element::new(\"thought_signature\").text(thought_signature));" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 140, + "context": "message_element.append(Element::new(\"reasoning_detail\").text(text));" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 149, + "context": "Element::new(\"message\")" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 152, + "context": "Element::new(\"forge_tool_result\")" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 158, + "context": "ContextMessage::Image(_) => Element::new(\"image\").attr(\"path\", \"[base64 URL]\").render()," + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 479, + "context": "let elm = 
Element::new(\"file_content\")" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 495, + "context": "let elm = Element::new(\"directory_listing\")" + }, + { + "file": "crates/forge_domain/src/context.rs", + "line": 499, + "context": "Element::new(tag_name).text(entry.path)" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 11, + "context": "use forge_template::Element;" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 150, + "context": "Element::new(\"permission_denied\")" + }, + { + "file": "crates/forge_app/src/hooks/doom_loop.rs", + "line": 7, + "context": "use forge_template::Element;" + }, + { + "file": "crates/forge_app/src/hooks/doom_loop.rs", + "line": 241, + "context": "let content = Element::new(\"system_reminder\").cdata(reminder);" + }, + { + "file": "crates/forge_template/src/lib.rs", + "line": 3, + "context": "pub use element::Element;" + }, + { + "file": "crates/forge_domain/src/tools/result.rs", + "line": 2, + "context": "use forge_template::Element;" + } + ] + }, + { + "name": "Element::new", + "type": "function", + "purpose": "Creates a new Element and parses dotted class shorthand (e.g., div.foo.bar)" + }, + { + "name": "Element::text", + "type": "function", + "purpose": "Sets escaped text content for the element" + }, + { + "name": "Element::cdata", + "type": "function", + "purpose": "Sets raw CDATA text for the element" + }, + { + "name": "Element::attr / class / append / render", + "type": "function", + "purpose": "Manipulate attributes/classes/children and render the element to a string" + }, + { + "name": "CanAppend", + "type": "function", + "purpose": "Trait to support appending single or iterable children to an Element" + } + ], + "semantic_tags": [ + "templating", + "html", + "rendering", + "builder", + "escaping" + ], + "handles_entities": [ + "Element", + "HTML" + ], + "key_behaviors": [ + "builds hierarchical markup programmatically", + "renders elements to escaped HTML/XML 
text", + "supports class shorthand and attribute manipulation" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/can_track.rs": { + "short_description": "Checks whether telemetry/tracking should be enabled based on version", + "category": "SOURCE_CODE", + "description": "Exports a VERSION constant determined from build-time environment or Cargo package version and a can_track function that disables tracking for development or placeholder versions. Includes unit tests validating behavior for dev and production version strings.", + "key_constructs": [ + { + "name": "VERSION", + "type": "constant", + "purpose": "Build-time version string taken from APP_VERSION or Cargo package version", + "callers": [ + { + "file": "crates/forge_main/src/banner.rs", + "line": 4, + "context": "use forge_tracker::VERSION;" + }, + { + "file": "crates/forge_main/src/banner.rs", + "line": 64, + "context": "let version_label = (\"Version:\", VERSION);" + }, + { + "file": "crates/forge_main/src/update.rs", + "line": 7, + "context": "use forge_tracker::VERSION;" + }, + { + "file": "crates/forge_main/src/update.rs", + "line": 55, + "context": "VERSION.to_string().bold().white()," + }, + { + "file": "crates/forge_main/src/update.rs", + "line": 76, + "context": "if VERSION.contains(\"dev\") || VERSION == \"0.1.0\" {" + }, + { + "file": "crates/forge_main/src/update.rs", + "line": 76, + "context": "if VERSION.contains(\"dev\") || VERSION == \"0.1.0\" {" + }, + { + "file": "crates/forge_main/src/update.rs", + "line": 81, + "context": "let informer = update_informer::new(registry::GitHub, \"antinomyhq/forge\", VERSION)" + }, + { + "file": "crates/forge_main/src/info.rs", + "line": 7, + "context": "use forge_tracker::VERSION;" + }, + { + "file": "crates/forge_main/src/info.rs", + "line": 282, + "context": ".add_key_value(\"Version\", VERSION)" + }, + { + "file": "crates/forge_main/src/prompt.rs", + "line": 9, + "context": "use 
forge_tracker::VERSION;" + }, + { + "file": "crates/forge_main/src/prompt.rs", + "line": 76, + "context": "write!(result, \"[{VERSION}\").unwrap();" + }, + { + "file": "crates/forge_main/src/prompt.rs", + "line": 230, + "context": "assert!(actual.contains(&VERSION.to_string()));" + }, + { + "file": "crates/forge_main/src/prompt.rs", + "line": 238, + "context": "assert!(actual.contains(&VERSION.to_string()));" + }, + { + "file": "crates/forge_main/src/prompt.rs", + "line": 310, + "context": "assert!(actual.contains(&VERSION.to_string()));" + }, + { + "file": "crates/forge_tracker/src/lib.rs", + "line": 9, + "context": "pub use can_track::VERSION;" + } + ] + }, + { + "name": "can_track", + "type": "function", + "purpose": "Public function that determines whether telemetry should be enabled", + "callers": [ + { + "file": "crates/forge_tracker/src/log.rs", + "line": 9, + "context": "use crate::can_track::can_track;" + }, + { + "file": "crates/forge_tracker/src/log.rs", + "line": 47, + "context": "let ((non_blocking, guard), env) = if can_track() {" + }, + { + "file": "crates/forge_tracker/src/dispatch.rs", + "line": 13, + "context": "use crate::can_track::can_track;" + }, + { + "file": "crates/forge_tracker/src/dispatch.rs", + "line": 76, + "context": "let can_track = can_track();" + } + ] + }, + { + "name": "can_track_inner", + "type": "function", + "purpose": "Internal helper that implements version string checks to exclude dev/placeholder versions" + } + ], + "semantic_tags": [ + "telemetry", + "versioning", + "feature-flag", + "analytics" + ], + "handles_entities": [], + "key_behaviors": [ + "determines whether tracking is allowed based on application version" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/error.rs": { + "short_description": "Unified error enum and result alias for tracker operations", + "category": "SOURCE_CODE", + "description": "Defines an Error enum that wraps various error types 
(reqwest, serde_json, url parsing, PostHog, tokio join, IO) using derive_more for From conversions, and provides a Result type alias for convenience across the tracker crate.", + "key_constructs": [ + { + "name": "Error", + "type": "class", + "purpose": "Enum aggregator for all error types used by the tracker" + }, + { + "name": "Result", + "type": "constant", + "purpose": "Type alias Result used throughout the tracker code" + } + ], + "semantic_tags": [ + "error-handling", + "tracking", + "http", + "integration" + ], + "handles_entities": [ + "Error" + ], + "key_behaviors": [ + "standardizes error handling and conversions for the tracker subsystem" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_tracker/src/event.rs": { + "short_description": "Telemetry event types and payload serialization helpers", + "category": "SOURCE_CODE", + "description": "Defines the Event data structure containing telemetry metadata (timestamps, client info, conversation), a Name wrapper that normalizes to snake_case, ToolCallPayload and Identity types, and an EventKind enum that maps event kinds to canonical names and serialized values for sending analytics events.", + "key_constructs": [ + { + "name": "Event", + "type": "class", + "purpose": "Primary telemetry event struct holding metadata and optional conversation/identity", + "callers": [ + { + "file": "crates/forge_tracker/src/collect/posthog.rs", + "line": 12, + "context": "use crate::Event;" + }, + { + "file": "crates/forge_tracker/src/collect/posthog.rs", + "line": 47, + "context": "fn new(api_key: String, mut input: Event) -> Self {" + }, + { + "file": "crates/forge_tracker/src/collect/posthog.rs", + "line": 76, + "context": "fn create_request(&self, event: Event) -> Result {" + }, + { + "file": "crates/forge_tracker/src/collect/posthog.rs", + "line": 97, + "context": "async fn collect(&self, event: Event) -> Result<()> {" + }, + { + "file": "crates/forge_tracker/src/lib.rs", + 
"line": 12, + "context": "pub use event::{Event, EventKind, ToolCallPayload};" + }, + { + "file": "crates/forge_tracker/src/dispatch.rs", + "line": 17, + "context": "use crate::{Event, EventKind, client_id};" + }, + { + "file": "crates/forge_tracker/src/dispatch.rs", + "line": 118, + "context": "let event = Event {" + }, + { + "file": "crates/forge_tracker/src/collect/mod.rs", + "line": 1, + "context": "use crate::Event;" + }, + { + "file": "crates/forge_tracker/src/collect/mod.rs", + "line": 9, + "context": "async fn collect(&self, event: Event) -> super::Result<()>;" + } + ] + }, + { + "name": "Name", + "type": "class", + "purpose": "Wrapper that converts event names to snake_case and implements Deref/From conversions" + }, + { + "name": "ToolCallPayload", + "type": "class", + "purpose": "Payload structure used when tracking tool invocation events", + "callers": [ + { + "file": "crates/forge_main/src/tracker.rs", + "line": 1, + "context": "use forge_tracker::{EventKind, ToolCallPayload};" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 36, + "context": "pub fn tool_call(payload: ToolCallPayload) {" + }, + { + "file": "crates/forge_tracker/src/lib.rs", + "line": 12, + "context": "pub use event::{Event, EventKind, ToolCallPayload};" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 27, + "context": "use forge_tracker::ToolCallPayload;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3285, + "context": "let mut r = ToolCallPayload::new(toolcall_result.name.to_string());" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3291, + "context": "ToolCallPayload::new(toolcall_result.name.to_string())" + } + ] + }, + { + "name": "EventKind", + "type": "class", + "purpose": "Enum representing kinds of telemetry events and providing methods to obtain canonical name and payload value", + "callers": [ + { + "file": "crates/forge_main/src/tracker.rs", + "line": 1, + "context": "use forge_tracker::{EventKind, ToolCallPayload};" + 
}, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 7, + "context": "fn dispatch(event: EventKind) {" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 14, + "context": "fn dispatch_blocking(event: EventKind) {" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 23, + "context": "dispatch(EventKind::Error(format!(\"{error:?}\")));" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 27, + "context": "dispatch_blocking(EventKind::Error(format!(\"{error:?}\")));" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 32, + "context": "dispatch(EventKind::Error(error));" + }, + { + "file": "crates/forge_main/src/tracker.rs", + "line": 37, + "context": "dispatch(EventKind::ToolCall(payload));" + } + ] + }, + { + "name": "Identity", + "type": "class", + "purpose": "Simple struct holding user login identity for login events", + "callers": [ + { + "file": "crates/forge_tracker/src/dispatch.rs", + "line": 15, + "context": "use crate::event::Identity;" + }, + { + "file": "crates/forge_tracker/src/dispatch.rs", + "line": 103, + "context": "let id = Identity { login: login_value };" + } + ] + } + ], + "semantic_tags": [ + "telemetry", + "events", + "serialization", + "analytics" + ], + "handles_entities": [ + "Event", + "Conversation", + "Identity", + "ToolCallPayload" + ], + "key_behaviors": [ + "represents telemetry events with metadata", + "normalizes event names and serializes payloads for analytics" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/actions/auth.zsh": { + "short_description": "ZSH actions to login and logout providers via fzf selection", + "category": "SOURCE_CODE", + "description": "Provides shell action handlers used by the plugin to prompt for a provider (optionally via fuzzy query) and invoke the forge binary to perform interactive provider login or logout. 
These functions bridge the interactive fzf picker and the forge CLI login/logout commands.", + "key_constructs": [ + { + "name": "_forge_action_login", + "type": "function", + "purpose": "Prompts user to pick a provider and runs 'forge provider login ' interactively" + }, + { + "name": "_forge_action_logout", + "type": "function", + "purpose": "Prompts user to pick a provider (with optional status filter) and runs 'forge provider logout '" + } + ], + "semantic_tags": [ + "authentication", + "provider", + "fzf", + "shell-integration" + ], + "handles_entities": [ + "provider" + ], + "key_behaviors": [ + "launches provider selection", + "initiates interactive login and logout flows" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/actions/config.zsh": { + "short_description": "Config and model/provider/agent selection actions with fzf", + "category": "SOURCE_CODE", + "description": "Contains many shell action handlers to inspect and change configuration (select agent, pick model/provider, set session model, reasoning effort, sync workspace, edit config file) using fzf pickers and invoking the forge CLI. 
Helper functions abstract model picking and setting logic and support both persistent and session-scoped overrides.", + "key_constructs": [ + { + "name": "_forge_action_agent", + "type": "function", + "purpose": "Interactive agent picker that sets the active agent" + }, + { + "name": "_forge_pick_model", + "type": "function", + "purpose": "Helper that presents a model picker (fzf) across providers and returns the selected line" + }, + { + "name": "_forge_action_model", + "type": "function", + "purpose": "Handles model selection and optionally switches provider before setting model" + }, + { + "name": "_forge_action_session_model", + "type": "function", + "purpose": "Selects a model only for the current shell session without mutating global config" + }, + { + "name": "_forge_action_config_edit", + "type": "function", + "purpose": "Opens the global forge config in an editor and ensures file/directory existence" + } + ], + "semantic_tags": [ + "configuration", + "models", + "providers", + "fzf", + "session" + ], + "handles_entities": [ + "agent", + "model", + "provider", + "workspace" + ], + "key_behaviors": [ + "selects and sets agents/models/providers", + "edits and reloads config", + "sets session-scoped model/provider and reasoning effort", + "triggers workspace sync operations" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Usability", + "title": "Use interactive execution helper to preserve TTY for API-key prompts and support atomic model selection", + "problem": "Setting provider from ZLE widgets caused rustyline to see non-tty stdin and fail prompting for API keys; model selection logic also required provider disambiguation.", + "root_cause": "ZLE redirects stdin/stdout; without connecting to /dev/tty interactive prompts fail. Also model lists can collide across providers.", + "solution": "Added _forge_exec_interactive to run commands with /dev/tty for interactive prompts; switched provider/model set calls to use it. 
Also enhanced pick model to accept current provider to disambiguate when model ids collide and added --model flag to config set provider to atomically set model.", + "lesson_learned": "When launching interactive CLI from a shell plugin or widget, ensure interactive child processes are attached to the real TTY. Also disambiguate model selection by provider to avoid accidental changes.", + "commits": [ + "f92ea90", + "e74c862", + "03741f7" + ], + "constructs": [ + "_forge_exec_interactive", + "_forge_pick_model", + "_forge_action_model" + ] + }, + { + "type": "feature", + "category": "UX", + "title": "Add interactive model pickers and config setters for commit/suggest", + "problem": "Users lacked convenient fzf-based model pickers for commit and suggest model configuration.", + "root_cause": "CLI added config subcommands but shell plugin needed actions to call them.", + "solution": "Add _forge_pick_model helper to render fzf pickers, add _forge_action_commit_model and _forge_action_suggest_model to call `forge config set commit/suggest `, and update model selection flow to pre-position cursor and trim fields.", + "lesson_learned": "Shell plugin actions must mirror CLI semantics and validate provider-model combo or switch provider when necessary. 
Keep pickers consistent (header-lines, with-nth) and return raw lines for parsing.", + "commits": [ + "f8a260e", + "da37b43", + "0577b3a", + "9a6008e" + ], + "constructs": [ + "_forge_pick_model", + "_forge_action_commit_model", + "_forge_action_suggest_model", + "_forge_action_model" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/actions/conversation.zsh": { + "short_description": "Conversation management actions: list, switch, clone, copy, and rename", + "category": "SOURCE_CODE", + "description": "Implements interactive conversation workflows (list and switch via fzf, toggle previous conversation, clone conversations, copy last assistant message to clipboard, rename) and helper functions to manage current/previous conversation tracking. It integrates with the forge CLI to show conversation content and info and handles clipboard portability across platforms.", + "key_constructs": [ + { + "name": "_forge_switch_conversation", + "type": "function", + "purpose": "Switches active conversation and saves previous id for toggle semantics" + }, + { + "name": "_forge_clear_conversation", + "type": "function", + "purpose": "Clears the current conversation while tracking previous id" + }, + { + "name": "_forge_action_conversation", + "type": "function", + "purpose": "Interactive conversation picker and direct-id switcher with preview and info display" + }, + { + "name": "_forge_action_clone", + "type": "function", + "purpose": "Clones a selected or specified conversation and switches to the clone" + }, + { + "name": "_forge_action_copy", + "type": "function", + "purpose": "Copies the last assistant message (raw markdown) to the OS clipboard" + } + ], + "semantic_tags": [ + "conversations", + "fzf", + "clipboard", + "session-management" + ], + "handles_entities": [ + "Conversation", + "message" + ], + "key_behaviors": [ + "lists and switches conversations", + "clones conversations and switches to clones", + "copies 
assistant output to system clipboard", + "renames conversations" + ], + "insights": [ + { + "type": "feature", + "category": "UX", + "title": "Add :copy command to copy last assistant message to clipboard", + "problem": "Users wanted an easy shell shortcut to copy the last assistant reply as raw markdown.", + "root_cause": "No shell-level helper to extract and copy markdown output existed.", + "solution": "Added _forge_action_copy which calls $_FORGE_BIN conversation show --md , uses pbcopy/xclip/xsel for OS clipboard and prints a success message with lines/bytes. Dispatcher updated to route 'copy' action.", + "lesson_learned": "CLI features intended to be non-interactive (clipboard, piping) should expose a machine-readable/raw output flag (here --md) and plugin should choose appropriate clipboard tools fallback.", + "commits": [ + "a8d3acc" + ], + "constructs": [ + "_forge_action_copy" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/actions/core.zsh": { + "short_description": "Core conversation and session action handlers", + "category": "SOURCE_CODE", + "description": "Provides core actions such as starting a new conversation (optionally sending initial text), showing session info, environment, dumping, compacting, and retrying conversation messages. 
A helper ensures conversation-specific commands validate an active conversation before delegating to the forge CLI.", + "key_constructs": [ + { + "name": "_forge_action_new", + "type": "function", + "purpose": "Starts a new conversation, optionally sends provided input, and starts background tasks" + }, + { + "name": "_forge_action_info", + "type": "function", + "purpose": "Displays session information, optionally scoped to the active conversation" + }, + { + "name": "_forge_handle_conversation_command", + "type": "function", + "purpose": "Common helper to validate active conversation and invoke conversation subcommands" + }, + { + "name": "_forge_action_dump", + "type": "function", + "purpose": "Dumps conversation content, supporting HTML output" + } + ], + "semantic_tags": [ + "conversation", + "session", + "cli-integration", + "background-tasks" + ], + "handles_entities": [ + "Conversation" + ], + "key_behaviors": [ + "starts new conversations and sends input", + "invokes conversation subcommands (dump, compact, retry)", + "shows session and environment info" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/actions/doctor.zsh": { + "short_description": "Run environment diagnostics via the forge doctor command", + "category": "SOURCE_CODE", + "description": "Provides a single action handler that invokes the forge binary's zsh doctor command for diagnostics and environment checks. 
It acts as a thin bridge from the plugin to the built-in diagnostic routine.", + "key_constructs": [ + { + "name": "_forge_action_doctor", + "type": "function", + "purpose": "Calls 'forge zsh doctor' to run diagnostics" + } + ], + "semantic_tags": [ + "diagnostics", + "doctor", + "environment" + ], + "handles_entities": [], + "key_behaviors": [ + "runs diagnostics for the shell/plugin environment" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/actions/editor.zsh": { + "short_description": "Open external editor and generate shell commands from descriptions", + "category": "SOURCE_CODE", + "description": "Implements _forge_action_editor to open an external editor for composing multi-line input and insert the result into the shell buffer, and _forge_action_suggest to call the forge suggest command to generate a shell command from natural language and replace the buffer. It handles temp file creation, cleanup, editor validation, and zsh buffer manipulation.", + "key_constructs": [ + { + "name": "_forge_action_editor", + "type": "function", + "purpose": "Opens the user editor on a temp file, reads content, and inserts it into the shell buffer" + }, + { + "name": "_forge_action_suggest", + "type": "function", + "purpose": "Calls 'forge suggest' with a description and replaces shell buffer with the generated command" + } + ], + "semantic_tags": [ + "editor", + "command-suggestion", + "zle", + "buffer" + ], + "handles_entities": [ + "temp_file", + "command" + ], + "key_behaviors": [ + "compose command text in external editor", + "generate shell commands from natural language descriptions" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/actions/git.zsh": { + "short_description": "Git commit helpers that generate AI commit messages", + "category": "SOURCE_CODE", + "description": "Provides actions to generate AI-powered commit messages and either perform the commit or preview 
it by populating the shell buffer with an appropriate git commit command. The preview adjusts between committing staged changes vs committing all tracked files and uses the forge CLI to produce the message.", + "key_constructs": [ + { + "name": "_forge_action_commit", + "type": "function", + "purpose": "Generates an AI commit message via 'forge commit' and resets the shell buffer" + }, + { + "name": "_forge_action_commit_preview", + "type": "function", + "purpose": "Generates an AI commit message and places a git commit command into the shell buffer for user review" + } + ], + "semantic_tags": [ + "git", + "commit", + "ai", + "shell-integration" + ], + "handles_entities": [ + "commit message" + ], + "key_behaviors": [ + "generate commit messages", + "populate shell with git commit command for preview or execution" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/actions/keyboard.zsh": { + "short_description": "Display keyboard shortcuts via the forge keyboard command", + "category": "SOURCE_CODE", + "description": "Defines a small action that invokes the forge CLI to show keyboard shortcuts/help for the zsh integration. 
It acts as a convenience wrapper to surface shortcut documentation to users.", + "key_constructs": [ + { + "name": "_forge_action_keyboard", + "type": "function", + "purpose": "Calls 'forge zsh keyboard' to display keyboard shortcuts" + } + ], + "semantic_tags": [ + "keyboard", + "shortcuts", + "help" + ], + "handles_entities": [], + "key_behaviors": [ + "displays keyboard shortcuts and help" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "shell-plugin/lib/actions/provider.zsh": { + "short_description": "Provider fzf selection helper for the shell plugin", + "category": "SOURCE_CODE", + "description": "Implements _forge_select_provider which lists providers (with optional type/status filters), preselects the current provider, supports a query string, and returns the user's selection for use by other actions. It centralizes provider selection and filtering logic for login/logout and other provider-related flows.", + "key_constructs": [ + { + "name": "_forge_select_provider", + "type": "function", + "purpose": "Presents providers via fzf with filtering and returns the selected provider line" + } + ], + "semantic_tags": [ + "provider", + "selection", + "fzf", + "shell-integration" + ], + "handles_entities": [ + "provider" + ], + "key_behaviors": [ + "select a provider with optional filtering and preselection" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/transformers/mod.rs": { + "short_description": "Module re-exporting OpenAI provider transformation pipeline components", + "category": "SOURCE_CODE", + "description": "Declares and re-exports transformer submodules used to adapt and normalize OpenAI/related provider requests and responses. 
Exposes ProviderPipeline as the public entry point for composing provider-specific transformations.", + "key_constructs": [ + { + "name": "ProviderPipeline", + "type": "struct", + "purpose": "Public pipeline type that composes provider-specific transformers for request/response adaptation" + } + ], + "semantic_tags": [ + "provider-adaptation", + "transformers", + "dto", + "openai", + "pipeline" + ], + "handles_entities": [], + "key_behaviors": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/hooks/doom_loop.rs": { + "short_description": "Detects repeating tool-call doom loops in conversation history", + "category": "SOURCE_CODE", + "description": "Implements a DoomLoopDetector that scans conversation context for repeating or consecutive identical tool call patterns and inserts a system reminder when a loop is detected. It provides an EventHandle implementation to run during request processing and includes unit tests covering detection logic and edge cases.", + "key_constructs": [ + { + "name": "DoomLoopDetector", + "type": "struct", + "purpose": "Detects repeating tool-call patterns in a conversation and inserts remediation reminders" + }, + { + "name": "DoomLoopDetector::detect_from_conversation", + "type": "function", + "purpose": "Analyzes conversation tool call history and returns the count when a loop is found" + }, + { + "name": "DoomLoopDetector::check_repeating_pattern", + "type": "function", + "purpose": "Finds repeating suffix patterns and counts consecutive repetitions" + }, + { + "name": "EventHandle> for DoomLoopDetector", + "type": "impl", + "purpose": "Hook integration that appends a reminder into conversation context when a loop is detected" + } + ], + "semantic_tags": [ + "conversation", + "loop-detection", + "tool-calls", + "hooks", + "event-handling" + ], + "handles_entities": [ + "Conversation", + "ContextMessage", + "TextMessage", + "ToolCallFull" + ], + "key_behaviors": [ + "detects repeating 
tool call patterns in conversation history", + "inserts system reminders into conversation context when loops are found", + "provides a request-phase event hook for doom loop detection" + ], + "insights": [ + { + "type": "refactoring", + "category": "Other", + "title": "Add a doom-loop detector hook to prevent repetitive tool calls", + "problem": "Agents might get stuck repeatedly calling the same tool without progress (doom loops).", + "root_cause": "No existing mechanism to detect and insert a guard/reminder to the user when many similar tool calls occur consecutively.", + "solution": "Introduce DoomLoopDetector hook that runs on request paths; when repeated similar tool calls are detected, it appends a templated user reminder (templates/forge-doom-loop-reminder.md) into conversation context to nudge the agent. Hook wired into app startup and tests.", + "commits": [ + "d1e0547" + ], + "constructs": [ + "DoomLoopDetector", + "Hook::on_request" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_app/src/orch_spec/orch_spec.rs" + ], + "test_functions": [ + "test_doom_loop_detection_adds_user_reminder_after_repeated_calls_on_next_request" + ], + "source_commits": [ + "d1e0547" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/hooks/tracing.rs": { + "short_description": "Logging/tracing handler for conversation lifecycle events", + "category": "SOURCE_CODE", + "description": "Provides TracingHandler that logs key lifecycle events (start, request, response, tool call start/end, end) to aid debugging and telemetry. 
Implements EventHandle for multiple event payload types and includes tests verifying it doesn't panic on typical events.", + "key_constructs": [ + { + "name": "TracingHandler", + "type": "struct", + "purpose": "Event handler that logs detailed debug/info/warn messages for lifecycle events" + }, + { + "name": "EventHandle> for TracingHandler", + "type": "impl", + "purpose": "Logs token usage, costs and other response metrics" + }, + { + "name": "EventHandle> for TracingHandler", + "type": "impl", + "purpose": "Logs warnings on tool call failures with arguments and output" + } + ], + "semantic_tags": [ + "tracing", + "logging", + "events", + "observability" + ], + "handles_entities": [ + "Conversation", + "ToolCall", + "ChatCompletionMessageFull" + ], + "key_behaviors": [ + "logs agent and conversation initialization", + "records token usage and cost information", + "logs tool call start and failures" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/bounty_job.rs": { + "short_description": "Defines CI job steps for bounty label synchronization scripts", + "category": "BUILD", + "description": "Constructs GitHub Actions job definitions for running TypeScript scripts that reconcile bounty labels across issues and propagate bounty labels to PRs. 
Provides helper functions to build checkout/install/run job steps with appropriate permissions.", + "key_constructs": [], + "semantic_tags": [ + "ci", + "github-actions", + "jobs", + "automation", + "bounty" + ], + "handles_entities": [ + "GitHub issues", + "Pull requests", + "Workflow jobs" + ], + "key_behaviors": [ + "defines CI job that runs npm/tsx scripts to sync bounty labels", + "ensures required permissions and checkout/install steps for scripts" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/workflows/bounty.rs": { + "short_description": "Generates the bounty management GitHub Actions workflow", + "category": "BUILD", + "description": "Programmatically generates a complete GitHub Actions workflow YAML for bounty management, wiring event triggers, permissions, and jobs for syncing issues and PRs. Uses the jobs module to attach the sync-all-issues and sync-pr jobs and emits the final workflow file.", + "key_constructs": [], + "semantic_tags": [ + "ci", + "workflow-generation", + "github-actions", + "bounty" + ], + "handles_entities": [ + "Workflow", + "GitHub events" + ], + "key_behaviors": [ + "generates a scheduled and event-driven workflow for bounty label reconciliation", + "configures job permissions and adds job definitions" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/auth/auth_method.rs": { + "short_description": "Defines authentication method enum and utility accessors", + "category": "SOURCE_CODE", + "description": "Declares the AuthMethod enum representing supported credential flows (API key, various OAuth modes, Google ADC, Codex device) and exposes constructors and oauth_config accessor. 
Includes serde (de)serialization and unit tests validating construction and round-trip behavior.", + "key_constructs": [ + { + "name": "AuthMethod", + "type": "enum", + "purpose": "Represents supported authentication methods and associated OAuth configuration" + }, + { + "name": "AuthMethod::oauth_config", + "type": "function", + "purpose": "Returns an Option<&OAuthConfig> for variants that carry OAuth settings" + }, + { + "name": "tests::test_codex_device_deserializes_from_json", + "type": "function", + "purpose": "Unit test ensuring codex_device variant deserializes and exposes oauth config" + } + ], + "semantic_tags": [ + "authentication", + "oauth", + "serde", + "credentials" + ], + "handles_entities": [ + "OAuthConfig", + "AuthMethod" + ], + "key_behaviors": [ + "represents and serializes/deserializes available auth flows", + "provides helpers to construct OAuth-based auth methods" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/auth/new_types.rs": { + "short_description": "Newtype wrappers for authentication-related string values", + "category": "SOURCE_CODE", + "description": "Defines transparent newtype wrappers for API keys, tokens, codes, and URL parameter specs used by auth flows, with convenience traits and Display implementations (including safe API key truncation). 
Also defines URLParamSpec helper constructors for UI use.", + "key_constructs": [ + { + "name": "ApiKey", + "type": "struct", + "purpose": "Transparent newtype for API key strings with custom Display that truncates for safe output" + }, + { + "name": "truncate_key", + "type": "function", + "purpose": "Formats a key for display by truncating long keys", + "callers": [ + { + "file": "crates/forge_app/src/utils.rs", + "line": 46, + "context": "pub use forge_domain::truncate_key;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 17, + "context": "use forge_app::utils::{format_display_path, truncate_key};" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1525, + "context": "info = info.add_key_value(\"Agent API Key\", truncate_key(api_key.as_str()));" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1530, + "context": "info = info.add_key_value(\"Default API Key\", truncate_key(api_key.as_str()));" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 1537, + "context": "info = info.add_key_value(\"API Key\", truncate_key(api_key.as_str()));" + } + ] + }, + { + "name": "URLParamSpec", + "type": "struct", + "purpose": "Specifies a URL parameter name and optional preset options for UI rendering" + } + ], + "semantic_tags": [ + "auth", + "newtypes", + "secrets", + "tokens", + "serialization" + ], + "handles_entities": [ + "ApiKey", + "AuthorizationCode", + "DeviceCode", + "PkceVerifier", + "AccessToken", + "RefreshToken", + "URLParamSpec" + ], + "key_behaviors": [ + "wraps credential strings in typed newtypes", + "provides safe display of secrets by truncation", + "models UI parameter specs for auth flows" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/transformer/mod.rs": { + "short_description": "Generic transformer trait and composition utilities", + "category": "SOURCE_CODE", + "description": "Defines a Transformer trait with combinators (pipe and when) and default 
implementations, plus several concrete transformer modules re-exported for use transforming Context objects. Contains unit tests that snapshot transformations and exercise piping of transformers.", + "key_constructs": [ + { + "name": "Transformer", + "type": "trait", + "purpose": "Trait defining a transform operation on a value and combinators to compose transformers" + }, + { + "name": "DefaultTransformation", + "type": "struct", + "purpose": "No-op transformer that returns the value unchanged" + }, + { + "name": "Pipe", + "type": "struct", + "purpose": "Composes two transformers so value flows through both" + }, + { + "name": "Cond", + "type": "struct", + "purpose": "Conditional transformer that applies inner transform only when predicate holds" + } + ], + "semantic_tags": [ + "transformation", + "composition", + "pipeline", + "context", + "serialization" + ], + "handles_entities": [ + "Context", + "ContextMessage", + "ToolResult" + ], + "key_behaviors": [ + "composes and applies modular transformations to context objects", + "provides conditional and piped transformer combinators" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/completer/command.rs": { + "short_description": "Command completer for interactive shell input", + "category": "SOURCE_CODE", + "description": "Implements a reedline Completer that suggests available forge commands (prefixing slash for slash-commands and keeping shell ! prefix). 
Pulls available commands from ForgeCommandManager and constructs completion suggestions matching the input.", + "key_constructs": [ + { + "name": "CommandCompleter", + "type": "struct", + "purpose": "Provides completion suggestions for the interactive prompt using command metadata" + }, + { + "name": "CommandCompleter::complete", + "type": "function", + "purpose": "Generates reedline Suggestion list by matching available commands against the current line" + } + ], + "semantic_tags": [ + "cli", + "autocomplete", + "reedline", + "ux" + ], + "handles_entities": [ + "ForgeCommandManager", + "Command metadata" + ], + "key_behaviors": [ + "suggests commands with appropriate prefixes for interactive input" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/pool.rs": { + "short_description": "SQLite connection pool builder and migration runner with retry", + "category": "SOURCE_CODE", + "description": "Creates and configures a Diesel/r2d2 SQLite connection pool with PRAGMA tuning for concurrency, runs embedded migrations, and provides retry/backoff logic for transient failures. 
Exposes in-memory pool creation for tests and configurable PoolConfig to customize timeouts and sizes.", + "key_constructs": [ + { + "name": "PoolConfig", + "type": "struct", + "purpose": "Holds configuration for pool sizing, timeouts, retries, and database path" + }, + { + "name": "DatabasePool", + "type": "struct", + "purpose": "Wraps a Diesel r2d2 pool and provides connection acquisition with exponential backoff" + }, + { + "name": "SqliteCustomizer", + "type": "struct", + "purpose": "Implements CustomizeConnection to set SQLite PRAGMA tuning on acquire" + }, + { + "name": "MIGRATIONS", + "type": "constant", + "purpose": "Embedded Diesel migrations to run on new databases" + } + ], + "semantic_tags": [ + "database", + "sqlite", + "connection-pool", + "migrations", + "retry" + ], + "handles_entities": [ + "SqliteConnection", + "Database migrations", + "Connection pool" + ], + "key_behaviors": [ + "creates and configures SQLite connection pools", + "applies embedded migrations on pool creation", + "retries connection acquisition and pool creation with backoff" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/database/schema.rs": { + "short_description": "Diesel-generated database schema for conversations table", + "category": "GENERATED", + "description": "Auto-generated Diesel schema module declaring the conversations table and its columns used by the repository layer. 
Provides compile-time SQL mapping for Diesel ORM interactions.", + "key_constructs": [], + "semantic_tags": [ + "database", + "diesel", + "schema", + "generated", + "conversations" + ], + "handles_entities": [ + "conversations (table)" + ], + "key_behaviors": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/bedrock_sanitize_ids.rs": { + "short_description": "Sanitizes tool call IDs to be compatible with AWS Bedrock constraints", + "category": "SOURCE_CODE", + "description": "Implements a Transformer that replaces invalid characters in Bedrock tool_use and tool_result IDs with underscores to satisfy Bedrock's ID pattern. Includes conversion logic for ContentBlock types and unit tests validating sanitization and no-op behavior for valid IDs.", + "key_constructs": [ + { + "name": "SanitizeToolIds", + "type": "struct", + "purpose": "Transformer that rewrites tool_use_id and tool_result tool_use_id to allowed characters" + }, + { + "name": "INVALID_CHARS", + "type": "constant", + "purpose": "Regex matching characters not allowed in Bedrock tool IDs" + } + ], + "semantic_tags": [ + "provider", + "aws-bedrock", + "sanitization", + "transformer", + "tool-calls" + ], + "handles_entities": [ + "ConverseStreamInput", + "ContentBlock", + "ToolUseBlock", + "ToolResultBlock" + ], + "key_behaviors": [ + "sanitizes tool call/result identifiers for Bedrock compatibility", + "reconstructs content blocks with sanitized IDs" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Parsing", + "title": "Sanitize tool call IDs for Bedrock/Anthropic compatibility", + "problem": "Bedrock (and Anthropic via Bedrock) rejected tool call IDs containing characters outside ^[a-zA-Z0-9_-]+ (e.g., \"functions.shell:0\").", + "root_cause": "Tool call IDs produced by the system included characters (dots, colons, punctuation) invalid for the Bedrock validation regex.", + "solution": "Add SanitizeToolIds transformer that replaces 
invalid characters with underscores using a precompiled regex. Transformer rebuilds ToolUse and ToolResult blocks with sanitized IDs. Tests added to validate sanitization and resilience to empty messages.", + "lesson_learned": "Normalize/validate external-facing IDs to the strictest target API expectations before sending; provide a transformer layer in the request pipeline to avoid per-provider hacks later.", + "commits": [ + "f469f68" + ], + "constructs": [ + "SanitizeToolIds::transform" + ] + } + ], + "tests": { + "exercised_by": [ + "crates/forge_repo/src/provider/bedrock_sanitize_ids.rs (inline unit tests)" + ], + "test_functions": [], + "source_commits": [ + "f469f68" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/event.rs": { + "short_description": "Converts event-source SSE stream to chat completion message stream", + "category": "SOURCE_CODE", + "description": "Transforms a reqwest_eventsource EventSource (SSE) into an async Stream of ChatCompletionMessage by parsing provider responses and mapping errors into enriched contexts. 
Handles special SSE events like OPEN and DONE and maps HTTP errors into descriptive anyhow contexts.", + "key_constructs": [ + { + "name": "into_chat_completion_message", + "type": "function", + "purpose": "Consumes an EventSource of SSE messages and yields parsed ChatCompletionMessage results or enriched errors", + "callers": [ + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 20, + "context": "use crate::provider::event::into_chat_completion_message;" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 160, + "context": "let stream = into_chat_completion_message::(parsed_url, source);" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 17, + "context": "use crate::provider::event::into_chat_completion_message;" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 218, + "context": "let stream = into_chat_completion_message::(url, es);" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 12, + "context": "use crate::provider::event::into_chat_completion_message;" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 88, + "context": "let stream = into_chat_completion_message::(url.clone(), source);" + } + ] + } + ], + "semantic_tags": [ + "sse", + "streaming", + "provider", + "parsing", + "error-handling" + ], + "handles_entities": [ + "ChatCompletionMessage", + "EventSource", + "Provider response payloads" + ], + "key_behaviors": [ + "parses SSE provider messages into domain completion items", + "maps HTTP and stream errors into contextual anyhow errors" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/utils.rs": { + "short_description": "HTTP helper utilities for provider requests", + "category": "SOURCE_CODE", + "description": "Provides small helpers to format HTTP context strings, safely join a base URL with a path, and build reqwest HeaderMap from key/value pairs. 
It performs validation on paths to avoid dangerous inputs and surfaces contextual errors on failures.", + "key_constructs": [ + { + "name": "format_http_context", + "type": "function", + "purpose": "Formats method, URL and optional status into a concise context string for logging/errors", + "callers": [ + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 22, + "context": "use crate::provider::utils::{create_headers, format_http_context};" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 158, + "context": ".with_context(|| format_http_context(None, \"POST\", &url))?;" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 178, + "context": ".with_context(|| format_http_context(None, \"POST\", parsed_url))?;" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 187, + "context": ".with_context(|| format_http_context(Some(status), \"POST\", parsed_url));" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 213, + "context": "format_http_context(None, \"POST\", request_url.clone())" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 217, + "context": "format_http_context(None, \"POST\", request_url.clone())" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 241, + "context": ".with_context(|| format_http_context(None, \"GET\", url))" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 245, + "context": "let ctx_msg = format_http_context(Some(status), \"GET\", url);" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 19, + "context": "use crate::provider::utils::{create_headers, format_http_context, join_url};" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 215, + "context": ".with_context(|| format_http_context(None, \"POST\", &url))" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 244, + "context": 
".with_context(|| format_http_context(None, \"GET\", url))" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 267, + "context": ".with_context(|| format_http_context(None, \"GET\", &url))" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 271, + "context": "let ctx_message = format_http_context(Some(status), \"GET\", &url);" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 14, + "context": "use crate::provider::utils::{create_headers, format_http_context};" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 86, + "context": ".with_context(|| format_http_context(None, \"POST\", &url))?;" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 102, + "context": ".with_context(|| format_http_context(None, \"GET\", url))" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 106, + "context": "let ctx_msg = format_http_context(Some(status), \"GET\", url);" + }, + { + "file": "crates/forge_repo/src/provider/event.rs", + "line": 10, + "context": "use super::utils::format_http_context;" + }, + { + "file": "crates/forge_repo/src/provider/event.rs", + "line": 81, + "context": ".map(|result| result.with_context(|| format_http_context(None, \"POST\", url.clone())))" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 19, + "context": "use crate::provider::utils::{create_headers, format_http_context};" + } + ] + }, + { + "name": "join_url", + "type": "function", + "purpose": "Safely appends a path to a base URL after validating the path", + "callers": [ + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 19, + "context": "use crate::provider::utils::{create_headers, format_http_context, join_url};" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 260, + "context": "let url = join_url(url, \"\")?;" + } + ] + }, + { + "name": "create_headers", + "type": "function", + 
"purpose": "Constructs a reqwest::HeaderMap from a vector of header key/value pairs", + "callers": [ + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 22, + "context": "use crate::provider::utils::{create_headers, format_http_context};" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 148, + "context": "let headers = create_headers(self.get_headers());" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 239, + "context": ".http_get(url, Some(create_headers(self.get_headers())))" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 19, + "context": "use crate::provider::utils::{create_headers, format_http_context, join_url};" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 197, + "context": "let headers = create_headers(self.get_headers_with_request(&request));" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 259, + "context": "let headers = create_headers(self.get_headers());" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 14, + "context": "use crate::provider::utils::{create_headers, format_http_context};" + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 82, + "context": "Some(create_headers(self.get_headers()))," + }, + { + "file": "crates/forge_repo/src/provider/google.rs", + "line": 100, + "context": ".http_get(url, Some(create_headers(self.get_headers())))" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 19, + "context": "use crate::provider::utils::{create_headers, format_http_context};" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 152, + "context": "let headers = create_headers(self.get_headers_for_conversation(conversation_id.as_deref()));" + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 395, + "context": "let headers = 
create_headers(provider_client.get_headers());" + } + ] + } + ], + "semantic_tags": [ + "http", + "url", + "headers", + "validation", + "provider" + ], + "handles_entities": [], + "key_behaviors": [ + "formats HTTP request context for errors and logs", + "validates and creates combined URLs", + "builds header maps for HTTP requests" + ], + "insights": [ + { + "type": "refactoring", + "category": "Security", + "title": "Move sanitize_headers to forge_infra", + "problem": "Duplicate sanitize_headers implementation existed in provider utils.", + "root_cause": "Utility duplication across modules resulted in inconsistent logic.", + "solution": "Remove local sanitize_headers and import forge_infra::sanitize_headers where needed. Tests moved to forge_infra.", + "lesson_learned": "Keep shared security utilities in infra; update imports and remove duplicates to avoid divergence.", + "commits": [ + "54b2ccc" + ], + "constructs": [ + "create_headers", + "format_http_context" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/fs_write.rs": { + "short_description": "File write service with snapshot coordination and validation", + "category": "SOURCE_CODE", + "description": "Implements ForgeFsWrite which writes files while preserving line endings, computing content hashes, coordinating snapshots for undo, and invoking file validation hooks. 
It enforces absolute paths, handles directory creation, and respects overwrite semantics, returning metadata including before-content, errors, and content hash.", + "key_constructs": [ + { + "name": "ForgeFsWrite", + "type": "struct", + "purpose": "Service that coordinates file writing with infra and repository concerns (snapshots, validation)" + }, + { + "name": "FsWriteService::write", + "type": "function", + "purpose": "Performs validation, snapshot insertion, line ending normalization, writes content, and returns FsWriteOutput" + }, + { + "name": "compute_hash", + "type": "function", + "purpose": "Computes a hash of file content used to detect changes (referenced from infra)" + } + ], + "semantic_tags": [ + "file-io", + "snapshots", + "validation", + "line-endings", + "fs-service" + ], + "handles_entities": [ + "files", + "snapshots", + "FsWriteOutput" + ], + "key_behaviors": [ + "writes files with normalized line endings", + "inserts snapshots before modification when applicable", + "validates file content and returns errors and content hash" + ], + "insights": [ + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Preserve and normalize line endings on write; compute hash of written content", + "problem": "Writes could introduce inconsistent line endings; hash was computed on input content and would diverge from actual written bytes.", + "root_cause": "Content was written as-is without normalizing to target file's existing line ending style; hash computed used original content variable rather than normalized content.", + "solution": "Detect existing file's line endings, determine target_line_ending (or default by platform), normalize input content to target_line_ending before writing, write normalized content, and compute hash on normalized content. 
Added tests verifying normalization and hash behavior.", + "lesson_learned": "Filesystem write operations must normalize line endings to preserve repository expectations and ensure hashes reflect the actual bytes on disk. Always compute derived values (hash) on what is actually written.", + "commits": [ + "0943a9f" + ], + "constructs": [ + "ForgeFsWrite::create (write flow)", + "compute_hash usage" + ] + } + ], + "tests": { + "exercised_by": [ + "inline tests at end of fs_write.rs (normalization and hash tests)" + ], + "test_functions": [], + "source_commits": [ + "0943a9f" + ] + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/tool_services/mod.rs": { + "short_description": "Re-exports the tool service modules for the crate", + "category": "SOURCE_CODE", + "description": "Declares submodules implementing individual tool services (filesystem, shell, fetch, image, plan, etc.) and publicly re-exports them so other crates can import service implementations from a single module path. 
It centralizes the tool service API surface for the forge_services crate.", + "key_constructs": [ + { + "name": "fetch", + "type": "class", + "purpose": "Module implementing network fetch tool service" + }, + { + "name": "fs_read", + "type": "class", + "purpose": "Module implementing filesystem read tool service" + }, + { + "name": "fs_write", + "type": "class", + "purpose": "Module implementing filesystem write tool service" + }, + { + "name": "shell", + "type": "class", + "purpose": "Module implementing shell execution tool service" + }, + { + "name": "skill", + "type": "class", + "purpose": "Module implementing generic skill/tool orchestration" + } + ], + "semantic_tags": [ + "tools", + "services", + "filesystem", + "shell", + "reexports" + ], + "handles_entities": [], + "key_behaviors": [ + "exposes individual tool service modules through a single public module", + "centralizes imports of tool implementations for consumers" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/openai/response.rs": { + "short_description": "Parses provider responses and converts them into internal chat messages", + "category": "SOURCE_CODE", + "description": "Defines DTOs for OpenAI-like responses (choices, usage, tool calls, extra content) and contains conversion logic to map those responses into the project's internal ChatCompletionMessage and Usage types. 
It handles streaming vs non-streaming choices, cost extraction, tool-call assembly, and provider-specific quirks (e.g., GitHub Copilot fields and extra metadata).", + "key_constructs": [ + { + "name": "Response", + "type": "class", + "purpose": "Top-level enum representing Success, CostOnly, or Failure provider responses", + "callers": [ + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 8, + "context": "use forge_app::dto::openai::{ListModelResponse, ProviderPipeline, Request, Response};" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 218, + "context": "let stream = into_chat_completion_message::(url, es);" + }, + { + "file": "crates/forge_repo/src/provider/openai.rs", + "line": 633, + "context": "let message = serde_json::from_str::(&content)" + } + ] + }, + { + "name": "Choice", + "type": "class", + "purpose": "Enum representing non-chat, non-streaming and streaming choice variants from providers" + }, + { + "name": "ResponseUsage", + "type": "class", + "purpose": "Struct capturing token counts and optional cost details from provider responses" + }, + { + "name": "ToolCall", + "type": "class", + "purpose": "Struct representing a tool call embedded in a provider response, including extra content", + "callers": [ + { + "file": "crates/forge_app/src/dto/openai/transformers/strip_thought_signature.rs", + "line": 42, + "context": "ToolCall," + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/strip_thought_signature.rs", + "line": 75, + "context": "tool_calls: Some(vec![ToolCall {" + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 9, + "context": "use super::response::{ExtraContent, FunctionCall, ToolCall};" + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 34, + "context": "pub tool_calls: Option>," + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 431, + "context": "impl From for ToolCall {" + }, + { + "file": 
"crates/forge_app/src/dto/openai/request.rs", + "line": 455, + "context": ".map(|tool_calls| tool_calls.into_iter().map(ToolCall::from).collect())," + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 432, + "context": "fn from(value: ToolCallFull) -> Self {" + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 436, + "context": "Self {" + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/trim_tool_call_ids.rs", + "line": 43, + "context": "use crate::dto::openai::response::{FunctionCall, ToolCall as ResponseToolCall};" + } + ] + }, + { + "name": "impl TryFrom for ChatCompletionMessage", + "type": "function", + "purpose": "Converts a parsed provider Response into the internal ChatCompletionMessage, handling tool calls, reasoning, streaming parts, and usage" + } + ], + "semantic_tags": [ + "dto", + "response-parsing", + "openai", + "usage-costs", + "tool-calls" + ], + "handles_entities": [ + "ChatCompletionMessage", + "Usage", + "ToolCallFull", + "ReasoningDetail" + ], + "key_behaviors": [ + "parses provider (OpenAI-like) JSON responses into structured DTOs", + "converts provider responses to internal ChatCompletionMessage values", + "extracts and normalizes usage and cost information" + ], + "insights": [ + { + "type": "feature", + "category": "Parsing", + "title": "Support cost-only / cost-as-string events and other proxy formats", + "problem": "Some proxies (OpenCode Zen, OpenRouter variants) emit cost as a string or as a separate cost-only response (no tokens).", + "root_cause": "Different backends encode cost differently (string vs number) and may emit cost as separate messages.", + "solution": "Added StringOrF64 enum untagged to accept numeric or string costs; added Response::CostOnly variant and handling that produces ChatCompletionMessage with usage.cost populated. 
Tests added for numeric and string cost forms.", + "lesson_learned": "Provider ecosystem is heterogeneous; DTOs must be tolerant (accept string/number) and explicitly handle cost-only messages to avoid losing billing data.", + "commits": [ + "40cfcc8" + ], + "constructs": [ + "StringOrF64", + "Response::CostOnly -> TryFrom" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/dto/openai/response.rs (unit tests added)" + ], + "source_commits": [ + "40cfcc8" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/call/context.rs": { + "short_description": "Context wrapper used during tool call execution for metrics and messaging", + "category": "SOURCE_CODE", + "description": "ToolCallContext provides a small execution context for tool calls, exposing a sender for streaming ChatResponse events and a shared, mutex-protected Metrics instance. It offers helpers to send messages (including specialized tool-input messages), and synchronous accessors to read or mutate metrics and todo lists in a thread-safe manner.", + "key_constructs": [ + { + "name": "ToolCallContext", + "type": "class", + "purpose": "Struct storing optional sender and shared Metrics for use during tool executions", + "callers": [ + { + "file": "crates/forge_app/src/tool_executor.rs", + "line": 5, + "context": "use forge_domain::{CodebaseQueryResult, ToolCallContext, ToolCatalog, ToolOutput};" + }, + { + "file": "crates/forge_app/src/tool_executor.rs", + "line": 48, + "context": "context: &ToolCallContext," + }, + { + "file": "crates/forge_app/src/tool_executor.rs", + "line": 154, + "context": "context: &ToolCallContext," + }, + { + "file": "crates/forge_app/src/tool_executor.rs", + "line": 337, + "context": "context: &ToolCallContext," + }, + { + "file": "crates/forge_app/src/orch.rs", + "line": 60, + "context": "tool_context: &ToolCallContext," + }, + { + "file": "crates/forge_app/src/orch.rs", + "line": 254, + 
"context": "ToolCallContext::new(self.conversation.metrics.clone()).sender(self.sender.clone());" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 8, + "context": "Model, SystemContext, TemplateConfig, ToolCallContext, ToolCallFull, ToolCatalog," + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 67, + "context": "context: &ToolCallContext," + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 97, + "context": "context: &ToolCallContext," + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 216, + "context": "context: &ToolCallContext," + }, + { + "file": "crates/forge_app/src/hooks/title_generation.rs", + "line": 167, + "context": "ProviderId, Role, TextMessage, ToolCallContext, ToolCallFull, ToolResult," + }, + { + "file": "crates/forge_app/src/hooks/title_generation.rs", + "line": 190, + "context": "_context: &ToolCallContext," + }, + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 175, + "context": "_: &forge_domain::ToolCallContext," + }, + { + "file": "crates/forge_app/src/agent.rs", + "line": 6, + "context": "ProviderId, ReasoningConfig, ResultStream, Temperature, ToolCallContext, ToolCallFull," + }, + { + "file": "crates/forge_app/src/agent.rs", + "line": 31, + "context": "context: &ToolCallContext," + } + ] + }, + { + "name": "ToolCallContext::new", + "type": "function", + "purpose": "Creates a new ToolCallContext with initial Metrics" + }, + { + "name": "ToolCallContext::send", + "type": "function", + "purpose": "Sends a ChatResponse via the optional ArcSender to stream results to callers" + }, + { + "name": "ToolCallContext::with_metrics", + "type": "function", + "purpose": "Provides synchronous access to mutate or read metrics under lock" + }, + { + "name": "ToolCallContext::update_todos", + "type": "function", + "purpose": "Applies incremental todo changes to the metrics' todo list" + } + ], + "semantic_tags": [ + "tool-calls", + "metrics", + "streaming", + 
"concurrency", + "todo-management" + ], + "handles_entities": [ + "Metrics", + "Todo", + "ChatResponse" + ], + "key_behaviors": [ + "streams tool-execution messages to a consumer", + "provides thread-safe access and mutation of execution metrics", + "manages todo items tracked during tool usage" + ], + "insights": [ + { + "type": "refactoring", + "category": "API", + "title": "Clarify todo access semantics in ToolCallContext docs", + "problem": "ToolCallContext methods were documented as returning/clobbering todos ambiguously; changes to Metrics altered expectations about what callers receive.", + "root_cause": "Metrics gained history-preserving semantics, so context-level docs needed update.", + "solution": "Updated doc comments to clarify that get_todos returns all known todos including historical completed todos and update_todos replaces active todos and returns only active todos.", + "lesson_learned": "When the underlying storage semantics change, update the user-facing context docs and the function contracts to avoid caller confusion.", + "commits": [ + "970a75f", + "4f1ad6b" + ], + "constructs": [ + "ToolCallContext::get_todos", + "ToolCallContext::update_todos" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/call/tool_call.rs": { + "short_description": "Structures and utilities for assembling and tracking tool calls", + "category": "SOURCE_CODE", + "description": "Defines ToolCallId, ToolCallPart, ToolCallFull and the ToolCall enum, plus parsing helpers that assemble full tool calls from streaming parts or XML-encoded calls. 
It also implements a ToolErrorTracker for counting consecutive tool failures and calculating when a tool is temporarily disabled by the agent logic.", + "key_constructs": [ + { + "name": "ToolCallId", + "type": "class", + "purpose": "Opaque identifier wrapper used to tag individual tool calls", + "callers": [ + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 291, + "context": "BoxStream, Content, FinishReason, TokenCount, ToolCall, ToolCallArguments, ToolCallId," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 716, + "context": "call_id: Some(ToolCallId::new(\"call_123\"))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 748, + "context": "use crate::{ToolCallId, ToolCallPart, ToolName};" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 752, + "context": "call_id: Some(ToolCallId::new(\"call_123\"))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 773, + "context": "call_id: Some(ToolCallId::new(\"call_123\"))," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 1229, + "context": "call_id: Some(ToolCallId::new(\"call_123\"))," + }, + { + "file": "crates/forge_domain/src/tools/result.rs", + "line": 5, + "context": "use crate::{ConversationId, Image, ToolCallFull, ToolCallId, ToolName};" + }, + { + "file": "crates/forge_domain/src/tools/result.rs", + "line": 14, + "context": "pub call_id: Option," + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 12, + "context": "Context, ContextMessage, ModelId, ToolCallFull, ToolCallId, ToolCatalog, ToolDefinition," + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 32, + "context": "pub tool_call_id: Option," + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 704, + "context": "ContextMessage, Role, TextMessage, ToolCallFull, ToolCallId, ToolCatalog, ToolName," + }, + { + "file": 
"crates/forge_app/src/dto/openai/request.rs", + "line": 740, + "context": "call_id: Some(ToolCallId::new(\"123\"))," + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 769, + "context": ".call_id(ToolCallId::new(\"123\"));" + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 795, + "context": ".call_id(ToolCallId::new(\"123\"))" + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 812, + "context": ".call_id(ToolCallId::new(\"456\"))" + }, + { + "file": "crates/forge_app/src/dto/openai/request.rs", + "line": 832, + "context": ".call_id(ToolCallId::new(\"456\"))" + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 4, + "context": "ChatCompletionMessage, Content, FinishReason, TokenCount, ToolCallFull, ToolCallId," + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 200, + "context": "pub id: Option," + }, + { + "file": "crates/forge_app/src/transformers/trim_context_summary.rs", + "line": 103, + "context": "use forge_domain::{Role, SummaryBlock, SummaryToolCall, ToolCallId};" + }, + { + "file": "crates/forge_app/src/transformers/trim_context_summary.rs", + "line": 152, + "context": ".id(ToolCallId::new(\"call1\"))" + } + ] + }, + { + "name": "ToolCallPart", + "type": "class", + "purpose": "Represents a partial tool-call chunk received during streaming", + "callers": [ + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 7, + "context": "ToolCallFull, ToolCallPart, Usage," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 210, + "context": "let tool_call_parts: Vec = messages" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 748, + "context": "use crate::{ToolCallId, ToolCallPart, ToolName};" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 751, + "context": "let invalid_tool_call_part = ToolCallPart {" + }, + { + "file": 
"crates/forge_app/src/dto/openai/response.rs", + "line": 5, + "context": "ToolCallPart, ToolName, Usage," + }, + { + "file": "crates/forge_app/src/dto/openai/response.rs", + "line": 431, + "context": "resp = resp.add_tool_call(ToolCallPart {" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 297, + "context": "ChatCompletionMessage, Content, FinishReason, ToolCallId, ToolCallPart, ToolName," + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 310, + "context": "ToolCallPart {" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 370, + "context": "ToolCallPart {" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 1332, + "context": "use forge_domain::{ChatCompletionMessage, Content, ToolCallId, ToolCallPart, ToolName};" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 1350, + "context": "ChatCompletionMessage::assistant(Content::part(\"\")).add_tool_call(ToolCallPart {" + }, + { + "file": "crates/forge_app/src/dto/anthropic/response.rs", + "line": 3, + "context": "ToolCallPart, ToolName," + }, + { + "file": "crates/forge_app/src/dto/anthropic/response.rs", + "line": 388, + "context": "ChatCompletionMessage::assistant(Content::part(\"\")).add_tool_call(ToolCallPart {" + }, + { + "file": "crates/forge_app/src/dto/anthropic/response.rs", + "line": 400, + "context": "ChatCompletionMessage::assistant(Content::part(\"\")).add_tool_call(ToolCallPart {" + }, + { + "file": "crates/forge_app/src/dto/google/response.rs", + "line": 3, + "context": "ToolCallPart, ToolName," + }, + { + "file": "crates/forge_app/src/dto/google/response.rs", + "line": 429, + "context": "ToolCallPart {" + } + ] + }, + { + "name": "ToolCallFull", + "type": "class", + "purpose": "Represents a fully assembled tool call with parsed arguments and optional thought_signature", + "callers": [ + { + "file": "crates/forge_domain/src/hook.rs", + "line": 5, + "context": "use crate::{Agent, 
ChatCompletionMessageFull, Conversation, ModelId, ToolCallFull, ToolResult};" + }, + { + "file": "crates/forge_domain/src/hook.rs", + "line": 71, + "context": "pub tool_call: ToolCallFull," + }, + { + "file": "crates/forge_domain/src/hook.rs", + "line": 76, + "context": "pub fn new(tool_call: ToolCallFull) -> Self {" + }, + { + "file": "crates/forge_domain/src/hook.rs", + "line": 86, + "context": "pub tool_call: ToolCallFull," + }, + { + "file": "crates/forge_domain/src/hook.rs", + "line": 93, + "context": "pub fn new(tool_call: ToolCallFull, result: ToolResult) -> Self {" + }, + { + "file": "crates/forge_domain/src/hook.rs", + "line": 649, + "context": "ToolcallStartPayload::new(ToolCallFull::new(\"test_tool\"))," + }, + { + "file": "crates/forge_domain/src/hook.rs", + "line": 655, + "context": "ToolCallFull::new(\"test_tool\")," + }, + { + "file": "crates/forge_domain/src/message.rs", + "line": 6, + "context": "use super::{ToolCall, ToolCallFull};" + }, + { + "file": "crates/forge_domain/src/message.rs", + "line": 222, + "context": "pub tool_calls: Vec," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 7, + "context": "ToolCallFull, ToolCallPart, Usage," + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 156, + "context": "if let Some(tool_call) = ToolCallFull::try_from_xml(&content)" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 203, + "context": "let initial_tool_calls: Vec = messages" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 219, + "context": "let partial_tool_calls = ToolCallFull::try_from_parts(&tool_call_parts)" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 224, + "context": "let tool_calls: Vec = initial_tool_calls" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 714, + "context": "let tool_call = ToolCallFull {" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", 
+ "line": 771, + "context": "let expected = ToolCallFull {" + }, + { + "file": "crates/forge_domain/src/result_stream_ext.rs", + "line": 1227, + "context": "let tool_call = ToolCallFull {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 10, + "context": "ResultStream, Scope, SearchParams, SyncProgress, SyntaxError, Template, ToolCallFull," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 234, + "context": "async fn execute_mcp(&self, call: ToolCallFull) -> anyhow::Result;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 699, + "context": "async fn execute_mcp(&self, call: ToolCallFull) -> anyhow::Result {" + } + ] + }, + { + "name": "ToolCallFull::try_from_parts", + "type": "function", + "purpose": "Assembles multiple ToolCallPart entries into one or more ToolCallFull instances, including workarounds for malformed model output" + }, + { + "name": "ToolErrorTracker", + "type": "class", + "purpose": "Tracks failure counts per tool and computes when a tool is 'maxed out' due to repeated errors", + "callers": [ + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 6, + "context": "Event, Hook, ProviderId, ToolCallFull, ToolErrorTracker, ToolResult," + }, + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 127, + "context": ".error_tracker(ToolErrorTracker::new(3))" + }, + { + "file": "crates/forge_app/src/orch.rs", + "line": 25, + "context": "error_tracker: ToolErrorTracker," + } + ] + } + ], + "semantic_tags": [ + "tool-calls", + "streaming-assembly", + "parsing", + "error-tracking", + "xml" + ], + "handles_entities": [ + "ToolCallFull", + "ToolCallPart", + "ToolName", + "ToolResult" + ], + "key_behaviors": [ + "assembles tool calls from streaming parts", + "parses tool calls embedded in XML", + "tracks tool failure counts and remaining attempts" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_app/src/dto/openai/transformers/drop_tool_call.rs": { + "short_description": "Transformer that removes tool call metadata for providers without tool support", + "category": "SOURCE_CODE", + "description": "Implements a Transformer (DropToolCalls) that strips tool call messages and tools metadata from Request DTOs: tool messages are converted to user messages and assistant messages have tool_calls removed. This is used to adapt the request payload for providers or models that don't support tool invocation semantics.", + "key_constructs": [ + { + "name": "DropToolCalls", + "type": "class", + "purpose": "Transformer that mutates Request values to drop tool call fields and reset tools", + "callers": [ + { + "file": "crates/forge_app/src/dto/openai/transformers/pipeline.rs", + "line": 4, + "context": "use super::drop_tool_call::DropToolCalls;" + }, + { + "file": "crates/forge_app/src/dto/openai/transformers/pipeline.rs", + "line": 46, + "context": ".pipe(DropToolCalls.when(when_model(\"mistral\")))" + } + ] + }, + { + "name": "Transformer::transform (impl for DropToolCalls)", + "type": "function", + "purpose": "Walks Request.messages and removes or converts tool-related fields and clears the tools list" + }, + { + "name": "test_mistral_transformer_tools_not_supported", + "type": "function", + "purpose": "Unit test validating that tool messages are converted and tools cleared" + } + ], + "semantic_tags": [ + "transformer", + "dto", + "compatibility", + "tool-calls", + "request-mutation" + ], + "handles_entities": [ + "Request", + "ContextMessage", + "ToolResult", + "ToolDefinition" + ], + "key_behaviors": [ + "converts tool messages into plain user messages", + "removes tool_calls from assistant messages", + "clears tools field to request providers without tool support" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/dto/mod.rs": { + "short_description": "Top-level DTO module that namespaces provider 
DTOs", + "category": "SOURCE_CODE", + "description": "Namespaces and re-exports provider-specific DTO modules (anthropic, google, openai) and a tools overview module to avoid naming collisions between providers. This file organizes DTOs so callers can import provider DTOs via the dto:: namespace.", + "key_constructs": [ + { + "name": "anthropic", + "type": "class", + "purpose": "Module containing Anthropic-specific DTOs" + }, + { + "name": "google", + "type": "class", + "purpose": "Module containing Google-specific DTOs" + }, + { + "name": "openai", + "type": "class", + "purpose": "Module containing OpenAI-like DTOs" + }, + { + "name": "tools_overview", + "type": "class", + "purpose": "Module exposing a summary/overview of tools" + } + ], + "semantic_tags": [ + "dto", + "namespacing", + "openai", + "anthropic", + "google" + ], + "handles_entities": [ + "Provider-specific DTOs" + ], + "key_behaviors": [ + "organizes and exposes provider DTO modules under a single namespace" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/hooks/compaction.rs": { + "short_description": "Event hook that triggers context compaction when thresholds are exceeded", + "category": "SOURCE_CODE", + "description": "CompactionHandler is an event hook that inspects a Conversation's context and triggers the Compactor to summarize/compact it when the agent's Compact configuration indicates compaction is needed. 
It is intended to be invoked after responses to keep conversation contexts bounded and efficient.", + "key_constructs": [ + { + "name": "CompactionHandler", + "type": "class", + "purpose": "Hook struct holding Agent and Environment used to decide and run compaction", + "callers": [ + { + "file": "crates/forge_app/src/hooks/mod.rs", + "line": 6, + "context": "pub use compaction::CompactionHandler;" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 12, + "context": "use crate::hooks::{CompactionHandler, DoomLoopDetector, TitleGenerationHandler, TracingHandler};" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 151, + "context": ".and(CompactionHandler::new(agent.clone(), environment.clone()))," + } + ] + }, + { + "name": "CompactionHandler::new", + "type": "function", + "purpose": "Constructs a new handler with the agent and environment" + }, + { + "name": "EventHandle> for CompactionHandler::handle", + "type": "function", + "purpose": "Checks context token count and invokes Compactor when compaction should occur" + } + ], + "semantic_tags": [ + "compaction", + "hooks", + "conversation", + "event-handling", + "context-management" + ], + "handles_entities": [ + "Conversation", + "Context", + "Agent", + "Environment" + ], + "key_behaviors": [ + "detects when conversation context exceeds configured thresholds", + "runs the Compactor to replace full context with a compacted summary" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/mod.rs": { + "short_description": "Re-exports CI job modules for workflow generation", + "category": "BUILD", + "description": "Declares and publicly re-exports several GitHub Actions job generator modules (bounty_job, release_build_job, lint, etc.) used to programmatically build CI workflows. 
This central module organizes job constructors for use by workflow definition code.", + "key_constructs": [], + "semantic_tags": [ + "ci", + "github-actions", + "jobs", + "workflow-generation" + ], + "handles_entities": [], + "key_behaviors": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/jobs/release_draft.rs": { + "short_description": "Generates a GitHub Actions job to create a draft release", + "category": "BUILD", + "description": "Provides create_draft_release_job which builds a Job that runs on pushes to main, checks out the repo, runs release-drafter, and exposes the draft release name/id as outputs for downstream jobs. It's used by the CI workflow generator to add a draft release step into the release pipeline.", + "key_constructs": [], + "semantic_tags": [ + "ci", + "release", + "github-actions", + "draft-release", + "workflow" + ], + "handles_entities": [], + "key_behaviors": [ + "creates a workflow job that drafts a release on push to main", + "exports release id and tag name as job outputs" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/steps/setup_protoc.rs": { + "short_description": "Reusable CI step to install the Protobuf compiler", + "category": "BUILD", + "description": "Defines setup_protoc() that returns a GitHub Actions Step configured to use an action which installs protoc (the Protobuf compiler). 
This lets workflow generators include a standardized step for Protobuf compilation across CI jobs.", + "key_constructs": [], + "semantic_tags": [ + "ci", + "protoc", + "setup", + "workflow-steps" + ], + "handles_entities": [], + "key_behaviors": [ + "adds a reusable GitHub Actions step to setup protoc for builds" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_ci/src/workflows/mod.rs": { + "short_description": "Re-exports CI workflow definitions", + "category": "BUILD", + "description": "Aggregates and re-exports various workflow builders (ci, autofix, release_publish, stale, etc.) so higher-level code can assemble full GitHub Actions workflows programmatically. It centralizes workflow modules for the CI generation tooling.", + "key_constructs": [], + "semantic_tags": [ + "ci", + "workflows", + "automation", + "github-actions" + ], + "handles_entities": [], + "key_behaviors": [], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/auth/auth_token_response.rs": { + "short_description": "Data structure representing OAuth token responses", + "category": "SOURCE_CODE", + "description": "Defines OAuthTokenResponse which models typical OAuth/OpenID token fields (access_token, refresh_token, expires_in/at, token_type, scopes, id_token). 
It provides canonical serialization/deserialization and a default token type helper to normalize token responses from providers.", + "key_constructs": [ + { + "name": "OAuthTokenResponse", + "type": "class", + "purpose": "Struct modeling OAuth token response payloads with access/refresh tokens and expiry", + "callers": [ + { + "file": "crates/forge_app/src/infra.rs", + "line": 9, + "context": "OAuthConfig, OAuthTokenResponse, ToolDefinition, ToolName, ToolOutput," + }, + { + "file": "crates/forge_app/src/infra.rs", + "line": 332, + "context": ") -> anyhow::Result;" + }, + { + "file": "crates/forge_infra/src/auth/http/standard.rs", + "line": 2, + "context": "use forge_domain::{AuthCodeParams, OAuthConfig, OAuthTokenResponse};" + }, + { + "file": "crates/forge_infra/src/auth/http/standard.rs", + "line": 60, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_infra/src/auth/http/anthropic.rs", + "line": 2, + "context": "use forge_domain::{AuthCodeParams, OAuthConfig, OAuthTokenResponse};" + }, + { + "file": "crates/forge_infra/src/auth/http/anthropic.rs", + "line": 69, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_infra/src/auth/http/github.rs", + "line": 2, + "context": "use forge_domain::{AuthCodeParams, OAuthConfig, OAuthTokenResponse};" + }, + { + "file": "crates/forge_infra/src/auth/http/github.rs", + "line": 22, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 6, + "context": "DeviceCodeRequest, OAuthConfig, OAuthTokenResponse, OAuthTokens, ProviderId, URLParamSpec," + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 704, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 829, + "context": ") -> anyhow::Result {" + }, + { + "file": "crates/forge_infra/src/auth/strategy.rs", + "line": 981, + "context": "let OAuthTokenResponse { access_token, expires_at, .. 
} =" + }, + { + "file": "crates/forge_infra/src/auth/util.rs", + "line": 5, + "context": "AuthCredential, AuthDetails, OAuthConfig, OAuthTokenResponse, OAuthTokens, ProviderId," + }, + { + "file": "crates/forge_infra/src/auth/util.rs", + "line": 25, + "context": "pub(crate) fn into_domain(token: T) -> OAuthTokenResponse {" + }, + { + "file": "crates/forge_infra/src/auth/util.rs", + "line": 26, + "context": "OAuthTokenResponse {" + }, + { + "file": "crates/forge_infra/src/auth/util.rs", + "line": 72, + "context": "token_response: OAuthTokenResponse," + }, + { + "file": "crates/forge_infra/src/auth/util.rs", + "line": 94, + "context": ") -> OAuthTokenResponse {" + }, + { + "file": "crates/forge_infra/src/auth/util.rs", + "line": 95, + "context": "OAuthTokenResponse {" + }, + { + "file": "crates/forge_infra/src/auth/util.rs", + "line": 121, + "context": ") -> anyhow::Result {" + } + ] + }, + { + "name": "default_token_type", + "type": "function", + "purpose": "Returns the default token type string 'Bearer'" + } + ], + "semantic_tags": [ + "authentication", + "oauth", + "tokens", + "dto" + ], + "handles_entities": [ + "OAuthTokenResponse", + "access_token", + "refresh_token" + ], + "key_behaviors": [ + "represents OAuth provider token responses for authentication flows" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/compact/compact_config.rs": { + "short_description": "Configuration and trigger logic for context compaction", + "category": "SOURCE_CODE", + "description": "Defines the Compact configuration struct with fields for retention window, eviction percentage, token/turn/message thresholds, and compaction model overrides. 
It includes logic to decide when compaction should occur based on token counts, number of user turns, message counts, or last-message policies and validates percentage inputs during deserialization.", + "key_constructs": [ + { + "name": "Compact", + "type": "class", + "purpose": "Configuration struct that governs automatic context compaction behavior and thresholds", + "callers": [ + { + "file": "crates/forge_app/src/compact.rs", + "line": 2, + "context": "Compact, CompactionStrategy, Context, ContextMessage, ContextSummary, Environment," + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 12, + "context": "compact: Compact," + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 17, + "context": "pub fn new(compact: Compact, environment: Environment) -> Self {" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 179, + "context": "let compactor = Compactor::new(Compact::new(), environment);" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 236, + "context": "let compactor = Compactor::new(Compact::new(), environment);" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 297, + "context": "let compactor = Compactor::new(Compact::new(), environment);" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 493, + "context": "let compactor = Compactor::new(Compact::new(), environment);" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 518, + "context": "let compactor = Compactor::new(Compact::new(), environment);" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 559, + "context": "let compactor = Compactor::new(Compact::new(), environment);" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 661, + "context": "let fixture = Compact::new().model(\"test-model\");" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 669, + "context": "let fixture = Compact::new()" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 679, + "context": "let 
fixture = Compact::new().model(\"test-model\").turn_threshold(1_usize);" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 687, + "context": "let fixture = Compact::new()" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 697, + "context": "let fixture = Compact::new()" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 709, + "context": "let fixture = Compact::new()" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 721, + "context": "let fixture = Compact::new()" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 731, + "context": "let fixture = Compact::new().model(\"test-model\").on_turn_end(true);" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 739, + "context": "let fixture = Compact::new().model(\"test-model\").on_turn_end(false);" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 747, + "context": "let fixture = Compact::new()" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 758, + "context": "let compact = Compact::new()" + } + ] + }, + { + "name": "Compact::should_compact", + "type": "function", + "purpose": "Determines if compaction should be triggered given a Context and token count" + }, + { + "name": "deserialize_percentage", + "type": "function", + "purpose": "Custom deserializer ensuring eviction percentages are within 0.0..=1.0" + } + ], + "semantic_tags": [ + "compaction", + "configuration", + "thresholds", + "context-management", + "validation" + ], + "handles_entities": [ + "Context", + "ModelId" + ], + "key_behaviors": [ + "decides when to trigger context compaction based on configured thresholds", + "validates percentage configuration for eviction window" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/compact/strategy.rs": { + "short_description": "Algorithms to compute which context messages to evict during compaction", + "category": "SOURCE_CODE", + "description": 
"Implements CompactionStrategy (Evict, Retain, Min, Max) with conversion to fixed retention and logic to compute an eviction_range over a Context. It carefully preserves tool-call atomicity and assistant boundaries when selecting the message sequence to compact.", + "key_constructs": [ + { + "name": "CompactionStrategy", + "type": "class", + "purpose": "Enum representing different compaction strategies and combinators (evict/retain/min/max)", + "callers": [ + { + "file": "crates/forge_app/src/compact.rs", + "line": 2, + "context": "Compact, CompactionStrategy, Context, ContextMessage, ContextSummary, Environment," + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 42, + "context": "let eviction = CompactionStrategy::evict(self.compact.eviction_window);" + }, + { + "file": "crates/forge_app/src/compact.rs", + "line": 43, + "context": "let retention = CompactionStrategy::retain(self.compact.retention_window);" + } + ] + }, + { + "name": "CompactionStrategy::to_fixed", + "type": "function", + "purpose": "Converts percentage-based strategy to an equivalent fixed preserve-last-N value based on token counts" + }, + { + "name": "CompactionStrategy::eviction_range", + "type": "function", + "purpose": "Computes the (start, end) message indices to evict from a Context" + }, + { + "name": "find_sequence_preserving_last_n", + "type": "function", + "purpose": "Finds the compactable sequence while preserving assistant/message/tool-call invariants" + } + ], + "semantic_tags": [ + "compaction", + "algorithm", + "strategy", + "message-eviction", + "tool-call-preservation" + ], + "handles_entities": [ + "Context", + "Message" + ], + "key_behaviors": [ + "computes which span of messages to evict during compaction", + "preserves tool-call/result atomicity when selecting eviction ranges" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/policies/engine.rs": { + "short_description": "Policy engine to evaluate 
permissions for operations", + "category": "SOURCE_CODE", + "description": "PolicyEngine wraps a PolicyConfig and provides convenient methods to determine whether a PermissionOperation is allowed, denied, or requires confirmation. It evaluates policies in order, short-circuits on Deny/Confirm, and returns the last Allow if no denials are found, supporting workflow-level policy resolution.", + "key_constructs": [ + { + "name": "PolicyEngine", + "type": "class", + "purpose": "Struct that encapsulates PolicyConfig and provides evaluation helpers", + "callers": [ + { + "file": "crates/forge_services/src/policy.rs", + "line": 7, + "context": "ExecuteRule, Fetch, Permission, PermissionOperation, Policy, PolicyConfig, PolicyEngine," + }, + { + "file": "crates/forge_services/src/policy.rs", + "line": 167, + "context": "let engine = PolicyEngine::new(&policies);" + } + ] + }, + { + "name": "PolicyEngine::can_perform", + "type": "function", + "purpose": "Public API to check permission for a given PermissionOperation" + }, + { + "name": "PolicyEngine::evaluate_policies", + "type": "function", + "purpose": "Internal routine that evaluates configured policies and applies precedence rules" + }, + { + "name": "PolicyEngine::evaluate_policy_set", + "type": "function", + "purpose": "Evaluates a single policy set and returns the effective permission" + } + ], + "semantic_tags": [ + "policy", + "permissions", + "access-control", + "workflow", + "authorization" + ], + "handles_entities": [ + "PolicyConfig", + "Permission", + "PermissionOperation" + ], + "key_behaviors": [ + "evaluates policies to allow/deny/confirm operations", + "applies evaluation order and precedence among policies" + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_api/src/api.rs": { + "file_path": "crates/forge_api/src/api.rs", + "short_description": "Defines the async API trait surface used by the rest of the application and external callers", + "category": 
"SOURCE_CODE", + "description": "This file declares the API trait that represents the full high-level surface of operations the UI and other callers use to interact with Forge runtime services. It exists to decouple callers (CLI/UI/tests/remote transports) from concrete service implementations (ForgeAPI/ForgeServices/ForgeRepo/ForgeInfra). The trait is async and Send+Sync so implementations can be used across threads and in async contexts. The trait groups methods by functional areas: file discovery, tools/models/providers/agents, chat/streaming, conversation persistence and manipulation, shell execution, MCP config/read/write, provider auth and credential management, workspace indexing and search, config mutation operations, user info/usage, and data generation streams.\n\nThe file was designed as a single authoritative interface boundary for the control-plane operations Forge needs at runtime. The method signatures are explicit about domain types from forge_domain and forge_app (Conversation, ConversationId, ChatRequest/Response, Provider/ProviderId, ModelId, WorkspaceId, SyncProgress, etc.) and return Result wrappers allowing implementors to propagate errors with anyhow::Error. Streaming outputs use forge_stream::MpscStream or futures::BoxStream for long-running operations. The trait is intentionally broad because multiple subsystems (UI, CLI, remote transports, tests) are expected to call into the same surface. Implementations (e.g., ForgeAPI in crates/forge_api/src/forge_api.rs) create a ForgeApp instance and delegate. Several methods are synchronous (environment(), hydrate_channel()) where appropriate.\n\nEditing this file changes the public API surface. The commit history indicates past breaking changes (rename/split of API methods to support per-agent providers/models and additions for commit/suggest config). The trait is implemented by concrete adapters tied to other crates; consumers (UI and CLI) rely on these method names and semantics. 
The trait also documents runtime contracts, such as how provider/model resolution is expected (get_agent_provider/get_default_provider) and how streaming responses should be produced. Respecting method shapes and return types is critical for compatibility across the codebase.", + "key_constructs": [ + { + "name": "API", + "type": "trait", + "purpose": "Primary async interface for runtime operations exposed to UI/CLI and other callers", + "reasoning": "This trait centralizes all high-level operations (chat, providers, conversations, workspace sync/search, config changes, auth) so edits here affect many consumers; maintain exact method signatures and semantics to avoid breaking implementors and callers." + } + ], + "semantic_tags": [ + "api", + "async", + "providers", + "conversation", + "workspace", + "streaming" + ], + "handles_entities": [ + "Conversation", + "ConversationId", + "Provider", + "ProviderId", + "AgentId", + "Model", + "ModelId", + "WorkspaceId", + "ChatRequest", + "ChatResponse", + "McpConfig" + ], + "key_behaviors": [ + "discovers files for autocompletion", + "lists tools, models, providers, and agents", + "executes chat requests and returns streaming responses", + "persists and manipulates conversations (upsert, delete, rename, compact)", + "manages provider authentication and credentials", + "syncs and queries workspace indexes for semantic search", + "applies atomic configuration mutations", + "provides user and usage metadata", + "generates streamed JSON data via generate_data" + ], + "pitfalls": [ + { + "mistake": "Changing method names, signatures, or types in this trait without updating all implementors (ForgeAPI) and callers (UI/CLI)", + "consequence": "Compiler errors across multiple crates and potential runtime mismatches; breaking public API used by many components", + "prevention": "Keep trait method signatures stable; coordinate changes with implementations in crates/forge_api/src/forge_api.rs and consumers like 
crates/forge_main/src/ui.rs" + }, + { + "mistake": "Conflating synchronous and asynchronous responsibilities (e.g., turning a sync method into async or vice versa) without updating callers", + "consequence": "Deadlocks or changed runtime behavior; callers may be forced to await where they previously didn't", + "prevention": "Preserve async/sync boundary choices; if changing, update all callers and ensure no blocking operations on async runtime" + }, + { + "mistake": "Altering streaming types (MpscStream / BoxStream) or their error wrappers", + "consequence": "Breaks streaming consumers and adapters expecting a specific stream type and message shape", + "prevention": "Maintain stream type contracts; update all stream producers/consumers atomically" + } + ], + "reading_guide": { + "start_here": "API", + "key_sections": [ + "chat: chat(&self, ChatRequest) -> MpscStream> \u2014 defines streaming chat behavior", + "conversation CRUD: upsert_conversation, conversation, get_conversations, delete_conversation, rename_conversation, compact_conversation \u2014 how persistence is expected to behave", + "provider/auth: get_provider, get_providers, get_agent_provider, get_default_provider, init_provider_auth, complete_provider_auth, remove_provider \u2014 auth and provider lifecycle", + "workspace: sync_workspace, query_workspace, list_workspaces, get_workspace_info, get_workspace_status, delete_workspaces \u2014 indexing and query expectations" + ], + "skip_unless_needed": [ + "less-frequently-called helper methods (hydrate_channel, migrate_env_credentials) when you're focused on chat/conversation changes" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_api", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_api/src/forge_api.rs", + "relationship": "Concrete implementation of this API trait; any change here requires corresponding changes there", + "likely_co_change": true, + 
"reason_to_check": "ForgeAPI delegates to ForgeApp/Services and implements the trait methods; changes must stay in sync" + }, + { + "path": "crates/forge_main/src/ui.rs", + "relationship": "UI uses this trait to call into runtime operations (chat, conversation, provider lookup)", + "likely_co_change": true, + "reason_to_check": "UI expects specific method semantics (e.g., get_default_provider/get_agent_provider) and uses streamed chat responses" + }, + { + "path": "crates/forge_app/src/services.rs", + "relationship": "Domain-level service trait definitions and types used by API implementations", + "likely_co_change": true, + "reason_to_check": "Service layer types and contracts (models(), chat(), migrate_env_credentials) inform API semantics" + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_api", + "cargo test --workspace" + ], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "feature", + "category": "API", + "title": "Expose commit/suggest config in API surface and provider models across all providers", + "problem": "API lacked endpoints for commit/suggest config and for cross-provider model listing.", + "root_cause": "New features required API surfaces to persist and retrieve commit/suggest settings and to list models per-provider.", + "solution": "Added get_commit_config/set_commit_config and get_suggest_config/set_suggest_config to API trait. 
Added get_all_provider_models to API to return models grouped by provider.", + "lesson_learned": "When adding CLI commands or UI features, make corresponding additions to API traits and implement routing in forge_api::forge_api so services can implement them.", + "commits": [ + "f8a260e", + "da37b43", + "0577b3a" + ], + "constructs": [ + "API::get_commit_config", + "API::set_commit_config", + "API::get_suggest_config", + "API::set_suggest_config", + "API::get_all_provider_models" + ] + }, + { + "type": "breaking_change", + "category": "API", + "title": "API trait renamed and split to support per-agent providers/models", + "problem": "API surface previously assumed a single global provider/model and had ambiguous method names (tools, models, providers, list_conversations, etc.).", + "root_cause": "Introducing per-agent provider/model semantics required clearer API names and new operations to get/set active agent and default provider/model.", + "solution": "Renamed and reorganized API trait methods (e.g., tools\u2192get_tools, models\u2192get_models, providers\u2192get_providers, list_conversations\u2192get_conversations). Added explicit methods for get_agent_provider, get_default_provider, set_default_provider, get_active_agent, set_active_agent, get_agent_model, get_default_model, set_default_model, and auth/login helper methods. Adjusted ForgeAPI implementation to delegate to ForgeApp::new(self.services.clone()). 
Changes are breaking to external API implementations.", + "commits": [ + "d9207f" + ], + "constructs": [ + "API trait methods (get_tools, get_models, get_providers, get_conversations, init_login, login, logout, get_agent_provider, get_default_provider, set_default_provider, get_active_agent, set_active_agent, get_agent_model, get_default_model, set_default_model)" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/cli.rs": { + "file_path": "crates/forge_main/src/cli.rs", + "short_description": "Clap-based CLI definitions and top-level command/subcommand structure for the forge binary", + "category": "CLI", + "description": "This file declares the entire command-line interface (CLI) shape using clap Parser/Subcommand/ValueEnum derives. It exists to provide a single, strongly-typed source of truth for all command names, flags, and positional arguments that the forge binary accepts. The CLI is intentionally structured with singular command names and typed subcommands (e.g., ConfigCommandGroup, WorkspaceCommandGroup) to improve help text, validation, and to support programmatic use by the UI and zsh plugin.\n\nThe CLI structure is tightly coupled to other parts of the system. The top-level Cli struct contains global flags (prompt, piped_input, conversation_id, directory, sandbox, verbose, agent, event) and a TopLevelCommand enum which enumerates all high-level actions (Agent, Zsh, List, Info, Commit, Mcp, Suggest, Provider, Cmd, Workspace, Data, Vscode, Update, Setup, Doctor, etc.). Many subcommands use domain types defined in forge_domain (AgentId, ConversationId, Effort, ModelId, ProviderId). There are helper conversions (From<Scope> for forge_domain::Scope) and ValueEnum-derived enums for typed choices (Scope, Transport). The file includes a small utility method Cli::is_interactive which determines interactive mode based on presence/absence of prompt/piped_input/subcommands. 
There is a top-of-file comment warning: changes to CLI structure must be verified against the ZSH plugin (shell-plugin/forge.plugin.zsh) because the plugin implements completion and shortcuts that depend on the exact structure.\n\nHistorically, this file has been extended when new config surfaces (commit/suggest) and runtime overrides (model/provider flags) were added; commit messages referenced in context show many breaking/feature changes affecting parsing and downstream behavior. Because the CLI is the external user-visible contract and is consumed by the UI and shell integration, edits often require coordinated updates across forge_main/src/ui.rs and forge_api implementations. The file is large and contains many subcommand groups; when modifying command shapes, ensure aliases, short flags, and ValueEnum usage remain consistent and that parsing tests (if any) are updated.", + "key_constructs": [ + { + "name": "Cli", + "type": "struct", + "purpose": "Root CLI parser that holds global flags and the chosen top-level subcommand", + "reasoning": "Cli is the entry point for command dispatch. Top-level flags (prompt, agent, conversation_id, directory, etc.) and subcommands drive program behavior; changes here affect startup and interactive behavior." + }, + { + "name": "Cli::is_interactive", + "type": "function", + "purpose": "Determines whether to start interactive mode based on flags", + "reasoning": "Used by the UI to decide whether to show the banner and start interactive session; keep its logic consistent with expectations around piped input, prompt, and subcommands." + }, + { + "name": "TopLevelCommand", + "type": "enum", + "purpose": "Enumerates all top-level CLI commands and maps them to command groups or inline actions", + "reasoning": "Altering variants or names here changes CLI grammar and affects completion and plugin scripts; maintain singular naming and aliases as documented at the top of the file." 
+ }, + { + "name": "Scope", + "type": "enum", + "purpose": "Typed configuration scope for commands (Local or User) with conversion to forge_domain::Scope", + "reasoning": "Used by MCP-related commands and config import/remove to decide where to read/write; ValueEnum ensures clap validation. The From impl maps CLI enum to domain enum." + }, + { + "name": "Transport", + "type": "enum", + "purpose": "Selects transport protocol for IPC/communication (Stdio or Sse)", + "reasoning": "Used by commands that can choose how they communicate (stdio vs SSE). ValueEnum and clap rename_all are used so textual command-line values match expected casing." + } + ], + "semantic_tags": [ + "cli", + "argument-parsing", + "subcommands", + "zsh-integration", + "config" + ], + "handles_entities": [ + "AgentId", + "ConversationId", + "ModelId", + "ProviderId", + "Scope", + "Transport" + ], + "key_behaviors": [ + "parses command-line arguments to select actions and parameters", + "determines interactive vs one-shot execution (is_interactive)", + "provides typed subcommands for configuration, workspace, provider, and MCP management", + "exposes runtime overrides (agent, model, provider) via top-level flags" + ], + "pitfalls": [ + { + "mistake": "Renaming or re-structuring subcommands without updating the ZSH plugin (shell-plugin/forge.plugin.zsh)", + "consequence": "Shell completion, shortcuts, and plugin behavior will break; users relying on plugin will experience incorrect completions or broken shortcuts", + "prevention": "Keep CLI structure and aliases stable or update the plugin in lockstep; verify zsh plugin compatibility after changes" + }, + { + "mistake": "Adding or removing flags that are expected to be threaded through to UI/API (e.g., --agent, --model, --provider) without updating the UI startup logic", + "consequence": "Runtime overrides may silently not apply, leading to confusing behavior when scripting or in interactive sessions", + "prevention": "When changing top-level 
flags, ensure UI.new_api initialization and API initializations accept and propagate them" + }, + { + "mistake": "Changing ValueEnum variants or rename rules for enums like Scope and Transport", + "consequence": "Clap parsing will break for existing scripts and CI; tests relying on parsing will fail", + "prevention": "Preserve textual representations or update documentation/tests and consumer code" + }, + { + "mistake": "Ignoring CI guard rails (RUSTFLAGS -Dwarnings) when editing -- leaving warnings", + "consequence": "CI treats warnings as errors and will fail; local builds may differ from CI", + "prevention": "Ensure code compiles cleanly without warnings" + } + ], + "reading_guide": { + "start_here": "Cli and Cli::is_interactive", + "key_sections": [ + "TopLevelCommand: enumerates the top-level actions \u2014 changes here are highest-impact", + "ConfigCommandGroup/ConfigCommand: typed config get/set subcommands and variants", + "WorkspaceCommandGroup: workspace sync/query/init commands and associated argument parsing (path, init flags, limit, top_k, starts_with/ends_with)" + ], + "skip_unless_needed": [ + "individual, long subcommand definitions and help text for commands you are not modifying (the file is large and many subcommands follow the same pattern)" + ] + }, + "tests": { + "exercised_by": [ + "inline tests in crates/forge_main/src/cli.rs updated for parsing model/provider and conversation --md" + ], + "test_functions": [], + "source_commits": [ + "0328695", + "a8d3acc" + ] + }, + "related_files": [ + { + "path": "crates/forge_api/src/forge_api.rs", + "relationship": "The CLI dispatch maps parsed arguments to API calls implemented by ForgeAPI; changing CLI semantics often requires changes in API/ForgeAPI to handle new flags.", + "likely_co_change": true, + "reason_to_check": "When adding/removing top-level flags or subcommands, ensure the implementor (ForgeAPI) and the UI wiring accept and use them" + }, + { + "path": "crates/forge_main/src/ui.rs", + 
"relationship": "UI consumes parsed Cli and dispatches work (e.g., on_new uses cli.agent to set active agent); changes to Cli shape require UI updates", + "likely_co_change": true, + "reason_to_check": "Top-level flags and subcommand semantics must be handled by UI startup and interactive flows" + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_main", + "cargo test --workspace" + ], + "data_constants_to_check": [], + "owns_authoritative_data": true, + "public_api_surface": true + }, + "insights": [ + { + "type": "feature", + "category": "Usability", + "title": "Allow atomic provider set with optional model via CLI --model flag", + "problem": "Setting provider from CLI then prompting for model caused extra interactions and mismatch when scripting.", + "root_cause": "The config set provider command didn't accept a model parameter.", + "solution": "Added optional --model argument to ConfigSetField::Provider to allow setting provider and model atomically. Updated tests to validate CLI parsing.", + "lesson_learned": "Expose combined operations in CLI when they are logically atomic to support scripting and reduce UI friction.", + "commits": [ + "03741f7" + ], + "constructs": [ + "ConfigSetField::Provider" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_main/src/cli.rs (unit tests edited)" + ] + } + }, + { + "type": "feature", + "category": "API", + "title": "Add typed subcommands for config get/set and new zsh alias commands", + "problem": "Config get/set previously used a single free-form field which was error-prone; new commit/suggest config needed typed args.", + "root_cause": "Evolving config surface required structured CLI parsing.", + "solution": "Replace ConfigField with ConfigSetField/ConfigGetField subcommands (Model, Provider, Commit, Suggest). Add Setup and Doctor top-level aliases for zsh integration. 
Update tests to validate parsing.", + "lesson_learned": "When a CLI gains multiple related options, prefer typed subcommands (clap Subcommand) to prevent brittle string parsing and to provide better auto-generated help & validation.", + "commits": [ + "f8a260e", + "da37b43", + "6cd0e96" + ], + "constructs": [ + "ConfigSetField", + "ConfigGetField", + "TopLevelCommand::Setup", + "TopLevelCommand::Doctor" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_main/src/cli.rs::tests" + ], + "source_commits": [ + "f8a260e", + "6cd0e96" + ] + } + }, + { + "type": "breaking_change", + "category": "Configuration", + "title": "Add --model and --provider CLI flags and deprecate env overrides", + "problem": "Previously users could override provider/model via FORGE_OVERRIDE_MODEL/FORGE_OVERRIDE_PROVIDER env vars; runtime overrides were needed on CLI.", + "root_cause": "Environment-based overrides had implicit global effect and were inconvenient for per-run usage.", + "solution": "Added --model and --provider flags to Cli, added tests for parsing, and propagated flags through UI/ForgeAPI initialization so CLI runtime overrides take precedence over env configuration.", + "lesson_learned": "When adding runtime overrides ensure they thread through the CLI -> UI -> API initialization path and update tests; prefer explicit CLI flags for per-run overrides.", + "commits": [ + "0328695" + ], + "constructs": [ + "Cli::parse", + "Cli struct fields: model, provider" + ] + }, + { + "type": "feature", + "category": "UX", + "title": "Add conversation show --md flag", + "problem": "Need ability to get raw markdown of last assistant message without renderer.", + "root_cause": "Show command previously always rendered markdown which prevented piping raw markdown to clipboard.", + "solution": "Added md boolean arg to ConversationCommand::Show and tests to validate parsing.", + "lesson_learned": "When exposing machine-friendly outputs, add explicit flags and route them through UI to output 
raw content.", + "commits": [ + "a8d3acc" + ], + "constructs": [ + "ConversationCommand::Show" + ] + }, + { + "type": "refactoring", + "category": "CLI", + "title": "Introduce session-scoped flags (agent, conversation_id) and simplify config set", + "problem": "Multiple CLI argument styles and a verbose config set that accepted multiple flags in one invocation were confusing and hard to test.", + "root_cause": "Earlier CLI accepted --agent-id, --aid etc in a non-uniform way and ConfigSetArgs had multiple optional fields.", + "solution": "Add top-level --agent and --conversation-id flags (with aliases). Simplified config set command to select a single field (ConfigField) and value (positional) rather than multiple named optional flags. Tests updated to reflect new CLIs.", + "commits": [ + "fc3dedd", + "a90be20", + "2070dba" + ], + "constructs": [ + "Cli struct fields (agent, conversation_id)", + "ConfigSetArgs (field, value)", + "SessionCommand variants (Show, Info)" + ] + }, + { + "type": "refactoring", + "category": "Typing", + "title": "Use enum for config field parsing", + "problem": "Config get command accepted arbitrary strings and required runtime validation and error types", + "root_cause": "String-based field selection was error-prone and required manual validation", + "solution": "Add ConfigField enum (Agent, Model, Provider) and change ConfigGetArgs.field to ConfigField; update handler to match on enum", + "commits": [ + "d0087f5" + ], + "constructs": [ + "ConfigField", + "ConfigGetArgs" + ], + "lesson_learned": "Prefer clap ValueEnum for enumerated CLI options to catch invalid values at parse time and simplify handler logic." 
+ } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_api/src/forge_api.rs": { + "file_path": "crates/forge_api/src/forge_api.rs", + "short_description": "Concrete Forge API implementation that bridges app services and environment infra to the API trait.", + "category": "SOURCE_CODE", + "description": "This file implements ForgeAPI, a concrete adapter that wires together the in-process Services implementation (ForgeServices over a ForgeRepo) with the environment/infra (ForgeInfra) and implements the crate's API trait. It exists so that external callers (the CLI, TUI, or an RPC server) can interact with the application surface via a single object that exposes async methods for discovery, chat, provider/model resolution, conversation persistence, workspace indexing/querying, provider auth, and other top-level capabilities.\n\nDesign-wise ForgeAPI is a thin fa\u00e7ade: it holds Arc references to services and infra, offers a small helper app() that instantiates ForgeApp from the current services, and implements the crate::API trait methods by delegating work to either services (business-logic layer) or infra (environmental operations like shell execution, hydration, and skill loading). There is also a specialized impl that constructs a fully-wired ForgeAPI>, ForgeRepo> via init(cwd, config, services_url).\n\nThe file handles cross-cutting concerns important to correctness: update_config computes whether any operations require agent cache invalidation and triggers reload_agents; methods that require the active agent obtain it via services.get_active_agent_id(); write/err forwarding to a ConsoleWriter is provided via a ConsoleWriter impl which delegates to infra. Many methods return streaming types (MpscStream/BoxStream) to support long-running or streaming operations. 
The implementation respects trait bounds so that different test or deployment wiring can provide alternate Services/Infra instances.", + "key_constructs": [ + { + "name": "ForgeAPI", + "type": "class", + "purpose": "Holds Arced references to services and infra and represents the concrete API implementation.", + "reasoning": "Centralizes access to Services and EnvironmentInfra; changes here affect how higher-level CLI/UI and tests instantiate the application surface.", + "callers": [ + { + "file": "crates/forge_main/src/main.rs", + "line": 7, + "context": "use forge_api::ForgeAPI;" + }, + { + "file": "crates/forge_main/src/main.rs", + "line": 132, + "context": "ForgeAPI::init(cwd.clone(), config, services_url.clone())" + } + ] + }, + { + "name": "new", + "type": "function", + "purpose": "ForgeAPI::new(services, infra) constructs the struct from Arcs.", + "reasoning": "Used by tests and init paths; preserves lifetime and sharing semantics expected by other modules via Arc." + }, + { + "name": "init", + "type": "function", + "purpose": "Specialized constructor that wires ForgeInfra, ForgeRepo, and ForgeServices producing a fully initialized ForgeAPI for normal runtime.", + "reasoning": "This is the canonical runtime wiring path. Callers that need the real environment/integration use this; tests may provide alternate wiring.", + "callers": [ + { + "file": "crates/forge_main/src/main.rs", + "line": 132, + "context": "ForgeAPI::init(cwd.clone(), config, services_url.clone())" + } + ] + }, + { + "name": "app", + "type": "function", + "purpose": "Helper that creates a ForgeApp instance from the current services (fresh when called).", + "reasoning": "Keeps logic that wants a ForgeApp scoped to methods that need it; the function's bounds enforce that Services and Infra implement EnvironmentInfra with the ForgeConfig type." 
+ }, + { + "name": "get_skills_internal", + "type": "function", + "purpose": "Loads skills via the infra SkillRepository and returns them.", + "reasoning": "Used to expose skill loading to callers; important that it uses infra.load_skills() and is async." + }, + { + "name": "update_config", + "type": "function", + "purpose": "Applies a list of ConfigOperation mutations and triggers agent reload when necessary.", + "reasoning": "Contains logic to detect SetSessionConfig ops and triggers services.reload_agents(); changes here affect cache invalidation semantics and must be preserved when editing config mutation flows." + }, + { + "name": "environment", + "type": "function", + "purpose": "Returns a cloned Environment from services (synchronous method required by the API trait).", + "reasoning": "Used by UI to get cwd/log paths; must remain synchronous and cheap to clone." + }, + { + "name": "hydrate_channel", + "type": "function", + "purpose": "Calls infra.hydrate() to reinitialize any communication channels or caches.", + "reasoning": "This is a synchronous notification trigger used by UI to warm caches; keep signature and behavior (no error returned to caller)." + }, + { + "name": "impl API for ForgeAPI>, ForgeRepo>", + "type": "function", + "purpose": "Implements the API trait for the concrete runtime ForgeAPI wiring.", + "reasoning": "This implementation block contains the bulk of method bodies that orchestrate services/infra interactions; editing it changes external API behavior consumed by the UI/CLI." + }, + { + "name": "impl ConsoleWriter for ForgeAPI", + "type": "function", + "purpose": "Delegates ConsoleWriter methods (write/write_err/flush/flush_err) through to infra.", + "reasoning": "Enables ForgeAPI to be used as a ConsoleWriter in other parts of the code (spinner/streaming); retain exact delegation semantics to avoid breaking terminal output handling." 
+ } + ], + "semantic_tags": [ + "api", + "services", + "infra", + "async", + "console", + "configuration" + ], + "handles_entities": [ + "Agent", + "Conversation", + "Provider", + "Model", + "Workspace", + "McpConfig" + ], + "key_behaviors": [ + "lists available tools and models", + "executes chat and returns a streaming response", + "persists and queries conversations", + "executes shell commands via infra", + "manages provider auth flows (init/complete/remove)", + "syncs and queries workspace indexes", + "applies atomic configuration updates and reloads agent cache when needed" + ], + "pitfalls": [ + { + "mistake": "Treat ForgeAPI.init's pre-read ForgeConfig as the immutable source of truth", + "consequence": "Could reintroduce stale config usage; agent/model resolution may become inconsistent after config changes", + "prevention": "Respect that update_config triggers reloads and that infra/services can be re-read; rely on services.get_provider_model/get_default_provider rather than storing a single config snapshot in long-lived state" + }, + { + "mistake": "Changing the signature or trait bounds of app() or the API impl without updating dependent modules", + "consequence": "Breaks compile-time trait constraints and runtime wiring; many callers depend on exact trait bounds (Services + EnvironmentInfra)", + "prevention": "Preserve trait bounds or update all consumers (UI, tests, and alternative infra wiring) when changing bounds" + }, + { + "mistake": "Modifying update_config without preserving the agent reload detection logic", + "consequence": "Agent cache may not be invalidated after provider/model changes, causing stale provider/model resolution", + "prevention": "Keep the logic that detects ConfigOperation::SetSessionConfig (or equivalent) and call services.reload_agents() when necessary" + }, + { + "mistake": "Altering ConsoleWriter delegation semantics", + "consequence": "Terminal and spinner output may be misrouted or lost, causing UI interleaving or 
missing output", + "prevention": "Preserve direct delegation to infra.write/write_err/flush/flush_err" + } + ], + "reading_guide": { + "start_here": "ForgeAPI (struct) and the impl API for ForgeAPI>, ForgeRepo> block", + "key_sections": [ + "init: runtime wiring of infra, repo, and services", + "update_config: atomic config mutation and agent cache reload logic", + "chat/commit/commit-related methods: show how app/GitApp are instantiated and used", + "hydrate_channel and ConsoleWriter impl: how output and hydration are delegated to infra" + ], + "skip_unless_needed": [ + "per-method simple delegations (e.g., get_conversations, delete_conversation) which are thin wrappers around services" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_api --lib -q", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_api/src/api.rs", + "relationship": "Defines the API trait that this file implements; changes to method signatures here must match that trait.", + "likely_co_change": true, + "reason_to_check": "When editing methods or signatures ensure api.rs trait stays in sync." + }, + { + "path": "crates/forge_app/src/services.rs", + "relationship": "Business-logic services that ForgeAPI delegates to; many methods call into services that are implemented here.", + "likely_co_change": true, + "reason_to_check": "If you change how service methods are invoked (args/semantics), update services implementations/tests accordingly." + }, + { + "path": "crates/forge_main/src/ui.rs", + "relationship": "Primary consumer of the API implementation; UI expects exact behavior for chat streaming, provider auth, config updates, and hydration.", + "likely_co_change": true, + "reason_to_check": "UI flows (on_new, provider/model selection, spinner behavior) depend on API semantics such as update_config triggering reloads and hydrate_channel being synchronous." 
+ }, + { + "path": "crates/forge_services", + "relationship": "Concrete Services implementation used by ForgeAPI::init wiring.", + "likely_co_change": false, + "reason_to_check": "If modifying how ForgeServices is constructed or its required traits, verify wiring here." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_api --lib", + "cargo test --workspace" + ], + "data_constants_to_check": [ + "forge_config::ForgeConfig (changes affect runtime wiring and behavior)" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "bug_fix", + "category": "State Management", + "title": "Re-read and centralize config usage; add update_config op", + "problem": "ForgeAPI previously kept a single ForgeConfig copy; after config updates the API or services could operate against stale configuration leading to inconsistent behavior (agent resolution, retries, etc.).", + "root_cause": "Configuration was threaded at initialization time and used globally instead of being fetched via infra when needed. There was also no single atomic API to mutate configuration (provider+model) causing potential partial writes.", + "solution": "ForgeAPI now holds infra and services, exposes update_config(ops) API that accepts a Vec, determines whether agent caches need reloading, writes via services.update_config, and triggers agent reload when necessary. init() now accepts a pre-read config and uses infra that can re-read config; app() constructs ForgeApp with latest config when needed.", + "lesson_learned": "Keep write paths atomic and centralize mutations; make it easy to invalidate dependent caches after config mutations. 
Prefer infra-backed reads for fresh values.", + "commits": [ + "7e8a51d", + "5bd0b94", + "bfd9f7f" + ], + "constructs": [ + "ForgeAPI::new", + "ForgeAPI::init", + "ForgeAPI::app", + "update_config", + "get_commit_config", + "get_suggest_config" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/ui.rs": { + "file_path": "crates/forge_main/src/ui.rs", + "short_description": "Terminal UI orchestration: interactive loop, subcommand handling, prompts, and display wiring.", + "category": "SOURCE_CODE", + "description": "This file contains the terminal UI layer (struct UI) used by the forge CLI/TUI. It orchestrates interactive sessions: building prompts, driving the event loop, handling subcommands, invoking API methods, rendering output via MarkdownFormat and SharedSpinner, and running background hydration tasks. The UI also contains helper formatting functions (e.g., format_mcp_server and format_mcp_headers) and many of the high-level CLI command handlers that present results or interactive pickers.\n\nThe UI is designed around an API factory closure (new_api in the struct) that allows the UI to re-create an API instance with a freshly read ForgeConfig. This enables the '/new' command to reflect config changes without restarting the process. The run/run_inner loop accommodates three modes: subcommand-only execution, one-shot prompt/piped input execution, and an interactive prompt loop. 
It carefully handles Ctrl+C interruptions, ReadLine errors (treated as fatal when originating from TTY issues), and tracks usage and spinner state around operations.\n\nUI contains logic that must be respected when editing: it spawns background tasks to hydrate caches (get_models, get_tools, get_agents, hydrate_channel) to optimize responsiveness but previous commits show these tasks can race with short-lived command execution; the code therefore separates heavy init (on_new) from light init (init_state) for commands that should not spawn background tasks. The UI integrates with global tracking (TRACKER) and the spinner/writer orchestration that synchronizes header rendering with tool output \u2014 these interactions are sensitive to ordering and notification semantics.", + "key_constructs": [ + { + "name": "format_mcp_server", + "type": "function", + "purpose": "Formats an MCP server config for display while redacting sensitive parameters; returns the command/URL string.", + "reasoning": "Used by MCP listing and import flows; keep redaction behavior (env keys masked) intact when updating MCP display." + }, + { + "name": "format_mcp_headers", + "type": "function", + "purpose": "Formats HTTP headers for display with values redacted; returns None when no headers.", + "reasoning": "Used to display header summary for HTTP MCP servers; behavior must remain consistent when updating MCP UI." 
+ }, + { + "name": "UI", + "type": "class", + "purpose": "Main UI struct that holds API factory, console, spinner, state, and other UI components.", + "reasoning": "This is the main entrypoint for interactive behavior; altering fields or their types requires updating many call sites and the new_api closure semantics.", + "callers": [ + { + "file": "crates/forge_main/src/lib.rs", + "line": 32, + "context": "pub use ui::UI;" + }, + { + "file": "crates/forge_main/src/main.rs", + "line": 10, + "context": "use forge_main::{Cli, Sandbox, TitleDisplayExt, UI, tracker};" + }, + { + "file": "crates/forge_main/src/main.rs", + "line": 131, + "context": "let mut ui = UI::init(cli, config, move |config| {" + } + ] + }, + { + "name": "init", + "type": "function", + "purpose": "Initializes UI with Cli, pre-read ForgeConfig, and an API factory closure (Fn(ForgeConfig) -> A).", + "reasoning": "Centralizes startup wiring and creates Console and Spinner; important because the new_api factory is used again on /new to reflect config changes.", + "callers": [ + { + "file": "crates/forge_main/src/main.rs", + "line": 131, + "context": "let mut ui = UI::init(cli, config, move |config| {" + } + ] + }, + { + "name": "run", + "type": "function", + "purpose": "Top-level async entry that calls run_inner and prints/logs full error chains on failure.", + "reasoning": "Wraps run_inner to ensure any runtime errors are surfaced to the user and tracked; keep error printing behavior consistent.", + "callers": [ + { + "file": "crates/forge_main/src/main.rs", + "line": 134, + "context": "ui.run().await;" + } + ] + }, + { + "name": "run_inner", + "type": "function", + "purpose": "Core interactive loop: handles subcommands, direct prompt/piped input, and the main prompt loop with Ctrl+C handling and per-command error reporting.", + "reasoning": "Contains the main control flow. 
Editing this may change UX and interruption semantics; preserve how errors from ReadLine are treated and how spinner is started/stopped around command execution." + }, + { + "name": "prompt", + "type": "function", + "purpose": "Builds ForgePrompt with usage/model/agent and delegates to console.prompt to obtain a SlashCommand.", + "reasoning": "Generates input context for the editor; changes here affect prompt behavior and how usage/model selection is shown to the user." + }, + { + "name": "on_new", + "type": "function", + "purpose": "Handler for creating a fresh conversation context: re-reads config, rebinds api via new_api, initializes state, optionally sets the agent passed via CLI, resets CLI conversation flags, and triggers spinner/banner/hydration and trackers.", + "reasoning": "Per commit history this function was a source of race conditions and heavy init; it intentionally spawns background hydration tasks and thus should remain distinct from light-weight init routines." + }, + { + "name": "on_agent_change", + "type": "function", + "purpose": "Sets the active agent in the services API and prints a formatted title update to the UI.", + "reasoning": "Validates the provided agent id against api.get_agents(); changing it affects how session-scoped agent selection is persisted and displayed." + }, + { + "name": "hydrate_caches", + "type": "function", + "purpose": "Fire-and-forget spawns to warm models/tools/agents and hydrate_channel in background.", + "reasoning": "Optimizes future calls; but spawning these tasks caused races in short-lived commands in the past, so callers use init_state(false) instead of on_new when they must avoid background tasks." + }, + { + "name": "handle_subcommands", + "type": "function", + "purpose": "Matches and executes the top-level subcommands enumerated in CLI: List, Zsh, Agent, Mcp, etc.", + "reasoning": "Contains a large match tree implementing many CLI actions. 
When editing CLI subcommands or porcelain output, this code is frequently updated; maintain compatibility with shell-plugin expectations when adjusting top-level commands or argument shapes." + } + ], + "semantic_tags": [ + "ui", + "interactive", + "cli", + "prompt", + "spinner", + "provider-selection" + ], + "handles_entities": [ + "Conversation", + "Agent", + "Model", + "Provider", + "McpConfig", + "Workspace" + ], + "key_behaviors": [ + "starts interactive prompt loop and processes SlashCommand objects", + "runs subcommands and one-off prompt/piped input in non-interactive mode", + "hydrates API caches in background to speed up subsequent calls", + "delegates provider/model resolution to the API and persists active agent", + "manages spinner and streaming output synchronization" + ], + "pitfalls": [ + { + "mistake": "Spawn heavy background hydration unconditionally from light-weight command paths", + "consequence": "Can race with short-lived commands and produce 'JoinHandle polled after completion' or other panics; causes flaky behavior in scripts/automation", + "prevention": "Respect the distinction between on_new (heavy init with hydrate_caches) and init_state(false) (light init). When adding or moving initialization logic, preserve this separation." + }, + { + "mistake": "Modifying CLI command shapes or top-level subcommands without updating the zsh plugin", + "consequence": "Shell plugin completion and shortcuts break; users' shell integration may become incompatible", + "prevention": "When changing CLI command names/structure, update shell-plugin/forge.plugin.zsh and verify compatibility." 
+ }, + { + "mistake": "Changing prompt error handling semantics (e.g., swallowing ReadLineError)", + "consequence": "TTY/readline failures may be hidden and the process may continue in an inconsistent state; interactive session should exit on unrecoverable TTY errors", + "prevention": "Keep ReadLineError propagation path intact so the first prompt failure can short-circuit run_inner and exit appropriately." + }, + { + "mistake": "Breaking formatting/porcelain output conventions", + "consequence": "Shell pickers and machine-parsing consumers receive unexpected columns/lengths; porcelain tests and shell-plugin behavior can fail", + "prevention": "When modifying output, ensure porcelain/truncation rules and header row counts remain consistent and adjust shell-plugin expectations." + } + ], + "reading_guide": { + "start_here": "UI struct and UI::init to understand how the UI is constructed and how the API factory (new_api) is used.", + "key_sections": [ + "run_inner: central control flow for interactive and non-interactive modes", + "on_new and init_state flows: differences between heavy and light initialization", + "prompt: how prompts are composed with usage/model/agent", + "hydrate_caches: background warming tasks that affect startup performance and potential races", + "handle_subcommands: where top-level CLI commands are implemented" + ], + "skip_unless_needed": [ + "Large match arms for specific subcommands (they handle many display details) when making unrelated changes" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_main/src/**/tests (various unit tests in ui/info/porcelain changes)" + ], + "test_functions": [], + "source_commits": [ + "5bd0b94", + "44d22ee", + "bfd9f7f", + "ed24862", + "cbe2825", + "e523f6f" + ] + }, + "related_files": [ + { + "path": "crates/forge_api/src/forge_api.rs", + "relationship": "Primary backend API that the UI calls into for business operations (models, providers, chat, config updates).", + "likely_co_change": true, + 
"reason_to_check": "When changing UI workflows that call API methods (e.g., update_config, set_active_agent, get_models), verify behavior and signatures in the API implementation." + }, + { + "path": "crates/forge_main/src/cli.rs", + "relationship": "Defines the CLI structure and TopLevelCommand enum consumed by the UI's handle_subcommands.", + "likely_co_change": true, + "reason_to_check": "If altering subcommand handling or adding flags, ensure cli.rs and shell-plugin remain aligned." + }, + { + "path": "crates/forge_app/src/services.rs", + "relationship": "Business logic that services API calls; UI expects certain semantics from service-backed operations (commit generation, tool lists, provider models).", + "likely_co_change": false, + "reason_to_check": "If changing how warnings/errors are surfaced from services, UI handlers should adapt how they display those messages." + }, + { + "path": "shell-plugin/forge.plugin.zsh", + "relationship": "Zsh plugin integration depends on CLI semantics and porcelain output formatting from UI.", + "likely_co_change": true, + "reason_to_check": "Any CLI or porcelain output changes require verifying plugin compatibility." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_main --lib", + "cargo test --workspace", + "cargo clippy --workspace --all-targets -- -D warnings" + ], + "data_constants_to_check": [ + "forge_config::ForgeConfig (UI reads config at init and rebinds API via new_api)", + "Porcelain truncation rules and TitleFormat strings (used by shell-plugin)" + ], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "insights": [ + { + "type": "bug_fix", + "category": "State Management", + "title": "Allow model selection when provider activation completes without selection; atomic provider+model write support", + "problem": "Provider activation flows could end with inconsistent state when provider credential flows completed but model selection wasn't performed atomically, leading to a situation where the UI believed a model had been selected while the persisted config had mismatched provider/model.", + "root_cause": "UI previously called set_default_provider and set_default_model as separate operations; cancellation during model selection sometimes left provider set without an appropriate model. Additionally, provider activation could complete model selection and the UI needed to detect that.", + "solution": "Add API path set_default_provider_and_model to perform an atomic write for provider+model. Adjust UI flows: display_credential_success no longer prompts for active provider choice; on_provider_selection returns bool indicating whether provider was saved; on_model_selection accepts optional provider_to_activate and uses set_default_provider_and_model when present. 
Also allow short-circuit when provider activation already set default model.", + "lesson_learned": "Mutations that must be consistent (provider+model) should be written atomically via a single API operation; UI flows must handle partial cancellations and check persisted state after interactive flows.", + "commits": [ + "bfd9f7f", + "44d22ee" + ], + "constructs": [ + "UI::on_model_selection", + "UI::on_provider_selection", + "display_credential_success", + "get_models", + "init_state" + ] + }, + { + "type": "refactoring", + "category": "Configuration", + "title": "UI now accepts pre-read ForgeConfig and new_api factory receives ForgeConfig", + "problem": "Refactor changed how config is seeded into UI and how new API instances are created for /new conversations.", + "root_cause": "Config was being read lazily; the startup path now pre-reads config to surface errors early.", + "solution": "UI::init signature changed to accept ForgeConfig and new_api is Fn(ForgeConfig) -> A; UI stores config for reads to avoid calling api.get_config repeatedly for startup-read-only fields.", + "lesson_learned": "Pre-validate and pass startup config to UI and infra factories to surface config parsing errors early and to provide consistent initial state.", + "commits": [ + "5bd0b94" + ], + "constructs": [ + "UI::init", + "UI::on_new", + "UI::on_env" + ] + }, + { + "type": "bug_fix", + "category": "Concurrency", + "title": "Initialize UI state selectively to avoid background task race at process exit", + "problem": "Calling on_new() started background tasks (hydrate_caches) which could race with process exit and cause 'JoinHandle polled after completion' panics when UI actions were short-lived (e.g., info command).", + "root_cause": "on_new performed heavy initialization including spawning fire-and-forget background tasks; some commands only needed minimal state init.", + "solution": "Replace on_new() call in Info command path with self.init_state(false) which only resolves 
agent/provider/model without spawning hydration background tasks.", + "commits": [ + "e37bbb9" + ], + "constructs": [ + "on_info", + "on_new", + "init_state" + ], + "lesson_learned": "Separate heavy background initialization from light-weight state setup to avoid races and spurious panics in short-lived CLI commands." + }, + { + "type": "bug_fix", + "category": "State Management", + "title": "Prevent partial config write when model selection is cancelled", + "problem": "During onboarding/provider activation the provider was persisted immediately. If model selection was cancelled afterwards the config ended up partially updated (provider set without model), causing user-visible inconsistent state.", + "root_cause": "The code wrote the provider to disk (set_default_provider) before verifying that a model selection had been confirmed. User cancellation after provider selection therefore left an incomplete config.", + "solution": "Delay writing the provider into the config until after model selection completes successfully. Functions that previously returned () were changed to return Option/boolean to allow early return when user cancels. The provider activation flow now validates preselected models, scopes model selection to the activated provider, and aborts without writing on cancel.", + "lesson_learned": "Avoid persisting partial state during interactive flows. 
Return richer status from selection flows (e.g., bool/Option) so callers can decide whether to persist changes only when a full, valid configuration is available.", + "commits": [ + "6a36b83", + "b8866ef" + ], + "constructs": [ + "on_provider_selection", + "activate_provider", + "on_model_selection", + "select_model", + "validate_model" + ] + }, + { + "type": "refactoring", + "category": "Configuration", + "title": "Scope model selection to activated provider during onboarding", + "problem": "Model list shown during onboarding could include models from other providers, confusing users and enabling selecting an incompatible model for the newly-activated provider.", + "root_cause": "select_model previously fetched all provider models but didn't filter when used during provider activation.", + "solution": "Add optional provider_filter parameter to select_model/on_model_selection and, when provided, filter retrieved provider models to the active provider.", + "lesson_learned": "When UI actions happen as part of a multi-step activation flow, ensure subsequent selections respect the context established by previous steps (scope lists by prior choice).", + "commits": [ + "b8866ef", + "6a36b83" + ], + "constructs": [ + "select_model", + "on_model_selection" + ] + }, + { + "type": "bug_fix", + "category": "Configuration", + "title": "Trim trailing slash from URL param values during provider login", + "problem": "User supplied base URLs with a trailing slash resulted in malformed composed provider URLs.", + "root_cause": "UI accepted free-text URL params and did not normalize trailing slashes before persisting/using the param.", + "solution": "Trim trailing '/' from param_value with trim_end_matches before storing.", + "lesson_learned": "Normalize user-supplied URLs/paths (trim trailing slashes) early to avoid inconsistent template rendering downstream.", + "commits": [ + "b282602" + ], + "constructs": [ + "provider param input mapping" + ] + }, + { + "type": "bug_fix", + 
"category": "Concurrency", + "title": "Notify orchestrator before running tool to avoid stdout interleaving", + "problem": "Tool stdout could interleave with UI header rendering, causing output to appear before the tool header.", + "root_cause": "Tool execution began immediately after sending ToolCallStart; UI rendering of the header was asynchronous and could be delayed, leading to interleaving.", + "solution": "ToolCallStart now carries an Arc notifier. Orchestrator creates notifier and awaits notifier.notified() after sending ToolCallStart. UI handles ToolCallStart by finishing writer/spinner and notifying orchestrator (NotifyGuard) to let tool execution start after the header is rendered.", + "lesson_learned": "When separate tasks produce related terminal output, coordinate via a simple synchronization primitive (Notify/oneshot) so that headers and content aren't interleaved. Ensure UI always notifies even on errors (scope guard).", + "commits": [ + "c1c0506" + ], + "constructs": [ + "NotifyGuard (local struct in ui handler)", + "finalize_provider_activation", + "activate_provider_with_model" + ] + }, + { + "type": "bug_fix", + "category": "Error Handling", + "title": "Propagate Readline/TTY errors as quick exit", + "problem": "Readline/TUI errors were printed as warnings but the UI continued, leading to confusing state. 
Under missing TTY the first prompt should cause process exit.", + "root_cause": "Readline errors were being swallowed and converted to messages instead of short-circuiting.", + "solution": "Editor.prompt now maps errors to ReadLineError; UI checks for ReadLineError and returns Err to exit quickly when prompt can't access TTY.", + "lesson_learned": "TTY/readline failures should often be fatal early; distinguish interactive failures and propagate them to stop processing.", + "commits": [ + "92321fe" + ], + "constructs": [ + "prompt handling", + "ReadLineError" + ] + }, + { + "type": "refactoring", + "category": "Usability", + "title": "Activate provider+model atomically when provided", + "problem": "Changing provider then prompting for model selection created a race/extra interaction when a preselected model is known.", + "root_cause": "Activation flow separately set provider and then launched interactive model selection; losing opportunity to set both atomically.", + "solution": "Added activate_provider_with_model(provider, model: Option) to optionally skip interactive model prompt and set model directly. 
CLI set subcommand exposes --model to preselect model when setting provider.", + "lesson_learned": "When operations are logically atomic from a user perspective (provider+model), provide an API to set them together to reduce UX friction and race conditions.", + "commits": [ + "03741f7" + ], + "constructs": [ + "activate_provider_with_model", + "finalize_provider_activation" + ] + }, + { + "type": "refactoring", + "category": "UI", + "title": "Adopt porcelain tables + fzf selection for multiple UI flows and centralize provider/model selection helpers", + "problem": "Multiple in-app selectors used different formatting approaches and dialoguer; selection UX needed consistency with shell plugin and fzf capabilities (header rows, starting cursor).", + "root_cause": "Fragmented selection code paths and dialoguer removal.", + "solution": "Refactor to build Info -> Porcelain tables and pass to ForgeSelect rows (fzf-wrapped) with header_lines and starting cursor. Add helper select_provider_from_list to centralize provider selection logic and adapt many flows: provider login/logout, agent selection, model selection. 
Also remove CliModel/CliProvider types from production code and keep test-only wrappers where necessary.", + "commits": [ + "7fc0c5e", + "2ba208b" + ], + "constructs": [ + "UI::select_provider_from_list", + "UI::select_provider", + "UI::select_model", + "UI::select_model (async_recursion)", + "UI::on_agent_change", + "UI::display_banner" + ] + }, + { + "type": "bug_fix", + "category": "Error Handling", + "title": "Show configured providers only and use provider.is_configured filter", + "problem": "Some provider lists included non-configured providers where expected only configured ones.", + "root_cause": "Earlier helper get_configured_providers was removed; flows didn't consistently filter providers.", + "solution": "Filter providers via .is_configured() at call sites and use select_provider_from_list which shows 'logged in' column as appropriate.", + "commits": [ + "7fc0c5e" + ], + "constructs": [ + "UI::select_provider", + "UI::select_provider_from_list" + ] + }, + { + "type": "feature", + "category": "UI", + "title": "Show commit/suggest config and allow setting commit/suggest model", + "problem": "Users could not set dedicated models for commit generation or suggestion generation via UI commands.", + "root_cause": "Config command lacked subcommands for commit/suggest, and UI didn't surface these config values.", + "solution": "Extended UI to fetch get_commit_config and get_suggest_config, show Commit/Suggest Provider and Model in configuration panel, added handling for ConfigSetField::Commit and ConfigSetField::Suggest to validate model and set the new config via API. 
Added ConfigGetField::Commit and ConfigGetField::Suggest handlers to display values.", + "lesson_learned": "When adding a new configuration shape, expand CLI enums, UI display, validation flow (validate_model scoped to provider), and services/api plumbing together to keep behavior consistent.", + "commits": [ + "f8a260e", + "da37b43", + "5becac1", + "a437d86", + "970a75f" + ], + "constructs": [ + "handle_config_set", + "handle_config_get", + "validate_model", + "on_zsh_doctor", + "on_zsh_setup", + "on_show_models" + ] + }, + { + "type": "bug_fix", + "category": "Error Handling", + "title": "Improve zsh plugin doctor output and error messages", + "problem": "Doctor script output used ambiguous symbols and error messages for install scripts had unhelpful formatting.", + "root_cause": "Script used [!!] and [--] symbols and UI code formatted the execution exit code with debug {:?} rather than string interpolation.", + "solution": "Normalized symbols to [ERROR]/[WARN], changed doctor script formatting to clearer dimmed detail prefixes, and improved execute_zsh_script_with_streaming to produce a human-friendly exit code string in error messages. 
UI now prints actionable warnings when zsh doctor completes successfully instructing to restart shell.", + "lesson_learned": "End-user shell tooling should produce clear, action-oriented messages; prefer warnings over errors for less critical diagnostics and include next steps.", + "commits": [ + "5becac1" + ], + "constructs": [ + "on_zsh_doctor", + "execute_zsh_script_with_streaming" + ] + }, + { + "type": "refactoring", + "category": "UX", + "title": "Truncate model names in porcelain output", + "problem": "Porcelain output for models could produce excessively long names causing misaligned UIs in shell plugin.", + "root_cause": "Porcelain output pipeline didn't truncate long fields.", + "solution": "Apply Porcelain::truncate(1, 40) when writing the configuration info headers for porcelain to keep columns readable.", + "lesson_learned": "When exposing machine-friendly (porcelain) output intended for pickers, limit field lengths to keep shell pickers stable and readable.", + "commits": [ + "a437d86" + ], + "constructs": [ + "UI::writeln (porcelain path)" + ] + }, + { + "type": "security", + "category": "Security", + "title": "Hide raw API key in UI after creating Forge auth credentials", + "problem": "CLI printed the Forge API key/token directly to the UI on creation, exposing secrets in terminal output.", + "root_cause": "Previous behavior surfaced auth.token in writeln_title calls.", + "solution": "Do not print the API key. 
Instead, show the path to the credentials file where keys are stored (via Environment::credentials_path()) and introduce init_forge_services() to create credentials and display the path.", + "commits": [ + "a1e7f35" + ], + "constructs": [ + "init_forge_services", + "prompt", + "configure_provider" + ] + }, + { + "type": "refactoring", + "category": "State Management", + "title": "CLI session flags and agent handling moved to session-level (multiple commits)", + "problem": "Multiple UI flows and CLI commands previously relied on a global persisted 'operating agent'/'operating model' and environment variables, leading to confusing behaviour for session-scoped agent selection.", + "root_cause": "Agent selection was stored as a global persistent app configuration and retrieved in various places (API calls, env variables, CLI plugin), causing multiple places to be updated for agent/session changes.", + "solution": "UI now uses new API methods (get_active_agent / set_active_agent / get_default_provider / get_agent_model) and sets CLI-provided agent at startup; conversation initialization respects CLI flags; UI helpers get_provider/get_agent_model added to centralize per-agent provider/model resolution. 
Many command handlers now accept session-scoped agent info.", + "commits": [ + "94ac901", + "a90be20", + "2070dba", + "fc3dedd", + "d9207fc" + ], + "constructs": [ + "init_forge_services", + "init_conversation", + "get_provider", + "get_agent_model", + "on_show_config", + "on_show_providers", + "on_show_tools", + "on_info", + "prompt", + "on_command" + ] + }, + { + "type": "refactoring", + "category": "UX", + "title": "Make Info display keys fixed-width per section", + "problem": "Info display alignment varied and was visually inconsistent across sections.", + "root_cause": "Key column width was not computed per-section, causing misaligned colons and values.", + "solution": "Compute max key width for items under each Title and pad keys within that section; updated Info formatting and tests to assert consistent padding.", + "commits": [ + "95a0bb5" + ], + "constructs": [ + "on_show_tools", + "on_show_config", + "on_info", + "display_banner" + ] + }, + { + "type": "feature", + "category": "Configuration", + "title": "Completion prompt can be disabled via env var", + "problem": "Users couldn't opt out of completion prompt after tasks", + "root_cause": "UI.on_completion unconditionally prompted user", + "solution": "Check should_show_completion_prompt() at start of on_completion and skip early if false", + "commits": [ + "e523f6f" + ], + "constructs": [ + "UI::on_completion" + ], + "lesson_learned": "Gate interactive UI flows with explicit env/config checks so automation and CI users can disable prompts." 
+ }, + { + "type": "refactoring", + "category": "State Management", + "title": "Config command handling moved into UI (simplified)", + "problem": "ConfigManager module spread config logic across separate files and required an extra abstraction", + "root_cause": "Config submodule was split into many files (handler/display/helpers/error) increasing indirection", + "solution": "Inline config handling into UI: implement handle_config_command, handle_config_set/get/list inside UI, remove ConfigManager exports and deleted config modules", + "commits": [ + "cbe2825", + "d0087f5" + ], + "constructs": [ + "UI::handle_config_command", + "UI::handle_config_set", + "UI::handle_config_get", + "UI::handle_config_set_non_interactive (handle_non_interactive_config_set)", + "UI::validate_agent", + "UI::validate_model", + "UI::validate_provider" + ], + "lesson_learned": "Moving small CLI command handlers into the top-level UI can reduce module churn, but it's a breaking API change for consumers of ConfigManager. When removing modules, update exports (lib.rs) and note the API surface removal." 
+ }, + { + "type": "bug_fix", + "category": "Parsing / Formatting", + "title": "Porcelain output adjustments and skipping header rows", + "problem": "Porcelain-mode tabular output had incorrect column mapping and skipped wrong number of rows", + "root_cause": "Porcelain formatting changed semantics (long vs short, columns) and code used inconsistent skip/drop counts and title positioning", + "solution": "Introduce Porcelain module and modify UI to call Porcelain::from(&info).into_long()/.skip(n)/.drop_col(0) or .skip(2) depending on context; adjust when to include title and how many rows to skip", + "commits": [ + "ed24862", + "9b3b618", + "c71b2a4" + ], + "constructs": [ + "write_info_or_porcelain (removed)", + "UI::on_show_* helpers that call Porcelain", + "Porcelain::from" + ], + "lesson_learned": "Porcelain (machine-friendly) output must maintain consistent column counts and explicit row skipping/truncation when adding human-readable headers. Introduce a dedicated porcelain formatter instead of ad-hoc column helpers to avoid brittle logic scattered across UI." + }, + { + "type": "refactoring", + "category": "Formatting", + "title": "Config display moved from module to UI wiring", + "problem": "config module and helper display functions were deleted and inlined into UI to reduce indirection", + "root_cause": "Simplify command flow and reduce small-module overhead", + "solution": "UI now directly builds Info and handles porcelain vs normal display", + "commits": [ + "cbe2825", + "ed24862" + ], + "constructs": [ + "UI::on_show_config", + "UI::handle_config_command" + ], + "lesson_learned": "Inlining small logic might simplify maintenance but watch for duplication; consider extracting reusable formatters (Info/Porcelain) rather than duplicated print logic." 
+ }, + { + "type": "bug_fix", + "category": "UX/Presentation", + "title": "Reorder conversation info key/value for better readability", + "problem": "Conversation listing showed Id before Updated causing minor UX confusion", + "root_cause": "Field insertion order produced unintuitive ordering", + "solution": "Swap add_key_value calls so Updated displays before Id", + "commits": [ + "cb93ac7" + ], + "constructs": [ + "UI::on_show_conversations" + ], + "lesson_learned": "Small ordering changes in display code can affect UX; prefer consistent ordering across commands." + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/env.rs": { + "file_path": "crates/forge_domain/src/env.rs", + "short_description": "Domain-level environment and session configuration helpers plus path helpers for runtime artifacts.", + "category": "SOURCE_CODE", + "description": "Defines domain-level types and helpers that represent the minimal runtime environment and small session/config abstractions decoupled from on-disk formats. This file exists to provide a centralized, testable representation of the environment (OS, CWD, home, shell, base_path) and a set of convenience path-building methods used throughout the application (logs, snapshots, agents folder, credentials file, skills and commands locations, etc.). It also defines SessionConfig and ConfigOperation (domain-level mutation descriptors) which are used by infra components to apply changes to persisted ForgeConfig.\n\nThe design keeps runtime-only environment data separate from ForgeConfig (on-disk config). Environment has only the fields that cannot be sourced from ForgeConfig and exposes many small helpers so callers don't replicate base_path.join(\"...\") logic. SessionConfig and ConfigOperation exist to represent higher-level changes to app configuration in an atomic, explicit manner (SetSessionConfig, SetCommitConfig, SetSuggestConfig, SetReasoningEffort). 
WorkspaceHash is a thin wrapper around a u64 derived by hashing cwd (used for workspace-scoped identity). Several unit tests are embedded in the module to validate path-building expectations and independence between base_path and cwd-based paths.", + "key_constructs": [ + { + "name": "SessionConfig", + "type": "struct", + "purpose": "Represents the active provider and model for a runtime session (provider_id, model_id).", + "reasoning": "Used as a minimal, in-memory representation of 'which provider/model is currently active' separate from the persisted ForgeConfig; implementations should prefer this domain struct when mutating or serializing session choices." + }, + { + "name": "ConfigOperation", + "type": "enum", + "purpose": "Describes discrete configuration mutations to be applied to persisted configuration.", + "reasoning": "Used to pass a small list of precise operations into infra functions that can apply them atomically and persist the result without replacing the entire config blob." + }, + { + "name": "VERSION", + "type": "constant", + "purpose": "Provides the application version string, using build-time APP_VERSION if present, otherwise CARGO_PKG_VERSION.", + "reasoning": "Central single-source for the runtime version used by Environment::version(); editing must preserve APP_VERSION / CARGO_PKG_VERSION resolution semantics." + }, + { + "name": "Environment", + "type": "struct", + "purpose": "Minimal runtime environment (os, cwd, home, shell, base_path) plus numerous helper methods that return canonical file/directory paths used by the app.", + "reasoning": "Encapsulates where artifacts are stored so call sites use Environment APIs rather than constructing paths ad-hoc; this reduces duplication and keeps path layout consistent." 
+ }, + { + "name": "history_path", + "type": "function", + "purpose": "Returns history file path, using a provided custom path when present or defaulting to base_path/.forge_history.", + "reasoning": "Tested behavior: custom path should override default; callers must pass Option<&PathBuf> and expect cloning behavior.", + "callers": [ + { + "file": "crates/forge_main/src/info.rs", + "line": 299, + "context": "format_path_for_display(env, &env.history_path(None))," + }, + { + "file": "crates/forge_main/src/editor.rs", + "line": 73, + "context": "let history_file = env.history_path(custom_history_path.as_ref());" + } + ] + }, + { + "name": "credentials_path", + "type": "function", + "purpose": "Returns path for provider credentials file (.credentials.json under base_path).", + "reasoning": "This helper centralizes where credentials are stored; commit history shows it was added to avoid duplicated base_path.join logic elsewhere.", + "callers": [ + { + "file": "crates/forge_main/src/ui.rs", + "line": 2556, + "context": "let credentials_path = crate::info::format_path_for_display(&env, &env.credentials_path());" + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 280, + "context": "let path = self.infra.get_environment().credentials_path();" + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 511, + "context": "let path = self.infra.get_environment().credentials_path();" + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 521, + "context": "let path = self.infra.get_environment().credentials_path();" + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 840, + "context": "if path == self.get_environment().credentials_path() {" + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 875, + "context": "if path == self.get_environment().credentials_path() {" + } + ] + }, + { + "name": "workspace_hash", + "type": "function", + "purpose": 
"Produces a WorkspaceHash computed by hashing cwd using DefaultHasher.", + "reasoning": "Used for workspace scoping; important to recognize DefaultHasher's non-deterministic seed across processes if deterministic IDs are assumed.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 69, + "context": "env.workspace_hash()," + } + ] + }, + { + "name": "WorkspaceHash", + "type": "struct", + "purpose": "Thin wrapper around u64 id produced by hashing workspace cwd.", + "reasoning": "Provides an explicit type for passing around workspace identifiers and a stable API (new, id())." + } + ], + "semantic_tags": [ + "environment", + "paths", + "config", + "session", + "testing" + ], + "handles_entities": [ + "Environment", + "SessionConfig", + "WorkspaceHash", + "ConfigOperation" + ], + "key_behaviors": [ + "returns canonical paths for app artifacts relative to base_path or cwd", + "represents active session/provider/model in memory", + "computes workspace-scoped hash from cwd" + ], + "pitfalls": [ + { + "mistake": "Assuming WorkspaceHash is deterministic across processes or machines.", + "consequence": "Different runs/processes may produce different WorkspaceHash values because DefaultHasher is not guaranteed to be seeded identically across processes; code that persists or compares WorkspaceHash across process boundaries may be fragile.", + "prevention": "Treat WorkspaceHash as a local/process-scoped identifier; do not rely on it for stable cross-process IDs." + }, + { + "mistake": "Constructing paths duplicating base_path.join(\"...\") instead of using Environment helpers.", + "consequence": "Scattered duplication of path logic increases chance of inconsistent locations (e.g., different file names or missing dot-prefixed filenames).", + "prevention": "Use the Environment::*_path() helpers in this file as the authoritative source for all canonical runtime paths." 
+ }, + { + "mistake": "Changing Environment fields without updating all test fixtures and downstream uses (TestContext).", + "consequence": "Tests or code constructing Environment literals will fail to compile or behave incorrectly if new fields are added.", + "prevention": "When modifying Environment signature, update test fixtures (e.g., Default impls) and any code that constructs Environment directly." + } + ], + "reading_guide": { + "start_here": "Environment", + "key_sections": [ + "Environment: the struct and its path helper methods (log_path, history_path, credentials_path, agent_path, etc.)", + "ConfigOperation: operation types for mutating persisted config", + "workspace_hash + WorkspaceHash: hashing behavior and id accessor" + ], + "skip_unless_needed": [ + "tests module: unit tests validating path helpers (useful for verifying behavior but not required to understand runtime helpers)" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_domain/src/env.rs (inline unit tests)" + ], + "test_functions": [ + "test_agent_cwd_path", + "test_agent_cwd_path_independent_from_agent_path", + "test_global_skills_path", + "test_local_skills_path", + "test_skills_paths_independent", + "test_command_path", + "test_command_path_local", + "test_command_paths_independent", + "test_global_agents_md_path", + "test_local_agents_md_path", + "test_plans_path", + "test_provider_config_path" + ], + "example_command": "cargo test -p forge_domain --tests -q", + "relevant_snippets": [ + { + "file": "crates/forge_domain/src/env.rs", + "lines": "55-110", + "description": "Environment struct and path helper methods (core behavior exercised by tests)." + }, + { + "file": "crates/forge_domain/src/env.rs", + "lines": "112-148", + "description": "workspace_hash and WorkspaceHash implementation." 
+ } + ] + }, + "related_files": [ + { + "path": "crates/forge_infra/src/env.rs", + "relationship": "Forge infra constructs runtime Environment instances and applies ConfigOperation values to persisted ForgeConfig; infra depends on the domain Environment helpers and ConfigOperation variants.", + "likely_co_change": true, + "reason_to_check": "Any change to Environment struct or ConfigOperation requires updating to_environment, apply_config_op, and tests that construct or use those types." + }, + { + "path": "crates/forge_app/src/orch_spec/orch_setup.rs", + "relationship": "Test harness constructs Environment literals to seed orchestrator tests; relies on the Environment shape and helper semantics.", + "likely_co_change": true, + "reason_to_check": "If Environment is extended with additional fields or behavior, TestContext::default must be updated to supply those fields." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_domain --lib", + "cargo test --workspace (if making cross-crate changes)" + ], + "data_constants_to_check": [ + "VERSION constant handling (APP_VERSION vs CARGO_PKG_VERSION)" + ], + "owns_authoritative_data": true, + "public_api_surface": true + }, + "insights": [ + { + "type": "refactoring", + "category": "Configuration", + "title": "Add Environment.credentials_path helper", + "problem": "Credentials file path was constructed at multiple call sites which made changes error-prone and tests less robust.", + "root_cause": "Repeated base_path.join(\".credentials.json\") usage across codebase.", + "solution": "Added Environment::credentials_path() which returns base_path.join('.credentials.json') and updated call sites to use it.", + "commit": [ + "a1e7f35" + ], + "constructs": [ + "credentials_path" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/env.rs": { + "file_path": "crates/forge_infra/src/env.rs", + "short_description": "Infra layer that constructs domain 
Environment, provides cached ForgeConfig, and applies ConfigOperation mutations to persisted ForgeConfig.", + "category": "SOURCE_CODE", + "description": "Provides conversion from runtime inputs to the domain Environment and the ForgeEnvironmentInfra type which manages a cached ForgeConfig and a single code path for reading/writing configuration. This file exists to centralize how the app discovers runtime environment details (cwd, os, shell, base_path via ConfigReader::base_path) and to provide atomic-ish application of domain-level configuration operations through apply_config_op. ForgeEnvironmentInfra offers cached_config() to return the in-memory or re-read ForgeConfig, get_config() for EnvironmentInfra trait compatibility, and update_environment to apply a sequence of ConfigOperation and persist changes to disk while invalidating the cache.\n\nThe module contains tests that exercise to_environment behavior (including FORGE_CONFIG env var overriding base_path) and apply_config_op semantics for SetSessionConfig operations in multiple cases (new session, same provider update, different provider replacement). Tests serialize modifications to process environment via an ENV_MUTEX and EnvGuard to avoid inter-test races when setting/removing env vars. 
The implementation uses Arc<Mutex<Option<ForgeConfig>>> for caching; update_environment replaces disk content via ForgeConfig::write() and then clears the cache so future get_config calls re-read from disk.", + "key_constructs": [ + { + "name": "to_environment", + "type": "function", + "purpose": "Builds a forge_domain::Environment from the current runtime context and a supplied cwd; obtains base_path from ConfigReader::base_path().", + "reasoning": "Central point where OS env, SHELL/COMSPEC, home dir, and FORGE_CONFIG override are normalized into the domain Environment used by the rest of the system.", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 21, + "context": "use crate::env::{ForgeEnvironmentInfra, to_environment};" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 68, + "context": "let env = to_environment(cwd.clone());" + } + ] + }, + { + "name": "apply_config_op", + "type": "function", + "purpose": "Applies a single forge_domain::ConfigOperation to an in-memory forge_config::ForgeConfig instance.", + "reasoning": "Transforms domain-level mutation descriptors into appropriate changes on the Option-backed ForgeConfig; important to maintain the mapping consistency between domain ModelConfig and ForgeConfig::ModelConfig." + }, + { + "name": "ForgeEnvironmentInfra", + "type": "struct", + "purpose": "Infra implementation holding a cwd and an Arc<Mutex<Option<ForgeConfig>>> cache, implementing EnvironmentInfra trait.", + "reasoning": "Provides a cached read path for ForgeConfig and a mutation path via update_environment. The cache lifecycle (seeded by new, invalidated after update_environment) is a critical behavioral contract for callers."
+ }, + { + "name": "cached_config", + "type": "function", + "purpose": "Returns the cached ForgeConfig when present or re-reads from disk (ConfigReader pipeline) when the cache is None.", + "reasoning": "Exposes an error-returning single point of truth for retrieving resolved ForgeConfig; callers must handle errors when the disk read fails.", + "callers": [ + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 73, + "context": "config_infra.cached_config().unwrap_or(config)," + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 80, + "context": ".cached_config()" + }, + { + "file": "crates/forge_infra/src/forge_infra.rs", + "line": 118, + "context": "self.config_infra.cached_config()" + } + ] + } + ], + "semantic_tags": [ + "configuration", + "caching", + "env", + "persistence", + "tests" + ], + "handles_entities": [ + "ForgeConfig", + "Environment", + "ConfigOperation" + ], + "key_behaviors": [ + "creates domain Environment from runtime variables", + "caches ForgeConfig to avoid repeated disk reads", + "atomically applies a list of ConfigOperation values and persists them", + "invalidates cache after write so next get reads updated values" + ], + "pitfalls": [ + { + "mistake": "Mutating or reading environment variables in tests without serializing access.", + "consequence": "Tests can race on global process environment, causing flakes or incorrect base_path resolution.", + "prevention": "Keep or reuse ENV_MUTEX / EnvGuard semantics when adding tests that set or remove env vars; avoid parallel tests that change env without synchronization." 
+ }, + { + "mistake": "Assuming cached_config never fails or always returns the pre-seeded value.", + "consequence": "If the cache is invalidated (None) and disk read fails, get_config will return an error which callers must handle; tests that expect a config must ensure a successful read or seed the cache via ForgeEnvironmentInfra::new.", + "prevention": "When writing tests or code, either pre-seed the cache with a valid ForgeConfig (new(cwd, config)) or handle get_config errors properly." + }, + { + "mistake": "Changing apply_config_op mapping logic without updating domain/ForgeConfig shapes.", + "consequence": "Mismatches between domain ModelConfig and ForgeConfig::ModelConfig will cause incorrect persisted values or tests to fail (e.g., session provider vs model update semantics).", + "prevention": "When changing ModelConfig fields or Effort enum variants, update apply_config_op mapping and its tests to reflect expected persisted shape." + }, + { + "mistake": "Assuming Mutex lock poisoning won't occur and ignoring expect() messages.", + "consequence": "If a panicking thread poisons the mutex, expect() will panic in tests or runtime; code currently asserts with expect(\"cache mutex poisoned\").", + "prevention": "Be cognizant of possible poisoning (e.g., panics in other tests) and ensure tests avoid poisoning shared mutexes or handle potential poisoning explicitly if changing synchronization." 
+ } + ], + "reading_guide": { + "start_here": "to_environment", + "key_sections": [ + "to_environment: how base_path and shell/home detection are derived (look here to understand base_path override via FORGE_CONFIG)", + "apply_config_op: mapping from domain operations to ForgeConfig fields and reasoning behavior for provider/model replacement", + "ForgeEnvironmentInfra: constructor (new), cached_config, and update_environment behavior" + ], + "skip_unless_needed": [ + "EnvGuard and mutex test helpers: only necessary when modifying tests that change process env" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_infra/src/env.rs (inline unit tests)" + ], + "test_functions": [ + "test_to_environment_sets_cwd", + "test_to_environment_uses_forge_config_env_var", + "test_to_environment_falls_back_to_home_dir_when_env_var_absent", + "test_apply_config_op_set_model", + "test_apply_config_op_set_model_matching_provider", + "test_apply_config_op_set_model_different_provider_replaces_session" + ], + "example_command": "cargo test -p forge_infra --tests -q", + "relevant_snippets": [ + { + "file": "crates/forge_infra/src/env.rs", + "lines": "1-40", + "description": "to_environment implementation and doc comment showing which fields are set from runtime." + }, + { + "file": "crates/forge_infra/src/env.rs", + "lines": "42-120", + "description": "apply_config_op function mapping ConfigOperation -> ForgeConfig modifications; core logic exercised by tests." + } + ] + }, + "related_files": [ + { + "path": "crates/forge_domain/src/env.rs", + "relationship": "Produces and consumes forge_domain::Environment and ConfigOperation; domain defines the types and path helpers while infra constructs instances and applies operations.", + "likely_co_change": true, + "reason_to_check": "Any change to ConfigOperation variants or Environment fields requires updating apply_config_op, to_environment, and infra tests." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_infra --lib", + "cargo test --workspace (if changing cross-crate shapes like ModelConfig or Effort)" + ], + "data_constants_to_check": [ + "ConfigReader::base_path behavior (FORGE_CONFIG env var influence) when modifying to_environment" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "bug_fix", + "category": "State Management", + "title": "Config cache invalidation and re-read", + "problem": "ForgeConfig was read once at startup and cached without a clear re-read path; after config updates the in-memory cache could remain stale or callers had to rely on previously-threaded config values.", + "root_cause": "A startup-time refactor threaded a single ForgeConfig instance through the stack. Subsequent updates (update_environment) did not reliably cause callers to re-read disk-backed config, and callers expected synchronous get_config semantics.", + "solution": "Introduce cached_config() -> Result that returns the cached config when present or re-reads from disk (ConfigReader::build()) when invalidated. update_environment clears the cache so the next get will re-read. 
Added tests and EnvGuard helpers to avoid environment races.", + "lesson_learned": "Maintain a single authoritative config read path via the infra and treat disk reads as fallible; invalidate the cache on mutations and surface errors rather than silently falling back.", + "commits": [ + "5bd0b94", + "7e8a51d", + "279ec19" + ], + "constructs": [ + "to_environment", + "ForgeEnvironmentInfra::new", + "ForgeEnvironmentInfra::cached_config", + "to_environment (public)", + "cached_config" + ] + }, + { + "type": "refactoring", + "category": "Configuration", + "title": "Base path driven by FORGE_CONFIG env var", + "problem": "Previously base_path always used home_dir()/\"forge\" and couldn't be overridden at runtime via env var.", + "root_cause": "No environment variable was consulted for the config base path.", + "solution": "ConfigReader::base_path consults FORGE_CONFIG env var if present; to_environment uses ConfigReader::base_path() for base_path.", + "lesson_learned": "Provide a documented env override for global config paths and add tests that guard env mutation.", + "commits": [ + "279ec19" + ], + "constructs": [ + "to_environment", + "ConfigReader::base_path" + ] + }, + { + "type": "refactoring", + "category": "Configuration", + "title": "Convert between ForgeConfig (Option-backed) and Environment with defaults resolved", + "problem": "Environment previously carried many configuration fields and mapping between ForgeConfig and Environment was brittle, allowing defaults to be lost when layering.", + "root_cause": "ForgeConfig field types changed to Option; conversions needed to unwrap with defaults.", + "solution": "Update to_environment and to_forge_config logic to use unwrap_or_default() and to set ForgeConfig Option values from Environment, ensuring defaults are preserved in conversions. 
Adjust service use-calls to read config via get_config rather than embedding config into Environment.", + "commits": [ + "69882c6", + "fbeea84" + ], + "constructs": [ + "to_environment", + "to_forge_config" + ], + "lesson_learned": "Keep a clear distinction between fully-resolved runtime Environment and layered ForgeConfig; conversion functions must be explicit and preserve defaults when layering config sources." + }, + { + "type": "bug_fix", + "category": "Configuration", + "title": "Increase default file/image size limits to 10 MiB", + "problem": "Default maximum file and image size limits were very small (256 KiB), causing larger file/image reads to be rejected/unavailable by default.", + "root_cause": "Conservative hard-coded defaults (256 << 10) in ForgeEnvironmentInfra were insufficient for typical user files.", + "solution": "Bumped max_file_size and default FORGE_MAX_IMAGE_SIZE fallback to 10 << 20 (10 MiB); updated tests and README to match the new defaults.", + "commits": [ + "c78894a" + ], + "constructs": [ + "ForgeEnvironmentInfra", + "get_environment" + ] + }, + { + "type": "refactoring", + "category": "Configuration", + "title": "Expose FORGE_PARALLEL_FILE_READS environment variable", + "problem": "No way to configure parallel file read cap; default could cause EMFILE on some systems.", + "root_cause": "Static assumptions about parallelism without runtime tweakability.", + "solution": "Add parallel_file_reads field in Environment infra and parse FORGE_PARALLEL_FILE_READS with a sensible default based on available_parallelism * 2 or 32.", + "lesson_learned": "Make concurrency caps configurable via environment to support varying host limits and CI environments.", + "commits": [ + "e25c1c0" + ], + "constructs": [ + "ForgeEnvironmentInfra::new (env parsing)" + ] + }, + { + "type": "feature", + "category": "Configuration", + "title": "Support FORGE_MAX_IMAGE_SIZE env var and propagate to Environment", + "problem": "Image reading needed a configurable size 
limit", + "root_cause": "No environment variable existed to cap image reads separately from file reads", + "solution": "Add parsing of FORGE_MAX_IMAGE_SIZE with fallback default 256 KiB and tests ensuring behavior for default/valid/invalid values", + "commits": [ + "5c86244" + ], + "constructs": [ + "ForgeEnvironmentInfra::get_environment (max_image_size parsing)", + "test_max_image_size_env_var" + ], + "lesson_learned": "Expose resource-sensitive limits (like binary read sizes) via environment and add tests for default/override/invalid inputs to avoid surprising resource consumption." + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/orch_spec/orch_setup.rs": { + "file_path": "crates/forge_app/src/orch_spec/orch_setup.rs", + "short_description": "Test harness types and default fixtures used to run orchestrator integration-style tests.", + "category": "SOURCE_CODE", + "description": "Provides TestContext and TestOutput structures used by orchestrator tests to simulate agent runs with mock responses, tools, files, and environment. The file exists to consolidate test fixtures that are reused by many orchestrator tests so that test scenarios can focus on inputs and expected outputs rather than constructing all dependencies from scratch. TestContext default seeds a realistic environment (agent, templates, mock tool/shell responses, ForgeConfig), and exposes run/run_event convenience methods that delegate execution to Runner::run (the orchestrator test runner).\n\nTestOutput accumulates conversation_history and chat_responses produced by the orchestrator; helpers extract system messages, context messages and tools from the last conversation. The default values for TestContext are intentionally comprehensive to match expectations the orchestrator has for agent tooling, template rendering and environment. 
This file must remain synchronized with domain Environment and any additions to Environment fields or Template/Tool config types because tests instantiate Environment and ForgeConfig directly here.", + "key_constructs": [ + { + "name": "TestContext", + "type": "struct", + "purpose": "Holds all inputs and mocks required to run an orchestrator test (mock responses, files, env, agent, tools, config) and stores final output.", + "reasoning": "Central test fixture to standardize orchestrator test setup; tests call TestContext::run/run_event to execute scenarios with consistent defaults." + }, + { + "name": "Default for TestContext", + "type": "impl", + "purpose": "Supplies a realistic default test fixture including a sample Agent, Environment, ForgeConfig and tools.", + "reasoning": "Ensures test cases don't need to duplicate common fixture setup; must be kept up-to-date with changes to Environment or ForgeConfig shapes." + }, + { + "name": "run", + "type": "function", + "purpose": "Async helper that turns a string event into an Event and delegates to Runner::run via run_event.", + "reasoning": "Convenience API used by tests to invoke the orchestrator with a simple string payload.", + "callers": [ + { + "file": "crates/forge_app/src/orch_spec/orch_system_spec.rs", + "line": 13, + "context": "ctx.run(\"This is a test\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_system_spec.rs", + "line": 30, + "context": "ctx.run(\"This is a test\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_system_spec.rs", + "line": 56, + "context": "ctx.run(\"This is a test\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_system_spec.rs", + "line": 92, + "context": "ctx.run(\"This is a test\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 15, + "context": "ctx.run(\"This is a test\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + 
"line": 26, + "context": "ctx.run(\"Hi\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 51, + "context": "ctx.run(\"Hi\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 81, + "context": "ctx.run(\"Ask a follow-up question\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 120, + "context": "let _ = ctx.run(\"Read a file\").await;" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 148, + "context": "ctx.run(\"Read a file\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 218, + "context": "ctx.run(\"Analyze code\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 271, + "context": "ctx.run(\"Read file and analyze\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 340, + "context": "ctx.run(\"Solve a complex problem\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 361, + "context": "ctx.run(\"Solve a complex problem\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 390, + "context": "let _ = ctx.run(\"Read a file\").await;" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 424, + "context": "ctx.run(\"Test doom loop\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 495, + "context": "ctx.run(\"test\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 517, + "context": "ctx.run(raw_task).await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 547, + "context": "ctx.run(\"Complete this task\").await.unwrap();" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "line": 581, + "context": "ctx.run(\"Read a 
file\").await.unwrap();" + } + ] + }, + { + "name": "TestOutput", + "type": "struct", + "purpose": "Collects final outputs from orchestrator runs (conversation history and chat responses) and provides helpers to extract messages and tool lists.", + "reasoning": "Encapsulates final results and makes it easy for tests to assert on system messages, conversation context, and tools." + } + ], + "semantic_tags": [ + "testing", + "fixtures", + "orchestrator", + "mocks" + ], + "handles_entities": [ + "Agent", + "Environment", + "ForgeConfig", + "Conversation", + "ToolDefinition", + "ChatResponse" + ], + "key_behaviors": [ + "sets up default agent, environment and config for tests", + "collects orchestrator outputs for assertions", + "provides convenient runners to execute events under test harness" + ], + "pitfalls": [ + { + "mistake": "Not updating TestContext::default when Environment or ForgeConfig gains new required fields.", + "consequence": "Compilation errors or panicking tests due to missing fields or mismatched types; tests will not reflect runtime expectations.", + "prevention": "When Environment or ForgeConfig changes, update TestContext::default to provide those fields and values consistent with the new shapes." + }, + { + "mistake": "Assuming the TestContext defaults fully match production behavior.", + "consequence": "Tests may pass with defaults but miss edge cases caused by different real-world config values; false confidence in behavior.", + "prevention": "Explicitly set non-default values in tests when verifying behaviors that depend on those settings rather than over-relying on fixture defaults." 
+ } + ], + "reading_guide": { + "start_here": "TestContext (and its Default impl)", + "key_sections": [ + "Default for TestContext: review agent, env, and config that are seeded", + "TestOutput helpers: system_messages, context_messages, tools used by assertions in tests" + ], + "skip_unless_needed": [ + "the USER_PROMPT constant: useful when changing template rendering, otherwise peripheral" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_app --tests -q", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_domain/src/env.rs", + "relationship": "TestContext constructs a forge_domain::Environment literal; must be compatible with the domain environment shape and semantics.", + "likely_co_change": true, + "reason_to_check": "If Environment adds/changes fields (commit history shows parallel_file_reads was added historically), TestContext::default must be updated accordingly." + }, + { + "path": "crates/forge_app/src/orch_spec/orch_runner.rs", + "relationship": "Runner::run is invoked by TestContext::run_event; orchestration logic in runner consumes TestContext inputs and produces TestOutput.", + "likely_co_change": true, + "reason_to_check": "Any change to Runner::run signatures or required test fixtures will require updating TestContext and the tests that use it." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app --lib", + "cargo test --workspace (recommended when changing shared domain types)" + ], + "data_constants_to_check": [ + "USER_PROMPT template and TemplateConfig interaction with ForgeConfig used in tests" + ], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "insights": [ + { + "type": "refactoring", + "category": "Testing", + "title": "Set parallel_file_reads in test fixture environment", + "problem": "Tests constructing TestContext did not provide the new parallel_file_reads field.", + "root_cause": "Introduced environment field required test updates.", + "solution": "Set parallel_file_reads: 64 in the test TestContext default.", + "lesson_learned": "When extending environment structs, update test fixtures to include defaults to avoid panics or incorrect behavior in tests.", + "commits": [ + "e25c1c0" + ], + "constructs": [ + "Default for TestContext" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/context_engine.rs": { + "file_path": "crates/forge_services/src/context_engine.rs", + "short_description": "Workspace indexing/search service bridge that orchestrates sync, search, and workspace lifecycle.", + "category": "SOURCE_CODE", + "description": "This file implements ForgeWorkspaceService, a concrete WorkspaceService that connects higher-level application infra (file IO, credentials, command runner, config) to the workspace indexing and semantic-search functionality. It exists to: (1) obtain authentication and workspace metadata from the infra layer, (2) choose the correct workspace root for discovery, (3) delegate heavy-lifting sync / status computation to WorkspaceSyncEngine, and (4) implement workspace lifecycle operations (list/get/delete/init) and semantic code search via the WorkspaceService trait. 
The design separates orchestration (this file) from low-level discovery/upload logic (WorkspaceSyncEngine in sync.rs and FileDiscovery in fd.rs).\n\nThe implementation favors explicit error contexts (anyhow::Context), preserves canonical paths from remote workspace records (to avoid repeated canonicalize IO), and streams sync progress out via an MpscStream (async message channel). Trait bounds on the generic infra type F are intentionally broad: ProviderRepository, WorkspaceIndexRepository, FileReaderInfra, EnvironmentInfra (specialized to forge_config::ForgeConfig), CommandInfra, and WalkerInfra \u2014 these are required because underlying sync and discovery (git ls-files vs walker fallback) and credential storage are performed by infra implementations. The service also handles parallel deletion of workspaces and aggregates errors produced by those operations.\n\nOperationally, the file includes a private sync_codebase_internal that: acquires credentials and user id, canonicalizes the provided path, finds the workspace on the server (exact match or closest ancestor), and then constructs a WorkspaceSyncEngine to run the sync with a provided emit closure for progress events. The public WorkspaceService impl wires sync_workspace to spawn an MpscStream, and implements other operations like query_workspace (semantic search), list_workspaces, get_workspace_info, delete_workspace(s), is_indexed, get_workspace_status, is_authenticated, init_auth_credentials, and init_workspace. Many behaviors rely on conventions (e.g. 
user_id stored in credential.url_params) so callers and editors must preserve these shapes when modifying code.", + "key_constructs": [ + { + "name": "ForgeWorkspaceService", + "type": "struct", + "purpose": "High-level service that orchestrates workspace indexing, search, and lifecycle operations.", + "reasoning": "Represents the bridge between infra implementations and workspace sync/search logic; maintains references to infra and file discovery strategy as Arcs so methods can spawn async tasks and clones cheaply." + }, + { + "name": "sync_codebase_internal", + "type": "function", + "purpose": "Internal async routine that orchestrates a workspace sync and emits SyncProgress events via a provided emit closure.", + "reasoning": "Delegates heavy lifting to WorkspaceSyncEngine after resolving credentials, batch size, and canonical workspace root. Critical for streaming progress and error semantics of the sync operation." + }, + { + "name": "get_workspace_credentials", + "type": "function", + "purpose": "Fetches the Forge services credential from infra and extracts an API key and UserId.", + "reasoning": "Parses AuthCredential.auth_details expecting an ApiKey and retrieves user_id from credential.url_params. Many other methods depend on this exact extraction and error messages." + }, + { + "name": "find_workspace_by_path", + "type": "function", + "purpose": "Finds a workspace record that matches the provided path, preferring exact match then closest ancestor (longest path prefix).", + "reasoning": "Implements business logic for choosing which remote workspace to use for operations; uses canonicalized paths and string-length-based best match selection." 
+ }, + { + "name": "get_workspace_by_path", + "type": "function", + "purpose": "Wrapper over find_workspace_by_path that returns an error if no workspace is found (used for operations that require an indexed workspace).", + "reasoning": "Converts an Option into a contextualized error telling users to run `forge workspace init` if not indexed; callers expect this error behavior." + }, + { + "name": "_init_workspace", + "type": "function", + "purpose": "Internal initialization helper that either reuses an existing workspace or prepares to create a new workspace id.", + "reasoning": "Encapsulates lookup-vs-create decision and returns whether workspace is new plus the WorkspaceId; used by init_workspace public method which maps is_new to either Ok or WorkspaceAlreadyInitialized error." + }, + { + "name": "WorkspaceService::sync_workspace", + "type": "function", + "purpose": "Implementation of the WorkspaceService trait method that returns an MpscStream of SyncProgress for a sync operation.", + "reasoning": "Spawns an async task with an emit closure bound to the stream sender. Ensures any error produced by sync_codebase_internal is sent over the stream as Err." + }, + { + "name": "WorkspaceService::query_workspace", + "type": "function", + "purpose": "Performs semantic code search by building a CodeBase query and invoking infra.search with the user's API key.", + "reasoning": "Relies on get_workspace_credentials and find_workspace_by_path semantics; maps domain SearchParams into a CodeBase record consumed by infra.search." 
+ } + ], + "semantic_tags": [ + "workspace", + "indexing", + "authentication", + "async", + "orchestration" + ], + "handles_entities": [ + "WorkspaceInfo", + "WorkspaceId", + "ApiKey", + "UserId", + "SyncProgress", + "WorkspaceAuth", + "Node", + "FileStatus" + ], + "key_behaviors": [ + "streams sync progress events during workspace sync", + "delegates file discovery and upload work to WorkspaceSyncEngine", + "performs semantic workspace search using remote index", + "lists, gets, deletes, and initializes workspaces on remote server", + "manages Forge services authentication credentials in infra store" + ], + "pitfalls": [ + { + "mistake": "Assuming get_workspace_credentials always returns an ApiKey without checking url_params", + "consequence": "A missing or mis-shaped url_params (missing user_id) will cause an error and break downstream calls that expect a user_id.", + "prevention": "Preserve the code path that reads user_id from credential.url_params and maintain that key when creating/updating credentials." + }, + { + "mistake": "Changing find_workspace_by_path matching logic (exact then longest prefix) without updating callers", + "consequence": "Operations could pick wrong workspace root (e.g., sibling vs ancestor) and sync/query against an unintended workspace, causing inconsistent canonical paths and surprising content uploads.", + "prevention": "Respect exact-match-first then longest-prefix selection; if adjusting, ensure any consumer assumptions (canonical working_dir usage) are updated." + }, + { + "mistake": "Modifying sync error propagation (e.g., swallowing errors) in MpscStream spawn closure", + "consequence": "Errors during sync would not reach the stream consumer and overall UX would show a stalled or incomplete sync.", + "prevention": "Keep the pattern of sending Err(e) on failure via tx.send(Err(e)).await and ensure the stream's lifecycle semantics are maintained." 
+ }, + { + "mistake": "Altering is_indexed to propagate errors instead of returning Ok(false) on internal errors", + "consequence": "Higher-level callers that expect a boolean may start to receive errors that change control flow (e.g., CLI diagnostics), potentially exposing low-level IO errors to end users.", + "prevention": "Recognize that current behavior swallows find_workspace_by_path errors and returns false; any change must consider CLI UX and error surfaces." + }, + { + "mistake": "Removing or changing reliance on workspace.working_dir canonical path", + "consequence": "Discovery and status computation could canonicalize using a different root than was used during prior syncs, causing mismatches and incorrect hashing/diffs.", + "prevention": "Continue to use workspace.working_dir as authoritative canonical root when present." + } + ], + "reading_guide": { + "start_here": "ForgeWorkspaceService", + "key_sections": [ + "sync_codebase_internal: orchestrates auth lookup, canonicalization, and delegates to WorkspaceSyncEngine", + "get_workspace_credentials: critical auth parsing and error messages (user_id extraction)", + "find_workspace_by_path: exact-match then ancestor selection \u2014 business logic for workspace selection", + "WorkspaceService impl methods: entry points used by other crates (sync_workspace, query_workspace, list_workspaces, get_workspace_status, init_workspace, delete*)" + ], + "skip_unless_needed": [ + "Clone impl: trivial Arc cloning", + "low-level logging lines" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_services", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_services/src/sync.rs", + "relationship": "Delegation target \u2013 WorkspaceSyncEngine performs file discovery, reading and upload work that this file orchestrates.", + "likely_co_change": true, + "reason_to_check": "Modifications to sync orchestration, progress events, batch_size 
handling, or trait bounds will likely require updating sync.rs to match calling conventions and vice versa." + }, + { + "path": "crates/forge_services/src/fd.rs", + "relationship": "File discovery strategy abstraction referenced as FileDiscovery in this file.", + "likely_co_change": true, + "reason_to_check": "Changes to discovery contract (returned path shapes, error behavior) will affect how canonicalization and WorkspaceSyncEngine expect inputs." + }, + { + "path": "crates/forge_config/src/lib.rs", + "relationship": "Source of ForgeConfig that provides max_file_read_batch_size used here.", + "likely_co_change": false, + "reason_to_check": "If max_file_read_batch_size is renamed or semantics change, this file must adapt how it reads batch_size." + }, + { + "path": "crates/forge_app/src/lib.rs", + "relationship": "Defines infra traits (CommandInfra, WalkerInfra, etc.) that F must implement \u2014 used throughout this file.", + "likely_co_change": true, + "reason_to_check": "Changing trait method signatures or semantics (e.g., CommandInfra execution behavior) will affect this file's trait bounds and runtime behavior." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_services", + "cargo test --workspace" + ], + "data_constants_to_check": [ + "forge_config::ForgeConfig::max_file_read_batch_size" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "refactoring", + "category": "Performance", + "title": "Delegate workspace sync to new WorkspaceSyncEngine", + "problem": "Large sync function in context_engine had many responsibilities and duplicated logic.", + "root_cause": "Monolithic sync implementation made it hard to optimize and maintain.", + "solution": "Simplify context_engine by delegating to WorkspaceSyncEngine (new sync.rs). 
Kept fetch_remote_hashes and high-level orchestration but removed low-level file read/upload logic.", + "commits": [ + "b924d21" + ], + "constructs": [ + "sync_codebase_internal (delegation)", + "compute_status (delegation)" + ] + }, + { + "type": "bug_fix", + "category": "Error Handling", + "title": "Return real git ls-files errors and add walker fallback", + "problem": "git_ls_files previously swallowed errors (returned None) making it hard to diagnose failures; also git ls-files could succeed but return empty results causing no-file discovery in some valid scenarios.", + "root_cause": "git command wrapper returned Option and lost stderr/exit code details; empty git output was not treated as fallback-worthy.", + "solution": "Make git_ls_files return anyhow::Result> and surface the actual stderr/exit-code error. Add walker fallback when git ls-files fails or returns no files. Log and yield appropriate errors. Introduced FileReadError type to represent individual file read errors and propagate them to status as SyncStatus::Failed.", + "commits": [ + "892e34d", + "e6dd682", + "7b3c74b" + ], + "constructs": [ + "git_ls_files", + "discover_sync_file_paths", + "walk_directory", + "read_files", + "FileReadError" + ] + }, + { + "type": "refactoring", + "category": "State Management", + "title": "Change file discovery pipeline to separate discovery and reading", + "problem": "Earlier implementation combined discovery, reading batches, and error handling in a single streaming function that made error propagation awkward.", + "root_cause": "Monolithic stream and a prior API for read_batch_utf8 required grouping results into batches; refactor needed to handle individual file read errors gracefully.", + "solution": "Split discovery (discover_sync_file_paths / walk_directory) and read_files: discovery returns Vec or error, read_files consumes the discovery result and returns a Stream> where individual read failures are returned as FileReadError; caller collects both successes 
and failed statuses and merges into final statuses with SyncStatus::Failed entries.", + "commits": [ + "e6dd682", + "1b114a4" + ], + "constructs": [ + "discover_sync_file_paths", + "walk_directory", + "read_files" + ] + }, + { + "type": "refactoring", + "category": "Performance", + "title": "Use git ls-files for file discovery instead of walker", + "problem": "Recursive walking via Walker performed heavier work and was less aligned with git-tracked content; it also duplicated discovery logic.", + "root_cause": "WalkerInfra provided a generic filesystem walk but git-tracked file discovery is more accurate and faster for the workspace sync scenario.", + "solution": "Add git_ls_files that runs `git ls-files` via CommandInfra and parse results into WalkedFile entries. Update read_files to call git_ls_files and handle failure when git isn't available. Adjust trait bounds to require CommandInfra instead of WalkerInfra.", + "lesson_learned": "Prefer repository-native discovery (git ls-files) for workspace indexing to align with tracked files and avoid unnecessary I/O; when switching underlying mechanism, update trait bounds and failure modes gracefully.", + "commits": [ + "eafdac7" + ], + "constructs": [ + "ForgeWorkspaceService::git_ls_files", + "read_files" + ] + }, + { + "type": "refactoring", + "category": "Logging", + "title": "Improve file sync logging wording", + "problem": "Log messages for file upload were inconsistent (used 'started' and 'synced successfully'). 
The inconsistency could be confusing when correlating start/finish events.", + "root_cause": "Minor wording inconsistency in tracing/info calls around async upload tasks.", + "solution": "Changed final log message to 'File sync completed' and added a 'File sync started' info log at task start to make start/finish messages symmetric.", + "commit": [ + "e077ddc" + ], + "constructs": [ + "upload" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/tool_executor.rs": { + "file_path": "crates/forge_app/src/tool_executor.rs", + "short_description": "Executes tool calls by routing ToolCatalog inputs to services, enforcing policies and producing ToolOutput.", + "category": "SOURCE_CODE", + "description": "ToolExecutor is the runtime bridge between high-level tool call messages (ToolCatalog) and the lower-level service implementations. It centralizes path normalization, read-before-edit enforcement, temporary file creation for truncated outputs, and conversion of service responses into ToolOperation values and then ToolOutput(s). The struct wraps a generic Arc services object constrained by many service traits so it can call read/write/search/shell/fetch/follow-up/plan/skill/undo operations.\n\nThe file exists to provide a single place to enforce cross-cutting rules (normalize relative paths to the current working directory, require a prior read before destructive edits, create temp files for truncated fetch/shell outputs) and to package service results into ToolOperation and ToolOutput. 
It keeps policy logic (like truncation thresholds and read-before-edit rules) close to execution and uses the ToolCallContext to read/write conversation-level state (metrics and todos) and to send formatted tool output back to the conversation.\n\nThis module integrates with: the services trait implementations (the concrete I/O and network behavior), the operation formatting layer (ToolOperation and to_content -> ChatResponseContent), and the domain ToolCatalog type. Tests that exercise features around todo write/read, read-before-edit, normalization, and truncation live in crates/forge_app (see fmt/todo_fmt and operation related tests/snapshots). Recent commit history shows careful changes here: call_internal gained access to ToolCallContext, normalization of paths was centralized, and todo write semantics were adjusted to read the authoritative post-update state rather than relying on update_todos' return value.", + "key_constructs": [ + { + "name": "ToolExecutor", + "type": "class", + "purpose": "Primary executor that receives a ToolCatalog and ToolCallContext and runs the corresponding service operation.", + "reasoning": "An agent editing code must call services consistently (path normalization, metrics, temp files). 
Any editing agent/change must respect how ToolExecutor calls services and uses ToolCallContext for side effects.", + "callers": [ + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 22, + "context": "use crate::tool_executor::ToolExecutor;" + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 29, + "context": "tool_executor: ToolExecutor," + }, + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 39, + "context": "tool_executor: ToolExecutor::new(services.clone())," + } + ] + }, + { + "name": "require_prior_read", + "type": "function", + "purpose": "Enforces that a file was previously read in this conversation before allowing edits/overwrites.", + "reasoning": "This method looks at conversation metrics (files_accessed) via ToolCallContext; callers must preserve semantics when changing metrics representation or naming to avoid bypassing the safety check." + }, + { + "name": "normalize_path", + "type": "function", + "purpose": "Converts relative file paths into absolute paths by joining with the environment cwd.", + "reasoning": "Normalization is applied consistently across many tool arms. Changing path handling or cwd semantics must preserve backward compatibility." + }, + { + "name": "create_temp_file", + "type": "function", + "purpose": "Creates a persistent-temp file for large/truncated outputs and writes content using services.write.", + "reasoning": "Temp files are created with tempfile::Builder::disable_cleanup(true) and then written via services; callers should never assume automatic cleanup and must respect the service write contract." + }, + { + "name": "call_internal", + "type": "function", + "purpose": "Matches on ToolCatalog and invokes the appropriate service method, returning a ToolOperation.", + "reasoning": "This is the core routing method. It now accepts &ToolCallContext (commit history) so it can use context when needed (e.g., todos). 
Changes here must preserve the mapping between ToolCatalog variants and service calls." + }, + { + "name": "dump_operation", + "type": "function", + "purpose": "Inspects ToolOperation to determine if any large outputs should be written to temp files (shell stdout/stderr or fetch content) and returns TempContentFiles.", + "reasoning": "This function uses config thresholds to decide truncation. Test and CI expectations rely on exact temp-file behavior and the keys used (prefix/suffix)." + }, + { + "name": "execute", + "type": "function", + "purpose": "Public entrypoint: runs require_prior_read checks, calls call_internal, sends formatted output to context, dumps truncation files, and converts the operation into a ToolOutput updating metrics.", + "reasoning": "All policy checks are performed before calling into services. Mutating this flow can affect safety (read-before-edit), messaging, and metrics updates across the system.", + "callers": [ + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 164, + "context": "self.tool_executor.execute(tool_input, context)" + } + ] + } + ], + "semantic_tags": [ + "tool-execution", + "io", + "policy", + "path-normalization", + "truncation" + ], + "handles_entities": [ + "ToolCatalog", + "ToolOperation", + "ToolOutput", + "TempContentFiles", + "Metrics", + "Todo" + ], + "key_behaviors": [ + "normalizes relative file paths to an absolute path using environment cwd", + "enforces read-before-edit and read-before-overwrite policies", + "routes each ToolCatalog variant to the proper service call", + "creates temporary files for truncated outputs and includes truncation paths in ToolOutput metrics" + ], + "pitfalls": [ + { + "mistake": "Calling services directly without normalizing paths", + "consequence": "Services or downstream code may receive relative paths when absolute paths are expected, causing failures or inconsistent indexing.", + "prevention": "Always use normalize_path on any path-like input before calling 
services; maintain existing normalize_path semantics." + }, + { + "mistake": "Relying on update_todos return value for authoritative post-update state", + "consequence": "You may read a stale or inconsistent todo list if update_todos' contract changes; previous bug fixes indicate ordering matters.", + "prevention": "When mutating conversation state via ToolCallContext, read the authoritative state using context accessors (get_todos) after the mutation." + }, + { + "mistake": "Assuming temporary files are auto-removed or ephemeral", + "consequence": "Temp files are created with disable_cleanup(true); they persist and are written with services.write. Tests and consumers may expect them to exist and be accessible; assuming cleanup will remove them can cause leaks or test flakiness.", + "prevention": "Treat created temp files as durable artifacts for the duration expected by the code; respect disable_cleanup(true) semantics." + } + ], + "reading_guide": { + "start_here": "ToolExecutor::execute", + "key_sections": [ + "call_internal: the mapping from each ToolCatalog variant to a service call", + "require_prior_read: safety enforcement logic used for patch/write operations", + "dump_operation: truncation file creation and how truncation decisions are made" + ], + "skip_unless_needed": [ + "small helper functions (create_temp_file) once you understand how temp files are written", + "the trait bounds on S (long list) \u2014 only relevant when changing the services interface" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_app/src/fmt/todo_fmt.rs tests", + "crates/forge_app/src/operation.rs tests" + ], + "test_functions": [], + "example_command": "cargo test -p forge_app --lib", + "relevant_snippets": [ + { + "file": "crates/forge_app/src/fmt/todo_fmt.rs", + "lines": "1-200", + "description": "Tests around TodoWrite/TodoRead formatting and how ToolOperation Todo variants produce ChatResponseContent; exercise call_internal todo flow via ToolExecutor in integration 
tests." + }, + { + "file": "crates/forge_app/src/operation.rs", + "lines": "1-200", + "description": "Snapshots and unit tests that assert XML/text outputs of ToolOperation and how metrics/total_lines/truncation are reported." + } + ] + }, + "related_files": [ + { + "path": "crates/forge_app/src/operation.rs", + "relationship": "Converts ToolOperation into ToolOutput and contains truncation/formatting logic consumed by ToolExecutor; co-changes likely when altering ToolOperation variants or output attributes.", + "likely_co_change": true, + "reason_to_check": "When changing how ToolExecutor builds or dumps ToolOperation, operation.rs controls the final ToolOutput serialization and metrics updates." + }, + { + "path": "crates/forge_domain/src/tools/catalog.rs", + "relationship": "Defines ToolCatalog variants that ToolExecutor matches on; any change to the enum or variant names requires updating call_internal arms.", + "likely_co_change": true, + "reason_to_check": "Adding/removing ToolCatalog variants or changing field names will necessitate parallel updates in call_internal and read-before-edit checks." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app --lib", + "cargo test --workspace" + ], + "data_constants_to_check": [ + "forge_config::ForgeConfig (max_fetch_chars, max_stdout_prefix_lines, max_stdout_suffix_lines, max_search_result_bytes, max_sem_search_results)" + ], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "insights": [ + { + "type": "bug_fix", + "category": "State Management", + "title": "Ensure context metrics are updated/read after todo write", + "problem": "ToolExecutor previously called update_todos then immediately read todos into `after` by the previous order, but some call paths assumed the old ordering or returned wrong list.", + "root_cause": "Call order: before = context.get_todos(); after = context.update_todos(...) 
returned different semantics where update_todos previously returned active todos but internal metrics update semantics changed.", + "solution": "Change call to first call context.get_todos() to get before, then call context.update_todos(input.todos.clone()) without depending on its return value, and then call context.get_todos() to get the authoritative after-state.", + "lesson_learned": "When APIs mutate shared state, avoid relying on the mutating call's return value unless its contract is explicit and stable; after mutation read authoritative state via a separate accessor.", + "commits": [ + "970a75f", + "4f1ad6b" + ], + "constructs": [ + "call_internal", + "ToolExecutor::call_internal" + ] + }, + { + "type": "refactoring", + "category": "API", + "title": "call_internal now receives ToolCallContext", + "problem": "call_internal previously lacked access to ToolCallContext causing awkward service access patterns.", + "root_cause": "Need for context access (todos, metrics) inside tool execution.", + "solution": "Added context param to call_internal and propagated it through the executor call chain.", + "lesson_learned": "Prefer passing explicit execution context objects into lower-level functions rather than capturing services implicitly; makes side-effects and dependencies clearer and easier to test.", + "commits": [ + "4f1ad6b" + ], + "constructs": [ + "call_internal" + ] + }, + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Normalize relative paths for all tools", + "problem": "Tools accepted relative paths which could be interpreted differently by services or cause operations to fail when absolute paths expected", + "root_cause": "Tool handlers passed input.path directly to service layer; some services expect absolute paths", + "solution": "Introduce normalize_path() which joins relative paths with env.cwd; call normalize_path for Read, Write, Search, Patch, Remove, Undo, Shell cwd, and ReadImage (consistent normalization across tools)", + 
"commits": [ + "2d1fcf3", + "8f49a41" + ], + "constructs": [ + "ToolExecutor::normalize_path", + "ToolExecutor::create_temp_file (uses normalized paths)", + "match arms for Tools::Read/Write/Search/Patch/Remove/Undo/Shell/ReadImage" + ], + "lesson_learned": "Normalize file paths at the CLI/tool-executor boundary so the rest of the service layer can rely on absolute paths. This centralizes path handling and avoids duplicated checks downstream." + }, + { + "type": "feature", + "category": "Functionality", + "title": "Add ReadImage handling to tool executor", + "problem": "Image read tool needs to be routed through the executor but lacked normalization", + "root_cause": "New image read tool variant required integration in executor match", + "solution": "Add Tools::ReadImage branch to call services.read_image(normalized_path) and convert into ToolOutput::image", + "commits": [ + "5c86244", + "8f49a41" + ], + "constructs": [ + "Tools::ReadImage match arm" + ], + "lesson_learned": "Whenever adding a new tool variant, update executor path normalization and service routing to maintain consistent behavior across tools." + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/tools/catalog.rs": { + "file_path": "crates/forge_domain/src/tools/catalog.rs", + "short_description": "Domain tool schema: ToolCatalog enum and tool input/output structs used to express agent tool calls.", + "category": "SOURCE_CODE", + "description": "This file defines the canonical set of tools (ToolCatalog) that agents and the system can invoke. It contains a tagged enum ToolCatalog with variants for filesystem read/write/search/patch, shell execution, network fetch, followups, plan creation, skill fetch, todo operations, and a Task/agent-delegation variant. Additionally, the file defines the detailed input structs for each tool (FSRead, FSWrite, FSSearch, SemanticSearch, SearchQuery, TaskInput, etc.) 
plus helper types such as Todo and TodoStatus.\n\nIts primary purpose is to be the single source of truth for the tool call contract exchanged between LLMs/agents and the executor: schemas, doc strings (via ToolDescription macros), JSON serialization attributes, and JSON Schema support. The file is designed to be both machine- and human-readable: tool descriptions are provided in external markdown files for UX, and many serde attributes are present for stable wire format. Commit history shows this file has been the place for API changes like introducing a TodoItem type, adding normalized tool-name lookup helpers, and adding case-insensitive name mapping for robust parsing of LLM-provided names.", + "key_constructs": [ + { + "name": "ToolCatalog", + "type": "constant", + "purpose": "Enum enumerating all supported tool input types and their argument structures.", + "reasoning": "ToolCatalog is matched by the executor \u2014 any edit here must preserve tag names, serde rename conventions, and content field names used by deserialization and by ToolExecutor." + }, + { + "name": "SearchQuery", + "type": "struct", + "purpose": "Represents a pair of embedding query and reranking use_case used for semantic search.", + "reasoning": "Semantic search relies on both an embedding query and a reranker use case. Tests and services expect both fields to be present; changing semantics here alters search behavior." + }, + { + "name": "FSRead", + "type": "struct", + "purpose": "Input for file read operations (path, optional start/end lines, show_line_numbers).", + "reasoning": "ToolExecutor and operation serialization rely on start_line/end_line and show_line_numbers; ensure field names and types remain stable." 
+ }, + { + "name": "FSWrite", + "type": "struct", + "purpose": "Input for write operations (file path, content, overwrite flag).", + "reasoning": "The overwrite boolean is used by ToolExecutor to trigger read-before-overwrite enforcement; changing defaults or serde behavior affects safety checks." + }, + { + "name": "Todo", + "type": "struct", + "purpose": "Represents a todo item with id, content, and status.", + "reasoning": "Todo is persisted and shown to the user; earlier commits indicate differences between server-managed IDs and model-visible content. Respect the validation semantics (validate method) when editing." + }, + { + "name": "TodoStatus", + "type": "enum", + "purpose": "Enumerates todo lifecycle states: Pending, InProgress, Completed, Cancelled.", + "reasoning": "Serialization uses snake_case; logic that consumes TodoStatus must account for Completed vs incomplete semantics used in ToolOperation diffs." + }, + { + "name": "TaskInput", + "type": "struct", + "purpose": "Input to Task tool variant to delegate work to a named agent (tasks list, agent_id, optional session_id).", + "reasoning": "ToolRegistry handles Task tools specially and ToolExecutor marks reachability as unreachable for Task at runtime; preserving TaskInput shape is necessary to integrate with agent registry code." + }, + { + "name": "SimpleEnumSchema", + "type": "trait", + "purpose": "Helper for generating simple string-based JSON Schema representations for AsRefStr+EnumIter enums.", + "reasoning": "Used to provide stable JSON Schema for unit enums (like TodoStatus/OutputMode). Changes here affect schema generation and documentation." 
+ } + ], + "semantic_tags": [ + "tool-schema", + "serialization", + "domain", + "todo", + "semantic-search" + ], + "handles_entities": [ + "ToolCatalog", + "FSRead", + "FSWrite", + "FSSearch", + "SemanticSearch", + "SearchQuery", + "Todo", + "TaskInput" + ], + "key_behaviors": [ + "defines the wire-format for all tool calls between agents and executor", + "provides JSON Schema and serde attributes for each tool struct", + "encodes todo validation rules and todo status semantics" + ], + "pitfalls": [ + { + "mistake": "Changing variant names, serde tags, or field names without updating deserializers and executor match arms", + "consequence": "Deserialization or matching will break; LLM-produced tool calls or saved conversation objects may fail to parse.", + "prevention": "Any change to ToolCatalog must be co-updated in ToolExecutor::call_internal, ToolRegistry, and any persisted conversation formats." + }, + { + "mistake": "Exposing server-managed identifiers (Todo.id) to model-side inputs or expecting the model to supply them", + "consequence": "Confusion between client-provided content and server-managed IDs; earlier refactor introduced TodoItem to avoid this.", + "prevention": "Respect commit history: use content-keyed diffs for todo operations and avoid making model callers responsible for ids." + }, + { + "mistake": "Assuming search queries can omit use_case", + "consequence": "Reranker behavior degrades; code and documentation mandate both query and use_case to get relevant code results.", + "prevention": "Always populate both SearchQuery.query and SearchQuery.use_case; tools/services may assert this contract." 
+ } + ], + "reading_guide": { + "start_here": "ToolCatalog enum", + "key_sections": [ + "ToolCatalog variants: understand the set of supported tools and their argument structs", + "SearchQuery docs: long doc comments explain how to craft embedding vs reranking queries", + "Todo, TodoStatus, and Todo.validate: content validation behavior for todo items" + ], + "skip_unless_needed": [ + "SimpleEnumSchema implementation details unless you are changing schema generation", + "derive macro annotations and tool_description_file links if only changing runtime logic" + ] + }, + "tests": { + "exercised_by": [ + "inline tests in catalog.rs covering normalize_tool_name and TryFrom behavior" + ], + "test_functions": [], + "example_command": "cargo test -p forge_domain --lib", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_app/src/tool_executor.rs", + "relationship": "ToolExecutor matches on ToolCatalog and routes tool calls to services.", + "likely_co_change": true, + "reason_to_check": "Changing the tool schema requires updating executor pattern matches and service calling conventions." + }, + { + "path": "crates/forge_app/src/operation.rs", + "relationship": "Operation and output serialization depend on fields (start_line/end_line, FileInfo) described in tool inputs; co-change likely when altering read/write behavior.", + "likely_co_change": true, + "reason_to_check": "If FSRead/FSWrite semantics change (e.g., showing line numbers), operation serialization and metrics must be updated." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_domain --lib", + "cargo test --workspace" + ], + "data_constants_to_check": [], + "owns_authoritative_data": true, + "public_api_surface": true + }, + "insights": [ + { + "type": "refactoring", + "category": "API", + "title": "Introduce TodoItem type and document content-keyed behavior", + "problem": "Tool schema previously exposed full Todo records (with server IDs) to the model which led to confusion and mismatch with new incremental update semantics.", + "root_cause": "Model-side callers should not manage server IDs; protocol shifted to content-keyed diffs and 'cancelled' status for deletions.", + "solution": "Added TodoItem struct (content + status) used by TodoWrite tool; updated TodoWrite to use Vec and documented rules in tool description comments and help text.", + "lesson_learned": "Tool contract must clearly express which fields are keys vs server-managed IDs. Use lightweight DTOs for model input that avoid exposing server internals.", + "commits": [ + "e84bc7f" + ], + "constructs": [ + "TodoItem", + "TodoWrite", + "tool_call_todo_write" + ] + }, + { + "type": "bug_fix", + "category": "Parsing", + "title": "Normalize tool names (trim + case-insensitive) and add fast lookup map", + "problem": "Tool names returned by LLMs varied in casing/whitespace causing catalog lookups to fail; previous code only handled a couple of capitalized aliases.", + "root_cause": "Tool matching used exact string comparisons and ad-hoc alias mapping. 
This caused failures for 'READ'/'Read' or whitespace-padded tool names.", + "solution": "Introduced FORGE_TOOLS (LazyLock HashSet) and FORGE_TOOLS_LOWER map for case-insensitive lookup, implemented normalize_tool_name that trims and lowercases and returns canonical ToolName when available; updated TryFrom to use normalized name for parsing and added multiple tests.", + "lesson_learned": "Normalize external inputs (trim, lowercase) before catalog matching; maintain a precomputed case-insensitive lookup map for fast matching and add tests for uppercase/whitespace variants.", + "commits": [ + "7934cfb" + ], + "constructs": [ + "normalize_tool_name", + "FORGE_TOOLS_LOWER", + "TryFrom for ToolCatalog::try_from" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/operation.rs": { + "file_path": "crates/forge_app/src/operation.rs", + "short_description": "Represents tool operations and translates them into user-facing ToolOutput with truncation and metrics bookkeeping.", + "category": "SOURCE_CODE", + "description": "This file defines the ToolOperation enum which wraps concrete inputs and outputs for each tool invocation (FsRead, FsWrite, FsPatch, Shell, NetFetch, TodoWrite, etc.), plus utilities to convert ToolOperation into the XML/textish Element representation and into forge_domain::ToolOutput. It centralizes truncation logic and metrics updates: for each operation variant the into_tool_output method computes elements, updates conversation Metrics (FileOperation entries), and applies truncation/formatting rules using helpers from truncation.rs and forge_display::DiffFormat.\n\nOperation.rs exists to provide a single authoritative mapping from service outputs to what the user (or the conversation) will see: file diffs, search results (with truncation metadata), sem-search grouping, and todo diffs. 
The code includes serialization helpers (create_stream_element, create_validation_warning) and integrates with Element (XML-like) builder used in ToolOutput text. The file is tightly coupled with ToolExecutor (which produces ToolOperation instances) and the formatting layer (fmt::fmt_output.rs) which uses to_content for chat-visible tool outputs. Commit history highlights fixes around total_lines correctness, avoiding leaking internal IDs, and adding todo operation serialization.", + "key_constructs": [ + { + "name": "TempContentFiles", + "type": "struct", + "purpose": "Holds optional paths to temp files containing full stdout/stderr or fetch content when outputs were truncated.", + "reasoning": "ToolExecutor.dump_operation returns this struct and it is consumed when producing ToolOutput; editors must preserve field names and semantics to maintain truncation plumbing." + }, + { + "name": "ToolOperation", + "type": "enum", + "purpose": "Enum capturing all concrete tool operation results (inputs + outputs) used to produce serialized outputs and update metrics.", + "reasoning": "ToolOperation variants are key to output generation and metrics bookkeeping; adding/removing variants requires updating into_tool_output and any match sites in the codebase." + }, + { + "name": "StreamElement", + "type": "trait", + "purpose": "Abstracts the behavior of truncated streams (Stdout, Stderr) for reuse in create_stream_element.", + "reasoning": "Used to avoid duplication when building elements for truncated outputs; changes affect create_stream_element behavior." + }, + { + "name": "create_stream_element", + "type": "function", + "purpose": "Builds an Element for stdout/stderr truncated outputs including head/tail and full_output path attribute if present.", + "reasoning": "This function formats the streaming output representation used by the UI and needs to be preserved for snapshot tests." 
+ }, + { + "name": "create_validation_warning", + "type": "function", + "purpose": "Generates an XML Element capturing syntax validation warnings and details for files written with syntax errors.", + "reasoning": "Used by FsWrite/FsPatch output paths; tests assert presence/format of this warning element when errors exist." + }, + { + "name": "into_tool_output", + "type": "function", + "purpose": "Converts a ToolOperation into a forge_domain::ToolOutput, updates metrics, and applies truncation/formatting rules per operation variant.", + "reasoning": "This is the canonical serialization step; many snapshot tests and UI logic depend on exact attributes (display_lines, total_lines, content_hash).", + "callers": [ + { + "file": "crates/forge_app/src/tool_executor.rs", + "line": 377, + "context": "operation.into_tool_output(tool_kind, truncation_path, &env, &config, metrics)" + } + ] + } + ], + "semantic_tags": [ + "serialization", + "metrics", + "truncation", + "diffing", + "tool-output" + ], + "handles_entities": [ + "ToolOperation", + "ToolOutput", + "Element", + "FileOperation", + "Metrics", + "SearchResult", + "HttpResponse", + "ShellOutput", + "Todo" + ], + "key_behaviors": [ + "generates a structured Element representing tool results (files, diffs, search results)", + "updates conversation Metrics with file operation metadata and content hashes", + "applies truncation strategies and attaches full_output temp file paths when necessary" + ], + "pitfalls": [ + { + "mistake": "Using the length of returned content slices for total_lines instead of authoritative FileInfo.total_lines", + "consequence": "Displayed total_lines would be incorrect, failing snapshot tests and misleading users about file size (previous bug fixed in commits).", + "prevention": "Use output.info.total_lines (the authoritative counter) when serializing read outputs; preserve FileInfo usage." 
+ }, + { + "mistake": "Exposing internal IDs (todo/file ids) in serialized Elements", + "consequence": "Leaks server-managed identifiers to UI or agents; previous refactor removed id attributes for privacy and stability.", + "prevention": "Avoid adding server-managed IDs into the Element attributes; rely on content and FileInfo metadata instead." + }, + { + "mistake": "Changing diff formatting or Element attribute names without updating snapshot tests", + "consequence": "Snapshot tests in crates/forge_app will fail; many UI flows depend on exact XML structure and attribute names.", + "prevention": "Run snapshot tests and update snapshots only with conscious, coordinated changes." + } + ], + "reading_guide": { + "start_here": "ToolOperation enum and the impl ToolOperation::into_tool_output", + "key_sections": [ + "Variant-specific branches inside into_tool_output (FsRead, FsWrite, FsSearch, CodebaseSearch, FsPatch, FsMultiPatch, Shell, NetFetch, TodoWrite/Read) \u2014 they define output structure and metrics updates", + "Truncation-related helpers and create_stream_element for stdout/stderr handling", + "create_validation_warning for syntax-error reporting" + ], + "skip_unless_needed": [ + "detailed per-variant formatting for variants you aren't changing; focus on the variant you will touch", + "imports and small helper trait implementations once the main flow is understood" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_app/src/snapshots/forge_app__operation__tests__fs_read_with_truncation_path.snap", + "crates/forge_app/src/snapshots/forge_app__operation__tests__fs_read_with_explicit_range.snap", + "crates/forge_app/src/fmt/fmt_output.rs tests" + ], + "test_functions": [], + "example_command": "cargo test -p forge_app --lib", + "relevant_snippets": [ + { + "file": "crates/forge_app/src/snapshots/forge_app__operation__tests__fs_read_with_explicit_range.snap", + "lines": "1-12", + "description": "Snapshot asserting file element path, display_lines and 
total_lines for an explicit read range." + }, + { + "file": "crates/forge_app/src/snapshots/forge_app__operation__tests__fs_read_with_truncation_path.snap", + "lines": "1-12", + "description": "Snapshot asserting truncated file representation and presence of display_lines/total_lines attributes." + }, + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "lines": "1-200", + "description": "Unit tests that construct ToolOperation variants and call to_content which exercises formatting behaviors and diff generation used by Operation." + } + ] + }, + "related_files": [ + { + "path": "crates/forge_app/src/tool_executor.rs", + "relationship": "ToolExecutor produces ToolOperation instances consumed by into_tool_output; co-change required when adding/removing ToolOperation variants.", + "likely_co_change": true, + "reason_to_check": "If you modify how operations are produced (e.g., add Todo variants) update both call_internal and this serialization to match." + }, + { + "path": "crates/forge_domain/src/tools/catalog.rs", + "relationship": "Defines input structs (FSRead/FSWrite etc.) that are embedded in ToolOperation variants.", + "likely_co_change": true, + "reason_to_check": "Changing input field names/types affects serialization and formatting in into_tool_output." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app --lib", + "cargo test --workspace" + ], + "data_constants_to_check": [ + "config.max_search_result_bytes", + "config.max_search_lines" + ], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "insights": [ + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Report actual file total_lines instead of content slice line count", + "problem": "UI operation output used content.lines().count() from the slice that was returned (which could be truncated), therefore total_lines reported was incorrect for display and snapshots.", + "root_cause": "Used the length of the returned content slice instead of using the pre-computed total_lines metadata.", + "solution": "Use output.info.total_lines when serializing file output attr total_lines so it reflects the whole file's line count rather than the slice length.", + "commits": [ + "9aa2a80" + ], + "constructs": [ + "ToolOperation serialization block (attr total_lines)" + ], + "lesson_learned": "When reporting metadata, prefer stored authoritative counters (total_lines) instead of derived values from truncated payloads." + }, + { + "type": "refactoring", + "category": "State Management", + "title": "Stop exporting internal IDs in tool XML and use FileInfo meta for read outputs", + "problem": "Operation serialization exposed internal todo/file IDs and used separate file range fields; diffs and snapshots show removal of id attributes and consolidation of file range metadata into FileInfo.", + "root_cause": "Protocol and privacy: IDs are server-managed and should not be exposed. File read outputs were represented with separate fields (start/end/total/content_hash) leading to duplication.", + "solution": "Removed .attr(\"id\", &todo.id) from XML rendering, changed file-related attributes to use output.info.* and use FileInfo struct for consolidated metadata. 
Updated metrics insertion to use FileOperation with content_hash from FileInfo.", + "lesson_learned": "Avoid leaking server-managed IDs in external representations. Consolidate related metadata into a single struct (FileInfo) to reduce coupling and duplication.", + "commits": [ + "e84bc7f", + "29db91a" + ], + "constructs": [ + "ToolOperation::to_content", + "FileOperation usage" + ] + }, + { + "type": "refactoring", + "category": "Formatting", + "title": "Ensure numbered content is converted to String before further joins", + "problem": "Previously code sometimes left numbered content as an object or formatted value; needed to consistently produce String when building outputs.", + "root_cause": "LineNumber API change to return Displayable type required conversion to String for concatenation/joins.", + "solution": "Call .to_string() on numbered content where needed to produce String for output assembly.", + "commits": [ + "70cba43" + ], + "constructs": [ + "ToolOperation::render_output_chunks" + ] + }, + { + "type": "feature", + "category": "Other", + "title": "Add XML-ish tool output representation for todo operations", + "problem": "New TodoWrite/TodoRead operations had no serialization representation used by the tool output layer.", + "root_cause": "ToolOperation enum lacked todo variants and conversion to ToolOutput.", + "solution": "Added ToolOperation::TodoWrite and TodoRead variants and implemented Element-based construction producing text ToolOutput containing structured todo changes (added/updated/removed).", + "lesson_learned": "Tool operation outputs should include structured metadata (ids/status/change_kind) to drive both UI rendering and summary extraction; ensure ToolOperation -> ToolOutput mapping is added in the same change as the tool definition.", + "commits": [ + "4f1ad6b" + ], + "constructs": [ + "ToolOperation::TodoWrite", + "ToolOperation::TodoRead" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_repo/src/forge_repo.rs": { + "file_path": "crates/forge_repo/src/forge_repo.rs", + "short_description": "Repository fa\u00e7ade that aggregates infra and persistence implementations and delegates domain repository traits.", + "category": "SOURCE_CODE", + "description": "This file defines ForgeRepo, a single aggregated repository layer that composes multiple lower-level repository/infra implementations and exposes the various domain repository traits used across the application. It exists to centralize persistence and environment/infra operations behind one object that other crates can hold and use. ForgeRepo is constructed with an Arc infra object (F implements multiple infra traits) and creates internal implementations such as ConversationRepositoryImpl, ForgeProviderRepository, ForgeChatRepository, ForgeFileSnapshotService, CacacheStorage, and other Forge* repositories. The ctor uses the environment returned by infra.get_environment() to configure DB pool, snapshot/cache directories and other repo initializations.\n\nThe file is designed around delegation: each domain trait implementation for ForgeRepo largely forwards calls to either an internal specialized repository (conversation_repository, provider_repository, chat_repository, file_snapshot_service, etc.) or directly to the wrapped infra (self.infra) for infra traits (file I/O, HTTP, command execution, user prompts, walker, MCP connect, etc.). The use of Arc for internal repo fields indicates these components are intended to be shared and clonable cheaply across tasks. Many trait impls carry explicit type bounds on F (e.g., EnvironmentInfra + FileReaderInfra + HttpInfra + Send + Sync) \u2014 these bounds are integral to where the repo can be used and how it delegates.\n\nThis file is a central integration point between 'forge_domain' traits and the concrete infra/repository implementations in this crate and others. 
Because it forwards many behaviors, edits here must preserve trait signatures, the forwarding semantics, and the environmental assumptions (paths, cache TTL, DB initialization). Historical commit notes indicate the file has evolved to follow changes in infra signatures (for example read_batch_utf8 stream return type) and to support runtime overrides in the past; those patterns are important when changing constructor parameters or the signatures of forwarded calls.", + "key_constructs": [ + { + "name": "ForgeRepo", + "type": "struct", + "purpose": "Aggregates infra and repository implementations and provides a single facade implementing many domain and infra traits.", + "reasoning": "This is the canonical repository object other parts of the system receive and call; changes here affect how all domain persistence and infra access are performed, so keep its shape stable and ensure required internals are created consistently." + }, + { + "name": "new", + "type": "function", + "purpose": "Constructor that initializes internal repo implementations (DB pool, snapshot service, cache storage, provider/chat/skill/etc. repos) from the provided infra Environment.", + "reasoning": "New wires up environment-dependent config such as database path, workspace hash and cache directory; editing it requires preserving that wiring and the assumptions it encodes (e.g., location of cache_dir/mcp_cache and TTL).", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 53, + "context": "let repo = Arc::new(ForgeRepo::new(infra.clone()));" + } + ] + }, + { + "name": "SnapshotRepository::insert_snapshot", + "type": "function", + "purpose": "Exposes snapshot insertion by delegating to ForgeFileSnapshotService.", + "reasoning": "Snapshot semantics (create/undo) are implemented in another module; this delegate must preserve async signature and error propagation." 
+ }, + { + "name": "ConversationRepository::upsert_conversation", + "type": "function", + "purpose": "Persists a conversation by deferring to ConversationRepositoryImpl.", + "reasoning": "Conversation persistence is important for conversation history features \u2014 ensure transaction/DB pool expectations and error handling remain intact when touching this code." + }, + { + "name": "KVStore::cache_get", + "type": "function", + "purpose": "Retrieves cached entries from the CacacheStorage-backed cache.", + "reasoning": "Cache TTL and serialization semantics are handled by the cache implementation; keep the delegating behavior and signatures unchanged to avoid breaking consumers and serialized shapes." + }, + { + "name": "HttpInfra::http_get", + "type": "function", + "purpose": "Forwards HTTP GET requests to the underlying infra implementation.", + "reasoning": "The file relies on reqwest types (Response, HeaderMap, Url, EventSource) and expects the infra to provide concrete HTTP behavior \u2014 ensure these types and async behavior remain honored." + }, + { + "name": "FileReaderInfra::read_batch_utf8", + "type": "function", + "purpose": "Forwards streaming batch reads of files to the underlying infra, returning an impl Stream of (PathBuf, Result).", + "reasoning": "This forwarding was updated historically to match a changed streaming contract; preserve the return type and streaming behavior when editing to avoid breaking callers." + }, + { + "name": "CommandInfra::execute_command", + "type": "function", + "purpose": "Delegates command execution to the underlying infra, returning CommandOutput.", + "reasoning": "Command execution is a privileged operation; keep environment and working dir semantics and the method's async signature consistent." 
+ } + ], + "semantic_tags": [ + "persistence", + "infra-delegation", + "environment", + "caching", + "snapshot" + ], + "handles_entities": [ + "Snapshot", + "Conversation", + "ProviderCredential", + "Model", + "ChatCompletionMessage", + "WalkedFile" + ], + "key_behaviors": [ + "creates file snapshots for file-change operations", + "persists and retrieves conversations", + "delegates chat requests to chat repository for streaming responses", + "manages provider credentials and migrations", + "forwards file system, HTTP, command and MCP operations to infra", + "provides a KV cache backed by CacacheStorage" + ], + "pitfalls": [ + { + "mistake": "Removing or changing the trait bounds on the impl blocks for ForgeRepo without updating call sites or F implementations.", + "consequence": "Compilation failures across the codebase because impl blocks won't apply or delegations won't type-check; subtle runtime mismatches if infra doesn't actually provide required capabilities.", + "prevention": "Preserve the explicit trait bounds or ensure coordinated changes across infra trait definitions and callers." + }, + { + "mistake": "Modifying the constructor to change how environment is read (e.g., removing env.workspace_hash() or changing database_path usage) or changing the cache dir path/TTL.", + "consequence": "Database initialization or cache locations could shift unexpectedly, breaking persistence, tests, or causing data to be stored in different directories.", + "prevention": "Respect that new() uses env.get_environment() and relies on database_path(), workspace_hash(), and cache_dir(); any change must preserve those semantics or be coordinated with migration code." 
+ }, + { + "mistake": "Removing the unwrap() on DatabasePool::try_from(PoolConfig::new(...)).", + "consequence": "While unwrap is currently present and can panic during construction, changing it without considering callers may change failure modes; conversely, leaving it may cause test or runtime panics on config errors.", + "prevention": "If altering error handling, ensure callers or initialization flow handle Result properly and that tests exercise the new behavior." + }, + { + "mistake": "Altering the read_batch_utf8 return type or streaming contract.", + "consequence": "Breaks compatibility with downstream code that expects impl Stream)>, as indicated by historical refactor commits.", + "prevention": "Preserve the streaming return type and item shape when forwarding to infra.read_batch_utf8." + }, + { + "mistake": "Changing the visibility or replacement of internal repo fields (e.g., provider_repository) without changing the delegating implementations.", + "consequence": "Delegated methods will fail to compile or silently change behavior if the underlying implementation semantics differ.", + "prevention": "If you change internal repo types or names, update every delegating method to call the new implementations and ensure their behavior matches the expected domain contract." 
+ } + ], + "reading_guide": { + "start_here": "ForgeRepo", + "key_sections": [ + "new: construction wiring (DB pool, snapshot service, cache dir, repo initializations) \u2014 why the repo is configured this way", + "impl SnapshotRepository: snapshot insert/undo delegation (touches snapshot logic)", + "impl ConversationRepository: CRUD methods delegating to ConversationRepositoryImpl", + "impl ProviderRepository and ChatRepository: behavior for credentials, provider lists and chat models", + "impl EnvironmentInfra/HttpInfra/FileReaderInfra/other infra traits: these are thin delegations to self.infra and indicate required trait bounds" + ], + "skip_unless_needed": [ + "repetitive delegating impl blocks (many infra traits simply forward to self.infra) \u2014 focus on ones that call internal specialized repos (conversation, provider, chat, file snapshots)" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_repo --lib -- --nocapture", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_repo/src/fs_snap.rs", + "relationship": "Provides the implementation for file snapshot operations used by SnapshotRepository delegates", + "likely_co_change": true, + "reason_to_check": "If snapshot semantics or initialization change, fs_snap must be consistent with how ForgeRepo constructs and calls it." + }, + { + "path": "crates/forge_repo/src/conversation.rs", + "relationship": "Implements ConversationRepositoryImpl which ForgeRepo delegates conversation persistence to", + "likely_co_change": true, + "reason_to_check": "Changes in conversation persistence API (DB pool or workspace hash) require adjusting both constructor wiring and conversational method expectations." 
+ }, + { + "path": "crates/forge_infra/src/cache.rs", + "relationship": "CacacheStorage (re-exported) is used for mcp cache storage in ForgeRepo", + "likely_co_change": true, + "reason_to_check": "Cache serialization, TTL, or storage paths changes here directly affect KVStore behavior and must be coordinated." + }, + { + "path": "crates/forge_repo/src/provider.rs", + "relationship": "ForgeProviderRepository used by ForgeRepo for credential storage and provider listing", + "likely_co_change": true, + "reason_to_check": "Modifications to provider credential formats or migration behavior should be synchronized with the provider_repository usage here." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test --workspace", + "cargo test -p forge_repo --lib", + "cargo check --workspace", + "cargo clippy --workspace -- -D warnings" + ], + "data_constants_to_check": [ + "environment.database_path()", + "environment.cache_dir()", + "mcp_cache TTL value (currently Some(3600))" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "refactoring", + "category": "API", + "title": "Update repo layer to accept new read_batch_utf8 streaming contract", + "problem": "Repository layer depended on old batch-wise read signature.", + "root_cause": "Downstream propagation of FileReaderInfra signature change.", + "solution": "Change return type to impl Stream)> and forward infra.read_batch_utf8.", + "commits": [ + "1b114a4" + ], + "constructs": [ + "read_batch_utf8" + ] + }, + { + "type": "refactoring", + "category": "State Management", + "title": "Pass override_model/provider to ForgeRepo and constructed repositories", + "problem": "ForgeRepo constructor didn't accept runtime overrides; earlier practice relied on EnvironmentInfra.", + "root_cause": "Environment-driven overrides prevented per-instance override propagation.", + "solution": "Changed ForgeRepo::new to accept override_model and override_provider and pass them to 
AppConfigRepositoryImpl via setters.", + "lesson_learned": "Propagate runtime configuration explicitly to lower-level repositories; avoid hidden global env reliance.", + "commits": [ + "0328695" + ], + "constructs": [ + "ForgeRepo::new" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/tool_registry.rs": { + "file_path": "crates/forge_app/src/tool_registry.rs", + "short_description": "Registry and dispatcher for executing tools (Forge tools, delegated agents, MCP tools) with permission, modality and timeout checks", + "category": "SOURCE_CODE", + "description": "This file implements ToolRegistry, the central component that resolves, validates and dispatches tool calls coming from agents. It exists to: (1) determine if a requested tool is a built-in Forge tool (ToolCatalog), an agent-delegated tool (via AgentExecutor), or an MCP-exposed tool (via McpExecutor); (2) enforce workflow and environment policies (permission checks when in restricted mode); (3) validate modality requirements for tools that may require image inputs; (4) apply per-tool timeouts configured in ForgeConfig; and (5) produce ToolDefinition listings with rendered descriptions for UI/tool introspection.\n\nToolRegistry connects to the system via a generic Services + EnvironmentInfra trait object (Arc). It contains and composes ToolExecutor, AgentExecutor and McpExecutor instances to perform actual execution work. It also renders dynamic tool descriptions using a template engine and a TemplateConfig assembled from the Forge configuration. The call flow has clear branching: built-in ToolCatalog items (special-case Task delegation to AgentExecutor), agent tools (delegated to AgentExecutor), MCP tools (delegated to McpExecutor), otherwise return NotFound. Permission checks are done before starting the timeouted execution when in restricted mode. 
The file also provides static helpers for validating allowed tool names (including glob patterns), checking image-like file extensions, and validating modality support based on the current agent model.", + "key_constructs": [ + { + "name": "ToolRegistry", + "type": "class", + "purpose": "Central registry and dispatcher responsible for validating and executing tool calls", + "reasoning": "ToolRegistry mediates between configuration/environment, permissions, template rendering and the concrete executors (ToolExecutor, AgentExecutor, McpExecutor). When editing code that affects tool resolution, validation, or execution flow, this is the primary type to update." + }, + { + "name": "ToolRegistry::new", + "type": "function", + "purpose": "Constructs a ToolRegistry with executor instances wired to the provided Services implementation", + "reasoning": "Ensures ToolExecutor, AgentExecutor and McpExecutor are all created with the same Services Arc so state and config access are consistent." + }, + { + "name": "ToolRegistry::call", + "type": "function", + "purpose": "Public entry point to invoke a tool call, wraps result metadata and forwards to call_inner", + "reasoning": "Call is the stable outward-facing method returning ToolResult; it preserves call_id and tool name for the caller." + }, + { + "name": "ToolRegistry::call_inner", + "type": "function", + "purpose": "Implements the core branching logic that resolves a ToolCallFull and executes the appropriate tool path", + "reasoning": "Contains the complex control flow (ToolCatalog handling incl. Task, agent delegation, MCP execution, permission checks, modality checks and timeouts). This is the most change-sensitive function." 
+ }, + { + "name": "ToolRegistry::call_with_timeout", + "type": "function", + "purpose": "Runs tool execution futures under a timeout derived from ForgeConfig.tool_timeout_secs", + "reasoning": "All tool invocations (except multi-agent Task delegations/agent executor paths) are wrapped with this helper to enforce configured timeouts; it also produces a context-rich timeout error if needed." + }, + { + "name": "ToolRegistry::check_tool_permission", + "type": "function", + "purpose": "Checks workflow policies for a tool operation and optionally sends a policy update message to the tool context", + "reasoning": "Used only in restricted mode before executing a tool to ensure the configured policy decision is enforced and user-facing messages are delivered when a policy file was created." + }, + { + "name": "ToolRegistry::get_current_model", + "type": "function", + "purpose": "Finds the Model for the currently active agent by querying Services for active agent and provider models", + "reasoning": "Used for modality validation and dynamic tool description rendering; returns Option so callers must handle None." + }, + { + "name": "ToolRegistry::tools_overview", + "type": "function", + "purpose": "Assembles a ToolsOverview that lists system tools (rendered templates), agent-available tools, and MCP tools", + "reasoning": "Builds TemplateConfig from ForgeConfig and renders tool descriptions with the template engine using environment, model and agent list context." + }, + { + "name": "ToolRegistry::get_system_tools", + "type": "function", + "purpose": "Static helper to produce system ToolDefinitions with template-rendered descriptions and tool name map", + "reasoning": "Encapsulates template rendering and filters out sem_search when not supported (controlled by is_indexed && is_authenticated)." 
+ }, + { + "name": "ToolRegistry::validate_tool_call", + "type": "function", + "purpose": "Checks whether a given tool name is allowed by the agent's configured tools (supports glob patterns via ToolResolver)", + "reasoning": "Central policy enforcement for agent-scoped tool availability; it returns a specific NotAllowed Error with a message that is asserted in tests, so its behavior and error message must be preserved." + }, + { + "name": "ToolRegistry::validate_tool_modality", + "type": "function", + "purpose": "Ensures a tool's modality requirements (currently image input for read) are supported by the active model", + "reasoning": "Prevents calling tools that will produce image inputs against models that don't support image modalities; this check is done prior to executing the tool." + }, + { + "name": "ToolRegistry::has_image_extension", + "type": "function", + "purpose": "Lightweight check to detect image-like file paths by extension", + "reasoning": "Used to decide whether modality validation is required without reading file contents." 
+ } + ], + "semantic_tags": [ + "tools", + "agents", + "permissions", + "timeouts", + "modality", + "templating" + ], + "handles_entities": [ + "Agent", + "ToolCallFull", + "ToolDefinition", + "ToolOutput", + "ToolCatalog", + "Model", + "Environment", + "ToolResult" + ], + "key_behaviors": [ + "resolves and dispatches tool calls to Forge tools, agents or MCP tools", + "enforces restricted-mode policy checks and emits user-facing policy messages", + "applies configured timeouts to tool executions", + "renders dynamic tool descriptions using templates and TemplateConfig", + "validates model input modalities for image-based read operations" + ], + "pitfalls": [ + { + "mistake": "Changing validate_tool_call's error message or returned Error variant", + "consequence": "Unit tests assert the exact error string and code paths rely on Error::NotAllowed; altering it will break tests and callers expecting that message", + "prevention": "Preserve Error::NotAllowed semantics and display string when editing validation logic" + }, + { + "mistake": "Applying call_with_timeout to AgentExecutor paths that were intentionally not timed out", + "consequence": "Agent executions (Task branch and delegated agent tools) are explicitly not wrapped in the timeout helper; adding timeouts may break long-running agent tasks or change intended behavior", + "prevention": "Respect the existing comment and code path that avoids timeouts for AgentExecutor flows" + }, + { + "mistake": "Reordering permission checks relative to timeout wrapping", + "consequence": "If permission checks are performed under the timeout, the permission flow could fail with a timeout error rather than a clear permission denial and user-facing policy message", + "prevention": "Keep permission checks outside and before the timeout wrapping as currently implemented" + }, + { + "mistake": "Altering how ToolCatalog::Task is handled without updating AgentExecutor API", + "consequence": "Task handling transforms tasks into 
AgentExecutor.execute calls; changing the ToolCatalog Task shape or AgentExecutor signature without coordinating both sides will introduce compile/runtime errors", + "prevention": "Check AgentExecutor::execute signature and Task input structure when modifying Task handling" + } + ], + "reading_guide": { + "start_here": "ToolRegistry::call_inner", + "key_sections": [ + "ToolRegistry::call_inner: main dispatch logic for Forge tools, Task delegation, agent delegation and MCP tools", + "ToolRegistry::check_tool_permission: how restricted-mode policy checks are implemented and how policy messages are emitted", + "ToolRegistry::call_with_timeout: how timeouts are applied and the source of the timeout value (ForgeConfig)", + "ToolRegistry::get_system_tools: template rendering for tool descriptions using TemplateConfig and TemplateEngine" + ], + "skip_unless_needed": [ + "minor helpers in tests (create_test_agents) if not working on tests", + "has_image_extension implementation unless changing modality logic" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_app/src/tool_registry.rs tests (module at bottom of file)" + ], + "test_functions": [ + "test_restricted_tool_call", + "test_restricted_tool_call_err", + "test_validate_tool_call_with_glob_pattern_wildcard", + "test_validate_tool_call_with_glob_pattern_multiple_tools" + ], + "example_command": "cargo test -p forge_app", + "relevant_snippets": [ + { + "file": "crates/forge_app/src/tool_registry.rs", + "lines": "start of #[cfg(test)] module .. 
~ the following 60 lines", + "description": "Unit tests that validate validate_tool_call behavior (allowed tools, error message formatting, glob pattern support)" + } + ] + }, + "related_files": [ + { + "path": "crates/forge_app/src/agent_executor.rs", + "relationship": "Executor used by ToolRegistry to launch delegated agent tasks (Task tool and agent tools)", + "likely_co_change": true, + "reason_to_check": "AgentExecutor signatures or behavior changes require corresponding updates in ToolRegistry::call_inner Task and agent delegation branches" + }, + { + "path": "crates/forge_app/src/tool_executor.rs", + "relationship": "Executes built-in Forge ToolCatalog tools; ToolRegistry forwards ToolCatalog items here", + "likely_co_change": true, + "reason_to_check": "Changes to ToolExecutor::execute API or ToolCatalog input handling should be reflected in ToolRegistry::call_inner" + }, + { + "path": "crates/forge_app/src/mcp_executor.rs", + "relationship": "Executes MCP tools and returns outputs; ToolRegistry delegates to it for mcp_* tools", + "likely_co_change": true, + "reason_to_check": "McpExecutor output format and how textual outputs are extracted and sent to context are handled here" + }, + { + "path": "crates/forge_domain", + "relationship": "Domain types (ToolCatalog, ToolName, Agent, Model, InputModality, ToolOutput, etc.) used throughout ToolRegistry", + "likely_co_change": true, + "reason_to_check": "Any domain model changes (e.g. 
ToolCatalog variants, Model input_modalities) impact validation and dispatch logic" + }, + { + "path": "crates/forge_config/src/lib.rs", + "relationship": "Provides ForgeConfig values such as tool_timeout_secs and modality/templating defaults used by ToolRegistry", + "likely_co_change": false, + "reason_to_check": "When changing config keys or defaults, ensure the TemplateConfig mapping and timeout behavior remain correct" + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app", + "cargo test --workspace (if broader changes touch domain types or executors)" + ], + "data_constants_to_check": [ + "forge_config: tool_timeout_secs (controls call_with_timeout duration)", + "forge_config: restricted (controls whether permission flows run)", + "forge_config: max_read_lines / max_line_chars / max_image_size_bytes etc. (used for TemplateConfig)" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "feature", + "category": "API", + "title": "Introduce Task tool (launch subagents) and integrate with AgentExecutor", + "problem": "Need to delegate complex multi-step work to specialized subagents and optionally execute them in parallel.", + "root_cause": "Tool catalog lacked a dedicated Task tool, and agent execution didn't support reusing or resuming conversation context.", + "solution": "Added Task tool definition to domain, ToolRegistry special-cases ToolCatalog::Task to delegate tasks to AgentExecutor (supports optional session_id -> ConversationId reuse). ToolRegistry::get_system_tools now accepts agents for description rendering. Tests and snapshots updated. AgentExecutor.execute updated to accept optional conversation_id and reuse conversation or create new conversation with initiator set to 'agent'.", + "lesson_learned": "When adding high-level delegation tools, wire them through the existing executor abstraction and preserve conversation context/resumption semantics. 
+ Ensure UI/tool descriptions render available agents.", + "commits": [ + "9d5094f" + ], + "constructs": [ + "ToolRegistry::new", + "ToolRegistry::call (Task branch)", + "AgentExecutor::execute", + "AgentExecutor::agent_definitions" + ] + }, + { + "type": "bug_fix", + "category": "Typing", + "title": "Fix agent executor invocation to pass AgentId, not String", + "problem": "Executor.execute was previously invoked with input.name.to_string() which matched old API expecting String; after refactor AgentId type should be passed explicitly.", + "root_cause": "API changed to use AgentId type; callsites needed updating to construct AgentId.", + "solution": "Pass AgentId::new(input.name.as_str()) to executor.execute and adjust signature accordingly.", + "commits": [ + "b22ee2e" + ], + "constructs": [ + "ToolRegistry::call executor invocation" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/services.rs": { + "file_path": "crates/forge_app/src/services.rs", + "short_description": "Central collection of service interface traits and small DTOs used across the application", + "category": "SOURCE_CODE", + "description": "This file declares the public service interfaces (async traits) and a set of small data-transfer structs/enums that model outputs for file, workspace, provider, conversation, and network operations. It exists to define the contract between the application core (CLI/TUI, agents, command handlers) and concrete implementations (infra layer, provider clients, workspace indexers, file system helpers). The traits are narrow and focused (one responsibility per trait) and are intended to be implemented by components in other crates that provide the real behavior (for example, concrete provider clients, workspace indexer, and FS implementation).\n\nThe file groups domain-focused abstractions (ShellOutput, ReadOutput, PatchOutput, SearchResult, HttpResponse, PolicyDecision, etc.) 
and many async traits (ProviderService, AppConfigService, McpService, ConversationService, WorkspaceService, FsReadService, FsWriteService, ShellService, AuthService, etc.). Many of the traits reference types from the forge_domain crate (ModelId, ProviderId, ConversationId, SearchParams, File, Node, WorkspaceId, etc.), which makes this file the public surface that connects the UI/agent layers to the domain and infra layers. Several functions return streaming types (ResultStream, forge_stream::MpscStream) or use anyhow::Result, which indicates asynchronous, fallible operations are expected and will be handled by callers.\n\nBecause these traits form an application-wide API, any edit must respect binary and behavioral compatibility with existing implementations across the workspace (implementations live in other crates). The file uses async_trait for async trait methods and derives/utility crates (derive_setters, derive_more) for convenience types. Many methods return domain types by value (e.g., Conversation), or optional responses to express 'not found' semantics. The file is long and acts as an authoritative list of service contracts \u2014 changing signatures, types, or error semantics will require coordinated changes to all implementers and tests that depend on these traits.", + "key_constructs": [ + { + "name": "ShellOutput", + "type": "struct", + "purpose": "Models the result of executing a shell command (stdout/stderr content plus metadata).", + "reasoning": "Used by any ShellService implementation and consumers that present command output; preserves both the raw CommandOutput and contextual info (shell used, optional description). Consumers expect CommandOutput inside; changing fields will break callers." 
+ }, + { + "name": "PatchOutput", + "type": "struct", + "purpose": "Represents the result of patching a file: syntax errors, file before/after text, and a content hash.", + "reasoning": "Returned by FsPatchService methods and used by UI/consumers to show diffs and validation issues. Implementations compute content_hash and may rely on before/after being set when file existed." + }, + { + "name": "ReadOutput", + "type": "struct", + "purpose": "Wrapper for file-reading results, pairing Content (file text or image) with FileInfo metadata.", + "reasoning": "Unifies text vs image reads via the Content enum; consumers call file_content or as_image to branch on result. The Setters derive enables ergonomic construction; altering shape affects all read call sites." + }, + { + "name": "Content", + "type": "enum", + "purpose": "Discriminates between textual file contents and images when reading files.", + "reasoning": "Provides helper methods (file, image, file_content, as_image) that callers use; maintaining those helpers' behavior is important for downstream code that expects empty string for file_content on images." + }, + { + "name": "SearchResult", + "type": "struct", + "purpose": "Holds a vector of Match entries returned by FsSearchService.", + "reasoning": "Represents search results in different modes; the semantics of Match and MatchResult must align with search implementations." + }, + { + "name": "Match", + "type": "struct", + "purpose": "Entry for a search result: path and optional MatchResult.", + "reasoning": "Path is String and result optional to represent errors or missing details; consumers handle Option." + }, + { + "name": "MatchResult", + "type": "enum", + "purpose": "Different possible outcomes for a single match (error, found, count, file match, context).", + "reasoning": "Encodes multiple search output modes (single-line found, counts, context windows). Implementations must construct the correct variant for each search mode." 
+ }, + { + "name": "HttpResponse", + "type": "struct", + "purpose": "Represents fetched HTTP content with status code, parsed/raw context marker, and content type.", + "reasoning": "Returned by NetFetchService::fetch; callers may rely on ResponseContext::Parsed vs Raw to decide downstream parsing behavior." + }, + { + "name": "ResponseContext", + "type": "enum", + "purpose": "Marks whether fetched HTTP content has been parsed or should be treated raw.", + "reasoning": "Affects how consumers render or further process response content." + }, + { + "name": "FsWriteOutput", + "type": "struct", + "purpose": "Return type for file write operations: path, optional previous content, syntax errors, and content hash.", + "reasoning": "Used by FsWriteService::write; consumers expect before to be set when file existed, and errors to contain syntax validation info when present." + }, + { + "name": "FsRemoveOutput", + "type": "struct", + "purpose": "Return value for file remove operations, containing the removed file content.", + "reasoning": "Used by FsRemoveService::remove; allows undo/preview features to access removed content." + }, + { + "name": "PlanCreateOutput", + "type": "struct", + "purpose": "Return type for creating 'plan' files; includes path and optional previous content.", + "reasoning": "Used by PlanCreateService::create_plan; mirrors FsWrite semantics for plan-specific creation." + }, + { + "name": "FsUndoOutput", + "type": "struct", + "purpose": "Return type for undo operations with optional before/after snapshots.", + "reasoning": "Wraps optional strings and uses derive_more::From to allow convenient conversions; intended for FsUndoService::undo." + }, + { + "name": "TodoWriteOutput", + "type": "struct", + "purpose": "Models the output of todo_write tool: list of todos persisted.", + "reasoning": "Specific to a tool; present as a typed result to surface structured data to callers." 
+ }, + { + "name": "PolicyDecision", + "type": "struct", + "purpose": "Represents outcome of PolicyService checks (allowed and optional policy file path).", + "reasoning": "Used by PolicyService::check_operation_permission to indicate whether an operation may proceed and if a policy file was created." + }, + { + "name": "ProviderService", + "type": "trait", + "purpose": "Interface for calling model providers: chat completions, listing models, credential management and migration.", + "reasoning": "Central for all LLM/provider interactions; implementations must produce ResultStream for streaming chat messages and correctly map provider ids/URLs. This trait is an integration point with provider clients and affects features like suggest/commit generation." + }, + { + "name": "AppConfigService", + "type": "trait", + "purpose": "Manages user configuration choices like default provider/model, commit/suggest configs, reasoning effort, and atomic config updates.", + "reasoning": "Exposes a single write path (update_config) which must be implemented atomically; callers expect specific error semantics (NoDefaultProvider, NoDefaultModel) documented in comments." + }, + { + "name": "McpConfigManager", + "type": "trait", + "purpose": "Loads and writes MCP server configurations, optionally constrained to a Scope.", + "reasoning": "Used when reading/writing .mcp.json or similar configs; Scope-aware reads are important because callers sometimes require unmerged (scoped) configs." + }, + { + "name": "McpService", + "type": "trait", + "purpose": "Runs MCP calls, fetches available servers, and refreshes MCP cache.", + "reasoning": "MCP execution may run external tools/services and return structured ToolOutput; implementations must handle caching and refresh semantics." 
+ }, + { + "name": "ConversationService", + "type": "trait", + "purpose": "CRUD and atomic modification operations for conversations persisted by the app.", + "reasoning": "Provides find/upsert/modify/get/delete semantics; modify_conversation is specifically intended for atomic in-place updates via a closure and must be implemented to guarantee atomicity across callers." + }, + { + "name": "TemplateService", + "type": "trait", + "purpose": "Registers and renders templates with typed context objects.", + "reasoning": "Template rendering is generic over the context type (serde::Serialize); implementations must serialize objects and render templates safely." + }, + { + "name": "AttachmentService", + "type": "trait", + "purpose": "Fetches attachments for a given URL.", + "reasoning": "Abstracts attachment retrieval which may be local or remote; return type is Vec from forge_domain." + }, + { + "name": "CustomInstructionsService", + "type": "trait", + "purpose": "Provides custom instruction strings to be included in prompts/context.", + "reasoning": "Used by prompt assembly; returns Vec directly (not Result), which implies inexpensive retrieval." + }, + { + "name": "WorkspaceService", + "type": "trait", + "purpose": "Indexing and querying workspaces for semantic search and workspace lifecycle operations.", + "reasoning": "Large surface area: sync returns a streaming progress channel, query returns nodes, listing and status checks, auth init, and indexing checks. Implementations must coordinate with external workspace server URL and potentially send file contents externally; callers rely on is_indexed/is_authenticated behaviors." + }, + { + "name": "FileDiscoveryService", + "type": "trait", + "purpose": "Collects files based on a Walker config and lists current directory entries.", + "reasoning": "Used by file listing/selection UIs; sort order and File typing (dirs first) are contract expectations for consumers." 
+ }, + { + "name": "FsWriteService", + "type": "trait", + "purpose": "Creates or overwrites files atomically with content and returns FsWriteOutput.", + "reasoning": "Used by commands that write files; overwrite flag semantics must be respected and output must include before when appropriate." + }, + { + "name": "PlanCreateService", + "type": "trait", + "purpose": "Specific service to create 'plan' files with name and version metadata.", + "reasoning": "Separate from generic FsWriteService to encapsulate plan-specific validation or storage locations." + }, + { + "name": "FsPatchService", + "type": "trait", + "purpose": "Provides single and multiple patch operations against files.", + "reasoning": "Patch semantics include search/replace and multiple edits sequencing; implementations must return PatchOutput with syntax errors and before/after states." + }, + { + "name": "FsReadService", + "type": "trait", + "purpose": "Reads text files with optional line-range slicing and returns ReadOutput.", + "reasoning": "Consumers expect line slicing behavior and metadata in FileInfo; implementations must produce Content::File for text files." + }, + { + "name": "ImageReadService", + "type": "trait", + "purpose": "Reads image files and returns an Image domain type.", + "reasoning": "Separate trait because image handling differs from text; callers use Content::as_image when applicable." + }, + { + "name": "FsRemoveService", + "type": "trait", + "purpose": "Removes files and returns FsRemoveOutput with removed content.", + "reasoning": "Used by remove commands and to support undo; implementations should preserve removed content for potential restoration." 
+ }, + { + "name": "FsSearchService", + "type": "trait", + "purpose": "Searches files/content according to FSSearch params and returns optional SearchResult.", + "reasoning": "Returns Option so 'no matches' is distinct from an Error; implementations must honor different output modes (counts, file matches, context) encoded by MatchResult." + }, + { + "name": "FollowUpService", + "type": "trait", + "purpose": "Provides a follow-up selection/answer flow for tool calls (e.g., picking an option).", + "reasoning": "Interactive follow-ups might be backed by UI or automated logic; signature returns Option for nullable selection." + }, + { + "name": "FsUndoService", + "type": "trait", + "purpose": "Undoes last file operation for a path and returns snapshot info.", + "reasoning": "Marked with a TODO in file to note crossing responsibility with infra snapshot service; consumers expect filesystem undo semantics." + }, + { + "name": "NetFetchService", + "type": "trait", + "purpose": "Fetches remote HTTP resources producing HttpResponse with parsed/raw flag.", + "reasoning": "Network fetching may include parsing (HTML/JSON) or raw delivery; implementations must set content_type and ResponseContext appropriately." + }, + { + "name": "ShellService", + "type": "trait", + "purpose": "Executes shell commands with cwd, ANSI preserving option, silent mode, env vars, and returns ShellOutput.", + "reasoning": "High-safety surface used by features that run arbitrary commands; contains flags for behavior control and should be implemented carefully to maintain escape/safety semantics." + }, + { + "name": "AuthService", + "type": "trait", + "purpose": "Resolves user info and usage metrics from API keys.", + "reasoning": "Used in provider login and billing/usage UIs; implementations consult provider/backing auth services." 
+ }, + { + "name": "AgentRegistry", + "type": "trait", + "purpose": "Registry for agent metadata: get/set active agent, list agents, reload cache.", + "reasoning": "Used by agent selection UIs and command flows to locate and activate agents." + }, + { + "name": "CommandLoaderService", + "type": "trait", + "purpose": "Loads command definitions from forge/commands directory.", + "reasoning": "Decouples discovery of command descriptors from execution environment; consumers expect up-to-date command list." + }, + { + "name": "PolicyService", + "type": "trait", + "purpose": "Checks permissions for operations and returns PolicyDecision (may create policy file).", + "reasoning": "Permission checks may be interactive (user confirmation) and can produce a policy file path to be stored; implementations must reflect policy semantics used by the CLI/UI." + }, + { + "name": "SkillFetchService", + "type": "trait", + "purpose": "(Declared but truncated in snapshot) Intended to fetch skills definitions or metadata.", + "reasoning": "Skill fetching is typically used by agent/tool selection flows; ensure to review the complete trait definition in file for method signatures and expected return types." 
+ } + ], + "semantic_tags": [ + "services", + "async-traits", + "filesystem", + "workspace", + "providers", + "network", + "auth" + ], + "handles_entities": [ + "Conversation", + "Provider", + "Model", + "File", + "Workspace", + "Attachment", + "Template", + "Agent", + "ToolOutput", + "McpServers" + ], + "key_behaviors": [ + "execute shell commands and return structured output", + "read and write files, patch and undo operations", + "index and query workspaces for semantic search", + "manage provider credentials and stream chat completions", + "manage conversations and perform atomic modifications", + "fetch network resources with parsed/raw options", + "perform policy checks for operations" + ], + "pitfalls": [ + { + "mistake": "Modifying trait method signatures or types in this file without updating all implementations", + "consequence": "Build failures across multiple crates and runtime incompatibilities; implementations in infra and tests will break.", + "prevention": "Search for implementations (impl for some struct) across workspace and run full workspace tests after edits." + }, + { + "mistake": "Changing domain types used in method signatures (e.g., switching Provider to another generic)", + "consequence": "Breaks compile-time contracts with forge_domain-dependent code and provider clients expecting Url-based provider types.", + "prevention": "Respect forge_domain types and maintain generic shapes; coordinate changes with forge_domain crate if necessary." + }, + { + "mistake": "Altering semantics of Result/Option return values (e.g., turning Option into Result)", + "consequence": "Callers that rely on distinct 'no matches' (Ok(None)) vs error will behave incorrectly; changes are breaking.", + "prevention": "Preserve the documented behavior or update all callers and document the behavioral change thoroughly." 
+ }, + { + "mistake": "Assuming streaming types (ResultStream, MpscStream) are trivial to replace", + "consequence": "Streaming behavior and backpressure semantics expected by callers will change, possibly causing deadlocks or dropped messages.", + "prevention": "When touching streaming return types, verify producers and consumers handle the new stream type; run integration tests that exercise sync_workspace and chat streaming." + } + ], + "reading_guide": { + "start_here": "ProviderService", + "key_sections": [ + "ProviderService: central provider/chat model integration surface", + "WorkspaceService: indexing/querying workspaces and lifecycle", + "ConversationService: persistence and atomic modifications for conversation state", + "Filesystem traits (FsReadService, FsWriteService, FsPatchService, FsSearchService): I/O operations and expected outputs" + ], + "skip_unless_needed": [ + "Small DTO structs at top (SearchResult, MatchResult variants) once you understand the returned shapes", + "Tool-specific outputs like TodoWriteOutput if not working on that tooling flow" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test --workspace", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_domain", + "relationship": "Defines the domain types (Conversation, ProviderId, ModelId, File, Node, etc.) used extensively by these traits", + "likely_co_change": true, + "reason_to_check": "If changing method signatures or domain types you must update forge_domain and all implementations that rely on those types." + }, + { + "path": "crates/forge_app/src/infra.rs", + "relationship": "Likely contains concrete implementations wiring these traits to platform/IO; edits to traits require updating infra implementations", + "likely_co_change": true, + "reason_to_check": "Trait changes will most likely require corresponding changes to infra implementations and wiring." 
+ }, + { + "path": "crates/forge_main/src/main.rs", + "relationship": "CLI/TUI entrypoints call into services defined here; behavior changes here affect user-facing flows", + "likely_co_change": false, + "reason_to_check": "When behavior or API semantics change (streaming, error handling), UI code that composes and displays results needs review." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test --workspace", + "cargo test -p crates/forge_app" + ], + "data_constants_to_check": [], + "owns_authoritative_data": true, + "public_api_surface": true + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/orch.rs": { + "file_path": "crates/forge_app/src/orch.rs", + "short_description": "Orchestrates an agent turn/loop: chat calls, tool execution, lifecycle events, and streaming.", + "category": "SOURCE_CODE", + "description": "Orchestrator is the central control loop for executing an agent's conversation turn(s). It coordinates building the context, invoking the provider chat API, applying transformation pipelines to model responses, running tool calls (parallelizing 'Task'-style calls vs sequential system tools), firing lifecycle hook events, handling retries and error tracking, and streaming UI events via an optional sender. The module encapsulates policy about when to yield/stop (finish reason + tool catalog hints), how to update persisted Conversation state, and how to marshal tool outputs and metrics back into the conversation context.\n\nThis file exists to keep the high-level orchestration logic in one place so other smaller services (AgentService, EnvironmentInfra, hooks, template engine, tool services, error tracker) can remain focused on their responsibilities. 
The code intentionally separates: (1) transport-level chat invocation and streaming (execute_chat_turn), (2) tool execution details including notifier handshake and lifecycle events (execute_tool_calls), and (3) the loop that sequences requests, applies retry/backoff config, and decides when to interrupt or finish (run). Several cross-cutting concerns are handled here: transformation pipeline for incoming messages (including NormalizeToolCallArguments and reasoning normalization), retry reporting to the UI, and integration with Hook handlers to allow pluggable lifecycle behavior (Start/Request/Response/ToolcallStart/ToolcallEnd/End).\n\nKey design choices visible here: case-insensitive identification of Task tools and system tools, explicit handshake with the UI before running system tools using tokio::sync::Notify to avoid stdout interleaving, reconstructing tool results in the original order of calls after parallel/sequential execution, and preserving model-specific reasoning semantics by applying transformers conditioned on model and reasoning support. 
The orchestrator relies on several external types and traits (AgentService, EnvironmentInfra, Hook, ToolErrorTracker, TemplateEngine, ToolCatalog, ToolCallContext) and mutates Conversation state while persisting it via services.update().", + "key_constructs": [ + { + "name": "Orchestrator", + "type": "class", + "purpose": "Holds services, conversation, agent and orchestration configuration; primary object coordinating an agent's run loop.", + "reasoning": "When modifying state, adding fields, or adjusting lifecycle behavior, changes should respect the existing responsibilities of Orchestrator: coordinating chat calls, invoking lifecycle hooks, executing tools with notifier handshakes, applying retry behavior, and updating Conversation persistence.", + "callers": [ + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 17, + "context": "use crate::orch::Orchestrator;" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 126, + "context": "let orch = Orchestrator::new(services.clone(), conversation, agent, setup.config.clone())" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 14, + "context": "use crate::orch::Orchestrator;" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 157, + "context": "let orch = Orchestrator::new(" + } + ] + }, + { + "name": "new", + "type": "function", + "purpose": "Constructs an Orchestrator instance with initial conversation, agent, services and config.", + "reasoning": "This ctor sets many defaulted fields (sender, tool_definitions, models, error_tracker, hook). 
Any change to initialization must ensure defaults and ownership (Arc) semantics remain consistent with how callers construct and use Orchestrator.", + "callers": [ + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 126, + "context": "let orch = Orchestrator::new(services.clone(), conversation, agent, setup.config.clone())" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 157, + "context": "let orch = Orchestrator::new(" + } + ] + }, + { + "name": "get_conversation", + "type": "function", + "purpose": "Accessor that returns a reference to the internal Conversation.", + "reasoning": "Used by external consumers/tests to inspect conversation state without consuming Orchestrator; maintain signature stability if tests or other modules rely on borrowing semantics.", + "callers": [ + { + "file": "crates/forge_app/src/app.rs", + "line": 177, + "context": "let conversation = orch.get_conversation().clone();" + } + ] + }, + { + "name": "execute_tool_calls", + "type": "function", + "purpose": "Executes a slice of ToolCallFull: parallelizes 'Task' calls, sequences system tool calls with notifier/handshake and lifecycle events, and returns results in original call order.", + "reasoning": "This function contains subtle concurrency and ordering semantics: case-insensitive Task detection, constructing and waiting on Notify for system tools, firing ToolcallStart/ToolcallEnd events via Hook, and reconstructing results using expect() guarantees. Any edits must preserve the notifier handshake and original-order reconstruction logic." + }, + { + "name": "send", + "type": "function", + "purpose": "Sends a ChatResponse over the optional ArcSender if present.", + "reasoning": "This is how streaming messages and UI events are emitted. Changes to sender handling (e.g., introducing different error handling) will affect streaming and retry reporting paths that rely on send/try_send semantics used elsewhere."
+ }, + { + "name": "is_tool_supported", + "type": "function", + "purpose": "Determines whether tools are supported by checking agent.tool_supported or falling back to the model's tools_supported.", + "reasoning": "Used to gate TransformToolCalls and streaming behavior. Keep the priority: agent-level override, otherwise model-level. Altering precedence changes tool behavior across agents/models." + }, + { + "name": "execute_chat_turn", + "type": "function", + "purpose": "Builds a transformation pipeline (tool normalization, image handling, reasoning drops when unsupported, and reasoning normalization across model changes), calls services.chat_agent, and streams a full ChatCompletionMessageFull.", + "reasoning": "This method composes DefaultTransformation pipes \u2014 NormalizeToolCallArguments was explicitly introduced to normalize persisted/resumed tool arguments. Transformer ordering and conditions are model/tool-support-dependent; changes can subtly alter tool-call parsing and reasoning visibility." + }, + { + "name": "run", + "type": "function", + "purpose": "Main orchestration loop: repeatedly runs chat turns, processes tool calls, applies error tracking and retry messaging, updates Conversation and metrics, enforces per-turn request limits, fires lifecycle End event, and signals task completion when appropriate.", + "reasoning": "This is the entrypoint to the agentic loop. It contains many stop/interrupt conditions (finish_reason + ToolCatalog.should_yield, error_tracker limits, max_requests_per_turn), retry wrapper integration that emits ChatResponse::RetryAttempt, and persistence via services.update(). 
Any change to loop control, event ordering, or context mutation will impact conversation correctness and may break tests that assert stop conditions or lifecycle order.", + "callers": [ + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 136, + "context": "let result = orch.run().await;" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 174, + "context": "let dispatch_result = orch.run().await;" + } + ] + }, + { + "name": "get_model", + "type": "function", + "purpose": "Returns the active ModelId for the agent.", + "reasoning": "Simple accessor but used to anchor transformer ReasoningNormalizer and lifecycle events; altering its behavior affects model-specific logic." + } + ], + "semantic_tags": [ + "orchestration", + "agent", + "tools", + "concurrency", + "lifecycle", + "streaming" + ], + "handles_entities": [ + "Conversation", + "Agent", + "ToolCallFull", + "ToolResult", + "ChatResponse", + "Model", + "ToolDefinition", + "ToolErrorTracker", + "Hook", + "ToolCallContext" + ], + "key_behaviors": [ + "runs the agentic request/response loop until yield or completion", + "executes tool calls: parallel 'Task' tools and sequential system tools", + "fires lifecycle events (Start, Request, Response, ToolcallStart, ToolcallEnd, End)", + "streams chat and retry events via an optional sender", + "applies transformation pipeline to model responses before calling chat agent", + "persists conversation state and metrics via services.update" + ], + "pitfalls": [ + { + "mistake": "Removing or bypassing the Notify-based handshake before running system tools.", + "consequence": "Tool stdout or side-effect output may interleave with the UI header, causing confusing output ordering and breaking tests that assert UI ordering.", + "prevention": "Preserve creation, sending, and awaited notifier.notified() sequence in execute_tool_calls for system tools." 
+ }, + { + "mistake": "Treating tool name comparisons as case-sensitive or changing Task detection logic.", + "consequence": "Task-style calls may be misclassified, leading to incorrect parallelization and result ordering assumptions; tests assume case-insensitive detection.", + "prevention": "Keep the case-insensitive checks (eq_ignore_ascii_case or lowercase comparison) when identifying Task tools." + }, + { + "mistake": "Altering the reconstruction logic that maps parallel and sequential results back to the original tool_calls order (the expect()s on iterators).", + "consequence": "Mismatched lengths or reordered results could panic at runtime (expect) or produce a wrong sequence of tool outputs in the conversation.", + "prevention": "Ensure partition sizes and iterator consumption remain aligned; if changing flow, preserve original-order mapping semantics and handle mismatches explicitly." + }, + { + "mistake": "Removing NormalizeToolCallArguments or changing transformer order in execute_chat_turn.", + "consequence": "Resumed/persisted tool call arguments may be stringified or incorrectly typed, causing downstream provider transforms or tool invocation to fail silently or behave incorrectly.", + "prevention": "Keep NormalizeToolCallArguments in the transformation pipeline and preserve its relative position to other transformers." + }, + { + "mistake": "Changing stop condition logic to use tool_calls.is_empty() instead of model-provided finish_reason.", + "consequence": "Agentic loops might terminate prematurely or incorrectly; commit history shows this was a past bug fix.", + "prevention": "Respect the finish_reason == Some(FinishReason::Stop) combined with tool_calls.is_empty() logic and ToolCatalog.should_yield checks." 
+ } + ], + "reading_guide": { + "start_here": "run", + "key_sections": [ + "execute_tool_calls: concurrency model, Notify handshake, lifecycle events, order-preserving reconstruction", + "execute_chat_turn: transformer pipeline composition and chat_agent invocation (NormalizeToolCallArguments inclusion)", + "run: orchestration loop, retry wrapper integration, stop/yield conditions, error_tracker behavior and interrupt emission", + "is_tool_supported/send/get_model: small helpers that affect branching logic elsewhere" + ], + "skip_unless_needed": [ + "simple getters (get_model, get_conversation) and trivial send wrapper" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_app/src/orch_spec/orch_runner.rs", + "crates/forge_app/src/orch_spec/orch_spec.rs" + ], + "test_functions": [ + "test_multi_turn_conversation_stops_only_on_finish_reason", + "test_toolcall_start_notifier_prevents_stdout_interleaving", + "tests verifying NormalizeToolCallArguments behavior for resumed contexts", + "tests asserting lifecycle event order (Start/Request/Response/ToolcallStart/ToolcallEnd/End)" + ], + "example_command": "cargo test -p forge_app -- --nocapture", + "relevant_snippets": [ + { + "file": "crates/forge_app/src/orch_spec/orch_spec.rs", + "lines": "unknown in provided snapshot", + "description": "Tests that multi-turn conversations only stop when the model finish_reason is Stop (not merely because tool_calls was empty)." + }, + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "lines": "unknown in provided snapshot", + "description": "Simulates UI acknowledgement of Notify sent in ToolCallStart and verifies no stdout interleaving occurs." 
+ } + ] + }, + "related_files": [ + { + "path": "crates/forge_app/src/agent.rs", + "relationship": "Defines AgentService trait and AgentService implementation used by Orchestrator to invoke tools and chat; changes to Orchestrator calling conventions likely require checking this file.", + "likely_co_change": true, + "reason_to_check": "AgentService::call and chat_agent signatures, error/return types, and streaming expectations." + }, + { + "path": "crates/forge_config/src/lib.rs", + "relationship": "Provides ForgeConfig types referenced for retry configuration and other per-run settings consumed by Orchestrator.", + "likely_co_change": true, + "reason_to_check": "Edits that alter or add retry behavior, or read different config fields, should be validated against config schema and defaults." + }, + { + "path": "crates/forge_app/src/hook.rs", + "relationship": "Hook handlers are invoked for lifecycle events (Start, Request, Response, ToolcallStart/End, End) and are responsible for side-effects like title generation.", + "likely_co_change": true, + "reason_to_check": "Changes to lifecycle ordering or event payloads need corresponding hook handler updates; commit history moved title generation into hooks." + }, + { + "path": "crates/forge_app/src/error_tracker.rs", + "relationship": "ToolErrorTracker is used to track tool failures, compute remaining attempts and limits, and decide when to interrupt the agent.", + "likely_co_change": true, + "reason_to_check": "Modifying error handling, retry-message rendering, or interruption thresholds requires cross-checking logic here." + }, + { + "path": "crates/forge_template", + "relationship": "TemplateEngine used to render retry messages that are appended to tool outputs.", + "likely_co_change": false, + "reason_to_check": "If retry messaging format or template names change, templates or TemplateEngine API must be validated." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app", + "cargo test --workspace", + "cargo test -p forge_app --test orch_spec -- --nocapture (if test target exists)" + ], + "data_constants_to_check": [ + "forge_config::ForgeConfig.retry (used by retry wrapper)", + "Agent.max_requests_per_turn (per-turn request limit)", + "ToolErrorTracker.limit() (max tool failure attempts per turn)" + ], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "insights": [ + { + "type": "refactoring", + "category": "State Management", + "title": "Insert tool-args normalization into orchestrator transformer pipeline", + "problem": "Tool calls persisted with stringified/unrepaired arguments caused provider-specific transforms to operate on wrong types.", + "root_cause": "No normalizer step in pipeline for resumed contexts.", + "solution": "Pipe NormalizeToolCallArguments::new() into the default transformation pipeline so transforms downstream see consistent Parsed argument values.", + "commits": [ + "3253412", + "69882c6" + ], + "constructs": [ + "DefaultTransformation::pipe", + "NormalizeToolCallArguments" + ] + }, + { + "type": "bug_fix", + "category": "Concurrency", + "title": "Add notifier to ToolCallStart to prevent tool stdout interleaving", + "problem": "Orchestrator sent ToolCallStart and executed tool immediately; UI header rendering could lag and tool stdout interleaved.", + "root_cause": "No coordination between orchestrator/task runner and UI to ensure header rendered first.", + "solution": "Orchestrator constructs Arc<Notify>, includes it in ChatResponse::ToolCallStart, sends it, then awaits notifier.notified() before executing the tool.
Tests in orch_spec simulate notifier acknowledgement.", + "lesson_learned": "Simple cross-task ordering can be implemented with Notify and should be used for UI/IO ordering guarantees.", + "commits": [ + "c1c0506" + ], + "constructs": [ + "Orchestrator::run (ToolCallStart handling)" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/orch_spec/orch_runner.rs", + "crates/forge_app/src/orch_spec/orch_spec.rs" + ] + } + }, + { + "type": "refactoring", + "category": "Concurrency", + "title": "Remove direct title generation from Orchestrator", + "problem": "Orchestrator contained title generation flow; moving logic into a hook reduces duplication and centralizes lifecycle handling.", + "root_cause": "Title generation was tightly coupled to Orchestrator and duplicated responsibilities handled by hooks.", + "solution": "Removed internal generate_title() and spawn/wait logic from Orchestrator; rely on Hook handlers (TitleGenerationHandler) to generate and set conversation.title during lifecycle events.", + "commits": [ + "0cf8736" + ], + "constructs": [ + "generate_title (removed)" + ] + }, + { + "type": "bug_fix", + "category": "State Management", + "title": "Fix exit condition for agentic loop/orchestration", + "problem": "Orchestrator would stop turns based on tool_calls.is_empty() which prematurely ended multi-turn agentic loops", + "root_cause": "Tool call presence isn't a reliable stop condition; finish_reason can indicate explicit stop from model", + "solution": "Set is_complete when finish_reason == Some(FinishReason::Stop) and only then stop the turn; tests updated/added to assert behavior", + "commits": [ + "f1f8b5a" + ], + "constructs": [ + "Orchestrator::some_loop (where is_complete assigned)", + "FinishReason usage" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/orch_spec/orch_spec.rs (new test test_multi_turn_conversation_stops_only_on_finish_reason)" + ], + "source_commits": [ + "f1f8b5a" + ] + }, + "lesson_learned":
"Use explicit model-provided finish reasons to determine loop termination instead of inferred conditions like empty tool call lists; models control stop semantics." + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/provider.rs": { + "file_path": "crates/forge_domain/src/provider.rs", + "short_description": "Domain types and helpers representing external model providers, their IDs, URLs, models and credentials", + "category": "SOURCE_CODE", + "description": "This file contains domain-level representations for external AI/model providers (LLM and context engines) used across the Forge codebase. It centralizes provider identity (ProviderId), provider category (ProviderType), how provider endpoints and models are represented (Provider, ModelSource), and a small abstraction (AnyProvider) that allows callers to treat configured URL-backed providers and template-based provider definitions uniformly where possible. The module exists to keep provider metadata, serialization rules, and helper test fixtures together so other crates (configuration loading, service clients, UI) can consume provider definitions in a stable, well-typed form.\n\nDesign decisions reflected here: ProviderId uses Cow<'static, str> so built-in providers can be static borrowed constants while allowing custom providers to own their ID strings at runtime. Provider is generic over a URL-like type (Template<> or Url) to represent both template provider descriptors (unresolved) and configured, runtime providers. ModelSource captures that models can either be a URL (to query) or a hardcoded list. AnyProvider is an enum wrapper used by listings and UI to present either a configured Provider or an unresolved ProviderTemplate. 
Serialization and schema behavior is explicitly controlled (serde, schemars) \u2014 ProviderId is serialized/deserialized as String and Provider has serde defaults/skip_serializing_if annotations for optional fields.\n\nThis file also supplies many test helper constructors (zai, openai, vertex_ai, azure, etc.) inside a cfg(test) module; these are intended to be reused by unit tests in the crate and possibly other crates (pub(super) visibility). Historical commit notes indicate a fix to AnyProvider::url: it now returns Option and special-cases template providers with no url_params by parsing the template string into a Url so callers can get a usable URL for hardcoded-template providers. That change is important for code interacting with provider URLs and must be respected by future edits.", + "key_constructs": [ + { + "name": "ProviderType", + "type": "class", + "purpose": "Enumeration that distinguishes provider categories (Llm vs ContextEngine)", + "reasoning": "Used to route behavior/features depending on whether a provider is an LLM or a context/indexing engine; default is Llm for backward compatibility.", + "callers": [ + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 461, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 691, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 717, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 743, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 784, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": 
"crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 824, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 856, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 896, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 963, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 984, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1028, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1078, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1125, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1153, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1181, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1230, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1371, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": 
"crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1452, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_repo/src/provider/openai_responses/repository.rs", + "line": 1500, + "context": "provider_type: forge_domain::ProviderType::Llm," + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1043, + "context": "provider_type: forge_domain::ProviderType::Llm," + } + ] + }, + { + "name": "ProviderId", + "type": "class", + "purpose": "Wrapper around provider identifier string with built-in constants and display formatting", + "reasoning": "Encapsulates provider identity, supports static borrowed constants for built-in providers and owned strings for custom ones (Cow<'static, str>), and provides display and parsing logic used throughout configuration and UI.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 36, + "context": "async fn get_provider(&self, id: &ProviderId) -> Result;" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 177, + "context": "provider_id: ProviderId," + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 184, + "context": "provider_id: ProviderId," + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 190, + "context": "async fn remove_provider(&self, provider_id: &ProviderId) -> Result<()>;" + }, + { + "file": "crates/forge_domain/src/migration.rs", + "line": 3, + "context": "use crate::ProviderId;" + }, + { + "file": "crates/forge_domain/src/migration.rs", + "line": 13, + "context": "pub migrated_providers: Vec," + }, + { + "file": "crates/forge_domain/src/migration.rs", + "line": 18, + "context": "pub fn new(credentials_path: PathBuf, migrated_providers: Vec) -> Self {" + }, + { + "file": "crates/forge_domain/src/migration.rs", + "line": 34, + "context": "let providers = vec![ProviderId::OPENAI, ProviderId::ANTHROPIC];" + }, + { + "file": "crates/forge_domain/src/migration.rs", + "line": 34, + "context": "let providers = 
vec![ProviderId::OPENAI, ProviderId::ANTHROPIC];" + }, + { + "file": "crates/forge_domain/src/hook.rs", + "line": 405, + "context": "use crate::{Agent, AgentId, Conversation, ModelId, ProviderId};" + }, + { + "file": "crates/forge_domain/src/hook.rs", + "line": 410, + "context": "ProviderId::FORGE," + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 120, + "context": "async fn get_provider(&self, id: &ProviderId) -> Result {" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 319, + "context": "provider_id: ProviderId," + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 330, + "context": "provider_id: ProviderId," + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 340, + "context": "async fn remove_provider(&self, provider_id: &ProviderId) -> Result<()> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 9, + "context": "FileStatus, Image, McpConfig, McpServers, Model, ModelId, Node, Provider, ProviderId," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 167, + "context": "async fn get_provider(&self, id: forge_domain::ProviderId) -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 173, + "context": "async fn remove_credential(&self, id: &forge_domain::ProviderId) -> anyhow::Result<()>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 185, + "context": "async fn get_default_provider(&self) -> anyhow::Result;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 198, + "context": "provider_id: Option<&forge_domain::ProviderId>," + } + ] + }, + { + "name": "ProviderResponse", + "type": "class", + "purpose": "Enum indicating response protocol/compatibility (OpenAI, Anthropic, Bedrock, etc.)", + "reasoning": "Used to choose client behavior or request/response handling based on provider compatibility.", + "callers": [ + { + "file": "crates/forge_services/src/app_config.rs", + "line": 130, + "context": "Model, ModelId, 
ModelSource, Provider, ProviderId, ProviderResponse, ProviderTemplate," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 151, + "context": "response: Some(ProviderResponse::OpenAI)," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 177, + "context": "response: Some(ProviderResponse::Anthropic)," + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 506, + "context": "AnyProvider, InputModality, Model, ModelId, ModelSource, ProviderId, ProviderResponse," + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1044, + "context": "response: Some(ProviderResponse::OpenAI)," + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1065, + "context": "response: Some(ProviderResponse::OpenAI)," + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1086, + "context": "response: Some(ProviderResponse::OpenAI)," + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1107, + "context": "response: Some(ProviderResponse::Anthropic)," + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1128, + "context": "response: Some(ProviderResponse::OpenAI)," + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 106, + "context": "ModelSource, ProviderId, ProviderResponse, ResultStream," + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 219, + "context": "response: Some(ProviderResponse::OpenAI)," + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 978, + "context": "ApiKey, AuthCredential, AuthDetails, ProviderId, ProviderResponse, ProviderType," + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 993, + "context": "response: Some(ProviderResponse::Bedrock)," + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 436, + "context": "response: Some(forge_app::domain::ProviderResponse::Anthropic)," + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 504, + "context": 
"response: Some(forge_app::domain::ProviderResponse::Anthropic)," + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 644, + "context": "response: Some(forge_app::domain::ProviderResponse::Anthropic)," + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 708, + "context": "response: Some(forge_app::domain::ProviderResponse::Anthropic)," + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 800, + "context": "response: Some(forge_app::domain::ProviderResponse::Anthropic)," + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 4, + "context": "use forge_app::domain::{ProviderId, ProviderResponse};" + }, + { + "file": "crates/forge_repo/src/provider/provider_repo.rs", + "line": 69, + "context": "response_type: Option," + } + ] + }, + { + "name": "ModelSource", + "type": "class", + "purpose": "Represents where a provider's models come from: either a URL (or template) or a hardcoded Vec", + "reasoning": "Supports both providers that expose model lists over HTTP and providers that ship a built-in list of models.", + "callers": [ + { + "file": "crates/forge_services/src/app_config.rs", + "line": 130, + "context": "Model, ModelId, ModelSource, Provider, ProviderId, ProviderResponse, ProviderTemplate," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 162, + "context": "models: Some(ModelSource::Hardcoded(vec![Model {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 188, + "context": "models: Some(ModelSource::Hardcoded(vec![Model {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 318, + "context": "ModelSource::Url(url) => ModelSource::Url(forge_domain::Template::<" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 318, + "context": "ModelSource::Url(url) => ModelSource::Url(forge_domain::Template::<" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 323, + "context": 
"ModelSource::Hardcoded(list) => ModelSource::Hardcoded(list.clone())," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 323, + "context": "ModelSource::Hardcoded(list) => ModelSource::Hardcoded(list.clone())," + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 506, + "context": "AnyProvider, InputModality, Model, ModelId, ModelSource, ProviderId, ProviderResponse," + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1050, + "context": "models: Some(ModelSource::Url(" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1071, + "context": "models: Some(ModelSource::Url(" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1092, + "context": "models: Some(ModelSource::Url(" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1113, + "context": "models: Some(ModelSource::Url(Template::new(" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1134, + "context": "models: Some(ModelSource::Url(" + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 106, + "context": "ModelSource, ProviderId, ProviderResponse, ResultStream," + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 221, + "context": "models: Some(ModelSource::Url(" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 284, + "context": "Some(forge_domain::ModelSource::Hardcoded(models)) => Ok(models.clone())," + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 1256, + "context": "use forge_domain::{Model, ModelId, ModelSource};" + }, + { + "file": "crates/forge_repo/src/provider/bedrock.rs", + "line": 1281, + "context": "fixture_provider.models = Some(ModelSource::Hardcoded(fixture_models.clone()));" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 234, + "context": "forge_domain::ModelSource::Url(url) => {" + }, + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "line": 264, + "context": 
"forge_domain::ModelSource::Hardcoded(models) => {" + } + ] + }, + { + "name": "Provider", + "type": "class", + "purpose": "Generic representation of a provider's configuration, parametrized over T (Url or Template)", + "reasoning": "Generic design allows the same struct to model unresolved provider descriptors (Template) loaded from provider.json and resolved runtime providers (Url) with credentials.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 120, + "context": "async fn get_agent_provider(&self, agent_id: AgentId) -> anyhow::Result>;" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 123, + "context": "async fn get_default_provider(&self) -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/agent_provider_resolver.rs", + "line": 4, + "context": "use forge_domain::{AgentId, ModelId, Provider};" + }, + { + "file": "crates/forge_app/src/agent_provider_resolver.rs", + "line": 26, + "context": "pub async fn get_provider(&self, agent_id: Option) -> Result> {" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 228, + "context": "async fn get_agent_provider(&self, agent_id: AgentId) -> anyhow::Result> {" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 402, + "context": "async fn get_default_provider(&self) -> Result> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 9, + "context": "FileStatus, Image, McpConfig, McpServers, Model, ModelId, Node, Provider, ProviderId," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 164, + "context": "provider: Provider," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 166, + "context": "async fn models(&self, provider: Provider) -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 167, + "context": "async fn get_provider(&self, id: forge_domain::ProviderId) -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 538, + "context": "provider: 
Provider," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 539, + "context": ") -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 643, + "context": "provider: Provider," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 650, + "context": "async fn models(&self, provider: Provider) -> anyhow::Result> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 654, + "context": "async fn get_provider(&self, id: forge_domain::ProviderId) -> anyhow::Result> {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 1015, + "context": "provider: Provider," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 1016, + "context": ") -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 130, + "context": "Model, ModelId, ModelSource, Provider, ProviderId, ProviderResponse, ProviderTemplate," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 140, + "context": "providers: Vec>," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 148, + "context": "Provider {" + } + ] + }, + { + "name": "ProviderTemplate", + "type": "constant", + "purpose": "Type alias Provider> for template-based provider definitions", + "reasoning": "Convenience alias used to represent provider descriptors that may require URL parameters before they can be used." 
+ }, + { + "name": "AnyProvider", + "type": "class", + "purpose": "Enum wrapper that can hold either a configured Provider or ProviderTemplate", + "reasoning": "Used by listings and UI components to present providers uniformly and to query configuration state, id, auth methods, and optionally resolve a usable Url when possible.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 33, + "context": "async fn get_providers(&self) -> Result>;" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 36, + "context": "async fn get_provider(&self, id: &ProviderId) -> Result;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 95, + "context": "async fn get_providers(&self) -> Result> {" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 120, + "context": "async fn get_provider(&self, id: &ProviderId) -> Result {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 7, + "context": "AgentId, AnyProvider, Attachment, AuthContextRequest, AuthContextResponse, AuthMethod," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 168, + "context": "async fn get_all_providers(&self) -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 658, + "context": "async fn get_all_providers(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 129, + "context": "AnyProvider, ChatRepository, ConfigOperation, Environment, InputModality, MigrationResult," + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 299, + "context": "async fn get_all_providers(&self) -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/app_config.rs", + "line": 303, + "context": ".map(|p| AnyProvider::Url(p.clone()))" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 506, + "context": "AnyProvider, InputModality, Model, ModelId, ModelSource, ProviderId, ProviderResponse," + }, + { + "file": 
"crates/forge_main/src/model.rs", + "line": 550, + "context": "struct CliProvider(AnyProvider);" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 563, + "context": "AnyProvider::Url(provider) => {" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 571, + "context": "AnyProvider::Template(_) => {" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1041, + "context": "let fixture = AnyProvider::Url(Provider {" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1062, + "context": "let fixture = AnyProvider::Url(Provider {" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1083, + "context": "let fixture = AnyProvider::Url(Provider {" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1104, + "context": "let fixture = AnyProvider::Template(Provider {" + }, + { + "file": "crates/forge_main/src/model.rs", + "line": 1125, + "context": "let fixture = AnyProvider::Url(Provider {" + }, + { + "file": "crates/forge_app/src/command_generator.rs", + "line": 235, + "context": "async fn get_all_providers(&self) -> Result> {" + } + ] + }, + { + "name": "ProviderModels", + "type": "class", + "purpose": "Simple struct pairing a ProviderId with its available models", + "reasoning": "Used to serialize/return model lists from providers.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 6, + "context": "use forge_domain::{AgentId, Effort, ModelId, ProviderModels};" + }, + { + "file": "crates/forge_api/src/api.rs", + "line": 28, + "context": "async fn get_all_provider_models(&self) -> Result>;" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 288, + "context": "pub async fn get_all_provider_models(&self) -> Result> {" + }, + { + "file": "crates/forge_app/src/app.rs", + "line": 305, + "context": "Some(ProviderModels { provider_id, models })" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 87, + "context": "async fn get_all_provider_models(&self) -> Result> {" + } + 
] + }, + { + "name": "AnyProvider::url", + "type": "function", + "purpose": "Return a usable Url for configured providers or for template providers with no URL parameters", + "reasoning": "Important consumer-facing behavior: returns Some(Url) when a provider is configured (Provider) and for template providers that have a hardcoded template string (no url_params). Returns None for template providers that require user-supplied parameters." + }, + { + "name": "Provider::api_key", + "type": "function", + "purpose": "Helper to extract an ApiKey from the provider credential if present", + "reasoning": "Simplifies callers that need to access API key credentials by pattern-matching AuthDetails::ApiKey safely." + } + ], + "semantic_tags": [ + "providers", + "credentials", + "serialization", + "templating", + "models" + ], + "handles_entities": [ + "Provider", + "ProviderId", + "Model", + "AuthCredential", + "ApiKey" + ], + "key_behaviors": [ + "represents built-in and custom provider identities", + "models provider configuration and optional URL templating", + "exposes whether a provider is configured and what auth methods it supports", + "parses and formats provider display names" + ], + "pitfalls": [ + { + "mistake": "Treat ProviderId as a plain String or replace Cow<'static, str> with owned String without considering constants", + "consequence": "Will break the assumption that built-in provider constants are static borrows and could increase allocations or alter equality/ordering semantics relied on elsewhere.", + "prevention": "Respect ProviderId's Cow design; if changing representation, update all usages, serde behavior, and ProviderId::built_in_providers accordingly." + }, + { + "mistake": "Assume AnyProvider::url always returns a Url for template providers", + "consequence": "Code will panic or behave incorrectly for template providers that require url_params. 
Historically this was a bug that returned None for hardcoded templates \u2014 a fix changed the return type to Option and special-cased templates with empty url_params.", + "prevention": "Check for None before using the Url and be aware of the rule: Template providers return Some(parsed_url) only when url_params.is_empty()." + }, + { + "mistake": "Modify serde/schemars annotations on ProviderId or Provider without understanding downstream storage/transport", + "consequence": "Changes can break persisted configuration formats, API contracts, or JSON schema generation used for validation.", + "prevention": "Preserve #[serde(from = \"String\")] and #[schemars(with = \"String\")] unless intentionally changing wire format; update consumers and migrations if altering." + }, + { + "mistake": "Changing Provider::credential/AuthDetails shape or auth-method handling without updating Provider::api_key and code that matches AuthDetails", + "consequence": "Credential extraction will break, causing providers to be treated as unconfigured or mis-handle authentication.", + "prevention": "When editing AuthDetails or AuthCredential structures, update pattern matches in Provider::api_key and associated callers." + }, + { + "mistake": "Re-ordering built-in provider constants in ProviderId::built_in_providers without considering comment", + "consequence": "Provider resolution order may change leading to different selection semantics in multi-provider setups.", + "prevention": "Respect the comment that order is significant; check consumers that iterate built_in_providers before changing order." 
+ } + ], + "reading_guide": { + "start_here": "ProviderId", + "key_sections": [ + "ProviderId: constants, built_in_providers, Display & FromStr \u2014 identity and formatting", + "Provider: generic provider representation including fields (id, url, auth_methods, credential, models)", + "AnyProvider: wrapper and helper methods (is_configured, url, auth_methods, into_configured)", + "cfg(test) test_helpers: test provider constructors used by unit tests" + ], + "skip_unless_needed": [ + "derive_more / attribute boilerplate at top", + "individual per-provider test helper implementations unless adding tests" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_domain", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_config", + "relationship": "Where user/project forge.yaml provider configuration is parsed/merged with built-in descriptors; changes to Provider types affect config loading and validation.", + "likely_co_change": true, + "reason_to_check": "Editing Provider serialization/fields or ProviderId format requires updating configuration parsing/validation." + }, + { + "path": "crates/forge_services", + "relationship": "Clients that call external provider endpoints; they consume Provider and AnyProvider::url and use auth methods.", + "likely_co_change": true, + "reason_to_check": "Altering URL resolution or auth extraction (Provider::api_key) affects request construction and service clients." + }, + { + "path": "crates/forge_repo", + "relationship": "Conversation persistence and provider model lists may reference ProviderModels or ProviderId; used when persisting provider-related metadata.", + "likely_co_change": false, + "reason_to_check": "If ProviderModels or ProviderId wire format changes, persisted data or proto bindings may need migration." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_domain", + "cargo test --workspace" + ], + "data_constants_to_check": [ + "ProviderId::built_in_providers order and contents", + "serde/schemars annotations on ProviderId and Provider fields" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Return URL for template providers that have a hardcoded URL (no parameters)", + "problem": "Template providers that include a fixed url string but have empty url_params were returning None from url(), even though a usable URL exists.", + "root_cause": "url() previously returned Option<&Url> and treated AnyProvider::Template uniformly as requiring params (returned None).", + "solution": "Change url() to return Option, return cloned Url for AnyProvider::Url, and parse/return template string as Url for AnyProvider::Template when url_params is empty.", + "commits": [ + "021c122" + ], + "constructs": [ + "AnyProvider::url" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_infra/src/forge_infra.rs": { + "file_path": "crates/forge_infra/src/forge_infra.rs", + "short_description": "Central infrastructure aggregator that implements infra traits for file, HTTP, gRPC, auth, commands, and user I/O.", + "category": "SOURCE_CODE", + "description": "This file defines ForgeInfra, a concrete composition of lower-level services that implements the various infrastructure traits used across the Forge codebase (file IO, directory reading, HTTP, gRPC, MCP server, command execution, user prompts, environment/config access, auth strategy creation, and console output). 
The purpose of this module is to provide a single, testable object that higher-level application code depends on via trait bounds (EnvironmentInfra, FileReaderInfra, FileWriterInfra, CommandInfra, HttpInfra, GrpcInfra, etc.), enabling dependency injection and easier mocking in tests.\n\nForgeInfra is designed as a thin delegating fa\u00e7ade: construction wires together specialized service implementations (file reader/writer/meta, directory reader, walker, HTTP infra, gRPC client, MCP server, command executor, inquire/prompt service, console writer, and auth strategy factory). After construction, each trait method simply forwards to the appropriate underlying service. This design centralizes construction and configuration (cwd, ForgeConfig, services_url) in one place so that other crates / modules can accept trait objects and remain decoupled from concrete types. The new() constructor reads cached configuration when constructing services (e.g., configuring the http service and directory reader) and creates shared Arcs for thread-safe reuse.", + "key_constructs": [ + { + "name": "ForgeInfra", + "type": "class", + "purpose": "Holds all concrete infra services and provides implementations for the infra traits.", + "reasoning": "An aggregation type that centralizes service construction and wiring so the rest of the app can depend on trait interfaces. 
Modifications to infra wiring (e.g., constructor parameters or service types) are concentrated here.", + "callers": [ + { + "file": "crates/forge_repo/src/skill.rs", + "line": 292, + "context": "use forge_infra::ForgeInfra;" + }, + { + "file": "crates/forge_repo/src/skill.rs", + "line": 297, + "context": "fn fixture_skill_repo() -> (ForgeSkillRepository, std::path::PathBuf) {" + }, + { + "file": "crates/forge_repo/src/skill.rs", + "line": 302, + "context": "let infra = Arc::new(ForgeInfra::new(" + } + ] + }, + { + "name": "new", + "type": "function", + "purpose": "ForgeInfra::new(cwd: PathBuf, config: forge_config::ForgeConfig, services_url: Url) -> Self: constructs the ForgeInfra with all services initialized.", + "reasoning": "This is the canonical place where environment, config, and runtime dependencies are resolved and passed to sub-services. Any change to service constructors or required configuration must be reflected here.", + "callers": [ + { + "file": "crates/forge_repo/src/skill.rs", + "line": 302, + "context": "let infra = Arc::new(ForgeInfra::new(" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 52, + "context": "let infra = Arc::new(ForgeInfra::new(cwd, config, services_url));" + } + ] + }, + { + "name": "config", + "type": "function", + "purpose": "Returns the current Forge configuration, delegating to the internal ForgeEnvironmentInfra cached reader.", + "reasoning": "Exposes the authoritative config read path for callers; must preserve error semantics since it can return underlying IO errors." + }, + { + "name": "read_batch_utf8", + "type": "function", + "purpose": "Delegates a batched file read stream to the underlying ForgeFileReadService.", + "reasoning": "This function was the subject of a recent API refactor (commit insights). The trait signature changed and ForgeInfra must forward the new streaming signature exactly to remain compatible." 
+ }, + { + "name": "create_auth_strategy", + "type": "function", + "purpose": "Creates an auth strategy for a provider by delegating to ForgeAuthStrategyFactory.", + "reasoning": "Responsible for producing AnyAuthStrategy which other parts of the system use for provider login/requests. The semantics and returned type are authoritative for provider auth flows." + }, + { + "name": "http_eventsource", + "type": "function", + "purpose": "Exposes an HTTP EventSource (server-sent events) stream to callers by delegating to ForgeHttpInfra.", + "reasoning": "Used where streaming HTTP SSE is required; must preserve header/body param semantics and return type (EventSource) - the caller expects the streaming behavior." + } + ], + "semantic_tags": [ + "infrastructure", + "filesystem", + "http", + "grpc", + "auth", + "commands", + "async" + ], + "handles_entities": [ + "File", + "ForgeConfig", + "Environment", + "CommandOutput", + "McpServerClient", + "ProviderAuthStrategy", + "HTTP Response/EventSource", + "WalkedFile", + "ConsoleWriter" + ], + "key_behaviors": [ + "reads and writes files (single and batched)", + "removes files and creates directories", + "executes shell commands and raw commands returning ExitStatus", + "performs HTTP operations including eventsource streaming", + "provides gRPC channel access and hydration", + "creates provider auth strategies", + "connects to MCP servers and returns clients", + "prompts and selects user input via inquire service", + "lists and reads directory entries with configured concurrency" + ], + "pitfalls": [ + { + "mistake": "Modifying the signature of a forwarded trait method (for example, read_batch_utf8) without updating the forwarding call.", + "consequence": "Compilation errors or mismatched behavior causing higher-level code to expect a different stream signature; subtle runtime mismatches for async stream types.", + "prevention": "When editing trait signatures, ensure ForgeInfra forwards with the exact updated signature and 
types." + }, + { + "mistake": "Changing construction-time configuration usage (e.g., using config_infra.cached_config().unwrap_or(config)) without preserving fallback semantics.", + "consequence": "Service initialization may use incorrect defaults or consume config unexpectedly leading to different concurrency settings (directory reader) or HTTP/file write behavior.", + "prevention": "Keep the same cached_config fallback behavior and ensure values like max_parallel_file_reads are preserved when constructing services." + }, + { + "mistake": "Replacing Arc-wrapped services with non-Arc instances or removing clones when services are used across impl blocks.", + "consequence": "Thread-safety and sharing assumptions break; code may no longer compile under Send/Sync bounds or cause duplication of state.", + "prevention": "Respect the use of Arc for services intended to be shared; follow existing ownership and cloning patterns when editing fields or constructors." + } + ], + "reading_guide": { + "start_here": "ForgeInfra", + "key_sections": [ + "new: constructs and wires all dependent services; examine how cached_config() is used", + "impl EnvironmentInfra: exposes config and env access; check cached_config semantics", + "impl FileReaderInfra and FileWriterInfra: forwards to file service implementations (read_batch_utf8 was recently refactored)", + "impl HttpInfra: ensures eventsource and other HTTP methods are available to callers" + ], + "skip_unless_needed": [ + "Per-trait forwarding methods that are one-line delegations (unless you are changing underlying service behavior)", + "ConsoleWriter trait impls for basic write/flush unless modifying output formatting" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_infra --lib", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_infra/src/auth.rs", + "relationship": "Defines AnyAuthStrategy and ForgeAuthStrategyFactory used to create 
provider auth strategies.", + "likely_co_change": true, + "reason_to_check": "If auth factory signatures or returned types change, update create_auth_strategy forwarding in ForgeInfra." + }, + { + "path": "crates/forge_infra/src/fs_read.rs", + "relationship": "Implementation of ForgeFileReadService that ForgeInfra delegates file reading to.", + "likely_co_change": true, + "reason_to_check": "If read_batch_utf8 or range_read_utf8 signatures change, ForgeInfra must forward matching types." + }, + { + "path": "crates/forge_infra/src/fs_read_dir.rs", + "relationship": "ForgeDirectoryReaderService construction is parameterized by parallel read settings read from config. New constructor params require infra updates.", + "likely_co_change": true, + "reason_to_check": "Constructor signature or concurrency config changes must be propagated in ForgeInfra::new." + }, + { + "path": "crates/forge_infra/src/http.rs", + "relationship": "ForgeHttpInfra provides http_get/http_post/http_eventsource that ForgeInfra forwards.", + "likely_co_change": true, + "reason_to_check": "When HTTP client behavior or generic type parameters (e.g., file sink) change, check ForgeInfra's http_service field and constructor call." + }, + { + "path": "crates/forge_infra/src/grpc.rs", + "relationship": "ForgeGrpcClient provides channel() and hydrate() used by GrpcInfra impl.", + "likely_co_change": true, + "reason_to_check": "gRPC client API changes require updating the GrpcInfra impl methods in this file." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_infra --lib", + "cargo test --workspace" + ], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "refactoring", + "category": "API", + "title": "Propagate new read_batch_utf8 streaming signature", + "problem": "Infrastructure layer must match changed FileReaderInfra signature.", + "root_cause": "Higher-level trait change required infra implementation changes.", + "solution": "Update ForgeInfra::read_batch_utf8 to forward the new stream signature.", + "commits": [ + "1b114a4" + ], + "constructs": [ + "ForgeInfra::read_batch_utf8" + ] + }, + { + "type": "refactoring", + "category": "Configuration", + "title": "Pass parallel_file_reads to directory reader service", + "problem": "ForgeInfra constructed ForgeDirectoryReaderService without the newly required concurrency parameter.", + "root_cause": "ForgeDirectoryReaderService::new(parallel_file_reads) signature changed.", + "solution": "Construct ForgeDirectoryReaderService::new(env.parallel_file_reads) while building infra.", + "lesson_learned": "When constructor signatures change, update all central places (infra builders) that create those services to surface new configuration.", + "commits": [ + "e25c1c0" + ], + "constructs": [ + "ForgeInfra::new" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/command_generator.rs": { + "file_path": "crates/forge_app/src/command_generator.rs", + "short_description": "Generates a single-shell command from a natural language prompt via an LLM and JSON-schema response.", + "category": "SOURCE_CODE", + "description": "This file implements the CommandGenerator abstraction which transforms a user Natural Language prompt into a single shell command string by calling an LLM provider and parsing a structured JSON response. 
It exists to centralize the small, cost-sensitive flow of generating command suggestions (e.g., `ls -la`, `pwd`) rather than invoking heavier models or flows used elsewhere. The implementation composes context for the LLM using environment information, a file listing for the current directory, and a system prompt rendered from a template. It then selects a provider and model: first preferring a suggest-specific config (if present) and falling back to the default provider/model if not.\n\nThe file defines a response DTO (ShellCommandResponse) annotated with serde and schemars so the code can emit a JSON Schema for the provider to respond with a validated JSON structure. The CommandGenerator::generate workflow contains the essential steps: gather environment and file list, render the system prompt template, choose provider/model (respecting suggest-config), build a Context with response format set to the JSON Schema for ShellCommandResponse, call ProviderService::chat to get a ResultStream, collect the full returned message, and deserialize the message into ShellCommandResponse.command. Errors include any provider/chat errors and explicit parse errors which wrap the response body for debugging.\n\nThere is a tests module included in the same file that defines a MockServices implementation (implementing EnvironmentInfra, FileDiscoveryService, ProviderService, AppConfigService) used to exercise CommandGenerator behaviors: normal success with files present, empty-directory behavior, and error behavior when the LLM response doesn't match the expected shape. Tests also capture and snapshot the Context passed into the provider chat so the exact LLM prompt and metadata are asserted via insta snapshots. 
The commit history for this file shows an explicit feature change: prefer a dedicated suggest config before falling back to defaults to allow cheaper/faster models for command suggestions.\n\nThis file is focused and opinionated: it provides a single-purpose generator that must respect templating filenames and JSON schema shapes, and it depends on multiple trait-based services (EnvironmentInfra, FileDiscoveryService, ProviderService, AppConfigService) that must be implemented by callers or tests. Any edits must preserve the provider selection semantics, the JSON Schema generation for ShellCommandResponse, and the template name used to render the system prompt (forge-command-generator-prompt.md) because tests and the LLM response expectations depend on these artifacts.", + "key_constructs": [ + { + "name": "ShellCommandResponse", + "type": "class", + "purpose": "DTO used to deserialize structured JSON responses from the LLM representing the generated shell command.", + "reasoning": "This type drives the response schema (schemars::schema_for!) used to instruct the model about the exact JSON form to return, and is also what serde uses to parse the assistant output. Changes to this shape directly change the JSON schema and thus the LLM contract and tests." + }, + { + "name": "CommandGenerator", + "type": "class", + "purpose": "Main abstraction that produces a shell command string from a UserPrompt using injected service traits.", + "reasoning": "This struct encapsulates all steps needed to build LLM context and call the provider. 
It is generic over a services object that must implement the required trait set (EnvironmentInfra + FileDiscoveryService + ProviderService + AppConfigService), enabling test injection and different runtime implementations.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 312, + "context": "use forge_app::CommandGenerator;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 313, + "context": "let generator = CommandGenerator::new(self.services.clone());" + } + ] + }, + { + "name": "CommandGenerator::generate", + "type": "function", + "purpose": "Orchestrates environment gathering, template rendering, provider/model selection, LLM chat call, and JSON parsing to return a command string.", + "reasoning": "This is the key entry point used by other code to obtain a shell command suggestion; it must preserve ordering of steps (render system prompt before context creation, ask suggest-config first, then default provider fallback) and error handling semantics (wrap JSON parse errors with the raw response content)." + }, + { + "name": "CommandGenerator::create_context", + "type": "function", + "purpose": "Builds a Context that contains a system message and a user message and sets ResponseFormat to the JSON Schema of ShellCommandResponse.", + "reasoning": "This function is where the JSON Schema is attached to the LLM request. Any modification to schema generation, message roles, or response format changes the contract with the provider and will affect both runtime behavior and tests." 
+ } + ], + "semantic_tags": [ + "command-generation", + "llm", + "provider-integration", + "templating", + "json-schema", + "testing" + ], + "handles_entities": [ + "ShellCommandResponse", + "Context", + "ContextMessage", + "Provider", + "ModelId", + "File", + "UserPrompt", + "Environment" + ], + "key_behaviors": [ + "gathers local environment and file-list context for LLM prompts", + "renders a system prompt using a template and the environment/files", + "selects a suggest-specific provider/model when available, else uses default", + "constructs an LLM Context with a JSON Schema response format", + "parses the assistant's JSON response into ShellCommandResponse and returns command" + ], + "pitfalls": [ + { + "mistake": "Changing the ShellCommandResponse shape (field names or types) without updating schema consumers and tests.", + "consequence": "LLM responses guided by the old schema may no longer be accepted; tests that snapshot the Context or assert command parsing will fail and runtime deserialization will error.", + "prevention": "If altering the DTO ensure schema_for! and any templates or assistant expectations are updated and the tests are adjusted accordingly." + }, + { + "mistake": "Altering provider/model selection order (e.g., removing get_suggest_config usage).", + "consequence": "Command generation could start using heavier/more expensive models by default, changing costs/latency and breaking assumptions in tests that rely on suggest config behavior.", + "prevention": "Keep the explicit attempt to use get_suggest_config() first, then fall back to default provider/model logic." 
+ }, + { + "mistake": "Mutating the template name or template variables used when rendering the system prompt (forge-command-generator-prompt.md and json keys env/files).", + "consequence": "Snapshots of the captured Context used in tests will diverge; the LLM may receive different instructions and produce unexpected output.", + "prevention": "Preserve template filename and JSON structure expected by the template or update tests/snapshots in lockstep." + }, + { + "mistake": "Changing how the provider chat stream is consumed (e.g., altering into_full argument semantics).", + "consequence": "May change blocking/stream behavior and test expectations; could inadvertently change whether the full assistant message is awaited or partial streaming is used.", + "prevention": "Respect the current call sequence: chat -> into_full(false) -> await -> parse. If changing, validate streaming semantics in tests." + } + ], + "reading_guide": { + "start_here": "CommandGenerator::generate", + "key_sections": [ + "ShellCommandResponse: defines the JSON contract and schema used to instruct the model", + "CommandGenerator::generate: end-to-end orchestration for command generation (env, files, template rendering, provider selection, LLM call, parsing)", + "CommandGenerator::create_context: where the JSON schema is attached to the LLM context" + ], + "skip_unless_needed": [ + "test module setup boilerplate (MockServices implementations) if not modifying tests", + "detailed trait definitions for the injected services (look up the traits in their crates when necessary)" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_app/src/command_generator.rs::tests" + ], + "test_functions": [ + "test_generate_simple_command", + "test_generate_with_no_files", + "test_generate_fails_when_missing_tag" + ], + "example_command": "cargo test -p forge_app --test-threads=1 --lib", + "relevant_snippets": [ + { + "file": "crates/forge_app/src/command_generator.rs", + "lines": "approx. 
lines covering the tests module (contains MockServices and three tokio::test functions)", + "description": "Unit tests create a MockServices fixture, call CommandGenerator::generate, assert the returned command, and assert the captured Context via insta snapshots." + } + ] + }, + "related_files": [ + { + "path": "crates/forge_domain", + "relationship": "Provides domain types used throughout (Context, ContextMessage, ModelId, File, UserPrompt, Environment).", + "likely_co_change": true, + "reason_to_check": "If types or serialization semantics are modified in forge_domain, this file's schema generation and parsing logic must be revisited." + }, + { + "path": "crates/forge_config", + "relationship": "AppConfigService uses types and defaults from configuration crate (ModelConfig, ForgeConfig).", + "likely_co_change": true, + "reason_to_check": "Provider/model selection relies on configuration semantics (get_suggest_config/get_default_provider/get_provider_model)." + }, + { + "path": "templates/forge-command-generator-prompt.md", + "relationship": "Template rendered to build system prompt; name is referenced directly in code.", + "likely_co_change": true, + "reason_to_check": "If the template content or available template variables change, the Context content that is tested and sent to LLM changes too." + }, + { + "path": "crates/forge_app/src/lib.rs", + "relationship": "Where AppConfigService, ProviderService, EnvironmentInfra and FileDiscoveryService trait aliases or implementations may be exposed for wiring.", + "likely_co_change": false, + "reason_to_check": "When wiring real runtime services into CommandGenerator, check trait implementations/exported types here." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app", + "cargo test --workspace (if changes touch shared domain types)" + ], + "data_constants_to_check": [ + "template filename: \"forge-command-generator-prompt.md\"", + "JSON schema source: ShellCommandResponse type", + "get_suggest_config / get_default_provider / get_provider_model interactions" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "feature", + "category": "API", + "title": "Use dedicated suggest config when generating shell command suggestions", + "problem": "Command suggestion generation always used the active provider/model, which could be expensive for simple command suggestions.", + "root_cause": "No suggest config existed to select a cheaper/faster provider/model.", + "solution": "Check services.get_suggest_config(); if present use the specified provider and model, otherwise fall back to default provider/model. Added AppConfigService trait stubs for get_suggest_config/set_suggest_config in tests.", + "lesson_learned": "Allow dedicated, cheaper suggestion models to reduce cost and latency for small tasks and ensure the command generation pipeline looks up a dedicated config first.", + "commits": [ + "da37b43" + ], + "constructs": [ + "generate (command generation flow)" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/command_generator.rs::tests" + ], + "source_commits": [ + "da37b43" + ] + } + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/git_app.rs": { + "file_path": "crates/forge_app/src/git_app.rs", + "short_description": "Implements git-related flows: fetching diffs/context, generating commit messages via LLM, and performing commits.", + "category": "SOURCE_CODE", + "description": "This file encapsulates Git-related application logic used by Forge's higher-level workflows. 
It exists to centralize the behavior for generating conventional commit messages (using an LLM provider) and for executing git commits in a way that attributes the commit to the user while setting ForgeCode as the committer. The code separates concerns: obtaining git state (branch name, recent commits, staged/unstaged diffs), preparing a structured request and schema for the LLM, invoking the chat API via Services, parsing structured responses (JSON schema fallback), and finally running the git commit command with careful quoting and attribution.\n\nThe design reflects the need to interact with heterogeneous provider clients (providers/models can come from commit-specific config or an active agent), shell execution via an injected Services implementation, and retry semantics around potentially transient LLM failures. The file uses small, focused structures (CommitResult, DiffContext, CommitMessageResponse) to keep the dataflow explicit and serializable where needed. It depends on traits and types in the surrounding crate (Services, EnvironmentInfra, TemplateService, etc.) and domain helpers from forge_domain (Context, ContextMessage, ResponseFormat).", + "key_constructs": [ + { + "name": "GitApp", + "type": "struct", + "purpose": "Primary entry point struct that holds an Arc services instance and exposes commit-related methods.", + "reasoning": "Most operations require access to the runtime environment and cross-cutting helpers (execute commands, render templates, chat). Keeping services behind a type parameter makes the module testable and allows swapping implementations while preserving the same logic." + }, + { + "name": "GitAppError", + "type": "enum", + "purpose": "Domain-specific error (currently models no-changes-to-commit case).", + "reasoning": "Provides typed error for the specific situation where there are no staged/unstaged changes. It is converted into anyhow::Error where needed, so preserving this variant matters for callers expecting that semantic." 
+ }, + { + "name": "CommitResult", + "type": "struct", + "purpose": "Holds result metadata for commit operations: message, whether commit was executed, staged-file flag, and git output.", + "reasoning": "Returned to higher layers (likely UI or CLI) to report both a preview result and the final execution feedback. Consumers rely on fields like committed and git_output to inform user-facing output." + }, + { + "name": "CommitMessageDetails", + "type": "struct", + "purpose": "Internal result from message generation: message text and whether staged files exist.", + "reasoning": "Separates generation details from the commit execution result; used by commit_message() and commit() flows." + }, + { + "name": "CommitMessageResponse", + "type": "struct", + "purpose": "Schema for structured LLM response (used for JSON schema validation/formatting).", + "reasoning": "Used to request/parse structured JSON output from LLMs. Schemars is used to generate the schema sent as response_format so structured providers can return disciplined JSON. Code falls back to plain text when parsing fails." + }, + { + "name": "DiffContext", + "type": "struct", + "purpose": "Aggregates the git diff content, branch name, recent commits, staged flag, and any additional context for the LLM prompt.", + "reasoning": "Bundling diff/context into a single clonable struct enables the retry wrapper to invoke generation multiple times without re-fetching git state or recomputing context." + }, + { + "name": "truncate_diff", + "type": "function", + "purpose": "Truncates diff content to a maximum byte/char size while respecting character boundaries, returning truncated string and bool indicating truncation.", + "reasoning": "Prevents sending excessively large diffs to the LLM; truncation uses char_indices to avoid splitting UTF-8 characters." 
+ }, + { + "name": "commit_message", + "type": "function", + "purpose": "Public async method for generating a commit message without performing the commit (preview mode).", + "reasoning": "Called by CLI/TUI flows that show users the generated commit message prior to executing a commit. It delegates to generate_commit_message and returns CommitResult with committed=false.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 108, + "context": ".commit_message(max_diff_size, diff, additional_context)" + } + ] + }, + { + "name": "commit", + "type": "function", + "purpose": "Public async method that executes git commit using an already-generated message and has_staged_files flag.", + "reasoning": "Implements the specific git command invocation, setting GIT_COMMITTER_* to attribute ForgeCode as committer while preserving user author; handles escaping and combines stdout/stderr for git_output.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 115, + "context": ".commit(result.message, result.has_staged_files)" + } + ] + }, + { + "name": "generate_commit_message", + "type": "function", + "purpose": "Core orchestration: fetch git context/diff (or use piped diff), truncate, create DiffContext, and retry generate_message_from_diff per retry configuration.", + "reasoning": "This function isolates steps that are not provider-specific (git interactions, diff handling, truncation, retry orchestration) so generate_message_from_diff can focus on provider/model resolution and LLM interaction." + }, + { + "name": "fetch_git_context", + "type": "function", + "purpose": "Executes git log and git rev-parse to obtain recent commit messages and current branch name.", + "reasoning": "Gathering recent commit messages and branch name is part of the context sent to the LLM; this function wraps concurrent execution and error context." 
+ }, + { + "name": "fetch_git_diff", + "type": "function", + "purpose": "Retrieves staged and unstaged diffs, chooses staged if present, otherwise unstaged, returns diff content, size, and staged flag.", + "reasoning": "Centralizes logic that decides which diff to present to LLM and returns a typed error (GitAppError::NoChangesToCommit) when no changes exist." + }, + { + "name": "resolve_agent_provider_and_model", + "type": "function", + "purpose": "Resolves a provider and model from the active agent configuration, refreshing credentials as needed.", + "reasoning": "Used as a fallback when commit-specific provider/model is not configured or fails; abstracts the steps to acquire provider credentials and the associated model for chat calls." + }, + { + "name": "generate_message_from_diff", + "type": "function", + "purpose": "Builds the LLM Context (system and user messages), requests structured JSON via schemars schema, calls services.chat, and parses the response into a commit message.", + "reasoning": "Encapsulates the provider/model resolution, template rendering, JSON schema creation, and the final parse/fallback behavior. It also treats empty results as a retryable failure to play nice with retry logic." 
+ } + ], + "semantic_tags": [ + "git", + "commit", + "llm", + "provider", + "retry", + "shell-exec" + ], + "handles_entities": [ + "Commit", + "Diff", + "Provider", + "Model", + "Agent" + ], + "key_behaviors": [ + "generates commit messages from diffs using a provider LLM", + "fetches git context (branch and recent commits)", + "chooses staged vs unstaged diffs and errors when no changes", + "executes git commit with ForgeCode as committer", + "resolves provider and model, with commit-config fallback to agent defaults", + "retries message generation per configured retry policy" + ], + "pitfalls": [ + { + "mistake": "Remove or bypass the retry wrapper around generate_message_from_diff.", + "consequence": "Transient LLM failures or empty messages may not be retried and will cause visible failures to users; the code historically added retry behavior to handle this.", + "prevention": "Keep retry_with_config invocation intact and ensure DiffContext remains clonable for retries." + }, + { + "mistake": "Change how messages are escaped when constructing the git commit command (especially single quotes).", + "consequence": "Improper quoting leads to broken shell commands or injection vulnerabilities, causing commits to fail or execute unintended commands.", + "prevention": "Preserve the current single-quote escaping pattern (message.replace(''',''\\'')') and validate shell quoting when modifying commit invocation." + }, + { + "mistake": "Assume diff sizes are byte-length-safe when truncating UTF-8 content.", + "consequence": "Truncation at arbitrary byte offsets could produce invalid UTF-8 and panic or corrupt the prompt sent to providers.", + "prevention": "Retain char boundary truncation using char_indices instead of slicing by bytes." 
+ }, + { + "mistake": "Change the semantics of has_staged_files (e.g., assuming piped diff implies staged).", + "consequence": "Commit flags may be incorrect (using -a when unintended) and could include/exclude files unexpectedly.", + "prevention": "Respect current assumptions: piped diffs set has_staged_files=false, fetch_git_diff determines staged vs unstaged by stdout presence." + }, + { + "mistake": "Remove the fallback parsing behavior for structured JSON -> plain text.", + "consequence": "Responses from providers that don't produce the JSON schema could be rejected resulting in failures; existing code intentionally falls back to plain text.", + "prevention": "Preserve the attempt to parse CommitMessageResponse and fallback to trimmed message content when parse fails." + } + ], + "reading_guide": { + "start_here": "GitApp::generate_commit_message", + "key_sections": [ + "generate_commit_message: orchestration of git fetch, truncation, and retry", + "generate_message_from_diff: provider/model resolution, LLM prompt building, parsing", + "commit: how the actual git commit command is composed and executed", + "fetch_git_diff and fetch_git_context: concrete git command invocations and error cases" + ], + "skip_unless_needed": [ + "truncate_diff: small utility logic for truncation", + "struct/type definitions for serializable response (CommitMessageResponse) if not working with providers directly" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_app --lib", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_app/src/services.rs", + "relationship": "Defines the Services trait and concrete service interfaces used heavily by GitApp (execute, chat, render_template, get_config, etc.).", + "likely_co_change": true, + "reason_to_check": "Changes to Services signature or execute/chat method behavior will directly impact how GitApp calls git and provider APIs." 
+ }, + { + "path": "crates/forge_app/src/retry.rs", + "relationship": "Contains retry_with_config used to retry LLM calls; GitApp relies on it to handle transient provider failures.", + "likely_co_change": true, + "reason_to_check": "Modifications to retry semantics or signature require corresponding adjustments to generate_commit_message's usage." + }, + { + "path": "crates/forge_config/src/lib.rs", + "relationship": "Defines ForgeConfig including retry settings and max_commit_count referenced by GitApp.", + "likely_co_change": true, + "reason_to_check": "Config shape changes (e.g., moving max_commit_count or retry) affect fetch_git_context and retry behavior." + }, + { + "path": "crates/forge_domain/src/lib.rs", + "relationship": "Provides Context, ContextMessage, ResponseFormat and related types used to build chat requests to providers.", + "likely_co_change": true, + "reason_to_check": "Any change to how ResponseFormat or Context is constructed alters generate_message_from_diff behavior and parsing expectations." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app --lib", + "cargo test --workspace" + ], + "data_constants_to_check": [ + "forge_config.max_commit_count", + "forge_config.retry settings (used by retry_with_config)", + "services.execute semantics for shell quoting and environment variables" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "commit_history_insights": [ + { + "title": "Retry commit message generation and treat empty results as retryable failure", + "description": "A past change made DiffContext clonable and wrapped generate_message_from_diff in retry_with_config; empty generated commit messages are converted to a retryable error so the retry wrapper can attempt again. This is important historical context and explains why DiffContext derives Clone and why generate_commit_message uses a retry wrapper." 
+ } + ], + "insights": [ + { + "type": "bug_fix", + "category": "Reliability", + "title": "Retry commit message generation and treat empty results as retryable failure", + "problem": "Commit message generation could fail transiently or produce empty messages; previously there was no retry or empty-result detection.", + "root_cause": "No retry policy applied to the call that generates commit messages from diffs.", + "solution": "Make DiffContext clonable, wrap generate_message_from_diff in retry_with_config using services.get_config().retry, and treat empty commit message as retryable error (return Err::Retryable) so retry loop can act.", + "commits": [ + "86d9778" + ], + "constructs": [ + "DiffContext (made Clone)", + "generate_message_from_diff usage", + "retry_with_config invocation" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/lib.rs": { + "file_path": "crates/forge_domain/src/lib.rs", + "short_description": "Crate root that declares domain modules and re-exports the entire domain API for other crates.", + "category": "SOURCE_CODE", + "description": "This file is the public entry point for the forge_domain crate. Its main role is to declare the internal modules that implement domain types and behaviors (agent, message, conversation, model_config, provider, etc.) and to re-export their public symbols so other crates in the workspace can consume a single consolidated API. The crate centralizes the domain model for the whole project (conversations, messages, providers, tools, workspace constructs, etc.) so that other crates (CLI, services, repo, UI) import domain types from forge_domain rather than referencing multiple internal modules directly.\n\nDesign-wise, lib.rs keeps a very flat public surface by doing `pub use ...::*;` for each module, creating a stable and broad public API that other crates depend on. 
It also declares a small helper type alias (ArcSender) used to send ChatResponse results across tokio mpsc channels. The file intentionally centralizes exports rather than exposing internal module structure; this pattern enforces a single source-of-truth domain crate and simplifies imports across the workspace.\n\nWhen making changes, treat this file as the authoritative public API gate for domain types. Re-ordering of declarations is present (for example, pub use fuzzy_search::* appears before the final `mod fuzzy_search;` declaration) \u2014 Rust accepts this ordering, but it reflects that this file is primarily about re-exports rather than implementation detail ordering. Any changes that remove or rename re-exports will be breaking for downstream consumers and must be treated as API changes. Also note the ArcSender alias type is named `ArcSender` but is an alias of `tokio::sync::mpsc::Sender>` (no Arc involved), so its name is part of the public surface and used elsewhere as a domain type alias.", + "key_constructs": [ + { + "name": "ArcSender", + "type": "constant", + "purpose": "Type alias for a tokio mpsc Sender that transmits anyhow::Result values.", + "reasoning": "ArcSender is a small, public type alias used across the codebase wherever asynchronous components need to return ChatResponse results via channels. Its exact concrete type (tokio::sync::mpsc::Sender>) is relied on by producers/consumers, so renaming or changing the alias changes compile-time signatures across many modules." + }, + { + "name": "line_numbers", + "type": "constant", + "purpose": "Public submodule exposed as `pub mod line_numbers;` and re-exported for use by other crates.", + "reasoning": "The line_numbers module is explicitly exposed (pub mod) and re-exported into the crate's public API. Because it is exported as a module here, external callers import domain::line_numbers. 
Any structural changes to that module should be reflected in this file only if you change how it should be exposed publicly." + }, + { + "name": "fuzzy_search", + "type": "constant", + "purpose": "Internal module that implements fuzzy search utilities and is re-exported into the crate API.", + "reasoning": "The fuzzy_search module is declared (mod fuzzy_search;) and its symbols are re-exported via `pub use fuzzy_search::*;`. Downstream code may depend on the exact symbols exposed by this module. The declaration appears after the pub use list, which compiles in Rust but is a stylistic quirk to be aware of when adjusting module declarations." + } + ], + "semantic_tags": [ + "domain", + "reexports", + "api-surface", + "types", + "channel", + "modules" + ], + "handles_entities": [ + "Conversation", + "ChatResponse", + "Message", + "Agent", + "ModelConfig", + "Provider", + "Workspace" + ], + "key_behaviors": [ + "exposes domain types and helpers for the rest of the workspace", + "declares internal modules where domain implementations live", + "provides a shared Sender alias for streaming chat responses" + ], + "pitfalls": [ + { + "mistake": "Removing or renaming a `pub use ...::*` re-export without considering downstream consumers.", + "consequence": "Breaks compilations in dependent crates across the workspace and constitutes an API-breaking change.", + "prevention": "Treat the re-export list as the authoritative public API; ensure downstream crates are updated or maintain backward-compatible re-exports when refactoring." + }, + { + "mistake": "Assuming ArcSender implies an Arc-wrapped sender or reference-counted semantics.", + "consequence": "Misunderstanding may lead to incorrect synchronization/clone assumptions and API mismatches in calling code.", + "prevention": "Respect the concrete alias type: tokio::sync::mpsc::Sender>. Any change to this type alias will affect all code that uses it." 
+ }, + { + "mistake": "Reordering or deleting module declarations thinking order matters more than it does.", + "consequence": "Although Rust accepts different declaration orders, reordering can confuse readers and may hide intentions; adding new modules without updating re-exports can make symbols inaccessible externally.", + "prevention": "Keep a consistent structure: declare modules and explicitly re-export the public API. When adding modules, add matching `mod` and `pub use` entries as intended for external visibility." + }, + { + "mistake": "Adding heavy dependencies or expensive initialization in this root file.", + "consequence": "Increases compile times and can pull in transitive dependencies for all consumers of the domain crate.", + "prevention": "Place implementation-heavy code inside submodules; keep lib.rs a lightweight aggregator of modules and re-exports." + } + ], + "reading_guide": { + "start_here": "ArcSender", + "key_sections": [ + "pub use ...::* lines: the canonical public API exported by the domain crate", + "pub mod line_numbers; and pub use line_numbers::*: modules explicitly exposed as submodules", + "Type alias ArcSender: small shared alias used across async messaging in the project" + ], + "skip_unless_needed": [ + "The long list of pub use lines can be skimmed; focus on the modules relevant to your change", + "Module declaration order (e.g., mod fuzzy_search at the end) is a stylistic detail unless you're modifying module boundaries" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_domain", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_main/src/main.rs", + "relationship": "Consumes many domain types (conversations, messages, providers) to build CLI/TUI flows; changes to the public API here will affect main's imports.", + "likely_co_change": true, + "reason_to_check": "If a domain type or re-export is renamed/removed, update imports in main.rs 
and CLI code." + }, + { + "path": "crates/forge_repo", + "relationship": "Persistence and conversation storage use domain types such as Conversation and ChatResponse.", + "likely_co_change": true, + "reason_to_check": "Persistence code depends on domain shapes and serde contracts; changes to domain types can affect repository serialization and proto bindings." + }, + { + "path": "crates/forge_config", + "relationship": "ModelConfig and other domain configuration types are re-exported from this crate and used by config loading logic.", + "likely_co_change": true, + "reason_to_check": "Renaming or altering ModelConfig or model-related types requires checking configuration deserialization and defaults." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_domain --all-features", + "cargo test --workspace", + "cargo clippy --workspace --all-targets -- -D warnings" + ], + "data_constants_to_check": [], + "owns_authoritative_data": true, + "public_api_surface": true + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/node.rs": { + "file_path": "crates/forge_domain/src/node.rs", + "short_description": "Domain types for workspace indexing, code search nodes, and sync progress events", + "category": "SOURCE_CODE", + "description": "This file defines the core domain primitives used by the workspace indexing and codebase search subsystems. It contains enums and structs that represent sync progress events (SyncProgress), authentication state for the workspace indexing service (WorkspaceAuth), payloads for file uploads and deletions (FileRead, FileUpload, FileDeletion), search parameters (SearchParams), identifier wrappers (UserId, NodeId), file/git/workspace metadata (FileNode, FileRef, GitInfo, WorkspaceInfo), upload statistics (FileUploadInfo, FileUploadResponse) and the search result representation (Node, NodeData, CodebaseQueryResult, CodebaseSearchResults). 
Many types are serde Serialize/Deserialize so they are intended to cross crate and network boundaries (JSON API or RPC). The file also uses small, deliberate wrapper types (UserId, NodeId) instead of primitives to provide strong typing and stable string/display formats used in tests and client/server communication.\n\nThese types are the authoritative domain schema used by indexing clients, the workspace server, and search consumers. Particular design choices visible in the code: NodeData is a tagged serde enum (#[serde(tag = \"type\", rename_all = \"snake_case\")]) to enable explicit runtime type discrimination across the wire; Node contains a flattened node field so the node data merges with outer Node JSON when serialized; SearchParams uses derive_setters with strip_option and into so callers can fluently build queries while preserving lifetimes and optional flags; many small wrappers implement Display and From/AsRef to make conversions ergonomic while preserving canonical serialization formats. There are small helper impls (SyncProgress::weight, FileUploadInfo Add impl, WorkspaceAuth Into) that encode cross-cutting semantics used by callers.\n\nEdits to this file will affect serialization shape, public API surface (struct and enum field names), and conversions relied upon by other crates (forge_repo, forge_services, embedding/indexing clients). The module also contains unit tests (cfg(test)) that assert roundtrip behavior for UserId/WorkspaceId and correctness of SearchParams setters; these tests act as guards for the formatting and builder semantics of the types here.", + "key_constructs": [ + { + "name": "SyncProgress", + "type": "class", + "purpose": "Represents progress events emitted during workspace indexing (phases and numeric progress).", + "reasoning": "This enum encodes indexer lifecycle stages and progress tracking; its variants are consumed by UI/telemetry and may be serialized or pattern-matched by sync orchestrators. 
The weight() method produces a 0-100 progress snapshot for Syncing variants; callers rely on its numeric semantics.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 196, + "context": ") -> Result>>;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 347, + "context": ") -> Result>> {" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 1, + "context": "use forge_domain::SyncProgress;" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 14, + "context": "impl SyncProgressDisplay for SyncProgress {" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 89, + "context": "let fixture = SyncProgress::Starting;" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 97, + "context": "let fixture = SyncProgress::DiffComputed { added: 0, deleted: 0, modified: 0 };" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 105, + "context": "let fixture = SyncProgress::DiffComputed { added: 3, deleted: 1, modified: 2 };" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 113, + "context": "let fixture = SyncProgress::Syncing { current: 1, total: 1 };" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 121, + "context": "let fixture = SyncProgress::Syncing { current: 5, total: 10 };" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 130, + "context": "SyncProgress::Completed { uploaded_files: 0, total_files: 100, failed_files: 0 };" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 139, + "context": "SyncProgress::Completed { uploaded_files: 5, total_files: 100, failed_files: 0 };" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 148, + "context": "SyncProgress::Completed { uploaded_files: 5, total_files: 100, failed_files: 3 };" + }, + { + "file": "crates/forge_main/src/sync_display.rs", + "line": 158, + "context": "let fixture = SyncProgress::DiscoveringFiles 
{" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 10, + "context": "ResultStream, Scope, SearchParams, SyncProgress, SyntaxError, Template, ToolCallFull," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 292, + "context": ") -> anyhow::Result>>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 1028, + "context": ") -> anyhow::Result>> {" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 9, + "context": "AuthCredential, AuthDetails, ProviderId, ProviderRepository, SyncProgress, UserId, WorkspaceId," + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 59, + "context": "E: Fn(SyncProgress) -> Fut + Send + Sync," + } + ] + }, + { + "name": "WorkspaceAuth", + "type": "class", + "purpose": "Stores a persistent API token associated with a user for the indexing service.", + "reasoning": "Serialized to persist authentication for the workspace indexing service; implements From for crate::AuthDetails to interoperate with authentication abstractions elsewhere.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 227, + "context": "async fn create_auth_credentials(&self) -> Result;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 382, + "context": "async fn create_auth_credentials(&self) -> Result {" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 530, + "context": "async fn authenticate(&self) -> anyhow::Result {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 11, + "context": "ToolOutput, WorkspaceAuth, WorkspaceId, WorkspaceInfo," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 323, + "context": "async fn init_auth_credentials(&self) -> anyhow::Result;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 1072, + "context": "async fn init_auth_credentials(&self) -> anyhow::Result {" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 385, + 
"context": "async fn init_auth_credentials(&self) -> Result {" + }, + { + "file": "crates/forge_domain/src/repo.rs", + "line": 9, + "context": "SearchMatch, Skill, Snapshot, WorkspaceAuth, WorkspaceId," + }, + { + "file": "crates/forge_domain/src/repo.rs", + "line": 117, + "context": "async fn authenticate(&self) -> anyhow::Result;" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 8, + "context": "ApiKey, FileUploadInfo, Node, UserId, WorkspaceAuth, WorkspaceId, WorkspaceIndexRepository," + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 17, + "context": "impl TryFrom for WorkspaceAuth {" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 25, + "context": "Ok(WorkspaceAuth { user_id, token, created_at: Utc::now() })" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 118, + "context": "async fn authenticate(&self) -> Result {" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 20, + "context": "fn try_from(response: CreateApiKeyResponse) -> Result {" + } + ] + }, + { + "name": "FileRead", + "type": "class", + "purpose": "Represents a single file's path and UTF-8 content for upload.", + "reasoning": "Used as the unit of file transfer to the workspace server and must remain UTF-8; it is simple but fundamental for uploads and tests may expect its fields and construction semantics.", + "callers": [ + { + "file": "crates/forge_services/src/sync.rs", + "line": 298, + "context": "let file = forge_domain::FileRead::new(path_str, content);" + } + ] + }, + { + "name": "CodeBase", + "type": "class", + "purpose": "Generic wrapper that binds a user_id and workspace_id to arbitrary payload data.", + "reasoning": "Used as a convenient container for codebase-scoped operations (search, uploads, deletions) so payloads carry context. 
Keeping this wrapper stable is important for API boundaries.", + "callers": [ + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 267, + "context": "forge_domain::CodeBase::new(user_id, workspace.workspace_id.clone(), params);" + }, + { + "file": "crates/forge_services/src/sync.rs", + "line": 232, + "context": "forge_domain::CodeBase::new(self.user_id.clone(), self.workspace_id.clone(), ());" + }, + { + "file": "crates/forge_services/src/sync.rs", + "line": 252, + "context": "forge_domain::CodeBase::new(self.user_id.clone(), self.workspace_id.clone(), paths);" + }, + { + "file": "crates/forge_services/src/sync.rs", + "line": 299, + "context": "let upload = forge_domain::CodeBase::new(" + } + ] + }, + { + "name": "SearchParams", + "type": "class", + "purpose": "Parameters for a code search query (query string, limits, use case, filename filters).", + "reasoning": "Exposes builder-style setters (derive_setters) and uses a lifetime for the query string; tests exercise setter behavior. 
Changing setter attributes or field names will affect callers and serialized form.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 202, + "context": "params: forge_domain::SearchParams<'_>," + }, + { + "file": "crates/forge_app/src/tool_executor.rs", + "line": 198, + "context": "forge_domain::SearchParams::new(&search_query.query, &search_query.use_case)" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 354, + "context": "params: forge_domain::SearchParams<'_>," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 10, + "context": "ResultStream, Scope, SearchParams, SyncProgress, SyntaxError, Template, ToolCallFull," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 298, + "context": "params: SearchParams<'_>," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 1035, + "context": "params: SearchParams<'_>," + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 257, + "context": "params: forge_domain::SearchParams<'_>," + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 632, + "context": "forge_domain::SearchParams::new(&query, &use_case).limit(limit);" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3872, + "context": "params: forge_domain::SearchParams<'_>," + } + ] + }, + { + "name": "UserId", + "type": "class", + "purpose": "Opaque wrapper around a UUID used to identify a user for codebase operations.", + "reasoning": "Encapsulates format/serialization and provides generation & string parsing helpers used throughout the codebase; test roundtrips ensure Display/parse behavior is preserved.", + "callers": [ + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 9, + "context": "AuthCredential, AuthDetails, ProviderId, ProviderRepository, SyncProgress, UserId, WorkspaceId," + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 98, + "context": "async fn get_workspace_credentials(&self) -> 
Result<(forge_domain::ApiKey, UserId)> {" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 114, + "context": "let user_id = UserId::from_string(user_id_str.as_str())?;" + }, + { + "file": "crates/forge_services/src/sync.rs", + "line": 6, + "context": "use forge_domain::{ApiKey, FileHash, SyncProgress, UserId, WorkspaceId, WorkspaceIndexRepository};" + }, + { + "file": "crates/forge_services/src/sync.rs", + "line": 55, + "context": "user_id: UserId," + }, + { + "file": "crates/forge_services/src/sync.rs", + "line": 68, + "context": "user_id: UserId," + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 8, + "context": "ApiKey, FileUploadInfo, Node, UserId, WorkspaceAuth, WorkspaceId, WorkspaceIndexRepository," + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 22, + "context": "let user_id = UserId::from_string(&user_id).context(\"Invalid user_id returned from API\")?;" + } + ] + }, + { + "name": "NodeId", + "type": "class", + "purpose": "Opaque identifier for nodes in the code graph (files, chunks, notes).", + "reasoning": "Used to uniquely reference graph entities; provides From, From<&str>, AsRef to make conversions ergonomic; many systems will persist or compare NodeId as a string.", + "callers": [ + { + "file": "crates/forge_app/src/search_dedup.rs", + "line": 11, + "context": "use forge_domain::{Node, NodeId};" + }, + { + "file": "crates/forge_app/src/search_dedup.rs", + "line": 82, + "context": "let mut best_scores: HashMap = HashMap::new();" + } + ] + }, + { + "name": "FileUploadInfo", + "type": "class", + "purpose": "Holds numeric statistics about uploaded nodes and relations.", + "reasoning": "Aggregatable summary used in FileUploadResponse; implements Add for accumulation which is likely used when combining results from multiple batches.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 548, + "context": ") -> anyhow::Result {" + }, + { + "file": 
"crates/forge_domain/src/repo.rs", + "line": 131, + "context": ") -> anyhow::Result;" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 8, + "context": "ApiKey, FileUploadInfo, Node, UserId, WorkspaceAuth, WorkspaceId, WorkspaceIndexRepository," + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 157, + "context": ") -> Result {" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 185, + "context": "Ok(FileUploadInfo::new(" + } + ] + }, + { + "name": "Node", + "type": "class", + "purpose": "Wrapper for a NodeData variant with optional ranking metadata (relevance/distance).", + "reasoning": "Search results are returned as Node so UI or callers can access the underlying NodeData and separate out scoring; the node field is marked #[serde(flatten)] which affects JSON shape\u2014important for compatibility.", + "callers": [ + { + "file": "crates/forge_api/src/api.rs", + "line": 203, + "context": ") -> Result>;" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 355, + "context": ") -> Result> {" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 556, + "context": ") -> anyhow::Result> {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 809, + "context": "use forge_domain::{CodebaseQueryResult, CodebaseSearchResults, FileChunk, Node, NodeData};" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 822, + "context": "pub fn chunk_node(file_path: &str, content: &str, start_line: u32) -> Node {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 828, + "context": "Node {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 845, + "context": "nodes: Vec," + }, + { + "file": "crates/forge_app/src/search_dedup.rs", + "line": 11, + "context": "use forge_domain::{Node, NodeId};" + }, + { + "file": "crates/forge_app/src/search_dedup.rs", + "line": 27, + "context": "fn new(query_idx: usize, result: &Node) -> Self {" + }, + { + 
"file": "crates/forge_app/src/search_dedup.rs", + "line": 80, + "context": "pub fn deduplicate_results(results: &mut [Vec]) {" + }, + { + "file": "crates/forge_app/src/search_dedup.rs", + "line": 113, + "context": "use forge_domain::{Node, NodeData};" + }, + { + "file": "crates/forge_app/src/search_dedup.rs", + "line": 119, + "context": "fn result(node_id: &str) -> Node {" + }, + { + "file": "crates/forge_app/src/search_dedup.rs", + "line": 120, + "context": "Node {" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 9, + "context": "FileStatus, Image, McpConfig, McpServers, Model, ModelId, Node, Provider, ProviderId," + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 299, + "context": ") -> anyhow::Result>;" + }, + { + "file": "crates/forge_app/src/services.rs", + "line": 1036, + "context": ") -> anyhow::Result> {" + }, + { + "file": "crates/forge_services/src/context_engine.rs", + "line": 258, + "context": ") -> Result> {" + }, + { + "file": "crates/forge_domain/src/repo.rs", + "line": 138, + "context": ") -> anyhow::Result>;" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 8, + "context": "ApiKey, FileUploadInfo, Node, UserId, WorkspaceAuth, WorkspaceId, WorkspaceIndexRepository," + } + ] + }, + { + "name": "NodeData", + "type": "class", + "purpose": "Enum of the different possible node payloads (FileChunk, File, FileRef, Note, Task).", + "reasoning": "Tagged serde enum (type field) used across the wire to discriminate node kinds. 
Variants carry only the fields relevant to that node kind; changing tag name or rename_all would break interoperability.", + "callers": [ + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 237, + "context": "forge_domain::NodeData::FileChunk(forge_domain::FileChunk {" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 245, + "context": "forge_domain::NodeData::File(forge_domain::FileNode {" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 252, + "context": "forge_domain::NodeData::FileRef(forge_domain::FileRef {" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 258, + "context": "forge_domain::NodeData::Note(forge_domain::Note { content: note.content })" + }, + { + "file": "crates/forge_repo/src/context_engine.rs", + "line": 261, + "context": "forge_domain::NodeData::Task(forge_domain::Task { task: task.task })" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 424, + "context": "if let forge_domain::NodeData::FileChunk(file_chunk) = &data.node {" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 809, + "context": "use forge_domain::{CodebaseQueryResult, CodebaseSearchResults, FileChunk, Node, NodeData};" + }, + { + "file": "crates/forge_app/src/operation.rs", + "line": 830, + "context": "node: NodeData::FileChunk(FileChunk {" + }, + { + "file": "crates/forge_app/src/search_dedup.rs", + "line": 113, + "context": "use forge_domain::{Node, NodeData};" + }, + { + "file": "crates/forge_app/src/search_dedup.rs", + "line": 122, + "context": "node: NodeData::FileChunk(forge_domain::FileChunk {" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3890, + "context": "forge_domain::NodeData::FileChunk(chunk) => {" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3899, + "context": "forge_domain::NodeData::File(file) => {" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3902, + "context": "forge_domain::NodeData::FileRef(file_ref) 
=> {" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3906, + "context": "forge_domain::NodeData::Note(note) => {" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 3909, + "context": "forge_domain::NodeData::Task(task) => {" + } + ] + } + ], + "semantic_tags": [ + "indexing", + "search", + "serialization", + "identifiers", + "telemetry" + ], + "handles_entities": [ + "UserId", + "WorkspaceId", + "Node", + "FileNode", + "FileChunk" + ], + "key_behaviors": [ + "represents sync lifecycle events for the indexer", + "serializes domain types for API exchange", + "wraps search results with ranking metadata", + "aggregates upload statistics across batches" + ], + "pitfalls": [ + { + "mistake": "Altering serde attributes (tag names, rename rules, or #[serde(flatten)])", + "consequence": "Breaks client/server communication and persisted data formats; other crates and services expect the current JSON shape.", + "prevention": "Respect existing serde annotations; if schema changes are necessary, versioning or compatibility layers must be considered." + }, + { + "mistake": "Changing Display or parsing behavior for UserId/NodeId (to_string/from_string)", + "consequence": "Unit tests (roundtrip tests) and any persisted identifiers will fail to parse, causing runtime errors and inconsistent IDs.", + "prevention": "Keep Display/parse behavior stable; update all dependent tests and consumers when making breaking changes." + }, + { + "mistake": "Modifying SearchParams lifetime/Setters behavior or field names", + "consequence": "Callers that rely on fluent setters, strip_option semantics, or the exact field names will break compile-time or change serialized payloads.", + "prevention": "Preserve the Setters derive attributes and the field names; update callers and tests together if changing." 
+ }, + { + "mistake": "Changing SyncProgress::weight numeric calculation or return type", + "consequence": "UI progress bars and telemetry that rely on the returned Option semantics may display incorrect progress.", + "prevention": "Retain current behavior or ensure downstream consumers are updated in lockstep." + } + ], + "reading_guide": { + "start_here": "SyncProgress", + "key_sections": [ + "SyncProgress: captures sync lifecycle and progress weights (used by UI & telemetry)", + "WorkspaceAuth: token storage and conversion to crate::AuthDetails for auth plumbing", + "SearchParams: builder pattern with Setters and lifetime considerations for queries", + "Node / NodeData: canonical search result representation and serde flatten/tag semantics" + ], + "skip_unless_needed": [ + "small POD structs (FileRead, FileNode, FileRef, Note, Task) which are straightforward data holders" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_domain/src/node.rs (inline tests)" + ], + "test_functions": [ + "test_user_id_roundtrip", + "test_workspace_id_roundtrip", + "test_search_params_with_file_extension", + "test_search_params_with_multiple_file_extensions", + "test_search_params_without_file_extension" + ], + "example_command": "cargo test -p forge_domain -q", + "relevant_snippets": [ + { + "file": "crates/forge_domain/src/node.rs", + "lines": "cfg(test) module (tests for ID roundtrip and SearchParams setter behavior)", + "description": "Inline unit tests that verify generation/parsing roundtrips for UserId/WorkspaceId and the setter/builder semantics for SearchParams (these guard Display/parse and derive_setters behavior)." 
+ } + ] + }, + "related_files": [ + { + "path": "crates/forge_domain/src/id.rs", + "relationship": "Likely contains WorkspaceId and other identifier helpers referenced here (UserId/WorkspaceId roundtrip tests depend on it).", + "likely_co_change": true, + "reason_to_check": "Modifying ID formats, Display, or parsing will need coordinated changes in id.rs to keep roundtrips and serde compatibility intact." + }, + { + "path": "crates/forge_repo/src/sync.rs", + "relationship": "Sync orchestration uses SyncProgress events and FileRead/FileUpload types when interacting with the workspace server.", + "likely_co_change": true, + "reason_to_check": "Changes to SyncProgress variants, FileRead shape, or FileUploadInfo semantics affect how syncing and reporting are performed." + }, + { + "path": "crates/forge_services/src/indexing_client.rs", + "relationship": "Client code that sends FileUpload/FileDeletion and receives FileUploadResponse/WorkspaceInfo; must preserve serialization.", + "likely_co_change": true, + "reason_to_check": "Any change to serde shapes or field names will require updating the client and server request/response handling." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_domain", + "cargo test --workspace (if you changed serde shapes used externally)" + ], + "data_constants_to_check": [], + "owns_authoritative_data": true, + "public_api_surface": true + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/app.rs": { + "file_path": "crates/forge_app/src/app.rs", + "short_description": "Application-layer orchestrator that runs chat flows, compaction, and model/tool queries.", + "category": "SOURCE_CODE", + "description": "This file implements ForgeApp, the high-level application glue that executes chat requests and related operations (compaction, tool listing, model queries). 
It exists to centralize the chat flow which composes domain objects (Conversation, Context) with infra services (Service trait implementations) and the Orchestrator. The chat() method is the core: it loads the conversation, reads configuration and environment data, resolves agent/provider/model/tool definitions for the active agent, injects system and user prompts, applies a number of pre-send transformations (changed-files notice, metrics, tunables), builds an Orchestrator with lifecycle hooks, and returns a streaming response (MpscStream) that runs the orchestrator and always persists the resulting conversation after execution.\n\nDesign-wise, ForgeApp is generic over S where S: Services + EnvironmentInfra, so it delegates environment and persistence details to an injected services implementation. The file also contains compact_conversation which performs compaction of an existing conversation's context using a Compactor and persists the compacted conversation; and convenience methods to list tools and fetch models (single provider and all providers). The build_template_config helper maps ForgeConfig fields into a domain TemplateConfig used by SystemPrompt/template rendering. This file is the application boundary between request-level orchestration and lower-level services/providers/tools, so changes here must respect service abstractions, conversation persistence semantics, and the streaming behavior of chat responses.", + "key_constructs": [ + { + "name": "build_template_config", + "type": "function", + "purpose": "Convert ForgeConfig fields to forge_domain::TemplateConfig used for template rendering and tool description limits.", + "reasoning": "Other parts of the chat flow (SystemPrompt) expect domain-layer TemplateConfig. 
If config keys change, this mapping must be updated here to keep domain behavior consistent.", + "callers": [ + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 13, + "context": "use crate::app::build_template_config;" + }, + { + "file": "crates/forge_app/src/orch_spec/orch_runner.rs", + "line": 107, + "context": ".template_config(build_template_config(&setup.config))" + } + ] + }, + { + "name": "ForgeApp", + "type": "class", + "purpose": "Application struct encapsulating services and a ToolRegistry to run chat flows and related operations.", + "reasoning": "ForgeApp centralizes orchestration logic and depends on a Services+EnvironmentInfra implementation. Edits here affect how chat flows are composed and how infra is called.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 10, + "context": "FileDiscoveryService, ForgeApp, GitApp, GrpcInfra, McpConfigManager, McpService," + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 35, + "context": "fn app(&self) -> ForgeApp" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 40, + "context": "ForgeApp::new(self.services.clone())" + }, + { + "file": "crates/forge_app/src/agent_executor.rs", + "line": 79, + "context": "let app = crate::ForgeApp::new(self.services.clone());" + } + ] + }, + { + "name": "new", + "type": "function", + "purpose": "Constructor for ForgeApp, initializes the ToolRegistry with the provided services.", + "reasoning": "ToolRegistry is created from services.clone(); changes to initialization order or ToolRegistry expectations must be coordinated with this constructor.", + "callers": [ + { + "file": "crates/forge_app/src/agent_executor.rs", + "line": 79, + "context": "let app = crate::ForgeApp::new(self.services.clone());" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 40, + "context": "ForgeApp::new(self.services.clone())" + } + ] + }, + { + "name": "chat", + "type": "function", + "purpose": "Execute a 
chat request for a given agent_id and return an async MpscStream of ChatResponse results.", + "reasoning": "This is the central request handler. It performs conversation lookup, config reading, agent and provider resolution, prompt insertion, hook construction, Orchestrator creation, and spawns a stream that runs the orchestrator and persists the conversation. Changes must maintain error propagation, persistence guarantees, and streaming behavior.", + "callers": [ + { + "file": "crates/forge_app/src/agent_executor.rs", + "line": 81, + "context": ".chat(" + }, + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 137, + "context": "self.app().chat(agent_id, chat).await" + } + ] + }, + { + "name": "compact_conversation", + "type": "function", + "purpose": "Compact a conversation's context using the Compactor and persist the updated conversation; returns compaction metrics.", + "reasoning": "Compaction uses agent compact settings and environment. It must handle absent conversations or absent context gracefully and return metrics. 
Persistence and token/message accounting are important here.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 154, + "context": ".compact_conversation(agent_id, conversation_id)" + } + ] + }, + { + "name": "list_tools", + "type": "function", + "purpose": "Return a ToolsOverview from the ToolRegistry.", + "reasoning": "Delegates to tool_registry.tools_overview(); tool registry behavior and async semantics are relevant when changing this path.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 80, + "context": "self.app().list_tools().await" + } + ] + }, + { + "name": "get_models", + "type": "function", + "purpose": "Fetch available models for the default provider, performing automatic credential refresh.", + "reasoning": "Resolves default provider via AgentProviderResolver and refreshes credentials via ProviderAuthService before querying services.models.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 84, + "context": "self.app().get_models().await" + } + ] + }, + { + "name": "get_all_provider_models", + "type": "function", + "purpose": "Concurrent fetch of models from all configured providers; returns ProviderModels per provider, skipping failing providers.", + "reasoning": "Builds one future per configured provider and join_all collects successes, silently skipping failures. 
Changes here affect concurrency, error handling, and the shape of returned results.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 88, + "context": "self.app().get_all_provider_models().await" + } + ] + } + ], + "semantic_tags": [ + "orchestration", + "conversation", + "agents", + "providers", + "streaming", + "persistence" + ], + "handles_entities": [ + "Conversation", + "Context", + "Agent", + "Provider", + "Model", + "ToolDefinition", + "ChatRequest", + "ChatResponse", + "CompactionResult", + "ProviderModels", + "ToolsOverview" + ], + "key_behaviors": [ + "executes chat flows and streams ChatResponse messages", + "resolves agent-specific provider, models, and tool definitions", + "inserts system and user prompts into a conversation", + "compacts conversation contexts and persists results", + "fetches available models from configured providers concurrently", + "lists available tools from the ToolRegistry" + ], + "pitfalls": [ + { + "mistake": "Assume find_conversation always returns Some or unwrap the Result without propagating errors", + "consequence": "Hidden IO/DB errors can be masked, or the code may panic (previous bug) instead of returning a controlled domain error.", + "prevention": "Respect the Result and Option returns from services.find_conversation and propagate errors or return ConversationNotFound when None." + }, + { + "mistake": "Change streaming closure behavior (MpscStream::spawn) such that the conversation isn't saved after orchestration runs", + "consequence": "Conversation state would not be persisted, causing loss of messages or incorrect conversation state.", + "prevention": "Maintain the pattern that dispatch_result and save_result are both considered and errors sent to the stream; always call services.upsert_conversation with the conversation from orch.get_conversation()." 
+ }, + { + "mistake": "Modify agent/provider/model resolution without updating consumer call sites", + "consequence": "Agents may use incorrect provider/model or credential refresh logic, causing model lookup or tool resolution failures.", + "prevention": "Respect AgentProviderResolver and ProviderAuthService flows; ensure get_provider(Some(agent.id)) and refresh logic remain consistent with how models() is called." + }, + { + "mistake": "Assume get_all_provider_models returns all providers and panics on error", + "consequence": "Some providers may fail silently; treating the result as exhaustive may lead to missing provider coverage or misleading UI.", + "prevention": "Handle the fact that failing providers are filtered out and ensure caller tolerates missing providers." + }, + { + "mistake": "Ignore configuration re-fetch semantics (calling services.get_config() only once)", + "consequence": "Runtime behavior may use stale configuration (limits, tunables) if config is expected to be current per-call.", + "prevention": "Follow existing pattern of calling services.get_config() at the points where up-to-date config is required (chat start, compaction)." 
+ } + ], + "reading_guide": { + "start_here": "chat", + "key_sections": [ + "build_template_config: maps ForgeConfig into domain TemplateConfig used by SystemPrompt", + "chat: end-to-end chat flow including conversation lookup, agent/provider/model resolution, system/user prompt injection, hook/orchestrator construction and streaming", + "the stream spawn closure inside chat: how orchestration is run asynchronously and how conversation persistence and error handling are performed", + "compact_conversation: compaction flow, metrics calculation, and persistence" + ], + "skip_unless_needed": [ + "the small helper methods (list_tools, get_models, get_all_provider_models) if you are only debugging chat flow internals" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_app -- --nocapture", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_config/src/lib.rs", + "relationship": "Contains ForgeConfig schema and defaults used by build_template_config and other config reads; co-change likely when config fields (max_read_lines, max_image_size_bytes, etc.) change.", + "likely_co_change": true, + "reason_to_check": "If config field names or semantics change, update build_template_config mapping and places calling services.get_config()." + }, + { + "path": "crates/forge_repo/src/lib.rs", + "relationship": "Persistence layer for conversations; services.find_conversation and services.upsert_conversation interact with this crate.", + "likely_co_change": true, + "reason_to_check": "Modifications to conversation schema, persistence semantics, or error handling must be reflected in chat() and compact_conversation flows." 
+ }, + { + "path": "crates/forge_services/src/lib.rs", + "relationship": "Contains Services trait, ProviderAuthService, AgentRegistry and other service implementations injected into ForgeApp.", + "likely_co_change": true, + "reason_to_check": "Changes to service trait signatures or provider auth behavior affect agent/provider/model resolution and credential refresh used by get_models/get_all_provider_models and chat." + }, + { + "path": "crates/forge_app/src/orch.rs", + "relationship": "Orchestrator implementation used by ForgeApp::chat; Orchestrator.run and orch.get_conversation are relied upon heavily.", + "likely_co_change": true, + "reason_to_check": "If orchestrator API or lifecycle hooks change, update hook construction and how the stream closure invokes orchestration and saves the conversation." + }, + { + "path": "crates/forge_app/src/tool_registry.rs", + "relationship": "ToolRegistry is constructed in ForgeApp::new and used to enumerate tool definitions in chat and list_tools.", + "likely_co_change": true, + "reason_to_check": "Tool resolution logic, ToolDefinition types, or overview format changes require changes in chat and list_tools." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app", + "cargo test --workspace", + "cargo test -p forge_app -- --nocapture" + ], + "data_constants_to_check": [ + "forge_config: max_read_lines", + "forge_config: max_read_chars / max_read_lines", + "forge_config: max_image_size_bytes", + "forge_config: max_stdout_prefix_lines/suffix/line_chars", + "agent: max_tool_failure_per_turn" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Return an error when conversation is not found", + "problem": "Previously code used unwrap_or_default() then expect() on find_conversation, which masked errors and panicked rather than returning a controlled error.", + "root_cause": "A prior refactor used unwrap_or_default() which hid the underlying Result and left an expectation that panicked in production paths.", + "solution": "Replace unwrap_or_default()/expect with proper propagation: .await? and .ok_or_else(... ConversationNotFound).", + "lesson_learned": "Propagate underlying IO/DB errors rather than forcing panics; use domain errors to signal 'not found' conditions.", + "commits": [ + "050c476" + ], + "constructs": [ + "ForgeApp::chat (conversation lookup)" + ] + }, + { + "type": "refactoring", + "category": "Configuration", + "title": "ForgeApp reads config via services/get_config or injected config", + "problem": "Earlier refactor threaded config through many constructors; later fixes moved to re-reading via infra or injecting config where appropriate.", + "root_cause": "Trade-off between passing config copies and ensuring up-to-date values.", + "solution": "ForgeApp constructor was updated to accept a ForgeConfig in some versions and to call services.get_config() in other places. 
The code was adjusted to consistently obtain current config when needed (retry_config, max_parallel_file_reads, etc.).", + "lesson_learned": "For values that affect runtime behavior (timeouts, limits), prefer fetching from infra/get_config at call sites or inject an up-to-date config object explicitly at creation time.", + "commits": [ + "5bd0b94", + "7e8a51d" + ], + "constructs": [ + "ForgeApp::new", + "ForgeApp::chat", + "ForgeApp::commit_message" + ] + }, + { + "type": "breaking_change", + "category": "API", + "title": "ForgeApp.chat now accepts an AgentId and resolves per-agent provider/model", + "problem": "Chat flow needed to support sub-agent and per-agent provider/model selection; previous chat() used global active agent via services.", + "root_cause": "Shift from global to session-scoped agent model and per-agent provider support required changing chat() signature and internal agent/provider/model resolution.", + "solution": "Change ForgeApp::chat to accept agent_id: AgentId param, resolve the active_model via get_model(Some(agent_id)), find configured agent by id, get agent-specific provider via get_provider(Some(agent.id)) and fetch models via services.models(agent_provider). Implement helper get_provider(agent: Option) and get_model(agent_id: Option) that fall back to default provider/model when agent-specific values don't exist.", + "commit": [ + "b22ee2e", + "d9207f" + ], + "constructs": [ + "chat", + "get_provider", + "get_model", + "set_default_model" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/infra.rs": { + "file_path": "crates/forge_app/src/infra.rs", + "short_description": "Trait-based infrastructure abstraction for filesystem, env, HTTP, OAuth, MCP, gRPC and user I/O", + "category": "SOURCE_CODE", + "description": "This file defines the set of infrastructure-facing traits that the forge_app crate expects to be implemented by runtime/host code. 
Its purpose is to separate application logic from platform specifics (file IO, HTTP, OAuth, MCP servers/clients, gRPC channel, user prompts, command execution, caching, and directory walking). Each trait represents a capability that can be provided by different concrete implementations (e.g., real filesystem vs test doubles, local MCP server vs remote), enabling the rest of the codebase to depend only on these interfaces.\n\nThe design emphasizes small focused traits (single responsibility) so implementors can provide only what they need and tests can mock narrow behavior. Several traits are async (using async_trait) because most operations are I/O-bound. Some traits contain additional constraints (Clone, 'static) where the code expects to clone clients cheaply (McpClientInfra) or reuse channels (GrpcInfra). The file also contains higher-level domain ties: it references domain types from forge_domain (Environment, FileInfo, ToolDefinition, OAuthConfig, etc.), making it the bridge between domain models and infrastructural implementation.\n\nThe file is a public API surface inside the crate and therefore sensitive to signature and semantic changes: the commit history indicates recent breaking and refactoring changes (e.g., read_batch_utf8 now returns a stream of per-file results and EnvironmentInfra exposes an associated Config type via get_config). Callers expect per-file error reporting on batch reads and correct semantics for get_config (fresh read vs cached value). 
Implementations must respect the documented behaviors (atomic updates in update_environment, detection of binary files in range_read_utf8, content-hash calculation for full-file read, cloning and hydration semantics for gRPC channel).", + "key_constructs": [ + { + "name": "EnvironmentInfra", + "type": "trait", + "purpose": "Abstract access to environment variables and persisted application configuration, exposing both a lightweight Environment and a full resolved Config type.", + "reasoning": "This trait is the canonical entrypoint for configuration/environment concerns. It returns an associated Config type (to allow implementations to choose concrete config types) and includes update semantics that may invalidate caches; consumers rely on get_config returning up-to-date values when update_environment has been called." + }, + { + "name": "FileReaderInfra", + "type": "trait", + "purpose": "Reads files from the filesystem with multiple helper methods: whole-file as UTF-8, raw bytes, ranged line reads, and a batched-stream reader.", + "reasoning": "The batched stream signature intentionally yields per-file Result values so callers can handle partial failures. range_read_utf8 must detect binary files and return FileInfo that includes full-file content hash for external-change detection." + }, + { + "name": "FileWriterInfra", + "type": "trait", + "purpose": "Writes, appends, and writes temporary files, returning paths for created temp files.", + "reasoning": "Implementations must ensure atomic semantics where expected by callers (e.g., write should be durable) and write_temp must preserve the file after creation (explicitly documented)." + }, + { + "name": "FileRemoverInfra", + "type": "trait", + "purpose": "Removes files at provided paths.", + "reasoning": "Simple removal contract but callers expect consistent error handling and that removal works with files created by FileWriterInfra implementations." 
+ }, + { + "name": "FileInfoInfra", + "type": "trait", + "purpose": "Queries about file metadata: binary detection, existence, is_file, and size.", + "reasoning": "range_read_utf8 uses binary detection behavior from here; consistent binary detection across implementations is important for upper-layer logic to avoid processing binary files as UTF-8." + }, + { + "name": "FileDirectoryInfra", + "type": "trait", + "purpose": "Create directory trees.", + "reasoning": "Used before writes; callers assume idempotent directory creation (create_dirs should succeed if dirs already exist)." + }, + { + "name": "CommandInfra", + "type": "trait", + "purpose": "Executes shell commands either capturing output (execute_command) or running on present stdio (execute_command_raw).", + "reasoning": "This trait is used for tools/skill execution and must respect working_dir, environment variables, and silent flag semantics. The raw variant returns ExitStatus, while the other returns domain CommandOutput." + }, + { + "name": "UserInfra", + "type": "trait", + "purpose": "User interaction primitives for prompting and selection (single/multiple).", + "reasoning": "Implementations will be TUI, CLI or test-mocks. select_one_enum provides a default implementation that enumerates variants using strum; callers rely on the provided generic helper." + }, + { + "name": "McpClientInfra", + "type": "trait", + "purpose": "Client abstraction for Model Context Protocol servers: listing tools and invoking them with JSON input.", + "reasoning": "This trait is Clone + Send + Sync + 'static because clients are expected to be cheaply clonable and shareable across tasks/threads." + }, + { + "name": "McpServerInfra", + "type": "trait", + "purpose": "Connects to an MCP server and returns a client typed to McpClientInfra.", + "reasoning": "Abstracts connection establishment; implementations receive resolved env vars and a typed McpServerConfig. Returning a client allows callers to perform tool listing and calls." 
+ }, + { + "name": "WalkerInfra", + "type": "trait", + "purpose": "Walks the filesystem and returns a vector of WalkedFile with configured semantics.", + "reasoning": "Used by workspace indexing and search; implementations must honor the Walker config semantics (e.g., include/exclude patterns) and produce WalkedFile metadata expected by embedding/indexing code." + }, + { + "name": "HttpInfra", + "type": "trait", + "purpose": "Low-level HTTP operations used across the app: GET/POST/DELETE and EventSource (server-sent events).", + "reasoning": "Provides raw reqwest Response and EventSource so upper layers can stream responses. Implementations must honor provided HeaderMap and Bytes body types and preserve streaming semantics." + }, + { + "name": "DirectoryReaderInfra", + "type": "trait", + "purpose": "List directory entries efficiently and read directory files in parallel with optional filtering patterns.", + "reasoning": "Two distinct use cases are separated: listing entries (cheap) and reading contents (expensive, parallel). Implementations should optimize accordingly." + }, + { + "name": "KVStore", + "type": "trait", + "purpose": "Generic content-addressable caching API with get/set/clear operations for serializable keys and values.", + "reasoning": "Designed to be generic over key/value types and intended for backends like cacache. Generic bounds require Hash on keys and serde Serialize/Deserialize for values; callers expect content-addressable semantics and stable error behavior." + }, + { + "name": "OAuthHttpProvider", + "type": "trait", + "purpose": "OAuth-related HTTP behavior: build auth URL, exchange codes, and build provider-specific HTTP clients.", + "reasoning": "Encapsulates provider-specific differences in auth flows; callers expect provider-specific headers, token exchange handling, and proper AuthCodeParams formation." 
+ }, + { + "name": "AuthStrategy", + "type": "trait", + "purpose": "High-level authentication lifecycle: init, complete, and refresh flows returning domain auth types.", + "reasoning": "Used by provider login flows; implementations map provider-specific flows to domain AuthCredential. Async lifecycle methods must return the domain types used by the rest of the application." + }, + { + "name": "StrategyFactory", + "type": "trait", + "purpose": "Factory for creating AuthStrategy instances based on provider and method configuration.", + "reasoning": "Decouples selection logic for strategies from usage; callers pass provider id, method, and required params to obtain a typed AuthStrategy implementation." + }, + { + "name": "AgentRepository", + "type": "trait", + "purpose": "Loads resolved Agent definitions from built-in, global, and project-local sources while applying precedence rules.", + "reasoning": "This trait centralizes agent discovery and conflict resolution (CWD > global > built-in). Consumers rely on the ordering semantics for override behavior." + }, + { + "name": "GrpcInfra", + "type": "trait", + "purpose": "Provides a shared, clonable gRPC channel and a hydrate method to establish/refresh connections.", + "reasoning": "Upper layers expect that channel() returns a cloneable tonic Channel that can be used by multiple clients; hydrate() is used to warm/establish network-level resources ahead of first use." 
+ } + ], + "semantic_tags": [ + "filesystem", + "http", + "authentication", + "caching", + "mcp", + "grpc" + ], + "handles_entities": [ + "Environment", + "Config", + "FileInfo", + "WalkedFile", + "ToolDefinition", + "ToolOutput", + "OAuthTokenResponse", + "AuthCredential", + "Agent", + "CommandOutput" + ], + "key_behaviors": [ + "reads files as UTF-8 or raw bytes", + "reads file ranges with metadata and binary detection", + "writes, appends, and creates temporary files", + "executes shell commands and returns structured output", + "performs HTTP requests and returns streaming EventSource", + "performs OAuth code exchange and builds auth URLs", + "lists and calls MCP tools via JSON input", + "provides a shared gRPC channel and hydration" + ], + "pitfalls": [ + { + "mistake": "Altering trait method signatures (types or return futures) without updating all implementations and callers", + "consequence": "Breaks compile-time trait object compatibility and causes widespread build failures; historic commits indicate read_batch_utf8 was a breaking change.", + "prevention": "Respect existing signatures and documented semantics; update dependent modules and tests together." + }, + { + "mistake": "Treating FileReaderInfra::read_batch_utf8 as returning aggregated success-only results", + "consequence": "Loss of per-file error handling; callers expect a Stream<Item = (PathBuf, Result<String>)> to inspect failures per file.", + "prevention": "Preserve the per-file Result semantics for batch readers and handle partial failures in calling code." + }, + { + "mistake": "Assuming EnvironmentInfra::get_config returns a simple cloneable snapshot without considering update_environment cache invalidation", + "consequence": "Stale configuration or race conditions where callers use an outdated Config after an update_environment call.", + "prevention": "Respect the contract that implementations may re-read from disk; callers should not assume immutability across update_environment calls." 
+ }, + { + "mistake": "Ignoring Clone/'static bounds on McpClientInfra or changing those bounds silently", + "consequence": "Clients that expect to clone MCP clients across tasks will fail to compile or behave unexpectedly.", + "prevention": "Keep required trait bounds and ensure implementors satisfy Clone + Send + Sync + 'static." + } + ], + "reading_guide": { + "start_here": "EnvironmentInfra", + "key_sections": [ + "EnvironmentInfra: configuration lifecycle and get_config/get_environment/update_environment semantics", + "FileReaderInfra: read_utf8, read_batch_utf8 (note stream-of-results behavior), and range_read_utf8 (binary detection and FileInfo hash expectations)", + "McpClientInfra / McpServerInfra: clone bounds and JSON call/list interfaces", + "HttpInfra: streaming and EventSource behavior that callers rely on" + ], + "skip_unless_needed": [ + "FileDirectoryInfra (single create_dirs method) when not implementing directory creation", + "FileRemoverInfra for features that don't perform deletions" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test --package forge_app --workspace", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_config", + "relationship": "Provides configuration types (Environment, Config) referenced by EnvironmentInfra", + "likely_co_change": true, + "reason_to_check": "Changing EnvironmentInfra::get_config's associated type or semantics will require updating config types and loaders." + }, + { + "path": "crates/forge_fs", + "relationship": "Concrete filesystem helpers that likely implement FileReaderInfra/FileWriterInfra/FileInfoInfra", + "likely_co_change": true, + "reason_to_check": "Any change to file-read/write/range semantics must be reconciled with these implementations." 
+ }, + { + "path": "crates/forge_repo", + "relationship": "Uses persisted conversations/config and may interact with EnvironmentInfra or KVStore", + "likely_co_change": true, + "reason_to_check": "Updates to KVStore or EnvironmentInfra could affect persistence and conversation storage contracts." + }, + { + "path": "crates/forge_services", + "relationship": "Contains provider integrations and likely uses OAuthHttpProvider, AuthStrategy, and McpServerInfra", + "likely_co_change": true, + "reason_to_check": "OAuth and MCP changes will require updating service integrations." + }, + { + "path": "crates/forge_main/src/main.rs", + "relationship": "Application entrypoint that wires concrete infra implementations into the app", + "likely_co_change": true, + "reason_to_check": "Modifications to traits or their constructors require updating wiring code that creates concrete implementations and supplies them to app components." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test --package forge_app --workspace", + "cargo test --workspace", + "cargo clippy --workspace --all-targets -- -D warnings" + ], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "refactoring", + "category": "API", + "title": "EnvironmentInfra gains get_config() returning Config type", + "problem": "Callers needed access to the full ForgeConfig; previous trait exposed only environment and update hooks, or threaded config through constructors.", + "root_cause": "A refactor threaded a ForgeConfig through many constructors which made it hard to guarantee fresh reads after updates and increased coupling.", + "solution": "EnvironmentInfra now exposes associated type Config and a fn get_config(&self) -> Result to allow infra implementations to re-read or return cached config; added doc/comments to encourage re-read-on-update semantics.", + "lesson_learned": "Expose a minimal infra API that can supply up-to-date 
config; prefer method calls over threading copies for mutable configuration.", + "commits": [ + "7e8a51d", + "5bd0b94" + ], + "constructs": [ + "EnvironmentInfra", + "get_config", + "get_environment" + ] + }, + { + "type": "breaking_change", + "category": "API", + "title": "Change read_batch_utf8 signature to stream individual file results", + "problem": "Several places previously expected read_batch_utf8 to return batches of (PathBuf, String) inside a Result; that made handling individual file read errors harder.", + "root_cause": "Batch-based API hid per-file errors and required additional coordination to map failures to files.", + "solution": "Change trait signature: read_batch_utf8 now returns a Stream<Item = (PathBuf, Result<String>)>. This allows callers to handle individual file read failures without discarding whole batches.", + "commits": [ + "1b114a4" + ], + "constructs": [ + "FileReaderInfra::read_batch_utf8" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/app_config.rs": { + "file_path": "crates/forge_services/src/app_config.rs", + "short_description": "Infra-backed application config service that reads/updates provider & model defaults.", + "category": "SOURCE_CODE", + "description": "This file provides an AppConfigService implementation (ForgeAppConfigService) that reads and updates user-level preferences such as the default provider, default model, commit/suggest model configs, and reasoning effort. It exists to adapt the forge_app::AppConfigService trait to an underlying EnvironmentInfra (forge_config::ForgeConfig) and a ProviderRepository, ensuring that all reads come from the infra's current on-disk configuration and all writes are performed via infra.update_environment operations. 
The implementation centralizes mapping between the string-based on-disk ForgeConfig representation and the domain-level types (forge_domain::ProviderId, ModelId, ModelConfig, Effort), so other services can depend on domain types and not worry about config storage details.\n\nKey design choices are: (1) reads always use infra.get_config() so callers see the latest persisted state after writes; (2) update_config simply forwards a Vec to infra.update_environment(ops).await, with a debug log; (3) conversions between forge_config types (strings and enums) and forge_domain types are explicit and localized here (e.g., mapping forge_config::Effort to forge_domain::Effort). The trait implementation has bounds requiring the infra type F to implement both ProviderRepository and EnvironmentInfra and to be Send + Sync, reflecting that production infra must be able to supply provider metadata as well as configuration persistence.\n\nThe file also contains a fairly comprehensive inline test module (#[cfg(test)]) that defines a MockInfra implementing EnvironmentInfra, ProviderRepository, and ChatRepository to exercise the service behaviors. Tests cover reading an unset default provider, setting defaults via ConfigOperation::SetSessionConfig, retrieving provider-specific default models, and handling switches between providers. 
The tests are asynchronous (tokio) and demonstrate the expected mapping between domain model/provider types and the on-disk ForgeConfig representation.\n", + "key_constructs": [ + { + "name": "ForgeAppConfigService", + "type": "class", + "purpose": "Wrapper service implementing forge_app::AppConfigService backed by an EnvironmentInfra instance.", + "reasoning": "This struct hides the infra details and provides a typed domain-facing API for retrieving and updating provider/model/default configuration; edits must preserve the conversion and trait-bound behavior.", + "callers": [ + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 15, + "context": "use crate::app_config::ForgeAppConfigService;" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 59, + "context": "config_service: Arc>," + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 119, + "context": "let config_service = Arc::new(ForgeAppConfigService::new(infra.clone()));" + }, + { + "file": "crates/forge_services/src/forge_services.rs", + "line": 206, + "context": "type AppConfigService = ForgeAppConfigService;" + } + ] + }, + { + "name": "get_default_provider", + "type": "function", + "purpose": "Return the currently configured default ProviderId from the infra config.", + "reasoning": "Reads config.session.provider_id (string) and maps to forge_domain::ProviderId; it returns an error if no default is set. Changes must keep the read-from-infra semantics and error mapping (NoDefaultProvider)." + }, + { + "name": "get_provider_model", + "type": "function", + "purpose": "Return the default ModelId for a given provider (or the active session provider if None).", + "reasoning": "Only returns the session.model_id when the session.provider_id matches the requested provider; this conditional is deliberate to avoid returning a model that belongs to a different provider. 
Caller-visible behavior depends on exact string comparisons between session.provider_id and provider_id; preserve that logic." + }, + { + "name": "get_commit_config", + "type": "function", + "purpose": "Map the optional commit model config from locksmith config to a domain ModelConfig.", + "reasoning": "Transforms forge_config::ModelConfig (with Option fields) into Option only when both provider_id and model_id are present (uses zip). Keep the mapping and the use of ProviderId::from and ModelId::new consistent." + }, + { + "name": "get_suggest_config", + "type": "function", + "purpose": "Map the optional suggest model config from the infra config to a domain ModelConfig.", + "reasoning": "Same mapping semantics as get_commit_config; tests or dependent code may expect None when either provider or model is missing in the on-disk config." + }, + { + "name": "get_reasoning_effort", + "type": "function", + "purpose": "Map the optional reasoning effort enum from forge_config to the forge_domain::Effort enum.", + "reasoning": "Performs an explicit match from forge_config::Effort variants to forge_domain::Effort. New variants in either enum or a change in variant names requires updating this mapping to avoid mismatches." + }, + { + "name": "update_config", + "type": "function", + "purpose": "Persist a sequence of ConfigOperation changes via infra.update_environment.", + "reasoning": "Forwards operations to the infra's async update_environment and logs ops at debug level. Because writes are performed via the infra, tests and production rely on infra.update_environment to mutate config; changes should respect that contract and its async semantics." 
+ }, + { + "name": "MockInfra", + "type": "class", + "purpose": "Test fixture implementing EnvironmentInfra, ProviderRepository, and ChatRepository used by the inline unit tests.", + "reasoning": "MockInfra demonstrates expected behaviors of production infra: it stores ForgeConfig in a Mutex for shared test modification, provides a hardcoded provider list, converts Provider to ProviderTemplate on get_provider, and applies ConfigOperation semantics in update_environment. Any changes to service semantics must keep compatibility with this mock's expectations or update tests accordingly." + } + ], + "semantic_tags": [ + "configuration", + "providers", + "models", + "async", + "testing" + ], + "handles_entities": [ + "Provider", + "Model", + "ModelConfig", + "ProviderId", + "ModelId", + "ForgeConfig", + "Effort", + "ConfigOperation", + "Session" + ], + "key_behaviors": [ + "reads default provider from infra config", + "reads default model for a specified provider (only if session provider matches)", + "maps commit and suggest configs from on-disk strings to domain ModelConfig", + "maps reasoning effort enum from config to domain enum", + "applies configuration changes by forwarding ConfigOperation to infra.update_environment" + ], + "pitfalls": [ + { + "mistake": "Change get_provider_model to return a session model regardless of whether the session provider matches the requested provider.", + "consequence": "Clients might receive a model that belongs to a different provider, causing downstream provider/model mismatches and runtime failures.", + "prevention": "Preserve the comparison between session.provider_id and the requested provider; treat mismatch as no-default-model." 
+ }, + { + "mistake": "Modify update_config to mutate in-memory state without calling infra.update_environment.", + "consequence": "Reads via infra.get_config() will not reflect changes, breaking the invariant that reads always reflect persisted state and likely failing tests and production behavior.", + "prevention": "Keep update_config as a thin forwarder to infra.update_environment and log ops, relying on infra to persist changes." + }, + { + "mistake": "Alter enum mapping in get_reasoning_effort without updating domain or config enums in lockstep.", + "consequence": "New or renamed variants could be unmapped, returning incorrect values or panicking if an exhaustive match is assumed elsewhere.", + "prevention": "When changing enums, update this explicit mapping and run tests; ensure exhaustive coverage of variants." + }, + { + "mistake": "Change trait bounds on the impl (ProviderRepository + EnvironmentInfra) without updating callers/fixtures.", + "consequence": "Production infra types or test mocks may no longer satisfy the bounds, causing compilation errors across the codebase.", + "prevention": "If trait bounds must change, update MockInfra and any concrete infra types accordingly and run the crate tests." 
+ } + ], + "reading_guide": { + "start_here": "ForgeAppConfigService", + "key_sections": [ + "get_default_provider: how defaults are read and error behavior (NoDefaultProvider)", + "get_provider_model: provider/model matching logic, Option handling", + "get_commit_config/get_suggest_config: mapping of persisted strings to domain ModelConfig", + "update_config: forwards changes to infra.update_environment and logs ops" + ], + "skip_unless_needed": [ + "the debug import and the trivial new() constructor", + "test scaffolding details unless modifying tests or infra behavior" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_services/src/app_config.rs (inline #[cfg(test)] module)" + ], + "test_functions": [ + "test_get_default_provider_when_none_set", + "test_get_default_provider_when_set", + "test_get_default_provider_when_configured_provider_not_available", + "test_set_default_provider", + "test_get_default_model_when_none_set", + "test_get_default_model_when_set", + "test_set_default_model", + "test_set_multiple_default_models" + ], + "example_command": "cargo test -p forge_services --lib\n# Run a single test by name:\n# cargo test -p forge_services test_get_default_provider_when_set -- --exact", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_config", + "relationship": "Defines ForgeConfig and forge_config::ModelConfig used for on-disk config representation. This file converts between the on-disk types and domain types.", + "likely_co_change": true, + "reason_to_check": "If ForgeConfig fields or ModelConfig representation change (e.g., field renames or type changes), mapping code in get_commit_config/get_suggest_config and get_default_provider/get_provider_model must be updated." 
+ }, + { + "path": "crates/forge_domain", + "relationship": "Provides domain types (ProviderId, ModelId, ModelConfig, Effort, ConfigOperation) that this service exposes to callers.", + "likely_co_change": true, + "reason_to_check": "Changes to domain enums/constructors require updating conversions and tests in this file." + }, + { + "path": "crates/forge_app", + "relationship": "Contains the AppConfigService trait that ForgeAppConfigService implements; defines the domain-facing interface.", + "likely_co_change": true, + "reason_to_check": "Interface or semantic changes to AppConfigService require updating this implementation and tests." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_services --lib", + "cargo test --workspace (if changing public APIs or shared types)" + ], + "data_constants_to_check": [ + "crates/forge_config::ForgeConfig session/commit/suggest fields and forge_config::Effort enum" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "feature", + "category": "Configuration", + "title": "Persist commit and suggest configuration in app config service", + "problem": "AppConfig lacked persistent storage and accessors for commit/suggest configs.", + "root_cause": "New commit/suggest config structs were introduced; infra needed to read/write them.", + "solution": "Implemented get_commit_config/set_commit_config and get_suggest_config/set_suggest_config by reading/updating app config via infra.get_app_config()/update closures.", + "lesson_learned": "Service layers should map new domain config types into infra-backed app config and provide Async methods for reading/writing, with clear error propagation.", + "commits": [ + "f8a260e", + "da37b43" + ], + "constructs": [ + "get_commit_config", + "set_commit_config", + "get_suggest_config", + "set_suggest_config" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + 
"crates/forge_app/src/fmt/fmt_output.rs": { + "file_path": "crates/forge_app/src/fmt/fmt_output.rs", + "short_description": "Converts ToolOperation variants into optional ChatResponseContent for UI output (diffs, titles, todo text); includes unit tests.", + "category": "SOURCE_CODE", + "description": "This file implements the formatting adapter that turns ToolOperation values (the domain representation of tool invocations like filesystem writes, patches, searches, network fetches, shell invocations, todos, and plan creation) into ChatResponseContent elements consumed by the UI/streaming layers. The primary purpose is to decide which tool outputs should be surfaced to the user as a tool output (ChatResponseContent::ToolOutput) and to produce the string payload for those outputs (diffs, todo formatting, or a title). It centralizes the logic that maps domain-level operation outputs to displayable strings, relying on other formatting helpers (forge_display::DiffFormat, todo formatting helpers, TitleFormat, and path formatting utilities). This separation keeps formatting concerns out of operation execution code and makes it easy to reason about what the user will see for each operation type.\n\nThe core implementation is an impl of the FormatContent trait for ToolOperation with a single method, to_content(&self, env: &Environment) -> Option<ChatResponseContent>. The method pattern-matches all ToolOperation variants and returns either Some(ChatResponseContent::ToolOutput(...)) for operations that should produce visible tool output, or None when the formatter intentionally suppresses output for a given operation. For example, FsWrite only returns a diff when there is a before snapshot (overwrite case); FsPatch and FsMultiPatch always return a diff built from output.before and output.after; PlanCreate returns a title string wrapped via TitleFormat::debug; TodoWrite/TodoRead return formatted todo diffs or todo listings via format_todos_diff and format_todos. 
Many read/search/shell/network/follow-up/undo/remove operations are intentionally suppressed and return None so they don't clutter the chat stream.\n\n>Tests are embedded in this file under a #[cfg(test)] mod tests. Those tests exercise the mapping behavior and edge cases: FsRead (single/multi-line), FsWrite (create new file vs overwrite vs create-with-warnings), FsRemove, FsSearch (matches, no matches, None output), FsPatch (success and with warnings), FsUndo, NetFetch (success and error), Shell (success, success-with-stderr, failure), FollowUp, PlanCreate, TodoWrite/TodoRead behaviors, and more. The tests also use helpers such as forge_domain::FileInfo, compute_hash in the crate, and insta snapshot for diffs. The file contains both unit test fixtures and assertions that lock the expected behavior of to_content for each ToolOperation variant.", + "key_constructs": [ + { + "name": "to_content", + "type": "function", + "purpose": "Method implementation of FormatContent for ToolOperation that converts a ToolOperation into an Option.", + "reasoning": "This is the single entry point for producing UI-visible content from ToolOperation values; editing it changes which operation outputs are surfaced and how they are formatted, so any change must preserve expected behavior across variants and tests.", + "callers": [ + { + "file": "crates/forge_app/src/tool_executor.rs", + "line": 370, + "context": "if let Some(output) = operation.to_content(&env) {" + }, + { + "file": "crates/forge_app/src/fmt/todo_fmt.rs", + "line": 190, + "context": "let actual = setup.to_content(&fixture_environment());" + }, + { + "file": "crates/forge_app/src/fmt/todo_fmt.rs", + "line": 238, + "context": "let actual = setup.to_content(&fixture_environment());" + } + ] + }, + { + "name": "FormatContent", + "type": "constant", + "purpose": "Trait imported from crate::fmt::content; this file provides the implementation for ToolOperation.", + "reasoning": "The trait defines the contract this file 
satisfies \u2014 conversion into ChatResponseContent \u2014 and other formatters in the codebase follow the same pattern." + }, + { + "name": "tests", + "type": "module", + "purpose": "Unit tests for the mapping behavior exercised by to_content; ensure formatting outputs and suppression behavior are correct.", + "reasoning": "Many behaviors (when to return None, when to return diffs, snapshot content) are verified here; changes to to_content must keep tests passing or update tests/snapshots accordingly." + }, + { + "name": "fixture_environment", + "type": "function", + "purpose": "Test helper that produces a fake Environment for unit tests.", + "reasoning": "Used by all tests in this file to create an Environment value; test fixtures rely on this to be stable and produce deterministic values." + } + ], + "semantic_tags": [ + "formatting", + "diff", + "tool-output", + "todo", + "tests" + ], + "handles_entities": [ + "ToolOperation", + "ChatResponseContent", + "PatchOutput", + "FsWriteOutput", + "ReadOutput", + "SearchResult", + "HttpResponse", + "ShellOutput", + "Todo" + ], + "key_behaviors": [ + "returns diffs for overwrite writes and patches as ChatResponseContent::ToolOutput", + "returns formatted todo diffs and todo listings for TodoWrite/TodoRead operations", + "returns a debug title for PlanCreate operations", + "suppresses output (returns None) for most read/search/net/shell/remove/undo/follow-up operations" + ], + "pitfalls": [ + { + "mistake": "Assuming all filesystem writes should produce a diff", + "consequence": "New-file writes (before == None) are intentionally suppressed; changing that will alter UI behavior and invalidate tests/snapshots.", + "prevention": "Preserve the conditional logic that checks output.before for FsWrite; if altering, update tests and snapshots." 
+ }, + { + "mistake": "Changing diff formatting or the DiffFormat contract", + "consequence": "Snapshot tests (insta) and consumers of ChatResponseContent will break; diffs displayed in the UI will change unexpectedly.", + "prevention": "Respect uses of forge_display::DiffFormat::format(...).diff().to_string() and update insta snapshots deliberately when intended." + }, + { + "mistake": "Removing TodoWrite/TodoRead formatting branches", + "consequence": "Todo tool outputs will no longer be surfaced, breaking tests and user-visible behavior added in prior commits.", + "prevention": "Keep format_todos and format_todos_diff usage intact or mirror exact output if refactoring." + }, + { + "mistake": "Modifying test fixtures without updating FileInfo usage", + "consequence": "Unit tests will no longer compile or will have incorrect assumptions about read output parameters (see historical refactor to FileInfo).", + "prevention": "When editing test fixtures, use FileInfo::new(...) for ReadOutput to match the current ReadOutput shape." 
+ } + ], + "reading_guide": { + "start_here": "to_content", + "key_sections": [ + "to_content: the match over ToolOperation variants and the mapping rules (which branches produce Some vs None)", + "FsWrite branch: logic uses output.before to decide whether to return a diff", + "FsPatch & FsMultiPatch branches: always produce a diff using output.before and output.after", + "PlanCreate branch: produces a debug title using format_display_path and TitleFormat::debug", + "TodoWrite/TodoRead branches: call format_todos_diff and format_todos respectively" + ], + "skip_unless_needed": [ + "imports block (unless adding a new dependency)", + "test helper definitions that are standard (e.g., crate::compute_hash usage) if not modifying tests" + ] + }, + "tests": { + "exercised_by": [ + "embedded unit tests in crates/forge_app/src/fmt/fmt_output.rs" + ], + "test_functions": [ + "test_fs_read_single_line", + "test_fs_read_multiple_lines", + "test_fs_create_new_file", + "test_fs_create_overwrite", + "test_fs_create_with_warning", + "test_fs_remove", + "test_fs_search_with_matches", + "test_fs_search_no_matches", + "test_fs_search_none", + "test_fs_patch_success", + "test_fs_patch_with_warning", + "test_fs_undo", + "test_net_fetch_success", + "test_net_fetch_error", + "test_shell_success", + "test_shell_success_with_stderr", + "test_shell_failure", + "test_follow_up_with_response" + ], + "example_command": "cargo test -p forge_app --tests --quiet", + "relevant_snippets": [ + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "lines": "40-120", + "description": "Tests asserting that FsRead returns None and FsWrite returns diffs only when before is Some." + }, + { + "file": "crates/forge_app/src/fmt/fmt_output.rs", + "lines": "160-220", + "description": "test_fs_patch_success uses insta::assert_snapshot! on the diff output \u2014 snapshot is authoritative for diff formatting." 
+ } + ] + }, + "related_files": [ + { + "path": "crates/forge_app/src/operation.rs", + "relationship": "Defines the ToolOperation enum variants and output DTO types that this formatter consumes; changes to ToolOperation require co-ordinated changes here.", + "likely_co_change": true, + "reason_to_check": "If adding/removing ToolOperation variants or changing the shape of outputs (e.g., ReadOutput replaced fields with FileInfo), this file must be updated to keep mappings and tests consistent." + }, + { + "path": "crates/forge_display/src/lib.rs", + "relationship": "Provides DiffFormat and TitleFormat used to produce diffs and titles in this file.", + "likely_co_change": false, + "reason_to_check": "If DiffFormat::format or TitleFormat APIs change or their output shape/markup changes, snapshot tests and display behavior here should be re-verified." + }, + { + "path": "crates/forge_app/src/fmt/todo_fmt.rs", + "relationship": "Provides format_todos and format_todos_diff used for TodoRead/TodoWrite formatting.", + "likely_co_change": true, + "reason_to_check": "When todo formatting logic or its outputs change, tests here need to match the new formatting and snapshot expectations." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app", + "cargo test --workspace (if broader changes touch multiple crates)", + "cargo insta test --package forge_app --accept (only if intentionally updating snapshots)" + ], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "commit_insights": [ + { + "type": "refactoring", + "summary": "Test fixtures were updated to use FileInfo in ReadOutput after a DTO migration; tests in this file reflect that change and should be kept consistent with ReadOutput shape." 
+ }, + { + "type": "feature", + "summary": "TodoRead/TodoWrite outputs were wired into this formatter to surface todo diffs/listings as tool outputs; this behavior was recently added and tests/snapshots cover it." + } + ], + "insights": [ + { + "type": "refactoring", + "category": "Testing", + "title": "Update test fixtures to use FileInfo struct in ReadOutput", + "problem": "Formatter tests relied on start/end/total/content_hash fields; after migration they needed FileInfo.", + "root_cause": "Consolidation into FileInfo changed ReadOutput layout.", + "solution": "Tests replaced separate fields with info: FileInfo::new(...).", + "lesson_learned": "Keep test fixtures aligned with DTO migrations; prefer using constructors to avoid forgetting new fields.", + "commits": [ + "29db91a" + ], + "constructs": [ + "test fixtures in fmt_output" + ] + }, + { + "type": "feature", + "category": "Other", + "title": "Hooked todo read/write tool outputs into formatter", + "problem": "ToolOperation outputs for todo_write/todo_read were not mapped to ChatResponseContent tool output formatting.", + "root_cause": "New todo tools introduced but the formatting layer wasn't wired to render their output.", + "solution": "Added ToolOperation::TodoWrite and TodoRead branches to format as tool outputs using format_todos_diff and format_todos.", + "lesson_learned": "When adding new tool operations, update formatting/rendering layers so CLI/TUI reflects new tool outputs; tests/snapshots should cover these outputs.", + "commits": [ + "4f1ad6b" + ], + "constructs": [ + "FormatContent for ToolOperation" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/forge_services.rs": { + "file_path": "crates/forge_services/src/forge_services.rs", + "short_description": "Application container that composes and exposes Forge runtime services built on top of an infra implementation", + "category": "SOURCE_CODE", + "description": "This file defines 
ForgeServices, the central service container used by the Forge application layer. It exists to wire together concrete service implementations (conversation, template, provider/chat, file-system tooling, MCP management, policy/auth, workspace context engine, etc.) on top of a supplied infrastructure implementation (Arc). ForgeServices implements the forge_app::Services trait to present a uniform service API to application code; it also implements forge_app::EnvironmentInfra and delegates environment/config operations to the underlying infra. The primary design goal is dependency composition: the infra implementation F provides primitives (file IO, HTTP, env/config access, repositories) and ForgeServices instantiates higher-level services that rely on those primitives and other domain repositories.\n\nThe file uses heavy generics to ensure that only infra implementations that satisfy required traits are accepted (HttpInfra, EnvironmentInfra, McpServerInfra, WalkerInfra, repositories, etc.). The constructor (pub fn new) constructs many Arc-wrapped service instances (ForgeMcpManager, ForgeMcpService, ForgeTemplateService, ForgeChatRequest, ForgeConversationService, ForgeAuthService, ForgeProviderService, discovery-backed workspace service, multiple file tooling services, skill fetch, etc.) and stores them in the struct. The impl block for the Services trait maps the trait's associated types to these concrete service types and returns references to the stored Arc instances, so application modules that depend on forge_app::Services get access to the concrete implementations via this container.\n\nWhy it exists: centralizes wiring for many domain-level services and enforces compile-time guarantees about the capabilities of the underlying infra. It isolates construction logic (how services are instantiated and how they depend on each other) from higher-level application code which only depends on the Services trait. 
The file is the bridge between low-level infra and high-level application behavior (e.g., conversation persistence, provider calls, file patches, MCP tool execution, workspace indexing). It also contains a targeted impl of EnvironmentInfra to satisfy consumers that expect an EnvironmentInfra implementation; that impl simply forwards to the embedded infra instance.\n\nHow it fits in the system: other crates call into forge_app APIs and expect a Services implementation. ForgeServices is the concrete implementation used by the CLI/TUI runtime (crates/forge_main) and other runtime components that need access to these composed services. Changes to this file can affect the types of services exposed, trait bounds required on infra implementors, and how services are initialized and wired together.", + "key_constructs": [ + { + "name": "ForgeServices", + "type": "class", + "purpose": "A container struct that holds Arc-wrapped concrete services and the underlying infra Arc.", + "reasoning": "Editing or reordering fields can change initialization order and dependencies \u2014 the struct is the authoritative runtime service registry used everywhere the Services trait is required." 
+ }, + { + "name": "new", + "type": "function", + "purpose": "Constructor: builds concrete service instances and returns a populated ForgeServices struct.", + "reasoning": "The initialization sequence is important (e.g., mcp_manager is created then passed to mcp_service); modifications must respect service dependency ordering and required infra clones.", + "callers": [ + { + "file": "crates/forge_api/src/forge_api.rs", + "line": 54, + "context": "let app = Arc::new(ForgeServices::new(repo.clone()));" + } + ] + }, + { + "name": "McpService", + "type": "constant", + "purpose": "Type alias: ForgeMcpService parameterized with ForgeMcpManager and the Mcp server client type from the infra.", + "reasoning": "This alias centralizes the concrete Mcp service type for readability and must match how ForgeMcpService is parameterized; changing it could require updating trait associated types." + }, + { + "name": "AuthService", + "type": "constant", + "purpose": "Type alias to ForgeAuthService used in the Services trait mapping.", + "reasoning": "Keeps naming consistent in the trait impl; if changed, associated types and method return types must match the new alias." + }, + { + "name": "Services for ForgeServices", + "type": "function", + "purpose": "Trait impl: implements forge_app::Services for ForgeServices, mapping associated types and providing accessor methods.", + "reasoning": "This is the public API surface of the container; modifications affect all consumers expecting forge_app::Services and must preserve method signatures and associated types." + }, + { + "name": "EnvironmentInfra for ForgeServices", + "type": "function", + "purpose": "Trait impl: implements forge_app::EnvironmentInfra for ForgeServices, delegating environment/config operations to the inner infra.", + "reasoning": "Ensures ForgeServices can be used as an EnvironmentInfra; changes here affect calls that rely on get_environment/get_config/update_environment/get_env_var(s)." 
+ } + ], + "semantic_tags": [ + "dependency-injection", + "service-composition", + "infrastructure-adapter", + "mcp", + "auth", + "workspace" + ], + "handles_entities": [ + "Conversation", + "ChatRequest/Attachment", + "Workspace", + "McpConfig", + "Provider" + ], + "key_behaviors": [ + "constructs and composes runtime services on top of an infra implementation", + "exposes concrete services via the forge_app::Services trait", + "forwards environment/config calls to the underlying infra" + ], + "pitfalls": [ + { + "mistake": "Removing or loosening generic trait bounds on F without updating all dependents.", + "consequence": "Compilation errors or incorrect trait implementations; runtime components may not get required capabilities from infra.", + "prevention": "Respect the existing trait bounds when modifying generics; update all associated types and method signatures in the Services impl if changing bounds." + }, + { + "mistake": "Changing the initialization order in new() (e.g., creating mcp_service before mcp_manager).", + "consequence": "You may attempt to construct a service that expects another service to exist, causing compilation failures or logical bugs when passing references.", + "prevention": "Maintain dependency creation order. If a service depends on another, ensure the dependency is created first and its Arc is passed down." + }, + { + "mistake": "Altering the Services trait mapping (associated types or accessor return types) without updating consumers.", + "consequence": "Massive compilation cascade across crates relying on the Services trait; trait coherence errors.", + "prevention": "Treat the Services impl as part of the crate's public API \u2014 run workspace tests and check other crates that import forge_app::Services when modifying." 
+ }, + { + "mistake": "Breaking the EnvironmentInfra delegation semantics (e.g., changing return types or semantics of get_config/update_environment).", + "consequence": "Consumers expecting the infra to behave the same way will break; config or env updates might be lost or behave differently.", + "prevention": "Delegate straight to self.infra and preserve the exact method signatures and async behavior (returning futures) as currently implemented." + }, + { + "mistake": "Introducing runtime references or cycles between Arcs stored in the struct.", + "consequence": "Memory leaks due to reference cycles, making shutdown/cleanup impossible.", + "prevention": "Avoid storing Arcs that capture/own other Arcs in ways that create cycles; prefer weak references if necessary." + } + ], + "reading_guide": { + "start_here": "ForgeServices (struct) and the pub fn new constructor", + "key_sections": [ + "ForgeServices: shows every concrete service field the container owns and how they relate to each other", + "pub fn new: details construction order and dependencies (mcp_manager -> mcp_service, discovery -> workspace_service)", + "impl Services for ForgeServices: maps associated types and provides accessors used by the rest of the app", + "EnvironmentInfra impl: small but important delegation to infra for environment/config operations" + ], + "skip_unless_needed": [ + "type aliases (McpService, AuthService) \u2014 read only if you need precise type signatures", + "repeated trait bounds in impl blocks \u2014 necessary but verbose; only inspect if changing generic constraints" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test --package forge_services --lib", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_app/src/lib.rs", + "relationship": "ForgeServices implements forge_app::Services and must match that trait's associated types and method signatures.", + "likely_co_change": true, + 
"reason_to_check": "If you change any associated types or accessors in this file, verify forge_app::Services and its consumers for compatibility." + }, + { + "path": "crates/forge_config/src/lib.rs", + "relationship": "The EnvironmentInfra implementation here uses forge_config::ForgeConfig as Config associated type.", + "likely_co_change": false, + "reason_to_check": "If you change the Config type or its API, update the EnvironmentInfra impl and ensure get_config returns the expected type." + }, + { + "path": "crates/forge_services/src/mcp.rs", + "relationship": "Defines ForgeMcpService and ForgeMcpManager used and parameterized by McpService alias.", + "likely_co_change": true, + "reason_to_check": "Constructor wiring relies on ForgeMcpManager -> ForgeMcpService creation order; changes to those types affect the McpService alias and the constructor." + }, + { + "path": "crates/forge_services/src/conversation.rs", + "relationship": "Defines ForgeConversationService which is constructed and held here.", + "likely_co_change": true, + "reason_to_check": "Changing conversation service APIs or constructors requires updating how it's instantiated and exposed by ForgeServices." + }, + { + "path": "crates/forge_services/src/context_engine.rs", + "relationship": "ForgeWorkspaceService is constructed here with a discovery implementation (FdDefault) \u2014 the wiring for workspace context is defined across these files.", + "likely_co_change": true, + "reason_to_check": "Modifying workspace service APIs or discovery types will require updates to the constructor and trait mappings." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test --package forge_services --lib", + "cargo test --workspace", + "cargo check --workspace", + "cargo clippy --workspace -- -D warnings" + ], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_services/src/attachment.rs": { + "file_path": "crates/forge_services/src/attachment.rs", + "short_description": "Produces Attachment values (file contents or directory listings) from file tags/URLs using infra traits.", + "category": "SOURCE_CODE", + "description": "This file implements an AttachmentService wrapper (ForgeChatRequest) that converts FileTag-derived paths into Attachment domain objects consumed by higher-level chat/agent code. It exists to centralize the logic that (a) resolves relative paths against the current environment cwd, (b) distinguishes directories from files, (c) reads image bytes for known image extensions, and (d) reads a line-range of text files while preserving a canonical full-file content hash in the returned FileInfo.\n\nDesign decisions visible in the code: the implementation is generic over an infra 'F' which must implement several File/Environment infra traits (FileReaderInfra, EnvironmentInfra, FileInfoInfra, DirectoryReaderInfra). That lets production code provide actual filesystem/network-backed infra and tests inject in-memory mocks. For text files it delegates to range_read_utf8 and uses resolve_range to enforce configured max_read_lines; range_read_utf8 returns both the extracted slice and a FileInfo that already contains the content_hash for the entire file \u2014 this value is intentionally used unchanged when building AttachmentContent::FileContent so external-change detection (which hashes full files) remains consistent. 
For directories it uses DirectoryReaderInfra::list_directory_entries and builds sorted DirectoryEntry vectors (directories first, then lexicographic by path). The service exposes a single trait method attachments(&self, url: &str) that parses all FileTag occurrences and populates attachments concurrently via futures::future::join_all.\n\nThe file contains an extensive in-module test suite (mod tests) which defines multiple mock infra implementations (MockEnvironmentInfra, MockFileService, MockCompositeService) used to exercise behavior and edge-cases: relative path resolution, directory listing, binary vs text detection, range slicing, and content hashing. The commit history (included as context) shows past fixes related to using the FileInfo-provided full-file hash instead of recomputing a hash after applying line-number formatting \u2014 tests were updated to assert that exact behavior.\n\nWhen editing, respect that this module is the authoritative bridge between low-level infra reads and Attachment domain semantics: it must not re-hash or alter the FileInfo.hash returned by the infra, must apply the same slicing rules as infra.range_read_utf8, and must keep path normalization that uses infra.get_environment().cwd.", + "key_constructs": [ + { + "name": "ForgeChatRequest", + "type": "class", + "purpose": "Generic service struct that holds an Arc infra and implements AttachmentService for that infra.", + "reasoning": "Core entry point for producing Attachment objects; generic to allow production and test infra implementations. Any changes to behavior must preserve the contract seen by AttachmentService consumers." 
+ }, + { + "name": "prepare_attachments", + "type": "function", + "purpose": "Given Vec, concurrently populates Attachment objects by mapping each tag through populate_attachments and collecting results.", + "reasoning": "Handles concurrency and error aggregation for multiple tags; ordering of returned attachments follows input order via join_all + collect." + }, + { + "name": "populate_attachments", + "type": "function", + "purpose": "Core logic that resolves a single FileTag to an Attachment: resolves relative paths, handles directory listings, detects image mime types and reads bytes, or reads text ranges and attaches FileInfo.", + "reasoning": "This function contains the nuanced behavior: path normalization, directory vs file detection, extension->mime mapping, and ensuring FileInfo (with full-file hash) is used. Agents editing must preserve those semantics exactly where other systems rely on them." + }, + { + "name": "attachments", + "type": "function", + "purpose": "Trait method implementation (AttachmentService::attachments) that parses a URL into FileTag items and calls prepare_attachments to produce Attachments.", + "reasoning": "Public facing method used by callers; signature and behavior are part of the public API of the crate and are exercised by tests. Keep contract and error propagation stable." 
+ } + ], + "semantic_tags": [ + "attachments", + "filesystem", + "images", + "directory-listing", + "infra-abstraction", + "tests" + ], + "handles_entities": [ + "Attachment", + "AttachmentContent", + "DirectoryEntry", + "FileInfo", + "Image", + "FileTag" + ], + "key_behaviors": [ + "parses urls into FileTag and returns a list of Attachments", + "resolves relative file paths against infra.get_environment().cwd", + "returns directory listing attachments when path exists but is not a file", + "detects images by extension and returns raw bytes with mime type", + "reads text file line ranges via infra.range_read_utf8 and attaches FileInfo with full-file hash", + "sorts directory entries with directories first then by path" + ], + "pitfalls": [ + { + "mistake": "Recompute or replace the content_hash in FileInfo after applying formatting (e.g., adding line numbers).", + "consequence": "External-change detection will see mismatched hashes and incorrectly report files as modified externally.", + "prevention": "Always use the FileInfo returned by range_read_utf8 (it should contain the full-file hash). Do not hash the transformed/display content." + }, + { + "mistake": "Assume Infra::list_directory_entries returns recursive entries.", + "consequence": "Directory listing logic and tests expect only direct children; changing to recursive results will break ordering and consumer expectations.", + "prevention": "Only expect direct child entries and preserve existing sorting (dirs first, then lexicographic)." + }, + { + "mistake": "Treat a path that exists but is_file == false as a file.", + "consequence": "Directory entries would be treated as files and attempts to read bytes could fail or return empty results.", + "prevention": "Check exists(path) and is_file(path) as currently done; if exists && !is_file => directory handling branch." 
+ }, + { + "mistake": "Alter path normalization logic (resolving relative paths using cwd) without considering tests/mock environments.", + "consequence": "Tests that rely on MockEnvironmentInfra(cd = /test) or external code relying on cwd semantics will fail.", + "prevention": "Keep normalization behavior: non-absolute tags are joined with infra.get_environment().cwd." + }, + { + "mistake": "Change concurrency/order behavior of prepare_attachments (e.g., switching to unordered concurrency).", + "consequence": "Ordering consumers expect (same order as parsed tags) can change, breaking call sites and tests.", + "prevention": "Preserve join_all mapping pattern that returns results in input order." + } + ], + "reading_guide": { + "start_here": "ForgeChatRequest", + "key_sections": [ + "populate_attachments: contains the file-vs-directory detection, image extension->mime mapping, and uses range_read_utf8 + FileInfo.", + "prepare_attachments: orchestrates concurrent population and preserves input ordering.", + "impl AttachmentService for ForgeChatRequest: public entrypoint (attachments) used by callers." + ], + "skip_unless_needed": [ + "long test fixtures and mock infra implementations inside mod tests (only read when debugging behaviors or adding tests)" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_services/src/attachment.rs (mod tests inside same file)" + ], + "test_functions": [], + "example_command": "cargo test -p forge_services --lib", + "relevant_snippets": [ + { + "file": "crates/forge_services/src/attachment.rs", + "lines": "mod tests (inside file)", + "description": "Mocks and unit tests that exercise path resolution, directory listing formatting/sorting, image vs text handling, range slicing, and the requirement that FileInfo.content_hash represents the full file hash." 
+ } + ] + }, + "related_files": [ + { + "path": "crates/forge_app/src/domain.rs (Attachment, AttachmentContent, FileTag types)", + "relationship": "Defines the domain types used and produced by this module (Attachment, AttachmentContent, DirectoryEntry, FileTag).", + "likely_co_change": true, + "reason_to_check": "If Attachment or FileTag shapes change, this file must be updated to construct correct AttachmentContent variants." + }, + { + "path": "crates/forge_services/src/range.rs (resolve_range)", + "relationship": "Provides resolve_range logic used to translate optional start/end into concrete line ranges respecting max_read_lines.", + "likely_co_change": true, + "reason_to_check": "populate_attachments depends on resolve_range for correct slicing semantics and to match infra.range_read_utf8's expectations." + }, + { + "path": "crates/forge_config/src/lib.rs (ForgeConfig.max_read_lines)", + "relationship": "Configuration value inspected to compute max_read_lines when reading text ranges.", + "likely_co_change": true, + "reason_to_check": "Changes to max_read_lines semantics or default values alter how populate_attachments slices file content." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_services --lib", + "cargo test --workspace (if making cross-crate changes)" + ], + "data_constants_to_check": [ + "forge_config::ForgeConfig::max_read_lines (used when resolving ranges)" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "refactoring", + "category": "State Management", + "title": "Use FileInfo returned from range_read_utf8 and compute full-file hash consistently", + "problem": "Attachment service previously hashed range content to produce content_hash; needed to use full-file hash to match detector behaviour.", + "root_cause": "Inconsistent hashing between FS layer and attachment layer.", + "solution": "Consume (file_content, file_info) from infra.read_range_utf8 and populate AttachmentContent::FileContent.info with provided FileInfo (which includes full-file hash). Tests updated to compute full_file_hash and assert equality.", + "lesson_learned": "Ensure service layers rely on canonical data provided by infra (don't recompute or duplicate semantics).", + "commits": [ + "29db91a" + ], + "constructs": [ + "FsReadService::read", + "AttachmentContent::FileContent construction" + ] + }, + { + "type": "bug_fix", + "category": "State Management", + "title": "Store and use raw content hash for attachments to avoid false external-change warnings", + "problem": "External-change detector hashed raw file on disk, but Attachment stored hash after line-numbering transformation, causing mismatches and false 'modified externally' warnings.", + "root_cause": "Attachment file content was hashed after applying line-number formatting (number prefixes); external detector computes hash on raw bytes of file.", + "solution": "Compute content_hash from raw file content before line-numbering and store it in AttachmentContent::FileContent; ensure displayed content is line-numbered via to_numbered_from(...).to_string(); tests updated to assert 
hashes.", + "commits": [ + "70cba43" + ], + "constructs": [ + "AttachmentService::read_attachment", + "AttachmentContent::FileContent" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_domain/src/repo.rs": { + "file_path": "crates/forge_domain/src/repo.rs", + "short_description": "Trait definitions for repository interfaces covering snapshots, conversations, providers, workspace indexing, validation and fuzzy search", + "category": "SOURCE_CODE", + "description": "This file defines the domain-level repository interfaces (traits) used across the Forge application. It centralizes abstract contracts for persistence and external integrations so other crates can program to these traits rather than concrete implementations. The traits cover: file snapshot undo/redo (SnapshotRepository), conversation CRUD (ConversationRepository), streaming chat and model listing (ChatRepository), provider credential management and migration (ProviderRepository), workspace indexing and semantic search (WorkspaceIndexRepository), skill loading (SkillRepository), remote syntax validation (ValidationRepository), and fuzzy text searching (FuzzySearchRepository).\n\nThese interfaces exist to decouple higher-level logic (agents, CLI/TUI, services) from concrete storage and network implementations. Implementations of these traits live elsewhere (other crates or test doubles) and are injected where needed. Many method signatures reference crate-level domain types (Conversation, Snapshot, WorkspaceId, ApiKey, SyntaxError, FileUpload, etc.), and some use external types (url::Url). Error types are a mix of the crate-specific Result alias and anyhow::Result / anyhow::Error, which reflects different historical error-handling choices across methods.\n\nBecause these are trait signatures used broadly, changes here affect many implementers. 
Note method-level behaviors encoded in return types (for example, get_all_conversations returns Result>> rather than Result>), and streaming return types (ResultStream) used by ChatRepository.chat. The file uses async_trait::async_trait to allow async trait methods, which is important for implementers and tests to import. Implementations must preserve signatures, error types, and generic parameters (e.g., Provider) to remain compatible with callers across the workspace.", + "key_constructs": [ + { + "name": "SnapshotRepository", + "type": "class", + "purpose": "Trait defining async operations for creating and restoring file snapshots to enable undo functionality.", + "reasoning": "This is the authoritative interface for snapshot behavior; implementations will be used by components that modify file contents and need undo support. Maintain method signatures and Path usage so file-system semantics remain consistent across platforms." + }, + { + "name": "ConversationRepository", + "type": "class", + "purpose": "Trait for persisting, retrieving, listing, and deleting Conversation records.", + "reasoning": "This trait is central to conversation persistence; many features (conversation history, last conversation recovery, listing) depend on its exact semantics and return types (including Option wrappers). Changes here ripple to storage implementations and UI layers that display conversations." + }, + { + "name": "ChatRepository", + "type": "class", + "purpose": "Trait exposing methods to perform chat completions (streamed) and list models from a provider.", + "reasoning": "It returns a ResultStream for streaming chat messages and a Vec for models. Note the provider argument is Provider; preserve generic usage since implementations will use the Url type." 
+ }, + { + "name": "ProviderRepository", + "type": "class", + "purpose": "Trait for managing provider templates and credentials, and for migrating environment-based credentials.", + "reasoning": "Credential storage and migration are security-sensitive; method semantics (upsert/remove/get) and migration result shape must be kept stable to avoid breaking login and migration flows." + }, + { + "name": "WorkspaceIndexRepository", + "type": "class", + "purpose": "Trait for authenticating with and interacting with the external workspace indexing/search service (create workspace, upload, search, list, delete).", + "reasoning": "This trait forms the surface area for the semantic search/indexing subsystem. Methods accept and return many crate-level types (ApiKey, FileUpload, Node, WorkspaceInfo, etc.); implementations talk over gRPC/HTTP and must match these contracts exactly." + }, + { + "name": "SkillRepository", + "type": "class", + "purpose": "Trait for loading Skill objects (typically from project skill markdown files).", + "reasoning": "Skill loading semantics (locations, parsing) are handled by implementations; the trait keeps the higher-level code agnostic of where and how skills are discovered." + }, + { + "name": "ValidationRepository", + "type": "class", + "purpose": "Trait for remote syntax validation of files, returning a list of SyntaxError or Ok(vec![]) when file type unsupported or valid.", + "reasoning": "The contract explicitly documents the three result modes (valid/unsupported => Ok(empty), Ok(errors) => validation errors, Err => communication error). Implementers must follow this contract so callers can differentiate error classes correctly." + }, + { + "name": "FuzzySearchRepository", + "type": "class", + "purpose": "Trait for performing fuzzy search of a needle inside a haystack with an optional search_all flag.", + "reasoning": "Used for in-memory or external fuzzy matching. Signature returns Vec<SearchMatch>. 
Keep the search_all boolean semantics intact to avoid altering matching behavior." + } + ], + "semantic_tags": [ + "repository", + "persistence", + "providers", + "workspace-indexing", + "validation", + "fuzzy-search" + ], + "handles_entities": [ + "Snapshot", + "Conversation", + "ChatCompletionMessage", + "Model", + "Provider", + "WorkspaceAuth", + "WorkspaceId", + "Skill", + "SyntaxError", + "SearchMatch" + ], + "key_behaviors": [ + "creates file snapshots for undo", + "restores file content from snapshots", + "persists conversations (upsert/get/list/delete)", + "streams chat completions from provider models", + "manages provider credentials and migrations", + "authenticates and manipulates workspace indexes", + "loads skill definitions", + "validates source files remotely", + "performs fuzzy text search" + ], + "pitfalls": [ + { + "mistake": "Changing method signatures (names, parameter types, return types, or error types)", + "consequence": "Will break all implementations and callers across the workspace; compilation errors and runtime incompatibilities in many crates.", + "prevention": "Preserve exact signatures and error semantics when editing; coordinate changes across all dependents." + }, + { + "mistake": "Swapping the error type used (between crate::Result and anyhow::Result) or narrowing errors", + "consequence": "Incompatible error handling for implementers and callers; may force large changes to propagation and handling code.", + "prevention": "Respect existing mixed usage; if modifications are needed, ensure all implementers are updated in lockstep." + }, + { + "mistake": "Altering the Option wrappers in return types (e.g., get_all_conversations returns Option<Vec<Conversation>>)", + "consequence": "Semantic change in callers expecting None vs empty Vec to mean different things (presence vs absence).", + "prevention": "Keep Option usage and document reasoning; if you must change, update all consumers to handle the new semantics." 
+ }, + { + "mistake": "Removing or changing Provider generic / Url dependency", + "consequence": "Breaks provider-related implementations that rely on the Url type and may change serialization/transport assumptions.", + "prevention": "Keep Provider generic parameter as Url or update every provider implementation accordingly." + }, + { + "mistake": "Ignoring platform path semantics when using Path in SnapshotRepository", + "consequence": "Implementations may mis-handle path canonicalization, symlinks, or Windows path separators leading to incorrect snapshot lookups.", + "prevention": "Respect std::path::Path semantics in implementations and tests; canonicalize/normalize paths consistently." + } + ], + "reading_guide": { + "start_here": "SnapshotRepository", + "key_sections": [ + "SnapshotRepository: file snapshot lifecycle (insert_snapshot, undo_snapshot)", + "ConversationRepository: persistence contract for upsert/get/get_all/get_last/delete", + "WorkspaceIndexRepository: auth, upload, search, and workspace management signatures" + ], + "skip_unless_needed": [ + "SkillRepository and FuzzySearchRepository: small and straightforward unless you\u2019re implementing those features", + "doc comments that restate parameter names" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_domain --lib", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_repo", + "relationship": "Likely contains concrete implementations of conversation persistence and proto bindings that implement ConversationRepository and SnapshotRepository.", + "likely_co_change": true, + "reason_to_check": "If these traits change, implementers in forge_repo must be updated; check proto-generated types and storage layers." 
+ }, + { + "path": "crates/forge_services", + "relationship": "Likely consumers of ChatRepository, ProviderRepository, and WorkspaceIndexRepository for networked provider interactions and model calls.", + "likely_co_change": true, + "reason_to_check": "Changes to provider or chat method signatures affect client-side service code that calls providers and models." + }, + { + "path": "crates/forge_embed", + "relationship": "Uses workspace indexing and semantic search features; may rely on WorkspaceIndexRepository contracts.", + "likely_co_change": true, + "reason_to_check": "Search/upload method changes will impact embedding/indexing flows." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test --workspace", + "cargo test -p forge_domain --lib" + ], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/fmt/fmt_input.rs": { + "file_path": "crates/forge_app/src/fmt/fmt_input.rs", + "short_description": "Converts ToolCatalog inputs into user-facing ChatResponseContent titles/subtitles for CLI/TUI display", + "category": "SOURCE_CODE", + "description": "This file implements the FormatContent trait for the ToolCatalog enum (from forge_domain). Its single public entrypoint is the to_content method which inspects the concrete tool invocation (Read, Write, FsSearch, SemSearch, Patch, Shell, etc.) and returns an optional ChatResponseContent that represents a short title and optional subtitle for display in the CLI/TUI. The produced ChatResponseContent objects are built via TitleFormat::debug(...).sub_title(...).into(), providing a consistent one-line summary for each tool call in conversational output.\n\nWhy it exists: in the interactive user interface and streaming responses, tool invocations (file reads/writes, searches, patches, shell commands, network fetches, follow-ups, tasks, todos, etc.) 
should be communicated concisely to the user. This mapping centralizes how each ToolCatalog variant is rendered as a short, user-visible label. It also contains presentation logic for special cases (e.g., showing explicit line ranges, hiding write titles when write would error, different labels for Create vs Overwrite).\n\nHow it fits into the larger system: ToolCatalog is a domain-level enum representing tool calls; this file translates domain events into UI-friendly titles. It depends on forge_domain types (ToolCatalog, Environment, ChatResponseContent) and a local helper format_display_path (crate::utils::format_display_path) to compute human-friendly paths relative to the current working directory from env.cwd. UI components that display tool activity will consume ChatResponseContent produced by this implementation.\n\nNon-obvious behaviors/edge cases: the Write arm explicitly checks if the target path exists on disk and combines that with input.overwrite to decide between returning \"Overwrite\", \"Create\", or None (the latter when the file exists and overwrite == false because the actual tool will error). The Read arm appends explicit line ranges when start_line/end_line are set, with specific formatting for partially-provided ranges (start-only, end-only). Several variants intentionally return None (e.g., Plan) meaning \"do not render a title\"; the Shell arm uses env.shell in the title (so env.shell must be valid). 
Commit history shows at least one recent fix where the Write title logic was corrected to check path.exists() rather than relying only on overwrite flag.", + "key_constructs": [ + { + "name": "FormatContent", + "type": "trait", + "purpose": "Trait providing to_content conversion from a domain type into an optional ChatResponseContent for UI display", + "reasoning": "This file provides a concrete implementation of the trait for ToolCatalog; an agent editing the code must preserve the trait contract: return Some(ChatResponseContent) for renderable tool calls or None when the tool should produce no CLI title." + }, + { + "name": "ToolCatalog", + "type": "enum", + "purpose": "Domain enum representing concrete tool invocations (Read, Write, FsSearch, SemSearch, Patch, Shell, etc.)", + "reasoning": "This impl matches on variants of ToolCatalog; changes to ToolCatalog will require revisiting each match arm here to ensure display behavior remains consistent." + }, + { + "name": "to_content", + "type": "function", + "purpose": "Main conversion function that maps a ToolCatalog instance to an Option", + "reasoning": "Entry point used by UI rendering; contains all the per-variant formatting logic (titles/subtitles) \u2014 edits must keep edge-case handling (Write semantics, Read line ranges, Plan -> None).", + "callers": [ + { + "file": "crates/forge_app/src/tool_registry.rs", + "line": 136, + "context": "if let Some(content) = tool_input.to_content(&env) {" + } + ] + } + ], + "semantic_tags": [ + "formatting", + "ui", + "tools", + "file-paths", + "titles" + ], + "handles_entities": [ + "ToolCatalog", + "ChatResponseContent", + "Environment", + "TitleFormat" + ], + "key_behaviors": [ + "produces a short title for each tool invocation", + "computes subtitle using formatted display path relative to env.cwd", + "suppresses titles for variants that should not be displayed (Plan)", + "formats explicit line ranges for Read" + ], + "pitfalls": [ + { + "mistake": "Deciding write title 
solely from input.overwrite without checking filesystem", + "consequence": "Could show 'Overwrite' when the file does not exist or show a title for a write that will immediately error (misleading the user)", + "prevention": "Always check path.exists() and combine with input.overwrite semantics (as already implemented)" + }, + { + "mistake": "Assuming env.cwd or env.shell is always present or valid", + "consequence": "Formatting/display may be incorrect or panic if env values are unexpected (e.g., non-UTF8 paths/strings)", + "prevention": "Use format_display_path helper and treat environment values as authoritative for display; avoid replacing or removing these uses without verifying its behavior" + }, + { + "mistake": "Removing the special-case None returns (e.g., for Plan or Write when file exists and overwrite is false)", + "consequence": "UI would show titles for operations that should be hidden or for operations that will error, leading to confusing UX", + "prevention": "Preserve explicit None branches and their reasons when editing" + } + ], + "reading_guide": { + "start_here": "to_content", + "key_sections": [ + "ToolCatalog::Read: constructs display path and appends line range when start/end provided", + "ToolCatalog::Write: checks path.exists() and input.overwrite to choose between 'Create', 'Overwrite' or to return None", + "ToolCatalog::FsSearch and SemSearch: build human-readable search titles including pattern/glob/file types", + "ToolCatalog::Shell, Fetch, Followup, Skill, TodoWrite, TodoRead, Task: straightforward title/subtitle construction using env or input fields" + ], + "skip_unless_needed": [ + "the local display_path_for closure (small helper) \u2014 it delegates to crate::utils::format_display_path", + "typical repeated TitleFormat boilerplate where only strings differ" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_app", + "relevant_snippets": [] + }, + "related_files": [ + { + 
"path": "crates/forge_domain/src/lib.rs (ToolCatalog definition)", + "relationship": "Defines the ToolCatalog enum and input payload types that this file matches on", + "likely_co_change": true, + "reason_to_check": "Any changes to ToolCatalog variants or input field names/types require updating each match arm here to maintain correct display behavior" + }, + { + "path": "crates/forge_app/src/utils.rs (format_display_path)", + "relationship": "format_display_path is used to produce user-facing, cwd-relative paths for subtitles", + "likely_co_change": true, + "reason_to_check": "If format_display_path behavior changes (formatting rules, path normalization), subtitles here will change and tests or UI expectations should be validated" + }, + { + "path": "crates/forge_app/src/fmt/content.rs (FormatContent trait & ChatResponseContent conversions)", + "relationship": "Contains the FormatContent trait and conversions to ChatResponseContent used by this impl", + "likely_co_change": true, + "reason_to_check": "Trait signature changes or TitleFormat conversion utilities will affect to_content's return type and construction" + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app --lib", + "cargo test --workspace" + ], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "insights": [ + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Show correct title for write tool operations (Create/Overwrite)", + "problem": "Write tool title incorrectly chosen based solely on input.overwrite flag which could show 'Overwrite' when file didn't exist or show titles in cases where the tool would error.", + "root_cause": "Title decision didn't check whether the file actually exists on disk.", + "solution": "Create a PathBuf from input.path and use path.exists() combined with input.overwrite to decide between 'Create' and 'Overwrite' (and avoid printing when file exists but overwrite==false because the tool 
will error).", + "commit": [ + "03f42a3" + ], + "constructs": [ + "FormatContent for Tools::Write", + "display_path_for" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_spinner/src/lib.rs": { + "file_path": "crates/forge_spinner/src/lib.rs", + "short_description": "Spinner and progress-bar manager that handles terminal spinner lifecycle and elapsed-time formatting.", + "category": "SOURCE_CODE", + "description": "This file provides a reusable SpinnerManager generic over a ConsoleWriter implementation and a companion elapsed-time formatter used by the progress spinner. It exists to centralize spinner lifecycle behavior used by the CLI/TUI: starting and stopping a terminal spinner, preserving accumulated elapsed time across start/stop cycles, caching a random status word for consistent UX across starts, suspending the spinner for writes to stdout/stderr, and ensuring the spinner is cleaned up and output is flushed when dropped. The implementation builds on indicatif::ProgressBar and customizes the template with tick glyphs and a custom elapsed formatter (format_elapsed_time) exposed as an internal helper.\n\nDesign decisions visible in the file: use of an Arc makes SpinnerManager independent from concrete IO implementations (the crate's forge_domain::ConsoleWriter trait is used). Accumulated elapsed time is tracked in a Duration so that start/stop preserves time; the code advances the spinner tick to maintain animation continuity. The file also re-exports a local progress_bar module to share progress-bar related utilities with other parts of the crate. The Drop impl ensures the spinner is stopped and both stdout/stderr are flushed to avoid leaving the terminal in an inconsistent state when the manager is dropped.\n\nThe file contains unit tests (inline #[cfg(test)] mod tests) that validate format_elapsed_time behavior and SpinnerManager state manipulations (reset behavior and the word-index caching across starts). 
Tests use a DirectPrinter implementing ConsoleWriter that writes directly to stdout/stderr, and one async Tokio test to exercise start/stop caching behavior.", + "key_constructs": [ + { + "name": "TICK_DURATION_MS", + "type": "constant", + "purpose": "Defines the millisecond duration per spinner tick (60ms).", + "reasoning": "Used to compute cycles and advance the spinner to the appropriate tick position so animation continuity is preserved after resuming a spinner." + }, + { + "name": "TICKS", + "type": "constant", + "purpose": "List of tick glyph strings used by the spinner animation.", + "reasoning": "Defines visual frames for the spinner; changing this affects all spinner animations and cycle length assumptions." + }, + { + "name": "format_elapsed_time", + "type": "function", + "purpose": "Format a Duration into compact strings like \"01s\", \"1:01m\", or \"1:01h\".", + "reasoning": "Used in the indicatif template via a custom key to display elapsed time in a compact, human-friendly format; tests exercise its exact output, so changes must preserve expected formatting." + }, + { + "name": "SpinnerManager", + "type": "class", + "purpose": "Main spinner lifecycle manager providing start/stop/reset/set_message and IO-safe printing methods.", + "reasoning": "Encapsulates spinner state (ProgressBar), elapsed time accumulation, message and word-index caching, and ensures safe writes while the spinner is active. It is the primary integration point for other code that needs a spinner.", + "callers": [ + { + "file": "crates/forge_main/src/stream_renderer.rs", + "line": 8, + "context": "use forge_spinner::SpinnerManager;" + }, + { + "file": "crates/forge_main/src/stream_renderer.rs", + "line": 15, + "context": "pub struct SharedSpinner(Arc>>);" + }, + { + "file": "crates/forge_main/src/stream_renderer.rs", + "line": 25, + "context": "pub fn new(spinner: SpinnerManager) -> Self {" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 26, + "context": "use forge_spinner::SpinnerManager;" + }, + { + "file": "crates/forge_main/src/ui.rs", + "line": 221, + "context": "let spinner = SharedSpinner::new(SpinnerManager::new(api.clone()));" + } + ] + }, + { + "name": "SpinnerManager::start", + "type": "function", + "purpose": "Create and start an indicatif spinner with accumulated elapsed time and a message (or random cached word).", + "reasoning": "Centralizes the logic to construct the ProgressBar with style, attach the custom elapsed formatter key, restore the spinner's tick position, and enable steady tick. Other code will call this to show progress for a long-running operation." + }, + { + "name": "SpinnerManager::stop", + "type": "function", + "purpose": "Stop the spinner, capture and store elapsed time, finish/clear the spinner, and optionally print a message.", + "reasoning": "Ensures elapsed time is persisted into accumulated_elapsed so subsequent starts continue the timer and ensures the spinner is cleared from the terminal before printing any final message." + }, + { + "name": "SpinnerManager::reset", + "type": "function", + "purpose": "Reset accumulated elapsed time, cached word index, and displayed message to start a fresh task.", + "reasoning": "Used to indicate a new operation; important to clear state so subsequent starts don't continue previous elapsed time or reuse old words." + }, + { + "name": "Drop for SpinnerManager", + "type": "function", + "purpose": "Ensure spinner is stopped and both stdout and stderr are flushed when the manager is dropped.", + "reasoning": "Prevents terminal artifacts (like missing newline or leftover spinner characters) and race conditions with shell prompts by guaranteeing cleanup in Drop."
+ } + ], + "semantic_tags": [ + "spinner", + "console", + "progress", + "timing", + "ui" + ], + "handles_entities": [ + "Spinner (ProgressBar)", + "ConsoleWriter", + "ElapsedTime" + ], + "key_behaviors": [ + "starts a terminal spinner with a message and custom elapsed display", + "preserves and restores accumulated elapsed time across stop/start cycles", + "caches a randomly selected status word for consistent messages", + "suspends spinner to safely write stdout/stderr lines", + "ensures spinner is stopped and outputs flushed on Drop" + ], + "pitfalls": [ + { + "mistake": "Modifying the format_elapsed_time output string formats without updating tests or templates", + "consequence": "Unit tests (format_elapsed_time tests) will fail and terminal display may change unexpectedly; other code that parses or relies on exact strings may break.", + "prevention": "Respect exact string formats used in tests; run unit tests after changes." + }, + { + "mistake": "Altering accumulated_elapsed behavior (e.g., clearing it on stop or not updating it before finish)", + "consequence": "Elapsed time would not be preserved across start/stop cycles, causing visual timer regressions and breaking any logic that expects time carry-over.", + "prevention": "When editing stop/start, ensure spinner.elapsed() is captured into accumulated_elapsed before finish_and_clear() is called." + }, + { + "mistake": "Changing how the spinner tick position is advanced or changing TICK_DURATION_MS/TICKS without coordinating both", + "consequence": "Animation continuity will break (spinner may jump frames) and timing calculations will become incorrect.", + "prevention": "When modifying tick characters or timings, update the cycle computation and tests accordingly." 
+ }, + { + "mistake": "Removing or changing the Drop behavior that stops the spinner and flushes printers", + "consequence": "Terminal can be left in an inconsistent state (missing newline or spinner artifacts) and may create race conditions with shell prompt resets.", + "prevention": "Keep Drop semantics (stop + flush) or ensure equivalent cleanup elsewhere when changing lifecycle behavior." + }, + { + "mistake": "Changing ConsoleWriter interactions (write/write_err/flush/flush_err) or signature expectations", + "consequence": "Any other code depending on forge_domain::ConsoleWriter may break; tests that rely on DirectPrinter may fail.", + "prevention": "Check forge_domain::ConsoleWriter trait contract before editing code that invokes it; run crate tests after any changes." + } + ], + "reading_guide": { + "start_here": "SpinnerManager (struct) and its impl block, especially start and stop methods", + "key_sections": [ + "format_elapsed_time: provides the exact time-string formatting required by the spinner template", + "SpinnerManager::start: shows how the ProgressBar is constructed, custom template and elapsed key wiring, random word caching and tick position restoration", + "SpinnerManager::stop and Drop impl: ensure correct elapsed capture, clearing, printing and flushing behavior" + ], + "skip_unless_needed": [ + "the small helper println/eprintln methods (thin wrappers around ConsoleWriter)", + "the test DirectPrinter implementation unless debugging test IO behavior" + ] + }, + "tests": { + "exercised_by": [ + "inline tests in crates/forge_spinner/src/lib.rs (#[cfg(test)])" + ], + "test_functions": [ + "test_spinner_reset_clears_accumulated_time", + "test_spinner_reset_clears_word_index", + "test_spinner_reset_clears_message", + "test_word_index_caching_behavior", + "test_format_elapsed_time_seconds_only", + "test_format_elapsed_time_minutes_and_seconds", + "test_format_elapsed_time_hours_and_minutes", + "test_format_elapsed_time_zero" + ], + 
"example_command": "cargo test -p forge_spinner --lib", + "relevant_snippets": [ + { + "file": "crates/forge_spinner/src/lib.rs", + "lines": "98-140", + "description": "SpinnerManager::start: constructs ProgressBar, sets template, custom elapsed formatter, and advances ticks for continuity." + }, + { + "file": "crates/forge_spinner/src/lib.rs", + "lines": "142-177", + "description": "SpinnerManager::stop: captures elapsed time, finishes and clears spinner, and prints optional message." + }, + { + "file": "crates/forge_spinner/src/lib.rs", + "lines": "209-260", + "description": "Unit tests validating reset and elapsed formatting behavior (format_elapsed_time and reset semantics)." + } + ] + }, + "related_files": [ + { + "path": "crates/forge_domain/src/lib.rs (ConsoleWriter trait)", + "relationship": "SpinnerManager depends on the ConsoleWriter trait for platform-agnostic stdout/stderr operations and flushing", + "likely_co_change": true, + "reason_to_check": "If the ConsoleWriter trait changes (method names, semantics, error handling), SpinnerManager must be updated to maintain compatibility." + }, + { + "path": "crates/forge_spinner/src/progress_bar.rs", + "relationship": "Local module re-exported by lib.rs (progress_bar module is pub use'd); likely contains additional spinner/progress helpers", + "likely_co_change": true, + "reason_to_check": "Changes in progress_bar.rs may alter expectations or exported utilities used by other crates; coordinate when modifying progress-bar APIs or style." + }, + { + "path": "Cargo.toml (forge_spinner crate)", + "relationship": "Build-time dependency configuration (indicatif, rand, colored, pretty_assertions for tests)", + "likely_co_change": false, + "reason_to_check": "If API usage of indicatif or rand is modified, ensure Cargo.toml dependencies and versions remain compatible." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_spinner --lib", + "cargo test --workspace (to ensure no workspace-wide regressions)" + ], + "data_constants_to_check": [ + "TICK_DURATION_MS", + "TICKS" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_main/src/info.rs": { + "file_path": "crates/forge_main/src/info.rs", + "short_description": "Terminal information display builder that formats Environment, Config, Metrics, Usage, and Conversation data into aligned sections for CLI output", + "category": "SOURCE_CODE", + "description": "This file implements a reusable, structured way to render hierarchical informational output to the terminal. It centralizes the presentation logic used by the CLI/TUI to display environment details, configuration values, metrics, token usage, and conversation summaries. The design is intentionally value-centric: domain objects (Environment, ForgeConfig, Metrics, Usage, Conversation) are converted into an intermediate Info representation that describes titles, key/value items, and standalone values. The final formatted output (fmt::Display for Info, later in the file) computes per-section alignment/padding, applies constants from crate::display_constants (placeholders, status markers, command types), and emits colorized or plain text rows for the CLI.\n\nWhy it exists: the CLI surfaces many heterogeneous diagnostic items (paths, numbers, statuses, lists, task/feedback from conversations). Info provides a single canonical representation so that all consumers render consistently, and so porcelain/portrayal modes can consistently align keys within a section and preserve ordering across runs. 
The commit history shows multiple UX-driven changes here (task/feedback extraction for conversations, per-section fixed-width key columns, and fixes for key-only/value-only rows), so tests depend on stable behavior of ordering, padding, and empty/value handling.\n\nHow it fits the system: callers construct Info from domain objects (there are From<&T> impls for Environment, ForgeConfig, Metrics, Usage, and Conversation). The rest of the application (CLI commands in crates/forge_main and the TUI code) consumes the Display output to show information to users. This file depends on and uses types from forge_api (Environment, ForgeConfig, Metrics, Usage, Conversation) and display helpers/constants from crate::display_constants; it also references forge_tracker::VERSION. It is a presentation-layer module and does not mutate domain state.\n\nImportant design decisions: (1) Section::Items stores an Option and a value string \u2014 this is deliberate to support three row shapes (key+value, key-only, value-only). (2) Keys are normalized (lowercased) during add_key_value to produce consistent internal keys while rendering uses Title Case / colorization as required. (3) The file chooses to treat missing/None values by replacing them with markers::EMPTY. (4) Per-section padding / max-key-width computation is enforced (see commit history) so modifications must preserve that behavior to avoid breaking tests that assert alignment.\n", + "key_constructs": [ + { + "name": "Section", + "type": "enum", + "purpose": "Represents a row in an Info output: either a Title or an Items(row) which holds an optional key and a value string.", + "reasoning": "Section unifies title rows and item rows so the rendering logic can iterate a single vector and decide per-entry how to print it. The Option key allows key-only, value-only, and key/value rows without changing the type shape elsewhere." 
+ }, + { + "name": "Section::key", + "type": "function", + "purpose": "Accessor that returns the key string slice for Section::Items entries, or None otherwise.", + "reasoning": "Used by rendering and by column-width computations to discover which rows in a section have labeled keys; keep this behavior stable because rendering logic relies on Option semantics." + }, + { + "name": "Info", + "type": "struct", + "purpose": "Holds a Vec<Section> representing the ordered rows (titles and items) to be printed.", + "reasoning": "Acts as the canonical intermediate representation between domain objects and textual output. Methods allow building Info fluently (add_title, add_key_value, add_value, add_key, extend). Any edits to the representation or builder methods will affect all consumers' display output." + }, + { + "name": "Info::add_title", + "type": "function", + "purpose": "Pushes a Title row into the Info.sections vector and returns self for chaining.", + "reasoning": "Section titles mark group boundaries. Tests and consumers expect titles to be present and in uppercase; altering when and how titles are added or their normalization can affect grouping and alignment logic."
+ }, + { + "name": "Info::add_item", + "type": "function", + "purpose": "Internal helper that pushes a Section::Items entry using the provided key Option and IntoInfoValue, converting None to markers::EMPTY.", + "reasoning": "Centralized conversion ensures consistent behavior for all builder entry points (key-only, value-only, key-value) and sets the canonical handling of empty values. Changing it impacts rendering of missing values across the board." + }, + { + "name": "IntoInfoValue", + "type": "trait", + "purpose": "Trait that unifies various types (String, &str, Option, CommandType, etc.) into Option used by Info builder.", + "reasoning": "This trait allows the builder API to accept many input shapes ergonomically. Keep trait implementations consistent; adding/removing impls changes caller ergonomics and may require adapting many call sites." + }, + { + "name": "From<&Environment> for Info", + "type": "impl", + "purpose": "Converts Environment into an Info instance containing environment and path-related sections.", + "reasoning": "This is how the app renders environment diagnostics (cwd, shell, git branch, logs/agents/history paths). It performs conditional inclusion (only shows logs path if folder exists), uses helpers get_git_branch and format_path_for_display, and inserts VERSION. Preserving these conditionals and path formatting is important for reproducible CLI output." + }, + { + "name": "From<&ForgeConfig> for Info", + "type": "impl", + "purpose": "Converts ForgeConfig into an Info instance listing retry, http, API/tool, and system configuration.", + "reasoning": "This function enumerates many configuration fields (timeouts, TLS backend, root cert paths, tool timeouts) and uses markers::EMPTY for absent values. Tests may depend on exact keys/labels and formats (e.g., 'Connect Timeout' in seconds)." 
+ }, + { + "name": "From<&Metrics> for Info", + "type": "impl", + "purpose": "Converts Metrics into an Info instance describing task completion time and file changes; filters out files with no changes.", + "reasoning": "This impl contains non-obvious behavior: it computes and formats durations (using chrono::Utc and humantime), only includes files with lines_added or lines_removed > 0, and formats added/removed counts with +/\u2212 signs. Tests assert filtering and formatting behavior, so keep the conditions and formatting stable." + }, + { + "name": "From<&Usage> for Info", + "type": "impl", + "purpose": "Converts Usage into a token usage Info block (input, cached, output tokens, and optional cost), computing a cached percentage.", + "reasoning": "This implementation uses num_format for localized formatting and has logic to show cached tokens with a '[%]' suffix only when cache hits exist. The exact string formats are part of the UI contract." + }, + { + "name": "format_user_message", + "type": "function", + "purpose": "Helper used (per commit history) to produce a display string for a user message preferring raw_content then falling back to rendered content.", + "reasoning": "Conversation display semantics rely on extracting the 'task' (first user message) and 'feedback' (last user message) in multi-turn conversations. Tests were added to ensure this preference; maintain the message preference and trimming rules." 
+ } + ], + "semantic_tags": [ + "display", + "formatting", + "cli", + "metrics", + "environment", + "presentation" + ], + "handles_entities": [ + "Environment", + "ForgeConfig", + "Metrics", + "Usage", + "Conversation" + ], + "key_behaviors": [ + "formats environment, config, metrics, usage, and conversation data into terminal sections", + "normalizes keys and aligns them per-section for consistent display", + "replaces missing values with markers::EMPTY and conditionally hides empty sections" + ], + "pitfalls": [ + { + "mistake": "Modify Section::Items shape or the Option semantics without updating rendering logic.", + "consequence": "Value-only and key-only rows will render incorrectly; tests that assert row shapes and alignment will break.", + "prevention": "Preserve Option handling and ensure render code (fmt::Display) still branches on presence/absence of the key." + }, + { + "mistake": "Change key normalization (lowercasing) in add_key_value or change labels/phrasing of keys.", + "consequence": "Per-section padding logic and tests that rely on specific key strings/alignment will fail, and CLIs might display inconsistent key labels.", + "prevention": "When altering key text, also update any tests and the display expectations; prefer keeping the internal normalization logic unchanged unless all call sites are updated." + }, + { + "mistake": "Remove or alter the filtering logic in From<&Metrics> that skips files with zero line changes.", + "consequence": "Noisy output with irrelevant files; tests expecting filtered output will fail.", + "prevention": "Keep the filter that shows only files where lines_added > 0 || lines_removed > 0, or update tests accordingly." 
+ }, + { + "mistake": "Change representations of empty values (markers::EMPTY) or how Option values are rendered without coordinating with display_constants.", + "consequence": "Users or tests expecting '[empty]' may see different tokens; colorization or downstream parsers that rely on these exact markers may break.", + "prevention": "Ensure markers::EMPTY is still used as the canonical placeholder and that display_constants remains the single source of truth." + }, + { + "mistake": "Modify the formatting of numbers/durations (num_format Locale usage or humantime duration formatting) without preserving locale/precision.", + "consequence": "Localization-sensitive output can change; tests that check formatted output strings will fail.", + "prevention": "Retain use of Locale::en and the current format invocations or update tests to the new formats." + } + ], + "reading_guide": { + "start_here": "Info (struct) and the impl fmt::Display for Info (the rendering logic) \u2014 understand how sections are iterated and padded before changing builders or conversions", + "key_sections": [ + "Info::add_item: central place where items are normalized and markers::EMPTY is applied", + "From<&Environment> for Info: shows how environment data and paths are selected and conditionally included", + "From<&Metrics> for Info: contains non-obvious filtering and formatting for file changes and durations", + "IntoInfoValue impls: shows allowed input types for builder methods" + ], + "skip_unless_needed": [ + "Top-of-file documentation examples and long doc comments when only changing small builder behavior", + "Internal tests (if any) that only assert cosmetic variants unless you are changing formatting" + ] + }, + "tests": { + "exercised_by": [ + "inline tests in crates/forge_main/src/info.rs" + ], + "test_functions": [ + "test_conversation_info_display_with_task", + "test_info_display_with_consistent_key_padding" + ], + "example_command": "cargo test -p forge_main --lib", + 
"relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_main/src/display_constants.rs", + "relationship": "Provides placeholders, status markers, and CommandType used by Info for empty-value tokens and value formatting.", + "likely_co_change": true, + "reason_to_check": "If you change the placeholder token or marker names used by Info, update display_constants and tests that assert those tokens." + }, + { + "path": "crates/forge_api/src/lib.rs (types)", + "relationship": "Defines domain types Environment, ForgeConfig, Metrics, Usage, Conversation used as sources for Info conversions.", + "likely_co_change": true, + "reason_to_check": "If domain fields or names change (e.g., metrics.file_operations shape or Usage.cost), you must adapt the From impls in this file." + }, + { + "path": "crates/forge_tracker/src/lib.rs", + "relationship": "Exports VERSION consumed by Info to show the app version in ENVIRONMENT section.", + "likely_co_change": false, + "reason_to_check": "If VERSION format/semantics change, the ENVIRONMENT display will change." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_main --lib", + "cargo test --workspace (runs full test-suite; CI uses this)" + ], + "data_constants_to_check": [ + "crates/forge_main/src/display_constants.rs: markers::EMPTY", + "crates/forge_api types: Metrics.file_operations shape, Usage fields, ForgeConfig.http fields" + ], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "insights": [ + { + "type": "feature", + "category": "UX", + "title": "Display user Task and Feedback in conversation info", + "problem": "Conversation summaries didn't surface the user's task or follow-up feedback clearly.", + "root_cause": "Only last user message wasn't always the best representation; multi-turn user sequences needed first-of-sequence and last-of-sequence extraction.", + "solution": "Add Conversation::first_user_messages() helper and Info construction uses the first user message as Task and last user message (if more than one) as Feedback. Implement format_user_message() helper that prefers raw_content and falls back to rendered content; tests added.", + "commit": [ + "2437d84", + "c796876" + ], + "constructs": [ + "format_user_message", + "Conversation::first_user_messages", + "Info::from(&Conversation)" + ] + }, + { + "type": "refactoring", + "category": "Display", + "title": "Make info key column fixed width per section", + "problem": "Info display keys misaligned; inconsistent spacing reduced readability.", + "root_cause": "No per-section key-width computation.", + "solution": "Compute max key width per section in Info fmt and pad keys within that section. 
Tests added to ensure consistent padding within sections and allow different sections to have different widths.", + "commit": [ + "95a0bb5" + ], + "constructs": [ + "Info::fmt", + "Section::key" + ] + }, + { + "type": "refactoring", + "category": "Formatting", + "title": "Info API redesigned to support flexible porcelain rows", + "problem": "Previous Info->to_rows logic couldn't handle varying row lengths and inconsistent field orders across sections", + "root_cause": "Old to_rows produced fragile column sets; porcelain formatting required consistent columns even when fields varied per title", + "solution": "Rewrite Info internal representation: Section::Items stores Option + value; add methods to_rows_with_title and to_rows_without_title; implement build_consistent_row to align fields across titles using a discovered field_order", + "commits": [ + "c71b2a4", + "ed24862" + ], + "constructs": [ + "Info::to_rows", + "Info::to_rows_with_title", + "Info::to_rows_without_title", + "Info::build_consistent_row", + "Section::Items" + ], + "lesson_learned": "When producing tabular/porcelain output from semi-structured data, first canonicalize field order then emit rows filling missing cells with empty strings. Centralize row-building to avoid duplicated alignment logic." 
+ }, + { + "type": "bug_fix", + "category": "Edge Case", + "title": "Fix display of key-only items and value-only items", + "problem": "Key-only items (like a tools list) and value-only items were mis-rendered after API changes", + "root_cause": "Info API changed signature of add_key_value/add_value and the rendering didn't account for Option semantics", + "solution": "Adjust Section variants and display logic to correctly print key/value or value-only lines; update fmt::Display branches", + "commits": [ + "9b3b618", + "c71b2a4" + ], + "constructs": [ + "impl fmt::Display for Info" + ], + "lesson_learned": "When changing data model shapes (key/value/optional), ensure printing/display logic is updated to avoid inverted semantics." + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_config/src/config.rs": { + "file_path": "crates/forge_config/src/config.rs", + "short_description": "Defines the top-level ForgeConfig schema and provider configuration structures with read/write helpers.", + "category": "SOURCE_CODE", + "description": "This file declares the Forge configuration data model used throughout the application. It exists to provide a single strongly-typed representation (ForgeConfig) that is (de)serializable, documented with schemars JsonSchema, and merged from multiple sources via the ConfigReader/ConfigWriter helpers. The design puts provider-related types (ProviderEntry, ProviderUrlParam, ProviderAuthMethod, ProviderResponseType, ProviderTypeEntry) in the same module because provider definitions are a first-class part of the global configuration and need to be represented in the same persisted/readable schema (TOML/JSON). 
Many fields use Option and other wrapper types to allow layering of configuration sources without unintentionally overwriting defaults (historical commit notes indicate this conversion from concrete types to Option was made to avoid legacy-layer overwrites).\n\nThe ForgeConfig struct centralizes many operational limits (file/line size limits, timeouts), HTTP settings, provider additions/overrides, sampling parameters (temperature/top_p/top_k/max_tokens), reasoning/compaction settings, and references to ModelConfig for different use-cases (session/commit/suggest). It exposes two convenience methods: ForgeConfig::read to read and merge config sources using ConfigReader, and ForgeConfig::write to persist the effective config using ConfigWriter. The file also contains unit tests that validate TOML serialization round-trip behavior for Decimal-wrapped floating values (temperature, top_p), and that deserialization via the ConfigReader preserves Decimal values.", + "key_constructs": [ + { + "name": "ProviderResponseType", + "type": "enum", + "purpose": "Represents the wire protocol a provider uses for chat completions (OpenAI, Anthropic, Google, Bedrock, etc.).", + "reasoning": "Consumers (provider clients, model selection UI) need to know which protocol/wire semantics to use per provider. This enum is small and explicit so downstream code can match on known protocols." + }, + { + "name": "ProviderTypeEntry", + "type": "enum", + "purpose": "Categorizes a provider (e.g., llm or context engine).", + "reasoning": "Used to distinguish providers by capability; defaults to Llm. It's serialized with snake_case and has a Default variant; preserving that default behavior matters when merging provider entries." 
+ }, + { + "name": "ProviderAuthMethod", + "type": "enum", + "purpose": "Enumerates supported non-OAuth authentication methods for inline provider entries (ApiKey, GoogleAdc).", + "reasoning": "Inline provider entries in forge.toml are limited to simple auth methods; OAuth-based providers are handled via file overrides. This enum makes that explicit in the schema." + }, + { + "name": "ProviderUrlParam", + "type": "struct", + "purpose": "Describes an environment-variable-backed substitution parameter used inside provider URL templates.", + "reasoning": "Template variable support for provider URL strings requires a typed description so UI and serialization can surface parameter names and optional suggestion lists." + }, + { + "name": "ProviderEntry", + "type": "struct", + "purpose": "A single inline provider definition that can override or extend built-in providers (id, url templates, auth methods, headers, etc.).", + "reasoning": "Forge allows merging of inline provider definitions with the built-in list; ProviderEntry fields are optional where appropriate to allow field-by-field overrides. Editors must treat these fields as mergeable rather than authoritative replacements." + }, + { + "name": "ForgeConfig", + "type": "struct", + "purpose": "Primary application configuration structure that captures defaults, limits, provider extensions, model defaults, and behavioral toggles.", + "reasoning": "This is the central authoritative configuration that other crates consume. Many fields are Option or have serde defaults; any change to field types or serde attributes will affect config layering, defaults, and backward compatibility." 
+ }, + { + "name": "ForgeConfig::read", + "type": "function", + "purpose": "High-level method that delegates to ConfigReader to read and merge configuration from defaults, legacy files, global config file, and environment variables.", + "reasoning": "All callers should use this to obtain the merged configuration; it encodes the source order and merging semantics via ConfigReader. Tests and other crates rely on this merging behavior." + }, + { + "name": "ForgeConfig::write", + "type": "function", + "purpose": "Serializes and writes the configuration to the user config file using ConfigWriter.", + "reasoning": "Used to persist user changes to config. It uses ConfigReader::config_path() to determine where to write; changing this behavior may break expected file locations." + } + ], + "semantic_tags": [ + "configuration", + "serialization", + "providers", + "defaults", + "toml" + ], + "handles_entities": [ + "ForgeConfig", + "ProviderEntry", + "ProviderUrlParam", + "ModelConfig", + "RetryConfig", + "HttpConfig" + ], + "key_behaviors": [ + "reads and merges configuration from defaults, legacy, global file, and environment", + "serializes/deserializes configuration to/from TOML/JSON", + "represents inline provider overrides that merge with built-in providers", + "exposes application-level resource/timeout limits and model defaults" + ], + "pitfalls": [ + { + "mistake": "Changing field types (e.g., converting Option to T) or serde attributes without coordinating ConfigReader merging semantics.", + "consequence": "Will break layering semantics and can cause legacy or partial config files to overwrite defaults unintentionally.", + "prevention": "Respect existing Option patterns and merging behavior implemented in ConfigReader when modifying fields or serialization attributes." 
+ }, + { + "mistake": "Altering TOML/serialization formatting for Decimal/wrapped float fields.", + "consequence": "Unit tests that assert exact TOML fragments (e.g., 'temperature = 0.1\\n') will fail and consumers expecting that format may misparse values.", + "prevention": "Preserve Decimal wrapper semantics and serde formatting for floating values; run the crate's tests after changes." + }, + { + "mistake": "Modifying provider merging semantics or renaming provider fields without updating reader/writer and any code that depends on provider id-based merging.", + "consequence": "Provider overrides in forge.toml may no longer merge with built-ins correctly, leading to missing providers or incorrect provider configurations at runtime.", + "prevention": "Check ConfigReader/ConfigWriter behavior and other crates that consume providers (service clients, model selection UI) when touching ProviderEntry or provider-related fields." + }, + { + "mistake": "Changing the default services_url value or its Dummy default expression carelessly.", + "consequence": "Tests and behavior that assume the default base URL (https://api.forgecode.dev/api) could break; tooling that relies on default will change endpoint targets.", + "prevention": "If adjusting defaults, ensure tests and any external integration points are updated accordingly." 
+ } + ], + "reading_guide": { + "start_here": "ForgeConfig (the struct) - it is the canonical configuration object and the best place to understand what the app reads and writes.", + "key_sections": [ + "ForgeConfig: lists all configurable fields and docs about their purpose and defaults", + "ProviderEntry and related enums: how provider overrides are represented and merged", + "impl ForgeConfig::read / ForgeConfig::write: shows how callers should obtain/persist the merged config" + ], + "skip_unless_needed": [ + "derive and attribute annotations (serde, JsonSchema, Dummy) - relevant for schema and tests but can be skipped on behavioral edits", + "unit tests at the bottom (unless validating serialization behavior)" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_config/src/config.rs (inline unit tests)" + ], + "test_functions": [ + "test_f32_temperature_round_trip", + "test_f32_top_p_round_trip", + "test_f32_temperature_deserialize_round_trip" + ], + "example_command": "cargo test --manifest-path crates/forge_config/Cargo.toml --quiet", + "relevant_snippets": [ + { + "file": "crates/forge_config/src/config.rs", + "lines": "116-140", + "description": "test_f32_temperature_round_trip: ensures temperature Decimal serializes to 'temperature = 0.1' in TOML." + }, + { + "file": "crates/forge_config/src/config.rs", + "lines": "142-164", + "description": "test_f32_top_p_round_trip: ensures top_p Decimal serializes to 'top_p = 0.9' in TOML." + }, + { + "file": "crates/forge_config/src/config.rs", + "lines": "166-190", + "description": "test_f32_temperature_deserialize_round_trip: serializes then reads back via ConfigReader::read_toml to ensure the Decimal value round-trips through the reader." + } + ] + }, + "related_files": [ + { + "path": "crates/forge_config/src/reader.rs", + "relationship": "Reads and merges the configuration sources (defaults, legacy, global, environment) referenced by ForgeConfig::read. 
Changes in ForgeConfig fields or serde attributes must be coordinated with reader merging logic.", + "likely_co_change": true, + "reason_to_check": "If editing field types, default values, or merging semantics, update reader.rs to preserve layering and legacy behavior." + }, + { + "path": "crates/forge_config/src/writer.rs", + "relationship": "Serializes and persists the ForgeConfig to disk as used by ForgeConfig::write.", + "likely_co_change": true, + "reason_to_check": "If altering serialization or path resolution semantics, ensure writer.rs continues to write in the expected location and format." + }, + { + "path": "crates/forge_config/src/types.rs (or other crate modules declaring ModelConfig/RetryConfig/etc.)", + "relationship": "ForgeConfig references types (ModelConfig, RetryConfig, HttpConfig, Compact, Decimal, ReasoningConfig, Update) that are defined elsewhere in the crate.", + "likely_co_change": true, + "reason_to_check": "Modifying the referenced types or their serde behavior can impact ForgeConfig serialization and consumers." + }, + { + "path": "crates/forge_main/src/main.rs", + "relationship": "Top-level application that loads configuration via ForgeConfig::read and relies on config defaults and provider entries at startup.", + "likely_co_change": false, + "reason_to_check": "If changing defaults or provider semantics, confirm application behavior and startup logic remain correct." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test --manifest-path crates/forge_config/Cargo.toml", + "cargo test --workspace (if making cross-crate changes)" + ], + "data_constants_to_check": [ + "services_url default value (Dummy expression) - ensure any endpoint defaults are intentional", + "serialization of Decimal-wrapped floats - confirm TOML/serde output remains stable" + ], + "owns_authoritative_data": true, + "public_api_surface": true + }, + "insights": [ + { + "type": "refactoring", + "category": "Configuration", + "title": "Make many ForgeConfig fields optional to avoid legacy overwrites and centralize defaults", + "problem": "Legacy JSON/TOML layers could overwrite default configuration values unintentionally when fields were present but didn't represent full configuration.", + "root_cause": "ForgeConfig used concrete types for many fields; when layering legacy config, missing fields would be zeroed instead of preserved.", + "solution": "Convert many ForgeConfig fields to Option, update schema and reader logic so that missing fields don't override defaults, add tests ensuring legacy layer doesn't overwrite defaults.", + "commits": [ + "69882c6", + "fbeea84" + ], + "constructs": [ + "struct ForgeConfig (changed field types)", + "ConfigReader/reader changes (legacy handling)" + ], + "lesson_learned": "When layering configuration sources, represent optional overrides as Option so default values can be preserved unless explicitly set by higher-priority layers." + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_app/src/compact.rs": { + "file_path": "crates/forge_app/src/compact.rs", + "short_description": "Context compaction service: summarize and compress assistant message sequences while preserving usage and reasoning continuity.", + "category": "SOURCE_CODE", + "description": "This file defines a Compactor service responsible for performing compaction (summarization) of conversation context. 
It exists to reduce long assistant/user message sequences into a single user-facing summary message while preserving important metadata: accumulated usage metrics and the most recent non-empty structured reasoning. It also applies a transformer pipeline (SummaryTransformer) to a ContextSummary before rendering the summary via the TemplateEngine; this pipeline normalizes and trims operations like deduplication and path-stripping so the summary is compact and stable.\n\nThe Compactor is designed to be used by higher-level conversation management code when the context exceeds configured retention/eviction windows. It respects a Compact configuration (eviction and retention windows) and delegates the actual transformer work to SummaryTransformer and the rendering to TemplateEngine. Important design goals visible in the implementation: (1) do not lose usage/tokens when replacing a range of messages \u2014 accumulate usage across the range and attach it to the inserted summary MessageEntry; (2) preserve continuity of structured reasoning chains by extracting the most recent non-empty reasoning_details from the compacted sequence and injecting it into the first remaining assistant message (without allowing exponential accumulation of reasoning across repeated compactions); (3) remove droppable messages (attachments, etc.) from the final context.\n\nThe file also includes a rich set of unit tests (inline module tests) that exercise reasoning preservation, reasoning non-accumulation across compactions, filtering of empty reasoning blocks, rendering of summary templates for a variety of tool call types (including todo_write tools), and an async snapshot-based rendering test that uses a conversation fixture. These tests both validate compaction logic and lock the textual output for summary templates via insta snapshots. 
Commit history (provided) documents previous bugs fixed here: usage preservation, reasoning continuity, and respecting per-agent compaction configuration and token counts.", + "key_constructs": [ + { + "name": "Compactor", + "type": "struct", + "purpose": "Encapsulates compaction configuration (Compact) and environment for performing context compaction operations.", + "reasoning": "This is the main entry point consumers will construct and call to compact a Context. An agent editing this file must preserve how Compact and Environment are stored and used (they determine eviction/retention behavior and path transformations)." + }, + { + "name": "transform", + "type": "function", + "purpose": "Apply the SummaryTransformer pipeline to a ContextSummary (normalization, deduplication, trimming, path-strip).", + "reasoning": "This function centralizes the transformer pipeline applied to summaries prior to rendering. Changes to transformer ordering or inputs must retain semantics relied on by tests (especially template rendering snapshots)." + }, + { + "name": "compact", + "type": "function", + "purpose": "Given a Context and a boolean max flag, pick a compaction strategy (eviction/retention) and, if a sequence to evict is found, compress it.", + "reasoning": "This function translates Compact settings into a CompactionStrategy and invokes compress_single_sequence when needed. 
Its behavior affects when and how sequences are chosen for compaction; preservation of the strategy selection logic and the max flag semantics is important for correct disk/UX behavior.", + "callers": [ + { + "file": "crates/forge_app/src/app.rs", + "line": 246, + "context": "let compacted_context = Compactor::new(compact, environment).compact(context, true)?;" + }, + { + "file": "crates/forge_app/src/hooks/compaction.rs", + "line": 43, + "context": ".compact(context.clone(), false)?;" + } + ] + }, + { + "name": "compress_single_sequence", + "type": "function", + "purpose": "Compress a single identified (start,end) message sequence: filter droppables, create a summary, accumulate usage, inject preserved reasoning, splice summary into the context, and remove droppable messages.", + "reasoning": "This routine contains the complex state transitions and invariants: it must accumulate usage across the replaced range, preserve exactly the last non-empty reasoning_details (and not accumulate them over repeated compactions), filter droppable messages, render the summary via TemplateEngine and replace the message slice atomically. Editors must respect the sequence mutation and the conditional reasoning injection logic." 
+ } + ], + "semantic_tags": [ + "compaction", + "context", + "template-rendering", + "reasoning-preservation", + "usage-accumulation" + ], + "handles_entities": [ + "Context", + "ContextMessage", + "ContextSummary", + "MessageEntry", + "ReasoningFull", + "Usage" + ], + "key_behaviors": [ + "replaces a sequence of messages with a rendered summary frame", + "accumulates and transfers usage metrics from compacted messages to the summary entry", + "extracts and injects the last non-empty reasoning_details into the first assistant message after compaction", + "removes droppable messages from context after compaction", + "applies a transformer pipeline to ContextSummary before rendering" + ], + "pitfalls": [ + { + "mistake": "Splicing the message range without aggregating per-message Usage", + "consequence": "Total usage/token counts are lost or underreported after compaction; metrics and billing estimates become incorrect", + "prevention": "Preserve the reduction step that iterates over the range and accumulates/attaches usage to the new summary MessageEntry" + }, + { + "mistake": "Naively copying reasoning_details from the first assistant message in the range", + "consequence": "Reasoning chains can be broken or reasoning blocks can accumulate exponentially across repeated compactions", + "prevention": "Keep the exact behavior: find the last non-empty reasoning_details in the compacted sequence and inject only that reasoning into the first remaining assistant message (and only if that first assistant has no reasoning)" + }, + { + "mistake": "Assuming droppable messages should remain in the messages vector", + "consequence": "Droppable items (attachments/tool outputs) may be duplicated, cause noise in summaries, or influence compaction ranges incorrectly", + "prevention": "Respect the existing filtering (.is_droppable()) and the final retain() call that removes droppables after splicing" + }, + { + "mistake": "Changing the template output shape without updating snapshot 
tests", + "consequence": "Snapshot tests (insta) will fail; CI treats test regressions seriously and snapshots must be intentionally updated with acceptance tooling", + "prevention": "When modifying templates or the data passed into them, run snapshot tests and use the project's snapshot acceptance flow if intended (see edit_checklist)" + } + ], + "reading_guide": { + "start_here": "Compactor", + "key_sections": [ + "compact: chooses compaction strategy and routes to compress_single_sequence", + "compress_single_sequence: the mutation-heavy logic that builds the summary, accumulates usage, filters droppables and injects reasoning", + "transform: where SummaryTransformer is applied before rendering", + "tests module: concrete examples of expected behavior (reasoning preservation, no-accumulation, template snapshots)" + ], + "skip_unless_needed": [ + "tracing::info logging lines", + "detailed template content in tests except when changing rendering or template inputs" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_app/src/compact.rs (inline unit tests in mod tests)" + ], + "test_functions": [ + "test_compress_single_sequence_preserves_only_last_reasoning", + "test_compress_single_sequence_no_reasoning_accumulation", + "test_compress_single_sequence_filters_empty_reasoning", + "test_template_engine_renders_summary_frame", + "test_template_engine_renders_todo_write", + "test_render_summary_frame_snapshot" + ], + "example_command": "cargo test -p forge_app --lib --tests", + "relevant_snippets": [ + { + "file": "crates/forge_app/src/compact.rs", + "lines": "mod tests (inline) entire block", + "description": "Unit tests that validate reasoning preservation, non-accumulation across compactions, droppable filtering, and template snapshot rendering" + } + ] + }, + "related_files": [ + { + "path": "crates/forge_domain", + "relationship": "Defines domain types used here (Context, ContextMessage, ContextSummary, Usage, ReasoningFull, CompactionStrategy, Compact). 
Core invariants and types come from this crate.", + "likely_co_change": true, + "reason_to_check": "Changes to the domain types (e.g., fields on Usage or ReasoningFull, or API changes to Context::messages) will require corresponding updates in compaction logic and tests." + }, + { + "path": "crates/forge_app/src/transformers.rs", + "relationship": "Implements SummaryTransformer used in transform(); transformer pipeline semantics (deduplication/trim/path-strip) directly affect summary content.", + "likely_co_change": true, + "reason_to_check": "If transformer behavior or its contract changes, the rendered summary and the tests that assert snapshot outputs must be reviewed." + }, + { + "path": "crates/forge_app/src/template_engine.rs", + "relationship": "Provides TemplateEngine::default().render used to render 'forge-partial-summary-frame.md'. The template output format is asserted in snapshot tests.", + "likely_co_change": true, + "reason_to_check": "Template changes or changes to the data shape passed to templates will impact snapshot tests and compaction outputs." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_app", + "cargo test -p forge_app --lib --tests", + "cargo insta test --package forge_app --accept (only when intentionally updating snapshots)" + ], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "insights": [ + { + "type": "feature", + "category": "Other", + "title": "Template rendering for todo_write included in compact summary tests", + "problem": "Summary templates needed to include representation for todo_write calls.", + "root_cause": "New todo_write tool inserted ToolCatalog entries; template engine had to render Task frames.", + "solution": "Added a test that composes a ContextSummary with a TodoWrite SummaryToolCall and asserts rendered template snapshot.", + "lesson_learned": "When extending tool set, add snapshot tests for summary rendering to ensure the assistant's system prompts and summary frames incorporate new tool outputs.", + "commits": [ + "4f1ad6b" + ], + "constructs": [ + "render_template (test)", + "ContextSummary building in tests" + ], + "test_mappings": { + "exercised_by": [ + "crates/forge_app/src/snapshots/forge_app__compact__tests__template_engine_renders_todo_write.snap" + ], + "source_commits": [ + "4f1ad6b" + ] + } + }, + { + "type": "bug_fix", + "category": "State Management", + "title": "Preserve accumulated usage during compaction", + "problem": "During context compaction a range of messages was replaced by a summary message but accumulated usage metrics were discarded, causing token counts and usage aggregation to be incorrect after compaction.", + "root_cause": "Compaction replaced message range without transferring/accumulating per-message Usage into the summary entry; accumulate_usage() therefore lost those counts.", + "solution": "Before splicing, iterate over messages in the compaction range and reduce/accumulate their Usage into a single Usage value; attach that Usage to the summary 
MessageEntry inserted into the messages vector. Tests updated to assert preserved per-message and total usage.", + "commits": [ + "fbaedf7" + ], + "constructs": [ + "compress_single_sequence", + "MessageEntry::from", + "accumulate_usage" + ] + }, + { + "type": "bug_fix", + "category": "State Management", + "title": "Preserve reasoning continuity after compaction without accumulation", + "problem": "Compaction could break extended reasoning chains or cause reasoning_details to accumulate exponentially across compactions", + "root_cause": "Compaction replaced message ranges with summaries but did not preserve the last assistant reasoning, leading to broken chains or duplicates after multiple compactions", + "solution": "When compressing a sequence, extract the last non-empty reasoning_details from the sequence and inject it into the first assistant message remaining after compaction (preserving single last reasoning only). Also added tests covering preserve-last-reasoning, no-accumulation, and skipping empty reasoning entries.", + "commits": [ + "e7bde70", + "b0ba8c2" + ], + "constructs": [ + "Compactor::compress_single_sequence", + "Compactor::generate_summary_for_sequence", + "Compactor::compress", + "compress_single_sequence (test helpers)" + ], + "test_mappings": { + "exercised_by": [ + "tests in crates/forge_app/src/compact.rs (new tokio::tests in commit e7bde70)" + ], + "source_commits": [ + "e7bde70", + "b0ba8c2" + ] + }, + "lesson_learned": "When summarizing message history that includes structured reasoning, preserve the minimal necessary reasoning (the most recent non-empty block) to maintain a consistent reasoning chain while avoiding duplication across repeated compactions"
+ }, + { + "type": "performance", + "category": "Edge Case", + "title": "Compactor uses configured compact settings and returns metrics", + "problem": "Compaction used approximated token counts and naive agent selection", + "root_cause": "Original compaction calculated token estimates and chose the first agent rather than an agent with compaction config", + "solution": "Use actual token count (Context.token_count), find the agent matching active_agent and use its compact config if present; compactor now constructed with compact configuration and returns actual token counts in CompactionResult.", + "commits": [ + "b0ba8c2" + ], + "constructs": [ + "Compactor::new(..., compact)", + "Compactor::compact", + "CompactionResult::new" + ], + "lesson_learned": "Prefer precise counts over approximations where available for metrics; respect per-agent compaction configuration and handle missing config by returning no-op compaction result." + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/context_engine.rs": { + "file_path": "crates/forge_repo/src/context_engine.rs", + "short_description": "gRPC-backed WorkspaceIndexRepository implementation that maps proto RPCs to domain types.", + "category": "SOURCE_CODE", + "description": "This file implements a gRPC-based repository (ForgeContextEngineRepository) that satisfies the WorkspaceIndexRepository trait from the domain layer. Its purpose is to translate domain-level workspace operations (authenticate, create workspace, upload files, search, list/get workspace info, list/delete files, delete workspace) into protobuf/tonic gRPC requests against the remote ForgeService. 
It centralizes the network serialization/deserialization boundary, conversion between proto types and forge_domain types, and authorization header injection.\n\nDesign choices are pragmatic: conversions are implemented inline using TryFrom and manual mapping for search results (mapping QueryItems and proto Node variants into domain Node/NodeData). The repository uses an injectable GrpcInfra (via Arc) to obtain a tonic::Channel so tests and runtime can replace the transport. The code normalizes Windows paths when creating a workspace (explicit replace of backslashes to '/'), encodes API keys in the Authorization metadata as \"Bearer {}\", and carefully handles optional proto Timestamp fields, marking created_at as required while last_updated is optional. Error contexts use anyhow::Context to produce descriptive failure messages across RPC/parse boundaries.", + "key_constructs": [ + { + "name": "ForgeContextEngineRepository", + "type": "class", + "purpose": "Repository struct that holds the shared GrpcInfra and implements WorkspaceIndexRepository via gRPC.", + "reasoning": "This is the main entrypoint for workspace-related RPCs. 
Changes here affect all workspace operations that go over the network and therefore must respect GrpcInfra, authorization, and proto conversion logic.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 26, + "context": "use crate::context_engine::ForgeContextEngineRepository;" + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 47, + "context": "codebase_repo: Arc>," + }, + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 80, + "context": "let codebase_repo = Arc::new(ForgeContextEngineRepository::new(infra.clone()));" + } + ] + }, + { + "name": "new", + "type": "function", + "purpose": "Constructor: builds ForgeContextEngineRepository with an Arc-wrapped GrpcInfra.", + "reasoning": "Simple constructor used by callers to create the repository; preserves Arc semantics for shared use across async tasks.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 80, + "context": "let codebase_repo = Arc::new(ForgeContextEngineRepository::new(infra.clone()));" + } + ] + }, + { + "name": "with_auth", + "type": "function", + "purpose": "Helper that inserts an Authorization Bearer header into a tonic::Request from an ApiKey.", + "reasoning": "Centralizes metadata mutation and header formatting; callers rely on the exact header key and value format, so its behavior is critical to authentication." + }, + { + "name": "authenticate", + "type": "function", + "purpose": "Trait implementation method that calls CreateApiKey gRPC and converts the response into WorkspaceAuth.", + "reasoning": "Creates a workspace-level API key by calling the server and mapping the proto CreateApiKeyResponse into domain WorkspaceAuth (with a Utc::now timestamp). 
Any modifications to the mapping or error handling here change how credentials are created and exposed to the rest of the system.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 531, + "context": "self.codebase_repo.authenticate().await" + } + ] + }, + { + "name": "create_workspace", + "type": "function", + "purpose": "Trait implementation method that sends CreateWorkspaceRequest and returns a WorkspaceId.", + "reasoning": "Constructs the WorkspaceDefinition RPC payload (normalizing working_dir to forward slashes) and depends on with_auth for credentials; sensitive to path formatting expectations of the remote service.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 540, + "context": ".create_workspace(working_dir, auth_token)" + } + ] + }, + { + "name": "upload_files", + "type": "function", + "purpose": "Trait implementation method that streams file content in an UploadFilesRequest and returns FileUploadInfo.", + "reasoning": "Converts domain FileUpload into proto File messages and interprets the returned Upload result. The mapping of counts (node_ids.len(), relations.len()) is relied on by callers for progress or validation.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 549, + "context": "self.codebase_repo.upload_files(upload, auth_token).await" + } + ] + }, + { + "name": "search", + "type": "function", + "purpose": "Trait implementation method that performs semantic code search and maps proto results to domain Node list.", + "reasoning": "Contains non-trivial conversion logic: builds Query proto from CodeSearchQuery, restricts NodeKind to FileChunk, and translates node_data.Kind variants into domain NodeData (FileChunk, File, FileRef, Note, Task). 
Callers expect relevance and distance fields to be preserved and node IDs to be converted into domain IDs.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 557, + "context": "self.codebase_repo.search(query, auth_token).await" + } + ] + }, + { + "name": "list_workspace_files", + "type": "function", + "purpose": "Trait implementation method that requests file references and converts them to domain FileHash entries.", + "reasoning": "Relies on TryFrom for FileHash and therefore on proto message shapes. Useful for detecting which files changed and for delta uploads.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 583, + "context": ".list_workspace_files(workspace, auth_token)" + } + ] + }, + { + "name": "delete_files", + "type": "function", + "purpose": "Trait implementation method that issues DeleteFilesRequest for a workspace; no-op if deletion list is empty.", + "reasoning": "Implements an early return when there's nothing to delete; callers expect silence (Ok(())) for empty deletions.", + "callers": [ + { + "file": "crates/forge_repo/src/forge_repo.rs", + "line": 592, + "context": "self.codebase_repo.delete_files(deletion, auth_token).await" + } + ] + } + ], + "semantic_tags": [ + "grpc", + "workspace", + "indexing", + "authentication", + "proto-conversion", + "network" + ], + "handles_entities": [ + "WorkspaceAuth", + "WorkspaceId", + "FileUploadInfo", + "Node", + "WorkspaceInfo", + "FileHash", + "FileUpload" + ], + "key_behaviors": [ + "creates API key by calling CreateApiKey", + "creates workspace by sending CreateWorkspaceRequest", + "uploads files and returns upload counts", + "performs semantic search and returns domain nodes", + "lists and fetches workspace metadata", + "lists and deletes workspace files", + "deletes workspace by ID" + ], + "pitfalls": [ + { + "mistake": "Assuming filesystem paths can remain platform-native when sent to the remote service.", + "consequence": "Remote workspace 
server may misinterpret backslashes from Windows, leading to incorrect indexing or broken paths.", + "prevention": "Respect existing normalization: working_dir is converted with to_string_lossy().replace(\"\\\\\", \"/\"). If changing this logic, ensure the remote service expectations are preserved." + }, + { + "mistake": "Altering the Authorization metadata key or formatting (e.g., changing from \"Bearer {}\" or header name).", + "consequence": "gRPC calls will fail with authentication errors or be rejected by the server.", + "prevention": "Maintain with_auth behavior: insert metadata key \"authorization\" and format value as \"Bearer {}\". Be cautious if ApiKey's internal representation changes; with_auth currently dereferences with &**auth_token." + }, + { + "mistake": "Treating proto Timestamp values as always present or changing how missing timestamps are handled without checking callers.", + "consequence": "Parsing errors or panics when converting to chrono DateTime, or created_at becoming optional when callers expect it to exist.", + "prevention": "Observe that created_at is required (context() used), while last_updated is optional. Keep conversion and error contexts consistent with domain invariants." + }, + { + "mistake": "Changing node conversion mapping in search (node_data.kind?) without ensuring all proto variants and optional fields are handled.", + "consequence": "search may return incomplete/incorrect NodeData or panic on None variants; callers may mis-handle relevance/distance/ids.", + "prevention": "Preserve the exhaustive match over node_data.Kind variants and the safe handling of optional fields (using map/unwrap_or_default and filter_map patterns)."
+ }, + { + "mistake": "Assuming infra.channel() returns an already-connected client or mutability semantics that change.", + "consequence": "Unexpected connection errors or misuse of tonic client construction expectations.", + "prevention": "Keep the pattern of creating a ForgeServiceClient::new(channel) per call and handling async errors with .context for clarity." + } + ], + "reading_guide": { + "start_here": "ForgeContextEngineRepository", + "key_sections": [ + "with_auth: essential for authorization header format and metadata handling", + "create_workspace: shows working_dir normalization (Windows path handling)", + "search: complex mapping from proto search results to domain Node/NodeData and scoring fields" + ], + "skip_unless_needed": [ + "TryFrom implementations at top for specific proto->domain conversions (read when touching conversions)", + "trivial pass-through RPC wrappers like list_workspaces/delete_workspace (only change if RPC contract changes)" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "cargo test -p forge_repo", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "crates/forge_repo/proto_generated.rs (generated module)", + "relationship": "Defines the proto message types and gRPC client used throughout; any change to proto shapes or field names requires updating conversions here.", + "likely_co_change": true, + "reason_to_check": "When modifying TryFrom mappings or field access (e.g., node.hash or workspace.created_at), verify proto definitions and generated names match." 
+ }, + { + "path": "crates/forge_domain/src/lib.rs", + "relationship": "Defines domain types (WorkspaceAuth, WorkspaceId, Node, FileUpload) that this file converts to/from; changes to domain types will require corresponding mapping updates.", + "likely_co_change": true, + "reason_to_check": "If domain type constructors, fields, or string parsing (UserId::from_string, WorkspaceId::from_string) change, adjust conversion logic and error contexts." + }, + { + "path": "crates/forge_app/src/lib.rs (GrpcInfra trait)", + "relationship": "Provides GrpcInfra trait used to obtain tonic channels; repository depends on this abstraction for network transport.", + "likely_co_change": true, + "reason_to_check": "If GrpcInfra.channel() signature or semantics change, update how the repository constructs clients." + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_repo", + "cargo test --workspace" + ], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "bug_fix", + "category": "Platform/Edge Case", + "title": "Normalize Windows paths for workspace indexing to forward slashes", + "problem": "Workspace indexing on Windows sent backslash paths which the workspace server expected forward slashes.", + "root_cause": "Filesystem path lossily converted via to_string_lossy() yielding backslashes on Windows; remote indexing expects POSIX-style paths.", + "solution": "Replace backslashes with '/' in working_dir when building CreateWorkspaceRequest.", + "lesson_learned": "When communicating filesystem paths across platforms or to services expecting POSIX paths, normalize path separators. 
Be explicit about canonical path representation in RPCs.", + "commits": [ + "7072fa8" + ], + "constructs": [ + "CreateWorkspaceRequest construction" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/cli.ts": { + "file_path": "benchmarks/cli.ts", + "short_description": "CLI entrypoint for running evaluation tasks: reads task.yml, generates contexts, runs commands in parallel, validates outputs, and emits summary/logs.", + "category": "CLI", + "description": "This file is the top-level command-line driver for the benchmark/eval utility in the repository. It parses CLI arguments, loads a task specification (task.yml), prepares a debug and temporary workspace, expands input sources into contexts (CSV or inline values), and runs the configured task.run commands for each context row. Execution is orchestrated with configurable parallelism (p-limit) while each task executes its own sequence of commands sequentially in a per-task temporary workdir. After running commands it runs validation logic (processValidations) against the combined command output, aggregates per-task results into a summary, logs using pino (human or JSON output), and exits with an appropriate exit code.\n\nThe file exists to provide a single tool to perform automated evaluation runs across many parameterized inputs and to integrate with the other modules in the benchmarks folder: parse CLI args, build commands (command-generator), execute commands (task-executor), parse/validate outputs (verification), and utility helpers (utils). It centralizes lifecycle actions: setup (before_run), per-task temp dirs and logging, sequential execution of multiple commands per task, early-exit on command error/timeout, and final aggregation of results into a concise summary. 
The design treats command generation/execution/validation as modular steps performed by other modules (imports), keeping this file focused on orchestration, logging and process lifecycle (including explicit process.exit calls).", + "key_constructs": [ + { + "name": "main", + "type": "function", + "purpose": "Primary async entry point that coordinates CLI parsing, setup, source expansion, task execution and final aggregation/exit.", + "reasoning": "All major program flow lives inside main; editing should preserve its lifecycle semantics (setup/tmp dirs, per-task parallelism, execution ordering, logging and exit codes)." + }, + { + "name": "logger", + "type": "constant", + "purpose": "Pino logger instance configured for human-readable or JSON output depending on LOG_JSON env var.", + "reasoning": "Logging format and level are controlled here; tests/consumers may rely on structured keys in logs. Changing logger shape impacts downstream log parsing, CI behavior, and piping to tools like jq." + }, + { + "name": "execAsync", + "type": "constant", + "purpose": "Promisified child_process.exec used for running before_run setup commands.", + "reasoning": "execAsync is used synchronously for setup commands; its error and stdout/stderr behavior affects setup failure handling and must be preserved." + }, + { + "name": "TaskResult", + "type": "constant", + "purpose": "Type alias describing per-task summary returned by the runner (index, status, command, duration, validationResults).", + "reasoning": "TaskResult is the canonical shape used to assemble results and compute the summary; other modules and any consumers may parse this shape." + }, + { + "name": "__dirname", + "type": "constant", + "purpose": "ESM-compatible emulation of CommonJS __dirname derived via fileURLToPath(import.meta.url).", + "reasoning": "Used to resolve paths and pass into parseCliArgs. Keep as-is so CLI arg parsing receives expected working dir context." 
+ } + ], + "semantic_tags": [ + "cli", + "benchmarks", + "task-orchestration", + "concurrency", + "logging", + "validation" + ], + "handles_entities": [ + "Task", + "TaskResult", + "ValidationResult", + "TaskExecutionResult", + "context rows", + "temporary directories", + "debug directory" + ], + "key_behaviors": [ + "parses CLI arguments and loads task.yml", + "creates debug and per-task temporary directories", + "runs before_run setup commands in a temp dir", + "expands sources into a cross product of contexts", + "executes run commands sequentially per task and in parallel between tasks", + "aggregates and logs validation results and exits with appropriate code" + ], + "pitfalls": [ + { + "mistake": "Removing or altering the top-level process.stdout EPIPE handler.", + "consequence": "Piping output through head/jq could produce unhandled EPIPE errors and crash the process instead of exiting cleanly.", + "prevention": "Keep the EPIPE handler logic intact or replace with equivalent handling that gracefully exits on 'EPIPE'." + }, + { + "mistake": "Changing exit paths (process.exit calls) without preserving status semantics.", + "consequence": "External tooling and CI rely on specific exit codes (non-zero when tasks failed); changing when/what code is used may hide failures or incorrectly fail CI.", + "prevention": "Respect existing exit behavior: exit(1) on parse/setup errors and on any task errors (failCount > 0); exit(0) otherwise. Ensure main().catch still logs and exits 1 on unexpected errors." + }, + { + "mistake": "Altering temporary directory handling and the expectation that createTempDir returns an object with a .name property.", + "consequence": "Subsequent filesystem operations (mkdir, write logs, pass workdir to executeTask) will break if the shape or cleanup contract of createTempDir changes.", + "prevention": "When editing, preserve usage of taskTmpDir.name and setupTmpDir.name, and verify createTempDir implementation matches this contract." 
+ }, + { + "mistake": "Assuming CSV, value, or cmd sources behave equally (cmd source is explicitly unimplemented).", + "consequence": "Introducing a 'cmd' source without adjusting the parsing and validation logic will lead to unhandled cases / unexpected exits.", + "prevention": "Respect the current supported source types (csv and value) unless intentionally implementing 'cmd' with corresponding tests and CLI behavior changes." + }, + { + "mistake": "Modifying the logging format keys or removing fields like task_id, command_id.", + "consequence": "Downstream parsers, log aggregators, and human-readable summaries expect these fields; changing them will break tooling and observability.", + "prevention": "Preserve key log fields and structure, or update all consumers and tests at once." + } + ], + "reading_guide": { + "start_here": "main", + "key_sections": [ + "parseCliArgs: CLI parsing and validation early-exit behavior", + "setup 'before_run' loop: executes setup commands in createTempDir", + "sources loading: CSV and value handling, path resolution relative to evalDir", + "taskPromises map: per-row temp directory creation, command interpolation, command execution loop (executeTask) and early-exit logic", + "final aggregation and exit logic: computes counts and decides process.exit status" + ], + "skip_unless_needed": [ + "logger configuration block (unless adjusting log output)", + "shebang and EPIPE handler (only relevant for runtime behavior)" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "npm run eval # (project-level scripts reference benchmarks; environment-specific - see repository package.json)", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "benchmarks/model.js", + "relationship": "Defines Task and TaskStatus types used for parsing task.yml and shaping TaskResult statuses.", + "likely_co_change": true, + "reason_to_check": "If Task shape or TaskStatus enum changes, this file must be updated to 
handle new fields or statuses and tests will need adjustment." + }, + { + "path": "benchmarks/command-generator.js", + "relationship": "generateCommand is used to interpolate context values into command templates before execution.", + "likely_co_change": true, + "reason_to_check": "Changes to templating or context shape (e.g., new context property names) must be synchronized so produced commands remain correct." + }, + { + "path": "benchmarks/parse.js", + "relationship": "parseCliArgs builds the args consumed by main (evalName, evalDir, taskFile).", + "likely_co_change": true, + "reason_to_check": "CLI argument contract changes require corresponding updates here and in any integration or documentation referencing CLI flags." + }, + { + "path": "benchmarks/task-executor.js", + "relationship": "executeTask performs the actual command invocation per task and returns TaskExecutionResult used to decide early exit, timeout, and collect output.", + "likely_co_change": true, + "reason_to_check": "Behavioral changes to executeTask (return shape, timeout flags, append semantics) will directly impact orchestration and validation logic in this file." + }, + { + "path": "benchmarks/verification.js", + "relationship": "processValidations consumes combined output and verifies it against task validation rules to produce ValidationResult(s) and a validation status.", + "likely_co_change": true, + "reason_to_check": "Validation contract changes (shapes, status names like 'passed') will affect result mapping to TaskStatus and summary counts here." + }, + { + "path": "benchmarks/utils.js", + "relationship": "Utilities such as createTempDir and parseCsvAsync are used to prepare per-task directories and parse CSV sources.", + "likely_co_change": true, + "reason_to_check": "createTempDir API or parseCsvAsync behavior changes (return types, exception behavior) require updates in this orchestration code." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "node-based tests (if present): check package.json scripts (e.g., npm run eval or npm test)", + "When editing, run any repository-wide Node/TS benchmarks scripts referenced in README (e.g., npm run eval, npm run test:bounty) to validate runtime behavior" + ], + "data_constants_to_check": [ + "task.yml schema and any Task field names in benchmarks/model.js", + "env variables: LOG_JSON and LOG_LEVEL affect logger behavior" + ], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "crates/forge_repo/src/provider/anthropic.rs": { + "file_path": "crates/forge_repo/src/provider/anthropic.rs", + "short_description": "Anthropic provider client and ChatRepository implementation with streaming SSE support and model discovery.", + "category": "SOURCE_CODE", + "description": "This file provides an Anthropic provider client and a ChatRepository implementation that the rest of the system uses to call Anthropic-style LLM endpoints. It exists to (1) build and sign requests appropriate for Anthropic (and Anthropic-compatible endpoints such as Vertex AI and other proxies), (2) send chat requests and stream responses back as ChatCompletionMessage items, and (3) fetch model lists from configured model endpoints or use hardcoded models from provider configuration. 
The file centralizes provider-specific behaviors (headers, beta flags, OAuth vs API key handling, Vertex AI differences), and it maps low-level transport and parse errors into the system's retry/error semantics.\n\nKey design decisions visible in this file: an Anthropic struct parameterized over an HttpInfra implementation is used to abstract HTTP interactions for production and tests; a transform pipeline built from DTO transformers (AuthSystemMessage, CapitalizeToolNames, DropInvalidToolUse, SanitizeToolIds, EnforceStrictObjectSchema or RemoveOutputFormat, SetCache) is applied to requests before serialization; two streaming approaches are supported \u2014 a standard reqwest-eventsource path and a raw SSE parsing path (chat_raw_sse) for providers that return non-standard content-types; and error contexts are enriched with format_http_context to produce helpful messages. The file also implements ChatRepository for AnthropicResponseRepository so this provider can be used polymorphically by higher-level code that expects a ChatRepository.\n\nThe test module inside the same file contains unit tests and small integration-style tests that exercise model fetching and other behaviors. Tests use a MockHttpClient (implementing HttpInfra minimally) and a MockServer helper (from the provider test utilities) to simulate upstream responses. Several non-obvious behaviors are covered by tests and by runtime code paths: the selection between x-api-key and Authorization bearer header, the special-case handling for Vertex AI (model in URL path, different anthropic version, no structured output), and the fallback to raw SSE parsing for providers that need it (should_use_raw_sse). 
The file is intentionally defensive in its error handling and contextual error messages because upstream provider responses and content-types are diverse and sometimes non-standard.", + "key_constructs": [ + { + "name": "Anthropic", + "type": "struct", + "purpose": "Encapsulates an Anthropic provider client with an HttpInfra for making requests, provider metadata, API version string, and OAuth usage flag.", + "reasoning": "Central abstraction so callers can create a client bound to a provider configuration and call chat or models methods without managing headers, URL variations, or streaming semantics themselves. Any edits that change the constructor or fields will affect header construction, URL behavior, and streaming decisions." + }, + { + "name": "new", + "type": "function", + "purpose": "Constructor for Anthropic instances (impl Anthropic).", + "reasoning": "Creates a client from an HttpInfra, Provider config, version string, and oauth flag \u2014 ensure caller-supplied provider and flags correspond to the expected behavior (Vertex vs normal Anthropic, oauth usage)." + }, + { + "name": "get_headers", + "type": "function", + "purpose": "Builds the HTTP headers required for Anthropic requests, including anthropic-version, authorization/x-api-key, and anthropic-beta flags.", + "reasoning": "Encodes auth and feature-flag behavior. The selection between x-api-key and Authorization: Bearer depends on provider.id and use_oauth; beta flags differ for Vertex AI and OAuth vs API key. Changing header strings or logic will change what upstream accepts and must be handled carefully." + }, + { + "name": "should_use_raw_sse", + "type": "function", + "purpose": "Determines whether to bypass reqwest-eventsource content-type validation and parse SSE from raw bytes for certain providers.", + "reasoning": "Used to select the raw SSE parsing codepath (chat_raw_sse) versus the eventsource/http_eventsource path; this conditional is tied to provider id (OPENCODE_ZEN). 
Modifying it changes which providers get the fallback parser." + }, + { + "name": "chat", + "type": "function", + "purpose": "Sends a chat request to the Anthropic endpoint, applies request transformers, serializes request JSON, chooses streaming path, and returns a ResultStream of ChatCompletionMessage.", + "reasoning": "High level method used by callers to stream completions. The method transforms context into a Request (including ReasoningTransform), sets model or Vertex-specific URL behavior, serializes, and either uses http_eventsource or chat_raw_sse. Any edits changing serialization, transforms, or error handling affect streaming behavior across the system." + }, + { + "name": "chat_raw_sse", + "type": "function", + "purpose": "Consumes an HTTP response body as raw bytes, parses SSE events manually via eventsource_stream, converts each event JSON into EventData and then ChatCompletionMessage, and streams the results.", + "reasoning": "Fallback parsing for upstreams returning non-standard SSE content types. This function uses bytes_stream().eventsource() and maps parsing errors into retryable or non-retryable errors via into_sse_parse_error. Changing parsing semantics, filtering of '[DONE]' events, or error mapping changes runtime retry behavior." + }, + { + "name": "models", + "type": "function", + "purpose": "Fetches model metadata from a provider-configured URL or returns hardcoded models from provider config.", + "reasoning": "Supports model discovery for Anthropic providers. It performs an http_get, checks status, decodes JSON into ListModelResponse (and maps to Model), and treats non-success statuses as errors. Tests exercise this path extensively, so behavior and error contexts are important." 
+ }, + { + "name": "into_sse_parse_error", + "type": "function", + "purpose": "Converts eventsource_stream parse errors into anyhow::Error and wraps Transport errors as forge_domain::Error::Retryable.", + "reasoning": "Encapsulates retry semantics for SSE parse/transport errors. This function distinguishes retryable transport errors from parse errors so retries are applied correctly upstream; modifying it will change retry behavior for streaming failures." + }, + { + "name": "AnthropicResponseRepository", + "type": "struct", + "purpose": "Repository wrapper that implements ChatRepository for Anthropic provider responses using an infra instance.", + "reasoning": "This is the adapter that higher-level code binds to when it needs a ChatRepository backed by Anthropic. It uses infra.get_config() to obtain retry settings and create_client to generate an Anthropic client per provider." + }, + { + "name": "create_client", + "type": "function", + "purpose": "Validates provider credentials and constructs an Anthropic client with correct version and OAuth flag.", + "reasoning": "Per-provider validation and version selection (Vertex AI vs normal) happen here; callers rely on this to ensure credentials exist before making requests. Changes will affect which providers are allowed and what API version is requested." 
+ } + ], + "semantic_tags": [ + "provider", + "anthropic", + "sse", + "streaming", + "http", + "auth" + ], + "handles_entities": [ + "ChatCompletionMessage", + "Model", + "Provider", + "Context", + "EventData" + ], + "key_behaviors": [ + "streams chat completion messages from Anthropic endpoints", + "parses raw SSE events for non-standard content types", + "fetches model lists from provider URLs or uses hardcoded models", + "builds provider-specific headers (beta flags, API key vs bearer)" + ], + "pitfalls": [ + { + "mistake": "Removing or changing the raw-SSE fallback (should_use_raw_sse / chat_raw_sse) without coordinating providers that need it", + "consequence": "Providers or proxies that return non-standard SSE content-types will fail to stream, causing runtime errors and dropped functionality for those providers", + "prevention": "Respect the conditional raw SSE decision and tests that cover streaming behavior for non-standard responses" + }, + { + "mistake": "Altering header selection logic (x-api-key vs Authorization Bearer) or anthropic-beta flags arbitrarily", + "consequence": "Upstream provider authentication or feature flags may stop working; Vertex AI and OAuth flows depend on specific headers", + "prevention": "Keep provider.id checks and use_oauth semantics intact and validate behavior against tests and a live provider when necessary" + }, + { + "mistake": "Changing error mapping in into_sse_parse_error (Transport -> retryable) without considering retry semantics", + "consequence": "Retryable transport errors might be treated as terminal errors or non-retryable parse errors might get retried incorrectly, altering the resilience of streaming", + "prevention": "Preserve the distinction between transport vs parse errors when changing this function; run retry-related tests" + }, + { + "mistake": "Modifying request transformation pipeline ordering or removing transformers (e.g., SanitizeToolIds, EnforceStrictObjectSchema)", + "consequence": "Upstream 
endpoints (especially strict ones like Vertex) could reject requests or behave unexpectedly due to invalid tool IDs or output schemas", + "prevention": "Keep the pipeline transforms consistent with provider expectations; test with both Vertex and standard Anthropic provider configurations" + }, + { + "mistake": "Assuming http_eventsource is always used and removing code that handles response.status() checking in chat_raw_sse", + "consequence": "Non-success responses may not be detected correctly when using raw SSE path, leading to unclear failures or panics when reading body", + "prevention": "Retain status checks and text-body extraction for error contexts in the raw SSE path" + } + ], + "reading_guide": { + "start_here": "Anthropic::chat", + "key_sections": [ + "Anthropic struct and new: understand fields (http infra, provider, anthropic_version, use_oauth)", + "get_headers: auth header selection and beta flags are built here", + "chat: main flow for request transformation, serialization, and choosing streaming path", + "chat_raw_sse: raw parsing fallback and error mapping details", + "models: model discovery via URL or hardcoded configuration", + "AnthropicResponseRepository impl: how the ChatRepository trait is implemented and how retry config is pulled from infra" + ], + "skip_unless_needed": [ + "the unit test module at the bottom when making non-test code changes", + "detailed imports at the top aside from types referenced above" + ] + }, + "tests": { + "exercised_by": [ + "crates/forge_repo/src/provider/anthropic.rs (module tests, uses MockServer and MockHttpClient)" + ], + "test_functions": [ + "test_url_for_models" + ], + "example_command": "cargo test -p forge_repo --lib --tests", + "relevant_snippets": [ + { + "file": "crates/forge_repo/src/provider/anthropic.rs", + "lines": "1-200", + "description": "The test module at the end of the file defines MockHttpClient and tests for models endpoint behavior (model URL construction and mock server responses)." 
+ } + ] + }, + "related_files": [ + { + "path": "crates/forge_repo/src/provider/utils.rs", + "relationship": "helper functions used to build headers and format HTTP contexts (create_headers, format_http_context) used throughout this file", + "likely_co_change": true, + "reason_to_check": "Any edits to header construction, error context strings, or format_http_context must be aligned with this file's usage" + }, + { + "path": "crates/forge_repo/src/provider/retry.rs", + "relationship": "contains into_retry mapping used to wrap errors with retry semantics before returning to callers", + "likely_co_change": true, + "reason_to_check": "Changes to error types or retry logic should remain consistent between into_sse_parse_error and into_retry" + }, + { + "path": "crates/forge_app/src/dto/anthropic.rs", + "relationship": "contains DTOs and transformers (AuthSystemMessage, EnforceStrictObjectSchema, EventData, Request) used to build and parse Anthropic requests and responses", + "likely_co_change": true, + "reason_to_check": "Modifications to DTO shapes or transforms will directly affect request serialization and response parsing in this file" + }, + { + "path": "crates/forge_app/src/http_infra.rs", + "relationship": "defines the HttpInfra trait that Anthropic is parameterized over (http_get, http_post, http_eventsource)", + "likely_co_change": true, + "reason_to_check": "Altering HttpInfra signatures or behavior requires updating MockHttpClient and production implementations used here" + } + ], + "edit_checklist": { + "tests_to_run": [ + "cargo test -p forge_repo --lib --tests", + "cargo test --workspace (if changes affect crates across workspace)" + ], + "data_constants_to_check": [ + "forge_config retry settings (infra.get_config()?.retry) used to wrap errors with into_retry", + "anthropic-beta header strings and anthropic-version constant strings" + ], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "insights": [ + { + "type": "feature", + 
"category": "Parsing", + "title": "Allow raw SSE parsing for providers requiring relaxed content-type validation and add eventsource_stream usage", + "problem": "Some providers/proxies require raw SSE parsing or send data with non-standard content types; previously strict validation prevented processing.", + "root_cause": "eventsource_stream or reqwest headers validation can reject some legitimate provider streams.", + "solution": "Added eventsource_stream usage conditional (should_use_raw_sse) and adjusted imports. Several provider repo changes for streaming behavior and transport error handling were added across commits (eventsource error handling being marked retryable in other commits).", + "lesson_learned": "Be tolerant when integrating with diverse provider proxies: offer a raw parsing fallback and careful error/retry semantics for transport vs parse errors.", + "commits": [ + "40cfcc8", + "7d63501" + ], + "constructs": [ + "Anthropic::should_use_raw_sse", + "eventsource_stream usage" + ] + }, + { + "type": "refactoring", + "category": "API", + "title": "Integrate SanitizeToolIds into Anthropic request pipeline", + "problem": "Anthropic provider requests lacked sanitization step, causing rejections by strict backends (Vertex).", + "root_cause": "Pipeline omitted a sanitization transformer for tool IDs.", + "solution": "Pipe SanitizeToolIds into the existing request transformation pipeline (after DropInvalidToolUse), ensuring tool IDs are safe for Anthropic endpoints.", + "commits": [ + "23096da" + ], + "constructs": [ + "pipeline.pipe(SanitizeToolIds)" + ] + } + ], + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/command-generator.ts": { + "file_path": "benchmarks/command-generator.ts", + "short_description": "Utility functions to parse CSV-like input, produce cross-product contexts, and render Handlebars command templates.", + "category": "SOURCE_CODE", + "description": "This file exists to support benchmark/test 
workflows that generate shell/command strings from tabular data and templates. It provides pure, deterministic helpers: a CSV string parser that produces arrays of Record rows, a cross-product combinator for multiple such row-sets, and Handlebars-based template rendering to produce final command strings. The utilities are small, focused, and intended for composing many permutations of inputs for automated benchmarking or test harnesses in the repository's benchmarks collection.\n\nDesign-wise, the functions are intentionally simple and functional: loadCsvData converts a newline-separated CSV string into an array of key/value objects using the first line as headers; createCrossProduct computes Cartesian products of multiple sets of those row objects by shallow-merging row dictionaries; generateCommand compiles and renders a Handlebars template with strict mode enabled to produce a concrete command string; generateCommands (internal) maps the cross product to rendered commands; getContextsFromSources is an exported helper that returns the cross-product contexts for consumption elsewhere. The file avoids side effects and file I/O, so callers are expected to load CSV content and pass it in as strings/arrays.\n\nWhen making edits, respect the behavioral contracts that callers likely depend on: header-first parsing, trimming of values/headers, naive comma splitting (no CSV quoting/escaping support), strict Handlebars compilation (which throws on missing properties), and the cross-product semantics where an empty input set yields an empty result. 
This file sits in the benchmarks area (Node/TypeScript utilities) and will be type-checked/transpiled with the repository's Node toolchain; run the relevant TypeScript checks and local benchmark/test scripts after edits.", + "key_constructs": [ + { + "name": "loadCsvData", + "type": "function", + "purpose": "Parse a CSV content string into an array of Record using the first line as headers.", + "reasoning": "This helper is the single place CSV-like input is tokenized into row objects; maintain its contract (first-line headers, trimming, skipping empty lines) because downstream code and tests will expect header keys and trimmed values. Edits must preserve behavior around empty input, missing first line, and unmatched value counts (missing values become empty strings)." + }, + { + "name": "createCrossProduct", + "type": "function", + "purpose": "Compute the Cartesian product of multiple arrays of Record, merging records shallowly.", + "reasoning": "This function defines how multiple parameter sources are combined to produce contexts. It returns [] when sourcesData is empty and produces an empty result if any source array is empty. The shallow merge of objects implies later sources override earlier keys on conflict; preserve this merge order semantics if editing." + }, + { + "name": "generateCommand", + "type": "function", + "purpose": "Compile and render a Handlebars template (strict mode) with a single Record context to produce a command string.", + "reasoning": "Strict mode in Handlebars means missing properties referenced in the template will throw at render time. Consumers rely on this behavior to catch misconfigured contexts; do not silently relax this unless tests and callers are updated accordingly." 
+ }, + { + "name": "generateCommands", + "type": "function", + "purpose": "Internal helper: produce all rendered command strings by taking the cross-product of given sources and rendering each context.", + "reasoning": "This helper combines createCrossProduct and generateCommand. It is not exported, so changes to its signature or behavior will only affect local usage; still preserve the mapping order (deterministic output order) so downstream benchmarks that compare outputs remain stable." + }, + { + "name": "getContextsFromSources", + "type": "function", + "purpose": "Exported pure function exposing the cross-product contexts for external consumers.", + "reasoning": "This is the single exported helper intended for use outside the file to get contexts. It directly delegates to createCrossProduct; callers may expect stable ordering and structure (array of objects)." + } + ], + "semantic_tags": [ + "templating", + "csv-parsing", + "cartesian-product", + "benchmarks", + "handlebars" + ], + "handles_entities": [ + "Record", + "CSV row", + "command string", + "template context" + ], + "key_behaviors": [ + "parses CSV content into header-keyed row objects", + "produces Cartesian cross-product of multiple row-sets", + "renders Handlebars templates in strict mode to create commands" + ], + "pitfalls": [ + { + "mistake": "Assuming the CSV parser supports quoted fields or commas inside fields.", + "consequence": "Commas inside quoted CSV fields will be split incorrectly, producing malformed rows and wrong keys/values in contexts.", + "prevention": "Recognize the parser is a simple split-on-comma implementation; if handling quoted CSV is required consider preprocessing externally or ensure input CSVs contain no embedded commas." 
+ }, + { + "mistake": "Changing Handlebars.compile options (e.g., removing strict: true) without updating callers/tests.", + "consequence": "Templates that reference missing keys will behave differently (no throw), potentially hiding configuration errors and changing benchmark outputs.", + "prevention": "Respect that strict mode is deliberate: keep it or update all dependent code and tests that rely on strict failure behavior." + }, + { + "mistake": "Altering createCrossProduct merge order or initial accumulator behavior.", + "consequence": "The deterministic ordering and conflict resolution (later sources override earlier keys) will change, which can break tests or expected command generation sequences.", + "prevention": "Preserve shallow-merge semantics and iteration order when editing; be mindful that an empty array in sourcesData yields an empty overall result." + }, + { + "mistake": "Treating an empty sourcesData array as producing one empty context.", + "consequence": "Logic in many callers assumes no contexts when sourcesData is empty; changing to a single empty context would generate unexpected commands.", + "prevention": "Keep the current behavior where an empty sourcesData returns an empty array." 
+ } + ], + "reading_guide": { + "start_here": "getContextsFromSources", + "key_sections": [ + "loadCsvData: CSV parsing contract (headers, trimming, missing values)", + "createCrossProduct: how contexts are combined and merged (ordering matters)", + "generateCommand: Handlebars compile/render options (strict: true)" + ], + "skip_unless_needed": [ + "generateCommands: internal mapping (straightforward composition if you only need contexts)" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "npx tsc benchmarks/command-generator.ts --noEmit", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "benchmarks/package.json", + "relationship": "Likely houses scripts (e.g., npm run eval/test) and dependencies (handlebars) used to run or typecheck these utilities.", + "likely_co_change": true, + "reason_to_check": "If changing import style or Handlebars usage, update package.json (dependency versions, esm/cjs interop) and npm scripts used to run benchmarks." + }, + { + "path": "package.json", + "relationship": "Top-level repo scripts and dev tooling (tsc, eslint) influence how this file is built/checked.", + "likely_co_change": false, + "reason_to_check": "Ensure repository-level TypeScript configuration and build/test commands remain compatible with edits here." 
+ } + ], + "edit_checklist": { + "tests_to_run": [ + "npx tsc --noEmit (to typecheck benchmarks/command-generator.ts and repository TS files)", + "Run benchmark/test scripts in the benchmarks folder if present (e.g., npm run eval, npm run test:bounty from project root or benchmarks dir)" + ], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": true + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/model.ts": { + "file_path": "benchmarks/model.ts", + "short_description": "TypeScript type definitions for benchmark tasks, validations, sources, and task status enums", + "category": "SOURCE_CODE", + "description": "This file declares the canonical TypeScript types used by the project's benchmarks tooling. It defines the shape of a Task (what steps run, pre-steps, timeouts, validations and data sources), the union type Validation (three distinct validation strategies), the Source union (CSV, command, or inline structured values), and the TaskStatus enum (runtime result states). The types exist so the benchmark runner and any tooling around it (CLI scripts, test harnesses, reporters) can share a single, authoritative contract for how benchmark tasks are described, validated, and reported.\n\nThese typings are a small but central piece of the Node/TypeScript benchmark/eval subsystem referenced in the repository overview. The file functions as the schema between static tool authors and runtime code that implements execution, serialization, and reporting. 
Decisions visible here \u2014 union-based Validation variants, permitting Task.run to be a string or an array, and using Record[] for inline source values \u2014 directly shape how task manifests are parsed and how results are interpreted by the rest of the benchmark tooling.\n\nBecause this file is pure type declarations and an enum, changes to it are purely API/contract changes: editors must respect backwards compatibility with any JSON/YAML task manifest readers, benchmark runners, and consumers of TaskStatus values. The file intentionally keeps runtime footprint zero (no functions), delegating behaviors to the runner/validators implemented elsewhere in the repo.", + "key_constructs": [ + { + "name": "Task", + "type": "constant", + "purpose": "Type describing a benchmark task: pre-run commands, main run command(s), optional execution controls, validations, and input sources.", + "reasoning": "Task is the primary data model used throughout benchmark tooling; its precise field names and types (e.g., run can be string|string[]) determine parsing, execution logic (parallelism, timeout, early_exit) and how validations are applied." + }, + { + "name": "Validation", + "type": "constant", + "purpose": "Discriminated union type describing the supported validation strategies: regex, shell, and llm evaluators.", + "reasoning": "The union enables pattern-matching on validation.type in runtime validators and enforces required fields per validation kind. Editors must preserve the discriminant strings (\"regex\", \"shell\", \"llm\") and their associated required properties." + }, + { + "name": "Source", + "type": "constant", + "purpose": "Union describing how input data for tasks can be provided: CSV file path, command-produced content, or an inline array of records.", + "reasoning": "Source shapes affect how the runner will fetch or construct input rows for parametrized benchmarks; the inline form uses Record[] which implies string-only cell values." 
+ }, + { + "name": "TaskStatus", + "type": "constant", + "purpose": "Enum of possible result statuses emitted by a benchmark task (passed, validation_failed, timeout, failed).", + "reasoning": "TaskStatus strings are likely used in datastore, reports, and CI assertions. Any alteration of these enum values will require updates in reporting/consuming code." + } + ], + "semantic_tags": [ + "typings", + "benchmarks", + "validation", + "models", + "task-spec" + ], + "handles_entities": [ + "Task", + "Validation", + "Source", + "TaskStatus" + ], + "key_behaviors": [ + "defines the schema for benchmark tasks and their inputs", + "expresses validation strategy variants for task results", + "standardizes runtime status values for reporting" + ], + "pitfalls": [ + { + "mistake": "Renaming or changing the discriminant strings in Validation (\"regex\", \"shell\", \"llm\") or removing required fields", + "consequence": "Runtime validators that switch on validation.type will fail to match cases or will have missing fields at runtime, leading to runtime errors or skipped validations.", + "prevention": "When modifying Validation, update all consumer code (parsers and validators) and any serialized task manifests to use the new discriminants and fields." + }, + { + "mistake": "Altering Task.run from string|Array to another shape (e.g., always array) without updating callers", + "consequence": "Code that assumes a single string command will break, and serialized task definitions may no longer be parsed correctly.", + "prevention": "Verify all code paths that construct or consume Task.run (runners, CLI input, manifest loaders) before changing the type." 
+ }, + { + "mistake": "Changing Source.value from Record[] to allow non-string cells (e.g., any)", + "consequence": "Consumers that expect string values (CSV-like semantics) may mis-handle numbers/objects and validation logic that relies on string operations may fail.", + "prevention": "Audit downstream code that reads Source.value and update parsing/serialization logic together with the type change." + }, + { + "mistake": "Modifying TaskStatus enum string values or order without touching stored data or report generators", + "consequence": "Stored results, CI checks, or report consumers that match exact strings will become inconsistent and could misclassify task outcomes.", + "prevention": "If TaskStatus strings must change, migrate persisted data and update all report/CI code paths in lockstep." + } + ], + "reading_guide": { + "start_here": "Task", + "key_sections": [ + "Task: main schema that describes what a benchmark run looks like and the options that control execution", + "Validation: discriminated union showing supported validators and their required fields", + "Source: union of input data sources and the implied content types" + ], + "skip_unless_needed": [ + "the export of the TaskStatus enum if you're not concerned with reporting or persisted result formats" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "npm run eval", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "benchmarks/run.ts", + "relationship": "Likely the benchmark runner that consumes Task and Validation types to execute tasks and evaluate validations.", + "likely_co_change": true, + "reason_to_check": "Changes to Task, Validation, Source, or TaskStatus will almost certainly require changes in the runner implementation and its parsing/serialization logic." 
+ }, + { + "path": "package.json", + "relationship": "Contains npm scripts referenced in project docs (e.g., npm run eval, npm run test:bounty) which likely invoke the TypeScript benchmark tooling that uses these types.", + "likely_co_change": false, + "reason_to_check": "If types change and scripts or build steps depend on them, ensure build/test tasks still succeed." + }, + { + "path": "benchmarks/README.md", + "relationship": "Documentation for how to author task manifests and run the benchmarks; must stay consistent with these type definitions.", + "likely_co_change": true, + "reason_to_check": "Type changes should be reflected in user-facing documentation for writing task files or validation blocks." + } + ], + "edit_checklist": { + "tests_to_run": [ + "npm run lint (or yarn lint)", + "npm run build (TypeScript compile)", + "npm run eval (run sample benchmarks)" + ], + "data_constants_to_check": [], + "owns_authoritative_data": true, + "public_api_surface": true + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/parse.ts": { + "file_path": "benchmarks/parse.ts", + "short_description": "CLI argument parsing and path resolution for benchmark evaluations", + "category": "CLI", + "description": "This TypeScript module exists to parse command-line arguments for the benchmarks/evaluation runner and to resolve the evaluation directory and task file paths. It centralizes the logic that accepts either an evaluation directory name or a direct path to a task YAML file and returns a normalized set of values (evalName, evalDir, taskFile) that the rest of the benchmark tooling consumes. The file is intentionally small and focused: it uses yargs for robust CLI parsing and Node's path utilities for cross-platform path composition.\n\nThe module is used by benchmark runner code that needs a single place to interpret user input (positional eval-name) and produce a canonical path representation. 
The design separates parsing (yargs) from path resolution (path.join/isAbsolute/dirname), allowing callers to pass in a base dirname (the caller's context) so that relative eval names are resolved relative to a known directory. The function is async because it uses yargs.parseAsync, which can support asynchronous middleware or future async flags even though current logic is synchronous.\n\nImportant behaviors: it enforces that an eval-name positional argument is present, supports direct task file paths (if the provided name ends with \"task.yml\", \".yml\" or \".yaml\"), resolves relative file paths against the provided dirname, and otherwise treats the eval-name as a directory under dirname and expects a task.yml inside. The returned shape is the CliArgs type which documents the contract between this module and consumers.", + "key_constructs": [ + { + "name": "CliArgs", + "type": "constant", + "purpose": "TypeScript type that describes the normalized CLI output: evalName, evalDir, taskFile", + "reasoning": "Consumers rely on this exact shape when receiving parsed CLI values; editors must keep the type consistent with the returned object keys and callers expecting these fields." + }, + { + "name": "parseCliArgs", + "type": "function", + "purpose": "Parses argv using yargs and resolves evalDir and taskFile from an eval-name positional argument", + "reasoning": "This is the main entry point for benchmark CLI argument handling. Any edits must preserve how positional arguments map to either a directory+task.yml or a direct task file, and must respect path resolution semantics (relative vs absolute) based on the dirname parameter." 
+ } + ], + "semantic_tags": [ + "cli", + "args-parsing", + "path-resolution", + "benchmarks", + "typescript" + ], + "handles_entities": [ + "evalName", + "evalDir", + "taskFile" + ], + "key_behaviors": [ + "parses a positional eval-name from CLI", + "validates presence of eval-name and throws if missing", + "resolves direct task file paths and directory-based evaluations", + "converts relative paths to absolute via provided dirname" + ], + "pitfalls": [ + { + "mistake": "Assuming parseCliArgs will resolve paths relative to process.cwd() rather than the dirname parameter", + "consequence": "Relative evalName inputs will be incorrectly resolved if the caller passes the wrong dirname, causing consumers to look up the wrong task file or directory", + "prevention": "When calling parseCliArgs, ensure the correct dirname (e.g., __dirname or process.cwd() as intended by the caller) is supplied; do not change how dirname is used without updating callers" + }, + { + "mistake": "Treating the endsWith checks (.yml/.yaml) as case-insensitive", + "consequence": "Files with uppercase extensions (e.g., TASK.YML) will not be detected as direct task files and will be treated as directories, potentially producing invalid taskFile paths", + "prevention": "Respect the current case-sensitive suffix checks when modifying logic; if you need case-insensitive behavior, check all callers and tests that might depend on current case sensitivity" + }, + { + "mistake": "Replacing path.join with path.resolve or changing path normalization semantics without considering Windows vs POSIX behavior", + "consequence": "Different path composition may change absolute/relative resolution and break callers that expect the current join/dirname logic", + "prevention": "If altering path utilities, verify behavior across platforms and update callers accordingly" + }, + { + "mistake": "Removing or changing the error thrown when eval-name is missing", + "consequence": "Callers may receive undefined or partial 
values and fail later in more confusing ways; command usage and help behavior would differ", + "prevention": "Preserve explicit validation (throwing on missing eval-name) or align the CLI help/usage behavior to ensure the user always gets clear feedback" + } + ], + "reading_guide": { + "start_here": "parseCliArgs", + "key_sections": [ + "yargs parsing: sets up usage, positional argument and help handling (how eval-name is named in argv)", + "direct path detection: the if branch that checks suffixes and uses path.isAbsolute to resolve taskFile", + "directory path handling: the else branch that composes evalDir and expects task.yml inside" + ], + "skip_unless_needed": [ + "import lines (yargs, hideBin, path) \u2014 standard dependencies", + "type declaration CliArgs (simple thin type)" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "benchmarks/* (other benchmark runner scripts)", + "relationship": "These files are likely callers of parseCliArgs and form the remainder of the benchmark runner CLI flow", + "likely_co_change": true, + "reason_to_check": "If parseCliArgs semantics change (dirname handling, returned keys, suffix detection), callers in the benchmarks directory must be updated to match" + }, + { + "path": "package.json (scripts)", + "relationship": "May contain npm scripts that invoke benchmark runner; ensures CLI interface expectations match how scripts call the runner", + "likely_co_change": false, + "reason_to_check": "If command-line flags or positional arguments change, package.json scripts or documentation may need updates" + } + ], + "edit_checklist": { + "tests_to_run": [], + "data_constants_to_check": [], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/task-executor.ts": { + "file_path": "benchmarks/task-executor.ts", + 
"short_description": "Runs a single benchmark task command, streams stdout/stderr to a log, enforces timeout and optional early-exit validations, and returns a structured result.", + "category": "SOURCE_CODE", + "description": "This TypeScript module is a focused executor for a single benchmark task command. It exists to run shell commands in a controlled way for the repository's benchmarking/verification pipeline: it spawns a child process (shell=true), streams stdout/stderr into a per-task log file, captures output in memory for later validation, enforces a configurable timeout, and supports an early-exit mechanism that kills the running process if specified validations pass while the process is still producing output.\n\nThe module returns a typed TaskExecutionResult that includes timing, the combined captured output, and flags describing timeout or early-exit. The design favors streaming logs to disk (with ANSI sequences stripped for logs) while preserving the original captured output in memory (ANSI sequences are preserved in the returned output). Validation logic is invoked incrementally as output is produced using runValidations and allValidationsPassed (imported from verification.js). The module handles child process close and error events, and attempts to avoid double-resolving by respecting local flags (timedOut and exitedEarly).", + "key_constructs": [ + { + "name": "TaskExecutionResult", + "type": "constant", + "purpose": "Type alias describing the structure returned by executeTask (index, command, duration, optional output/error, isTimeout, earlyExit).", + "reasoning": "This type is the authoritative shape consumers of executeTask will parse; editors must preserve the fields and types when changing return paths or error handling so callers remain compatible." 
+ }, + { + "name": "executeTask", + "type": "function", + "purpose": "Executes a shell command for a Task, logs streaming output to a file, runs validations for early exit, enforces timeout, and returns a TaskExecutionResult.", + "reasoning": "This is the main entry point for running a benchmark task. Any edits to lifecycle management, log handling, resolution/rejection logic, timeout behavior, or signals must respect how the function composes promises, sets the timedOut/exitedEarly flags, and writes/ends the log stream." + } + ], + "semantic_tags": [ + "task-runner", + "process", + "logging", + "validation", + "timeout", + "typescript" + ], + "handles_entities": [ + "Task", + "TaskExecutionResult", + "LogFile", + "ChildProcess" + ], + "key_behaviors": [ + "executes shell commands and captures stdout/stderr", + "streams output to a log file while stripping ANSI sequences in the log", + "retains original captured output (including ANSI) in the returned result", + "runs incremental validations on output to enable early exit and kills the process when validations all pass", + "enforces per-task timeout and kills process with SIGKILL when exceeded", + "returns structured timing and status flags for downstream consumers" + ], + "pitfalls": [ + { + "mistake": "Modifying how or when the promise returned by the internal new Promise is resolved/rejected without preserving the timedOut/exitedEarly checks.", + "consequence": "Could lead to double resolutions, unhandled promise rejections, or returning inconsistent TaskExecutionResult flags (e.g., timedOut not reflecting the actual timeout path).", + "prevention": "Respect the use of timedOut and exitedEarly flags; ensure any new resolution/rejection paths check them and clear/stop timers and streams appropriately." 
+ }, + { + "mistake": "Changing the signals used to kill child processes (SIGTERM for early exit, SIGKILL for timeout) or altering kill ordering.", + "consequence": "May change child process cleanup semantics and could leave children running or cause different cleanup behavior on different platforms.", + "prevention": "If signals must be changed, consider platform differences and preserve intended semantics (graceful termination on early exit, forcible kill on timeout)." + }, + { + "mistake": "Altering logStream lifecycle (writing after end, closing too early, or removing stripAnsi usage when writing logs).", + "consequence": "Could result in runtime errors (writes after stream end), corrupted logs, or logs containing ANSI control characters that were previously stripped, making logs harder to read or parse.", + "prevention": "Keep the logStream.write/ end ordering and the stripAnsi calls when writing to the log; ensure writes are guarded by logStream.writable checks as present." + } + ], + "reading_guide": { + "start_here": "executeTask", + "key_sections": [ + "timeout setup within the Promise: sets timedOut and resolves with captured output after SIGKILL", + "checkValidations: incremental validation logic that triggers early exit (SIGTERM) when validations pass", + "child.stdout/stderr handlers: streaming to log and accumulating stdout/stderr for validations", + "child.on('close') and child.on('error') handlers: final resolution/rejection and log finalization" + ], + "skip_unless_needed": [ + "imports at top (boilerplate), the exact formatting of the log header lines" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "npx tsc --noEmit && node -e \"require('./benchmarks/task-executor').executeTask && console.log('typecheck ok')\"", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "benchmarks/verification.js", + "relationship": "imports runValidations and allValidationsPassed; validation behavior and 
early-exit semantics depend on this module", + "likely_co_change": true, + "reason_to_check": "If validation result shape or runtime behavior changes, executeTask's early-exit behavior and calls to allValidationsPassed must be updated accordingly." + }, + { + "path": "benchmarks/model.js", + "relationship": "imports Task type used to describe task shape (timeout, early_exit, validations, etc.)", + "likely_co_change": true, + "reason_to_check": "If Task shape changes (renamed fields or types), executeTask parameter handling and field access must be adjusted." + }, + { + "path": "benchmarks/utils.js", + "relationship": "imports formatTimestamp used to write human-readable timestamps into logs", + "likely_co_change": false, + "reason_to_check": "Only necessary to inspect when log timestamp format changes or if utils exports are reorganized." + } + ], + "edit_checklist": { + "tests_to_run": [ + "npx tsc --noEmit (type-check the TypeScript)", + "If the repo has tests: npm test or yarn test that cover benchmarks (none detected in provided files)" + ], + "data_constants_to_check": [ + "task.timeout (interpreted as seconds) and signal semantics (SIGTERM vs SIGKILL)" + ], + "owns_authoritative_data": false, + "public_api_surface": false + }, + "generated_at_commit": "7e8a51dda37b40ab46312285fd57a684743beec2" + }, + "benchmarks/utils.ts": { + "file_path": "benchmarks/utils.ts", + "short_description": "Small TypeScript utility helpers used by benchmark scripts (timestamp formatting, regex escaping, temp dir, CSV parsing).", + "category": "SOURCE_CODE", + "description": "This file contains a minimal collection of utility functions used by benchmark scripts under the benchmarks/ directory. It exists to centralize small, commonly used helpers so benchmark scripts can format timestamps with local timezone info, escape arbitrary strings for use in regular expressions, create temporary directories asynchronously, and parse CSV content via a promise-based wrapper. 
The functions are thin wrappers over platform or third-party APIs: Date.prototype.toISOString + getTimezoneOffset for timestamp formatting, a single-regex replace for escaping, the tmp package for creating temp directories, and csv-parse for CSV parsing.\n\nThe design is intentionally small and pragmatic: callback-based APIs (tmp.dir and csv-parse's parse) are wrapped in Promises so benchmark code can use async/await. The file uses named exports so other modules can import just the functions they need. The type signatures are concise and assume particular usage patterns (e.g., parseCsvAsync returns Record[] and the options typed include columns and skip_empty_lines), which reflect how the benchmarks expect to consume CSVs and temporary directories. Because these utilities wrap platform/third-party behavior, callers must respect the semantics of those underlying APIs (tmp cleanup callbacks, csv-parse column handling, timezone offset quirks).", + "key_constructs": [ + { + "name": "formatTimestamp", + "type": "function", + "purpose": "Return an ISO-like timestamp string that includes the local timezone offset (e.g., 2023-01-01T12:00:00.000+02:00).", + "reasoning": "Used to create human- and machine-readable timestamps for logs or CSVs where local timezone information is required. It composes toISOString() with a computed timezone offset string using getTimezoneOffset(). Consumers must accept that the function removes the trailing 'Z' from toISOString() and appends the local offset." + }, + { + "name": "escapeRegex", + "type": "function", + "purpose": "Escape regex-special characters in an input string so it can be safely interpolated into a regular expression.", + "reasoning": "A small utility to prevent user-provided or generated strings from being interpreted as regex metacharacters. 
It relies on a single replace with a character class covering common metacharacters; callers should still consider double-escaping when constructing RegExp objects with flags or combining with other escaped fragments." + }, + { + "name": "createTempDir", + "type": "function", + "purpose": "Create a temporary directory using the 'tmp' package and return a Promise resolving to the path and cleanup callback.", + "reasoning": "Provides an async/await-friendly wrapper for tmp.dir. The returned removeCallback must be invoked by callers to clean up the directory; failure to call it will leave artifacts on disk. The function passes the provided prefix directly to tmp.dir's options." + }, + { + "name": "parseCsvAsync", + "type": "function", + "purpose": "Parse CSV text into records via csv-parse and return a Promise resolving to an array of records (typed as Record[]).", + "reasoning": "Wraps the callback-based csv-parse API in a Promise. The typed return shape assumes column-based parsing (object records), so callers should pass options compatible with returning column objects. Incorrect options (e.g., columns: false) will produce array results but are cast to Record[], which the type system will not enforce at runtime." 
+ } + ], + "semantic_tags": [ + "utility", + "io", + "csv", + "tempfile", + "date" + ], + "handles_entities": [ + "TemporaryDirectory", + "CSVRecord", + "TimestampString" + ], + "key_behaviors": [ + "formats date with local timezone for logs or CSVs", + "escapes strings for safe insertion into regular expressions", + "creates a temporary directory and provides a cleanup callback", + "parses CSV content into an array of records asynchronously" + ], + "pitfalls": [ + { + "mistake": "Assuming parseCsvAsync always returns Record[] regardless of options", + "consequence": "If csv-parse is called with columns: false (or other options that return arrays), the runtime data will be arrays but the function casts to Record[], causing type mismatches or runtime errors in consumers.", + "prevention": "When editing callers or this function, ensure the options passed enforce columns-based (object) parsing, or change the function signature/handling if supporting both shapes." + }, + { + "mistake": "Not calling the removeCallback returned by createTempDir", + "consequence": "Temporary directories will persist on disk, possibly polluting CI runners or local machines and affecting subsequent test runs.", + "prevention": "Call the returned removeCallback in finally/cleanup blocks; treat the returned cleanup callback as owning the directory lifecycle." + }, + { + "mistake": "Altering formatTimestamp's timezone handling without understanding getTimezoneOffset semantics", + "consequence": "Incorrect timezone sign or offset formatting (e.g., inverted sign or two-digit issues) can produce incorrect timestamps and lead to mis-sorted logs or wrong CSV timestamps.", + "prevention": "Respect that getTimezoneOffset returns minutes to add to local time to get UTC and that the function intentionally negates it; any change must preserve the conversion semantics and zero-pad hours/minutes." 
+ }, + { + "mistake": "Using escapeRegex result blindly for complex regex assembly without re-escaping", + "consequence": "Incorrect assembly may still introduce unintended regex behavior (e.g., when assembling a pattern that itself includes escapes or when used with RegExp constructor flags).", + "prevention": "When combining escaped fragments, ensure you understand whether additional escaping or quoting is required in the final RegExp context." + } + ], + "reading_guide": { + "start_here": "formatTimestamp", + "key_sections": [ + "formatTimestamp: computes local timezone offset and appends to toISOString()", + "createTempDir: promise wrapper around tmp.dir returning path and cleanup callback", + "parseCsvAsync: promise wrapper around csv-parse; assumes column objects", + "escapeRegex: single-line replacement to escape regex metacharacters" + ], + "skip_unless_needed": [ + "the implementation details of the regex in escapeRegex (one-liner) if only using the function", + "Promise-wrapping boilerplate if you are only changing consumers" + ] + }, + "tests": { + "exercised_by": [], + "test_functions": [], + "example_command": "npm run build (or tsc) to validate TypeScript compile; run project-specific benchmark tests if present (no unit tests provided for this file).", + "relevant_snippets": [] + }, + "related_files": [ + { + "path": "package.json", + "relationship": "Dependency and script declarations (checks that 'tmp' and 'csv-parse' are present and any benchmark scripts reference these utilities).", + "likely_co_change": true, + "reason_to_check": "If changing imports, types, or runtime behavior (e.g., switching csv parser or tmp options), ensure package.json has appropriate dependency versions and scripts for running benchmarks or build steps." 
+ }, + { + "path": "benchmarks", + "relationship": "This file is intended to be used by other files in the benchmarks directory (benchmark scripts, CSV generators, or loggers).", + "likely_co_change": true, + "reason_to_check": "When adjusting APIs or return types, check benchmark scripts that import these utilities to ensure call sites align (especially CSV options and temp-dir cleanup)." + } + ], + "edit_checklist": { + "tests_to_run": [ + "npm run build (or tsc) to confirm TypeScript compiles", + "Run any benchmark-specific scripts under benchmarks/ that import these functions, e.g. node ./benchmarks/