diff --git a/bin/gstack-telemetry-sync b/bin/gstack-telemetry-sync index be767c23e..93cf2707a 100755 --- a/bin/gstack-telemetry-sync +++ b/bin/gstack-telemetry-sync @@ -122,6 +122,11 @@ case "$HTTP_CODE" in # Advance by SENT count (not inserted count) because we can't map inserted back to # source lines. If inserted==0, something is systemically wrong — don't advance. INSERTED="$(grep -o '"inserted":[0-9]*' "$RESP_FILE" 2>/dev/null | grep -o '[0-9]*' || echo "0")" + # Check for upsert errors (installation tracking failures) — log but don't block cursor advance + UPSERT_ERRORS="$(grep -o '"upsertErrors"' "$RESP_FILE" 2>/dev/null || true)" + if [ -n "$UPSERT_ERRORS" ]; then + echo "[gstack-telemetry-sync] Warning: installation upsert errors in response" >&2 + fi if [ "${INSERTED:-0}" -gt 0 ] 2>/dev/null; then NEW_CURSOR=$(( CURSOR + COUNT )) echo "$NEW_CURSOR" > "$CURSOR_FILE" 2>/dev/null || true diff --git a/browse/src/browser-manager.ts b/browse/src/browser-manager.ts index f4ade9e1e..1d6219e34 100644 --- a/browse/src/browser-manager.ts +++ b/browse/src/browser-manager.ts @@ -694,7 +694,14 @@ export class BrowserManager { this.wirePageEvents(page); if (saved.url) { - await page.goto(saved.url, { waitUntil: 'domcontentloaded', timeout: 15000 }).catch(() => {}); + // Validate URL before navigation — state files loaded from disk could + // contain file://, chrome://, or cloud metadata URLs + try { + await validateNavigationUrl(saved.url); + await page.goto(saved.url, { waitUntil: 'domcontentloaded', timeout: 15000 }).catch(() => {}); + } catch { + // Blocked URL — skip navigation for this page + } } if (saved.storage) { diff --git a/browse/src/meta-commands.ts b/browse/src/meta-commands.ts index e2060c214..ef162b9a6 100644 --- a/browse/src/meta-commands.ts +++ b/browse/src/meta-commands.ts @@ -15,11 +15,31 @@ import { resolveConfig } from './config'; import type { Frame } from 'playwright'; // Security: Path validation to prevent path traversal attacks -const 
SAFE_DIRECTORIES = [TEMP_DIR, process.cwd()]; +// Resolve safe directories through realpathSync to handle symlinks (e.g., macOS /tmp → /private/tmp) +const SAFE_DIRECTORIES = [TEMP_DIR, process.cwd()].map(d => { + try { return fs.realpathSync(d); } catch { return d; } +}); export function validateOutputPath(filePath: string): void { const resolved = path.resolve(filePath); - const isSafe = SAFE_DIRECTORIES.some(dir => isPathWithin(resolved, dir)); + // Resolve symlinks to prevent symlink-based escapes (e.g., /tmp/link → /etc/crontab) + let realPath: string; + try { + realPath = fs.realpathSync(resolved); + } catch (err: any) { + if (err.code === 'ENOENT') { + // File doesn't exist yet (output path) — resolve directory part for symlinks + try { + const dir = fs.realpathSync(path.dirname(resolved)); + realPath = path.join(dir, path.basename(resolved)); + } catch { + realPath = resolved; + } + } else { + throw new Error(`Cannot resolve real path: ${filePath} (${err.code})`); + } + } + const isSafe = SAFE_DIRECTORIES.some(dir => isPathWithin(realPath, dir)); if (!isSafe) { throw new Error(`Path must be within: ${SAFE_DIRECTORIES.join(', ')}`); } diff --git a/browse/src/read-commands.ts b/browse/src/read-commands.ts index 83c791a3d..03b327af5 100644 --- a/browse/src/read-commands.ts +++ b/browse/src/read-commands.ts @@ -13,6 +13,10 @@ import * as path from 'path'; import { TEMP_DIR, isPathWithin } from './platform'; import { inspectElement, formatInspectorResult, getModificationHistory } from './cdp-inspector'; +// Redaction patterns for sensitive cookie/storage values — exported for test coverage +export const SENSITIVE_COOKIE_NAME = /(^|[_.-])(token|secret|key|password|credential|auth|jwt|session|csrf|sid)($|[_.-])|api.?key/i; +export const SENSITIVE_COOKIE_VALUE = /^(eyJ|sk-|sk_live_|sk_test_|pk_live_|pk_test_|rk_live_|sk-ant-|ghp_|gho_|github_pat_|xox[bpsa]-|AKIA[A-Z0-9]{16}|AIza|SG\.|Bearer\s|sbp_)/; + /** Detect await keyword, ignoring comments. 
Accepted risk: await in string literals triggers wrapping (harmless). */ function hasAwait(code: string): boolean { const stripped = code.replace(/\/\/.*$/gm, '').replace(/\/\*[\s\S]*?\*\//g, ''); @@ -300,7 +304,14 @@ export async function handleReadCommand( case 'cookies': { const cookies = await page.context().cookies(); - return JSON.stringify(cookies, null, 2); + // Redact cookie values that look like secrets (consistent with storage redaction) + const redacted = cookies.map(c => { + if (SENSITIVE_COOKIE_NAME.test(c.name) || SENSITIVE_COOKIE_VALUE.test(c.value)) { + return { ...c, value: `[REDACTED — ${c.value.length} chars]` }; + } + return c; + }); + return JSON.stringify(redacted, null, 2); } case 'storage': { diff --git a/browse/src/server.ts b/browse/src/server.ts index 110b9d3ea..4039b053d 100644 --- a/browse/src/server.ts +++ b/browse/src/server.ts @@ -519,11 +519,19 @@ function spawnClaude(userMessage: string, extensionUrl?: string | null, forTabId // Agent status transitions happen when we receive agent_done/agent_error events. } -function killAgent(): void { +function killAgent(targetTabId?: number | null): void { if (agentProcess) { try { agentProcess.kill('SIGTERM'); } catch {} setTimeout(() => { try { agentProcess?.kill('SIGKILL'); } catch {} }, 3000); } + // Signal the sidebar-agent worker to cancel via a per-tab cancel file. + // Using per-tab files prevents race conditions where one agent's cancel + // signal is consumed by a different tab's agent in concurrent mode. + // When targetTabId is provided, only that tab's agent is cancelled. + const cancelDir = path.join(process.env.HOME || '/tmp', '.gstack'); + const tabId = targetTabId ?? agentTabId ?? 
0; + const cancelFile = path.join(cancelDir, `sidebar-agent-cancel-${tabId}`); + try { fs.writeFileSync(cancelFile, Date.now().toString()); } catch {} agentProcess = null; agentStartTime = null; currentMessage = null; @@ -1198,7 +1206,8 @@ async function start() { if (!validateAuth(req)) { return new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401, headers: { 'Content-Type': 'application/json' } }); } - killAgent(); + const killBody = await req.json().catch(() => ({})); + killAgent(killBody.tabId ?? null); addChatEntry({ ts: new Date().toISOString(), role: 'agent', type: 'agent_error', error: 'Killed by user' }); // Process next in queue if (messageQueue.length > 0) { @@ -1213,7 +1222,8 @@ async function start() { if (!validateAuth(req)) { return new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401, headers: { 'Content-Type': 'application/json' } }); } - killAgent(); + const stopBody = await req.json().catch(() => ({})); + killAgent(stopBody.tabId ?? null); addChatEntry({ ts: new Date().toISOString(), role: 'agent', type: 'agent_error', error: 'Stopped by user' }); return new Response(JSON.stringify({ ok: true, queuedMessages: messageQueue.length }), { status: 200, headers: { 'Content-Type': 'application/json' }, diff --git a/browse/src/sidebar-agent.ts b/browse/src/sidebar-agent.ts index c2d314c5d..178818750 100644 --- a/browse/src/sidebar-agent.ts +++ b/browse/src/sidebar-agent.ts @@ -19,10 +19,16 @@ const SERVER_URL = `http://127.0.0.1:${SERVER_PORT}`; const POLL_MS = 200; // 200ms poll — keeps time-to-first-token low const B = process.env.BROWSE_BIN || path.resolve(__dirname, '../../.claude/skills/gstack/browse/dist/browse'); +const CANCEL_DIR = path.join(process.env.HOME || '/tmp', '.gstack'); +function cancelFileForTab(tabId: number): string { + return path.join(CANCEL_DIR, `sidebar-agent-cancel-${tabId}`); +} + let lastLine = 0; let authToken: string | null = null; // Per-tab processing — each tab can run its own agent 
concurrently const processingTabs = new Set(); +let activeProc: ReturnType<typeof spawn> | null = null; // ─── File drop relay ────────────────────────────────────────── @@ -236,6 +242,10 @@ async function askClaude(queueEntry: any): Promise<void> { let effectiveCwd = cwd || process.cwd(); try { fs.accessSync(effectiveCwd); } catch { effectiveCwd = process.cwd(); } + // Clear any stale cancel signal for this tab before starting + const cancelFile = cancelFileForTab(tid); + try { fs.unlinkSync(cancelFile); } catch {} + const proc = spawn('claude', claudeArgs, { stdio: ['pipe', 'pipe', 'pipe'], cwd: effectiveCwd, @@ -247,9 +257,23 @@ async function askClaude(queueEntry: any): Promise<void> { BROWSE_TAB: String(tid), }, }); + activeProc = proc; proc.stdin.end(); + // Poll for per-tab cancel signal from server's killAgent() + const cancelCheck = setInterval(() => { + try { + if (fs.existsSync(cancelFile)) { + console.log(`[sidebar-agent] Cancel signal received for tab ${tid} — killing claude subprocess`); + try { proc.kill('SIGTERM'); } catch {} + setTimeout(() => { try { proc.kill('SIGKILL'); } catch {} }, 3000); + fs.unlinkSync(cancelFile); + clearInterval(cancelCheck); + } + } catch {} + }, 500); + let buffer = ''; proc.stdout.on('data', (data: Buffer) => { @@ -268,6 +292,8 @@ async function askClaude(queueEntry: any): Promise<void> { }); proc.on('close', (code) => { + clearInterval(cancelCheck); + activeProc = null; if (buffer.trim()) { try { handleStreamEvent(JSON.parse(buffer), tid); } catch {} } @@ -282,6 +308,8 @@ async function askClaude(queueEntry: any): Promise<void> { }); proc.on('error', (err) => { + clearInterval(cancelCheck); + activeProc = null; const errorMsg = stderrBuffer.trim() ?
`${err.message}\nstderr: ${stderrBuffer.trim().slice(-500)}` : err.message; diff --git a/browse/src/url-validation.ts b/browse/src/url-validation.ts index 4f2c922c1..d1cccf111 100644 --- a/browse/src/url-validation.ts +++ b/browse/src/url-validation.ts @@ -3,13 +3,32 @@ * Localhost and private IPs are allowed (primary use case: QA testing local dev servers). */ -const BLOCKED_METADATA_HOSTS = new Set([ +export const BLOCKED_METADATA_HOSTS = new Set([ '169.254.169.254', // AWS/GCP/Azure instance metadata - 'fd00::', // IPv6 unique local (metadata in some cloud setups) 'metadata.google.internal', // GCP metadata 'metadata.azure.internal', // Azure IMDS ]); +/** + * IPv6 prefixes to block (CIDR-style). Any address starting with these + * hex prefixes is rejected. Covers the full ULA range (fc00::/7 = fc00:: and fd00::). + */ +const BLOCKED_IPV6_PREFIXES = ['fc', 'fd']; + +/** + * Check if an IPv6 address falls within a blocked prefix range. + * Handles the full ULA range (fc00::/7) — not just the exact literal fd00::. + * Only matches actual IPv6 addresses (must contain ':'), not hostnames + * like fd.example.com or fcustomer.com. 
+ */ +function isBlockedIpv6(addr: string): boolean { + const normalized = addr.toLowerCase().replace(/^\[|\]$/g, ''); + // Must contain a colon to be an IPv6 address — avoids false positives on + // hostnames like fd.example.com or fcustomer.com + if (!normalized.includes(':')) return false; + return BLOCKED_IPV6_PREFIXES.some(prefix => normalized.startsWith(prefix)); +} + /** * Normalize hostname for blocklist comparison: * - Strip trailing dot (DNS fully-qualified notation) @@ -35,7 +54,7 @@ function isMetadataIp(hostname: string): boolean { try { const probe = new URL(`http://${hostname}`); const normalized = probe.hostname; - if (BLOCKED_METADATA_HOSTS.has(normalized)) return true; + if (BLOCKED_METADATA_HOSTS.has(normalized) || isBlockedIpv6(normalized)) return true; // Also check after stripping trailing dot if (normalized.endsWith('.') && BLOCKED_METADATA_HOSTS.has(normalized.slice(0, -1))) return true; } catch { @@ -51,9 +70,13 @@ function isMetadataIp(hostname: string): boolean { async function resolvesToBlockedIp(hostname: string): Promise { try { const dns = await import('node:dns'); - const { resolve4 } = dns.promises; - const addresses = await resolve4(hostname); - return addresses.some(addr => BLOCKED_METADATA_HOSTS.has(addr)); + const { resolve4, resolve6 } = dns.promises; + // Check both IPv4 (A) and IPv6 (AAAA) records — an attacker can use either + const [v4Addrs, v6Addrs] = await Promise.all([ + resolve4(hostname).catch(() => [] as string[]), + resolve6(hostname).catch(() => [] as string[]), + ]); + return [...v4Addrs, ...v6Addrs].some(addr => BLOCKED_METADATA_HOSTS.has(addr) || isBlockedIpv6(addr)); } catch { // DNS resolution failed — not a rebinding risk return false; @@ -76,7 +99,7 @@ export async function validateNavigationUrl(url: string): Promise { const hostname = normalizeHostname(parsed.hostname.toLowerCase()); - if (BLOCKED_METADATA_HOSTS.has(hostname) || isMetadataIp(hostname)) { + if (BLOCKED_METADATA_HOSTS.has(hostname) || 
isMetadataIp(hostname) || isBlockedIpv6(hostname)) { throw new Error( `Blocked: ${parsed.hostname} is a cloud metadata endpoint. Access is denied for security.` ); diff --git a/browse/src/write-commands.ts b/browse/src/write-commands.ts index 19283fef0..cb6d62653 100644 --- a/browse/src/write-commands.ts +++ b/browse/src/write-commands.ts @@ -370,19 +370,30 @@ export async function handleWriteCommand( const [selector, ...filePaths] = args; if (!selector || filePaths.length === 0) throw new Error('Usage: browse upload [file2...]'); - // Validate all files exist before upload + // Security: resolve and validate all file paths within safe directories before upload. + // Use resolved paths for the actual operation to eliminate TOCTOU symlink races. + const safeDirs = [TEMP_DIR, process.cwd()]; + const resolvedPaths: string[] = []; for (const fp of filePaths) { if (!fs.existsSync(fp)) throw new Error(`File not found: ${fp}`); + const realFp = fs.realpathSync(path.resolve(fp)); + const isSafe = safeDirs.some(dir => { + try { return isPathWithin(realFp, fs.realpathSync(dir)); } catch { return isPathWithin(realFp, dir); } + }); + if (!isSafe) { + throw new Error(`Upload path must be within: ${safeDirs.join(', ')}`); + } + resolvedPaths.push(realFp); } const resolved = await bm.resolveRef(selector); if ('locator' in resolved) { - await resolved.locator.setInputFiles(filePaths); + await resolved.locator.setInputFiles(resolvedPaths); } else { - await target.locator(resolved.selector).setInputFiles(filePaths); + await target.locator(resolved.selector).setInputFiles(resolvedPaths); } - const fileInfo = filePaths.map(fp => { + const fileInfo = resolvedPaths.map(fp => { const stat = fs.statSync(fp); return `${path.basename(fp)} (${stat.size}B)`; }).join(', '); @@ -407,19 +418,17 @@ export async function handleWriteCommand( case 'cookie-import': { const filePath = args[0]; if (!filePath) throw new Error('Usage: browse cookie-import '); - // Path validation — prevent reading 
arbitrary files - if (path.isAbsolute(filePath)) { - const safeDirs = [TEMP_DIR, process.cwd()]; - const resolved = path.resolve(filePath); - if (!safeDirs.some(dir => isPathWithin(resolved, dir))) { - throw new Error(`Path must be within: ${safeDirs.join(', ')}`); - } - } - if (path.normalize(filePath).includes('..')) { - throw new Error('Path traversal sequences (..) are not allowed'); - } + // Path validation — resolve symlinks to prevent reading arbitrary files if (!fs.existsSync(filePath)) throw new Error(`File not found: ${filePath}`); - const raw = fs.readFileSync(filePath, 'utf-8'); + const realFilePath = fs.realpathSync(path.resolve(filePath)); + const safeDirs = [TEMP_DIR, process.cwd()]; + const isSafe = safeDirs.some(dir => { + try { return isPathWithin(realFilePath, fs.realpathSync(dir)); } catch { return isPathWithin(realFilePath, dir); } + }); + if (!isSafe) { + throw new Error(`Path must be within: ${safeDirs.join(', ')}`); + } + const raw = fs.readFileSync(realFilePath, 'utf-8'); let cookies: any[]; try { cookies = JSON.parse(raw); } catch { throw new Error(`Invalid JSON in ${filePath}`); } if (!Array.isArray(cookies)) throw new Error('Cookie file must contain a JSON array'); diff --git a/browse/test/path-validation.test.ts b/browse/test/path-validation.test.ts index 8a26436ca..ccdb1ecc4 100644 --- a/browse/test/path-validation.test.ts +++ b/browse/test/path-validation.test.ts @@ -1,7 +1,8 @@ import { describe, it, expect } from 'bun:test'; import { validateOutputPath } from '../src/meta-commands'; -import { validateReadPath } from '../src/read-commands'; -import { symlinkSync, unlinkSync, writeFileSync } from 'fs'; +import { validateReadPath, SENSITIVE_COOKIE_NAME, SENSITIVE_COOKIE_VALUE } from '../src/read-commands'; +import { BLOCKED_METADATA_HOSTS } from '../src/url-validation'; +import { symlinkSync, unlinkSync, writeFileSync, mkdirSync, existsSync, realpathSync } from 'fs'; import { tmpdir } from 'os'; import { join } from 'path'; @@ -89,3 
+90,88 @@ describe('validateReadPath', () => { } }); }); + +describe('validateOutputPath — symlink resolution', () => { + it('blocks symlink inside /tmp pointing outside safe dirs', () => { + const linkPath = join(tmpdir(), 'test-output-symlink-' + Date.now() + '.png'); + try { + symlinkSync('/etc/crontab', linkPath); + expect(() => validateOutputPath(linkPath)).toThrow(/Path must be within/); + } finally { + try { unlinkSync(linkPath); } catch {} + } + }); + + it('allows symlink inside /tmp pointing to another /tmp path', () => { + const realTmp = realpathSync(tmpdir()); + const targetPath = join(realTmp, 'test-output-real-' + Date.now() + '.png'); + const linkPath = join(realTmp, 'test-output-link-' + Date.now() + '.png'); + try { + writeFileSync(targetPath, ''); + symlinkSync(targetPath, linkPath); + expect(() => validateOutputPath(linkPath)).not.toThrow(); + } finally { + try { unlinkSync(linkPath); } catch {} + try { unlinkSync(targetPath); } catch {} + } + }); + + it('blocks new file in symlinked directory pointing outside', () => { + // A symlinked directory under /tmp pointing to /etc should be caught + const linkDir = join(tmpdir(), 'test-dirlink-' + Date.now()); + try { + symlinkSync('/etc', linkDir); + expect(() => validateOutputPath(join(linkDir, 'evil.png'))).toThrow(/Path must be within/); + } finally { + try { unlinkSync(linkDir); } catch {} + } + }); +}); + +describe('cookie redaction — production patterns', () => { + // Import production regexes directly to prevent drift between tests and implementation + it('detects sensitive cookie names', () => { + expect(SENSITIVE_COOKIE_NAME.test('session_id')).toBe(true); + expect(SENSITIVE_COOKIE_NAME.test('auth_token')).toBe(true); + expect(SENSITIVE_COOKIE_NAME.test('csrf-token')).toBe(true); + expect(SENSITIVE_COOKIE_NAME.test('api_key')).toBe(true); + expect(SENSITIVE_COOKIE_NAME.test('jwt.payload')).toBe(true); + }); + + it('ignores non-sensitive cookie names', () => { + 
expect(SENSITIVE_COOKIE_NAME.test('theme')).toBe(false); + expect(SENSITIVE_COOKIE_NAME.test('locale')).toBe(false); + expect(SENSITIVE_COOKIE_NAME.test('_ga')).toBe(false); + }); + + it('detects sensitive cookie value prefixes', () => { + expect(SENSITIVE_COOKIE_VALUE.test('eyJhbGciOiJIUzI1NiJ9')).toBe(true); // JWT + expect(SENSITIVE_COOKIE_VALUE.test('sk-ant-abc123')).toBe(true); // Anthropic + expect(SENSITIVE_COOKIE_VALUE.test('ghp_xxxxxxxxxxxx')).toBe(true); // GitHub PAT + expect(SENSITIVE_COOKIE_VALUE.test('xoxb-token')).toBe(true); // Slack + }); + + it('ignores non-sensitive values', () => { + expect(SENSITIVE_COOKIE_VALUE.test('dark')).toBe(false); + expect(SENSITIVE_COOKIE_VALUE.test('en-US')).toBe(false); + expect(SENSITIVE_COOKIE_VALUE.test('1234567890')).toBe(false); + }); +}); + +describe('DNS rebinding — production blocklist', () => { + // Import production blocklist directly to prevent drift between tests and implementation + it('blocks fd00:: IPv6 metadata address via validateNavigationUrl', async () => { + // fd00:: is now blocked via prefix matching (isBlockedIpv6), not the exact-match set + const { validateNavigationUrl } = await import('../src/url-validation'); + await expect(validateNavigationUrl('http://[fd00::]/')).rejects.toThrow(/cloud metadata/i); + }); + + it('blocks AWS/GCP IPv4 metadata address', () => { + expect(BLOCKED_METADATA_HOSTS.has('169.254.169.254')).toBe(true); + }); + + it('does not block normal addresses', () => { + expect(BLOCKED_METADATA_HOSTS.has('8.8.8.8')).toBe(false); + expect(BLOCKED_METADATA_HOSTS.has('2001:4860:4860::8888')).toBe(false); + }); +}); diff --git a/browse/test/url-validation.test.ts b/browse/test/url-validation.test.ts index 9b09db2fd..db12f1735 100644 --- a/browse/test/url-validation.test.ts +++ b/browse/test/url-validation.test.ts @@ -62,11 +62,56 @@ describe('validateNavigationUrl', () => { await expect(validateNavigationUrl('http://0251.0376.0251.0376/')).rejects.toThrow(/cloud metadata/i); }); - 
it('blocks IPv6 metadata with brackets', async () => { + it('blocks IPv6 metadata with brackets (fd00::)', async () => { await expect(validateNavigationUrl('http://[fd00::]/')).rejects.toThrow(/cloud metadata/i); }); + it('blocks IPv6 ULA fd00::1 (not just fd00::)', async () => { + await expect(validateNavigationUrl('http://[fd00::1]/')).rejects.toThrow(/cloud metadata/i); + }); + + it('blocks IPv6 ULA fd12:3456::1', async () => { + await expect(validateNavigationUrl('http://[fd12:3456::1]/')).rejects.toThrow(/cloud metadata/i); + }); + + it('blocks IPv6 ULA fc00:: (full fc00::/7 range)', async () => { + await expect(validateNavigationUrl('http://[fc00::]/')).rejects.toThrow(/cloud metadata/i); + }); + + it('does not block hostnames starting with fd (e.g. fd.example.com)', async () => { + await expect(validateNavigationUrl('https://fd.example.com/')).resolves.toBeUndefined(); + }); + + it('does not block hostnames starting with fc (e.g. fcustomer.com)', async () => { + await expect(validateNavigationUrl('https://fcustomer.com/')).resolves.toBeUndefined(); + }); + it('throws on malformed URLs', async () => { await expect(validateNavigationUrl('not-a-url')).rejects.toThrow(/Invalid URL/i); }); }); + +describe('validateNavigationUrl — restoreState coverage', () => { + // These test the URL patterns that restoreState must now block + // (previously it called page.goto without validation) + + it('blocks file:// URLs that could appear in saved state', async () => { + await expect(validateNavigationUrl('file:///etc/passwd')).rejects.toThrow(/scheme.*not allowed/i); + }); + + it('blocks chrome:// URLs that could appear in saved state', async () => { + await expect(validateNavigationUrl('chrome://settings')).rejects.toThrow(/scheme.*not allowed/i); + }); + + it('blocks metadata IPs that could be injected into state files', async () => { + await expect(validateNavigationUrl('http://169.254.169.254/latest/meta-data/')).rejects.toThrow(/cloud metadata/i); + }); + + it('allows 
normal https URLs from saved state', async () => { + await expect(validateNavigationUrl('https://example.com/page')).resolves.toBeUndefined(); + }); + + it('allows localhost URLs from saved state', async () => { + await expect(validateNavigationUrl('http://localhost:3000/app')).resolves.toBeUndefined(); + }); +}); diff --git a/design/src/serve.ts b/design/src/serve.ts index 7d974905c..8f737ecad 100644 --- a/design/src/serve.ts +++ b/design/src/serve.ts @@ -53,6 +53,10 @@ export async function serve(options: ServeOptions): Promise<void> { process.exit(1); } + // Security: anchor all file reads to the initial HTML's directory. + // Prevents /api/reload from reading arbitrary files via path traversal. + const allowedDir = fs.realpathSync(path.dirname(path.resolve(html))); + let htmlContent = fs.readFileSync(html, "utf-8"); let state: ServerState = "serving"; let timeoutTimer: ReturnType<typeof setTimeout> | null = null; @@ -182,8 +186,19 @@ export async function serve(options: ServeOptions): Promise<void> { ); } + // Security: resolve symlinks and validate the reload path is within the + // allowed directory (anchored to the initial HTML file's parent). + // Prevents path traversal via /api/reload reading arbitrary files.
+ const resolvedReload = fs.realpathSync(path.resolve(newHtmlPath)); + if (!resolvedReload.startsWith(allowedDir + path.sep) && resolvedReload !== allowedDir) { + return Response.json( + { error: `Path must be within: ${allowedDir}` }, + { status: 403 } + ); + } + // Swap the HTML content - htmlContent = fs.readFileSync(newHtmlPath, "utf-8"); + htmlContent = fs.readFileSync(resolvedReload, "utf-8"); state = "serving"; console.error(`SERVE_RELOADED: html=${newHtmlPath}`); diff --git a/design/test/serve.test.ts b/design/test/serve.test.ts index 439e4ba71..f222a6364 100644 --- a/design/test/serve.test.ts +++ b/design/test/serve.test.ts @@ -274,6 +274,103 @@ describe('Serve HTTP endpoints', () => { }); }); +// ─── Path traversal protection in /api/reload ───────────────────── + +describe('Serve /api/reload — path traversal protection', () => { + let server: ReturnType<typeof Bun.serve>; + let baseUrl: string; + let htmlContent: string; + let allowedDir: string; + + beforeAll(() => { + // Production-equivalent allowedDir anchored to tmpDir + allowedDir = fs.realpathSync(tmpDir); + htmlContent = fs.readFileSync(boardHtml, 'utf-8'); + + // This server mirrors the production serve() with the path validation fix + server = Bun.serve({ + port: 0, + fetch(req) { + const url = new URL(req.url); + + if (req.method === 'GET' && url.pathname === '/') { + return new Response(htmlContent, { + headers: { 'Content-Type': 'text/html; charset=utf-8' }, + }); + } + + if (req.method === 'POST' && url.pathname === '/api/reload') { + return (async () => { + let body: any; + try { body = await req.json(); } catch { return Response.json({ error: 'Invalid JSON' }, { status: 400 }); } + if (!body.html || !fs.existsSync(body.html)) { + return Response.json({ error: `HTML file not found: ${body.html}` }, { status: 400 }); } + // Production path validation — same as design/src/serve.ts + const resolvedReload = fs.realpathSync(path.resolve(body.html)); + if (!resolvedReload.startsWith(allowedDir + path.sep) &&
resolvedReload !== allowedDir) { + return Response.json({ error: `Path must be within: ${allowedDir}` }, { status: 403 }); + } + htmlContent = fs.readFileSync(resolvedReload, 'utf-8'); + return Response.json({ reloaded: true }); + })(); + } + + return new Response('Not found', { status: 404 }); + }, + }); + baseUrl = `http://localhost:${server.port}`; + }); + + afterAll(() => { + server.stop(); + }); + + test('blocks reload with path outside allowed directory', async () => { + const res = await fetch(`${baseUrl}/api/reload`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ html: '/etc/passwd' }), + }); + expect(res.status).toBe(403); + const data = await res.json(); + expect(data.error).toContain('Path must be within'); + }); + + test('blocks reload with symlink pointing outside allowed directory', async () => { + const linkPath = path.join(tmpDir, 'evil-link.html'); + try { + fs.symlinkSync('/etc/passwd', linkPath); + const res = await fetch(`${baseUrl}/api/reload`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ html: linkPath }), + }); + expect(res.status).toBe(403); + } finally { + try { fs.unlinkSync(linkPath); } catch {} + } + }); + + test('allows reload with file inside allowed directory', async () => { + const goodPath = path.join(tmpDir, 'safe-board.html'); + fs.writeFileSync(goodPath, 'Safe reload'); + + const res = await fetch(`${baseUrl}/api/reload`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ html: goodPath }), + }); + expect(res.status).toBe(200); + const data = await res.json(); + expect(data.reloaded).toBe(true); + + // Verify the new content is served + const page = await fetch(baseUrl); + expect(await page.text()).toContain('Safe reload'); + }); +}); + // ─── Full lifecycle: regeneration round-trip ────────────────────── describe('Full regeneration lifecycle', () => { diff --git 
a/extension/background.js b/extension/background.js index 4084acaf3..c0a7ca56b 100644 --- a/extension/background.js +++ b/extension/background.js @@ -76,8 +76,8 @@ function setConnected(healthData) { chrome.action.setBadgeBackgroundColor({ color: '#F59E0B' }); chrome.action.setBadgeText({ text: ' ' }); - // Broadcast health to popup and side panel (include token for sidepanel auth) - chrome.runtime.sendMessage({ type: 'health', data: { ...healthData, token: authToken } }).catch(() => {}); + // Broadcast health to popup and side panel (token excluded — use getToken message instead) + chrome.runtime.sendMessage({ type: 'health', data: healthData }).catch(() => {}); // Notify content scripts on connection change if (wasDisconnected) { @@ -252,7 +252,7 @@ chrome.runtime.onMessage.addListener((msg, sender, sendResponse) => { } const ALLOWED_TYPES = new Set([ - 'getPort', 'setPort', 'getServerUrl', 'fetchRefs', + 'getPort', 'setPort', 'getServerUrl', 'getToken', 'fetchRefs', 'openSidePanel', 'command', 'sidebar-command', // Inspector message types 'startInspector', 'stopInspector', 'elementPicked', 'pickerCancelled', @@ -282,7 +282,18 @@ chrome.runtime.onMessage.addListener((msg, sender, sendResponse) => { return true; } - // getToken handler removed — token distributed via health broadcast + // Token delivered via targeted sendResponse, not broadcast — limits exposure. + // Only respond to extension pages (sidepanel/popup) — content scripts have + // sender.tab set, so reject those to prevent token access from injected contexts. 
+ if (msg.type === 'getToken') { + if (sender.tab) { + console.warn('[gstack] Rejected getToken from content script context'); + sendResponse({ token: null }); + } else { + sendResponse({ token: authToken }); + } + return true; + } if (msg.type === 'fetchRefs') { fetchAndRelayRefs().then(() => sendResponse({ ok: true })); diff --git a/extension/sidepanel.js b/extension/sidepanel.js index 9e6626fc6..b9bb33688 100644 --- a/extension/sidepanel.js +++ b/extension/sidepanel.js @@ -1406,7 +1406,10 @@ chrome.runtime.onMessage.addListener((msg) => { if (msg.type === 'health') { if (msg.data) { const url = `http://127.0.0.1:${msg.data.port || 34567}`; - updateConnection(url, msg.data.token); + // Request token via targeted sendResponse (not broadcast) to limit exposure + chrome.runtime.sendMessage({ type: 'getToken' }, (resp) => { + updateConnection(url, resp?.token || null); + }); applyChatEnabled(!!msg.data.chatEnabled); } else { updateConnection(null); diff --git a/supabase/functions/telemetry-ingest/index.ts b/supabase/functions/telemetry-ingest/index.ts index 07d65d364..6f9670810 100644 --- a/supabase/functions/telemetry-ingest/index.ts +++ b/supabase/functions/telemetry-ingest/index.ts @@ -43,9 +43,18 @@ Deno.serve(async (req) => { return new Response(`Batch too large (max ${MAX_BATCH_SIZE})`, { status: 400 }); } + // Use the caller's apikey (anon key) instead of the service role key. + // RLS policies allow anon INSERT on telemetry_events and INSERT/UPDATE + // on installations (for upsert). Column-level GRANT restricts anon + // UPDATE to (last_seen, gstack_version, os) only — see migration 003. + const callerKey = req.headers.get("apikey"); + if (!callerKey) { + return new Response("Unauthorized — apikey header required", { status: 401 }); + } + const supabase = createClient( Deno.env.get("SUPABASE_URL") ?? "", - Deno.env.get("SUPABASE_SERVICE_ROLE_KEY") ?? 
"" + callerKey ); // Validate and transform events @@ -111,8 +120,9 @@ Deno.serve(async (req) => { } // Upsert installations (update last_seen) + const upsertErrors: string[] = []; for (const [id, data] of installationUpserts) { - await supabase + const { error: upsertError } = await supabase .from("installations") .upsert( { @@ -123,9 +133,16 @@ Deno.serve(async (req) => { }, { onConflict: "installation_id" } ); + if (upsertError) { + upsertErrors.push(`${id}: ${upsertError.message}`); + } } - return new Response(JSON.stringify({ inserted: rows.length }), { + const result: Record = { inserted: rows.length }; + if (upsertErrors.length > 0) { + result.upsertErrors = upsertErrors; + } + return new Response(JSON.stringify(result), { status: 200, headers: { "Content-Type": "application/json" }, }); diff --git a/supabase/migrations/003_installations_upsert_policy.sql b/supabase/migrations/003_installations_upsert_policy.sql new file mode 100644 index 000000000..078be7f53 --- /dev/null +++ b/supabase/migrations/003_installations_upsert_policy.sql @@ -0,0 +1,25 @@ +-- 003_installations_upsert_policy.sql +-- Re-add a scoped UPDATE policy for installations so the telemetry-ingest +-- edge function can upsert (update last_seen) using the caller's anon key +-- instead of the service role key. +-- +-- Migration 002 dropped the overly broad "anon_update_last_seen" policy +-- (which allowed UPDATE on ALL columns). This replacement uses: +-- 1. An RLS policy to allow UPDATE (required for any row access) +-- 2. Column-level GRANT to restrict anon to only the tracking columns +-- the edge function actually writes (last_seen, gstack_version, os) +-- +-- This means anon callers cannot UPDATE first_seen or installation_id, +-- closing the residual risk from the broad RLS-only approach. 
+ +-- RLS policy: allow UPDATE on rows (required for PostgREST/upsert) +CREATE POLICY "anon_update_tracking" ON installations + FOR UPDATE + USING (true) + WITH CHECK (true); + +-- Column-level restriction: anon can only UPDATE these three columns. +-- PostgreSQL GRANT UPDATE (col, ...) is enforced at the query level — +-- any UPDATE touching other columns will be rejected with a permission error. +REVOKE UPDATE ON installations FROM anon; +GRANT UPDATE (last_seen, gstack_version, os) ON installations TO anon;