diff --git a/.bin/bun b/.bin/bun index e457aed40..a50a26907 100755 --- a/.bin/bun +++ b/.bin/bun @@ -172,6 +172,14 @@ create_cache() { fi } +# Function to check if tmux is installed +check_tmux_installed() { + if ! command -v tmux &> /dev/null; then + return 1 + fi + return 0 +} + # Function to check if command doesn't need secrets # Returns 0 if secrets are NOT needed, 1 if they ARE needed doesnt_need_secrets() { @@ -248,6 +256,43 @@ doesnt_need_secrets() { ;; esac ;; + # Test command needs special handling + test) + # Check for integration/e2e tests that require tmux + # Convention: test files matching *integration*.test.ts or *e2e*.test.ts + local needs_tmux=false + + for arg in "$@"; do + # Check if running integration or e2e tests + if [[ "$arg" =~ (integration|e2e).*\.test\.(ts|tsx|js|jsx) ]]; then + needs_tmux=true + break + fi + # Also check if running all tests and integration files exist + if [[ "$arg" == "test" ]] || [[ -z "$arg" ]]; then + if ls */src/__tests__/*integration*.test.ts 2>/dev/null || ls */src/__tests__/*e2e*.test.ts 2>/dev/null; then + needs_tmux=true + break + fi + fi + done + + # If running integration/e2e tests, check tmux availability + if [ "$needs_tmux" = true ]; then + if ! check_tmux_installed; then + echo "โš ๏ธ tmux not found but required for integration/E2E tests" + echo "" + echo "๐Ÿ“ฆ Install tmux:" + echo " macOS: brew install tmux" + echo " Ubuntu: sudo apt-get install tmux" + echo " Windows: Use WSL and run 'sudo apt-get install tmux'" + echo "" + echo "โ„น๏ธ Skipping tmux-dependent tests..." 
+ echo "" + fi + fi + return 1 # Tests need secrets + ;; *) # Default to needing secrets for all other commands return 1 diff --git a/.github/actions/setup-project/action.yml b/.github/actions/setup-project/action.yml new file mode 100644 index 000000000..d759b6766 --- /dev/null +++ b/.github/actions/setup-project/action.yml @@ -0,0 +1,31 @@ +name: 'Setup Project' +description: 'Setup Bun, cache dependencies, and install packages' + +inputs: + bun-version: + description: 'Bun version to install' + required: false + default: '1.3.0' + +runs: + using: 'composite' + steps: + - name: Set up Bun + uses: oven-sh/setup-bun@v2 + with: + bun-version: ${{ inputs.bun-version }} + + - name: Cache dependencies + uses: actions/cache@v4 + with: + path: | + node_modules + */node_modules + key: ${{ runner.os }}-deps-${{ hashFiles('**/bun.lock*', '**/package.json') }} + restore-keys: | + ${{ runner.os }}-deps-${{ hashFiles('**/bun.lock*') }} + ${{ runner.os }}-deps- + + - name: Install dependencies + shell: bash + run: bun install --frozen-lockfile diff --git a/.github/knowledge.md b/.github/knowledge.md index 24fdb589a..eaa9e8ef5 100644 --- a/.github/knowledge.md +++ b/.github/knowledge.md @@ -1,4 +1,39 @@ -# GitHub Actions Knowledge +# GitHub Workflows + +## Refactoring Patterns + +### Composite Actions + +Common setup steps (checkout, Bun setup, caching, installation) have been extracted to `.github/actions/setup-project/action.yml`. + +Usage: + +```yaml +steps: + - uses: actions/checkout@v4 + with: + # checkout-specific params + + - uses: ./.github/actions/setup-project +``` + +Note: Checkout must be separate from the composite action to avoid circular dependencies. 
+ +### Environment Variables + +GitHub API URLs are extracted as environment variables to avoid duplication: + +```yaml +env: + GITHUB_API_URL: https://api.github.com/repos/CodebuffAI/codebuff + GITHUB_UPLOADS_URL: https://uploads.github.com/repos/CodebuffAI/codebuff +``` + +This pattern: + +- Reduces duplication across workflow steps +- Makes repository changes easier (single point of change) +- Improves readability and maintainability ## CI/CD Pipeline Overview diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4bb5e98f4..8bc487863 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -43,7 +43,7 @@ jobs: env: SECRETS_CONTEXT: ${{ toJSON(secrets) }} run: | - VAR_NAMES=$(node scripts/generate-ci-env.js) + VAR_NAMES=$(bun scripts/generate-ci-env.js) echo "$SECRETS_CONTEXT" | jq -r --argjson vars "$VAR_NAMES" ' to_entries | .[] | select(.key as $k | $vars | index($k)) | .key + "=" + .value ' >> $GITHUB_ENV @@ -121,7 +121,7 @@ jobs: env: SECRETS_CONTEXT: ${{ toJSON(secrets) }} run: | - VAR_NAMES=$(node scripts/generate-ci-env.js) + VAR_NAMES=$(bun scripts/generate-ci-env.js) echo "$SECRETS_CONTEXT" | jq -r --argjson vars "$VAR_NAMES" ' to_entries | .[] | select(.key as $k | $vars | index($k)) | .key + "=" + .value ' >> $GITHUB_ENV @@ -130,6 +130,9 @@ jobs: echo "NEXT_PUBLIC_INFISICAL_UP=true" >> $GITHUB_ENV echo "CODEBUFF_GITHUB_TOKEN=${{ secrets.CODEBUFF_GITHUB_TOKEN }}" >> $GITHUB_ENV + - name: Build SDK before tests + run: cd sdk && bun run build + - name: Run ${{ matrix.package }} tests uses: nick-fields/retry@v3 with: @@ -192,7 +195,7 @@ jobs: env: SECRETS_CONTEXT: ${{ toJSON(secrets) }} run: | - VAR_NAMES=$(node scripts/generate-ci-env.js) + VAR_NAMES=$(bun scripts/generate-ci-env.js) echo "$SECRETS_CONTEXT" | jq -r --argjson vars "$VAR_NAMES" ' to_entries | .[] | select(.key as $k | $vars | index($k)) | .key + "=" + .value ' >> $GITHUB_ENV @@ -201,6 +204,9 @@ jobs: echo "NEXT_PUBLIC_INFISICAL_UP=true" >> $GITHUB_ENV echo 
"CODEBUFF_GITHUB_TOKEN=${{ secrets.CODEBUFF_GITHUB_TOKEN }}" >> $GITHUB_ENV + - name: Build SDK before integration tests + run: cd sdk && bun run build + - name: Run ${{ matrix.package }} integration tests uses: nick-fields/retry@v3 with: diff --git a/.github/workflows/cli-release-build.yml b/.github/workflows/cli-release-build.yml index 356e78c7a..f03a40d32 100644 --- a/.github/workflows/cli-release-build.yml +++ b/.github/workflows/cli-release-build.yml @@ -42,6 +42,8 @@ jobs: bun_target: bun-linux-arm64 platform: linux arch: arm64 + # Cross-compiles on x64 runner; binary can't be executed here. + smoke_test: false - os: macos-13 target: darwin-x64 bun_target: bun-darwin-x64 @@ -52,17 +54,14 @@ jobs: bun_target: bun-darwin-arm64 platform: darwin arch: arm64 - - os: windows-latest - target: win32-x64 - bun_target: bun-windows-x64 - platform: win32 - arch: x64 runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 with: ref: ${{ inputs.checkout-ref || github.sha }} + - uses: ./.github/actions/setup-project + - name: Download staging metadata if: inputs.artifact-name != '' uses: actions/download-artifact@v4 @@ -70,24 +69,79 @@ jobs: name: ${{ inputs.artifact-name }} path: cli/release-staging/ - - name: Set up Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.3.0' + - name: Ensure CLI dependencies + run: bun install --frozen-lockfile --cwd cli - - name: Cache dependencies - uses: actions/cache@v4 - with: - path: | - node_modules - */node_modules - key: ${{ runner.os }}-deps-${{ hashFiles('**/bun.lock*', '**/package.json') }} - restore-keys: | - ${{ runner.os }}-deps-${{ hashFiles('**/bun.lock*') }} - ${{ runner.os }}-deps- + - name: Fix OpenTUI module symlinks + shell: bash + run: | + set -euo pipefail + bun - <<'BUN' + import fs from 'fs'; + import path from 'path'; + + const rootDir = process.cwd(); + const rootOpenTui = path.join(rootDir, 'node_modules', '@opentui'); + const cliNodeModules = path.join(rootDir, 'cli', 'node_modules'); + const 
cliOpenTui = path.join(cliNodeModules, '@opentui'); + + if (!fs.existsSync(rootOpenTui)) { + console.log('Root @opentui packages missing; skipping fix'); + process.exit(0); + } + + fs.mkdirSync(cliOpenTui, { recursive: true }); + + const packages = ['core', 'react']; + for (const pkg of packages) { + const target = path.join(rootOpenTui, pkg); + const link = path.join(cliOpenTui, pkg); + + if (!fs.existsSync(target)) { + console.log(`Target ${target} missing; skipping ${pkg}`); + continue; + } - - name: Install dependencies - run: bun install --frozen-lockfile + let linkStats = null; + try { + linkStats = fs.lstatSync(link); + } catch (error) { + if (error?.code !== 'ENOENT') { + throw error; + } + } + + if (linkStats) { + let alreadyLinked = false; + try { + const actual = fs.realpathSync(link); + alreadyLinked = actual === target; + } catch { + // Broken symlink or unreadable target; we'll replace it. + } + + if (alreadyLinked) { + continue; + } + + fs.rmSync(link, { recursive: true, force: true }); + } + + const type = process.platform === 'win32' ? 
'junction' : 'dir'; + try { + fs.symlinkSync(target, link, type); + console.log(`Linked ${link} -> ${target}`); + } catch (error) { + if (error?.code === 'EEXIST') { + fs.rmSync(link, { recursive: true, force: true }); + fs.symlinkSync(target, link, type); + console.log(`Re-linked ${link} -> ${target}`); + } else { + throw error; + } + } + } + BUN - name: Configure environment variables env: @@ -95,7 +149,7 @@ jobs: ENV_OVERRIDES: ${{ inputs.env-overrides }} shell: bash run: | - VAR_NAMES=$(node scripts/generate-ci-env.js --prefix NEXT_PUBLIC_) + VAR_NAMES=$(bun scripts/generate-ci-env.js --prefix NEXT_PUBLIC_) echo "$SECRETS_CONTEXT" | jq -r --argjson vars "$VAR_NAMES" ' to_entries | .[] | select(.key as $k | $vars | index($k)) | .key + "=" + .value @@ -117,6 +171,7 @@ jobs: OVERRIDE_ARCH: ${{ matrix.arch }} - name: Smoke test binary + if: matrix.smoke_test != false shell: bash run: | cd cli/bin @@ -133,10 +188,159 @@ jobs: if [[ "${{ runner.os }}" == "Windows" ]]; then BINARY_FILE="${{ inputs.binary-name }}.exe" fi - tar -czf codebuff-cli-${{ matrix.target }}.tar.gz -C cli/bin "$BINARY_FILE" + tar -czf codecane-${{ matrix.target }}.tar.gz -C cli/bin "$BINARY_FILE" + + - name: Upload binary artifact + uses: actions/upload-artifact@v4 + with: + name: codecane-${{ matrix.target }} + path: codecane-${{ matrix.target }}.tar.gz + + - name: Open debug shell on failure + if: failure() + uses: mxschmitt/action-tmate@v3 + with: + limit-access-to-actor: true + timeout-minutes: 15 + + build-windows-binary: + runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ inputs.checkout-ref || github.sha }} + + - uses: ./.github/actions/setup-project + + - name: Download staging metadata + if: inputs.artifact-name != '' + uses: actions/download-artifact@v4 + with: + name: ${{ inputs.artifact-name }} + path: cli/release-staging/ + + - name: Ensure CLI dependencies + run: bun install --frozen-lockfile --cwd cli + + - name: Fix OpenTUI module symlinks + shell: 
bash + run: | + set -euo pipefail + bun - <<'BUN' + import fs from 'fs'; + import path from 'path'; + + const rootDir = process.cwd(); + const rootOpenTui = path.join(rootDir, 'node_modules', '@opentui'); + const cliNodeModules = path.join(rootDir, 'cli', 'node_modules'); + const cliOpenTui = path.join(cliNodeModules, '@opentui'); + + if (!fs.existsSync(rootOpenTui)) { + console.log('Root @opentui packages missing; skipping fix'); + process.exit(0); + } + + fs.mkdirSync(cliOpenTui, { recursive: true }); + + const packages = ['core', 'react']; + for (const pkg of packages) { + const target = path.join(rootOpenTui, pkg); + const link = path.join(cliOpenTui, pkg); + + if (!fs.existsSync(target)) { + console.log(`Target ${target} missing; skipping ${pkg}`); + continue; + } + + let linkStats = null; + try { + linkStats = fs.lstatSync(link); + } catch (error) { + if (error?.code !== 'ENOENT') { + throw error; + } + } + + if (linkStats) { + let alreadyLinked = false; + try { + const actual = fs.realpathSync(link); + alreadyLinked = actual === target; + } catch { + // Broken symlink or unreadable target; we'll replace it. + } + + if (alreadyLinked) { + continue; + } + + fs.rmSync(link, { recursive: true, force: true }); + } + + const type = process.platform === 'win32' ? 
'junction' : 'dir'; + try { + fs.symlinkSync(target, link, type); + console.log(`Linked ${link} -> ${target}`); + } catch (error) { + if (error?.code === 'EEXIST') { + fs.rmSync(link, { recursive: true, force: true }); + fs.symlinkSync(target, link, type); + console.log(`Re-linked ${link} -> ${target}`); + } else { + throw error; + } + } + } + BUN + + - name: Configure environment variables + env: + SECRETS_CONTEXT: ${{ toJSON(secrets) }} + ENV_OVERRIDES: ${{ inputs.env-overrides }} + shell: bash + run: | + VAR_NAMES=$(bun scripts/generate-ci-env.js --prefix NEXT_PUBLIC_) + + echo "$SECRETS_CONTEXT" | jq -r --argjson vars "$VAR_NAMES" ' + to_entries | .[] | select(.key as $k | $vars | index($k)) | .key + "=" + .value + ' >> $GITHUB_ENV + echo "CODEBUFF_GITHUB_ACTIONS=true" >> $GITHUB_ENV + echo "CODEBUFF_GITHUB_TOKEN=${{ secrets.CODEBUFF_GITHUB_TOKEN }}" >> $GITHUB_ENV + if [ "$ENV_OVERRIDES" != "{}" ]; then + echo "$ENV_OVERRIDES" | jq -r 'to_entries | .[] | .key + "=" + .value' >> $GITHUB_ENV + fi + + - name: Build binary + run: bun run scripts/build-binary.ts ${{ inputs.binary-name }} ${{ inputs.new-version }} + working-directory: cli + shell: bash + env: + VERBOSE: true + OVERRIDE_TARGET: bun-windows-x64 + OVERRIDE_PLATFORM: win32 + OVERRIDE_ARCH: x64 + + - name: Smoke test binary + shell: bash + run: | + cd cli/bin + ./${{ inputs.binary-name }}.exe --version + + - name: Create tarball + shell: bash + run: | + BINARY_FILE="${{ inputs.binary-name }}.exe" + tar -czf codecane-win32-x64.tar.gz -C cli/bin "$BINARY_FILE" - name: Upload binary artifact uses: actions/upload-artifact@v4 with: - name: codebuff-cli-${{ matrix.target }} - path: codebuff-cli-${{ matrix.target }}.tar.gz + name: codecane-win32-x64 + path: codecane-win32-x64.tar.gz + + - name: Open debug shell on failure + if: failure() + uses: mxschmitt/action-tmate@v3 + with: + limit-access-to-actor: true + timeout-minutes: 15 diff --git a/.github/workflows/cli-release-staging.yml 
b/.github/workflows/cli-release-staging.yml index a8edb139b..15db64961 100644 --- a/.github/workflows/cli-release-staging.yml +++ b/.github/workflows/cli-release-staging.yml @@ -3,6 +3,8 @@ name: CLI Release Staging on: pull_request: branches: ['main'] + push: + branches: ['**'] concurrency: group: cli-staging-release @@ -14,60 +16,48 @@ permissions: jobs: prepare-and-commit-staging: runs-on: ubuntu-latest - if: contains(github.event.pull_request.title, '[codebuff-cli]') + if: | + (github.event_name == 'pull_request' && contains(github.event.pull_request.title, '[codecane]')) || + (github.event_name == 'push' && contains(github.event.head_commit.message, '[codecane]')) outputs: new_version: ${{ steps.bump_version.outputs.new_version }} steps: - uses: actions/checkout@v4 with: token: ${{ secrets.GITHUB_TOKEN }} - ref: ${{ github.event.pull_request.head.sha }} + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - - name: Set up Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.3.0' - - - name: Cache dependencies - uses: actions/cache@v4 - with: - path: | - node_modules - */node_modules - key: ${{ runner.os }}-deps-${{ hashFiles('**/bun.lock*', '**/package.json') }} - restore-keys: | - ${{ runner.os }}-deps-${{ hashFiles('**/bun.lock*') }} - ${{ runner.os }}-deps- - - - name: Install dependencies - run: bun install --frozen-lockfile + - uses: ./.github/actions/setup-project - name: Calculate staging version id: bump_version env: GITHUB_TOKEN: ${{ secrets.CODEBUFF_GITHUB_TOKEN }} + GITHUB_API_URL: https://api.github.com/repos/CodebuffAI/codebuff-community run: | cd cli/release-staging - BASE_VERSION=$(node -e "console.log(require('./package.json').version)") + BASE_VERSION=$(bun -e "console.log(require('./package.json').version)") echo "Base version: $BASE_VERSION" echo "Fetching latest CLI prerelease from GitHub..." 
RELEASES_JSON=$(curl -s -H "Authorization: token ${GITHUB_TOKEN}" \ - "https://api.github.com/repos/CodebuffAI/codebuff/releases?per_page=100") + "${GITHUB_API_URL}/releases?per_page=100") - LATEST_BETA=$(echo "$RELEASES_JSON" | jq -r '.[] | select(.prerelease == true and (.name // "" | test("Codebuff CLI v"))) | .tag_name' | sort -V | tail -n 1) + LATEST_TAG=$(echo "$RELEASES_JSON" | jq -r '.[] | select(.prerelease == true and (.name // "" | test("Codebuff CLI v"))) | .tag_name' | sort -V | tail -n 1) - if [ "$LATEST_BETA" = "null" ]; then - LATEST_BETA="" + if [ "$LATEST_TAG" = "null" ] || [ -z "$LATEST_TAG" ]; then + echo "No existing CLI prerelease found via releases, falling back to tags..." + LATEST_TAG=$(git ls-remote --tags origin "v${BASE_VERSION}-beta.*" | awk '{print $2}' | sed 's@refs/tags/@@' | sort -V | tail -n 1) fi - if [ -z "$LATEST_BETA" ]; then - echo "No existing CLI beta releases found, starting with beta.1" - NEW_VERSION="${BASE_VERSION}-beta.1" - else - echo "Latest CLI beta tag: $LATEST_BETA" - LATEST_VERSION=${LATEST_BETA#v} + if [ -n "$LATEST_TAG" ] && [[ "$LATEST_TAG" != v* ]]; then + LATEST_TAG="v${LATEST_TAG}" + fi + + if [ -n "$LATEST_TAG" ]; then + echo "Latest CLI beta tag: $LATEST_TAG" + LATEST_VERSION=${LATEST_TAG#v} LATEST_BASE=$(echo "$LATEST_VERSION" | sed 's/-beta\..*$//') LATEST_BETA_NUM=$(echo "$LATEST_VERSION" | sed 's/.*-beta\.//') @@ -75,15 +65,18 @@ jobs: NEXT=$((LATEST_BETA_NUM + 1)) NEW_VERSION="${BASE_VERSION}-beta.${NEXT}" else - echo "Base version changed, resetting beta counter" + echo "Base version changed since last prerelease, resetting counter" NEW_VERSION="${BASE_VERSION}-beta.1" fi + else + echo "No existing CLI beta tags found, starting with beta.1" + NEW_VERSION="${BASE_VERSION}-beta.1" fi echo "New staging version: $NEW_VERSION" echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT - node -e " + bun -e " const fs = require('fs'); const path = require('path'); const version = '$NEW_VERSION'; @@ -105,7 +98,7 @@ 
jobs: - name: Commit staging release snapshot run: | git add -A - git commit -m "Staging CLI Release v${{ steps.bump_version.outputs.new_version }} [codebuff-cli] + git commit -m "Staging CLI Release v${{ steps.bump_version.outputs.new_version }} [codecane] Captures the staged state for the CLI prerelease, including the version bump. @@ -127,10 +120,10 @@ jobs: needs: prepare-and-commit-staging uses: ./.github/workflows/cli-release-build.yml with: - binary-name: codebuff-cli + binary-name: codecane new-version: ${{ needs.prepare-and-commit-staging.outputs.new_version }} artifact-name: cli-staging-metadata - checkout-ref: ${{ github.event.pull_request.head.sha }} + checkout-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} env-overrides: '{}' secrets: inherit @@ -140,18 +133,20 @@ jobs: steps: - uses: actions/checkout@v4 with: - ref: ${{ github.event.pull_request.head.sha }} + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - name: Clean up old CLI prereleases + env: + GITHUB_API_URL: https://api.github.com/repos/CodebuffAI/codebuff-community run: | ONE_WEEK_AGO=$(date -d '7 days ago' -u +%Y-%m-%dT%H:%M:%SZ) echo "Removing CLI prereleases older than: $ONE_WEEK_AGO" RELEASES=$(curl -s -H "Authorization: token ${{ secrets.CODEBUFF_GITHUB_TOKEN }}" \ - "https://api.github.com/repos/CodebuffAI/codebuff/releases?per_page=100") + "${GITHUB_API_URL}/releases?per_page=100") - if echo "$RELEASES" | jq -e . 
>/dev/null 2>&1; then - OLD=$(echo "$RELEASES" | jq -r '.[] | select(.prerelease == true and .created_at < "'$ONE_WEEK_AGO'" and (.tag_name | test("^v[0-9].*-beta\\.[0-9]+$"))) | "\(.id):\(.tag_name)"') + if echo "$RELEASES" | jq -e 'type == "array"' >/dev/null 2>&1; then + OLD=$(echo "$RELEASES" | jq -r --arg cutoff "$ONE_WEEK_AGO" '.[] | select(.prerelease == true and .created_at < $cutoff and (.tag_name | test("^v[0-9].*-beta\\.[0-9]+$"))) | "\(.id):\(.tag_name)"') if [ -n "$OLD" ]; then echo "Deleting old prereleases:" @@ -159,7 +154,7 @@ jobs: echo "$OLD" | while IFS=: read -r RELEASE_ID TAG_NAME; do curl -s -X DELETE \ -H "Authorization: token ${{ secrets.CODEBUFF_GITHUB_TOKEN }}" \ - "https://api.github.com/repos/CodebuffAI/codebuff/releases/$RELEASE_ID" + "${GITHUB_API_URL}/releases/$RELEASE_ID" done else echo "No stale prereleases found." @@ -180,60 +175,61 @@ jobs: name: cli-staging-metadata path: cli/release-staging/ - - name: Create GitHub prerelease - env: - VERSION: ${{ needs.prepare-and-commit-staging.outputs.new_version }} - run: | - CURRENT_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") - RELEASE_BODY=$(cat <> $GITHUB_OUTPUT diff --git a/.github/workflows/npm-app-release-staging.yml b/.github/workflows/npm-app-release-staging.yml deleted file mode 100644 index ca9c2fd1b..000000000 --- a/.github/workflows/npm-app-release-staging.yml +++ /dev/null @@ -1,284 +0,0 @@ -name: Release Staging (Codecane) - -on: - pull_request: - branches: ['main'] - -# Ensure only one staging release runs at a time -concurrency: - group: staging-release - cancel-in-progress: false - -permissions: - contents: write - -jobs: - # First job: Check PR title and prepare staging release - prepare-and-commit-staging: - runs-on: ubuntu-latest - if: contains(github.event.pull_request.title, '[codecane]') - outputs: - new_version: ${{ steps.bump_version.outputs.new_version }} - steps: - - uses: actions/checkout@v4 - with: - token: ${{ secrets.GITHUB_TOKEN }} - ref: ${{ 
github.event.pull_request.head.sha }} - - - name: Set up Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.3.0' - - # Cache dependencies for speed - - name: Cache dependencies - uses: actions/cache@v4 - with: - path: | - node_modules - */node_modules - key: ${{ runner.os }}-deps-${{ hashFiles('**/bun.lock*', '**/package.json') }} - restore-keys: | - ${{ runner.os }}-deps-${{ hashFiles('**/bun.lock*') }} - ${{ runner.os }}-deps- - - - name: Install dependencies - run: bun install --frozen-lockfile - - - name: Calculate and update staging version - id: bump_version - run: | - cd npm-app/release-staging - - # Use the current package.json version as base - CURRENT_VERSION=$(node -e "console.log(require('./package.json').version)") - echo "Current package.json version: $CURRENT_VERSION" - - # Get latest beta version from npm to check if we need to increment - echo "Fetching latest beta version from npm..." - LATEST_BETA=$(npm view codecane@latest version 2>/dev/null || echo "") - - if [ -z "$LATEST_BETA" ]; then - echo "No beta version found on npm, using current version as base" - NEW_VERSION="$CURRENT_VERSION-beta.1" - else - echo "Latest beta version: $LATEST_BETA" - - # Extract base version and beta number from npm - NPM_BASE_VERSION=$(echo "$LATEST_BETA" | sed 's/-beta\..*$//') - BETA_NUM=$(echo "$LATEST_BETA" | sed 's/.*-beta\.//') - - # Compare base versions - if [ "$CURRENT_VERSION" = "$NPM_BASE_VERSION" ]; then - # Same base version, increment beta number - NEW_BETA_NUM=$((BETA_NUM + 1)) - NEW_VERSION="$CURRENT_VERSION-beta.$NEW_BETA_NUM" - else - # Different base version, start with beta.1 - NEW_VERSION="$CURRENT_VERSION-beta.1" - fi - fi - - echo "New staging version: $NEW_VERSION" - echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT - - # Update package.json with new version - node -e " - const fs = require('fs'); - const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8')); - pkg.version = '$NEW_VERSION'; - fs.writeFileSync('package.json', 
JSON.stringify(pkg, null, 2) + '\n'); - " - - - name: Configure git - run: | - git config --global user.name "github-actions[bot]" - git config --global user.email "github-actions[bot]@users.noreply.github.com" - - - name: Commit staging release state - run: | - # Add all changes (current state + version bump) - git add -A - git commit -m "Staging Release v${{ steps.bump_version.outputs.new_version }} (codecane) - - This commit captures the complete state being released for staging, - including any uncommitted changes and the version bump. - - ๐Ÿค– Generated with Codebuff - Co-Authored-By: Codebuff " - - - name: Create and push staging tag - run: | - # Show current commit info for debugging - echo "Current HEAD commit:" - git log -1 --format="%H %ci %s" - - # Create tag on current HEAD (the commit we just made) - git tag "v${{ steps.bump_version.outputs.new_version }}" - git push origin "v${{ steps.bump_version.outputs.new_version }}" - - echo "Tag created on commit:" - git show "v${{ steps.bump_version.outputs.new_version }}" --format="%H %ci %s" -s - - - name: Upload updated package - uses: actions/upload-artifact@v4 - with: - name: updated-staging-package - path: npm-app/release-staging/ - - build-staging-binaries: - needs: prepare-and-commit-staging - uses: ./.github/workflows/npm-app-release-build.yml - with: - binary-name: codecane - new-version: ${{ needs.prepare-and-commit-staging.outputs.new_version }} - artifact-name: updated-staging-package - checkout-ref: ${{ github.event.pull_request.head.sha }} - env-overrides: '{"NEXT_PUBLIC_CB_ENVIRONMENT": "prod", "NEXT_PUBLIC_CODEBUFF_BACKEND_URL": "backend-pr-312-3hui.onrender.com"}' - secrets: inherit - - # Create GitHub prerelease with all binaries - create-staging-release: - needs: [prepare-and-commit-staging, build-staging-binaries] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - - - name: Clean up old prereleases - run: | - # 
Calculate date one week ago - ONE_WEEK_AGO=$(date -d '7 days ago' -u +%Y-%m-%dT%H:%M:%SZ) - echo "Current date: $(date -u +%Y-%m-%dT%H:%M:%SZ)" - echo "Cleaning up prereleases older than: $ONE_WEEK_AGO" - - # Get all prereleases - echo "Fetching releases from GitHub API..." - RELEASES=$(curl -s -H "Authorization: token ${{ secrets.CODEBUFF_GITHUB_TOKEN }}" \ - "https://api.github.com/repos/CodebuffAI/codebuff-community/releases?per_page=100") - - # Check if we got a valid response - if echo "$RELEASES" | jq -e . >/dev/null 2>&1; then - echo "Successfully fetched releases JSON" - - # Count total releases and prereleases - TOTAL_RELEASES=$(echo "$RELEASES" | jq '. | length') - PRERELEASE_COUNT=$(echo "$RELEASES" | jq '[.[] | select(.prerelease == true)] | length') - echo "Total releases: $TOTAL_RELEASES" - echo "Total prereleases: $PRERELEASE_COUNT" - - # Show some example release dates for debugging - echo "Sample release dates:" - echo "$RELEASES" | jq -r '.[] | select(.prerelease == true) | "\(.tag_name): \(.created_at)"' | head -5 - - # Filter and show old prereleases before deleting - OLD_PRERELEASES=$(echo "$RELEASES" | jq -r '.[] | select(.prerelease == true and .created_at < "'$ONE_WEEK_AGO'") | "\(.id):\(.tag_name):\(.created_at)"') - - if [ -z "$OLD_PRERELEASES" ]; then - echo "No old prereleases found to delete" - else - echo "Found old prereleases to delete:" - echo "$OLD_PRERELEASES" - - # Delete old prereleases - echo "$RELEASES" | jq -r '.[] | select(.prerelease == true and .created_at < "'$ONE_WEEK_AGO'") | .id' | while read release_id; do - if [ -n "$release_id" ]; then - echo "Deleting prerelease with ID: $release_id" - DELETE_RESPONSE=$(curl -s -w "HTTP Status: %{http_code}" -X DELETE \ - -H "Authorization: token ${{ secrets.CODEBUFF_GITHUB_TOKEN }}" \ - "https://api.github.com/repos/CodebuffAI/codebuff-community/releases/$release_id") - echo "Delete response: $DELETE_RESPONSE" - fi - done - fi - else - echo "Failed to fetch releases or invalid 
JSON response:" - echo "$RELEASES" | head -10 - fi - - echo "Cleanup completed" - - - name: Download all binary artifacts - uses: actions/download-artifact@v4 - with: - path: binaries/ - - - name: Download updated package - uses: actions/download-artifact@v4 - with: - name: updated-staging-package - path: npm-app/release-staging/ - - - name: Create GitHub Prerelease - run: | - # Get current timestamp in ISO format - CURRENT_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") - echo "Publishing release at: $CURRENT_TIME" - - # Create release with current timestamp - curl -s -X POST \ - -H "Accept: application/vnd.github.v3+json" \ - -H "Authorization: token ${{ secrets.CODEBUFF_GITHUB_TOKEN }}" \ - -H "Content-Type: application/json" \ - https://api.github.com/repos/CodebuffAI/codebuff-community/releases \ - -d "{ - \"tag_name\": \"v${{ needs.prepare-and-commit-staging.outputs.new_version }}\", - \"name\": \"Staging Release v${{ needs.prepare-and-commit-staging.outputs.new_version }} (Codecane)\", - \"body\": \"## Codecane v${{ needs.prepare-and-commit-staging.outputs.new_version }} (Staging)\n\n**โš ๏ธ This is a staging/beta release for testing purposes.**\n\nBinary releases for all supported platforms.\n\n### Installation\n\`\`\`bash\nnpm install -g codecane\n\`\`\`\n\n### Platform Binaries\n- \`codecane-linux-x64.tar.gz\` - Linux x64\n- \`codecane-linux-arm64.tar.gz\` - Linux ARM64\n- \`codecane-darwin-x64.tar.gz\` - macOS Intel\n- \`codecane-darwin-arm64.tar.gz\` - macOS Apple Silicon\n- \`codecane-win32-x64.tar.gz\` - Windows x64\", - \"prerelease\": true, - \"published_at\": \"$CURRENT_TIME\" - }" - - - name: Upload release assets - run: | - # Get the release ID - RELEASE_ID=$(curl -s -H "Authorization: token ${{ secrets.CODEBUFF_GITHUB_TOKEN }}" \ - "https://api.github.com/repos/CodebuffAI/codebuff-community/releases/tags/v${{ needs.prepare-and-commit-staging.outputs.new_version }}" | \ - jq -r '.id') - - echo "Release ID: $RELEASE_ID" - - # Upload all binary assets - for 
file in binaries/*/codecane-*; do - if [ -f "$file" ]; then - filename=$(basename "$file") - echo "Uploading $filename..." - curl -s -X POST \ - -H "Authorization: token ${{ secrets.CODEBUFF_GITHUB_TOKEN }}" \ - -H "Content-Type: application/octet-stream" \ - --data-binary @"$file" \ - "https://uploads.github.com/repos/CodebuffAI/codebuff-community/releases/$RELEASE_ID/assets?name=$filename" - fi - done - - # Publish npm package as prerelease - publish-staging-npm: - needs: [prepare-and-commit-staging, create-staging-release] - runs-on: ubuntu-latest - permissions: - contents: read - id-token: write - steps: - - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - - - name: Download updated package - uses: actions/download-artifact@v4 - with: - name: updated-staging-package - path: npm-app/release-staging/ - - - name: Set up Node.js for npm publishing - uses: actions/setup-node@v4 - with: - node-version: 20 - registry-url: https://registry.npmjs.org/ - - - name: Publish staging to npm - run: | - cd npm-app/release-staging - npm publish --access public - env: - NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3a84e3a15..0778c14d3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -114,35 +114,35 @@ Before you begin, you'll need to install a few tools: 10. **Running in other directories**: - In order to run the CLI from other directories, you need to first publish the agents to the database. +In order to run the CLI from other directories, you need to first publish the agents to the database. - - First, create a publisher profile at http://localhost:3000/publishers. Make sure the `publisher_id` is `codebuff`. +- First, create a publisher profile at http://localhost:3000/publishers. Make sure the `publisher_id` is `codebuff`. 
- - Run: +- Run: - ```bash - bun run start-bin publish base - ``` + ```bash + bun run start-bin publish base + ``` - - It will give you an error along the lines of `Invalid agent ID: [some agent ID]`, e.g. `Invalid agent ID: context-pruner`. You need to publish that agent at the same time, e.g.: +- It will give you an error along the lines of `Invalid agent ID: [some agent ID]`, e.g. `Invalid agent ID: context-pruner`. You need to publish that agent at the same time, e.g.: - ```bash - bun run start-bin publish base context-pruner - ``` + ```bash + bun run start-bin publish base context-pruner + ``` - - Repeat this until there are no more errors. +- Repeat this until there are no more errors. - - As of the time of writing, the command required is: + - As of the time of writing, the command required is: - ```bash - bun start-bin publish base context-pruner file-explorer file-picker researcher thinker reviewer - ``` + ```bash + bun start-bin publish base context-pruner file-explorer file-picker researcher thinker reviewer + ``` - - Now, you can start the CLI in any directory by running: +- Now, you can start the CLI in any directory by running: - ```bash - bun run start-bin --cwd [some/other/directory] - ``` + ```bash + bun run start-bin --cwd [some/other/directory] + ``` ## Understanding the Codebase @@ -204,6 +204,31 @@ bun test specific.test.ts # Run just one test file **Writing tests:** Use `spyOn()` for mocking functions (it's cleaner than `mock.module()`), and always clean up with `mock.restore()` in your `afterEach()` blocks. 
+#### Interactive CLI Testing + +For testing interactive CLI features (user input, real-time responses), install tmux: + +```bash +# macOS +brew install tmux + +# Ubuntu/Debian +sudo apt-get install tmux + +# Windows (via WSL) +wsl --install +sudo apt-get install tmux +``` + +Run the proof-of-concept to validate your setup: + +```bash +cd cli +bun run test:tmux-poc +``` + +See [cli/src/__tests__/README.md](cli/src/__tests__/README.md) for comprehensive interactive testing documentation. + ### Commit Messages We use conventional commit format: diff --git a/README.md b/README.md index 46045e83f..798237b58 100644 --- a/README.md +++ b/README.md @@ -148,6 +148,31 @@ We โค๏ธ contributions from the community - whether you're fixing bugs, tweakin **Want to contribute?** Check out our [Contributing Guide](./CONTRIBUTING.md) to get started. +### Running Tests + +To run the test suite: + +```bash +cd cli +bun test +``` + +**For interactive E2E testing**, install tmux: + +```bash +# macOS +brew install tmux + +# Ubuntu/Debian +sudo apt-get install tmux + +# Windows (via WSL) +wsl --install +sudo apt-get install tmux +``` + +See [cli/src/__tests__/README.md](cli/src/__tests__/README.md) for comprehensive testing documentation. 
+ Some ways you can help: - ๐Ÿ› **Fix bugs** or add features diff --git a/bun.lock b/bun.lock index 67cc43e0a..c2cb99845 100644 --- a/bun.lock +++ b/bun.lock @@ -9,13 +9,13 @@ }, "devDependencies": { "@tanstack/react-query": "^5.59.16", - "@types/bun": "^1.3.0", + "@types/bun": "^1.2.11", "@types/lodash": "4.17.7", "@types/node": "^22.9.0", "@types/node-fetch": "^2.6.12", "@types/parse-path": "^7.1.0", "@typescript-eslint/eslint-plugin": "^6.17", - "bun-types": "^1.3.0", + "bun-types": "^1.2.2", "eslint-config-prettier": "^9.1.0", "eslint-plugin-import": "^2.29.1", "eslint-plugin-unused-imports": "^4.1.4", @@ -84,8 +84,9 @@ }, "dependencies": { "@codebuff/sdk": "workspace:*", - "@opentui/core": "^0.1.27", - "@opentui/react": "^0.1.27", + "@opentui/core": "^0.1.28", + "@opentui/react": "^0.1.28", + "commander": "^14.0.1", "immer": "^10.1.3", "react": "^19.0.0", "react-reconciler": "^0.32.0", @@ -99,6 +100,7 @@ "@types/node": "22", "@types/react": "^18.3.12", "@types/react-reconciler": "^0.32.0", + "strip-ansi": "^7.1.2", }, }, "common": { @@ -162,7 +164,6 @@ "@codebuff/common": "workspace:*", "@types/diff": "8.0.0", "@types/micromatch": "^4.0.9", - "@vscode/ripgrep": "1.15.9", "ai": "5.0.0", "axios": "1.7.4", "cli-highlight": "^2.1.11", @@ -1014,21 +1015,21 @@ "@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.37.0", "", {}, "sha512-JD6DerIKdJGmRp4jQyX5FlrQjA4tjOw1cvfsPAZXfOOEErMUHjPcPSICS+6WnM0nB0efSFARh0KAZss+bvExOA=="], - "@opentui/core": ["@opentui/core@0.1.27", "", { "dependencies": { "jimp": "1.6.0", "yoga-layout": "3.2.1" }, "optionalDependencies": { "@dimforge/rapier2d-simd-compat": "^0.17.3", "@opentui/core-darwin-arm64": "0.1.27", "@opentui/core-darwin-x64": "0.1.27", "@opentui/core-linux-arm64": "0.1.27", "@opentui/core-linux-x64": "0.1.27", "@opentui/core-win32-arm64": "0.1.27", "@opentui/core-win32-x64": "0.1.27", "bun-webgpu": "0.1.3", "planck": "^1.4.2", "three": "0.177.0" }, "peerDependencies": { "web-tree-sitter": 
">=0.26.0" } }, "sha512-aTIXZz+SKm2u7Fn86ZghOhZNL6MPo5XXy2SWhpSmAyoyjypZxaM361Xn0Vh3bruhlDswscQ4k6xO+X8jXhZocQ=="], + "@opentui/core": ["@opentui/core@0.1.28", "", { "dependencies": { "jimp": "1.6.0", "yoga-layout": "3.2.1" }, "optionalDependencies": { "@dimforge/rapier2d-simd-compat": "^0.17.3", "@opentui/core-darwin-arm64": "0.1.28", "@opentui/core-darwin-x64": "0.1.28", "@opentui/core-linux-arm64": "0.1.28", "@opentui/core-linux-x64": "0.1.28", "@opentui/core-win32-arm64": "0.1.28", "@opentui/core-win32-x64": "0.1.28", "bun-webgpu": "0.1.3", "planck": "^1.4.2", "three": "0.177.0" }, "peerDependencies": { "web-tree-sitter": ">=0.26.0" } }, "sha512-3GOnETvNeYcWcQPGaauNpPxgvglnvCfK4mmr7gkNsnVY5NEnrBbh7yuVHDXRNuzRldG4Aj5JEq7pWRexNhnL6g=="], - "@opentui/core-darwin-arm64": ["@opentui/core-darwin-arm64@0.1.27", "", { "os": "darwin", "cpu": "arm64" }, "sha512-tul6PtoGJCw3UVsZzGD/fY0n43StUEG9bx1p8BXllApEt/VbXiL+qJMfRvlT52Oyj4n3mxOyiX17WkulUBYDSg=="], + "@opentui/core-darwin-arm64": ["@opentui/core-darwin-arm64@0.1.28", "", { "os": "darwin", "cpu": "arm64" }, "sha512-ivJmq6NLNzWRiottzBE5DVLe/fOklj3WwGkFhnRTFDG2nDcc1/uyvvpCZRwkJcb+TpV5zpB8YWzzs3XahGA0oQ=="], - "@opentui/core-darwin-x64": ["@opentui/core-darwin-x64@0.1.27", "", { "os": "darwin", "cpu": "x64" }, "sha512-Iw+u8xSfYLAufuxJMQSFEhkj5VaRy/sHCVguHEcN+CdeF2c13e34Afq0KukD20CS4cQbR3S3xooU3MsaszMCbQ=="], + "@opentui/core-darwin-x64": ["@opentui/core-darwin-x64@0.1.28", "", { "os": "darwin", "cpu": "x64" }, "sha512-tiIiX9S5Gdz0DFfzqOv5WRjJsr+zEHWiYYoUlJ7/H9IRw0cj3Wq9V1LruZKd3WwbXwEVNkrdo9PoGotNGDSUxQ=="], - "@opentui/core-linux-arm64": ["@opentui/core-linux-arm64@0.1.27", "", { "os": "linux", "cpu": "arm64" }, "sha512-Qq5+OtLOaiHhL0XKF3Ulkv+BB1k6MF2Pkm8uQWWG6tog4rPQpHlCN+QKas9AuV8HwHFjHjOtC9rQ2XMe6q92Wg=="], + "@opentui/core-linux-arm64": ["@opentui/core-linux-arm64@0.1.28", "", { "os": "linux", "cpu": "arm64" }, "sha512-jZ6848fyF8wVElIsCiAC5hBvPjOWlVlpXQFL8XBiu4U47bI/Le6vpp9f/kg9iipFaq2pGxKYcQak1/35Ns+5UQ=="], - 
"@opentui/core-linux-x64": ["@opentui/core-linux-x64@0.1.27", "", { "os": "linux", "cpu": "x64" }, "sha512-dKaE2fSc9Fdo5iI3jvU+BNHG0tqR6o+1XEd9TX5QZG8A4cr2D8MfImlLmONbYI0eT7Lox/cPyamjzB64Al/HDw=="], + "@opentui/core-linux-x64": ["@opentui/core-linux-x64@0.1.28", "", { "os": "linux", "cpu": "x64" }, "sha512-xcQhFfCJZGZeq00ODflyRO1EcK1myb0CUaC0grpP2pvKdulHshn6gnLod7EoEHGboP3zzQrffPRjvYgd6JWKJg=="], - "@opentui/core-win32-arm64": ["@opentui/core-win32-arm64@0.1.27", "", { "os": "win32", "cpu": "arm64" }, "sha512-F5xYnaO1DVgyX8y/xpnjXOzplsw9ZOkwJ2IgEJC5nJVrhbVxBLE7Jc0jjHMoBzmLjEao/iCZY8WkzvlhuYxAtA=="], + "@opentui/core-win32-arm64": ["@opentui/core-win32-arm64@0.1.28", "", { "os": "win32", "cpu": "arm64" }, "sha512-SuDBSOZVaU/bS9ngs9ADQJaFfg3TmCTl4OBKQgrpGErGpG0fNZJMS4NqJTlBcGOGiT/JxgMIZly/Ku/Q2gdz5A=="], - "@opentui/core-win32-x64": ["@opentui/core-win32-x64@0.1.27", "", { "os": "win32", "cpu": "x64" }, "sha512-ioGGbx97u/Fy4ysEeagOz4sc2NIHDeYluE5oQz0ExlQI1V6hvnvJPHw6iVNpnJmRldO4EDTkXDi9o+jiPnSBhQ=="], + "@opentui/core-win32-x64": ["@opentui/core-win32-x64@0.1.28", "", { "os": "win32", "cpu": "x64" }, "sha512-oMO2d9+7HlGuQFX4j9ex31JkS7AiEkktUL0cjQsgqK09zyUz8tQdlb3l/5yzJ2dPJ00K7Ae1K+0HO+5ClADcuQ=="], - "@opentui/react": ["@opentui/react@0.1.27", "", { "dependencies": { "@opentui/core": "0.1.27", "react-reconciler": "^0.32.0" }, "peerDependencies": { "react": ">=19.0.0" } }, "sha512-YH70kzj5f+Vi29XHNfpwd3Hjfw7AyuyZj8d/P32U57gHWkwPxijulZxZASqjQhiEdTsTdU8+ZtqzACiX4wXrjw=="], + "@opentui/react": ["@opentui/react@0.1.28", "", { "dependencies": { "@opentui/core": "0.1.28", "react-reconciler": "^0.32.0" }, "peerDependencies": { "react": ">=19.0.0" } }, "sha512-ubHPv8ZCgb9nBI6Ibh9FYXAK6A49Wt4ab6AdJW0eIeWOUHAKb+5LlWNO6YS11h+HkPzkcYFZC0uUY08/YXv6qw=="], "@panva/hkdf": ["@panva/hkdf@1.2.1", "", {}, "sha512-6oclG6Y3PiDFcoyk8srjLfVKyMfVCKJ27JwNPViuXziFpmdz+MZnZN/aKY0JGXgYuO/VghU0jcOAZgWXZ1Dmrw=="], @@ -1538,8 +1539,6 @@ "@vladfrangu/async_event_emitter": 
["@vladfrangu/async_event_emitter@2.4.7", "", {}, "sha512-Xfe6rpCTxSxfbswi/W/Pz7zp1WWSNn4A0eW4mLkQUewCrXXtMj31lCg+iQyTkh/CkusZSq9eDflu7tjEDXUY6g=="], - "@vscode/ripgrep": ["@vscode/ripgrep@1.15.9", "", { "dependencies": { "https-proxy-agent": "^7.0.2", "proxy-from-env": "^1.1.0", "yauzl": "^2.9.2" } }, "sha512-4q2PXRvUvr3bF+LsfrifmUZgSPmCNcUZo6SbEAZgArIChchkezaxLoIeQMJe/z3CCKStvaVKpBXLxN3Z8lQjFQ=="], - "@vscode/tree-sitter-wasm": ["@vscode/tree-sitter-wasm@0.1.4", "", {}, "sha512-kQVVg/CamCYDM+/XYCZuNTQyixjZd8ts/Gf84UzjEY0eRnbg6kiy5I9z2/2i3XdqwhI87iG07rkMR2KwhqcSbA=="], "@webgpu/types": ["@webgpu/types@0.1.66", "", {}, "sha512-YA2hLrwLpDsRueNDXIMqN9NTzD6bCDkuXbOSe0heS+f8YE8usA6Gbv1prj81pzVHrbaAma7zObnIC+I6/sXJgA=="], @@ -1848,7 +1847,7 @@ "comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="], - "commander": ["commander@13.1.0", "", {}, "sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw=="], + "commander": ["commander@14.0.1", "", {}, "sha512-2JkV3gUZUVrbNA+1sjBOYLsMZ5cEEl8GTFP2a4AVz5hvasAMCQ1D2l2le/cX+pV4N6ZU17zjUahLpIXRrnWL8A=="], "comment-json": ["comment-json@4.4.1", "", { "dependencies": { "array-timsort": "^1.0.3", "core-util-is": "^1.0.3", "esprima": "^4.0.1" } }, "sha512-r1To31BQD5060QdkC+Iheai7gHwoSZobzunqkf2/kQ6xIAfJyrKNAFUwdKvkK7Qgu7pVTKQEa7ok7Ed3ycAJgg=="], @@ -2484,7 +2483,7 @@ "http-proxy-agent": ["http-proxy-agent@5.0.0", "", { "dependencies": { "@tootallnate/once": "2", "agent-base": "6", "debug": "4" } }, "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w=="], - "https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + "https-proxy-agent": ["https-proxy-agent@6.2.1", "", { "dependencies": { 
"agent-base": "^7.0.2", "debug": "4" } }, "sha512-ONsE3+yfZF2caH5+bJlcddtWqNI3Gvs5A38+ngvljxaBiRXRswym2c7yf8UAeFpRFKjFNHIFEHqR/OLAWJzyiA=="], "human-signals": ["human-signals@4.3.1", "", {}, "sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ=="], @@ -4064,6 +4063,8 @@ "@codebuff/npm-app/@types/diff": ["@types/diff@8.0.0", "", { "dependencies": { "diff": "*" } }, "sha512-o7jqJM04gfaYrdCecCVMbZhNdG6T1MHg/oQoRFdERLV+4d+V7FijhiEAbFu0Usww84Yijk9yH58U4Jk4HbtzZw=="], + "@codebuff/npm-app/commander": ["commander@13.1.0", "", {}, "sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw=="], + "@codebuff/npm-app/diff": ["diff@8.0.2", "", {}, "sha512-sSuxWU5j5SR9QQji/o2qMvqRNYRDOcBTgsJ/DeCf4iSN4gW+gNMXM7wFIP+fdXZxoNiAnHUTGjCr+TSWXdRDKg=="], "@codebuff/npm-app/ignore": ["ignore@7.0.3", "", {}, "sha512-bAH5jbK/F3T3Jls4I0SO1hmPR0dKU0a7+SY6n1yzRtG54FLO8d6w/nxLFX2Nb7dBu6cCWXPaAME6cYqFUMmuCA=="], @@ -4318,8 +4319,6 @@ "aceternity-ui/dotenv": ["dotenv@16.6.1", "", {}, "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow=="], - "aceternity-ui/https-proxy-agent": ["https-proxy-agent@6.2.1", "", { "dependencies": { "agent-base": "^7.0.2", "debug": "4" } }, "sha512-ONsE3+yfZF2caH5+bJlcddtWqNI3Gvs5A38+ngvljxaBiRXRswym2c7yf8UAeFpRFKjFNHIFEHqR/OLAWJzyiA=="], - "ai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="], "autoprefixer/picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="], @@ -4432,6 +4431,8 @@ "front-matter/js-yaml": ["js-yaml@3.14.1", "", { "dependencies": { 
"argparse": "^1.0.7", "esprima": "^4.0.0" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g=="], + "gaxios/https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + "gaxios/is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="], "gaxios/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="], @@ -4540,6 +4541,8 @@ "lint-staged/chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], + "lint-staged/commander": ["commander@13.1.0", "", {}, "sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw=="], + "lint-staged/execa": ["execa@8.0.1", "", { "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^8.0.1", "human-signals": "^5.0.0", "is-stream": "^3.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^5.1.0", "onetime": "^6.0.0", "signal-exit": "^4.1.0", "strip-final-newline": "^3.0.0" } }, "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg=="], "log-symbols/chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], @@ -4574,6 +4577,8 @@ "metro/yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, 
"sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="], + "metro-cache/https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + "metro-source-map/source-map": ["source-map@0.5.7", "", {}, "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ=="], "metro-symbolicate/source-map": ["source-map@0.5.7", "", {}, "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ=="], @@ -4630,6 +4635,8 @@ "pac-proxy-agent/http-proxy-agent": ["http-proxy-agent@7.0.2", "", { "dependencies": { "agent-base": "^7.1.0", "debug": "^4.3.4" } }, "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig=="], + "pac-proxy-agent/https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + "parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="], "parse-json/lines-and-columns": ["lines-and-columns@1.2.4", "", {}, "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="], @@ -4662,6 +4669,8 @@ "proxy-agent/http-proxy-agent": ["http-proxy-agent@7.0.2", "", { "dependencies": { "agent-base": "^7.1.0", "debug": "^4.3.4" } }, "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig=="], + "proxy-agent/https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + "proxy-agent/lru-cache": 
["lru-cache@7.18.3", "", {}, "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA=="], "puppeteer-core/ws": ["ws@8.18.3", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg=="], diff --git a/cli/README.md b/cli/README.md index f2eabf007..c1239e097 100644 --- a/cli/README.md +++ b/cli/README.md @@ -16,6 +16,38 @@ Run the TUI in development mode: bun run dev ``` +## Testing + +Run the test suite: + +```bash +bun test +``` + +### Interactive E2E Testing + +For testing interactive CLI features, install tmux: + +```bash +# macOS +brew install tmux + +# Ubuntu/Debian +sudo apt-get install tmux + +# Windows (via WSL) +wsl --install +sudo apt-get install tmux +``` + +Then run the proof-of-concept: + +```bash +bun run test:tmux-poc +``` + +See [src/__tests__/README.md](src/__tests__/README.md) for comprehensive testing documentation. + ## Build Build the package: diff --git a/cli/knowledge.md b/cli/knowledge.md index 254279491..9de2e649d 100644 --- a/cli/knowledge.md +++ b/cli/knowledge.md @@ -1,8 +1,25 @@ # CLI Package Knowledge +## Test Naming Conventions + +**IMPORTANT**: Follow these naming patterns for automatic dependency detection: + +- **Unit tests:** `*.test.ts` (e.g., `cli-args.test.ts`) +- **E2E tests:** `e2e-*.test.ts` (e.g., `e2e-cli.test.ts`) +- **Integration tests:** `integration-*.test.ts` (e.g., `integration-tmux.test.ts`) + +**Why?** The `.bin/bun` wrapper detects files matching `*integration*.test.ts` or `*e2e*.test.ts` patterns and automatically checks for tmux availability. If tmux is missing, it shows installation instructions but lets tests continue (they skip gracefully). 
+ +**Benefits:** + +- Project-wide convention (not CLI-specific) +- No hardcoded directory paths +- Automatic dependency validation +- Clear test categorization + ## Migration from Custom OpenTUI Fork -**October 2024**: Migrated from custom `CodebuffAI/opentui#codebuff/custom` fork to official `@opentui/react@^0.1.27` and `@opentui/core@^0.1.27` packages. +**October 2024**: Migrated from custom `CodebuffAI/opentui#codebuff/custom` fork to official `@opentui/react@^0.1.27` and `@opentui/core@^0.1.27` packages. Updated to `^0.1.28` in February 2025. **Lost Features from Custom Fork:** diff --git a/cli/package.json b/cli/package.json index cad91bb22..9bd7446c8 100644 --- a/cli/package.json +++ b/cli/package.json @@ -19,9 +19,12 @@ "prebuild": "bun run build:sdk", "build": "bun build src/index.tsx --outdir dist --target node --format esm", "build:sdk": "cd ../sdk && bun run build", - "build:binary": "bun ./scripts/build-binary.ts codebuff-cli $npm_package_version", + "build:sdk-types": "cd ../sdk && bun run build:types", + "build:binary": "bun ./scripts/build-binary.ts codecane $npm_package_version", "start": "bun run dist/index.js", - "pretypecheck": "bun run build:sdk", + "test": "bun test", + "test:tmux-poc": "bun run src/__tests__/tmux-poc.ts", + "pretypecheck": "bun run build:sdk-types", "typecheck": "tsc --noEmit -p ." 
}, "sideEffects": false, @@ -30,8 +33,9 @@ }, "dependencies": { "@codebuff/sdk": "workspace:*", - "@opentui/core": "^0.1.27", - "@opentui/react": "^0.1.27", + "@opentui/core": "^0.1.28", + "@opentui/react": "^0.1.28", + "commander": "^14.0.1", "immer": "^10.1.3", "react": "^19.0.0", "react-reconciler": "^0.32.0", @@ -44,6 +48,7 @@ "@types/bun": "^1.3.0", "@types/node": "22", "@types/react": "^18.3.12", - "@types/react-reconciler": "^0.32.0" + "@types/react-reconciler": "^0.32.0", + "strip-ansi": "^7.1.2" } } diff --git a/cli/release-staging/README.md b/cli/release-staging/README.md index d1ef99d9b..08194bffe 100644 --- a/cli/release-staging/README.md +++ b/cli/release-staging/README.md @@ -1,3 +1,71 @@ -# Codebuff CLI Staging +# ๐Ÿš€ Codecane - The most powerful coding agent (STAGING) -The staging workflow updates these files with the exact version being packaged. They are shipped as workflow artifacts so downstream jobs build and publish binaries from the same commit snapshot. +**โš ๏ธ This is a staging/beta release for testing purposes.** + +Codecane is a CLI tool that writes code for you. + +1. Run `codecane` from your project directory +2. Tell it what to do +3. It will read and write to files and run commands to produce the code you want + +Note: Codecane will run commands in your terminal as it deems necessary to fulfill your request. + +## Installation + +To install Codecane (staging), run: + +```bash +npm install -g codecane@beta +``` + +(Use `sudo` if you get a permission error.) + +## Usage + +After installation, you can start Codecane by running: + +```bash +codecane [project-directory] +``` + +If no project directory is specified, Codecane will use the current directory. + +Once running, simply chat with Codecane to say what coding task you want done. 
+ +## Features + +- Understands your whole codebase +- Creates and edits multiple files based on your request +- Can run your tests or type checker or linter; can install packages +- It's powerful: ask Codecane to keep working until it reaches a condition and it will. + +Our users regularly use Codecane to implement new features, write unit tests, refactor code, write scripts, or give advice. + +## Knowledge Files + +To unlock the full benefits of modern LLMs, we recommend storing knowledge alongside your code. Add a `knowledge.md` file anywhere in your project to provide helpful context, guidance, and tips for the LLM as it performs tasks for you. + +Codecane can fluently read and write files, so it will add knowledge as it goes. You don't need to write knowledge manually! + +Some have said every change should be paired with a unit test. In 2024, every change should come with a knowledge update! + +## Tips + +1. Type '/help' or just '/' to see available commands. +2. Create a `knowledge.md` file and collect specific points of advice. The assistant will use this knowledge to improve its responses. +3. Type `undo` or `redo` to revert or reapply file changes from the conversation. +4. Press `Esc` or `Ctrl+C` while Codecane is generating a response to stop it. + +## Troubleshooting + +If you are getting permission errors during installation, try using sudo: + +``` +sudo npm install -g codecane@beta +``` + +If you still have errors, it's a good idea to [reinstall Node](https://nodejs.org/en/download). + +## Feedback + +We value your input! Please email your feedback to `founders@codebuff.com`. Thank you for using Codecane! 
diff --git a/cli/release-staging/index.js b/cli/release-staging/index.js new file mode 100644 index 000000000..8acdf66ce --- /dev/null +++ b/cli/release-staging/index.js @@ -0,0 +1,408 @@ +#!/usr/bin/env node + +const { spawn } = require('child_process') +const fs = require('fs') +const https = require('https') +const os = require('os') +const path = require('path') +const zlib = require('zlib') + +const tar = require('tar') + +const packageName = 'codecane' + +function createConfig(packageName) { + const homeDir = os.homedir() + const configDir = path.join(homeDir, '.config', 'manicode') + const binaryName = + process.platform === 'win32' ? `${packageName}.exe` : packageName + + return { + homeDir, + configDir, + binaryName, + binaryPath: path.join(configDir, binaryName), + userAgent: `${packageName}-cli`, + requestTimeout: 20000, + } +} + +const CONFIG = createConfig(packageName) + +const PLATFORM_TARGETS = { + 'linux-x64': `${packageName}-linux-x64.tar.gz`, + 'linux-arm64': `${packageName}-linux-arm64.tar.gz`, + 'darwin-x64': `${packageName}-darwin-x64.tar.gz`, + 'darwin-arm64': `${packageName}-darwin-arm64.tar.gz`, + 'win32-x64': `${packageName}-win32-x64.tar.gz`, +} + +const term = { + clearLine: () => { + if (process.stderr.isTTY) { + process.stderr.write('\r\x1b[K') + } + }, + write: (text) => { + term.clearLine() + process.stderr.write(text) + }, + writeLine: (text) => { + term.clearLine() + process.stderr.write(text + '\n') + }, +} + +function httpGet(url, options = {}) { + return new Promise((resolve, reject) => { + const parsedUrl = new URL(url) + const reqOptions = { + hostname: parsedUrl.hostname, + path: parsedUrl.pathname + parsedUrl.search, + headers: { + 'User-Agent': CONFIG.userAgent, + ...options.headers, + }, + } + + const req = https.get(reqOptions, (res) => { + if (res.statusCode === 302 || res.statusCode === 301) { + return httpGet(new URL(res.headers.location, url).href, options) + .then(resolve) + .catch(reject) + } + resolve(res) + }) + + 
req.on('error', reject) + + const timeout = options.timeout || CONFIG.requestTimeout + req.setTimeout(timeout, () => { + req.destroy() + reject(new Error('Request timeout.')) + }) + }) +} + +async function getLatestVersion() { + try { + const res = await httpGet( + `https://registry.npmjs.org/${packageName}/latest`, + ) + + if (res.statusCode !== 200) return null + + const body = await streamToString(res) + const packageData = JSON.parse(body) + + return packageData.version || null + } catch (error) { + return null + } +} + +function streamToString(stream) { + return new Promise((resolve, reject) => { + let data = '' + stream.on('data', (chunk) => (data += chunk)) + stream.on('end', () => resolve(data)) + stream.on('error', reject) + }) +} + +function getCurrentVersion() { + if (!fs.existsSync(CONFIG.binaryPath)) return null + + try { + return new Promise((resolve, reject) => { + const child = spawn(CONFIG.binaryPath, ['--version'], { + cwd: os.homedir(), + stdio: 'pipe', + }) + + let output = '' + let errorOutput = '' + + child.stdout.on('data', (data) => { + output += data.toString() + }) + + child.stderr.on('data', (data) => { + errorOutput += data.toString() + }) + + const timeout = setTimeout(() => { + child.kill('SIGTERM') + setTimeout(() => { + if (!child.killed) { + child.kill('SIGKILL') + } + }, 1000) + resolve('error') + }, 1000) + + child.on('exit', (code) => { + clearTimeout(timeout) + if (code === 0) { + resolve(output.trim()) + } else { + resolve('error') + } + }) + + child.on('error', () => { + clearTimeout(timeout) + resolve('error') + }) + }) + } catch (error) { + return 'error' + } +} + +function compareVersions(v1, v2) { + if (!v1 || !v2) return 0 + + const parseVersion = (version) => { + const parts = version.split('-') + const mainParts = parts[0].split('.').map(Number) + const prereleaseParts = parts[1] ? 
parts[1].split('.') : [] + return { main: mainParts, prerelease: prereleaseParts } + } + + const p1 = parseVersion(v1) + const p2 = parseVersion(v2) + + for (let i = 0; i < Math.max(p1.main.length, p2.main.length); i++) { + const n1 = p1.main[i] || 0 + const n2 = p2.main[i] || 0 + + if (n1 < n2) return -1 + if (n1 > n2) return 1 + } + + if (p1.prerelease.length === 0 && p2.prerelease.length === 0) { + return 0 + } else if (p1.prerelease.length === 0) { + return 1 + } else if (p2.prerelease.length === 0) { + return -1 + } else { + for ( + let i = 0; + i < Math.max(p1.prerelease.length, p2.prerelease.length); + i++ + ) { + const pr1 = p1.prerelease[i] || '' + const pr2 = p2.prerelease[i] || '' + + const isNum1 = !isNaN(parseInt(pr1)) + const isNum2 = !isNaN(parseInt(pr2)) + + if (isNum1 && isNum2) { + const num1 = parseInt(pr1) + const num2 = parseInt(pr2) + if (num1 < num2) return -1 + if (num1 > num2) return 1 + } else if (isNum1 && !isNum2) { + return 1 + } else if (!isNum1 && isNum2) { + return -1 + } else if (pr1 < pr2) { + return -1 + } else if (pr1 > pr2) { + return 1 + } + } + return 0 + } +} + +function formatBytes(bytes) { + if (bytes === 0) return '0 B' + const k = 1024 + const sizes = ['B', 'KB', 'MB', 'GB'] + const i = Math.floor(Math.log(bytes) / Math.log(k)) + return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i] +} + +function createProgressBar(percentage, width = 30) { + const filled = Math.round((width * percentage) / 100) + const empty = width - filled + return '[' + 'โ–ˆ'.repeat(filled) + 'โ–‘'.repeat(empty) + ']' +} + +async function downloadBinary(version) { + const platformKey = `${process.platform}-${process.arch}` + const fileName = PLATFORM_TARGETS[platformKey] + + if (!fileName) { + throw new Error(`Unsupported platform: ${process.platform} ${process.arch}`) + } + + const downloadUrl = `${process.env.NEXT_PUBLIC_CODEBUFF_APP_URL || 'https://codebuff.com'}/api/releases/download/${version}/${fileName}` + + 
fs.mkdirSync(CONFIG.configDir, { recursive: true }) + + if (fs.existsSync(CONFIG.binaryPath)) { + fs.unlinkSync(CONFIG.binaryPath) + } + + term.write('Downloading...') + + const res = await httpGet(downloadUrl) + + if (res.statusCode !== 200) { + throw new Error(`Download failed: HTTP ${res.statusCode}`) + } + + const totalSize = parseInt(res.headers['content-length'] || '0', 10) + let downloadedSize = 0 + let lastProgressTime = Date.now() + + res.on('data', (chunk) => { + downloadedSize += chunk.length + const now = Date.now() + if (now - lastProgressTime >= 100 || downloadedSize === totalSize) { + lastProgressTime = now + if (totalSize > 0) { + const pct = Math.round((downloadedSize / totalSize) * 100) + term.write( + `Downloading... ${createProgressBar(pct)} ${pct}% of ${formatBytes( + totalSize, + )}`, + ) + } else { + term.write(`Downloading... ${formatBytes(downloadedSize)}`) + } + } + }) + + await new Promise((resolve, reject) => { + res + .pipe(zlib.createGunzip()) + .pipe(tar.x({ cwd: CONFIG.configDir })) + .on('finish', resolve) + .on('error', reject) + }) + + try { + const files = fs.readdirSync(CONFIG.configDir) + const extractedPath = path.join(CONFIG.configDir, CONFIG.binaryName) + + if (fs.existsSync(extractedPath)) { + if (process.platform !== 'win32') { + fs.chmodSync(extractedPath, 0o755) + } + } else { + throw new Error( + `Binary not found after extraction. Expected: ${extractedPath}, Available files: ${files.join(', ')}`, + ) + } + } catch (error) { + term.clearLine() + console.error(`Extraction failed: ${error.message}`) + process.exit(1) + } + + term.clearLine() + console.log('Download complete! 
Starting Codecane...') +} + +async function ensureBinaryExists() { + if (!fs.existsSync(CONFIG.binaryPath)) { + const version = await getLatestVersion() + if (!version) { + console.error('โŒ Failed to determine latest version') + console.error('Please check your internet connection and try again') + process.exit(1) + } + + try { + await downloadBinary(version) + } catch (error) { + term.clearLine() + console.error('โŒ Failed to download codecane:', error.message) + console.error('Please check your internet connection and try again') + process.exit(1) + } + } +} + +async function checkForUpdates(runningProcess, exitListener) { + try { + const currentVersion = await getCurrentVersion() + if (!currentVersion) return + + const latestVersion = await getLatestVersion() + if (!latestVersion) return + + if ( + currentVersion === 'error' || + compareVersions(currentVersion, latestVersion) < 0 + ) { + term.clearLine() + + runningProcess.removeListener('exit', exitListener) + runningProcess.kill('SIGTERM') + + await new Promise((resolve) => { + runningProcess.on('exit', resolve) + setTimeout(() => { + if (!runningProcess.killed) { + runningProcess.kill('SIGKILL') + } + resolve() + }, 5000) + }) + + console.log(`Update available: ${currentVersion} โ†’ ${latestVersion}`) + + await downloadBinary(latestVersion) + + const newChild = spawn(CONFIG.binaryPath, process.argv.slice(2), { + stdio: 'inherit', + detached: false, + }) + + newChild.on('exit', (code) => { + process.exit(code || 0) + }) + + return new Promise(() => {}) + } + } catch (error) { + // Ignore update failures + } +} + +async function main() { + console.log('\x1b[1m\x1b[91m' + '='.repeat(60) + '\x1b[0m') + console.log('\x1b[1m\x1b[93mโ„๏ธ CODECANE STAGING ENVIRONMENT โ„๏ธ\x1b[0m') + console.log( + '\x1b[1m\x1b[91mFOR TESTING PURPOSES ONLY - NOT FOR PRODUCTION USE\x1b[0m', + ) + console.log('\x1b[1m\x1b[91m' + '='.repeat(60) + '\x1b[0m') + console.log('') + + await ensureBinaryExists() + + const child = 
spawn(CONFIG.binaryPath, process.argv.slice(2), { + stdio: 'inherit', + }) + + const exitListener = (code) => { + process.exit(code || 0) + } + + child.on('exit', exitListener) + + setTimeout(() => { + checkForUpdates(child, exitListener) + }, 100) +} + +main().catch((error) => { + console.error('โŒ Unexpected error:', error.message) + process.exit(1) +}) diff --git a/cli/release-staging/package.json b/cli/release-staging/package.json index aeb193c1a..8cd555f53 100644 --- a/cli/release-staging/package.json +++ b/cli/release-staging/package.json @@ -1,11 +1,39 @@ { - "name": "@codebuff/cli-staging", - "private": true, + "name": "codecane", "version": "1.0.420", - "description": "Staging release metadata for Codebuff CLI binaries", + "description": "AI coding agent CLI (staging)", + "license": "MIT", + "bin": { + "codecane": "index.js" + }, + "scripts": { + "preuninstall": "node -e \"const fs = require('fs'); const path = require('path'); const os = require('os'); const binaryPath = path.join(os.homedir(), '.config', 'manicode', process.platform === 'win32' ? 
'codecane.exe' : 'codecane'); try { fs.unlinkSync(binaryPath) } catch (e) { /* ignore if file doesn't exist */ }\"" + }, + "files": [ + "index.js", + "README.md" + ], + "os": [ + "darwin", + "linux", + "win32" + ], + "cpu": [ + "x64", + "arm64" + ], + "engines": { + "node": ">=16" + }, + "dependencies": { + "tar": "^6.2.0" + }, "repository": { "type": "git", "url": "https://github.com/CodebuffAI/codebuff.git" }, - "homepage": "https://codebuff.com" + "homepage": "https://codebuff.com", + "publishConfig": { + "access": "public" + } } diff --git a/cli/scripts/build-binary.ts b/cli/scripts/build-binary.ts index 21d0818da..12537a843 100644 --- a/cli/scripts/build-binary.ts +++ b/cli/scripts/build-binary.ts @@ -1,9 +1,19 @@ #!/usr/bin/env bun import { spawnSync, type SpawnSyncOptions } from 'child_process' -import { chmodSync, existsSync, mkdirSync } from 'fs' +import { + chmodSync, + existsSync, + mkdirSync, + mkdtempSync, + readdirSync, + readFileSync, + rmSync, + writeFileSync, +} from 'fs' import { dirname, join } from 'path' import { fileURLToPath } from 'url' +import { tmpdir } from 'os' type TargetInfo = { bunTarget: string @@ -21,6 +31,7 @@ const OVERRIDE_ARCH = process.env.OVERRIDE_ARCH ?? undefined const __filename = fileURLToPath(import.meta.url) const __dirname = dirname(__filename) const cliRoot = join(__dirname, '..') +const repoRoot = dirname(cliRoot) function log(message: string) { if (VERBOSE) { @@ -101,7 +112,7 @@ function getTargetInfo(): TargetInfo { async function main() { const [, , binaryNameArg, version] = process.argv - const binaryName = binaryNameArg ?? 'codebuff-cli' + const binaryName = binaryNameArg ?? 
'codecane' if (!version) { throw new Error('Version argument is required when building a binary') @@ -120,6 +131,9 @@ async function main() { log('Building SDK dependencies...') runCommand('bun', ['run', 'build:sdk'], { cwd: cliRoot }) + patchOpenTuiAssetPaths() + await ensureOpenTuiNativeBundle(targetInfo) + const outputFilename = targetInfo.platform === 'win32' ? `${binaryName}.exe` : binaryName const outputFile = join(binDir, outputFilename) @@ -167,3 +181,151 @@ main().catch((error: unknown) => { } process.exit(1) }) + +function patchOpenTuiAssetPaths() { + const coreDir = join(cliRoot, 'node_modules', '@opentui', 'core') + if (!existsSync(coreDir)) { + log('OpenTUI core package not found; skipping asset patch') + return + } + + const indexFile = readdirSync(coreDir).find( + (file) => file.startsWith('index') && file.endsWith('.js'), + ) + + if (!indexFile) { + log('OpenTUI core index bundle not found; skipping asset patch') + return + } + + const indexPath = join(coreDir, indexFile) + const content = readFileSync(indexPath, 'utf8') + + const absolutePathPattern = + /var __dirname = ".*?packages\/core\/src\/lib\/tree-sitter\/assets";/ + if (!absolutePathPattern.test(content)) { + log('OpenTUI core bundle already has relative asset paths') + return + } + + const replacement = + 'var __dirname = path3.join(path3.dirname(fileURLToPath(new URL(".", import.meta.url))), "lib/tree-sitter/assets");' + + const patched = content.replace(absolutePathPattern, replacement) + writeFileSync(indexPath, patched) + logAlways('Patched OpenTUI core tree-sitter asset paths') +} + +async function ensureOpenTuiNativeBundle(targetInfo: TargetInfo) { + const packageName = `@opentui/core-${targetInfo.platform}-${targetInfo.arch}` + const packageFolder = `core-${targetInfo.platform}-${targetInfo.arch}` + const installTargets = [ + { + label: 'workspace root', + packagesDir: join(repoRoot, 'node_modules', '@opentui'), + packageDir: join(repoRoot, 'node_modules', '@opentui', 
packageFolder), + }, + { + label: 'CLI workspace', + packagesDir: join(cliRoot, 'node_modules', '@opentui'), + packageDir: join(cliRoot, 'node_modules', '@opentui', packageFolder), + }, + ] + + const missingTargets = installTargets.filter(({ packageDir }) => !existsSync(packageDir)) + if (missingTargets.length === 0) { + log(`OpenTUI native bundle already present for ${targetInfo.platform}-${targetInfo.arch}`) + return + } + + const corePackagePath = + installTargets + .map(({ packagesDir }) => join(packagesDir, 'core', 'package.json')) + .find((candidate) => existsSync(candidate)) ?? null + + if (!corePackagePath) { + log('OpenTUI core package metadata missing; skipping native bundle fetch') + return + } + const corePackageJson = JSON.parse(readFileSync(corePackagePath, 'utf8')) as { + optionalDependencies?: Record + } + const version = corePackageJson.optionalDependencies?.[packageName] + if (!version) { + log(`No optional dependency declared for ${packageName}; skipping native bundle fetch`) + return + } + + const registryBase = + process.env.CODEBUFF_NPM_REGISTRY ?? + process.env.NPM_REGISTRY_URL ?? 
+ 'https://registry.npmjs.org' + const metadataUrl = `${registryBase.replace(/\/$/, '')}/${encodeURIComponent(packageName)}` + log(`Fetching OpenTUI native bundle metadata from ${metadataUrl}`) + + const metadataResponse = await fetch(metadataUrl) + if (!metadataResponse.ok) { + throw new Error( + `Failed to fetch metadata for ${packageName}: ${metadataResponse.status} ${metadataResponse.statusText}`, + ) + } + + const metadata = (await metadataResponse.json()) as { + versions?: Record< + string, + { + dist?: { + tarball?: string + } + } + > + } + const tarballUrl = metadata.versions?.[version]?.dist?.tarball + if (!tarballUrl) { + throw new Error(`Tarball URL missing for ${packageName}@${version}`) + } + + log(`Downloading OpenTUI native bundle from ${tarballUrl}`) + const tarballResponse = await fetch(tarballUrl) + if (!tarballResponse.ok) { + throw new Error( + `Failed to download ${packageName}@${version}: ${tarballResponse.status} ${tarballResponse.statusText}`, + ) + } + + const tempDir = mkdtempSync(join(tmpdir(), 'opentui-')) + try { + const tarballPath = join( + tempDir, + `${packageName.split('/').pop() ?? 'package'}-${version}.tgz`, + ) + await Bun.write(tarballPath, await tarballResponse.arrayBuffer()) + + for (const target of missingTargets) { + mkdirSync(target.packagesDir, { recursive: true }) + mkdirSync(target.packageDir, { recursive: true }) + + if (!existsSync(target.packageDir)) { + throw new Error(`Failed to create directory for ${packageName}: ${target.packageDir}`) + } + + const tarballForTar = + process.platform === 'win32' ? tarballPath.replace(/\\/g, '/') : tarballPath + const extractDirForTar = + process.platform === 'win32' ? 
target.packageDir.replace(/\\/g, '/') : target.packageDir + + const tarArgs = ['-xzf', tarballForTar, '--strip-components=1', '-C', extractDirForTar] + if (process.platform === 'win32') { + tarArgs.unshift('--force-local') + } + + runCommand('tar', tarArgs) + log( + `Installed OpenTUI native bundle for ${targetInfo.platform}-${targetInfo.arch} in ${target.label}`, + ) + } + logAlways(`Fetched OpenTUI native bundle for ${targetInfo.platform}-${targetInfo.arch}`) + } finally { + rmSync(tempDir, { recursive: true, force: true }) + } +} diff --git a/cli/src/__tests__/README.md b/cli/src/__tests__/README.md new file mode 100644 index 000000000..fff137b66 --- /dev/null +++ b/cli/src/__tests__/README.md @@ -0,0 +1,296 @@ +# CLI Testing + +Comprehensive testing suite for the Codebuff CLI using tmux for interactive terminal emulation. + +## Test Naming Convention + +**IMPORTANT:** Follow these patterns for automatic tmux detection: + +- **Unit tests:** `*.test.ts` (e.g., `cli-args.test.ts`) +- **E2E tests:** `e2e-*.test.ts` (e.g., `e2e-cli.test.ts`) +- **Integration tests:** `integration-*.test.ts` (e.g., `integration-tmux.test.ts`) + +Files matching `*integration*.test.ts` or `*e2e*.test.ts` trigger automatic tmux availability checking in `.bin/bun`. 
+ +## Quick Start + +```bash +cd cli +bun test +``` + +## Prerequisites + +### For Integration Tests + +Install tmux for interactive CLI testing: + +```bash +# macOS +brew install tmux + +# Ubuntu/Debian +sudo apt-get install tmux + +# Windows (via WSL) +wsl --install +sudo apt-get install tmux +``` + +### For E2E Tests + +Build the SDK first: + +```bash +cd sdk +bun run build +cd ../cli +``` + +## Running Tests + +### All Tests + +```bash +bun test +``` + +### Specific Test Suites + +```bash +# Unit tests +bun test cli-args.test.ts + +# E2E tests (requires SDK) +bun test e2e-cli.test.ts + +# Integration tests (requires tmux) +bun test integration-tmux.test.ts +``` + +### Manual tmux POC + +```bash +bun run test:tmux-poc +``` + +## Automatic tmux Detection + +The `.bin/bun` wrapper automatically checks for tmux when running integration/E2E tests: + +- **Detects** test files matching `*integration*.test.ts` or `*e2e*.test.ts` +- **Checks** if tmux is installed +- **Shows** installation instructions if missing +- **Skips** tests gracefully if tmux unavailable + +**Benefits:** +- โœ… Project-wide (works in any package) +- โœ… No hardcoded paths +- โœ… Clear test categorization +- โœ… Automatic dependency validation + +## Test Structure + +### Unit Tests + +Test individual functions in isolation: + +```typescript +import { describe, test, expect } from 'bun:test' + +describe('CLI Arguments', () => { + test('parses --agent flag', () => { + // Test implementation + }) +}) +``` + +### Integration Tests (tmux) + +Test interactive CLI with full terminal emulation: + +```typescript +import { describe, test, expect } from 'bun:test' +import { isTmuxAvailable } from './test-utils' + +const tmuxAvailable = isTmuxAvailable() + +describe.skipIf(!tmuxAvailable)('CLI Integration Tests', () => { + test('handles user input', async () => { + // Create tmux session + // Send commands + // Verify output + }) +}) +``` + +### E2E Tests + +Test complete CLI workflows: + +```typescript 
+import { describe, test, expect } from 'bun:test' +import { isSDKBuilt } from './test-utils' + +const sdkBuilt = isSDKBuilt() + +describe.skipIf(!sdkBuilt)('CLI E2E Tests', () => { + test('runs --help command', async () => { + // Test CLI behavior + }) +}) +``` + +## Test Utilities + +Shared utilities in `test-utils.ts`: + +```typescript +import { isTmuxAvailable, isSDKBuilt, sleep } from './test-utils' + +// Check for tmux +if (isTmuxAvailable()) { + // Run tmux tests +} + +// Check for SDK +if (isSDKBuilt()) { + // Run E2E tests +} + +// Async delay +await sleep(1000) +``` + +## tmux Testing Approach + +### Why tmux? + +- โœ… Full terminal emulation with PTY support +- โœ… No native compilation needed (Bun 1.3+ compatible) +- โœ… Send keystrokes, capture output +- โœ… Can attach to sessions for debugging +- โœ… Cross-platform (macOS, Linux, WSL) + +### Basic tmux Workflow + +```typescript +// 1. Create tmux session +await tmux(['new-session', '-d', '-s', sessionName, 'your-command']) + +// 2. Send commands +await tmux(['send-keys', '-t', sessionName, 'input text', 'Enter']) + +// 3. Wait for output +await sleep(1000) + +// 4. Capture output +const output = await tmux(['capture-pane', '-t', sessionName, '-p']) + +// 5. Clean up +await tmux(['kill-session', '-t', sessionName]) +``` + +### tmux Helper Function + +```typescript +function tmux(args: string[]): Promise { + return new Promise((resolve, reject) => { + const proc = spawn('tmux', args, { stdio: 'pipe' }) + let stdout = '' + + proc.stdout?.on('data', (data) => { + stdout += data.toString() + }) + + proc.on('close', (code) => { + code === 0 ? 
resolve(stdout) : reject(new Error('tmux failed')) + }) + }) +} +``` + +## Debugging Tests + +### Attach to tmux Session + +For debugging, keep session alive and attach: + +```typescript +// Don't kill session immediately +await tmux(['new-session', '-d', '-s', 'debug-session', 'cli-command']) + +// In another terminal +// tmux attach -t debug-session +``` + +### View Test Output + +```bash +# Verbose test output +bun test --verbose + +# Watch mode +bun test --watch +``` + +## Contributing + +When adding new tests: + +1. **Follow naming convention** (`*integration*.test.ts` or `*e2e*.test.ts`) +2. **Use test-utils.ts** for shared functionality +3. **Add graceful skipping** for missing dependencies +4. **Clean up resources** (tmux sessions, temp files) +5. **Document test purpose** clearly in test descriptions + +## Troubleshooting + +### tmux Not Found + +``` +โš ๏ธ tmux not found but required for integration/E2E tests +``` + +**Solution:** Install tmux (see Prerequisites above) + +### SDK Not Built + +``` +โœ“ Build SDK for E2E tests: cd sdk && bun run build [skip] +``` + +**Solution:** Build the SDK first (see Prerequisites above) + +### Tests Hanging + +- Check tmux session isn't waiting for input +- Ensure proper cleanup in `finally` blocks +- Use timeouts for tmux operations + +### Session Already Exists + +- Use unique session names (e.g., timestamp suffix) +- Clean up sessions in `beforeEach`/`afterEach` + +## Performance + +- **Unit tests:** ~100ms total +- **Integration tests:** ~2-5s per test (tmux overhead) +- **E2E tests:** ~3-10s per test (full CLI startup) + +## CI/CD + +For CI environments: + +```yaml +# Install tmux in CI +- name: Install tmux + run: | + sudo apt-get update + sudo apt-get install -y tmux + +# Run tests +- name: Run tests + run: bun test +``` diff --git a/cli/src/__tests__/cli-args.test.ts b/cli/src/__tests__/cli-args.test.ts new file mode 100644 index 000000000..2f1f08f04 --- /dev/null +++ b/cli/src/__tests__/cli-args.test.ts @@ 
-0,0 +1,121 @@ +import { describe, test, expect, beforeEach, afterEach } from 'bun:test' +import { Command } from 'commander' + +describe('CLI Argument Parsing', () => { + let originalArgv: string[] + + beforeEach(() => { + originalArgv = process.argv + }) + + afterEach(() => { + process.argv = originalArgv + }) + + function parseTestArgs(args: string[]) { + process.argv = ['node', 'codecane', ...args] + + const program = new Command() + program + .name('codecane') + .version('1.0.0', '-v, --version', 'Print the CLI version') + .option('--agent ', 'Specify which agent to use') + .option('--clear-logs', 'Remove any existing CLI log files') + .argument('[prompt...]', 'Initial prompt to send') + .allowExcessArguments(true) + .exitOverride() // Prevent process.exit in tests + + try { + program.parse(process.argv) + } catch (error) { + // Commander throws on --help, --version in exitOverride mode + if (error instanceof Error && error.message.includes('(outputHelp)')) { + return { help: true } + } + if (error instanceof Error && (error.message.includes('(version)') || error.message.includes('1.0.0'))) { + return { version: true } + } + throw error + } + + const options = program.opts() + const promptArgs = program.args + + return { + agent: options.agent, + clearLogs: options.clearLogs || false, + initialPrompt: promptArgs.length > 0 ? 
promptArgs.join(' ') : null, + } + } + + test('parses --agent flag correctly', () => { + const result = parseTestArgs(['--agent', 'file-picker', 'find all TypeScript files']) + expect(result.agent).toBe('file-picker') + expect(result.initialPrompt).toBe('find all TypeScript files') + }) + + test('parses --agent with full agent ID', () => { + const result = parseTestArgs(['--agent', 'codebuff/base-lite@1.0.0', 'hello']) + expect(result.agent).toBe('codebuff/base-lite@1.0.0') + expect(result.initialPrompt).toBe('hello') + }) + + test('works without --agent flag (defaults to base)', () => { + const result = parseTestArgs(['create a new component']) + expect(result.agent).toBeUndefined() + expect(result.initialPrompt).toBe('create a new component') + }) + + test('parses --clear-logs flag', () => { + const result = parseTestArgs(['--clear-logs', 'hello']) + expect(result.clearLogs).toBe(true) + expect(result.initialPrompt).toBe('hello') + }) + + test('handles multiple flags together', () => { + const result = parseTestArgs(['--agent', 'reviewer', '--clear-logs', 'review my code']) + expect(result.agent).toBe('reviewer') + expect(result.clearLogs).toBe(true) + expect(result.initialPrompt).toBe('review my code') + }) + + test('handles prompt with no flags', () => { + const result = parseTestArgs(['this is a test prompt']) + expect(result.agent).toBeUndefined() + expect(result.clearLogs).toBe(false) + expect(result.initialPrompt).toBe('this is a test prompt') + }) + + test('handles empty arguments', () => { + const result = parseTestArgs([]) + expect(result.agent).toBeUndefined() + expect(result.clearLogs).toBe(false) + expect(result.initialPrompt).toBeNull() + }) + + test('handles multi-word prompt', () => { + const result = parseTestArgs(['--agent', 'base', 'fix the bug in auth.ts file']) + expect(result.agent).toBe('base') + expect(result.initialPrompt).toBe('fix the bug in auth.ts file') + }) + + test('handles --help flag', () => { + const result = 
parseTestArgs(['--help']) + expect(result.help).toBe(true) + }) + + test('handles -h flag', () => { + const result = parseTestArgs(['-h']) + expect(result.help).toBe(true) + }) + + test('handles --version flag', () => { + const result = parseTestArgs(['--version']) + expect(result.version).toBe(true) + }) + + test('handles -v flag', () => { + const result = parseTestArgs(['-v']) + expect(result.version).toBe(true) + }) +}) diff --git a/cli/src/__tests__/e2e-cli.test.ts b/cli/src/__tests__/e2e-cli.test.ts new file mode 100644 index 000000000..cc2039e77 --- /dev/null +++ b/cli/src/__tests__/e2e-cli.test.ts @@ -0,0 +1,132 @@ +import { describe, test, expect } from 'bun:test' +import { spawn } from 'child_process' +import stripAnsi from 'strip-ansi' +import path from 'path' +import { isSDKBuilt } from './test-utils' + +const CLI_PATH = path.join(__dirname, '../index.tsx') +const TIMEOUT_MS = 10000 +const sdkBuilt = isSDKBuilt() + +function runCLI(args: string[]): Promise<{ stdout: string; stderr: string; exitCode: number | null }> { + return new Promise((resolve, reject) => { + const proc = spawn('bun', ['run', CLI_PATH, ...args], { + cwd: path.join(__dirname, '../..'), + stdio: 'pipe' + }) + + let stdout = '' + let stderr = '' + + proc.stdout?.on('data', (data) => { + stdout += data.toString() + }) + + proc.stderr?.on('data', (data) => { + stderr += data.toString() + }) + + const timeout = setTimeout(() => { + proc.kill('SIGTERM') + reject(new Error('Process timeout')) + }, TIMEOUT_MS) + + proc.on('exit', (code) => { + clearTimeout(timeout) + resolve({ stdout, stderr, exitCode: code }) + }) + + proc.on('error', (err) => { + clearTimeout(timeout) + reject(err) + }) + }) +} + +describe.skipIf(!sdkBuilt)('CLI End-to-End Tests', () => { + test('CLI shows help with --help flag', async () => { + const { stdout, stderr, exitCode } = await runCLI(['--help']) + + const cleanOutput = stripAnsi(stdout + stderr) + expect(cleanOutput).toContain('--agent') + 
expect(cleanOutput).toContain('Usage:') + expect(exitCode).toBe(0) + }, TIMEOUT_MS) + + test('CLI shows help with -h flag', async () => { + const { stdout, stderr, exitCode } = await runCLI(['-h']) + + const cleanOutput = stripAnsi(stdout + stderr) + expect(cleanOutput).toContain('--agent') + expect(exitCode).toBe(0) + }, TIMEOUT_MS) + + test('CLI shows version with --version flag', async () => { + const { stdout, stderr, exitCode } = await runCLI(['--version']) + + const cleanOutput = stripAnsi(stdout + stderr) + expect(cleanOutput).toMatch(/\d+\.\d+\.\d+|dev/) + expect(exitCode).toBe(0) + }, TIMEOUT_MS) + + test('CLI shows version with -v flag', async () => { + const { stdout, stderr, exitCode } = await runCLI(['-v']) + + const cleanOutput = stripAnsi(stdout + stderr) + expect(cleanOutput).toMatch(/\d+\.\d+\.\d+|dev/) + expect(exitCode).toBe(0) + }, TIMEOUT_MS) + + test('CLI accepts --agent flag', async () => { + // Note: This will timeout and exit because we can't interact with stdin + // But we can verify it starts without errors + const proc = spawn('bun', ['run', CLI_PATH, '--agent', 'ask'], { + cwd: path.join(__dirname, '../..'), + stdio: 'pipe' + }) + + let started = false + proc.stdout?.on('data', () => { + started = true + }) + + await new Promise(resolve => setTimeout(resolve, 1000)) + proc.kill('SIGTERM') + + expect(started).toBe(true) + }, TIMEOUT_MS) + + test('CLI accepts --clear-logs flag', async () => { + const proc = spawn('bun', ['run', CLI_PATH, '--clear-logs'], { + cwd: path.join(__dirname, '../..'), + stdio: 'pipe' + }) + + let started = false + proc.stdout?.on('data', () => { + started = true + }) + + await new Promise(resolve => setTimeout(resolve, 1000)) + proc.kill('SIGTERM') + + expect(started).toBe(true) + }, TIMEOUT_MS) + + test('CLI handles invalid flags gracefully', async () => { + const { stderr, exitCode } = await runCLI(['--invalid-flag']) + + // Commander should show an error + expect(exitCode).not.toBe(0) + 
expect(stripAnsi(stderr)).toContain('error') + }, TIMEOUT_MS) +}) + +// Show message when SDK tests are skipped +if (!sdkBuilt) { + describe('SDK Build Required', () => { + test.skip('Build SDK for E2E tests: cd sdk && bun run build', () => { + // This test is skipped to show the build instruction + }) + }) +} diff --git a/cli/src/__tests__/integration-tmux.test.ts b/cli/src/__tests__/integration-tmux.test.ts new file mode 100644 index 000000000..9b6514043 --- /dev/null +++ b/cli/src/__tests__/integration-tmux.test.ts @@ -0,0 +1,131 @@ +import { describe, test, expect, beforeAll } from 'bun:test' +import { spawn } from 'child_process' +import stripAnsi from 'strip-ansi' +import path from 'path' +import { isTmuxAvailable, isSDKBuilt, sleep } from './test-utils' + +const CLI_PATH = path.join(__dirname, '../index.tsx') +const TIMEOUT_MS = 15000 +const tmuxAvailable = isTmuxAvailable() +const sdkBuilt = isSDKBuilt() + +// Utility to run tmux commands +function tmux(args: string[]): Promise { + return new Promise((resolve, reject) => { + const proc = spawn('tmux', args, { stdio: 'pipe' }) + let stdout = '' + let stderr = '' + + proc.stdout?.on('data', (data) => { + stdout += data.toString() + }) + + proc.stderr?.on('data', (data) => { + stderr += data.toString() + }) + + proc.on('close', (code) => { + if (code === 0) { + resolve(stdout) + } else { + reject(new Error(`tmux command failed: ${stderr}`)) + } + }) + }) +} + +describe.skipIf(!tmuxAvailable || !sdkBuilt)('CLI Integration Tests with tmux', () => { + beforeAll(() => { + if (!tmuxAvailable) { + console.log('\nโš ๏ธ Skipping tmux tests - tmux not installed') + console.log('๐Ÿ“ฆ Install with: brew install tmux (macOS) or sudo apt-get install tmux (Linux)\n') + } + if (!sdkBuilt) { + console.log('\nโš ๏ธ Skipping tmux tests - SDK not built') + console.log('๐Ÿ”จ Build SDK: cd sdk && bun run build\n') + } + }) + + test('CLI starts and displays help output', async () => { + const sessionName = 'codebuff-test-' + 
Date.now() + + try { + // Create session with --help flag and keep it alive with '; sleep 2' + await tmux([ + 'new-session', + '-d', + '-s', sessionName, + '-x', '120', + '-y', '30', + `bun run ${CLI_PATH} --help; sleep 2` + ]) + + // Wait for output + await sleep(500) + + // Capture pane content + const output = await tmux(['capture-pane', '-t', sessionName, '-p']) + const cleanOutput = stripAnsi(output) + + // Verify help text + expect(cleanOutput).toContain('--agent') + expect(cleanOutput).toContain('Usage:') + + } finally { + // Cleanup + try { + await tmux(['kill-session', '-t', sessionName]) + } catch { + // Session may have already exited + } + } + }, TIMEOUT_MS) + + test('CLI accepts --agent flag', async () => { + const sessionName = 'codebuff-test-' + Date.now() + + try { + // Start CLI with --agent flag (it will wait for input, so we can capture) + await tmux([ + 'new-session', + '-d', + '-s', sessionName, + '-x', '120', + '-y', '30', + `bun run ${CLI_PATH} --agent ask` + ]) + + await sleep(1000) + + // Capture to verify it started + const output = await tmux(['capture-pane', '-t', sessionName, '-p']) + + // Should have started without errors + expect(output.length).toBeGreaterThan(0) + + } finally { + try { + await tmux(['kill-session', '-t', sessionName]) + } catch { + // Session may have already exited + } + } + }, TIMEOUT_MS) +}) + +// Always show installation message when tmux tests are skipped +if (!tmuxAvailable) { + describe('tmux Installation Required', () => { + test.skip('Install tmux for interactive CLI tests', () => { + // This test is intentionally skipped to show the message + }) + }) +} + +if (!sdkBuilt) { + describe('SDK Build Required', () => { + test.skip('Build SDK for integration tests: cd sdk && bun run build', () => { + // This test is intentionally skipped to show the message + }) + }) +} diff --git a/cli/src/__tests__/test-utils.ts b/cli/src/__tests__/test-utils.ts new file mode 100644 index 000000000..38039e75b --- /dev/null +++ 
b/cli/src/__tests__/test-utils.ts @@ -0,0 +1,32 @@ +import { execSync } from 'child_process' +import fs from 'fs' +import path from 'path' + +/** + * Check if tmux is available on the system + */ +export function isTmuxAvailable(): boolean { + try { + execSync('which tmux', { stdio: 'pipe' }) + return true + } catch { + return false + } +} + +/** + * Check if the SDK is built by checking for the dist directory + */ +export function isSDKBuilt(): boolean { + try { + const sdkDistPath = path.join(__dirname, '../../../sdk/dist/index.js') + return fs.existsSync(sdkDistPath) + } catch { + return false + } +} + +/** + * Sleep utility for async delays + */ +export const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms)) diff --git a/cli/src/__tests__/tmux-poc.ts b/cli/src/__tests__/tmux-poc.ts new file mode 100755 index 000000000..35006164b --- /dev/null +++ b/cli/src/__tests__/tmux-poc.ts @@ -0,0 +1,142 @@ +#!/usr/bin/env bun + +/** + * Proof of Concept: tmux-based CLI testing + * + * This script demonstrates how to: + * 1. Create a tmux session + * 2. Run the CLI in that session + * 3. Send commands to the CLI + * 4. Capture and verify output + * 5. 
Clean up the session + */ + +import { spawn } from 'child_process' +import stripAnsi from 'strip-ansi' +import { isTmuxAvailable, sleep } from './test-utils' + +// Utility to run tmux commands +function tmux(args: string[]): Promise { + return new Promise((resolve, reject) => { + const proc = spawn('tmux', args, { stdio: 'pipe' }) + let stdout = '' + let stderr = '' + + proc.stdout?.on('data', (data) => { + stdout += data.toString() + }) + + proc.stderr?.on('data', (data) => { + stderr += data.toString() + }) + + proc.on('close', (code) => { + if (code === 0) { + resolve(stdout) + } else { + reject(new Error(`tmux command failed: ${stderr}`)) + } + }) + }) +} + +// Send keys to tmux session +async function sendKeys(sessionName: string, keys: string) { + await tmux(['send-keys', '-t', sessionName, keys]) +} + +// Capture pane content +async function capturePane(sessionName: string): Promise { + return await tmux(['capture-pane', '-t', sessionName, '-p']) +} + +// Main test function +async function testCLIWithTmux() { + const sessionName = 'codebuff-test-' + Date.now() + + console.log('๐Ÿš€ Starting tmux-based CLI test...') + console.log(`๐Ÿ“ฆ Session: ${sessionName}`) + + // 1. Check if tmux is installed + if (!isTmuxAvailable()) { + console.error('โŒ tmux not found') + console.error('\n๐Ÿ“ฆ Installation:') + console.error(' macOS: brew install tmux') + console.error(' Ubuntu: sudo apt-get install tmux') + console.error(' Windows: Use WSL and run sudo apt-get install tmux') + console.error('\nโ„น๏ธ This is just a proof-of-concept. See the documentation for alternatives.') + process.exit(1) + } + + try { + const version = await tmux(['-V']) + console.log(`โœ… tmux is installed: ${version.trim()}`) + + // 2. 
Create new detached tmux session running the CLI + console.log('\n๐Ÿ“บ Creating tmux session...') + await tmux([ + 'new-session', + '-d', + '-s', sessionName, + '-x', '120', // width + '-y', '30', // height + 'bun', 'run', 'src/index.tsx', '--help' + ]) + console.log('โœ… Session created') + + // 3. Wait for CLI to start + await sleep(1000) + + // 4. Capture initial output + console.log('\n๐Ÿ“ธ Capturing initial output...') + const initialOutput = await capturePane(sessionName) + const cleanOutput = stripAnsi(initialOutput) + + console.log('\n--- Output ---') + console.log(cleanOutput) + console.log('--- End Output ---\n') + + // 5. Verify output contains expected text + const checks = [ + { text: '--agent', pass: cleanOutput.includes('--agent') }, + { text: 'Usage:', pass: cleanOutput.includes('Usage:') }, + { text: '--help', pass: cleanOutput.includes('--help') }, + ] + + console.log('๐Ÿ” Verification:') + checks.forEach(({ text, pass }) => { + console.log(` ${pass ? 'โœ…' : 'โŒ'} Contains "${text}"${pass ? '' : ' - NOT FOUND'}`) + }) + + const allPassed = checks.every(c => c.pass) + console.log(`\n${allPassed ? '๐ŸŽ‰ All checks passed!' : 'โš ๏ธ Some checks failed'}`) + + // 6. Example: Send interactive command (commented out for --help test) + /* + console.log('\nโŒจ๏ธ Sending test command...') + await sendKeys(sessionName, 'hello world') + await sendKeys(sessionName, 'Enter') + await sleep(2000) + + const responseOutput = await capturePane(sessionName) + console.log('\n--- Response ---') + console.log(stripAnsi(responseOutput)) + console.log('--- End Response ---') + */ + + } catch (error) { + console.error('\nโŒ Test failed:', error) + } finally { + // 7. 
Cleanup: kill the tmux session + console.log('\n๐Ÿงน Cleaning up...') + try { + await tmux(['kill-session', '-t', sessionName]) + console.log('โœ… Session cleaned up') + } catch (e) { + console.log('โš ๏ธ Session may have already exited') + } + } +} + +// Run the test +testCLIWithTmux().catch(console.error) diff --git a/cli/src/chat.tsx b/cli/src/chat.tsx index 8b3d6c6a0..8d1c431fe 100644 --- a/cli/src/chat.tsx +++ b/cli/src/chat.tsx @@ -75,7 +75,10 @@ export type ChatMessage = { isComplete?: boolean } -export const App = ({ initialPrompt }: { initialPrompt?: string } = {}) => { +export const App = ({ + initialPrompt, + agentId, +}: { initialPrompt?: string; agentId?: string } = {}) => { const renderer = useRenderer() const scrollRef = useRef(null) const inputRef = useRef(null) @@ -441,6 +444,7 @@ export const App = ({ initialPrompt }: { initialPrompt?: string } = {}) => { setIsStreaming, setCanProcessQueue, abortControllerRef, + agentId, }) sendMessageRef.current = sendMessage diff --git a/cli/src/hooks/use-send-message.ts b/cli/src/hooks/use-send-message.ts index 28b452a7e..663236adc 100644 --- a/cli/src/hooks/use-send-message.ts +++ b/cli/src/hooks/use-send-message.ts @@ -107,6 +107,7 @@ interface UseSendMessageOptions { setIsStreaming: (streaming: boolean) => void setCanProcessQueue: (can: boolean) => void abortControllerRef: React.MutableRefObject + agentId?: string } export const useSendMessage = ({ @@ -126,6 +127,7 @@ export const useSendMessage = ({ setIsStreaming, setCanProcessQueue, abortControllerRef, + agentId, }: UseSendMessageOptions) => { const previousRunStateRef = useRef(null) const spawnAgentsMapRef = useRef< @@ -558,7 +560,7 @@ export const useSendMessage = ({ const agent = agentMode === 'FAST' ? 
'base2-fast' : 'base2-max' const result = await client.run({ - agent, + agent: agentId || agent, prompt: content, previousRun: previousRunStateRef.current, signal: abortController.signal, diff --git a/cli/src/index.tsx b/cli/src/index.tsx index acc0cae57..4fec3798a 100644 --- a/cli/src/index.tsx +++ b/cli/src/index.tsx @@ -3,6 +3,7 @@ import './polyfills/bun-strip-ansi' import { render } from '@opentui/react' import React from 'react' import { createRequire } from 'module' +import { Command } from 'commander' import { App } from './chat' import { clearLogFile } from './utils/logger' @@ -30,82 +31,45 @@ const VERSION = loadPackageVersion() type ParsedArgs = { initialPrompt: string | null + agent?: string clearLogs: boolean - showHelp: boolean - showVersion: boolean } function parseArgs(): ParsedArgs { - const args = process.argv.slice(2) - let clearLogs = false - let showHelp = false - let showVersion = false - const promptParts: string[] = [] - - for (const arg of args) { - switch (arg) { - case '--clear-logs': - clearLogs = true - break - case '--help': - case '-h': - showHelp = true - break - case '--version': - case '-v': - showVersion = true - break - default: - promptParts.push(arg) - break - } - } + const program = new Command() + + program + .name('codecane') + .description('Codecane CLI - AI-powered coding assistant') + .version(VERSION, '-v, --version', 'Print the CLI version') + .option( + '--agent ', + 'Specify which agent to use (e.g., "base", "ask", "file-picker")', + ) + .option('--clear-logs', 'Remove any existing CLI log files before starting') + .helpOption('-h, --help', 'Show this help message') + .argument('[prompt...]', 'Initial prompt to send to the agent') + .allowExcessArguments(true) + .parse(process.argv) + + const options = program.opts() + const args = program.args return { - initialPrompt: promptParts.length > 0 ? promptParts.join(' ') : null, - clearLogs, - showHelp, - showVersion, + initialPrompt: args.length > 0 ? 
args.join(' ') : null, + agent: options.agent, + clearLogs: options.clearLogs || false, } } -function printHelp() { - console.log(`Codebuff CLI v${VERSION}`) - console.log('') - console.log('Usage: codebuff-cli [options] [initial prompt]') - console.log('') - console.log('Options:') - console.log(' --help, -h Show this help message and exit') - console.log(' --version, -v Print the CLI version and exit') - console.log(' --clear-logs Remove any existing CLI log files before starting') - console.log('') - console.log( - 'Provide a prompt after the options to automatically seed the first conversation.', - ) -} - -function printVersion() { - console.log(`Codebuff CLI v${VERSION}`) -} - -const { initialPrompt, clearLogs, showHelp, showVersion } = parseArgs() - -if (showVersion) { - printVersion() - process.exit(0) -} - -if (showHelp) { - printHelp() - process.exit(0) -} +const { initialPrompt, agent, clearLogs } = parseArgs() if (clearLogs) { clearLogFile() } if (initialPrompt) { - render() + render() } else { - render() + render() } diff --git a/cli/src/state/chat-store.ts b/cli/src/state/chat-store.ts index 49e45ed4d..3442b81d2 100644 --- a/cli/src/state/chat-store.ts +++ b/cli/src/state/chat-store.ts @@ -21,13 +21,23 @@ export type ChatStoreState = { } type ChatStoreActions = { - setMessages: (value: ChatMessage[] | ((prev: ChatMessage[]) => ChatMessage[])) => void - setStreamingAgents: (value: Set | ((prev: Set) => Set)) => void - setCollapsedAgents: (value: Set | ((prev: Set) => Set)) => void - setFocusedAgentId: (value: string | null | ((prev: string | null) => string | null)) => void + setMessages: ( + value: ChatMessage[] | ((prev: ChatMessage[]) => ChatMessage[]), + ) => void + setStreamingAgents: ( + value: Set | ((prev: Set) => Set), + ) => void + setCollapsedAgents: ( + value: Set | ((prev: Set) => Set), + ) => void + setFocusedAgentId: ( + value: string | null | ((prev: string | null) => string | null), + ) => void setInputValue: (value: string | ((prev: 
string) => string)) => void setInputFocused: (focused: boolean) => void - setActiveSubagents: (value: Set | ((prev: Set) => Set)) => void + setActiveSubagents: ( + value: Set | ((prev: Set) => Set), + ) => void setIsChainInProgress: (active: boolean) => void setSlashSelectedIndex: (value: number | ((prev: number) => number)) => void setAgentSelectedIndex: (value: number | ((prev: number) => number)) => void @@ -41,15 +51,7 @@ type ChatStore = ChatStoreState & ChatStoreActions enableMapSet() const initialState: ChatStoreState = { - messages: [ - { - id: 'ai-seed-1', - variant: 'ai', - content: - "Hey there! Welcome to the demo โ€” feel free to ask anything or just say hello when you're ready.", - timestamp: formatTimestamp(), - }, - ], + messages: [], streamingAgents: new Set(), collapsedAgents: new Set(), focusedAgentId: null, @@ -62,81 +64,91 @@ const initialState: ChatStoreState = { agentMode: 'FAST', } -export const useChatStore = create()(immer((set) => ({ - ...initialState, - - setMessages: (value) => - set((state) => { - state.messages = typeof value === 'function' ? value(state.messages) : value - }), - - setStreamingAgents: (value) => - set((state) => { - state.streamingAgents = typeof value === 'function' ? value(state.streamingAgents) : value - }), - - setCollapsedAgents: (value) => - set((state) => { - state.collapsedAgents = typeof value === 'function' ? value(state.collapsedAgents) : value - }), - - setFocusedAgentId: (value) => - set((state) => { - state.focusedAgentId = typeof value === 'function' ? value(state.focusedAgentId) : value - }), - - setInputValue: (value) => - set((state) => { - state.inputValue = typeof value === 'function' ? value(state.inputValue) : value - }), - - setInputFocused: (focused) => - set((state) => { - state.inputFocused = focused - }), - - setActiveSubagents: (value) => - set((state) => { - state.activeSubagents = typeof value === 'function' ? 
value(state.activeSubagents) : value - }), - - setIsChainInProgress: (active) => - set((state) => { - state.isChainInProgress = active - }), - - setSlashSelectedIndex: (value) => - set((state) => { - state.slashSelectedIndex = typeof value === 'function' ? value(state.slashSelectedIndex) : value - }), - - setAgentSelectedIndex: (value) => - set((state) => { - state.agentSelectedIndex = typeof value === 'function' ? value(state.agentSelectedIndex) : value - }), - - setAgentMode: (mode) => - set((state) => { - state.agentMode = mode - }), - - toggleAgentMode: () => - set((state) => { - state.agentMode = state.agentMode === 'FAST' ? 'MAX' : 'FAST' - }), - - reset: () => - set((state) => { - state.messages = initialState.messages.slice() - state.streamingAgents = new Set(initialState.streamingAgents) - state.collapsedAgents = new Set(initialState.collapsedAgents) - state.focusedAgentId = initialState.focusedAgentId - state.inputValue = initialState.inputValue - state.inputFocused = initialState.inputFocused - state.activeSubagents = new Set(initialState.activeSubagents) - state.isChainInProgress = initialState.isChainInProgress - state.slashSelectedIndex = initialState.slashSelectedIndex - state.agentSelectedIndex = initialState.agentSelectedIndex - state.agentMode = initialState.agentMode - }), -}))) +export const useChatStore = create()( + immer((set) => ({ + ...initialState, + + setMessages: (value) => + set((state) => { + state.messages = + typeof value === 'function' ? value(state.messages) : value + }), + + setStreamingAgents: (value) => + set((state) => { + state.streamingAgents = + typeof value === 'function' ? value(state.streamingAgents) : value + }), + + setCollapsedAgents: (value) => + set((state) => { + state.collapsedAgents = + typeof value === 'function' ? value(state.collapsedAgents) : value + }), + + setFocusedAgentId: (value) => + set((state) => { + state.focusedAgentId = + typeof value === 'function' ? 
value(state.focusedAgentId) : value + }), + + setInputValue: (value) => + set((state) => { + state.inputValue = + typeof value === 'function' ? value(state.inputValue) : value + }), + + setInputFocused: (focused) => + set((state) => { + state.inputFocused = focused + }), + + setActiveSubagents: (value) => + set((state) => { + state.activeSubagents = + typeof value === 'function' ? value(state.activeSubagents) : value + }), + + setIsChainInProgress: (active) => + set((state) => { + state.isChainInProgress = active + }), + + setSlashSelectedIndex: (value) => + set((state) => { + state.slashSelectedIndex = + typeof value === 'function' ? value(state.slashSelectedIndex) : value + }), + + setAgentSelectedIndex: (value) => + set((state) => { + state.agentSelectedIndex = + typeof value === 'function' ? value(state.agentSelectedIndex) : value + }), + + setAgentMode: (mode) => + set((state) => { + state.agentMode = mode + }), + + toggleAgentMode: () => + set((state) => { + state.agentMode = state.agentMode === 'FAST' ? 
'MAX' : 'FAST' + }), + + reset: () => + set((state) => { + state.messages = initialState.messages.slice() + state.streamingAgents = new Set(initialState.streamingAgents) + state.collapsedAgents = new Set(initialState.collapsedAgents) + state.focusedAgentId = initialState.focusedAgentId + state.inputValue = initialState.inputValue + state.inputFocused = initialState.inputFocused + state.activeSubagents = new Set(initialState.activeSubagents) + state.isChainInProgress = initialState.isChainInProgress + state.slashSelectedIndex = initialState.slashSelectedIndex + state.agentSelectedIndex = initialState.agentSelectedIndex + state.agentMode = initialState.agentMode + }), + })), +) diff --git a/evals/package.json b/evals/package.json index a8556049e..a0e423148 100644 --- a/evals/package.json +++ b/evals/package.json @@ -11,6 +11,7 @@ } }, "scripts": { + "pretypecheck": "cd ../sdk && bun run build:types", "typecheck": "tsc --noEmit -p .", "test": "bun test", "test:manifold": "bun test manifold.test.ts", diff --git a/npm-app/knowledge.md b/npm-app/knowledge.md new file mode 100644 index 000000000..3b3af82f5 --- /dev/null +++ b/npm-app/knowledge.md @@ -0,0 +1,3 @@ +# npm-app Knowledge + +- npm distribution scripts (e.g. `release` artifacts in `npm-app/release*`) still rely on Node-based uninstall helpers for compatibility with end users. The development workflows now require Bun 1.3.0+, so keep the legacy Node snippets only in the published package files. 
diff --git a/npm-app/package.json b/npm-app/package.json index f506a69b2..05b13e343 100644 --- a/npm-app/package.json +++ b/npm-app/package.json @@ -19,6 +19,8 @@ "codebuff": "dist/index.js" }, "scripts": { + "pretypecheck": "cd ../sdk && bun run build:types", + "pretest": "cd ../sdk && bun run build", "typecheck": "tsc --noEmit -p .", "build": "bun run scripts/build-binary.js codebuff $(node -p \"require('./release/package.json').version\")", "release": "bun run scripts/release.js", @@ -38,7 +40,6 @@ "@codebuff/common": "workspace:*", "@types/diff": "8.0.0", "@types/micromatch": "^4.0.9", - "@vscode/ripgrep": "1.15.9", "ai": "5.0.0", "axios": "1.7.4", "cli-highlight": "^2.1.11", diff --git a/npm-app/release-staging/index.js b/npm-app/release-staging/index.js index 347adccaa..6cb8893ba 100644 --- a/npm-app/release-staging/index.js +++ b/npm-app/release-staging/index.js @@ -250,9 +250,10 @@ async function downloadBinary(version) { throw new Error(`Unsupported platform: ${process.platform} ${process.arch}`) } - // For now, we get version info from npm but still download binaries from GitHub - // TODO: This assumes GitHub releases still exist with the same naming convention - const downloadUrl = `https://github.com/CodebuffAI/codebuff-community/releases/download/v${version}/${fileName}` + // Use proxy endpoint that handles version mapping from npm to GitHub releases + const downloadUrl = process.env.NEXT_PUBLIC_CODEBUFF_APP_URL + ? 
`${process.env.NEXT_PUBLIC_CODEBUFF_APP_URL}/api/releases/download/${version}/${fileName}` + : `https://codebuff.com/api/releases/download/${version}/${fileName}` // Ensure config directory exists fs.mkdirSync(CONFIG.configDir, { recursive: true }) diff --git a/npm-app/release/index.js b/npm-app/release/index.js index 93b7b937a..56ca0bd9b 100644 --- a/npm-app/release/index.js +++ b/npm-app/release/index.js @@ -215,7 +215,10 @@ async function downloadBinary(version) { throw new Error(`Unsupported platform: ${process.platform} ${process.arch}`) } - const downloadUrl = `https://github.com/${CONFIG.githubRepo}/releases/download/v${version}/${fileName}` + // Use proxy endpoint that handles version mapping + const downloadUrl = process.env.NEXT_PUBLIC_CODEBUFF_APP_URL + ? `${process.env.NEXT_PUBLIC_CODEBUFF_APP_URL}/api/releases/download/${version}/${fileName}` + : `https://codebuff.com/api/releases/download/${version}/${fileName}` // Ensure config directory exists fs.mkdirSync(CONFIG.configDir, { recursive: true }) diff --git a/npm-app/src/native/ripgrep.ts b/npm-app/src/native/ripgrep.ts index d9d6b1018..078097be1 100644 --- a/npm-app/src/native/ripgrep.ts +++ b/npm-app/src/native/ripgrep.ts @@ -1,51 +1,49 @@ import { mkdirSync } from 'fs' import path from 'path' -import { rgPath as vscodeRgPath } from '@vscode/ripgrep' import { spawnSync } from 'bun' +import { getBundledRgPath } from '@codebuff/sdk' import { CONFIG_DIR } from '../credentials' import { logger } from '../utils/logger' const getRipgrepPath = async (): Promise => { - // In dev mode, use the vscode ripgrep binary + let bundledRgPath: string + try { + bundledRgPath = getBundledRgPath() + } catch (error) { + logger.error({ error }, 'Failed to resolve bundled ripgrep path') + throw error + } + + // In dev mode, use the bundled path directly if (!process.env.IS_BINARY) { - return vscodeRgPath + return bundledRgPath } - // Compiled mode - self-extract the embedded binary + // Compiled mode - stage the bundled 
binary in the config directory const rgFileName = process.platform === 'win32' ? 'rg.exe' : 'rg' const outPath = path.join(CONFIG_DIR, rgFileName) - // Check if already extracted - if (await Bun.file(outPath).exists()) { - return outPath - } - - // Extract the embedded binary try { - // Use require() on a static string path to make sure rg is included in the compiled binary - const embeddedRgPath = - process.platform === 'win32' - ? require('../../../node_modules/@vscode/ripgrep/bin/rg.exe') - : require('../../../node_modules/@vscode/ripgrep/bin/rg') + if (await Bun.file(outPath).exists()) { + return outPath + } - // Create cache directory mkdirSync(path.dirname(outPath), { recursive: true }) + await Bun.write(outPath, await Bun.file(bundledRgPath).arrayBuffer()) - // Copy embedded binary to cache location - await Bun.write(outPath, await Bun.file(embeddedRgPath).arrayBuffer()) - - // Make executable on Unix systems if (process.platform !== 'win32') { spawnSync(['chmod', '+x', outPath]) } return outPath } catch (error) { - logger.error({ error }, 'Failed to extract ripgrep binary') - // Fallback to vscode ripgrep if extraction fails - return vscodeRgPath + logger.error( + { error }, + 'Failed to stage bundled ripgrep binary, using fallback path', + ) + return bundledRgPath } } diff --git a/package.json b/package.json index ac7e93dd6..1137f9e33 100644 --- a/package.json +++ b/package.json @@ -30,9 +30,9 @@ "start-vscode": "bun --cwd npm-app start-vscode", "start-nushell": "bun --cwd npm-app start-nushell", "format": "prettier --write \"**/*.{ts,tsx,json,md}\"", - "release:npm-app": "bun run --cwd npm-app release", + "release:npm-app": "bun run --cwd=npm-app release", "clean-ts": "find . -name '*.tsbuildinfo' -type f -delete && find . -name '.next' -type d -exec rm -rf {} + 2>/dev/null || true && find . 
-name 'node_modules' -type d -exec rm -rf {} + 2>/dev/null || true && bun install", - "typecheck": "bun --filter='*' run typecheck && echo 'โœ… All type checks passed!'", + "typecheck": "bun --cwd=sdk run build:types && bun --filter='*' run typecheck && echo 'โœ… All type checks passed!'", "test": "bun --filter='{@codebuff/backend,@codebuff/common,@codebuff/npm-app,@codebuff/agents}' run test", "init-worktree": "bun scripts/init-worktree.ts", "cleanup-worktree": "bash scripts/cleanup-worktree.sh", @@ -47,13 +47,13 @@ }, "devDependencies": { "@tanstack/react-query": "^5.59.16", - "@types/bun": "^1.3.0", + "@types/bun": "^1.2.11", "@types/lodash": "4.17.7", "@types/node": "^22.9.0", "@types/node-fetch": "^2.6.12", "@types/parse-path": "^7.1.0", "@typescript-eslint/eslint-plugin": "^6.17", - "bun-types": "^1.3.0", + "bun-types": "^1.2.2", "eslint-config-prettier": "^9.1.0", "eslint-plugin-import": "^2.29.1", "eslint-plugin-unused-imports": "^4.1.4", @@ -68,5 +68,8 @@ "typescript": "5.5.4", "typescript-eslint": "^7.17.0" }, + "engines": { + "bun": ">=1.3.0" + }, "packageManager": "bun@1.3.0" } diff --git a/packages/billing/src/__tests__/credit-delegation.test.ts b/packages/billing/src/__tests__/credit-delegation.test.ts index 5afe86f28..bf7d14406 100644 --- a/packages/billing/src/__tests__/credit-delegation.test.ts +++ b/packages/billing/src/__tests__/credit-delegation.test.ts @@ -17,9 +17,9 @@ describe('Credit Delegation', () => { error: () => {}, } - beforeAll(() => { + beforeAll(async () => { // Mock the org-billing functions that credit-delegation depends on - mockModule('@codebuff/billing/org-billing', () => ({ + await mockModule('@codebuff/billing/org-billing', () => ({ normalizeRepositoryUrl: mock((url: string) => url.toLowerCase().trim()), extractOwnerAndRepo: mock((url: string) => { if (url.includes('codebuffai/codebuff')) { @@ -31,26 +31,61 @@ describe('Credit Delegation', () => { })) // Mock common dependencies - mockModule('@codebuff/common/db', () => ({ - 
default: { - select: mock(() => ({ - from: mock(() => ({ - innerJoin: mock(() => ({ - where: mock(() => - Promise.resolve([{ orgId: 'org-123', orgName: 'CodebuffAI' }]), - ), - })), - })), - })), - }, - })) + await mockModule('@codebuff/common/db', () => { + const select = mock((fields: Record) => { + if ('orgId' in fields && 'orgName' in fields) { + return { + from: () => ({ + innerJoin: () => ({ + where: () => + Promise.resolve([ + { + orgId: 'org-123', + orgName: 'CodebuffAI', + orgSlug: 'codebuffai', + }, + ]), + }), + }), + } + } + + if ('repoUrl' in fields) { + return { + from: () => ({ + where: () => + Promise.resolve([ + { + repoUrl: 'https://github.com/codebuffai/codebuff', + repoName: 'codebuff', + isActive: true, + }, + ]), + }), + } + } + + return { + from: () => ({ + where: () => Promise.resolve([]), + }), + } + }) + + return { + default: { + select, + }, + } + }) - mockModule('@codebuff/common/db/schema', () => ({ + await mockModule('@codebuff/common/db/schema', () => ({ orgMember: { org_id: 'org_id', user_id: 'user_id' }, - org: { id: 'id', name: 'name' }, + org: { id: 'id', name: 'name', slug: 'slug' }, orgRepo: { org_id: 'org_id', repo_url: 'repo_url', + repo_name: 'repo_name', is_active: 'is_active', }, })) diff --git a/packages/billing/src/__tests__/org-billing.test.ts b/packages/billing/src/__tests__/org-billing.test.ts index fa8640c3d..8dbf8abb3 100644 --- a/packages/billing/src/__tests__/org-billing.test.ts +++ b/packages/billing/src/__tests__/org-billing.test.ts @@ -2,7 +2,7 @@ import { clearMockedModules, mockModule, } from '@codebuff/common/testing/mock-modules' -import { afterAll, beforeAll, describe, expect, it } from 'bun:test' +import { afterAll, afterEach, beforeAll, describe, expect, it } from 'bun:test' import { calculateOrganizationUsageAndBalance, @@ -49,44 +49,52 @@ const logger: Logger = { warn: () => {}, } -describe('Organization Billing', () => { - beforeAll(() => { - mockModule('@codebuff/common/db', () => ({ - default: { - 
select: () => ({ - from: () => ({ - where: () => ({ - orderBy: () => mockGrants, - }), - }), - }), - insert: () => ({ - values: () => Promise.resolve(), +const createDbMock = (options?: { + grants?: typeof mockGrants | any[] + insert?: () => { values: () => Promise } + update?: () => { set: () => { where: () => Promise } } +}) => { + const { + grants = mockGrants, + insert, + update, + } = options ?? {} + + return { + select: () => ({ + from: () => ({ + where: () => ({ + orderBy: () => grants, }), - update: () => ({ - set: () => ({ - where: () => Promise.resolve(), - }), + }), + }), + insert: + insert ?? + (() => ({ + values: () => Promise.resolve(), + })), + update: + update ?? + (() => ({ + set: () => ({ + where: () => Promise.resolve(), }), - }, + })), + } +} + +describe('Organization Billing', () => { + beforeAll(async () => { + await mockModule('@codebuff/common/db', () => ({ + default: createDbMock(), })) - mockModule('@codebuff/common/db/transaction', () => ({ - withSerializableTransaction: (fn: any) => - fn({ - select: () => ({ - from: () => ({ - where: () => ({ - orderBy: () => mockGrants, - }), - }), - }), - update: () => ({ - set: () => ({ - where: () => Promise.resolve(), - }), - }), - }), + await mockModule('@codebuff/common/db/transaction', () => ({ + withSerializableTransaction: async ({ + callback, + }: { + callback: (tx: any) => Promise | unknown + }) => await callback(createDbMock()), })) }) @@ -94,11 +102,17 @@ describe('Organization Billing', () => { clearMockedModules() }) - describe('calculateOrganizationUsageAndBalance', () => { - it('should calculate balance correctly with positive and negative balances', async () => { - const organizationId = 'org-123' - const quotaResetDate = new Date('2024-01-01') - const now = new Date('2024-06-01') + afterEach(async () => { + await mockModule('@codebuff/common/db', () => ({ + default: createDbMock(), + })) + }) + +describe('calculateOrganizationUsageAndBalance', () => { + it('should calculate balance 
correctly with positive and negative balances', async () => { + const organizationId = 'org-123' + const quotaResetDate = new Date('2024-01-01') + const now = new Date('2024-06-01') const result = await calculateOrganizationUsageAndBalance({ organizationId, @@ -118,19 +132,11 @@ describe('Organization Billing', () => { expect(result.usageThisCycle).toBe(800) }) - it('should handle organization with no grants', async () => { - // Mock empty grants - mockModule('@codebuff/common/db', () => ({ - default: { - select: () => ({ - from: () => ({ - where: () => ({ - orderBy: () => [], - }), - }), - }), - }, - })) + it('should handle organization with no grants', async () => { + // Mock empty grants + await mockModule('@codebuff/common/db', () => ({ + default: createDbMock({ grants: [] }), + })) const organizationId = 'org-empty' const quotaResetDate = new Date('2024-01-01') @@ -201,7 +207,7 @@ describe('Organization Billing', () => { it('should reject malformed URLs', () => { const result = validateAndNormalizeRepositoryUrl('not-a-url') expect(result.isValid).toBe(false) - expect(result.error).toBe('Invalid URL format') + expect(result.error).toBe('Repository domain not allowed') }) it('should accept allowed domains', () => { @@ -255,19 +261,19 @@ describe('Organization Billing', () => { }) it('should handle duplicate operation IDs gracefully', async () => { - // Mock database constraint error - mockModule('@codebuff/common/db', () => ({ - default: { - insert: () => ({ - values: () => { - const error = new Error('Duplicate key') - ;(error as any).code = '23505' - ;(error as any).constraint = 'credit_ledger_pkey' - throw error - }, - }), - }, - })) + // Mock database constraint error + await mockModule('@codebuff/common/db', () => ({ + default: createDbMock({ + insert: () => ({ + values: () => { + const error = new Error('Duplicate key') + ;(error as any).code = '23505' + ;(error as any).constraint = 'credit_ledger_pkey' + throw error + }, + }), + }), + })) const 
organizationId = 'org-123' const userId = 'user-123' diff --git a/packages/code-map/__tests__/languages.test.ts b/packages/code-map/__tests__/languages.test.ts index 877ccd998..543d99185 100644 --- a/packages/code-map/__tests__/languages.test.ts +++ b/packages/code-map/__tests__/languages.test.ts @@ -22,11 +22,11 @@ describe('languages module', () => { languageTable.forEach((config) => { expect(config).toHaveProperty('extensions') expect(config).toHaveProperty('wasmFile') - expect(config).toHaveProperty('queryText') + expect(config).toHaveProperty('queryPathOrContent') expect(Array.isArray(config.extensions)).toBe(true) expect(config.extensions.length).toBeGreaterThan(0) expect(typeof config.wasmFile).toBe('string') - expect(typeof config.queryText).toBe('string') + expect(typeof config.queryPathOrContent).toBe('string') }) }) @@ -34,7 +34,7 @@ describe('languages module', () => { const tsConfig = languageTable.find(c => c.extensions.includes('.ts')) expect(tsConfig).toBeDefined() expect(tsConfig?.wasmFile).toBe('tree-sitter-typescript.wasm') - expect(tsConfig?.queryText).toBeDefined() + expect(tsConfig?.queryPathOrContent).toBeDefined() }) it('should support TSX files', () => { @@ -209,12 +209,12 @@ describe('languages module', () => { const config: LanguageConfig = { extensions: ['.test'], wasmFile: 'test.wasm', - queryText: 'test query', + queryPathOrContent: 'test query', } expect(config.extensions).toEqual(['.test']) expect(config.wasmFile).toBe('test.wasm') - expect(config.queryText).toBe('test query') + expect(config.queryPathOrContent).toBe('test query') expect(config.parser).toBeUndefined() expect(config.query).toBeUndefined() expect(config.language).toBeUndefined() diff --git a/packages/internal/src/openrouter-ai-sdk/chat/index.test.ts b/packages/internal/src/openrouter-ai-sdk/chat/index.test.ts index 4b23a4c07..9676346df 100644 --- a/packages/internal/src/openrouter-ai-sdk/chat/index.test.ts +++ 
b/packages/internal/src/openrouter-ai-sdk/chat/index.test.ts @@ -1,8 +1,5 @@ -import { - convertReadableStreamToArray, - createTestServer, -} from '@ai-sdk/provider-utils/test' -import { describe, expect, it } from 'bun:test' +import { convertReadableStreamToArray } from '@ai-sdk/provider-utils/test' +import { afterEach, beforeEach, describe, expect, it } from 'bun:test' import { createOpenRouter } from '../provider' import { ReasoningDetailType } from '../schemas/reasoning-details' @@ -109,20 +106,181 @@ const TEST_LOGPROBS = { ], } -const provider = createOpenRouter({ - apiKey: 'test-api-key', - compatibility: 'strict', -}) +type MockResponseDefinition = + | { + type: 'json-value' + body: any + headers?: Record + status?: number + } + | { + type: 'stream-chunks' + chunks: string[] + headers?: Record + status?: number + } + +type MockServerRoute = { + response: MockResponseDefinition +} -const model = provider.chat('anthropic/claude-3.5-sonnet') +type MockServerCall = { + requestHeaders: Record + requestBodyJson: Promise +} + +const createStreamFromChunks = (chunks: string[]) => + new ReadableStream({ + start(controller) { + try { + for (const chunk of chunks) { + controller.enqueue(chunk) + } + } finally { + controller.close() + } + }, + }).pipeThrough(new TextEncoderStream()) + +function toHeadersRecord(headers?: HeadersInit): Record { + const result: Record = {} + + if (!headers) { + return result + } + + if (headers instanceof Headers) { + headers.forEach((value, key) => { + result[key.toLowerCase()] = value + }) + return result + } + + if (Array.isArray(headers)) { + for (const [key, value] of headers) { + result[String(key).toLowerCase()] = String(value) + } + return result + } + + for (const [key, value] of Object.entries(headers)) { + if (typeof value !== 'undefined') { + result[key.toLowerCase()] = String(value) + } + } + + return result +} + +function parseRequestBody(body: BodyInit | null | undefined): any { + if (body == null) { + return undefined + } 
+ + if (typeof body === 'string') { + try { + return JSON.parse(body) + } catch { + return undefined + } + } + + return undefined +} + +function createMockServer(routes: Record) { + const urls: Record = Object.fromEntries( + Object.entries(routes).map(([url, config]) => [ + url, + { + response: { ...config.response }, + }, + ]), + ) + + const calls: MockServerCall[] = [] + + const buildResponse = (definition: MockResponseDefinition): Response => { + const status = definition.status ?? 200 + + if (definition.type === 'json-value') { + return new Response(JSON.stringify(definition.body), { + status, + headers: { + 'Content-Type': 'application/json', + ...definition.headers, + }, + }) + } + + return new Response(createStreamFromChunks(definition.chunks), { + status, + headers: { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + ...definition.headers, + }, + }) + } + + const fetchImpl = async (input: RequestInfo, init: RequestInit = {}) => { + const url = + typeof input === 'string' + ? input + : input instanceof URL + ? input.toString() + : input.url + + const route = urls[url] + + if (!route) { + return new Response('Not Found', { status: 404 }) + } + + const requestHeaders = toHeadersRecord(init.headers) + const requestBodyJson = Promise.resolve(parseRequestBody(init.body)) + + calls.push({ requestHeaders, requestBodyJson }) + + return buildResponse(route.response) + } + + const fetch = ((input: RequestInfo | URL, init?: RequestInit) => + fetchImpl(input as RequestInfo, init ?? 
{})) as typeof global.fetch + + fetch.preconnect = async () => {} + + return { + urls, + calls, + fetch, + } +} describe('doGenerate', () => { - const server = createTestServer({ + const server = createMockServer({ 'https://openrouter.ai/api/v1/chat/completions': { response: { type: 'json-value', body: {} }, }, }) + const provider = createOpenRouter({ + apiKey: 'test-api-key', + compatibility: 'strict', + fetch: server.fetch, + }) + + const model = provider.chat('anthropic/claude-3.5-sonnet') + + beforeEach(() => { + server.calls.length = 0 + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'json-value', + body: {}, + } + }) + function prepareJsonResponse({ content = '', reasoning, @@ -490,6 +648,7 @@ describe('doGenerate', () => { headers: { 'Custom-Provider-Header': 'provider-header-value', }, + fetch: server.fetch, }) await provider.chat('openai/gpt-3.5-turbo').doGenerate({ @@ -584,12 +743,28 @@ describe('doGenerate', () => { }) describe('doStream', () => { - const server = createTestServer({ + const server = createMockServer({ 'https://openrouter.ai/api/v1/chat/completions': { - response: { type: 'json-value', body: {} }, + response: { type: 'stream-chunks', chunks: [] }, }, }) + const provider = createOpenRouter({ + apiKey: 'test-api-key', + compatibility: 'strict', + fetch: server.fetch, + }) + + const model = provider.chat('anthropic/claude-3.5-sonnet') + + beforeEach(() => { + server.calls.length = 0 + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [], + } + }) + function prepareStreamResponse({ content, usage = { @@ -1308,6 +1483,7 @@ describe('doStream', () => { headers: { 'Custom-Provider-Header': 'provider-header-value', }, + fetch: server.fetch, }) await provider.chat('openai/gpt-3.5-turbo').doStream({ @@ -1340,6 +1516,7 @@ describe('doStream', () => { }, }, }, + fetch: server.fetch, }) await provider.chat('anthropic/claude-3.5-sonnet').doStream({ diff 
--git a/packages/internal/src/openrouter-ai-sdk/completion/index.test.ts b/packages/internal/src/openrouter-ai-sdk/completion/index.test.ts index 4c4db6bd2..3a4a63d1c 100644 --- a/packages/internal/src/openrouter-ai-sdk/completion/index.test.ts +++ b/packages/internal/src/openrouter-ai-sdk/completion/index.test.ts @@ -1,8 +1,5 @@ -import { - convertReadableStreamToArray, - createTestServer, -} from '@ai-sdk/provider-utils/test' -import { describe, expect, it } from 'bun:test' +import { convertReadableStreamToArray } from '@ai-sdk/provider-utils/test' +import { afterEach, beforeEach, describe, expect, it } from 'bun:test' import { createOpenRouter } from '../provider' @@ -39,20 +36,181 @@ const TEST_LOGPROBS = { ] as Record[], } -const provider = createOpenRouter({ - apiKey: 'test-api-key', - compatibility: 'strict', -}) +type MockResponseDefinition = + | { + type: 'json-value' + body: any + headers?: Record + status?: number + } + | { + type: 'stream-chunks' + chunks: string[] + headers?: Record + status?: number + } + +type MockServerRoute = { + response: MockResponseDefinition +} + +type MockServerCall = { + requestHeaders: Record + requestBodyJson: Promise +} + +const createStreamFromChunks = (chunks: string[]) => + new ReadableStream({ + start(controller) { + try { + for (const chunk of chunks) { + controller.enqueue(chunk) + } + } finally { + controller.close() + } + }, + }).pipeThrough(new TextEncoderStream()) + +function toHeadersRecord(headers?: HeadersInit): Record { + const result: Record = {} + + if (!headers) { + return result + } + + if (headers instanceof Headers) { + headers.forEach((value, key) => { + result[key.toLowerCase()] = value + }) + return result + } + + if (Array.isArray(headers)) { + for (const [key, value] of headers) { + result[String(key).toLowerCase()] = String(value) + } + return result + } + + for (const [key, value] of Object.entries(headers)) { + if (typeof value !== 'undefined') { + result[key.toLowerCase()] = String(value) + } + 
} + + return result +} + +function parseRequestBody(body: BodyInit | null | undefined): any { + if (body == null) { + return undefined + } -const model = provider.completion('openai/gpt-3.5-turbo-instruct') + if (typeof body === 'string') { + try { + return JSON.parse(body) + } catch { + return undefined + } + } + + return undefined +} + +function createMockServer(routes: Record) { + const urls: Record = Object.fromEntries( + Object.entries(routes).map(([url, config]) => [ + url, + { + response: { ...config.response }, + }, + ]), + ) + + const calls: MockServerCall[] = [] + + const buildResponse = (definition: MockResponseDefinition): Response => { + const status = definition.status ?? 200 + + if (definition.type === 'json-value') { + return new Response(JSON.stringify(definition.body), { + status, + headers: { + 'Content-Type': 'application/json', + ...definition.headers, + }, + }) + } + + return new Response(createStreamFromChunks(definition.chunks), { + status, + headers: { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + ...definition.headers, + }, + }) + } + + const fetchImpl = async (input: RequestInfo, init: RequestInit = {}) => { + const url = + typeof input === 'string' + ? input + : input instanceof URL + ? input.toString() + : input.url + + const route = urls[url] + + if (!route) { + return new Response('Not Found', { status: 404 }) + } + + const requestHeaders = toHeadersRecord(init.headers) + const requestBodyJson = Promise.resolve(parseRequestBody(init.body)) + + calls.push({ requestHeaders, requestBodyJson }) + + return buildResponse(route.response) + } + + const fetch = ((input: RequestInfo | URL, init?: RequestInit) => + fetchImpl(input as RequestInfo, init ?? 
{})) as typeof global.fetch + + fetch.preconnect = async () => {} + + return { + urls, + calls, + fetch, + } +} describe('doGenerate', () => { - const server = createTestServer({ + const server = createMockServer({ 'https://openrouter.ai/api/v1/completions': { response: { type: 'json-value', body: {} }, }, }) + const provider = createOpenRouter({ + apiKey: 'test-api-key', + compatibility: 'strict', + fetch: server.fetch, + }) + + const model = provider.completion('openai/gpt-3.5-turbo-instruct') + + beforeEach(() => { + server.calls.length = 0 + server.urls['https://openrouter.ai/api/v1/completions']!.response = { + type: 'json-value', + body: {}, + } + }) + function prepareJsonResponse({ content = '', usage = { @@ -130,7 +288,10 @@ describe('doGenerate', () => { it('should extract logprobs', async () => { prepareJsonResponse({ logprobs: TEST_LOGPROBS }) - const provider = createOpenRouter({ apiKey: 'test-api-key' }) + const provider = createOpenRouter({ + apiKey: 'test-api-key', + fetch: server.fetch, + }) await provider .completion('openai/gpt-3.5-turbo', { logprobs: 1 }) @@ -208,6 +369,7 @@ describe('doGenerate', () => { headers: { 'Custom-Provider-Header': 'provider-header-value', }, + fetch: server.fetch, }) await provider.completion('openai/gpt-3.5-turbo-instruct').doGenerate({ @@ -229,12 +391,28 @@ describe('doGenerate', () => { }) describe('doStream', () => { - const server = createTestServer({ + const server = createMockServer({ 'https://openrouter.ai/api/v1/completions': { response: { type: 'stream-chunks', chunks: [] }, }, }) + const provider = createOpenRouter({ + apiKey: 'test-api-key', + compatibility: 'strict', + fetch: server.fetch, + }) + + const model = provider.completion('openai/gpt-3.5-turbo-instruct') + + beforeEach(() => { + server.calls.length = 0 + server.urls['https://openrouter.ai/api/v1/completions']!.response = { + type: 'stream-chunks', + chunks: [], + } + }) + function prepareStreamResponse({ content, finish_reason = 'stop', @@ -423,6 
+601,7 @@ describe('doStream', () => { headers: { 'Custom-Provider-Header': 'provider-header-value', }, + fetch: server.fetch, }) await provider.completion('openai/gpt-3.5-turbo-instruct').doStream({ @@ -455,6 +634,7 @@ describe('doStream', () => { }, }, }, + fetch: server.fetch, }) await provider.completion('openai/gpt-4o').doStream({ diff --git a/packages/internal/src/openrouter-ai-sdk/tests/provider-options.test.ts b/packages/internal/src/openrouter-ai-sdk/tests/provider-options.test.ts index 4f8189199..dd5ac85ec 100644 --- a/packages/internal/src/openrouter-ai-sdk/tests/provider-options.test.ts +++ b/packages/internal/src/openrouter-ai-sdk/tests/provider-options.test.ts @@ -1,4 +1,3 @@ -import { createTestServer } from '@ai-sdk/provider-utils/test' import { streamText } from 'ai' import { beforeEach, describe, expect, it, mock } from 'bun:test' @@ -6,13 +5,165 @@ import { createOpenRouter } from '../provider' import type { ModelMessage } from 'ai' +type MockResponseDefinition = + | { + type: 'json-value' + body: any + headers?: Record + status?: number + } + | { + type: 'stream-chunks' + chunks: string[] + headers?: Record + status?: number + } + +type MockServerRoute = { + response: MockResponseDefinition +} + +type MockServerCall = { + requestHeaders: Record + requestBodyJson: Promise +} + +const createStreamFromChunks = (chunks: string[]) => + new ReadableStream({ + start(controller) { + try { + for (const chunk of chunks) { + controller.enqueue(chunk) + } + } finally { + controller.close() + } + }, + }).pipeThrough(new TextEncoderStream()) + +function toHeadersRecord(headers?: HeadersInit): Record { + const result: Record = {} + + if (!headers) { + return result + } + + if (headers instanceof Headers) { + headers.forEach((value, key) => { + result[key.toLowerCase()] = value + }) + return result + } + + if (Array.isArray(headers)) { + for (const [key, value] of headers) { + result[String(key).toLowerCase()] = String(value) + } + return result + } + + for 
(const [key, value] of Object.entries(headers)) { + if (typeof value !== 'undefined') { + result[key.toLowerCase()] = String(value) + } + } + + return result +} + +function parseRequestBody(body: BodyInit | null | undefined): any { + if (body == null) { + return undefined + } + + if (typeof body === 'string') { + try { + return JSON.parse(body) + } catch { + return undefined + } + } + + return undefined +} + +function createMockServer(routes: Record) { + const urls: Record = Object.fromEntries( + Object.entries(routes).map(([url, config]) => [ + url, + { + response: { ...config.response }, + }, + ]), + ) + + const calls: MockServerCall[] = [] + + const buildResponse = (definition: MockResponseDefinition): Response => { + const status = definition.status ?? 200 + + if (definition.type === 'json-value') { + return new Response(JSON.stringify(definition.body), { + status, + headers: { + 'Content-Type': 'application/json', + ...definition.headers, + }, + }) + } + + return new Response(createStreamFromChunks(definition.chunks), { + status, + headers: { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + ...definition.headers, + }, + }) + } + + const fetchImpl = async (input: RequestInfo, init: RequestInit = {}) => { + const url = + typeof input === 'string' + ? input + : input instanceof URL + ? input.toString() + : input.url + + const route = urls[url] + + if (!route) { + return new Response('Not Found', { status: 404 }) + } + + const requestHeaders = toHeadersRecord(init.headers) + const requestBodyJson = Promise.resolve(parseRequestBody(init.body)) + + calls.push({ requestHeaders, requestBodyJson }) + + return buildResponse(route.response) + } + + const fetch = ((input: RequestInfo | URL, init?: RequestInit) => + fetchImpl(input as RequestInfo, init ?? 
{})) as typeof global.fetch + + fetch.preconnect = async () => {} + + return { + urls, + calls, + fetch, + } +} + // Add type assertions for the mocked classes const TEST_MESSAGES: ModelMessage[] = [ { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, ] describe('providerOptions', () => { - const server = createTestServer({ + const server = createMockServer({ 'https://openrouter.ai/api/v1/chat/completions': { response: { type: 'stream-chunks', @@ -21,14 +172,24 @@ describe('providerOptions', () => { }, }) + const openrouter = createOpenRouter({ + apiKey: 'test', + fetch: server.fetch, + }) + beforeEach(() => { mock.clearAllMocks() + server.calls.length = 0 + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [ + 'data: {"choices":[{"delta":{"content":"ok"}}]}' + '\n\n', + 'data: [DONE]' + '\n\n', + ], + } }) it('should set providerOptions openrouter to extra body', async () => { - const openrouter = createOpenRouter({ - apiKey: 'test', - }) const model = openrouter('anthropic/claude-3.7-sonnet') await streamText({ diff --git a/packages/internal/src/openrouter-ai-sdk/tests/stream-usage-accounting.test.ts b/packages/internal/src/openrouter-ai-sdk/tests/stream-usage-accounting.test.ts index d3a6d056f..8091a61a1 100644 --- a/packages/internal/src/openrouter-ai-sdk/tests/stream-usage-accounting.test.ts +++ b/packages/internal/src/openrouter-ai-sdk/tests/stream-usage-accounting.test.ts @@ -1,28 +1,73 @@ -import { - convertReadableStreamToArray, - createTestServer, -} from '@ai-sdk/provider-utils/test' -import { describe, expect, it } from 'bun:test' +import { convertReadableStreamToArray } from '@ai-sdk/provider-utils/test' +import { afterEach, beforeEach, describe, expect, it } from 'bun:test' import { OpenRouterChatLanguageModel } from '../chat' import type { OpenRouterChatSettings } from '../types/openrouter-chat-settings' describe('OpenRouter Streaming Usage Accounting', () => { - const server = 
createTestServer({ - 'https://api.openrouter.ai/chat/completions': { - response: { type: 'stream-chunks', chunks: [] }, - }, + const originalFetch = global.fetch + let capturedRequests: Array<{ + url: string + body?: any + }> = [] + let nextResponseChunks: string[] = [] + + const createStreamFromChunks = (chunks: string[]) => + new ReadableStream({ + start(controller) { + for (const chunk of chunks) { + controller.enqueue(chunk) + } + controller.close() + }, + }).pipeThrough(new TextEncoderStream()) + + beforeEach(() => { + capturedRequests = [] + global.fetch = (async (input: RequestInfo, init?: RequestInit) => { + const url = + typeof input === 'string' + ? input + : input instanceof URL + ? input.toString() + : input.url + + let parsedBody: any + if (init?.body && typeof init.body === 'string') { + try { + parsedBody = JSON.parse(init.body) + } catch { + parsedBody = undefined + } + } + + capturedRequests.push({ url, body: parsedBody }) + + return new Response(createStreamFromChunks(nextResponseChunks), { + status: 200, + headers: { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + }, + }) + }) as typeof global.fetch + }) + + afterEach(() => { + global.fetch = originalFetch + nextResponseChunks = [] }) function prepareStreamResponse(includeUsage = true) { - const chunks = [ + nextResponseChunks = [ `data: {"id":"test-id","model":"test-model","choices":[{"delta":{"content":"Hello"},"index":0}]}\n\n`, `data: {"choices":[{"finish_reason":"stop","index":0}]}\n\n`, ] if (includeUsage) { - chunks.push( + nextResponseChunks.push( `data: ${JSON.stringify({ usage: { prompt_tokens: 10, @@ -40,12 +85,7 @@ describe('OpenRouter Streaming Usage Accounting', () => { ) } - chunks.push('data: [DONE]\n\n') - - server.urls['https://api.openrouter.ai/chat/completions']!.response = { - type: 'stream-chunks', - chunks, - } + nextResponseChunks.push('data: [DONE]\n\n') } it('should include stream_options.include_usage in request when 
enabled', async () => { @@ -76,7 +116,7 @@ describe('OpenRouter Streaming Usage Accounting', () => { }) // Verify stream options - const requestBody = await server.calls[0]!.requestBodyJson + const requestBody = capturedRequests[0]?.body expect(requestBody).toBeDefined() expect(requestBody.stream).toBe(true) expect(requestBody.stream_options).toEqual({ diff --git a/sdk/package.json b/sdk/package.json index cbca08789..7da85dd43 100644 --- a/sdk/package.json +++ b/sdk/package.json @@ -23,7 +23,7 @@ ], "scripts": { "build": "bun run scripts/build.ts", - "build:types": "tsc -p tsconfig.build.json", + "build:types": "bunx dts-bundle-generator -o dist/index.d.ts --no-check --export-referenced-types=false src/index.ts", "build:verify": "bun run build && bun run smoke-test:dist && bun run test:cjs && bun run test:esm && bun run test:ripgrep && bun run test:tree-sitter-queries", "test:typecheck-strict": "tsc --noEmit --strict dist/index.d.ts", "smoke-test:dist": "bun run smoke-test-dist.ts", diff --git a/sdk/src/native/ripgrep.ts b/sdk/src/native/ripgrep.ts index 058b646d7..bab66b483 100644 --- a/sdk/src/native/ripgrep.ts +++ b/sdk/src/native/ripgrep.ts @@ -37,9 +37,12 @@ export function getBundledRgPath(importMetaUrl?: string): string { // Try to find the bundled binary relative to this module let vendorPath: string | undefined - if (importMetaUrl) { + // Use the SDK's own import.meta.url if none is provided + const metaUrl = importMetaUrl || import.meta.url + + if (metaUrl) { // ESM context - use import.meta.url to find relative path - const currentFile = fileURLToPath(importMetaUrl) + const currentFile = fileURLToPath(metaUrl) const currentDir = dirname(currentFile) // Try relative to current file (development - from src/native/ripgrep.ts to vendor/) @@ -55,6 +58,18 @@ export function getBundledRgPath(importMetaUrl?: string): string { if (existsSync(devPath)) { vendorPath = devPath } + + // Try relative to bundled dist file (production - from dist/index.mjs to 
dist/vendor/) + const distPath = join( + currentDir, + 'vendor', + 'ripgrep', + platformDir, + binaryName, + ) + if (existsSync(distPath)) { + vendorPath = distPath + } } // If not found via importMetaUrl, try CJS approach or other methods diff --git a/web/next.config.mjs b/web/next.config.mjs index 81f16d5cb..035d1e301 100644 --- a/web/next.config.mjs +++ b/web/next.config.mjs @@ -32,7 +32,7 @@ const nextConfig = { 'pino-pretty', 'encoding', 'perf_hooks', - 'async_hooks' + 'async_hooks', ) // Suppress contentlayer webpack cache warnings @@ -125,6 +125,12 @@ const nextConfig = { destination: 'https://discord.gg/mcWTGjgTj3', permanent: false, }, + { + source: '/releases', + destination: + 'https://github.com/CodebuffAI/codebuff-community/releases', + permanent: false, + }, ] }, images: { diff --git a/web/src/__tests__/e2e/home.spec.ts b/web/src/__tests__/e2e/home.spec.ts deleted file mode 100644 index eb3736ded..000000000 --- a/web/src/__tests__/e2e/home.spec.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { expect, test } from '@playwright/test' - -test('has title', async ({ page }) => { - await page.goto('/') - await expect(page).toHaveTitle(/Codebuff/) -}) - -test('renders main sections', async ({ page }) => { - await page.goto('/') - - // Wait for the main content to be visible - await page.waitForSelector('main') - - // Check for key elements - await expect( - page.getByRole('heading', { name: /Supercharge/i }) - ).toBeVisible() - await expect( - page.getByRole('heading', { name: /Your Codebase/i }) - ).toBeVisible() - await expect( - page.getByRole('heading', { name: /Direct Your Codebase/i }) - ).toBeVisible() - await expect( - page.getByRole('heading', { name: /Better and Better Over Time/i }) - ).toBeVisible() -}) diff --git a/web/src/api/v1/chat/__tests__/completions.test.ts b/web/src/api/v1/chat/__tests__/completions.test.ts index a23ae4a04..96bdd9751 100644 --- a/web/src/api/v1/chat/__tests__/completions.test.ts +++ 
b/web/src/api/v1/chat/__tests__/completions.test.ts @@ -66,14 +66,14 @@ describe('/api/v1/chat/completions POST endpoint', () => { } }) - mockGetAgentRunFromId = mock((async ({ agentRunId }: any) => { - if (agentRunId === 'run-123') { + mockGetAgentRunFromId = mock((async ({ runId }: any) => { + if (runId === 'run-123') { return { agent_id: 'agent-123', status: 'running', } } - if (agentRunId === 'run-completed') { + if (runId === 'run-completed') { return { agent_id: 'agent-123', status: 'completed', @@ -229,7 +229,7 @@ describe('/api/v1/chat/completions POST endpoint', () => { expect(body).toEqual({ message: 'Invalid JSON in request body' }) }) - it('returns 400 when agent_run_id is missing', async () => { + it('returns 400 when run_id is missing', async () => { const req = new NextRequest( 'http://localhost:3000/api/v1/chat/completions', { @@ -252,7 +252,7 @@ describe('/api/v1/chat/completions POST endpoint', () => { expect(response.status).toBe(400) const body = await response.json() - expect(body).toEqual({ message: 'No agentRunId found in request body' }) + expect(body).toEqual({ message: 'No runId found in request body' }) }) it('returns 400 when agent run not found', async () => { @@ -263,7 +263,7 @@ describe('/api/v1/chat/completions POST endpoint', () => { headers: { Authorization: 'Bearer test-api-key-123' }, body: JSON.stringify({ stream: true, - codebuff_metadata: { agent_run_id: 'run-nonexistent' }, + codebuff_metadata: { run_id: 'run-nonexistent' }, }), }, ) @@ -282,7 +282,7 @@ describe('/api/v1/chat/completions POST endpoint', () => { expect(response.status).toBe(400) const body = await response.json() expect(body).toEqual({ - message: 'agentRunId Not Found: run-nonexistent', + message: 'runId Not Found: run-nonexistent', }) }) @@ -294,7 +294,7 @@ describe('/api/v1/chat/completions POST endpoint', () => { headers: { Authorization: 'Bearer test-api-key-123' }, body: JSON.stringify({ stream: true, - codebuff_metadata: { agent_run_id: 'run-completed' 
}, + codebuff_metadata: { run_id: 'run-completed' }, + }), }, ) @@ -313,7 +313,7 @@ describe('/api/v1/chat/completions POST endpoint', () => { expect(response.status).toBe(400) const body = await response.json() expect(body).toEqual({ - message: 'agentRunId Not Running: run-completed', + message: 'runId Not Running: run-completed', }) }) }) @@ -327,7 +327,7 @@ describe('/api/v1/chat/completions POST endpoint', () => { headers: { Authorization: 'Bearer test-api-key-no-credits' }, body: JSON.stringify({ stream: true, - codebuff_metadata: { agent_run_id: 'run-123' }, + codebuff_metadata: { run_id: 'run-123' }, }), }, ) @@ -362,7 +362,7 @@ describe('/api/v1/chat/completions POST endpoint', () => { body: JSON.stringify({ stream: true, codebuff_metadata: { - agent_run_id: 'run-123', + run_id: 'run-123', client_id: 'test-client-id-123', client_request_id: 'test-client-session-id-123', }, @@ -400,7 +400,7 @@ describe('/api/v1/chat/completions POST endpoint', () => { body: JSON.stringify({ stream: false, codebuff_metadata: { - agent_run_id: 'run-123', + run_id: 'run-123', client_id: 'test-client-id-123', client_request_id: 'test-client-session-id-123', }, diff --git a/web/src/app/api/releases/download/[version]/[filename]/route.ts b/web/src/app/api/releases/download/[version]/[filename]/route.ts index fba55417a..b7ac5eea9 100644 --- a/web/src/app/api/releases/download/[version]/[filename]/route.ts +++ b/web/src/app/api/releases/download/[version]/[filename]/route.ts @@ -19,4 +19,4 @@ export async function GET( const downloadUrl = `https://github.com/CodebuffAI/codebuff-community/releases/download/v${version}/${filename}` return NextResponse.redirect(downloadUrl, 302) -} \ No newline at end of file +}