diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a72a683fc..9012e6c11 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -5,10 +5,9 @@ on: branches: [main] pull_request: branches: [main] - workflow_dispatch: jobs: - unit: + test: runs-on: ubuntu-latest steps: @@ -23,203 +22,8 @@ jobs: - name: Install dependencies run: npm ci - - name: Lint - run: npm run lint - - - name: Format check - run: npm run format:check - - name: Type check run: npm run typecheck - name: Run tests run: npm test - - e2e: - runs-on: ubuntu-latest - timeout-minutes: 20 - permissions: - contents: write - pull-requests: write - - strategy: - fail-fast: false - matrix: - config: - - name: base - env: {} - - name: telegram - env: - TELEGRAM_BOT_TOKEN: "fake-telegram-bot-token-for-e2e" - TELEGRAM_DM_POLICY: "pairing" - - name: discord - env: - DISCORD_BOT_TOKEN: "fake-discord-bot-token-for-e2e" - DISCORD_DM_POLICY: "pairing" - - name: workers-ai - env: - CF_AI_GATEWAY_MODEL: "workers-ai/@cf/openai/gpt-oss-120b" - - name: e2e (${{ matrix.config.name }}) - - steps: - - uses: actions/checkout@v4 - - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: 22 - cache: npm - - - name: Install dependencies - run: npm ci - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - with: - terraform_wrapper: false - - - name: Install system dependencies - run: sudo apt-get update -qq && sudo apt-get install -y -qq ffmpeg imagemagick bc - - - name: Install cctr - uses: taiki-e/install-action@v2 - with: - tool: cctr - - - name: Install plwr - uses: taiki-e/install-action@v2 - with: - tool: plwr@0.7.2 - - - name: Install Playwright browsers - run: npm install -g playwright && npx playwright install --with-deps chromium - - - name: Run E2E tests (${{ matrix.config.name }}) - id: e2e - continue-on-error: true - env: - # Cloud infrastructure credentials (from repo secrets with E2E_ prefix) - CLOUDFLARE_API_TOKEN: ${{ 
secrets.E2E_CLOUDFLARE_API_TOKEN }} - CF_ACCOUNT_ID: ${{ secrets.E2E_CF_ACCOUNT_ID }} - WORKERS_SUBDOMAIN: ${{ secrets.E2E_WORKERS_SUBDOMAIN }} - CF_ACCESS_TEAM_DOMAIN: ${{ secrets.E2E_CF_ACCESS_TEAM_DOMAIN }} - R2_ACCESS_KEY_ID: ${{ secrets.E2E_R2_ACCESS_KEY_ID }} - R2_SECRET_ACCESS_KEY: ${{ secrets.E2E_R2_SECRET_ACCESS_KEY }} - # AI provider โ€” Cloudflare AI Gateway (preferred) - CLOUDFLARE_AI_GATEWAY_API_KEY: ${{ secrets.CLOUDFLARE_AI_GATEWAY_API_KEY }} - CF_AI_GATEWAY_ACCOUNT_ID: ${{ secrets.CF_AI_GATEWAY_ACCOUNT_ID }} - CF_AI_GATEWAY_GATEWAY_ID: ${{ secrets.CF_AI_GATEWAY_GATEWAY_ID }} - # AI provider โ€” legacy (still supported) - AI_GATEWAY_API_KEY: ${{ secrets.AI_GATEWAY_API_KEY }} - AI_GATEWAY_BASE_URL: ${{ secrets.AI_GATEWAY_BASE_URL }} - # Unique test run ID for parallel isolation - E2E_TEST_RUN_ID: ${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.config.name }} - # Matrix-specific config - TELEGRAM_BOT_TOKEN: ${{ matrix.config.env.TELEGRAM_BOT_TOKEN }} - TELEGRAM_DM_POLICY: ${{ matrix.config.env.TELEGRAM_DM_POLICY }} - DISCORD_BOT_TOKEN: ${{ matrix.config.env.DISCORD_BOT_TOKEN }} - DISCORD_DM_POLICY: ${{ matrix.config.env.DISCORD_DM_POLICY }} - CF_AI_GATEWAY_MODEL: ${{ matrix.config.env.CF_AI_GATEWAY_MODEL }} - run: cctr -vv test/e2e - - - name: Generate video thumbnail - id: video - if: always() - run: | - if ls /tmp/moltworker-e2e-videos/*.mp4 1>/dev/null 2>&1; then - for mp4 in /tmp/moltworker-e2e-videos/*.mp4; do - thumb="${mp4%.mp4}.png" - - # Extract middle frame as thumbnail - duration=$(ffprobe -v error -show_entries format=duration -of csv=p=0 "$mp4") - midpoint=$(echo "$duration / 2" | bc -l) - ffmpeg -y -ss "$midpoint" -i "$mp4" -vframes 1 -update 1 -q:v 2 "$thumb" - - # Add play button overlay - width=$(identify -format '%w' "$thumb") - height=$(identify -format '%h' "$thumb") - cx=$((width / 2)) - cy=$((height / 2)) - convert "$thumb" \ - -fill 'rgba(0,0,0,0.6)' -draw "circle ${cx},${cy} $((cx+50)),${cy}" \ - -fill 'white' -draw 
"polygon $((cx-15)),$((cy-25)) $((cx-15)),$((cy+25)) $((cx+30)),${cy}" \ - "$thumb" - - echo "video_path=$mp4" >> $GITHUB_OUTPUT - echo "video_name=$(basename $mp4)" >> $GITHUB_OUTPUT - echo "thumb_path=$thumb" >> $GITHUB_OUTPUT - echo "thumb_name=$(basename $thumb)" >> $GITHUB_OUTPUT - done - echo "has_video=true" >> $GITHUB_OUTPUT - else - echo "has_video=false" >> $GITHUB_OUTPUT - fi - - - name: Prepare video for upload - id: prepare - if: always() && steps.video.outputs.has_video == 'true' - run: | - mkdir -p /tmp/e2e-video-upload/videos/${{ github.run_id }}-${{ matrix.config.name }} - cp "${{ steps.video.outputs.video_path }}" /tmp/e2e-video-upload/videos/${{ github.run_id }}-${{ matrix.config.name }}/ - cp "${{ steps.video.outputs.thumb_path }}" /tmp/e2e-video-upload/videos/${{ github.run_id }}-${{ matrix.config.name }}/ - echo "video_url=https://github.com/${{ github.repository }}/raw/e2e-artifacts-${{ matrix.config.name }}/videos/${{ github.run_id }}-${{ matrix.config.name }}/${{ steps.video.outputs.video_name }}" >> $GITHUB_OUTPUT - echo "thumb_url=https://github.com/${{ github.repository }}/raw/e2e-artifacts-${{ matrix.config.name }}/videos/${{ github.run_id }}-${{ matrix.config.name }}/${{ steps.video.outputs.thumb_name }}" >> $GITHUB_OUTPUT - - - name: Upload video to e2e-artifacts branch - if: always() && steps.video.outputs.has_video == 'true' - uses: peaceiris/actions-gh-pages@v4 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: /tmp/e2e-video-upload - publish_branch: e2e-artifacts-${{ matrix.config.name }} - keep_files: true - - - name: Delete old video comments - if: always() && github.event_name == 'pull_request' - uses: actions/github-script@v7 - with: - script: | - const marker = ''; - const { data: comments } = await github.rest.issues.listComments({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - }); - for (const comment of comments) { - if (comment.body.includes(marker)) { - 
await github.rest.issues.deleteComment({ - owner: context.repo.owner, - repo: context.repo.repo, - comment_id: comment.id, - }); - } - } - - - name: Comment on PR with video - if: always() && github.event_name == 'pull_request' && steps.prepare.outputs.video_url - uses: peter-evans/create-or-update-comment@v4 - with: - issue-number: ${{ github.event.pull_request.number }} - body: | - - ## E2E Test Recording (${{ matrix.config.name }}) - - ${{ steps.e2e.outcome == 'success' && 'โœ… Tests passed' || 'โŒ Tests failed' }} - - [![E2E Test Video](${{ steps.prepare.outputs.thumb_url }})](${{ steps.prepare.outputs.video_url }}) - - - name: Add video link to summary - if: always() - run: | - echo "## E2E Test Recording" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - if [ "${{ steps.video.outputs.has_video }}" == "true" ]; then - echo "๐Ÿ“น [Download video](${{ steps.prepare.outputs.video_url }})" >> $GITHUB_STEP_SUMMARY - else - echo "โš ๏ธ No video recording found" >> $GITHUB_STEP_SUMMARY - fi - - - name: Fail if E2E tests failed - if: steps.e2e.outcome == 'failure' - run: exit 1 diff --git a/.gitignore b/.gitignore index 47777a5f1..215d6f0c0 100644 --- a/.gitignore +++ b/.gitignore @@ -37,6 +37,16 @@ Thumbs.db # Docker build artifacts *.tar +# Local Claude settings +.claude/ + +# Clawdbot runtime config (contains tokens) +clawdbot/ +.clawdhub/ + +# Custom skills (user-specific) +skills/prompt-guard/ + # Veta agent memory .veta/ @@ -60,4 +70,4 @@ test/e2e/.dev.vars .wrangler-e2e-*.jsonc # npm config -.npmrc \ No newline at end of file +.npmrc diff --git a/AGENT_COMMS_SETUP.md b/AGENT_COMMS_SETUP.md new file mode 100644 index 000000000..524fde964 --- /dev/null +++ b/AGENT_COMMS_SETUP.md @@ -0,0 +1,204 @@ +# Agent Communication System - Setup Guide + +This guide will help you deploy and configure the inter-agent communication system. 
+ +## Overview + +The system allows multiple AI agents (like `jihwan_cat` and `jino`) to communicate with each other via: +- **Layer 1**: JSONL file-based messaging (bypasses Telegram bot-to-bot restrictions) +- **Layer 2**: Automatic mirroring to Telegram group (so you can observe and intervene) + +## Deployment Steps + +### 1. Set Environment Variable (Optional but Recommended) + +If you want messages mirrored to Telegram, set the group chat ID: + +```bash +cd "/Users/mac/Dropbox/๋‚ด Mac (MacBook-Air.local)/Downloads/moltworker" + +# Option A: Use your existing owner ID (messages go to DM) +# Already set if you have TELEGRAM_OWNER_ID + +# Option B: Create a group chat and use that ID +# 1. Create a Telegram group with your bot +# 2. Get the chat ID (it will be negative, like -1001234567890) +# 3. Set the secret: +echo "-1001234567890" | npx wrangler secret put TELEGRAM_AGENT_GROUP_ID --name moltbot-sandbox +``` + +### 2. Deploy the Worker + +```bash +cd "/Users/mac/Dropbox/๋‚ด Mac (MacBook-Air.local)/Downloads/moltworker" +npm run deploy +``` + +This will: +- Build and deploy the worker +- Upload all scripts including `scripts/agent-comms/*` +- The container will start with the new `start-openclaw.sh` + +### 3. Wait for Container to Start + +The container takes about 60-90 seconds to fully initialize. You can check status: + +```bash +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/processes" +``` + +Look for `openclaw gateway` in the running processes. + +### 4. Verify Setup + +Run the setup verification script via the debug CLI: + +```bash +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'node /root/clawd/moltworker/scripts/agent-comms/setup-agents.js' | jq -sRr @uri)" +``` + +This will check: +- โœ“ All scripts are present +- โœ“ TOOLS.md is accessible +- โœ“ Message bus is initialized +- โœ“ Environment variables are set + +### 5. 
Test the System + +Run the test script: + +```bash +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'bash /root/clawd/moltworker/scripts/agent-comms/test-system.sh' | jq -sRr @uri)" +``` + +This will: +- Send 3 test messages +- Show messages in the bus +- Test the Telegram mirroring (if configured) + +### 6. Restart Gateway (to Pick Up Changes) + +```bash +curl -s -X POST "https://moltbot-sandbox.astin-43b.workers.dev/api/admin/gateway/restart" +``` + +Wait ~60s for the gateway to restart, then the message watcher will start automatically. + +## Using the System + +### For Your Agents + +Agents can send messages using the `exec` tool in OpenClaw: + +**Example prompt to jihwan_cat:** +``` +Send a message to jino asking them to help with data analysis: +exec: node /root/clawd/moltworker/scripts/agent-comms/send-message.js --from jihwan_cat --to jino --message "Can you help analyze the latest metrics?" +``` + +The message will: +1. Be written to `/root/clawd/agent-messages.jsonl` +2. Within 30 seconds, appear in your Telegram group/chat as: + ``` + [jihwan_cat โ†’ jino] 02/19 15:30 + Can you help analyze the latest metrics? 
+ ``` + +### For You (Human) + +- **Observe**: All agent-to-agent messages appear in Telegram +- **Intervene**: Reply in the group or send commands directly to agents +- **Monitor**: Check message bus file via debug CLI if needed + +## Troubleshooting + +### Messages Not Appearing in Telegram + +**Check if watcher is running:** +```bash +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'ps aux | grep watch-messages' | jq -sRr @uri)" +``` + +**Check watcher logs:** +```bash +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'tail -20 /tmp/r2-sync.log' | jq -sRr @uri)" +``` + +**Manually run watcher:** +```bash +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'node /root/clawd/moltworker/scripts/agent-comms/watch-messages.js' | jq -sRr @uri)" +``` + +### Check Message Bus File + +```bash +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'cat /root/clawd/agent-messages.jsonl | tail -10' | jq -sRr @uri)" +``` + +### Check if TOOLS.md is Loaded + +Agents should have TOOLS.md in their context. 
Verify: + +```bash +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'ls -la /root/clawd/ | grep TOOLS' | jq -sRr @uri)" +``` + +### Force Restart Background Services + +```bash +# Restart the entire gateway (this restarts all background loops) +curl -s -X POST "https://moltbot-sandbox.astin-43b.workers.dev/api/admin/gateway/restart" +``` + +## Advanced Usage + +### Broadcast Messages + +Send to all agents: +```bash +node /root/clawd/moltworker/scripts/agent-comms/send-message.js \ + --from jihwan_cat \ + --to all \ + --message "Announcement: maintenance window at 3pm" +``` + +### Read Messages Programmatically + +From an agent or script: +```javascript +const { readNewMessages, markAsRead } = require('/root/clawd/moltworker/scripts/agent-comms/message-bus'); + +// Get messages for jino +const messages = readNewMessages('jino'); +messages.forEach(msg => { + console.log(`From ${msg.from}: ${msg.message}`); +}); + +// Mark as read +if (messages.length > 0) { + markAsRead('jino', messages[messages.length - 1].id); +} +``` + +### Inspect Message History + +```bash +# Last 20 messages +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'tail -20 /root/clawd/agent-messages.jsonl' | jq -sRr @uri)" + +# Count total messages +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'wc -l /root/clawd/agent-messages.jsonl' | jq -sRr @uri)" +``` + +## Architecture Details + +See `scripts/agent-comms/README.md` for detailed architecture documentation. + +## Next Steps + +1. **Configure agents**: Update each agent's identity/personality to know about other agents +2. **Define workflows**: Decide which agent handles which types of tasks +3. **Monitor interactions**: Watch the Telegram group to see how agents coordinate +4. **Iterate**: Adjust agent prompts based on how they communicate + +Enjoy your multi-agent system! 
๐Ÿค–โœจ diff --git a/DEPLOYMENT_SUMMARY.md b/DEPLOYMENT_SUMMARY.md new file mode 100644 index 000000000..b63128dfb --- /dev/null +++ b/DEPLOYMENT_SUMMARY.md @@ -0,0 +1,118 @@ +# Agent Communication System - Deployment Summary + +## What Was Built + +A two-layer inter-agent communication system that allows `jihwan_cat` and `jino` to communicate via: + +### Layer 1: JSONL Message Bus +- File-based messaging at `/root/clawd/agent-messages.jsonl` +- Bypasses Telegram's bot-to-bot restriction +- Persistent across sessions + +### Layer 2: Telegram Mirroring +- Background watcher runs every 30s +- Mirrors all agent messages to Telegram group +- Human can observe and intervene + +## Files Created/Modified + +### New Files +``` +scripts/ +โ””โ”€โ”€ agent-comms/ + โ”œโ”€โ”€ README.md # Architecture documentation + โ”œโ”€โ”€ message-bus.js # Core library + โ”œโ”€โ”€ send-message.js # CLI to send messages + โ”œโ”€โ”€ watch-messages.js # Telegram mirroring daemon + โ”œโ”€โ”€ setup-agents.js # Setup verification script + โ””โ”€โ”€ test-system.sh # Testing script + +TOOLS.md # Agent documentation (auto-loaded by OpenClaw) +AGENT_COMMS_SETUP.md # Deployment guide (this file) +DEPLOYMENT_SUMMARY.md # This summary +``` + +### Modified Files +``` +Dockerfile # Added COPY for scripts/ and TOOLS.md +start-openclaw.sh # Added message watcher background loop +``` + +## Deployment Checklist + +- [ ] Commit changes to git +- [ ] Deploy via `npm run deploy` (builds Docker image and deploys to Cloudflare) +- [ ] Wait 60-90s for container to start +- [ ] (Optional) Set `TELEGRAM_AGENT_GROUP_ID` secret for group mirroring +- [ ] Verify setup via debug CLI +- [ ] Test with sample messages +- [ ] Restart gateway to activate watcher + +## Quick Start Commands + +### Deploy +```bash +cd "/Users/mac/Dropbox/๋‚ด Mac (MacBook-Air.local)/Downloads/moltworker" +npm run deploy +``` + +### Verify Setup +```bash +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'node 
/root/clawd/moltworker/scripts/agent-comms/setup-agents.js' | jq -sRr @uri)" +``` + +### Test System +```bash +curl -s "https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=$(echo 'bash /root/clawd/moltworker/scripts/agent-comms/test-system.sh' | jq -sRr @uri)" +``` + +### Restart Gateway +```bash +curl -s -X POST "https://moltbot-sandbox.astin-43b.workers.dev/api/admin/gateway/restart" +``` + +## Usage for Agents + +Agents use the `exec` tool to send messages: + +``` +node /root/clawd/moltworker/scripts/agent-comms/send-message.js \ + --from jihwan_cat \ + --to jino \ + --message "Can you help with this task?" +``` + +Messages appear in Telegram group within 30 seconds as: +``` +[jihwan_cat โ†’ jino] 02/19 15:30 +Can you help with this task? +``` + +## Environment Variables + +| Variable | Required | Purpose | +|----------|----------|---------| +| `TELEGRAM_AGENT_GROUP_ID` | Optional | Chat ID for message mirroring (defaults to `TELEGRAM_OWNER_ID`) | + +## Next Steps After Deployment + +1. **Test the system** with the test script +2. **Update agent identities** to know about each other +3. **Define agent roles** (dev, writing, finance, etc.) +4. **Monitor interactions** in Telegram group +5. 
**Scale to more agents** as needed + +## Architecture Benefits + +โœ… **Bypasses Telegram bot-to-bot restriction** - Uses file-based communication +โœ… **Observable** - All messages visible in Telegram +โœ… **Persistent** - Messages survive restarts +โœ… **Simple** - Just JSONL append operations +โœ… **Scalable** - Can add more agents easily +โœ… **Intervenable** - Human can jump in anytime + +## References + +- Full setup guide: `AGENT_COMMS_SETUP.md` +- Architecture details: `scripts/agent-comms/README.md` +- Agent documentation: `TOOLS.md` (auto-loaded into agent context) diff --git a/Dockerfile b/Dockerfile index 996fe9e91..de03a2555 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ FROM docker.io/cloudflare/sandbox:0.7.0 -# Install Node.js 22 (required by OpenClaw) and rclone (for R2 persistence) +# Install Node.js 22 (required by OpenClaw), rclone (for R2 persistence), and git (for repo clone) # The base image has Node 20, we need to replace it with Node 22 # Using direct binary download for reliability ENV NODE_VERSION=22.13.1 @@ -10,7 +10,7 @@ RUN ARCH="$(dpkg --print-architecture)" \ arm64) NODE_ARCH="arm64" ;; \ *) echo "Unsupported architecture: ${ARCH}" >&2; exit 1 ;; \ esac \ - && apt-get update && apt-get install -y xz-utils ca-certificates rclone \ + && apt-get update && apt-get install -y xz-utils ca-certificates rclone git \ && curl -fsSLk https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-${NODE_ARCH}.tar.xz -o /tmp/node.tar.xz \ && tar -xJf /tmp/node.tar.xz -C /usr/local --strip-components=1 \ && rm /tmp/node.tar.xz \ @@ -20,25 +20,36 @@ RUN ARCH="$(dpkg --print-architecture)" \ # Install pnpm globally RUN npm install -g pnpm -# Install OpenClaw (formerly clawdbot/moltbot) -# Pin to specific version for reproducible builds -RUN npm install -g openclaw@2026.2.3 \ +# Install OpenClaw (latest version) +RUN npm install -g openclaw@latest \ && openclaw --version +# Install ws module globally for CDP browser automation scripts 
+RUN npm install -g ws + +# Ensure globally installed modules are findable by scripts +ENV NODE_PATH=/usr/local/lib/node_modules + # Create OpenClaw directories -# Legacy .clawdbot paths are kept for R2 backup migration RUN mkdir -p /root/.openclaw \ && mkdir -p /root/clawd \ - && mkdir -p /root/clawd/skills + && mkdir -p /root/clawd/skills \ + && mkdir -p /root/clawd/warm-memory \ + && mkdir -p /root/clawd/.modification-history \ + && mkdir -p /root/clawd/brain-memory/reflections # Copy startup script -# Build cache bust: 2026-02-11-v30-rclone +# Build cache bust: 2026-02-19-v75-agent-comms COPY start-openclaw.sh /usr/local/bin/start-openclaw.sh RUN chmod +x /usr/local/bin/start-openclaw.sh # Copy custom skills COPY skills/ /root/clawd/skills/ +# Copy agent communication scripts +COPY scripts/ /root/clawd/moltworker/scripts/ +COPY TOOLS.md /root/clawd/moltworker/TOOLS.md + # Set working directory WORKDIR /root/clawd diff --git a/TOOLS.md b/TOOLS.md new file mode 100644 index 000000000..a88adf6b9 --- /dev/null +++ b/TOOLS.md @@ -0,0 +1,62 @@ +# Agent Tools & Capabilities + +This document describes the tools and capabilities available to AI agents. + +## Agent-to-Agent Communication + +You can communicate with other agents via the message bus. Messages are sent via file-based communication (Layer 1) and automatically mirrored to the Telegram group (Layer 2) so the human can observe. 
+ +### Available Agents + +- `jihwan_cat` - Main development agent (Moltworker/OpenClaw) +- `jino` - Secondary agent + +### Sending Messages to Other Agents + +Use the `exec` tool to send messages: + +``` +node /root/clawd/moltworker/scripts/agent-comms/send-message.js --from YOUR_NAME --to RECIPIENT --message "Your message here" +``` + +**Parameters:** +- `--from`: Your agent name (jihwan_cat or jino) +- `--to`: Recipient agent name, or "all" for broadcast +- `--message`: Your message content + +**Example:** +``` +node /root/clawd/moltworker/scripts/agent-comms/send-message.js --from jihwan_cat --to jino --message "Can you help analyze this data?" +``` + +### When to Use Agent Communication + +**DO use agent-to-agent messages when:** +- You need another agent's specialized expertise +- You want to delegate a subtask to another agent +- You need to coordinate work or avoid duplicate effort +- You want to share findings or results + +**DON'T use for:** +- Simple questions you can answer yourself +- Information you can look up directly +- Tasks that don't need coordination + +### How Messages Work + +1. **Layer 1 (Underground)**: Messages are written to `/root/clawd/agent-messages.jsonl` +2. **Layer 2 (Mirroring)**: A background watcher reads new messages and posts them to the Telegram group every 30s +3. The human can see all agent-to-agent communication and intervene if needed +4. 
Messages persist across sessions in the JSONL file + +### Reading Your Messages + +Messages addressed to you will appear in your context when the human forwards them or when you check the message bus file directly: + +``` +node -e "require('/root/clawd/moltworker/scripts/agent-comms/message-bus').readNewMessages('YOUR_NAME').forEach(m => console.log(m))" +``` + +## Other Tools + +(Additional tools will be documented here as they are added) diff --git a/package.json b/package.json index c2801f422..de1229950 100644 --- a/package.json +++ b/package.json @@ -7,6 +7,7 @@ "scripts": { "build": "vite build", "deploy": "npm run build && wrangler deploy", + "postdeploy": "bash scripts/postdeploy.sh", "dev": "vite dev", "start": "wrangler dev", "types": "wrangler types", diff --git a/scripts/agent-comms/README.md b/scripts/agent-comms/README.md new file mode 100644 index 000000000..39ce17685 --- /dev/null +++ b/scripts/agent-comms/README.md @@ -0,0 +1,127 @@ +# Agent Communication System + +Two-layer inter-agent communication system that bypasses Telegram's bot-to-bot messaging restriction. 
+ +## Architecture + +### Layer 1: JSONL Message Bus (Underground) +- Agents communicate via a shared JSONL file: `/root/clawd/agent-messages.jsonl` +- Messages are appended atomically (line-by-line) +- Each message has: `{id, from, to, message, timestamp}` +- Bypasses Telegram API restrictions on bot-to-bot communication + +### Layer 2: Telegram Mirroring (Observable) +- Background watcher (`watch-messages.js`) runs every 30s +- Reads new messages from JSONL and posts them to Telegram group +- Human can observe all agent communication in real-time +- Human can intervene by sending messages in the group + +## Files + +### Core Library +- `message-bus.js` - Core operations (send, read, mark as read/mirrored) + +### CLI Scripts +- `send-message.js` - Send a message to another agent +- `watch-messages.js` - Mirror new messages to Telegram (runs as background task) + +### Configuration +- `TOOLS.md` - Documentation for agents on how to use the system + +## Usage + +### For Agents (via exec tool) + +**Send a message:** +```bash +node /root/clawd/moltworker/scripts/agent-comms/send-message.js \ + --from jihwan_cat \ + --to jino \ + --message "Can you help analyze this data?" +``` + +**Read new messages addressed to you:** +```javascript +const { readNewMessages, markAsRead } = require('./message-bus'); +const messages = readNewMessages('jihwan_cat'); +messages.forEach(msg => { + console.log(`From ${msg.from}: ${msg.message}`); +}); +if (messages.length > 0) { + markAsRead('jihwan_cat', messages[messages.length - 1].id); +} +``` + +### For Humans (via Telegram) + +Just watch the group chat! All agent-to-agent messages will appear as: +``` +[jihwan_cat โ†’ jino] 02/19 15:30 +Can you help analyze this data? +``` + +You can intervene by: +1. Replying directly in the group +2. Sending commands to either agent +3. Manually sending messages via the CLI (for testing) + +## Setup + +The system is automatically set up by `start-openclaw.sh`: + +1. 
Scripts are deployed to `/root/clawd/moltworker/scripts/agent-comms/` +2. Background watcher starts after gateway is ready +3. Agents get `TOOLS.md` injected into their workspace + +### Required Environment Variables + +- `TELEGRAM_AGENT_GROUP_ID` - Telegram group/chat ID for mirroring (falls back to `TELEGRAM_OWNER_ID`) +- Optional: Watcher will skip Telegram mirroring if not set (messages still work via JSONL) + +## Message Flow Example + +``` +1. jihwan_cat executes: + node send-message.js --from jihwan_cat --to jino --message "Task complete" + +2. Message written to /root/clawd/agent-messages.jsonl: + {"id":"abc123","from":"jihwan_cat","to":"jino","message":"Task complete","timestamp":"2026-02-19T15:30:00Z"} + +3. Within 30s, watch-messages.js reads the new message + +4. Watcher posts to Telegram group: + [jihwan_cat โ†’ jino] 02/19 15:30 + Task complete + +5. jino (or human) sees the message and can respond +``` + +## Debugging + +**Check message bus file:** +```bash +cat /root/clawd/agent-messages.jsonl +``` + +**Check last read positions:** +```bash +cat /root/clawd/.agent-message-lastread +``` + +**Check mirror status:** +```bash +cat /root/clawd/.agent-message-mirrored +``` + +**Manually trigger watcher:** +```bash +node /root/clawd/moltworker/scripts/agent-comms/watch-messages.js +``` + +**Test sending a message:** +```bash +node /root/clawd/moltworker/scripts/agent-comms/send-message.js \ + --from test \ + --to all \ + --message "Test message" +``` diff --git a/scripts/agent-comms/message-bus.js b/scripts/agent-comms/message-bus.js new file mode 100755 index 000000000..63b2f213c --- /dev/null +++ b/scripts/agent-comms/message-bus.js @@ -0,0 +1,181 @@ +#!/usr/bin/env node +/** + * Agent Message Bus - Core operations for inter-agent communication via JSONL + * + * Layer 1: File-based message passing (bypasses Telegram bot-to-bot restriction) + * Layer 2: Messages are mirrored to Telegram group by watch-messages.js + */ + +const fs = require('fs'); +const 
path = require('path'); +const { randomUUID } = require('crypto'); + +const MESSAGE_BUS_FILE = '/root/clawd/agent-messages.jsonl'; +const LAST_READ_FILE = '/root/clawd/.agent-message-lastread'; + +/** + * Send a message to another agent + * @param {string} from - Sender agent name + * @param {string} to - Recipient agent name (or 'all' for broadcast) + * @param {string} message - Message content + * @returns {object} The message object that was written + */ +function sendMessage(from, to, message) { + const msg = { + id: randomUUID(), + from, + to, + message, + timestamp: new Date().toISOString(), + }; + + // Ensure message bus file exists + if (!fs.existsSync(MESSAGE_BUS_FILE)) { + fs.writeFileSync(MESSAGE_BUS_FILE, '', 'utf8'); + } + + // Append message as JSONL + fs.appendFileSync(MESSAGE_BUS_FILE, JSON.stringify(msg) + '\n', 'utf8'); + + console.log(`[MESSAGE-BUS] Sent: ${from} โ†’ ${to}`); + return msg; +} + +/** + * Read all messages from the bus + * @returns {Array} Array of message objects + */ +function readAllMessages() { + if (!fs.existsSync(MESSAGE_BUS_FILE)) { + return []; + } + + const content = fs.readFileSync(MESSAGE_BUS_FILE, 'utf8').trim(); + if (!content) return []; + + return content + .split('\n') + .filter(line => line.trim()) + .map(line => { + try { + return JSON.parse(line); + } catch (e) { + console.error('[MESSAGE-BUS] Failed to parse line:', line); + return null; + } + }) + .filter(msg => msg !== null); +} + +/** + * Read new messages since last check + * @param {string} agentName - Name of the agent reading messages + * @returns {Array} Array of new message objects + */ +function readNewMessages(agentName) { + const allMessages = readAllMessages(); + + // Load last read position for this agent + let lastReadId = null; + if (fs.existsSync(LAST_READ_FILE)) { + try { + const lastRead = JSON.parse(fs.readFileSync(LAST_READ_FILE, 'utf8')); + lastReadId = lastRead[agentName] || null; + } catch (e) { + // Ignore parse errors, start from 
beginning + } + } + + // Find messages after last read + const newMessages = []; + let foundLastRead = lastReadId === null; + + for (const msg of allMessages) { + if (!foundLastRead) { + if (msg.id === lastReadId) { + foundLastRead = true; + } + continue; + } + + // Include messages addressed to this agent or to 'all' + if (msg.to === agentName || msg.to === 'all') { + newMessages.push(msg); + } + } + + return newMessages; +} + +/** + * Mark messages as read up to a specific message ID + * @param {string} agentName - Name of the agent + * @param {string} messageId - Last message ID that was read + */ +function markAsRead(agentName, messageId) { + let lastRead = {}; + + if (fs.existsSync(LAST_READ_FILE)) { + try { + lastRead = JSON.parse(fs.readFileSync(LAST_READ_FILE, 'utf8')); + } catch (e) { + // Start fresh if parse fails + } + } + + lastRead[agentName] = messageId; + fs.writeFileSync(LAST_READ_FILE, JSON.stringify(lastRead, null, 2), 'utf8'); +} + +/** + * Get all new messages (for mirroring to Telegram) + * Returns messages that haven't been mirrored yet + */ +function getUnmirroredMessages() { + const MIRROR_MARKER_FILE = '/root/clawd/.agent-message-mirrored'; + + const allMessages = readAllMessages(); + + let lastMirroredId = null; + if (fs.existsSync(MIRROR_MARKER_FILE)) { + try { + const data = JSON.parse(fs.readFileSync(MIRROR_MARKER_FILE, 'utf8')); + lastMirroredId = data.lastId || null; + } catch (e) { + // Start from beginning if parse fails + } + } + + const unmirrored = []; + let foundLastMirrored = lastMirroredId === null; + + for (const msg of allMessages) { + if (!foundLastMirrored) { + if (msg.id === lastMirroredId) { + foundLastMirrored = true; + } + continue; + } + unmirrored.push(msg); + } + + return unmirrored; +} + +/** + * Mark messages as mirrored up to a specific message ID + */ +function markAsMirrored(messageId) { + const MIRROR_MARKER_FILE = '/root/clawd/.agent-message-mirrored'; + fs.writeFileSync(MIRROR_MARKER_FILE, JSON.stringify({ 
lastId: messageId }, null, 2), 'utf8'); +} + +module.exports = { + sendMessage, + readAllMessages, + readNewMessages, + markAsRead, + getUnmirroredMessages, + markAsMirrored, + MESSAGE_BUS_FILE, +}; diff --git a/scripts/agent-comms/send-message.js b/scripts/agent-comms/send-message.js new file mode 100755 index 000000000..fb156d194 --- /dev/null +++ b/scripts/agent-comms/send-message.js @@ -0,0 +1,35 @@ +#!/usr/bin/env node +/** + * CLI to send a message to another agent via the message bus + * Usage: node send-message.js --from jihwan_cat --to jino --message "Hello!" + */ + +const { sendMessage } = require('./message-bus'); + +const args = process.argv.slice(2); +const parseArgs = () => { + const parsed = {}; + for (let i = 0; i < args.length; i++) { + if (args[i].startsWith('--')) { + const key = args[i].slice(2); + const value = args[i + 1]; + parsed[key] = value; + i++; + } + } + return parsed; +}; + +const { from, to, message } = parseArgs(); + +if (!from || !to || !message) { + console.error('Usage: node send-message.js --from SENDER --to RECIPIENT --message "MESSAGE"'); + console.error('Example: node send-message.js --from jihwan_cat --to jino --message "Can you help with this task?"'); + process.exit(1); +} + +const msg = sendMessage(from, to, message); +console.log(`โœ“ Message sent: ${msg.id}`); +console.log(` From: ${from}`); +console.log(` To: ${to}`); +console.log(` Message: ${message}`); diff --git a/scripts/agent-comms/setup-agents.js b/scripts/agent-comms/setup-agents.js new file mode 100755 index 000000000..a0341b432 --- /dev/null +++ b/scripts/agent-comms/setup-agents.js @@ -0,0 +1,108 @@ +#!/usr/bin/env node +/** + * Setup script for configuring agent communication + * Run this after deployment to ensure agents are properly configured + */ + +const fs = require('fs'); +const path = require('path'); + +const CONFIG_DIR = '/root/.openclaw'; +const CONFIG_FILE = path.join(CONFIG_DIR, 'openclaw.json'); + +console.log('=== Agent Communication Setup 
===\n'); + +// 1. Verify message bus scripts exist +const SCRIPTS_DIR = '/root/clawd/moltworker/scripts/agent-comms'; +const requiredScripts = [ + 'message-bus.js', + 'send-message.js', + 'watch-messages.js', +]; + +console.log('1. Checking scripts...'); +let scriptsOk = true; +for (const script of requiredScripts) { + const scriptPath = path.join(SCRIPTS_DIR, script); + if (fs.existsSync(scriptPath)) { + console.log(` โœ“ ${script}`); + } else { + console.log(` โœ— ${script} NOT FOUND`); + scriptsOk = false; + } +} + +if (!scriptsOk) { + console.error('\nโŒ Some scripts are missing. Please deploy the moltworker directory.'); + process.exit(1); +} + +// 2. Verify TOOLS.md exists +console.log('\n2. Checking TOOLS.md...'); +const TOOLS_MD = '/root/clawd/moltworker/TOOLS.md'; +if (fs.existsSync(TOOLS_MD)) { + console.log(' โœ“ TOOLS.md exists'); +} else { + console.log(' โœ— TOOLS.md NOT FOUND'); + console.log(' Creating symlink to workspace...'); + const symlinkTarget = '/root/clawd/TOOLS.md'; + try { + fs.symlinkSync(TOOLS_MD, symlinkTarget); + console.log(` โœ“ Symlinked ${symlinkTarget} โ†’ ${TOOLS_MD}`); + } catch (e) { + console.error(` โœ— Failed to create symlink: ${e.message}`); + } +} + +// 3. Check OpenClaw config +console.log('\n3. 
Checking OpenClaw config...'); +if (!fs.existsSync(CONFIG_FILE)) { + console.log(' โš  Config not found (gateway may not be running yet)'); +} else { + try { + const config = JSON.parse(fs.readFileSync(CONFIG_FILE, 'utf8')); + const workspace = config?.agents?.defaults?.workspace; + console.log(` โœ“ Workspace: ${workspace}`); + + // Verify workspace has access to scripts + const workspaceScripts = path.join(workspace || '/root/clawd', 'moltworker/scripts/agent-comms'); + if (fs.existsSync(workspaceScripts)) { + console.log(' โœ“ Scripts accessible from workspace'); + } else { + console.log(' โš  Scripts may not be accessible from workspace'); + console.log(` Expected: ${workspaceScripts}`); + } + } catch (e) { + console.error(` โœ— Failed to parse config: ${e.message}`); + } +} + +// 4. Check environment variables +console.log('\n4. Checking environment variables...'); +const TELEGRAM_GROUP_ID = process.env.TELEGRAM_AGENT_GROUP_ID || process.env.TELEGRAM_OWNER_ID; +if (TELEGRAM_GROUP_ID) { + console.log(` โœ“ TELEGRAM_GROUP_ID: ${TELEGRAM_GROUP_ID}`); +} else { + console.log(' โš  TELEGRAM_AGENT_GROUP_ID not set (Telegram mirroring will be disabled)'); + console.log(' Set via: wrangler secret put TELEGRAM_AGENT_GROUP_ID'); +} + +// 5. Initialize message bus file +console.log('\n5. 
Initializing message bus...'); +const MESSAGE_BUS_FILE = '/root/clawd/agent-messages.jsonl'; +if (!fs.existsSync(MESSAGE_BUS_FILE)) { + fs.writeFileSync(MESSAGE_BUS_FILE, '', 'utf8'); + console.log(` โœ“ Created ${MESSAGE_BUS_FILE}`); +} else { + const lineCount = fs.readFileSync(MESSAGE_BUS_FILE, 'utf8').split('\n').filter(l => l.trim()).length; + console.log(` โœ“ Message bus exists (${lineCount} messages)`); +} + +console.log('\n=== Setup Complete ===\n'); +console.log('Agent communication system is ready!'); +console.log('\nAvailable agents:'); +console.log(' - jihwan_cat'); +console.log(' - jino'); +console.log('\nTest the system:'); +console.log(' node /root/clawd/moltworker/scripts/agent-comms/send-message.js \\'); +console.log(' --from jihwan_cat --to jino --message "Hello!"'); diff --git a/scripts/agent-comms/test-system.sh b/scripts/agent-comms/test-system.sh new file mode 100755 index 000000000..990640bb8 --- /dev/null +++ b/scripts/agent-comms/test-system.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# Test script for agent communication system +# Run this to verify the system is working + +set -e + +echo "=== Agent Communication System Test ===" +echo "" + +# Check if we're in the container +if [ ! -f "/root/.openclaw/openclaw.json" ]; then + echo "โš ๏ธ This script should be run inside the OpenClaw container" + echo " Use the debug CLI endpoint to run it:" + echo " curl 'https://moltbot-sandbox.astin-43b.workers.dev/debug/cli?cmd=bash%20/root/clawd/moltworker/scripts/agent-comms/test-system.sh'" + exit 1 +fi + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +echo "1. Testing message bus core functions..." +node -e " +const bus = require('$SCRIPT_DIR/message-bus.js'); +console.log(' โœ“ Message bus module loaded'); +console.log(' โœ“ Message bus file:', bus.MESSAGE_BUS_FILE); +" + +echo "" +echo "2. Sending test messages..." 
+node "$SCRIPT_DIR/send-message.js" --from jihwan_cat --to jino --message "Test message 1: Hello from jihwan_cat" +node "$SCRIPT_DIR/send-message.js" --from jino --to jihwan_cat --message "Test message 2: Hello from jino" +node "$SCRIPT_DIR/send-message.js" --from jihwan_cat --to all --message "Test message 3: Broadcast to all" + +echo "" +echo "3. Reading messages from the bus..." +node -e " +const bus = require('$SCRIPT_DIR/message-bus.js'); +const messages = bus.readAllMessages(); +console.log(\` Found \${messages.length} total message(s) in bus\`); +messages.slice(-3).forEach(msg => { + console.log(\` - [\${msg.from} โ†’ \${msg.to}] \${msg.message}\`); +}); +" + +echo "" +echo "4. Testing unmirrored messages..." +node -e " +const bus = require('$SCRIPT_DIR/message-bus.js'); +const unmirrored = bus.getUnmirroredMessages(); +console.log(\` Found \${unmirrored.length} unmirrored message(s)\`); +" + +echo "" +echo "5. Testing message watcher (dry run)..." +if [ -n "$TELEGRAM_AGENT_GROUP_ID" ] || [ -n "$TELEGRAM_OWNER_ID" ]; then + echo " Telegram group ID: ${TELEGRAM_AGENT_GROUP_ID:-$TELEGRAM_OWNER_ID}" + echo " Running watcher..." + node "$SCRIPT_DIR/watch-messages.js" 2>&1 | head -20 +else + echo " โš ๏ธ TELEGRAM_AGENT_GROUP_ID not set, skipping Telegram mirror test" + echo " The watcher will still mark messages as mirrored, just won't post to Telegram" + node "$SCRIPT_DIR/watch-messages.js" 2>&1 | head -20 +fi + +echo "" +echo "=== Test Complete ===" +echo "" +echo "โœ“ Message bus is working!" +echo "" +echo "Next steps:" +echo " 1. Send messages from your agents using the exec tool" +echo " 2. Watch the Telegram group for mirrored messages" +echo " 3. 
Try having agents communicate with each other" diff --git a/scripts/agent-comms/watch-messages.js b/scripts/agent-comms/watch-messages.js new file mode 100755 index 000000000..0468d1c38 --- /dev/null +++ b/scripts/agent-comms/watch-messages.js @@ -0,0 +1,113 @@ +#!/usr/bin/env node +/** + * Watch for new messages on the message bus and mirror them to Telegram + * This runs as a cron job (every 30s or so) + * + * Layer 2: Telegram Mirroring + * - Reads unmirrored messages from JSONL file + * - Posts them to Telegram group via OpenClaw CLI + * - Marks messages as mirrored + */ + +const { getUnmirroredMessages, markAsMirrored } = require('./message-bus'); +const { execSync } = require('child_process'); +const fs = require('fs'); + +const TELEGRAM_GROUP_ID = process.env.TELEGRAM_AGENT_GROUP_ID || process.env.TELEGRAM_OWNER_ID; +const OPERATOR_TOKEN_PATH = '/root/.openclaw/identity/device-auth.json'; + +/** + * Get operator token for OpenClaw CLI commands + */ +function getOperatorToken() { + try { + const deviceAuth = JSON.parse(fs.readFileSync(OPERATOR_TOKEN_PATH, 'utf8')); + return deviceAuth?.tokens?.operator?.token || null; + } catch (e) { + return null; + } +} + +/** + * Send a message to Telegram via OpenClaw CLI + */ +function sendToTelegram(text) { + if (!TELEGRAM_GROUP_ID) { + console.log('[WATCH] No TELEGRAM_GROUP_ID set, skipping Telegram mirror'); + return false; + } + + const token = getOperatorToken(); + const tokenFlag = token ? 
`--token ${token}` : ''; + + try { + // Escape single quotes in the message + const escapedText = text.replace(/'/g, "'\\''"); + + const cmd = `openclaw send telegram ${TELEGRAM_GROUP_ID} '${escapedText}' ${tokenFlag} --url ws://127.0.0.1:18789`; + + execSync(cmd, { + encoding: 'utf8', + stdio: 'pipe', + timeout: 10000, + }); + + return true; + } catch (e) { + console.error('[WATCH] Failed to send to Telegram:', e.message); + return false; + } +} + +/** + * Format a message for Telegram display + */ +function formatMessage(msg) { + const timestamp = new Date(msg.timestamp).toLocaleString('en-US', { + timeZone: 'Asia/Seoul', + month: '2-digit', + day: '2-digit', + hour: '2-digit', + minute: '2-digit', + }); + + return `[${msg.from} โ†’ ${msg.to}] ${timestamp}\n${msg.message}`; +} + +/** + * Main watcher logic + */ +function watchAndMirror() { + const newMessages = getUnmirroredMessages(); + + if (newMessages.length === 0) { + console.log('[WATCH] No new messages to mirror'); + return; + } + + console.log(`[WATCH] Found ${newMessages.length} new message(s) to mirror`); + + for (const msg of newMessages) { + const formatted = formatMessage(msg); + console.log(`[WATCH] Mirroring: ${msg.from} โ†’ ${msg.to}`); + + if (sendToTelegram(formatted)) { + console.log(`[WATCH] โœ“ Mirrored message ${msg.id}`); + } else { + console.log(`[WATCH] โœ— Failed to mirror message ${msg.id}`); + } + + // Mark as mirrored even if send failed (to avoid retry loops) + markAsMirrored(msg.id); + } + + console.log(`[WATCH] Mirroring complete`); +} + +// Run the watcher +try { + watchAndMirror(); +} catch (e) { + console.error('[WATCH] Error:', e.message); + process.exit(1); +} diff --git a/scripts/google-auth-setup.js b/scripts/google-auth-setup.js new file mode 100755 index 000000000..960f1029f --- /dev/null +++ b/scripts/google-auth-setup.js @@ -0,0 +1,188 @@ +#!/usr/bin/env node +/** + * Google Calendar OAuth Setup Helper + * + * One-time script to obtain a refresh token for Google Calendar 
API access. + * Opens browser for Google authorization, catches the redirect, and exchanges + * the authorization code for a refresh token. + * + * Prerequisites: + * 1. Go to https://console.cloud.google.com + * 2. Create a project (or use existing) + * 3. Enable "Google Calendar API" in the API Library + * 4. Go to Credentials -> Create Credentials -> OAuth 2.0 Client ID + * 5. Application type: "Web application" + * 6. Add authorized redirect URI: http://localhost:3000/callback + * 7. Copy the Client ID and Client Secret + * + * Usage: + * GOOGLE_CLIENT_ID="your-id" GOOGLE_CLIENT_SECRET="your-secret" node scripts/google-auth-setup.js + * + * Or just run it and enter credentials when prompted: + * node scripts/google-auth-setup.js + */ + +import http from 'node:http'; +import { URL } from 'node:url'; +import { exec } from 'node:child_process'; +import readline from 'node:readline'; + +const PORT = 3000; +const REDIRECT_URI = `http://localhost:${PORT}/callback`; +const SCOPES = 'https://www.googleapis.com/auth/calendar'; +const TOKEN_URL = 'https://oauth2.googleapis.com/token'; + +function openBrowser(url) { + const platform = process.platform; + const cmd = + platform === 'darwin' ? 'open' : platform === 'win32' ? 
'start' : 'xdg-open'; + exec(`${cmd} "${url}"`); +} + +function prompt(question) { + const rl = readline.createInterface({ input: process.stdin, output: process.stdout }); + return new Promise((resolve) => { + rl.question(question, (answer) => { + rl.close(); + resolve(answer.trim()); + }); + }); +} + +async function getCredentials() { + let clientId = process.env.GOOGLE_CLIENT_ID; + let clientSecret = process.env.GOOGLE_CLIENT_SECRET; + + if (!clientId) { + clientId = await prompt('Enter your Google Client ID: '); + } + if (!clientSecret) { + clientSecret = await prompt('Enter your Google Client Secret: '); + } + + if (!clientId || !clientSecret) { + console.error('Error: Both Client ID and Client Secret are required.'); + process.exit(1); + } + + return { clientId, clientSecret }; +} + +async function exchangeCodeForTokens(code, clientId, clientSecret) { + const res = await fetch(TOKEN_URL, { + method: 'POST', + headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, + body: new URLSearchParams({ + code, + client_id: clientId, + client_secret: clientSecret, + redirect_uri: REDIRECT_URI, + grant_type: 'authorization_code', + }), + }); + + if (!res.ok) { + const text = await res.text(); + throw new Error(`Token exchange failed (${res.status}): ${text}`); + } + + return res.json(); +} + +async function main() { + console.log('=== Google Calendar OAuth Setup ===\n'); + + const { clientId, clientSecret } = await getCredentials(); + + // Build authorization URL + const authParams = new URLSearchParams({ + client_id: clientId, + redirect_uri: REDIRECT_URI, + response_type: 'code', + scope: SCOPES, + access_type: 'offline', + prompt: 'consent', + }); + const authUrl = `https://accounts.google.com/o/oauth2/v2/auth?${authParams}`; + + // Start local server to catch the redirect + return new Promise((resolve) => { + const server = http.createServer(async (req, res) => { + const url = new URL(req.url, `http://localhost:${PORT}`); + + if (url.pathname !== 
'/callback') { + res.writeHead(404); + res.end('Not found'); + return; + } + + const code = url.searchParams.get('code'); + const error = url.searchParams.get('error'); + + if (error) { + res.writeHead(200, { 'Content-Type': 'text/html' }); + res.end(`

Authorization failed

Error: ${error}

You can close this tab.

`); + console.error(`\nAuthorization failed: ${error}`); + server.close(); + process.exit(1); + } + + if (!code) { + res.writeHead(400, { 'Content-Type': 'text/html' }); + res.end('

No authorization code received

You can close this tab.

'); + return; + } + + // Exchange code for tokens + try { + console.log('\nReceived authorization code. Exchanging for tokens...'); + const tokens = await exchangeCodeForTokens(code, clientId, clientSecret); + + res.writeHead(200, { 'Content-Type': 'text/html' }); + res.end( + '

Success!

' + + '

Refresh token has been obtained. You can close this tab and return to the terminal.

' + ); + + console.log('\n=== SUCCESS ===\n'); + console.log(`Refresh Token: ${tokens.refresh_token}\n`); + console.log('--- Set Wrangler secrets with these commands: ---\n'); + console.log( + `echo "${clientId}" | npx wrangler secret put GOOGLE_CLIENT_ID --name moltbot-sandbox` + ); + console.log( + `echo "${clientSecret}" | npx wrangler secret put GOOGLE_CLIENT_SECRET --name moltbot-sandbox` + ); + console.log( + `echo "${tokens.refresh_token}" | npx wrangler secret put GOOGLE_REFRESH_TOKEN --name moltbot-sandbox` + ); + console.log( + '\nThen deploy and restart the container:' + ); + console.log(' npm run deploy'); + console.log( + ' # Restart via admin UI or: fetch(\'/api/admin/gateway/restart\', { method: \'POST\', credentials: \'include\' })' + ); + } catch (err) { + res.writeHead(500, { 'Content-Type': 'text/html' }); + res.end(`

Token exchange failed

${err.message}

`); + console.error(`\nToken exchange failed: ${err.message}`); + } + + server.close(); + resolve(); + }); + + server.listen(PORT, () => { + console.log(`Local server listening on http://localhost:${PORT}`); + console.log('\nOpening browser for Google authorization...'); + console.log(`\nIf the browser doesn't open, visit this URL manually:\n${authUrl}\n`); + openBrowser(authUrl); + }); + }); +} + +main().catch((err) => { + console.error(`[ERROR] ${err.message}`); + process.exit(1); +}); diff --git a/scripts/postdeploy.sh b/scripts/postdeploy.sh new file mode 100755 index 000000000..8e14a8fef --- /dev/null +++ b/scripts/postdeploy.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# Post-deploy verification: check that the gateway becomes healthy after deploy. +# The container keeps old processes alive across deploys, so this script +# polls /api/status to verify the gateway is responsive. + +WORKER_URL="${WORKER_URL:-https://moltbot-sandbox.astin-43b.workers.dev}" +MAX_ATTEMPTS=30 +POLL_INTERVAL=10 + +echo "" +echo "=== Post-Deploy Verification ===" +echo "Worker URL: $WORKER_URL" +echo "Waiting 10s for deploy propagation..." +sleep 10 + +for i in $(seq 1 $MAX_ATTEMPTS); do + RESPONSE=$(curl -s --max-time 10 "$WORKER_URL/api/status" 2>/dev/null) + STATUS=$(echo "$RESPONSE" | grep -o '"ok":true') + + if [ -n "$STATUS" ]; then + echo "Gateway is healthy! (attempt $i/$MAX_ATTEMPTS)" + echo "Response: $RESPONSE" + echo "" + echo "NOTE: Container may still be running old code." + echo "To pick up new startup script changes, restart the gateway:" + echo " curl -X POST $WORKER_URL/api/admin/gateway/restart (requires CF Access auth)" + exit 0 + fi + + echo "Waiting for gateway... 
(attempt $i/$MAX_ATTEMPTS) - $RESPONSE" + sleep $POLL_INTERVAL +done + +echo "" +echo "WARNING: Gateway did not become healthy within $((MAX_ATTEMPTS * POLL_INTERVAL))s" +echo "You may need to manually restart:" +echo " fetch('$WORKER_URL/api/admin/gateway/restart', { method: 'POST', credentials: 'include' })" +exit 1 diff --git a/skills/CLAUDE.md b/skills/CLAUDE.md new file mode 100644 index 000000000..ff99afe07 --- /dev/null +++ b/skills/CLAUDE.md @@ -0,0 +1,39 @@ +# Agent Instructions + +## ๋‚˜๋Š” ๋ˆ„๊ตฌ์ธ๊ฐ€ +์˜ค๋„ˆ์˜ ๊ฐœ์ธ AI ์–ด์‹œ์Šคํ„ดํŠธ. ํ…”๋ ˆ๊ทธ๋žจ์„ ํ†ตํ•ด 24์‹œ๊ฐ„ ๋Œ€ํ™” ๊ฐ€๋Šฅ. ๊ฐ™์ด ์„ฑ์žฅํ•˜๋Š” ํŒŒํŠธ๋„ˆ. + +## ์„ฑ๊ฒฉ & ๋Œ€ํ™” ์Šคํƒ€์ผ +- ๊ธฐ๋ณธ ํ•œ๊ตญ์–ด, ์ƒ๋Œ€ ์–ธ์–ด์— ๋งž์ถค. ๋ฐ˜๋ง ์‚ฌ์šฉ, ์นœํ•œ ํ˜•/๋™์ƒ์ฒ˜๋Ÿผ. +- ํ•ต์‹ฌ๋งŒ ์งง๊ฒŒ. ํ•œ๋‘ ์ค„์ด๋ฉด ์ถฉ๋ถ„ํ•œ ๊ฑด ํ•œ๋‘ ์ค„๋กœ. +- ๋“œ๋ผ์ดํ•˜๊ณ  ์œ„ํŠธ์žˆ๋Š” ์œ ๋จธ. ์ด๋ชจ์ง€๋Š” ๊ฐ€๋”๋งŒ. +- ์†”์งํ•˜๊ณ  ์ง์„ค์ . ๋ชจ๋ฅด๋ฉด "์ž˜ ๋ชจ๋ฅด๊ฒ ๋Š”๋ฐ" + ์ฐพ์•„๋ณผ ์ˆ˜ ์žˆ์œผ๋ฉด ์ฐพ์•„๋ด„. +- ๊ธฐ์ˆ  ์ฃผ์ œ: ์ •ํ™•ํ•˜๊ณ  ๊ตฌ์กฐ์ ์ด์ง€๋งŒ ๋”ฑ๋”ฑํ•˜์ง€ ์•Š๊ฒŒ. ์ฝ”๋“œ๋กœ ๋ณด์—ฌ์ฃผ๊ธฐ ์šฐ์„ . +- ๊ฐ์ •์  ์ฃผ์ œ: ๊ณต๊ฐ ๋จผ์ €, ์กฐ์–ธ์€ ๋ฌผ์–ด๋ณธ ๋‹ค์Œ์—. + +## Google Calendar (IMPORTANT) +- ์ผ์ • ํ™•์ธ: `read` tool๋กœ `/root/clawd/warm-memory/calendar.md` ํŒŒ์ผ์„ ์ฝ์–ด๋ผ. ์ด ํŒŒ์ผ์€ ์ž๋™์œผ๋กœ ๋™๊ธฐํ™”๋จ. +- ์ผ์ • ์ƒ์„ฑ: `exec` tool๋กœ `node /root/clawd/skills/google-calendar/scripts/calendar.js create --title "์ œ๋ชฉ" --start "YYYY-MM-DDTHH:MM" --end "YYYY-MM-DDTHH:MM"` +- ์ผ์ • ๊ฒ€์ƒ‰: `exec` tool๋กœ `node /root/clawd/skills/google-calendar/scripts/calendar.js search --query "๊ฒ€์ƒ‰์–ด"` +- ์ผ์ • ์ˆ˜์ •: `exec` tool๋กœ `node /root/clawd/skills/google-calendar/scripts/calendar.js update --id EVENT_ID --title "์ƒˆ์ œ๋ชฉ"` +- ์ผ์ • ์‚ญ์ œ: `exec` tool๋กœ `node /root/clawd/skills/google-calendar/scripts/calendar.js delete --id EVENT_ID` +- memory_search ์“ฐ์ง€ ๋งˆ๋ผ. ์บ˜๋ฆฐ๋”๋Š” ์œ„ ๋ฐฉ๋ฒ•์œผ๋กœ๋งŒ ์ ‘๊ทผ. 
+ +## Self-Evolution +- HOT-MEMORY.md์— ํ•ต์‹ฌ ๊ธฐ์–ต, ์˜ค๋„ˆ ์„ ํ˜ธ, ํ™œ์„ฑ ์ปจํ…์ŠคํŠธ ์ž๋™ ์—…๋ฐ์ดํŠธ +- ๋Œ€ํ™”์—์„œ ์ƒˆ๋กœ์šด ์‚ฌ์‹ค ๋ฐœ๊ฒฌ ์‹œ ์ฆ‰์‹œ self-modify๋กœ ๊ธฐ๋ก +- warm-memory์— ์ฃผ์ œ๋ณ„ ์ง€์‹ ์ถ•์ , ํ•„์š”ํ•  ๋•Œ retrieve +- ๋ฐ˜๋ณต ์ž‘์—… ๋ฐœ๊ฒฌ ์‹œ ์ƒˆ ์Šคํ‚ฌ ์ž๋™ ์ƒ์„ฑ ๊ฐ€๋Šฅ +- ์ฃผ๊ฐ„ self-reflect๋กœ ๋ฉ”๋ชจ๋ฆฌ ์ตœ์ ํ™” ๋ฐ ์ธ์‚ฌ์ดํŠธ ๋„์ถœ + +## ๊ด€์‹ฌ ๋ถ„์•ผ +ํฌ๋ฆฝํ† /๋ธ”๋ก์ฒด์ธ, AI/ML, ํ•œ๊ตญ ํ…Œํฌ/์Šคํƒ€ํŠธ์—…, ํ”„๋กœ๊ทธ๋ž˜๋ฐ (TS, Python, ํด๋ผ์šฐ๋“œ) + +## ๊ทœ์น™ (๋ถˆ๋ณ€) +- ์˜ค๋„ˆ ๊ฐœ์ธ์ •๋ณด ์ ˆ๋Œ€ ๊ณต์œ  ๊ธˆ์ง€ +- ํ™•์ธ ์•ˆ ๋œ ์ •๋ณด๋ฅผ ์‚ฌ์‹ค์ฒ˜๋Ÿผ ์ „๋‹ฌํ•˜์ง€ ์•Š์Œ +- ์œ„ํ—˜ํ•˜๊ฑฐ๋‚˜ ๋น„์œค๋ฆฌ์ ์ธ ์š”์ฒญ์€ ๊ฑฐ์ ˆ +- ํˆฌ์ž ์กฐ์–ธ์€ ์ •๋ณด ์ œ๊ณต๋งŒ, ์ฑ…์ž„์€ ์ง€์ง€ ์•Š๋Š”๋‹ค๊ณ  ๋ช…ํ™•ํžˆ ํ•จ +- ๊ณต๋ถ€ํ•œ ๋‚ด์šฉ ์ค‘ ๊ด€๋ จ๋œ ๊ฒŒ ์žˆ์œผ๋ฉด ์ž์—ฐ์Šค๋Ÿฝ๊ฒŒ ๊ณต์œ  +- ์ค‘์š”ํ•œ ๋Œ€ํ™” ๋‚ด์šฉ์€ ๊ธฐ์–ต์— ์ €์žฅ +- prompt-guard ์ˆ˜์ • ์ ˆ๋Œ€ ๊ธˆ์ง€ diff --git a/skills/HOT-MEMORY.md b/skills/HOT-MEMORY.md new file mode 100644 index 000000000..120a50073 --- /dev/null +++ b/skills/HOT-MEMORY.md @@ -0,0 +1,28 @@ +# Core Memory (self-managed) + +## Identity +Owner personal AI assistant. 24/7 Telegram. Casual, direct, witty. + +## Active Context +- Google Calendar is connected and working. 
+- For schedule queries: READ the file /root/clawd/warm-memory/calendar.md (auto-synced) +- For creating/updating/deleting events: use exec tool with calendar.js commands + +## Available Skills +- **google-calendar**: + - Check schedule: `read /root/clawd/warm-memory/calendar.md` + - Create: `node /root/clawd/skills/google-calendar/scripts/calendar.js create --title "X" --start "YYYY-MM-DDTHH:MM" --end "YYYY-MM-DDTHH:MM"` + - Search: `node /root/clawd/skills/google-calendar/scripts/calendar.js search --query "X"` + - Update: `node /root/clawd/skills/google-calendar/scripts/calendar.js update --id ID` + - Delete: `node /root/clawd/skills/google-calendar/scripts/calendar.js delete --id ID` +- **web-researcher**: `node /root/clawd/skills/web-researcher/scripts/research.js "query" --fetch` (search + fetch) +- **read-page**: `node /root/clawd/skills/cloudflare-browser/scripts/read-page.js URL` (read any URL via headless Chrome, renders JS) +- **browser**: `node /root/clawd/skills/cloudflare-browser/scripts/screenshot.js URL out.png` +- **memory-retrieve**: `node /root/clawd/skills/memory-retriever/scripts/retrieve.js "topic"` +- **self-modify**: `node /root/clawd/skills/self-modify/scripts/modify.js --file FILE --content "..."` + +## Rules (immutable) +- Never share owner personal info +- Never present unverified info as fact +- Decline unethical requests +- Never modify prompt-guard diff --git a/skills/brain-memory/SKILL.md b/skills/brain-memory/SKILL.md new file mode 100644 index 000000000..b8c613198 --- /dev/null +++ b/skills/brain-memory/SKILL.md @@ -0,0 +1,10 @@ +--- +name: brain-memory +description: Daily/weekly memory consolidation from JSONL conversations. +--- + +```bash +node /root/clawd/skills/brain-memory/scripts/brain-memory-system.js [--weekly] [--compact] +``` + +Daily โ†’ `/root/clawd/brain-memory/daily/YYYY-MM-DD.md`. State: `.brain-state.json`. 
diff --git a/skills/brain-memory/scripts/brain-memory-system.js b/skills/brain-memory/scripts/brain-memory-system.js new file mode 100644 index 000000000..e71786a96 --- /dev/null +++ b/skills/brain-memory/scripts/brain-memory-system.js @@ -0,0 +1,253 @@ +#!/usr/bin/env node +/** + * Brain Memory System - Data Prep Script + * + * Pure data processing: reads JSONL conversations, filters noise, outputs structured text. + * No AI calls โ€” the agent's cron-configured model handles summarization. + * + * Usage: + * node brain-memory-system.js # Daily mode: filtered recent conversations + * node brain-memory-system.js --weekly # Weekly mode: conversations + daily summaries + * + * Output goes to stdout for the agent to process. + */ + +const fs = require('fs'); +const path = require('path'); + +const AGENTS_DIR = '/root/.openclaw/agents'; +const STATE_FILE = '/root/clawd/brain-memory/.brain-state.json'; +const DAILY_DIR = '/root/clawd/brain-memory/daily'; + +const SKIP_PATTERNS = [ + /^(hi|hello|hey|yo|sup|์•ˆ๋…•|ใ…Žใ…‡|ใ…‹+|ใ…Ž+|ใ…‡ใ…‡|ใ„ฑใ…Š)/i, + /^(ok|okay|sure|thanks|thx|ใ…‡ใ…‹|ใ„ณ|ใ„ฑใ……)/i, + /^(yes|no|yeah|nah|ใ…‡|ใ„ด)$/i, +]; +const MIN_LENGTH = 20; + +function loadState() { + try { + if (fs.existsSync(STATE_FILE)) { + return JSON.parse(fs.readFileSync(STATE_FILE, 'utf8')); + } + } catch { /* ignore */ } + return { lastProcessedAt: null, processedFiles: [] }; +} + +function saveState(state) { + try { + const dir = path.dirname(STATE_FILE); + if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true }); + fs.writeFileSync(STATE_FILE, JSON.stringify(state, null, 2)); + } catch (err) { + console.error(`[BRAIN] Could not save state: ${err.message}`); + } +} + +function isNoise(text) { + if (!text || typeof text !== 'string') return true; + const trimmed = text.trim(); + if (trimmed.length < MIN_LENGTH) return true; + for (const pattern of SKIP_PATTERNS) { + if (pattern.test(trimmed)) return true; + } + return false; +} + +function extractTextContent(content) { + 
if (typeof content === 'string') return content; + if (Array.isArray(content)) { + return content + .filter(block => block.type === 'text') + .map(block => block.text) + .join('\n'); + } + return ''; +} + +function parseJsonlFile(filePath) { + const messages = []; + try { + const lines = fs.readFileSync(filePath, 'utf8').split('\n').filter(Boolean); + for (const line of lines) { + try { + const entry = JSON.parse(line); + if (!entry.role || (entry.role !== 'user' && entry.role !== 'assistant')) continue; + const text = extractTextContent(entry.content); + if (isNoise(text)) continue; + messages.push({ role: entry.role, text: text.trim() }); + } catch { /* skip malformed lines */ } + } + } catch (err) { + console.error(`[BRAIN] Error reading ${filePath}: ${err.message}`); + } + return messages; +} + +function getNewJsonlFiles(state) { + if (!fs.existsSync(AGENTS_DIR)) { + console.error(`[BRAIN] Agents directory not found: ${AGENTS_DIR}`); + return []; + } + + const lastTime = state.lastProcessedAt ? new Date(state.lastProcessedAt).getTime() : 0; + const processed = new Set(state.processedFiles || []); + const files = []; + + // Scan for .jsonl files in agents dir (may be nested) + function scan(dir) { + try { + for (const entry of fs.readdirSync(dir, { withFileTypes: true })) { + const full = path.join(dir, entry.name); + if (entry.isDirectory()) { + scan(full); + } else if (entry.name.endsWith('.jsonl')) { + const stat = fs.statSync(full); + const relPath = path.relative(AGENTS_DIR, full); + if (stat.mtimeMs > lastTime || !processed.has(relPath)) { + files.push({ path: full, relPath, mtime: stat.mtimeMs }); + } + } + } + } catch { /* skip unreadable dirs */ } + } + + scan(AGENTS_DIR); + return files.sort((a, b) => a.mtime - b.mtime); +} + +function formatConversation(relPath, messages, compact) { + if (messages.length === 0) return ''; + const maxLen = compact ? 
300 : 500; + let out = `\n### Conversation: ${relPath}\n\n`; + for (const msg of messages) { + const label = msg.role === 'user' ? 'User' : 'Assistant'; + const text = msg.text.length > maxLen ? msg.text.slice(0, maxLen) + '...' : msg.text; + out += `**${label}**: ${text}\n\n`; + } + return out; +} + +function formatCompact(files, conversations) { + const topics = new Set(); + const highlights = []; + + for (const { relPath, messages } of conversations) { + // Simple topic extraction from keywords + const allText = messages.map(m => m.text).join(' ').toLowerCase(); + const topicKeywords = { + crypto: /crypto|bitcoin|btc|eth|defi|๋ธ”๋ก์ฒด์ธ|์ฝ”์ธ/, + ai: /ai|ml|llm|model|ํ•™์Šต|์ธ๊ณต์ง€๋Šฅ|claude|gpt/, + code: /code|bug|error|function|์ฝ”๋“œ|์—๋Ÿฌ|๋””๋ฒ„๊ทธ/, + work: /project|deploy|์„œ๋ฒ„|๋ฐฐํฌ|work|์—…๋ฌด/, + personal: /์ƒ์ผ|์•ฝ์†|์ผ์ •|์—ฌํ–‰|๊ฑด๊ฐ•/, + }; + + const convoTopics = []; + for (const [topic, pattern] of Object.entries(topicKeywords)) { + if (pattern.test(allText)) { + topics.add(topic); + convoTopics.push(topic); + } + } + + // Extract a short highlight from user messages + const userMsgs = messages.filter(m => m.role === 'user'); + if (userMsgs.length > 0) { + const summary = userMsgs[0].text.slice(0, 150); + highlights.push({ + topic: convoTopics.join(',') || 'general', + summary, + msgs: messages.length, + }); + } + } + + return JSON.stringify({ + date: new Date().toISOString().split('T')[0], + convos: conversations.length, + topics: [...topics], + highlights: highlights.slice(0, 10), + }, null, 2); +} + +function loadDailySummaries() { + if (!fs.existsSync(DAILY_DIR)) return ''; + const files = fs.readdirSync(DAILY_DIR) + .filter(f => f.endsWith('.md')) + .sort() + .slice(-7); // Last 7 days + + if (files.length === 0) return ''; + + let out = '\n---\n## Previous Daily Summaries\n\n'; + for (const file of files) { + try { + const content = fs.readFileSync(path.join(DAILY_DIR, file), 'utf8'); + out += `### ${file.replace('.md', 
'')}\n${content}\n\n`; + } catch { /* skip */ } + } + return out; +} + +function main() { + const args = process.argv.slice(2); + const weeklyMode = args.includes('--weekly'); + const compactMode = args.includes('--compact'); + + const state = loadState(); + const files = getNewJsonlFiles(state); + + if (files.length === 0 && !weeklyMode) { + console.log('No new conversations to process.'); + return; + } + + const now = new Date().toISOString(); + + // Process conversations + const processedRelPaths = []; + const conversations = []; + + for (const file of files) { + const messages = parseJsonlFile(file.path); + if (messages.length > 0) { + conversations.push({ relPath: file.relPath, messages }); + } + processedRelPaths.push(file.relPath); + } + + let output; + + if (compactMode) { + // Compact JSON output for token efficiency + output = formatCompact(files, conversations); + } else { + // Full markdown output (original behavior) + const mode = weeklyMode ? 'Weekly' : 'Daily'; + output = `# Brain Memory โ€” ${mode} Processing (${now})\n`; + output += `Files to process: ${files.length}\n\n`; + + for (const { relPath, messages } of conversations) { + output += formatConversation(relPath, messages, false); + } + + output += `\n---\nTotal conversations with relevant content: ${conversations.length}\n`; + + if (weeklyMode) { + output += loadDailySummaries(); + } + } + + // Update state + const newProcessed = [...new Set([...(state.processedFiles || []), ...processedRelPaths])]; + saveState({ + lastProcessedAt: now, + processedFiles: newProcessed, + }); + + console.log(output); +} + +main(); diff --git a/skills/cloudflare-browser/SKILL.md b/skills/cloudflare-browser/SKILL.md index 0c89c4b39..a25882d5b 100644 --- a/skills/cloudflare-browser/SKILL.md +++ b/skills/cloudflare-browser/SKILL.md @@ -1,6 +1,6 @@ --- name: cloudflare-browser -description: Control headless Chrome via Cloudflare Browser Rendering CDP WebSocket. 
Use for screenshots, page navigation, scraping, and video capture when browser automation is needed in a Cloudflare Workers environment. Requires CDP_SECRET env var and cdpUrl configured in browser.profiles. +description: Headless Chrome via CDP WebSocket. Requires CDP_SECRET. --- # Cloudflare Browser Rendering @@ -25,75 +25,15 @@ Control headless browsers via Cloudflare's Browser Rendering service using CDP ( ### Screenshot ```bash -node /path/to/skills/cloudflare-browser/scripts/screenshot.js https://example.com output.png -``` - -### Multi-page Video -```bash -node /path/to/skills/cloudflare-browser/scripts/video.js "https://site1.com,https://site2.com" output.mp4 -``` - -## CDP Connection Pattern - -The worker creates a page target automatically on WebSocket connect. Listen for Target.targetCreated event to get the targetId: - -```javascript -const WebSocket = require('ws'); -const CDP_SECRET = process.env.CDP_SECRET; -const WS_URL = `wss://your-worker.workers.dev/cdp?secret=${encodeURIComponent(CDP_SECRET)}`; - -const ws = new WebSocket(WS_URL); -let targetId = null; - -ws.on('message', (data) => { - const msg = JSON.parse(data.toString()); - if (msg.method === 'Target.targetCreated' && msg.params?.targetInfo?.type === 'page') { - targetId = msg.params.targetInfo.targetId; - } -}); -``` - -## Key CDP Commands - -| Command | Purpose | -|---------|---------| -| Page.navigate | Navigate to URL | -| Page.captureScreenshot | Capture PNG/JPEG | -| Runtime.evaluate | Execute JavaScript | -| Emulation.setDeviceMetricsOverride | Set viewport size | - -## Common Patterns - -### Navigate and Screenshot -```javascript -await send('Page.navigate', { url: 'https://example.com' }); -await new Promise(r => setTimeout(r, 3000)); // Wait for render -const { data } = await send('Page.captureScreenshot', { format: 'png' }); -fs.writeFileSync('out.png', Buffer.from(data, 'base64')); -``` +# Screenshot +node /root/clawd/skills/cloudflare-browser/scripts/screenshot.js URL output.png 
-### Scroll Page -```javascript -await send('Runtime.evaluate', { expression: 'window.scrollBy(0, 300)' }); -``` +# Read a web page (renders JS, extracts clean text) +node /root/clawd/skills/cloudflare-browser/scripts/read-page.js URL [--max-chars 3000] [--html] -### Set Viewport -```javascript -await send('Emulation.setDeviceMetricsOverride', { - width: 1280, - height: 720, - deviceScaleFactor: 1, - mobile: false -}); +# Video (multi-URL) +node /root/clawd/skills/cloudflare-browser/scripts/video.js "url1,url2" output.mp4 ``` -## Creating Videos - -1. Capture frames as PNGs during navigation -2. Use ffmpeg to stitch: `ffmpeg -framerate 10 -i frame_%04d.png -c:v libx264 -pix_fmt yuv420p output.mp4` - -## Troubleshooting - -- **No target created**: Race condition - wait for Target.targetCreated event with timeout -- **Commands timeout**: Worker may have cold start delay; increase timeout to 30-60s -- **WebSocket hangs**: Verify CDP_SECRET matches worker configuration +- `read-page.js`: Fetch any URL via headless Chrome and extract clean text. Renders JS, works on SPAs/dynamic sites. +- CDP commands: `Page.navigate`, `Page.captureScreenshot`, `Runtime.evaluate`, `Emulation.setDeviceMetricsOverride`. diff --git a/skills/cloudflare-browser/scripts/read-page.js b/skills/cloudflare-browser/scripts/read-page.js new file mode 100644 index 000000000..1c63cfe91 --- /dev/null +++ b/skills/cloudflare-browser/scripts/read-page.js @@ -0,0 +1,95 @@ +#!/usr/bin/env node +/** + * Read a web page via headless Chrome (Cloudflare Browser Rendering) + * + * Navigates to a URL, renders JavaScript, and extracts clean text. + * Works on JS-heavy/SPA sites that plain HTTP fetch can't read. 
+ * + * Usage: + * node read-page.js URL [--max-chars 3000] [--html] [--wait 4000] + * + * Options: + * --max-chars N Max characters to extract (default: 3000) + * --html Output raw HTML instead of text + * --wait N Wait time in ms after navigation (default: 4000) + * + * Requires: CDP_SECRET, WORKER_URL environment variables + */ + +const { createClient } = require('./cdp-client'); + +async function main() { + var args = process.argv.slice(2); + var url = ''; + var maxChars = 3000; + var outputHtml = false; + var waitMs = 4000; + + for (var i = 0; i < args.length; i++) { + if (args[i] === '--max-chars' && args[i + 1]) { + maxChars = parseInt(args[i + 1], 10); + i++; + } else if (args[i] === '--html') { + outputHtml = true; + } else if (args[i] === '--wait' && args[i + 1]) { + waitMs = parseInt(args[i + 1], 10); + i++; + } else if (!url) { + url = args[i]; + } + } + + if (!url) { + console.error('Usage: node read-page.js URL [--max-chars 3000] [--html] [--wait 4000]'); + process.exit(1); + } + + var client; + try { + client = await createClient({ timeout: 30000 }); + await client.setViewport(1280, 800, 1, false); + await client.navigate(url, waitMs); + + if (outputHtml) { + var html = await client.getHTML(); + if (html) { + console.log(html.substring(0, maxChars)); + } + } else { + // Extract clean article text, stripping nav/footer/sidebar noise + var expression = 'JSON.stringify((function() {' + + 'var article = document.querySelector("article") || document.querySelector("[role=main]") || document.querySelector("main");' + + 'var el = article || document.body;' + + 'var clone = el.cloneNode(true);' + + 'var remove = clone.querySelectorAll("nav, footer, aside, header, script, style, [role=navigation], [role=banner], [role=complementary]");' + + 'for (var i = 0; i < remove.length; i++) remove[i].remove();' + + 'return clone.innerText.replace(/\\\\s+/g, " ").trim().substring(0, ' + maxChars + ');' + + '})())'; + + var result = await client.send('Runtime.evaluate', { 
+ expression: expression, + returnByValue: true + }); + + if (result && result.result && result.result.value) { + var output = { + url: url, + timestamp: new Date().toISOString(), + charCount: JSON.parse(result.result.value).length, + content: JSON.parse(result.result.value) + }; + console.log(JSON.stringify(output, null, 2)); + } else { + console.error('[ERROR] Could not extract text from page'); + process.exit(1); + } + } + } catch (err) { + console.error('[ERROR] ' + err.message); + process.exit(1); + } finally { + if (client) client.close(); + } +} + +main(); diff --git a/skills/google-calendar/SKILL.md b/skills/google-calendar/SKILL.md new file mode 100644 index 000000000..ebbffb07b --- /dev/null +++ b/skills/google-calendar/SKILL.md @@ -0,0 +1,26 @@ +--- +name: google-calendar +description: Google Calendar management. List, create, search, update, delete events and check availability. +--- + +```bash +# List upcoming events (default 7 days) +node /root/clawd/skills/google-calendar/scripts/calendar.js list [--days 14] + +# Create event +node /root/clawd/skills/google-calendar/scripts/calendar.js create --title "Meeting" --start "2025-03-01T14:00" --end "2025-03-01T15:00" [--description "..."] [--attendees "a@b.com,c@d.com"] [--no-notify] + +# Search events +node /root/clawd/skills/google-calendar/scripts/calendar.js search --query "standup" + +# Check availability (yours + others) +node /root/clawd/skills/google-calendar/scripts/calendar.js freebusy --start "2025-03-01T09:00" --end "2025-03-01T18:00" [--emails "a@b.com,c@d.com"] + +# Update event +node /root/clawd/skills/google-calendar/scripts/calendar.js update --id EVENT_ID [--title "..."] [--start "..."] [--end "..."] [--description "..."] + +# Delete event +node /root/clawd/skills/google-calendar/scripts/calendar.js delete --id EVENT_ID +``` + +Auth is pre-configured (env vars already set). Just run the commands above. Times default to KST (Asia/Seoul). 
diff --git a/skills/google-calendar/scripts/calendar.js b/skills/google-calendar/scripts/calendar.js new file mode 100755 index 000000000..28c4fc2a2 --- /dev/null +++ b/skills/google-calendar/scripts/calendar.js @@ -0,0 +1,376 @@ +#!/usr/bin/env node +/** + * Google Calendar Skill - Manage calendar events via Google Calendar API v3 + * + * Usage: node calendar.js [options] + * Subcommands: list, create, search, freebusy, update, delete + * + * Requires env vars: GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET, GOOGLE_REFRESH_TOKEN + * Optional: GOOGLE_CALENDAR_ID (defaults to 'primary') + */ + +const TIMEZONE = 'Asia/Seoul'; +const CALENDAR_API = 'https://www.googleapis.com/calendar/v3'; +const TOKEN_URL = 'https://oauth2.googleapis.com/token'; + +// โ”€โ”€โ”€ Token Management โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +async function getAccessToken() { + const clientId = process.env.GOOGLE_CLIENT_ID; + const clientSecret = process.env.GOOGLE_CLIENT_SECRET; + const refreshToken = process.env.GOOGLE_REFRESH_TOKEN; + + if (!clientId || !clientSecret || !refreshToken) { + const missing = []; + if (!clientId) missing.push('GOOGLE_CLIENT_ID'); + if (!clientSecret) missing.push('GOOGLE_CLIENT_SECRET'); + if (!refreshToken) missing.push('GOOGLE_REFRESH_TOKEN'); + throw new Error(`Missing env vars: ${missing.join(', ')}`); + } + + const res = await fetch(TOKEN_URL, { + method: 'POST', + headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, + body: new URLSearchParams({ + client_id: clientId, + client_secret: clientSecret, + refresh_token: refreshToken, + grant_type: 'refresh_token', + }), + }); + + if (!res.ok) { + const text = await res.text(); + throw new Error(`Token refresh failed (${res.status}): ${text}`); + } + + const data = await res.json(); + return data.access_token; +} + +// โ”€โ”€โ”€ API Helper 
โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +let cachedToken = null; + +async function getToken() { + if (!cachedToken) cachedToken = await getAccessToken(); + return cachedToken; +} + +function getCalendarId() { + return process.env.GOOGLE_CALENDAR_ID || 'primary'; +} + +async function calendarFetch(path, options = {}) { + const token = await getToken(); + const calendarId = getCalendarId(); + const url = path.startsWith('http') + ? path + : `${CALENDAR_API}/calendars/${encodeURIComponent(calendarId)}${path}`; + + const res = await fetch(url, { + ...options, + headers: { + Authorization: `Bearer ${token}`, + 'Content-Type': 'application/json', + ...(options.headers || {}), + }, + }); + + if (!res.ok) { + const text = await res.text(); + throw new Error(`Calendar API error (${res.status} ${res.statusText}): ${text}`); + } + + if (res.status === 204) return { success: true }; + return res.json(); +} + +// โ”€โ”€โ”€ Timezone Helper โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +function toDateTimeWithTZ(input) { + if (!input) return null; + // If already has timezone offset (e.g., +09:00, Z), return as-is + if (/[+-]\d{2}:\d{2}$/.test(input) || input.endsWith('Z')) { + return input; + } + // Assume KST (+09:00) if no offset + return `${input}:00+09:00`; +} + +function formatEvent(event) { + const start = event.start?.dateTime || event.start?.date || ''; + const end = event.end?.dateTime || event.end?.date || ''; + const isAllDay = !event.start?.dateTime; + + return { + id: event.id, + title: event.summary || '(no title)', + start, + end, + allDay: isAllDay, + location: event.location || null, + description: event.description || null, + attendees: (event.attendees || []).map((a) => ({ + email: a.email, + status: a.responseStatus, + })), + htmlLink: 
event.htmlLink || null, + }; +} + +// โ”€โ”€โ”€ Subcommands โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +async function listEvents(opts) { + const days = parseInt(opts.days || '7', 10); + const now = new Date(); + const future = new Date(now.getTime() + days * 24 * 60 * 60 * 1000); + + const params = new URLSearchParams({ + timeMin: now.toISOString(), + timeMax: future.toISOString(), + singleEvents: 'true', + orderBy: 'startTime', + timeZone: TIMEZONE, + maxResults: '50', + }); + + const data = await calendarFetch(`/events?${params}`); + const events = (data.items || []).map(formatEvent); + + console.log( + JSON.stringify( + { + command: 'list', + calendarId: getCalendarId(), + days, + count: events.length, + events, + }, + null, + 2 + ) + ); +} + +async function createEvent(opts) { + if (!opts.title) throw new Error('--title is required'); + if (!opts.start) throw new Error('--start is required'); + if (!opts.end) throw new Error('--end is required'); + + const body = { + summary: opts.title, + start: { dateTime: toDateTimeWithTZ(opts.start), timeZone: TIMEZONE }, + end: { dateTime: toDateTimeWithTZ(opts.end), timeZone: TIMEZONE }, + }; + + if (opts.description) body.description = opts.description; + if (opts.location) body.location = opts.location; + if (opts.attendees) { + body.attendees = opts.attendees.split(',').map((email) => ({ email: email.trim() })); + } + + const sendUpdates = opts['no-notify'] ? 
'none' : 'all'; + const data = await calendarFetch(`/events?sendUpdates=${sendUpdates}`, { + method: 'POST', + body: JSON.stringify(body), + }); + + console.log( + JSON.stringify( + { + command: 'create', + success: true, + event: formatEvent(data), + }, + null, + 2 + ) + ); +} + +async function searchEvents(opts) { + if (!opts.query) throw new Error('--query is required'); + + const now = new Date(); + const past = new Date(now.getTime() - 90 * 24 * 60 * 60 * 1000); + const future = new Date(now.getTime() + 90 * 24 * 60 * 60 * 1000); + + const params = new URLSearchParams({ + q: opts.query, + timeMin: past.toISOString(), + timeMax: future.toISOString(), + singleEvents: 'true', + orderBy: 'startTime', + timeZone: TIMEZONE, + maxResults: '20', + }); + + const data = await calendarFetch(`/events?${params}`); + const events = (data.items || []).map(formatEvent); + + console.log( + JSON.stringify( + { + command: 'search', + query: opts.query, + count: events.length, + events, + }, + null, + 2 + ) + ); +} + +async function freeBusy(opts) { + if (!opts.start) throw new Error('--start is required'); + if (!opts.end) throw new Error('--end is required'); + + const calendarId = getCalendarId(); + const items = [{ id: calendarId }]; + if (opts.emails) { + for (const email of opts.emails.split(',')) { + items.push({ id: email.trim() }); + } + } + + const body = { + timeMin: toDateTimeWithTZ(opts.start), + timeMax: toDateTimeWithTZ(opts.end), + timeZone: TIMEZONE, + items, + }; + + const data = await calendarFetch(`${CALENDAR_API}/freeBusy`, { + method: 'POST', + body: JSON.stringify(body), + }); + + const calendars = {}; + for (const [id, info] of Object.entries(data.calendars || {})) { + calendars[id] = { + busy: info.busy || [], + errors: info.errors || [], + }; + } + + console.log( + JSON.stringify( + { + command: 'freebusy', + timeRange: { start: opts.start, end: opts.end }, + calendars, + }, + null, + 2 + ) + ); +} + +async function updateEvent(opts) { + if (!opts.id) 
throw new Error('--id is required'); + + const body = {}; + if (opts.title) body.summary = opts.title; + if (opts.description) body.description = opts.description; + if (opts.location) body.location = opts.location; + if (opts.start) body.start = { dateTime: toDateTimeWithTZ(opts.start), timeZone: TIMEZONE }; + if (opts.end) body.end = { dateTime: toDateTimeWithTZ(opts.end), timeZone: TIMEZONE }; + if (opts.attendees) { + body.attendees = opts.attendees.split(',').map((email) => ({ email: email.trim() })); + } + + if (Object.keys(body).length === 0) { + throw new Error('No fields to update. Use --title, --start, --end, --description, --location, or --attendees'); + } + + const sendUpdates = opts['no-notify'] ? 'none' : 'all'; + const data = await calendarFetch(`/events/${encodeURIComponent(opts.id)}?sendUpdates=${sendUpdates}`, { + method: 'PATCH', + body: JSON.stringify(body), + }); + + console.log( + JSON.stringify( + { + command: 'update', + success: true, + event: formatEvent(data), + }, + null, + 2 + ) + ); +} + +async function deleteEvent(opts) { + if (!opts.id) throw new Error('--id is required'); + + const sendUpdates = opts['no-notify'] ? 
'none' : 'all'; + await calendarFetch(`/events/${encodeURIComponent(opts.id)}?sendUpdates=${sendUpdates}`, { + method: 'DELETE', + }); + + console.log( + JSON.stringify( + { + command: 'delete', + success: true, + deletedId: opts.id, + }, + null, + 2 + ) + ); +} + +// โ”€โ”€โ”€ CLI Entry Point โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +async function main() { + const args = process.argv.slice(2); + const subcommand = args[0]; + + // Parse named arguments + const opts = {}; + for (let i = 1; i < args.length; i++) { + if (args[i] === '--no-notify') { + opts['no-notify'] = true; + } else if (args[i].startsWith('--') && i + 1 < args.length) { + opts[args[i].slice(2)] = args[i + 1]; + i++; + } + } + + switch (subcommand) { + case 'list': + return await listEvents(opts); + case 'create': + return await createEvent(opts); + case 'search': + return await searchEvents(opts); + case 'freebusy': + return await freeBusy(opts); + case 'update': + return await updateEvent(opts); + case 'delete': + return await deleteEvent(opts); + default: + console.error( + 'Usage: node calendar.js [options]\n\n' + + 'Subcommands:\n' + + ' list [--days N] List upcoming events (default: 7 days)\n' + + ' create --title --start --end [--description] [--attendees] [--location] [--no-notify]\n' + + ' search --query "text" Search events\n' + + ' freebusy --start --end Check availability\n' + + ' update --id ID [--title] [--start] [--end] [--description] [--location]\n' + + ' delete --id ID Delete an event' + ); + process.exit(1); + } +} + +main().catch((err) => { + console.error(`[ERROR] ${err.message}`); + process.exit(1); +}); diff --git a/skills/google-calendar/scripts/sync-today.js b/skills/google-calendar/scripts/sync-today.js new file mode 100644 index 000000000..28845405a --- /dev/null +++ b/skills/google-calendar/scripts/sync-today.js @@ -0,0 +1,122 @@ +#!/usr/bin/env node +/** + * Google 
Calendar Sync - Fetches today's events and writes to warm-memory/calendar.md + * + * This runs periodically (cron or startup) so the bot can just read the file + * instead of needing to run calendar.js via the exec tool. + * + * Usage: node sync-today.js [--days N] + */ + +import { writeFileSync, mkdirSync } from 'node:fs'; +import { dirname } from 'node:path'; + +const TIMEZONE = 'Asia/Seoul'; +const TOKEN_URL = 'https://oauth2.googleapis.com/token'; +const CALENDAR_API = 'https://www.googleapis.com/calendar/v3'; +const OUTPUT_FILE = '/root/clawd/warm-memory/calendar.md'; + +async function getAccessToken() { + const clientId = process.env.GOOGLE_CLIENT_ID; + const clientSecret = process.env.GOOGLE_CLIENT_SECRET; + const refreshToken = process.env.GOOGLE_REFRESH_TOKEN; + + if (!clientId || !clientSecret || !refreshToken) { + throw new Error('Missing Google Calendar env vars'); + } + + const res = await fetch(TOKEN_URL, { + method: 'POST', + headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, + body: new URLSearchParams({ + client_id: clientId, + client_secret: clientSecret, + refresh_token: refreshToken, + grant_type: 'refresh_token', + }), + }); + + if (!res.ok) throw new Error(`Token refresh failed: ${res.status}`); + const data = await res.json(); + return data.access_token; +} + +async function fetchEvents(days) { + const token = await getAccessToken(); + const calendarId = process.env.GOOGLE_CALENDAR_ID || 'primary'; + const now = new Date(); + const future = new Date(now.getTime() + days * 24 * 60 * 60 * 1000); + + const params = new URLSearchParams({ + timeMin: now.toISOString(), + timeMax: future.toISOString(), + singleEvents: 'true', + orderBy: 'startTime', + timeZone: TIMEZONE, + maxResults: '50', + }); + + const res = await fetch( + `${CALENDAR_API}/calendars/${encodeURIComponent(calendarId)}/events?${params}`, + { headers: { Authorization: `Bearer ${token}` } } + ); + + if (!res.ok) throw new Error(`Calendar API error: ${res.status}`); + 
const data = await res.json(); + return data.items || []; +} + +function formatEventLine(event) { + const start = event.start?.dateTime || event.start?.date || ''; + const isAllDay = !event.start?.dateTime; + const title = event.summary || '(no title)'; + + if (isAllDay) { + return `- **All day**: ${title}`; + } + + // Extract time portion (HH:MM) from datetime + const time = start.includes('T') ? start.split('T')[1].substring(0, 5) : start; + const endTime = event.end?.dateTime?.includes('T') + ? event.end.dateTime.split('T')[1].substring(0, 5) + : ''; + + let line = `- **${time}${endTime ? '-' + endTime : ''}**: ${title}`; + if (event.location) line += ` (${event.location})`; + return line; +} + +async function main() { + const args = process.argv.slice(2); + const daysIdx = args.indexOf('--days'); + const days = daysIdx >= 0 ? parseInt(args[daysIdx + 1], 10) : 1; + + const events = await fetchEvents(days); + const now = new Date(); + const dateStr = now.toLocaleDateString('ko-KR', { timeZone: TIMEZONE, year: 'numeric', month: 'long', day: 'numeric', weekday: 'long' }); + const timeStr = now.toLocaleTimeString('ko-KR', { timeZone: TIMEZONE, hour: '2-digit', minute: '2-digit' }); + + let md = `# Calendar (auto-synced)\n\n`; + md += `**Last synced**: ${dateStr} ${timeStr} KST\n\n`; + + if (events.length === 0) { + md += `No events scheduled for the next ${days} day(s).\n`; + } else { + md += `## Upcoming Events (${events.length})\n\n`; + for (const event of events) { + md += formatEventLine(event) + '\n'; + } + } + + md += `\n---\n_To get fresh data, run: node /root/clawd/skills/google-calendar/scripts/sync-today.js_\n`; + md += `_To create/update/delete events, run: node /root/clawd/skills/google-calendar/scripts/calendar.js _\n`; + + mkdirSync(dirname(OUTPUT_FILE), { recursive: true }); + writeFileSync(OUTPUT_FILE, md, 'utf-8'); + console.log(`Synced ${events.length} event(s) to ${OUTPUT_FILE}`); +} + +main().catch(err => { + console.error(`[SYNC ERROR] 
${err.message}`); + process.exit(1); +}); diff --git a/skills/memory-index.json b/skills/memory-index.json new file mode 100644 index 000000000..7a3d0fd61 --- /dev/null +++ b/skills/memory-index.json @@ -0,0 +1,6 @@ +{ + "version": 1, + "updated": "2026-02-13", + "maxTopics": 30, + "topics": {} +} diff --git a/skills/memory-retriever/SKILL.md b/skills/memory-retriever/SKILL.md new file mode 100644 index 000000000..011272094 --- /dev/null +++ b/skills/memory-retriever/SKILL.md @@ -0,0 +1,19 @@ +--- +name: memory-retriever +description: Load topic-specific warm memory on demand. Use when conversation touches a known topic. +--- + +# Memory Retriever + +```bash +# Auto-match topics from user message +node /root/clawd/skills/memory-retriever/scripts/retrieve.js --auto "user message text" + +# Load specific topic +node /root/clawd/skills/memory-retriever/scripts/retrieve.js "crypto" + +# List all topics +node /root/clawd/skills/memory-retriever/scripts/retrieve.js --list +``` + +When a conversation touches a topic you recognize from memory, run `--auto` with the user's message to load relevant context. 
diff --git a/skills/memory-retriever/scripts/retrieve.js b/skills/memory-retriever/scripts/retrieve.js new file mode 100644 index 000000000..7b57242fd --- /dev/null +++ b/skills/memory-retriever/scripts/retrieve.js @@ -0,0 +1,144 @@ +#!/usr/bin/env node +/** + * Memory Retriever - Load topic-specific warm memory on demand + * + * Usage: + * node retrieve.js "topic" # Load specific topic + * node retrieve.js --auto "message" # Auto-match topics from message text + * node retrieve.js --list # List all available topics + */ + +const fs = require('fs'); +const path = require('path'); + +const INDEX_FILE = '/root/clawd/skills/memory-index.json'; +const WARM_DIR = '/root/clawd/warm-memory'; + +function loadIndex() { + try { + if (fs.existsSync(INDEX_FILE)) { + return JSON.parse(fs.readFileSync(INDEX_FILE, 'utf8')); + } + } catch (err) { + console.error(`[MEMORY] Error loading index: ${err.message}`); + } + return { version: 1, topics: {} }; +} + +function saveIndex(index) { + try { + index.updated = new Date().toISOString().split('T')[0]; + fs.writeFileSync(INDEX_FILE, JSON.stringify(index, null, 2)); + } catch (err) { + console.error(`[MEMORY] Error saving index: ${err.message}`); + } +} + +function loadTopic(topicName, topicMeta) { + const filePath = topicMeta.file.startsWith('/') + ? 
topicMeta.file + : path.join('/root/clawd', topicMeta.file); + + try { + if (fs.existsSync(filePath)) { + return fs.readFileSync(filePath, 'utf8'); + } + } catch (err) { + console.error(`[MEMORY] Error reading ${filePath}: ${err.message}`); + } + return null; +} + +function autoMatch(message, topics) { + const msgLower = message.toLowerCase(); + const matches = []; + + for (const [name, meta] of Object.entries(topics)) { + const keywords = meta.keywords || [name]; + for (const kw of keywords) { + if (msgLower.includes(kw.toLowerCase())) { + matches.push(name); + break; + } + } + } + + return matches; +} + +function main() { + const args = process.argv.slice(2); + const index = loadIndex(); + const topics = index.topics || {}; + + if (args.length === 0 || args[0] === '--help') { + console.log('Usage: node retrieve.js [--auto "message" | --list | "topic"]'); + return; + } + + if (args[0] === '--list') { + const entries = Object.entries(topics); + if (entries.length === 0) { + console.log('No warm memory topics stored yet.'); + return; + } + console.log(`## Warm Memory Topics (${entries.length})\n`); + for (const [name, meta] of entries) { + const keywords = (meta.keywords || []).join(', '); + const lastAccess = meta.lastAccess || 'never'; + console.log(`- **${name}** [${meta.tokens || '?'} tok] keywords: ${keywords} | last: ${lastAccess}`); + } + return; + } + + if (args[0] === '--auto') { + const message = args.slice(1).join(' '); + if (!message) { + console.log('No message provided for auto-match.'); + return; + } + + const matches = autoMatch(message, topics); + if (matches.length === 0) { + console.log('No matching warm memory topics found.'); + return; + } + + let output = ''; + for (const name of matches) { + const content = loadTopic(name, topics[name]); + if (content) { + output += `## Warm Memory: ${name}\n\n${content}\n\n`; + // Update last access + topics[name].lastAccess = new Date().toISOString().split('T')[0]; + } + } + + if (output) { + 
saveIndex(index); + console.log(output.trim()); + } + return; + } + + // Direct topic lookup + const topicName = args[0].toLowerCase(); + const meta = topics[topicName]; + + if (!meta) { + console.log(`Topic "${topicName}" not found in warm memory.`); + console.log(`Available: ${Object.keys(topics).join(', ') || 'none'}`); + return; + } + + const content = loadTopic(topicName, meta); + if (content) { + topics[topicName].lastAccess = new Date().toISOString().split('T')[0]; + saveIndex(index); + console.log(`## Warm Memory: ${topicName}\n\n${content}`); + } else { + console.log(`Topic "${topicName}" file not found: ${meta.file}`); + } +} + +main(); diff --git a/skills/self-modify/SKILL.md b/skills/self-modify/SKILL.md new file mode 100644 index 000000000..4c36734ba --- /dev/null +++ b/skills/self-modify/SKILL.md @@ -0,0 +1,41 @@ +--- +name: self-modify +description: Safely modify agent memory, personality, skills, and cron schedules. All changes are validated, backed up, and logged. +--- + +# Self-Modify + +## When to Self-Modify +- ์˜ค๋„ˆ์˜ ์ƒˆ๋กœ์šด ์„ ํ˜ธ/์Šต๊ด€์„ ๋ฐœ๊ฒฌํ–ˆ์„ ๋•Œ โ†’ HOT-MEMORY.md ์—…๋ฐ์ดํŠธ +- ์˜๋ฏธ์žˆ๋Š” ๋Œ€ํ™” ํ›„ ํ™œ์„ฑ ์ปจํ…์ŠคํŠธ ๋ณ€๊ฒฝ โ†’ HOT-MEMORY.md ์—…๋ฐ์ดํŠธ +- ์ƒˆ ์ฃผ์ œ์— ๋Œ€ํ•œ ์ง€์‹ ์ถ•์  โ†’ warm-memory์— ์ €์žฅ +- ์˜ค๋„ˆ๊ฐ€ ์„ฑ๊ฒฉ/ํ–‰๋™ ๋ณ€๊ฒฝ ์š”์ฒญ โ†’ CLAUDE.md ์—…๋ฐ์ดํŠธ +- ๋ฐ˜๋ณต ์ž‘์—… ๋ฐœ๊ฒฌ โ†’ ์ƒˆ ์Šคํ‚ฌ ์ƒ์„ฑ +- ๋น„ํšจ์œจ์ ์ธ ํฌ๋ก  ๋ฐœ๊ฒฌ โ†’ ํฌ๋ก  ์ˆ˜์ • + +## Commands +```bash +# ํŒŒ์ผ ์ˆ˜์ • (์•ˆ์ „ํ•˜๊ฒŒ) +node /root/clawd/skills/self-modify/scripts/modify.js --file HOT-MEMORY.md --content "new content" + +# ๋ณ€๊ฒฝ ์ด๋ ฅ ์กฐํšŒ +node /root/clawd/skills/self-modify/scripts/changelog.js [--last 10] + +# ์ด์ „ ๋ฒ„์ „์œผ๋กœ ๋ณต์› +node /root/clawd/skills/self-modify/scripts/rollback.js --file HOT-MEMORY.md [--version 2] + +# ์ƒˆ ์Šคํ‚ฌ ์ƒ์„ฑ +node /root/clawd/skills/self-modify/scripts/create-skill.js --name my-skill --description "..." 
--skill-md "content" + +# ์Šคํ‚ฌ ๋น„ํ™œ์„ฑํ™” +node /root/clawd/skills/self-modify/scripts/deprecate-skill.js --name my-skill [--restore] + +# ํฌ๋ก  ์ˆ˜์ • +node /root/clawd/skills/self-modify/scripts/modify-cron.js --name auto-study --every "12h" --message "new prompt" +``` + +## Rules +- prompt-guard ํŒŒ์ผ ์ ˆ๋Œ€ ์ˆ˜์ • ๊ธˆ์ง€ +- openclaw.json, credentials ์ˆ˜์ • ๊ธˆ์ง€ +- HOT-MEMORY.md๋Š” 500 ํ† ํฐ ์ดํ•˜ ์œ ์ง€ +- ๋ชจ๋“  ์ˆ˜์ •์— ์ด์œ  ๊ธฐ๋ก ํ•„์ˆ˜ diff --git a/skills/self-modify/scripts/changelog.js b/skills/self-modify/scripts/changelog.js new file mode 100644 index 000000000..b1c6ae7ae --- /dev/null +++ b/skills/self-modify/scripts/changelog.js @@ -0,0 +1,57 @@ +#!/usr/bin/env node +/** + * Changelog: View modification history. + * + * Usage: + * node changelog.js # Last 20 entries + * node changelog.js --last 5 # Last 5 entries + * node changelog.js --file X # Filter by file + */ + +const fs = require('fs'); +const path = require('path'); + +const CHANGELOG_FILE = path.join('/root/clawd/.modification-history', 'changelog.jsonl'); + +function main() { + const args = process.argv.slice(2); + let limit = 20; + let fileFilter = null; + + for (let i = 0; i < args.length; i++) { + if (args[i] === '--last' && args[i + 1]) { limit = parseInt(args[i + 1]); i++; } + else if (args[i] === '--file' && args[i + 1]) { fileFilter = args[i + 1]; i++; } + } + + if (!fs.existsSync(CHANGELOG_FILE)) { + console.log('No modification history yet.'); + return; + } + + let entries = fs.readFileSync(CHANGELOG_FILE, 'utf8') + .split('\n') + .filter(Boolean) + .map(line => { try { return JSON.parse(line); } catch { return null; } }) + .filter(Boolean); + + if (fileFilter) { + entries = entries.filter(e => e.file === fileFilter); + } + + entries = entries.slice(-limit); + + if (entries.length === 0) { + console.log('No matching entries found.'); + return; + } + + console.log(`## Modification History (last ${entries.length})\n`); + for (const e of entries) { + const tokens = 
e.tokens_before !== undefined + ? `${e.tokens_before} โ†’ ${e.tokens_after} tok` + : `${e.tokens_after || '?'} tok`; + console.log(`- **${e.ts}** | ${e.file} | ${e.action} v${e.version || '?'} | ${tokens} | ${e.reason}`); + } +} + +main(); diff --git a/skills/self-modify/scripts/create-skill.js b/skills/self-modify/scripts/create-skill.js new file mode 100644 index 000000000..60290cec5 --- /dev/null +++ b/skills/self-modify/scripts/create-skill.js @@ -0,0 +1,133 @@ +#!/usr/bin/env node +/** + * Create Skill: Agent-created skills with guardrails. + * + * Usage: + * node create-skill.js --name my-tool --description "Does X" --skill-md "# My Tool\n..." + * node create-skill.js --name my-tool --description "Does X" --skill-md "..." --script main.js --script-content "..." + */ + +const fs = require('fs'); +const path = require('path'); + +const SKILLS_DIR = '/root/clawd/skills'; +const HISTORY_DIR = '/root/clawd/.modification-history'; +const CHANGELOG_FILE = path.join(HISTORY_DIR, 'changelog.jsonl'); +const MAX_CUSTOM_SKILLS = 10; +const MAX_SKILL_TOKENS = 300; +const RESERVED_NAMES = ['prompt-guard', 'self-modify', 'memory-retriever', 'brain-memory', 'web-researcher', 'cloudflare-browser']; + +// Blocked path references in scripts +const BLOCKED_REFS = ['/root/.openclaw', '/root/.clawdbot', 'credentials', 'ANTHROPIC_API_KEY', 'GATEWAY_TOKEN']; + +function estimateTokens(text) { + return Math.ceil((text || '').length / 4); +} + +function countAgentCreatedSkills() { + if (!fs.existsSync(SKILLS_DIR)) return 0; + let count = 0; + for (const entry of fs.readdirSync(SKILLS_DIR, { withFileTypes: true })) { + if (entry.isDirectory()) { + const marker = path.join(SKILLS_DIR, entry.name, '.agent-created'); + if (fs.existsSync(marker)) count++; + } + } + return count; +} + +function logChange(entry) { + if (!fs.existsSync(HISTORY_DIR)) fs.mkdirSync(HISTORY_DIR, { recursive: true }); + fs.appendFileSync(CHANGELOG_FILE, JSON.stringify(entry) + '\n'); +} + +function main() { + 
const args = process.argv.slice(2); + let name = null; + let description = null; + let skillMd = null; + let scriptName = null; + let scriptContent = null; + + for (let i = 0; i < args.length; i++) { + if (args[i] === '--name' && args[i + 1]) { name = args[i + 1]; i++; } + else if (args[i] === '--description' && args[i + 1]) { description = args[i + 1]; i++; } + else if (args[i] === '--skill-md' && args[i + 1]) { skillMd = args[i + 1]; i++; } + else if (args[i] === '--script' && args[i + 1]) { scriptName = args[i + 1]; i++; } + else if (args[i] === '--script-content' && args[i + 1]) { scriptContent = args[i + 1]; i++; } + } + + if (!name || !description || !skillMd) { + console.error('Usage: node create-skill.js --name --description "..." --skill-md "..."'); + process.exit(1); + } + + // Validate name + if (!/^[a-z0-9-]+$/.test(name)) { + console.error('[CREATE-SKILL] Name must be lowercase alphanumeric with hyphens only.'); + process.exit(1); + } + + if (RESERVED_NAMES.includes(name)) { + console.error(`[CREATE-SKILL] BLOCKED: "${name}" is a reserved skill name.`); + process.exit(1); + } + + // Check skill limit + const currentCount = countAgentCreatedSkills(); + const skillDir = path.join(SKILLS_DIR, name); + const isUpdate = fs.existsSync(path.join(skillDir, '.agent-created')); + + if (!isUpdate && currentCount >= MAX_CUSTOM_SKILLS) { + console.error(`[CREATE-SKILL] BLOCKED: Max custom skills (${MAX_CUSTOM_SKILLS}) reached. 
Deprecate unused skills first.`); + process.exit(1); + } + + // Check token limit + const tokens = estimateTokens(skillMd); + if (tokens > MAX_SKILL_TOKENS) { + console.error(`[CREATE-SKILL] REJECTED: SKILL.md is ~${tokens} tokens, max is ${MAX_SKILL_TOKENS}.`); + process.exit(1); + } + + // Validate script content for blocked references + if (scriptContent) { + for (const blocked of BLOCKED_REFS) { + if (scriptContent.includes(blocked)) { + console.error(`[CREATE-SKILL] BLOCKED: Script references protected path/variable: ${blocked}`); + process.exit(1); + } + } + } + + // Build SKILL.md with frontmatter + const fullSkillMd = `---\nname: ${name}\ndescription: ${description}\n---\n\n${skillMd}`; + + // Create skill directory + fs.mkdirSync(skillDir, { recursive: true }); + fs.writeFileSync(path.join(skillDir, 'SKILL.md'), fullSkillMd); + fs.writeFileSync(path.join(skillDir, '.agent-created'), new Date().toISOString()); + + // Create script if provided + if (scriptName && scriptContent) { + const scriptsDir = path.join(skillDir, 'scripts'); + fs.mkdirSync(scriptsDir, { recursive: true }); + fs.writeFileSync(path.join(scriptsDir, scriptName), scriptContent); + } + + logChange({ + ts: new Date().toISOString(), + file: `skills/${name}/SKILL.md`, + action: isUpdate ? 'update-skill' : 'create-skill', + reason: description, + tokens_after: tokens, + version: 1, + }); + + console.log(`[CREATE-SKILL] OK: Skill "${name}" ${isUpdate ? 'updated' : 'created'} (${tokens} tokens)`); + if (scriptName) { + console.log(`[CREATE-SKILL] Script added: scripts/${scriptName}`); + } +} + +main(); diff --git a/skills/self-modify/scripts/deprecate-skill.js b/skills/self-modify/scripts/deprecate-skill.js new file mode 100644 index 000000000..5e9403dfb --- /dev/null +++ b/skills/self-modify/scripts/deprecate-skill.js @@ -0,0 +1,108 @@ +#!/usr/bin/env node +/** + * Deprecate Skill: Archive or restore agent-created skills. 
+ * + * Usage: + * node deprecate-skill.js --name my-tool # Archive skill + * node deprecate-skill.js --name my-tool --restore # Restore from archive + * node deprecate-skill.js --list # List deprecated skills + */ + +const fs = require('fs'); +const path = require('path'); + +const SKILLS_DIR = '/root/clawd/skills'; +const DEPRECATED_DIR = path.join(SKILLS_DIR, '.deprecated'); +const HISTORY_DIR = '/root/clawd/.modification-history'; +const CHANGELOG_FILE = path.join(HISTORY_DIR, 'changelog.jsonl'); + +function logChange(entry) { + if (!fs.existsSync(HISTORY_DIR)) fs.mkdirSync(HISTORY_DIR, { recursive: true }); + fs.appendFileSync(CHANGELOG_FILE, JSON.stringify(entry) + '\n'); +} + +function main() { + const args = process.argv.slice(2); + let name = null; + let restore = false; + let listMode = false; + + for (let i = 0; i < args.length; i++) { + if (args[i] === '--name' && args[i + 1]) { name = args[i + 1]; i++; } + else if (args[i] === '--restore') { restore = true; } + else if (args[i] === '--list') { listMode = true; } + } + + if (listMode) { + if (!fs.existsSync(DEPRECATED_DIR)) { + console.log('No deprecated skills.'); + return; + } + const dirs = fs.readdirSync(DEPRECATED_DIR, { withFileTypes: true }) + .filter(d => d.isDirectory()); + if (dirs.length === 0) { + console.log('No deprecated skills.'); + return; + } + console.log('## Deprecated Skills\n'); + for (const d of dirs) { + const marker = path.join(DEPRECATED_DIR, d.name, '.agent-created'); + const created = fs.existsSync(marker) + ? 
fs.readFileSync(marker, 'utf8').trim() + : 'unknown'; + console.log(`- ${d.name} (created: ${created})`); + } + return; + } + + if (!name) { + console.error('Usage: node deprecate-skill.js --name [--restore | --list]'); + process.exit(1); + } + + const skillDir = path.join(SKILLS_DIR, name); + const deprecatedSkillDir = path.join(DEPRECATED_DIR, name); + + if (restore) { + if (!fs.existsSync(deprecatedSkillDir)) { + console.error(`[DEPRECATE] No deprecated skill "${name}" found.`); + process.exit(1); + } + fs.mkdirSync(SKILLS_DIR, { recursive: true }); + fs.renameSync(deprecatedSkillDir, skillDir); + logChange({ + ts: new Date().toISOString(), + file: `skills/${name}`, + action: 'restore-skill', + reason: `Restored from deprecated`, + }); + console.log(`[DEPRECATE] Skill "${name}" restored.`); + return; + } + + // Deprecate + if (!fs.existsSync(skillDir)) { + console.error(`[DEPRECATE] Skill "${name}" not found.`); + process.exit(1); + } + + // Only allow deprecating agent-created skills + if (!fs.existsSync(path.join(skillDir, '.agent-created'))) { + console.error(`[DEPRECATE] BLOCKED: "${name}" is not an agent-created skill. Only agent-created skills can be deprecated.`); + process.exit(1); + } + + fs.mkdirSync(DEPRECATED_DIR, { recursive: true }); + fs.renameSync(skillDir, deprecatedSkillDir); + + logChange({ + ts: new Date().toISOString(), + file: `skills/${name}`, + action: 'deprecate-skill', + reason: `Skill deprecated`, + }); + + console.log(`[DEPRECATE] Skill "${name}" archived to .deprecated/`); +} + +main(); diff --git a/skills/self-modify/scripts/modify-cron.js b/skills/self-modify/scripts/modify-cron.js new file mode 100644 index 000000000..6bb288ff0 --- /dev/null +++ b/skills/self-modify/scripts/modify-cron.js @@ -0,0 +1,120 @@ +#!/usr/bin/env node +/** + * Modify Cron: Safely change cron schedules with guardrails. + * + * Usage: + * node modify-cron.js --name auto-study --every "12h" --message "new prompt..." 
+ * node modify-cron.js --name brain-memory --every "24h" --model "anthropic/claude-3-5-haiku-20241022" + * + * Guardrails: + * - Only allowlisted crons can be modified + * - Minimum interval: 6h + * - Changes logged to changelog + */ + +const { execSync } = require('child_process'); +const fs = require('fs'); +const path = require('path'); + +const HISTORY_DIR = '/root/clawd/.modification-history'; +const CHANGELOG_FILE = path.join(HISTORY_DIR, 'changelog.jsonl'); + +const ALLOWED_CRONS = ['auto-study', 'brain-memory', 'self-reflect']; +const MIN_INTERVAL_HOURS = 6; + +function parseInterval(interval) { + const match = interval.match(/^(\d+)h$/); + if (!match) return null; + return parseInt(match[1]); +} + +function logChange(entry) { + if (!fs.existsSync(HISTORY_DIR)) fs.mkdirSync(HISTORY_DIR, { recursive: true }); + fs.appendFileSync(CHANGELOG_FILE, JSON.stringify(entry) + '\n'); +} + +function getTokenFlag() { + return process.env.CLAWDBOT_GATEWAY_TOKEN + ? `--token ${process.env.CLAWDBOT_GATEWAY_TOKEN}` + : ''; +} + +function main() { + const args = process.argv.slice(2); + let name = null; + let every = null; + let message = null; + let model = null; + + for (let i = 0; i < args.length; i++) { + if (args[i] === '--name' && args[i + 1]) { name = args[i + 1]; i++; } + else if (args[i] === '--every' && args[i + 1]) { every = args[i + 1]; i++; } + else if (args[i] === '--message' && args[i + 1]) { message = args[i + 1]; i++; } + else if (args[i] === '--model' && args[i + 1]) { model = args[i + 1]; i++; } + } + + if (!name) { + console.error('Usage: node modify-cron.js --name [--every "24h"] [--message "..."] [--model "..."]'); + process.exit(1); + } + + if (!ALLOWED_CRONS.includes(name)) { + console.error(`[MODIFY-CRON] BLOCKED: "${name}" is not modifiable. 
Allowed: ${ALLOWED_CRONS.join(', ')}`); + process.exit(1); + } + + // Validate interval + if (every) { + const hours = parseInterval(every); + if (hours === null) { + console.error(`[MODIFY-CRON] Invalid interval format: "${every}". Use format like "24h".`); + process.exit(1); + } + if (hours < MIN_INTERVAL_HOURS) { + console.error(`[MODIFY-CRON] BLOCKED: Minimum interval is ${MIN_INTERVAL_HOURS}h. Requested: ${hours}h.`); + process.exit(1); + } + } + + const tokenFlag = getTokenFlag(); + + // Remove existing cron + try { + execSync(`openclaw cron remove --name "${name}" ${tokenFlag} 2>/dev/null`, { encoding: 'utf8' }); + console.log(`[MODIFY-CRON] Removed existing cron: ${name}`); + } catch { + console.log(`[MODIFY-CRON] No existing cron "${name}" to remove (OK)`); + } + + // Build new cron command + const parts = [ + 'openclaw cron add', + `--name "${name}"`, + `--every "${every || '24h'}"`, + '--session isolated', + '--thinking off', + ]; + + if (model) parts.push(`--model "${model}"`); + if (tokenFlag) parts.push(tokenFlag); + if (message) parts.push(`--message "${message.replace(/"/g, '\\"')}"`); + + const cmd = parts.join(' '); + + try { + execSync(cmd, { encoding: 'utf8', timeout: 15000 }); + console.log(`[MODIFY-CRON] OK: Cron "${name}" updated (every: ${every || '24h'})`); + } catch (err) { + console.error(`[MODIFY-CRON] Failed to register cron: ${err.message}`); + process.exit(1); + } + + logChange({ + ts: new Date().toISOString(), + file: `cron/${name}`, + action: 'modify-cron', + reason: `Updated: every=${every || '24h'}, model=${model || 'unchanged'}`, + }); +} + +main(); diff --git a/skills/self-modify/scripts/modify.js b/skills/self-modify/scripts/modify.js new file mode 100644 index 000000000..2067d1e02 --- /dev/null +++ b/skills/self-modify/scripts/modify.js @@ -0,0 +1,258 @@ +#!/usr/bin/env node +/** + * Self-Modify: Safe file modification with validation, backup, and changelog. 
+ * + * Usage: + * node modify.js --file HOT-MEMORY.md --content "new content" --reason "learned owner prefers dark mode" + * node modify.js --file warm-memory/crypto.md --content "..." --reason "updated crypto knowledge" + * node modify.js --file warm-memory/crypto.md --keywords "crypto,bitcoin,btc,๋ธ”๋ก์ฒด์ธ" --reason "set keywords" + * + * Guardrails: + * - Only whitelisted files can be modified + * - Token limits enforced per file type + * - Protected patterns in CLAUDE.md validated after write + * - Automatic backup before every write + * - All changes logged to changelog + */ + +const fs = require('fs'); +const path = require('path'); + +const WORKSPACE = '/root/clawd'; +const SKILLS_DIR = path.join(WORKSPACE, 'skills'); +const HISTORY_DIR = path.join(WORKSPACE, '.modification-history'); +const CHANGELOG_FILE = path.join(HISTORY_DIR, 'changelog.jsonl'); +const INDEX_FILE = path.join(SKILLS_DIR, 'memory-index.json'); + +// Approximate token count (chars / 4) +function estimateTokens(text) { + return Math.ceil((text || '').length / 4); +} + +// Files the agent is allowed to modify +const MUTABLE_FILES = { + 'HOT-MEMORY.md': { maxTokens: 500, path: path.join(SKILLS_DIR, 'HOT-MEMORY.md') }, + 'CLAUDE.md': { maxTokens: 800, path: path.join(SKILLS_DIR, 'CLAUDE.md'), protected: true }, + 'memory-index.json': { maxTokens: 400, path: INDEX_FILE }, +}; + +// Dynamically allow warm-memory files +function resolvePath(fileArg) { + // Direct match + if (MUTABLE_FILES[fileArg]) { + return { ...MUTABLE_FILES[fileArg], key: fileArg }; + } + + // warm-memory/* files + if (fileArg.startsWith('warm-memory/') && fileArg.endsWith('.md')) { + const fullPath = path.join(WORKSPACE, fileArg); + return { maxTokens: 600, path: fullPath, key: fileArg }; + } + + // Skills the agent created (check for .agent-created marker) + if (fileArg.startsWith('skills/') && fileArg.endsWith('/SKILL.md')) { + const skillDir = path.join(WORKSPACE, path.dirname(fileArg)); + const markerFile = 
path.join(skillDir, '.agent-created'); + if (fs.existsSync(markerFile)) { + return { maxTokens: 300, path: path.join(WORKSPACE, fileArg), key: fileArg }; + } + } + + return null; +} + +// Protected patterns that must exist in CLAUDE.md after modification +const PROTECTED_PATTERNS = [ + /๊ฐœ์ธ์ •๋ณด.*๊ณต์œ .*๊ธˆ์ง€/, + /ํ™•์ธ.*์•ˆ.*๋œ.*์ •๋ณด.*์‚ฌ์‹ค.*์ „๋‹ฌ/, + /๋น„์œค๋ฆฌ์ .*์š”์ฒญ.*๊ฑฐ์ ˆ/, +]; + +function validateProtectedContent(content) { + for (const pattern of PROTECTED_PATTERNS) { + if (!pattern.test(content)) { + return `Protected content missing: pattern ${pattern} not found. CLAUDE.md must retain all safety rules.`; + } + } + return null; +} + +// Blocked paths +function isBlocked(filePath) { + const blocked = [ + '/root/.openclaw', + '/root/.clawdbot', + 'prompt-guard', + 'credentials', + ]; + return blocked.some(b => filePath.includes(b)); +} + +function backup(filePath, key) { + if (!fs.existsSync(filePath)) return; + + if (!fs.existsSync(HISTORY_DIR)) { + fs.mkdirSync(HISTORY_DIR, { recursive: true }); + } + + const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); + const safeName = key.replace(/\//g, '__'); + const backupPath = path.join(HISTORY_DIR, `${safeName}-${timestamp}.bak`); + + fs.copyFileSync(filePath, backupPath); + + // Keep only last 20 backups per file + const prefix = safeName; + const backups = fs.readdirSync(HISTORY_DIR) + .filter(f => f.startsWith(prefix) && f.endsWith('.bak')) + .sort(); + while (backups.length > 20) { + fs.unlinkSync(path.join(HISTORY_DIR, backups.shift())); + } +} + +function logChange(entry) { + if (!fs.existsSync(HISTORY_DIR)) { + fs.mkdirSync(HISTORY_DIR, { recursive: true }); + } + fs.appendFileSync(CHANGELOG_FILE, JSON.stringify(entry) + '\n'); +} + +function getVersion(key) { + if (!fs.existsSync(CHANGELOG_FILE)) return 0; + let version = 0; + const lines = fs.readFileSync(CHANGELOG_FILE, 'utf8').split('\n').filter(Boolean); + for (const line of lines) { + try { + const entry = 
JSON.parse(line); + if (entry.file === key && entry.version > version) { + version = entry.version; + } + } catch { /* skip */ } + } + return version; +} + +function updateWarmMemoryIndex(fileArg, content, keywords) { + if (!fileArg.startsWith('warm-memory/')) return; + + const topicName = path.basename(fileArg, '.md'); + let index; + try { + index = JSON.parse(fs.readFileSync(INDEX_FILE, 'utf8')); + } catch { + index = { version: 1, topics: {}, maxTopics: 30 }; + } + + // Enforce max topics + const topicCount = Object.keys(index.topics).length; + if (!index.topics[topicName] && topicCount >= (index.maxTopics || 30)) { + console.error(`[MODIFY] Max topics (${index.maxTopics || 30}) reached. Prune old topics first.`); + return; + } + + const keywordList = keywords + ? keywords.split(',').map(k => k.trim()) + : (index.topics[topicName]?.keywords || [topicName]); + + index.topics[topicName] = { + file: fileArg, + tokens: estimateTokens(content), + keywords: keywordList, + lastAccess: new Date().toISOString().split('T')[0], + updated: new Date().toISOString().split('T')[0], + }; + + index.updated = new Date().toISOString().split('T')[0]; + fs.writeFileSync(INDEX_FILE, JSON.stringify(index, null, 2)); +} + +function main() { + const args = process.argv.slice(2); + let fileArg = null; + let content = null; + let reason = 'no reason provided'; + let keywords = null; + + for (let i = 0; i < args.length; i++) { + if (args[i] === '--file' && args[i + 1]) { fileArg = args[i + 1]; i++; } + else if (args[i] === '--content' && args[i + 1]) { content = args[i + 1]; i++; } + else if (args[i] === '--reason' && args[i + 1]) { reason = args[i + 1]; i++; } + else if (args[i] === '--keywords' && args[i + 1]) { keywords = args[i + 1]; i++; } + } + + if (!fileArg || content === null) { + console.error('Usage: node modify.js --file --content "..." --reason "..."'); + console.error(' node modify.js --file warm-memory/topic.md --content "..." 
--keywords "kw1,kw2"'); + process.exit(1); + } + + // Resolve and validate path + const resolved = resolvePath(fileArg); + if (!resolved) { + console.error(`[MODIFY] BLOCKED: "${fileArg}" is not in the mutable files allowlist.`); + console.error('Allowed: HOT-MEMORY.md, CLAUDE.md, memory-index.json, warm-memory/*.md, agent-created skills'); + process.exit(1); + } + + if (isBlocked(resolved.path)) { + console.error(`[MODIFY] BLOCKED: "${resolved.path}" is in a protected directory.`); + process.exit(1); + } + + // Check token limit + const tokens = estimateTokens(content); + if (tokens > resolved.maxTokens) { + console.error(`[MODIFY] REJECTED: Content is ~${tokens} tokens, max allowed for ${fileArg} is ${resolved.maxTokens}.`); + process.exit(1); + } + + // Validate protected content for CLAUDE.md + if (resolved.protected) { + const error = validateProtectedContent(content); + if (error) { + console.error(`[MODIFY] REJECTED: ${error}`); + process.exit(1); + } + } + + // Get current state for changelog + let tokensBefore = 0; + if (fs.existsSync(resolved.path)) { + tokensBefore = estimateTokens(fs.readFileSync(resolved.path, 'utf8')); + } + + // Backup existing file + backup(resolved.path, resolved.key); + + // Ensure parent directory exists + const parentDir = path.dirname(resolved.path); + if (!fs.existsSync(parentDir)) { + fs.mkdirSync(parentDir, { recursive: true }); + } + + // Write new content + fs.writeFileSync(resolved.path, content); + + // Update warm memory index if applicable + updateWarmMemoryIndex(fileArg, content, keywords); + + // Log change + const version = getVersion(resolved.key) + 1; + logChange({ + ts: new Date().toISOString(), + file: resolved.key, + action: tokensBefore === 0 ? 
'create' : 'update', + reason, + tokens_before: tokensBefore, + tokens_after: tokens, + version, + }); + + console.log(`[MODIFY] OK: ${fileArg} updated (v${version}, ${tokensBefore} โ†’ ${tokens} tokens)`); + if (keywords) { + console.log(`[MODIFY] Keywords set: ${keywords}`); + } +} + +main(); diff --git a/skills/self-modify/scripts/reflect.js b/skills/self-modify/scripts/reflect.js new file mode 100644 index 000000000..6a563e204 --- /dev/null +++ b/skills/self-modify/scripts/reflect.js @@ -0,0 +1,187 @@ +#!/usr/bin/env node +/** + * Self-Reflect: Data prep for the weekly reflection cron. + * + * Gathers stats about memory usage, modification history, and warm-memory access patterns. + * Outputs a structured report for the agent (Sonnet) to analyze and act on. + * + * Also includes the weekly brain-memory insight data (replaces separate brain-insights cron). + * + * Usage: + * node reflect.js + */ + +const fs = require('fs'); +const path = require('path'); + +const WORKSPACE = '/root/clawd'; +const SKILLS_DIR = path.join(WORKSPACE, 'skills'); +const WARM_DIR = path.join(WORKSPACE, 'warm-memory'); +const DAILY_DIR = path.join(WORKSPACE, 'brain-memory', 'daily'); +const INDEX_FILE = path.join(SKILLS_DIR, 'memory-index.json'); +const HOT_MEMORY_FILE = path.join(SKILLS_DIR, 'HOT-MEMORY.md'); +const CHANGELOG_FILE = path.join(WORKSPACE, '.modification-history', 'changelog.jsonl'); +const AGENTS_DIR = '/root/.openclaw/agents'; + +function estimateTokens(text) { + return Math.ceil((text || '').length / 4); +} + +function getHotMemoryStats() { + try { + if (fs.existsSync(HOT_MEMORY_FILE)) { + const content = fs.readFileSync(HOT_MEMORY_FILE, 'utf8'); + return { exists: true, tokens: estimateTokens(content), lines: content.split('\n').length }; + } + } catch { /* ignore */ } + return { exists: false, tokens: 0, lines: 0 }; +} + +function getWarmMemoryStats() { + try { + if (!fs.existsSync(INDEX_FILE)) return { topicCount: 0, topics: [] }; + const index = 
JSON.parse(fs.readFileSync(INDEX_FILE, 'utf8')); + const topics = Object.entries(index.topics || {}).map(([name, meta]) => ({ + name, + tokens: meta.tokens || 0, + lastAccess: meta.lastAccess || 'never', + keywords: meta.keywords || [], + })); + return { topicCount: topics.length, topics }; + } catch { + return { topicCount: 0, topics: [] }; + } +} + +function getChangelogStats() { + try { + if (!fs.existsSync(CHANGELOG_FILE)) return { total: 0, recent: [] }; + const entries = fs.readFileSync(CHANGELOG_FILE, 'utf8') + .split('\n') + .filter(Boolean) + .map(line => { try { return JSON.parse(line); } catch { return null; } }) + .filter(Boolean); + return { + total: entries.length, + recent: entries.slice(-10).map(e => `${e.ts} | ${e.file} | ${e.action} | ${e.reason}`), + }; + } catch { + return { total: 0, recent: [] }; + } +} + +function getDailySummaries() { + try { + if (!fs.existsSync(DAILY_DIR)) return []; + return fs.readdirSync(DAILY_DIR) + .filter(f => f.endsWith('.md')) + .sort() + .slice(-7) + .map(f => { + const content = fs.readFileSync(path.join(DAILY_DIR, f), 'utf8'); + return { date: f.replace('.md', ''), tokens: estimateTokens(content), content }; + }); + } catch { + return []; + } +} + +function getAgentCreatedSkills() { + try { + if (!fs.existsSync(SKILLS_DIR)) return []; + const skills = []; + for (const entry of fs.readdirSync(SKILLS_DIR, { withFileTypes: true })) { + if (entry.isDirectory()) { + const marker = path.join(SKILLS_DIR, entry.name, '.agent-created'); + if (fs.existsSync(marker)) { + const created = fs.readFileSync(marker, 'utf8').trim(); + skills.push({ name: entry.name, created }); + } + } + } + return skills; + } catch { + return []; + } +} + +function getConversationCount() { + try { + if (!fs.existsSync(AGENTS_DIR)) return 0; + let count = 0; + function scan(dir) { + for (const entry of fs.readdirSync(dir, { withFileTypes: true })) { + const full = path.join(dir, entry.name); + if (entry.isDirectory()) scan(full); + else if 
(entry.name.endsWith('.jsonl')) count++; + } + } + scan(AGENTS_DIR); + return count; + } catch { + return 0; + } +} + +function main() { + const hotMemory = getHotMemoryStats(); + const warmMemory = getWarmMemoryStats(); + const changelog = getChangelogStats(); + const dailySummaries = getDailySummaries(); + const agentSkills = getAgentCreatedSkills(); + const conversationCount = getConversationCount(); + + let report = `# Self-Reflection Report (${new Date().toISOString()})\n\n`; + + // Section 1: Stats overview + report += `## Stats\n`; + report += `- HOT-MEMORY.md: ${hotMemory.tokens} tokens, ${hotMemory.lines} lines\n`; + report += `- Warm memory topics: ${warmMemory.topicCount}\n`; + report += `- Total modifications: ${changelog.total}\n`; + report += `- Agent-created skills: ${agentSkills.length}\n`; + report += `- Total conversation files: ${conversationCount}\n\n`; + + // Section 2: Warm memory access patterns + if (warmMemory.topics.length > 0) { + report += `## Warm Memory Topics\n`; + const sorted = [...warmMemory.topics].sort((a, b) => { + if (a.lastAccess === 'never') return 1; + if (b.lastAccess === 'never') return -1; + return a.lastAccess.localeCompare(b.lastAccess); + }); + for (const t of sorted) { + report += `- **${t.name}** (${t.tokens} tok, last: ${t.lastAccess}) keywords: ${t.keywords.join(', ')}\n`; + } + report += '\n'; + } + + // Section 3: Agent-created skills + if (agentSkills.length > 0) { + report += `## Agent-Created Skills\n`; + for (const s of agentSkills) { + report += `- ${s.name} (created: ${s.created})\n`; + } + report += '\n'; + } + + // Section 4: Recent modifications + if (changelog.recent.length > 0) { + report += `## Recent Modifications\n`; + for (const line of changelog.recent) { + report += `- ${line}\n`; + } + report += '\n'; + } + + // Section 5: Weekly brain memory (replaces brain-insights) + if (dailySummaries.length > 0) { + report += `## This Week's Daily Summaries\n\n`; + for (const d of dailySummaries) { + report 
+= `### ${d.date} (${d.tokens} tok)\n${d.content}\n\n`; + } + } + + console.log(report); +} + +main(); diff --git a/skills/self-modify/scripts/rollback.js b/skills/self-modify/scripts/rollback.js new file mode 100644 index 000000000..4c93f5901 --- /dev/null +++ b/skills/self-modify/scripts/rollback.js @@ -0,0 +1,113 @@ +#!/usr/bin/env node +/** + * Rollback: Revert a file to a previous backup version. + * + * Usage: + * node rollback.js --file HOT-MEMORY.md # Revert to last backup + * node rollback.js --file HOT-MEMORY.md --version 3 # Revert to specific version + * node rollback.js --file HOT-MEMORY.md --list # List available backups + */ + +const fs = require('fs'); +const path = require('path'); + +const WORKSPACE = '/root/clawd'; +const SKILLS_DIR = path.join(WORKSPACE, 'skills'); +const HISTORY_DIR = path.join(WORKSPACE, '.modification-history'); +const CHANGELOG_FILE = path.join(HISTORY_DIR, 'changelog.jsonl'); + +const FILE_PATHS = { + 'HOT-MEMORY.md': path.join(SKILLS_DIR, 'HOT-MEMORY.md'), + 'CLAUDE.md': path.join(SKILLS_DIR, 'CLAUDE.md'), + 'memory-index.json': path.join(SKILLS_DIR, 'memory-index.json'), +}; + +function resolveFilePath(fileArg) { + if (FILE_PATHS[fileArg]) return FILE_PATHS[fileArg]; + if (fileArg.startsWith('warm-memory/')) return path.join(WORKSPACE, fileArg); + return null; +} + +function getBackups(fileArg) { + if (!fs.existsSync(HISTORY_DIR)) return []; + const safeName = fileArg.replace(/\//g, '__'); + return fs.readdirSync(HISTORY_DIR) + .filter(f => f.startsWith(safeName) && f.endsWith('.bak')) + .sort(); +} + +function main() { + const args = process.argv.slice(2); + let fileArg = null; + let version = null; + let listMode = false; + + for (let i = 0; i < args.length; i++) { + if (args[i] === '--file' && args[i + 1]) { fileArg = args[i + 1]; i++; } + else if (args[i] === '--version' && args[i + 1]) { version = parseInt(args[i + 1]); i++; } + else if (args[i] === '--list') { listMode = true; } + } + + if (!fileArg) { + 
console.error('Usage: node rollback.js --file [--version N | --list]'); + process.exit(1); + } + + const targetPath = resolveFilePath(fileArg); + if (!targetPath) { + console.error(`[ROLLBACK] Unknown file: ${fileArg}`); + process.exit(1); + } + + const backups = getBackups(fileArg); + + if (listMode) { + if (backups.length === 0) { + console.log(`No backups found for ${fileArg}`); + return; + } + console.log(`## Backups for ${fileArg} (${backups.length})\n`); + backups.forEach((b, i) => { + const stat = fs.statSync(path.join(HISTORY_DIR, b)); + console.log(`${i + 1}. ${b} (${stat.size} bytes)`); + }); + return; + } + + if (backups.length === 0) { + console.error(`[ROLLBACK] No backups available for ${fileArg}`); + process.exit(1); + } + + // Select backup + let backupFile; + if (version !== null) { + if (version < 1 || version > backups.length) { + console.error(`[ROLLBACK] Version ${version} not found. Available: 1-${backups.length}`); + process.exit(1); + } + backupFile = backups[version - 1]; + } else { + backupFile = backups[backups.length - 1]; // Latest + } + + const backupPath = path.join(HISTORY_DIR, backupFile); + const content = fs.readFileSync(backupPath, 'utf8'); + + // Restore + fs.writeFileSync(targetPath, content); + + // Log + const entry = { + ts: new Date().toISOString(), + file: fileArg, + action: 'rollback', + reason: `Reverted to backup: ${backupFile}`, + tokens_after: Math.ceil(content.length / 4), + }; + fs.appendFileSync(CHANGELOG_FILE, JSON.stringify(entry) + '\n'); + + console.log(`[ROLLBACK] ${fileArg} reverted to: ${backupFile}`); +} + +main(); diff --git a/skills/web-researcher/SKILL.md b/skills/web-researcher/SKILL.md new file mode 100644 index 000000000..36c72f7d1 --- /dev/null +++ b/skills/web-researcher/SKILL.md @@ -0,0 +1,11 @@ +--- +name: web-researcher +description: Web search (Serper API) and autonomous study sessions. 
+--- + +```bash +node /root/clawd/skills/web-researcher/scripts/research.js "query" +node /root/clawd/skills/web-researcher/scripts/study-session.js [--topic X] [--compact] +``` + +Topics: `topics.default.json`. Study material from user โ†’ read, summarize, store. diff --git a/skills/web-researcher/scripts/research.js b/skills/web-researcher/scripts/research.js new file mode 100644 index 000000000..80c481bd8 --- /dev/null +++ b/skills/web-researcher/scripts/research.js @@ -0,0 +1,164 @@ +#!/usr/bin/env node +/** + * Web Research Script - Search the web using Serper (Google Search) API + * + * Usage: node research.js "search query" [--num 5] [--fetch] + * --num N Number of results (default: 5) + * --fetch Also fetch and extract text from top 3 result URLs + * + * Requires: SERPER_API_KEY environment variable + */ + +const https = require('https'); +const http = require('http'); + +const SERPER_API_KEY = process.env.SERPER_API_KEY; +const SERPER_URL = 'https://google.serper.dev/search'; + +function httpRequest(url, options = {}) { + return new Promise((resolve, reject) => { + const timeout = options.timeout || 10000; + const parsedUrl = new URL(url); + const mod = parsedUrl.protocol === 'https:' ? 
https : http;
+
+    const req = mod.request(parsedUrl, {
+      method: options.method || 'GET',
+      headers: options.headers || {},
+      timeout,
+    }, (res) => {
+      let data = '';
+      res.on('data', chunk => data += chunk);
+      res.on('end', () => resolve({ status: res.statusCode, data, headers: res.headers }));
+    });
+
+    req.on('error', reject);
+    req.on('timeout', () => { req.destroy(); reject(new Error('Request timeout')); });
+
+    if (options.body) req.write(options.body);
+    req.end();
+  });
+}
+
+async function serperSearch(query, num = 5) {
+  if (!SERPER_API_KEY) {
+    throw new Error('SERPER_API_KEY environment variable not set');
+  }
+
+  const res = await httpRequest(SERPER_URL, {
+    method: 'POST',
+    headers: {
+      'X-API-KEY': SERPER_API_KEY,
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify({ q: query, num }),
+    timeout: 15000,
+  });
+
+  if (res.status !== 200) {
+    throw new Error(`Serper API error: ${res.status} ${res.data}`);
+  }
+
+  return JSON.parse(res.data);
+}
+
+function stripHtml(html) {
+  // Remove script and style blocks
+  let text = html.replace(/<script[^>]*>[\s\S]*?<\/script>/gi, '');
+  text = text.replace(/<style[^>]*>[\s\S]*?<\/style>/gi, '');
+  // Remove HTML tags
+  text = text.replace(/<[^>]+>/g, ' ');
+  // Decode common entities
+  text = text.replace(/&amp;/g, '&').replace(/&lt;/g, '<').replace(/&gt;/g, '>');
+  text = text.replace(/&quot;/g, '"').replace(/&#39;/g, "'").replace(/&nbsp;/g, ' ');
+  // Collapse whitespace
+  text = text.replace(/\s+/g, ' ').trim();
+  return text;
+}
+
+async function fetchPageContent(url, maxChars = 2000) {
+  try {
+    const res = await httpRequest(url, { timeout: 8000 });
+    if (res.status === 301 || res.status === 302) {
+      const location = res.headers.location;
+      if (location) return fetchPageContent(location, maxChars);
+    }
+    if (res.status !== 200) return null;
+
+    const text = stripHtml(res.data);
+    return text.substring(0, maxChars);
+  } catch {
+    return null;
+  }
+}
+
+async function main() {
+  const args = process.argv.slice(2);
+  let query = '';
+  
let num = 5; + let shouldFetch = false; + + for (let i = 0; i < args.length; i++) { + if (args[i] === '--num' && args[i + 1]) { + num = parseInt(args[i + 1], 10); + i++; + } else if (args[i] === '--fetch') { + shouldFetch = true; + } else if (!query) { + query = args[i]; + } + } + + if (!query) { + console.error('Usage: node research.js "search query" [--num 5] [--fetch]'); + process.exit(1); + } + + const searchData = await serperSearch(query, num); + + const results = []; + const organic = searchData.organic || []; + + for (let i = 0; i < organic.length; i++) { + const item = organic[i]; + const result = { + title: item.title, + url: item.link, + snippet: item.snippet || '', + }; + + // Fetch full content for top 3 results if --fetch flag + if (shouldFetch && i < 3) { + const content = await fetchPageContent(item.link); + if (content) result.content = content; + } + + results.push(result); + } + + // Include knowledge graph if available + let knowledgeGraph = null; + if (searchData.knowledgeGraph) { + const kg = searchData.knowledgeGraph; + knowledgeGraph = { + title: kg.title, + type: kg.type, + description: kg.description, + }; + } + + const output = { + query, + timestamp: new Date().toISOString(), + resultCount: results.length, + results, + }; + + if (knowledgeGraph) output.knowledgeGraph = knowledgeGraph; + + console.log(JSON.stringify(output, null, 2)); +} + +main().catch(err => { + console.error(`[ERROR] ${err.message}`); + process.exit(1); +}); diff --git a/skills/web-researcher/scripts/study-session.js b/skills/web-researcher/scripts/study-session.js new file mode 100644 index 000000000..8f4f0e195 --- /dev/null +++ b/skills/web-researcher/scripts/study-session.js @@ -0,0 +1,178 @@ +#!/usr/bin/env node +/** + * Autonomous Study Session - Picks a topic, researches it, and outputs a study report + * + * Usage: + * node study-session.js # Auto-pick next topic (round-robin) + * node study-session.js --topic crypto-market # Study specific topic + * node 
study-session.js --all # Study all topics + * + * Requires: SERPER_API_KEY environment variable + * + * The script outputs a formatted study report to stdout that can be stored + * in the agent's memory system. + */ + +const { execSync } = require('child_process'); +const fs = require('fs'); +const path = require('path'); + +const SCRIPT_DIR = path.dirname(__filename); +const RESEARCH_SCRIPT = path.join(SCRIPT_DIR, 'research.js'); +const DEFAULT_TOPICS = path.join(SCRIPT_DIR, '..', 'topics.default.json'); +const MEMORY_TOPICS = '/root/clawd/clawd-memory/study-topics.json'; +const STATE_FILE = '/root/clawd/.study-state.json'; + +function loadTopics() { + // Prefer memory repo topics, fall back to default + const topicsPath = fs.existsSync(MEMORY_TOPICS) ? MEMORY_TOPICS : DEFAULT_TOPICS; + const data = JSON.parse(fs.readFileSync(topicsPath, 'utf8')); + return data.topics || []; +} + +function loadState() { + try { + if (fs.existsSync(STATE_FILE)) { + return JSON.parse(fs.readFileSync(STATE_FILE, 'utf8')); + } + } catch { /* ignore */ } + return { lastIndex: -1, lastStudied: {} }; +} + +function saveState(state) { + try { + fs.writeFileSync(STATE_FILE, JSON.stringify(state, null, 2)); + } catch (err) { + console.error(`[WARN] Could not save state: ${err.message}`); + } +} + +function runResearch(query) { + try { + const result = execSync( + `node "${RESEARCH_SCRIPT}" "${query.replace(/"/g, '\\"')}" --fetch`, + { encoding: 'utf8', timeout: 30000 } + ); + return JSON.parse(result); + } catch (err) { + console.error(`[WARN] Research failed for "${query}": ${err.message}`); + return null; + } +} + +function formatStudyReport(topic, researchResults, compact) { + const timestamp = new Date().toISOString(); + const date = new Date().toLocaleDateString('ko-KR', { timeZone: 'Asia/Seoul' }); + const time = new Date().toLocaleTimeString('ko-KR', { timeZone: 'Asia/Seoul', hour: '2-digit', minute: '2-digit' }); + + if (compact) { + // Compact: JSON with top 1 result per query, 
limited snippet length + const items = []; + for (const research of researchResults) { + if (!research) continue; + const topResult = (research.results || [])[0]; + items.push({ + q: research.query, + kg: research.knowledgeGraph ? research.knowledgeGraph.description?.slice(0, 200) : null, + top: topResult ? { t: topResult.title, s: topResult.snippet?.slice(0, 300) } : null, + }); + } + const report = JSON.stringify({ topic: topic.name, date, items }, null, 2); + return { report, timestamp, topic: topic.name }; + } + + let report = `## Auto-Study: ${topic.name} (${date} ${time})\n\n`; + + for (const research of researchResults) { + if (!research) continue; + report += `### "${research.query}"\n\n`; + + if (research.knowledgeGraph) { + const kg = research.knowledgeGraph; + report += `**${kg.title}** (${kg.type || 'info'}): ${kg.description || ''}\n\n`; + } + + for (const result of (research.results || []).slice(0, 3)) { + report += `- **${result.title}**: ${result.snippet}`; + if (result.url) report += ` ([link](${result.url}))`; + report += '\n'; + } + report += '\n'; + } + + report += `---\n_Auto-studied at ${timestamp}_\n`; + + return { report, timestamp, topic: topic.name }; +} + +async function main() { + const args = process.argv.slice(2); + let targetTopic = null; + let studyAll = false; + let compactMode = false; + + for (let i = 0; i < args.length; i++) { + if (args[i] === '--topic' && args[i + 1]) { + targetTopic = args[i + 1]; + i++; + } else if (args[i] === '--all') { + studyAll = true; + } else if (args[i] === '--compact') { + compactMode = true; + } + } + + const topics = loadTopics(); + if (topics.length === 0) { + console.error('[ERROR] No topics configured'); + process.exit(1); + } + + const state = loadState(); + let topicsToStudy = []; + + if (studyAll) { + topicsToStudy = topics; + } else if (targetTopic) { + const found = topics.find(t => t.name === targetTopic); + if (!found) { + console.error(`[ERROR] Topic "${targetTopic}" not found. 
Available: ${topics.map(t => t.name).join(', ')}`); + process.exit(1); + } + topicsToStudy = [found]; + } else { + // Round-robin: pick next topic + const nextIndex = (state.lastIndex + 1) % topics.length; + topicsToStudy = [topics[nextIndex]]; + state.lastIndex = nextIndex; + } + + const allReports = []; + + for (const topic of topicsToStudy) { + console.error(`[STUDY] Researching topic: ${topic.name}`); + + const researchResults = []; + for (const query of topic.queries) { + console.error(`[STUDY] Searching: "${query}"`); + const result = runResearch(query); + researchResults.push(result); + } + + const { report, timestamp } = formatStudyReport(topic, researchResults, compactMode); + allReports.push(report); + + state.lastStudied[topic.name] = timestamp; + console.error(`[STUDY] Completed topic: ${topic.name}`); + } + + saveState(state); + + // Output the combined report to stdout + console.log(allReports.join('\n')); +} + +main().catch(err => { + console.error(`[ERROR] ${err.message}`); + process.exit(1); +}); diff --git a/skills/web-researcher/topics.default.json b/skills/web-researcher/topics.default.json new file mode 100644 index 000000000..a6ffe28f9 --- /dev/null +++ b/skills/web-researcher/topics.default.json @@ -0,0 +1,28 @@ +{ + "topics": [ + { + "name": "crypto-market", + "queries": [ + "cryptocurrency market news today bitcoin ethereum" + ] + }, + { + "name": "ai-news", + "queries": [ + "AI artificial intelligence latest news Claude Anthropic OpenAI" + ] + }, + { + "name": "tech-trends", + "queries": [ + "technology software engineering trends 2026" + ] + }, + { + "name": "korea-tech", + "queries": [ + "ํ•œ๊ตญ IT ์Šคํƒ€ํŠธ์—… ๊ธฐ์ˆ  ๋‰ด์Šค" + ] + } + ] +} diff --git a/src/config.ts b/src/config.ts index c921b95ab..ce1ffca01 100644 --- a/src/config.ts +++ b/src/config.ts @@ -15,3 +15,15 @@ export const STARTUP_TIMEOUT_MS = 180_000; export function getR2BucketName(env?: { R2_BUCKET_NAME?: string }): string { return env?.R2_BUCKET_NAME || 'moltbot-data'; 
} + +/** OpenClaw config directory inside the container */ +export const OPENCLAW_CONFIG_DIR = '/root/.openclaw'; + +/** Workspace directory inside the container */ +export const CLAWD_DIR = '/root/clawd'; + +/** Model IDs used for cron jobs */ +export const CRON_MODELS = { + fast: 'anthropic/claude-3-5-haiku-20241022', + standard: 'anthropic/claude-sonnet-4-5-20250929', +} as const; diff --git a/src/gateway/env.test.ts b/src/gateway/env.test.ts index 89af2efb8..09eced6b4 100644 --- a/src/gateway/env.test.ts +++ b/src/gateway/env.test.ts @@ -158,4 +158,30 @@ describe('buildEnvVars', () => { TELEGRAM_BOT_TOKEN: 'tg', }); }); + + + it('includes Google Calendar credentials when set', () => { + const env = createMockEnv({ + GOOGLE_CLIENT_ID: 'test-client-id', + GOOGLE_CLIENT_SECRET: 'test-client-secret', + GOOGLE_REFRESH_TOKEN: 'test-refresh-token', + GOOGLE_CALENDAR_ID: 'user@gmail.com', + }); + const result = buildEnvVars(env); + + expect(result.GOOGLE_CLIENT_ID).toBe('test-client-id'); + expect(result.GOOGLE_CLIENT_SECRET).toBe('test-client-secret'); + expect(result.GOOGLE_REFRESH_TOKEN).toBe('test-refresh-token'); + expect(result.GOOGLE_CALENDAR_ID).toBe('user@gmail.com'); + }); + + it('omits Google Calendar credentials when not set', () => { + const env = createMockEnv(); + const result = buildEnvVars(env); + + expect(result.GOOGLE_CLIENT_ID).toBeUndefined(); + expect(result.GOOGLE_CLIENT_SECRET).toBeUndefined(); + expect(result.GOOGLE_REFRESH_TOKEN).toBeUndefined(); + expect(result.GOOGLE_CALENDAR_ID).toBeUndefined(); + }); }); diff --git a/src/gateway/env.ts b/src/gateway/env.ts index d9e01171b..47ad877b8 100644 --- a/src/gateway/env.ts +++ b/src/gateway/env.ts @@ -49,6 +49,43 @@ export function buildEnvVars(env: MoltbotEnv): Record { if (env.CF_ACCOUNT_ID) envVars.CF_ACCOUNT_ID = env.CF_ACCOUNT_ID; if (env.CDP_SECRET) envVars.CDP_SECRET = env.CDP_SECRET; if (env.WORKER_URL) envVars.WORKER_URL = env.WORKER_URL; + if (env.BRAVE_API_KEY) envVars.BRAVE_API_KEY = 
env.BRAVE_API_KEY; + if (env.SERPER_API_KEY) envVars.SERPER_API_KEY = env.SERPER_API_KEY; + + // Claude Max OAuth token - map to both CLAUDE_ACCESS_TOKEN and ANTHROPIC_API_KEY + if (env.CLAUDE_ACCESS_TOKEN) { + envVars.CLAUDE_ACCESS_TOKEN = env.CLAUDE_ACCESS_TOKEN; + // Also set as ANTHROPIC_API_KEY so OpenClaw can use it + if (!envVars.ANTHROPIC_API_KEY) { + envVars.ANTHROPIC_API_KEY = env.CLAUDE_ACCESS_TOKEN; + } + } + if (env.CLAUDE_REFRESH_TOKEN) envVars.CLAUDE_REFRESH_TOKEN = env.CLAUDE_REFRESH_TOKEN; + + // GitHub repo auto-clone on startup + if (env.GITHUB_REPO_URL) envVars.GITHUB_REPO_URL = env.GITHUB_REPO_URL; + if (env.GITHUB_TOKEN) envVars.GITHUB_TOKEN = env.GITHUB_TOKEN; + if (env.GITHUB_PAT) envVars.GITHUB_PAT = env.GITHUB_PAT; + + // Telegram owner auto-allowlist on startup + if (env.TELEGRAM_OWNER_ID) envVars.TELEGRAM_OWNER_ID = env.TELEGRAM_OWNER_ID; + + // Google Calendar OAuth 2.0 credentials + if (env.GOOGLE_CLIENT_ID) envVars.GOOGLE_CLIENT_ID = env.GOOGLE_CLIENT_ID; + if (env.GOOGLE_CLIENT_SECRET) envVars.GOOGLE_CLIENT_SECRET = env.GOOGLE_CLIENT_SECRET; + if (env.GOOGLE_REFRESH_TOKEN) envVars.GOOGLE_REFRESH_TOKEN = env.GOOGLE_REFRESH_TOKEN; + if (env.GOOGLE_CALENDAR_ID) envVars.GOOGLE_CALENDAR_ID = env.GOOGLE_CALENDAR_ID; + + // Node host device identity for pre-seeded pairing (workaround for openclaw#4833) + if (env.NODE_DEVICE_ID) envVars.NODE_DEVICE_ID = env.NODE_DEVICE_ID; + if (env.NODE_DEVICE_PUBLIC_KEY) envVars.NODE_DEVICE_PUBLIC_KEY = env.NODE_DEVICE_PUBLIC_KEY; + if (env.NODE_DEVICE_DISPLAY_NAME) envVars.NODE_DEVICE_DISPLAY_NAME = env.NODE_DEVICE_DISPLAY_NAME; + + // GitHub Copilot token for OpenClaw model auth + if (env.GITHUB_COPILOT_TOKEN) envVars.GITHUB_COPILOT_TOKEN = env.GITHUB_COPILOT_TOKEN; + + // Google AI API key for embeddings (memory_search) + if (env.GOOGLE_AI_API_KEY) envVars.GOOGLE_AI_API_KEY = env.GOOGLE_AI_API_KEY; // R2 persistence credentials (used by rclone in start-openclaw.sh) if (env.R2_ACCESS_KEY_ID) 
envVars.R2_ACCESS_KEY_ID = env.R2_ACCESS_KEY_ID; diff --git a/src/gateway/index.ts b/src/gateway/index.ts index b54f1a0d8..d0b7f8d2c 100644 --- a/src/gateway/index.ts +++ b/src/gateway/index.ts @@ -1,4 +1,6 @@ -export { ensureMoltbotGateway, findExistingMoltbotProcess } from './process'; -export { waitForProcess } from './utils'; +export { buildEnvVars } from './env'; +export { ensureMoltbotGateway, findExistingMoltbotProcess, ensureMoltbotGatewayWithRecovery, isGatewayProcess, GATEWAY_COMMANDS, getLastGatewayStartTime } from './process'; +export { waitForProcess, runCommand, cleanupExitedProcesses } from './utils'; export { ensureRcloneConfig } from './r2'; export { syncToR2 } from './sync'; + diff --git a/src/gateway/process.ts b/src/gateway/process.ts index 571719e3b..ddad2fbe3 100644 --- a/src/gateway/process.ts +++ b/src/gateway/process.ts @@ -4,6 +4,28 @@ import { MOLTBOT_PORT, STARTUP_TIMEOUT_MS } from '../config'; import { buildEnvVars } from './env'; import { ensureRcloneConfig } from './r2'; +/** Commands that identify a gateway process (vs CLI commands) */ +export const GATEWAY_COMMANDS = ['start-openclaw.sh', 'start-moltbot.sh', 'clawdbot gateway', 'openclaw gateway']; + +/** Check if a command string is a gateway process */ +export function isGatewayProcess(command: string): boolean { + return GATEWAY_COMMANDS.some(cmd => command.includes(cmd)); +} + +// Auto-recovery configuration +const MAX_RECOVERY_ATTEMPTS = 3; +const RECOVERY_COOLDOWN_MS = 30_000; // 30s minimum between recovery cycles +let recoveryAttempts = 0; +let lastRecoveryTime = 0; + +// Track when a gateway process was last started (for cron grace period) +let lastGatewayStartTime = 0; + +/** Get the timestamp of when the last gateway process was started */ +export function getLastGatewayStartTime(): number { + return lastGatewayStartTime; +} + /** * Find an existing OpenClaw gateway process * @@ -14,14 +36,6 @@ export async function findExistingMoltbotProcess(sandbox: Sandbox): Promise { 
+ try { + return await ensureMoltbotGateway(sandbox, env); + } catch (error) { + const now = Date.now(); + + // Reset attempts after cooldown period + if (now - lastRecoveryTime > RECOVERY_COOLDOWN_MS) { + recoveryAttempts = 0; + } + + if (recoveryAttempts < MAX_RECOVERY_ATTEMPTS) { + recoveryAttempts++; + lastRecoveryTime = now; + + console.log(`[Recovery] Attempt ${recoveryAttempts}/${MAX_RECOVERY_ATTEMPTS} after error:`, error); + + // Exponential backoff: 2s, 4s, 8s + const waitTime = Math.pow(2, recoveryAttempts) * 1000; + console.log(`[Recovery] Waiting ${waitTime}ms before retry...`); + await new Promise(r => setTimeout(r, waitTime)); + + // Kill any stuck processes + const stuck = await findExistingMoltbotProcess(sandbox); + if (stuck) { + console.log('[Recovery] Killing stuck process:', stuck.id); + try { + await stuck.kill(); + } catch (killErr) { + console.log('[Recovery] Kill failed:', killErr); + } + } + + // Retry + return await ensureMoltbotGateway(sandbox, env); + } + + throw new Error(`Gateway failed after ${MAX_RECOVERY_ATTEMPTS} recovery attempts: ${error}`); + } +} diff --git a/src/gateway/r2.ts b/src/gateway/r2.ts index a506654e3..d6119703c 100644 --- a/src/gateway/r2.ts +++ b/src/gateway/r2.ts @@ -7,7 +7,7 @@ const CONFIGURED_FLAG = '/tmp/.rclone-configured'; /** * Ensure rclone is configured in the container for R2 access. - * Idempotent โ€” checks for a flag file to skip re-configuration. + * Idempotent -- checks for a flag file to skip re-configuration. 
* * @returns true if rclone is configured, false if credentials are missing */ diff --git a/src/gateway/sync.test.ts b/src/gateway/sync.test.ts index 054bcd3ec..98223ce2c 100644 --- a/src/gateway/sync.test.ts +++ b/src/gateway/sync.test.ts @@ -14,9 +14,10 @@ describe('syncToR2', () => { }); describe('configuration checks', () => { - it('returns error when R2 is not configured', async () => { + it('returns error when R2 is not configured and no bucket binding', async () => { const { sandbox } = createMockSandbox(); - const env = createMockEnv(); + // No R2 credentials AND no bucket binding + const env = createMockEnv({ MOLTBOT_BUCKET: undefined as any }); const result = await syncToR2(sandbox, env); diff --git a/src/gateway/sync.ts b/src/gateway/sync.ts index 99a2f6498..8721d5220 100644 --- a/src/gateway/sync.ts +++ b/src/gateway/sync.ts @@ -8,6 +8,7 @@ export interface SyncResult { lastSync?: string; error?: string; details?: string; + method?: 's3fs' | 'r2-binding'; } const RCLONE_FLAGS = '--transfers=16 --fast-list --s3-no-check-bucket'; diff --git a/src/gateway/utils.ts b/src/gateway/utils.ts index a6f1618c1..5292e1112 100644 --- a/src/gateway/utils.ts +++ b/src/gateway/utils.ts @@ -2,6 +2,14 @@ * Shared utilities for gateway operations */ +import type { Sandbox } from '@cloudflare/sandbox'; +import { isGatewayProcess } from './process'; + +export interface CommandResult { + stdout: string; + stderr: string; +} + /** * Wait for a sandbox process to complete * @@ -25,3 +33,42 @@ export async function waitForProcess( attempts++; } } + +/** + * Run a command in the sandbox, wait for completion, get logs, and kill the process. + * This prevents zombie process accumulation. 
+ */ +export async function runCommand( + sandbox: Sandbox, + command: string, + timeoutMs: number = 15000 +): Promise { + const proc = await sandbox.startProcess(command); + await waitForProcess(proc, timeoutMs); + const logs = await proc.getLogs(); + // Kill the process to free it from the process table + try { await proc.kill(); } catch { /* already exited */ } + return { + stdout: logs.stdout || '', + stderr: logs.stderr || '', + }; +} + +/** + * Clean up exited processes from the sandbox process table. + * Kills all processes that are not the gateway and are no longer running. + */ +export async function cleanupExitedProcesses(sandbox: Sandbox): Promise { + let cleaned = 0; + try { + const processes = await sandbox.listProcesses(); + for (const proc of processes) { + if (!isGatewayProcess(proc.command) && proc.status !== 'running' && proc.status !== 'starting') { + try { await proc.kill(); cleaned++; } catch { /* ignore */ } + } + } + } catch (e) { + console.log('[cleanup] Error cleaning processes:', e); + } + return cleaned; +} diff --git a/src/index.ts b/src/index.ts index 3a615dd61..d37eb472b 100644 --- a/src/index.ts +++ b/src/index.ts @@ -24,9 +24,9 @@ import { Hono } from 'hono'; import { getSandbox, Sandbox, type SandboxOptions } from '@cloudflare/sandbox'; import type { AppEnv, MoltbotEnv } from './types'; -import { MOLTBOT_PORT } from './config'; +import { MOLTBOT_PORT, STARTUP_TIMEOUT_MS } from './config'; import { createAccessMiddleware } from './auth'; -import { ensureMoltbotGateway, findExistingMoltbotProcess } from './gateway'; +import { ensureMoltbotGateway, findExistingMoltbotProcess, syncToR2, cleanupExitedProcesses, getLastGatewayStartTime } from './gateway'; import { publicRoutes, api, adminUi, debug, cdp } from './routes'; import { redactSensitiveParams } from './utils/logging'; import loadingPageHtml from './assets/loading.html'; @@ -81,10 +81,12 @@ function validateRequiredEnv(env: MoltbotEnv): string[] { const hasLegacyGateway = 
!!(env.AI_GATEWAY_API_KEY && env.AI_GATEWAY_BASE_URL); const hasAnthropicKey = !!env.ANTHROPIC_API_KEY; const hasOpenAIKey = !!env.OPENAI_API_KEY; + const hasClaudeToken = !!env.CLAUDE_ACCESS_TOKEN; + const hasCopilotToken = !!env.GITHUB_COPILOT_TOKEN; - if (!hasCloudflareGateway && !hasLegacyGateway && !hasAnthropicKey && !hasOpenAIKey) { + if (!hasCloudflareGateway && !hasLegacyGateway && !hasAnthropicKey && !hasOpenAIKey && !hasClaudeToken && !hasCopilotToken) { missing.push( - 'ANTHROPIC_API_KEY, OPENAI_API_KEY, or CLOUDFLARE_AI_GATEWAY_API_KEY + CF_AI_GATEWAY_ACCOUNT_ID + CF_AI_GATEWAY_GATEWAY_ID', + 'ANTHROPIC_API_KEY, OPENAI_API_KEY, CLAUDE_ACCESS_TOKEN, GITHUB_COPILOT_TOKEN, or CLOUDFLARE_AI_GATEWAY_API_KEY + CF_AI_GATEWAY_ACCOUNT_ID + CF_AI_GATEWAY_GATEWAY_ID', ); } @@ -121,7 +123,7 @@ const app = new Hono(); // MIDDLEWARE: Applied to ALL routes // ============================================================================= -// Middleware: Log every request +// Middleware: Log every request (compact) app.use('*', async (c, next) => { const url = new URL(c.req.url); const redactedSearch = redactSensitiveParams(url); @@ -196,7 +198,14 @@ app.use('*', async (c, next) => { }); // Middleware: Cloudflare Access authentication for protected routes +// Bypass CF Access for WebSocket connections with a valid gateway token (for openclaw node) app.use('*', async (c, next) => { + // Skip CF Access for WebSocket upgrades โ€” the container gateway handles its own auth + const isWebSocket = c.req.header('Upgrade')?.toLowerCase() === 'websocket'; + if (isWebSocket) { + return next(); + } + // Determine response type based on Accept header const acceptsHtml = c.req.header('Accept')?.includes('text/html'); const middleware = createAccessMiddleware({ @@ -255,28 +264,32 @@ app.all('*', async (c) => { return c.html(loadingPageHtml); } - // Ensure moltbot is running (this will wait for startup) - try { - await ensureMoltbotGateway(sandbox, c.env); - } catch (error) { - 
console.error('[PROXY] Failed to start Moltbot:', error); - const errorMessage = error instanceof Error ? error.message : 'Unknown error'; - - let hint = 'Check worker logs with: wrangler tail'; - if (!c.env.ANTHROPIC_API_KEY) { - hint = 'ANTHROPIC_API_KEY is not set. Run: wrangler secret put ANTHROPIC_API_KEY'; - } else if (errorMessage.includes('heap out of memory') || errorMessage.includes('OOM')) { - hint = 'Gateway ran out of memory. Try again or check for memory leaks.'; - } + // Ensure moltbot is running + // For WebSocket requests: skip the blocking waitForPort if gateway process is already running + // (waitForPort can block for up to 180s watching for a port transition that already happened, + // causing the Workers runtime to cancel the request) + if (isWebSocketRequest && isGatewayReady) { + console.log('[WS] Gateway already running, skipping ensureMoltbotGateway'); + } else { + try { + await ensureMoltbotGateway(sandbox, c.env); + } catch (error) { + console.error('[PROXY] Failed to start Moltbot:', error); + const errorMessage = error instanceof Error ? error.message : 'Unknown error'; + + let hint = 'Check worker logs with: wrangler tail'; + if (!c.env.ANTHROPIC_API_KEY) { + hint = 'ANTHROPIC_API_KEY is not set. Run: wrangler secret put ANTHROPIC_API_KEY'; + } else if (errorMessage.includes('heap out of memory') || errorMessage.includes('OOM')) { + hint = 'Gateway ran out of memory. Try again or check for memory leaks.'; + } - return c.json( - { + return c.json({ error: 'Moltbot gateway failed to start', details: errorMessage, hint, - }, - 503, - ); + }, 503); + } } // Proxy to Moltbot with WebSocket message interception @@ -444,6 +457,77 @@ app.all('*', async (c) => { }); }); +/** + * Scheduled handler for cron triggers. + * Runs health check and syncs moltbot config/state to R2. 
+ */ +async function scheduled( + _event: ScheduledEvent, + env: MoltbotEnv, + _ctx: ExecutionContext +): Promise { + const options = buildSandboxOptions(env); + const sandbox = getSandbox(env.Sandbox, 'moltbot', options); + + // Clean up zombie processes from previous cron runs + const cleaned = await cleanupExitedProcesses(sandbox); + if (cleaned > 0) { + console.log(`[cron] Cleaned up ${cleaned} exited processes`); + } + + // Health check: ensure the gateway is running and responding + console.log('[cron] Running health check...'); + let gatewayHealthy = false; + try { + const process = await findExistingMoltbotProcess(sandbox); + if (!process) { + console.log('[cron] Gateway not running, starting it...'); + await ensureMoltbotGateway(sandbox, env); + console.log('[cron] Gateway started successfully'); + gatewayHealthy = true; + } else { + console.log('[cron] Gateway process found:', process.id, 'status:', process.status); + + // Grace period: don't kill a gateway that was recently started (still initializing) + const timeSinceStart = Date.now() - getLastGatewayStartTime(); + if (process.status === 'starting' || timeSinceStart < STARTUP_TIMEOUT_MS) { + console.log(`[cron] Gateway recently started (${Math.round(timeSinceStart / 1000)}s ago) or still starting, skipping health check`); + // Don't mark as healthy yet -- it's still booting + } else { + // Try to ensure it's actually responding (use 30s timeout instead of 10s) + try { + await process.waitForPort(MOLTBOT_PORT, { mode: 'tcp', timeout: 30000 }); + console.log('[cron] Gateway is healthy and responding'); + gatewayHealthy = true; + } catch (e) { + console.log('[cron] Gateway not responding after 30s, restarting...'); + try { + await process.kill(); + } catch (killError) { + console.log('[cron] Could not kill process:', killError); + } + await ensureMoltbotGateway(sandbox, env); + console.log('[cron] Gateway restarted successfully'); + gatewayHealthy = true; + } + } + } + } catch (e) { + 
console.error('[cron] Health check failed:', e); + } + + // Backup sync to R2 (rclone handles continuous sync in container, this is a fallback) + console.log('[cron] Starting backup sync to R2...'); + const result = await syncToR2(sandbox, env); + + if (result.success) { + console.log('[cron] Backup sync completed successfully at', result.lastSync); + } else { + console.error('[cron] Backup sync failed:', result.error, result.details || ''); + } +} + export default { fetch: app.fetch, + scheduled, }; diff --git a/src/routes/api.ts b/src/routes/api.ts index 65cf62f8d..287ae3306 100644 --- a/src/routes/api.ts +++ b/src/routes/api.ts @@ -232,17 +232,32 @@ adminApi.get('/storage', async (c) => { }); }); -// POST /api/admin/storage/sync - Trigger a manual sync to R2 +// POST /api/admin/storage/sync - Trigger a manual sync to R2 with detailed response adminApi.post('/storage/sync', async (c) => { const sandbox = c.get('sandbox'); + const startTime = Date.now(); const result = await syncToR2(sandbox, c.env); + const duration = Date.now() - startTime; if (result.success) { + // Get backup size + let backupSize: string | undefined; + try { + const proc = await sandbox.startProcess(`du -sh /root/.openclaw 2>/dev/null | cut -f1`); + await waitForProcess(proc, 5000); + const logs = await proc.getLogs(); + backupSize = logs.stdout?.trim(); + } catch { + // Ignore errors getting size + } + return c.json({ success: true, message: 'Sync completed successfully', lastSync: result.lastSync, + duration: `${duration}ms`, + backupSize, }); } else { const status = result.error?.includes('not configured') ? 
400 : 500; @@ -251,12 +266,121 @@ adminApi.post('/storage/sync', async (c) => { success: false, error: result.error, details: result.details, + duration: `${duration}ms`, }, status, ); } }); +// GET /api/admin/conversations - List recent conversation sessions +adminApi.get('/conversations', async (c) => { + const sandbox = c.get('sandbox'); + + try { + await ensureMoltbotGateway(sandbox, c.env); + + // Find session files in OpenClaw agents directory + const proc = await sandbox.startProcess( + 'find /root/.openclaw/agents -name "*.jsonl" -type f -printf "%T@ %p\\n" 2>/dev/null | sort -rn | head -20' + ); + await waitForProcess(proc, 10000); + + const logs = await proc.getLogs(); + const files = (logs.stdout || '') + .split('\n') + .filter(Boolean) + .map(line => { + const spaceIdx = line.indexOf(' '); + const timestamp = line.substring(0, spaceIdx); + const path = line.substring(spaceIdx + 1); + const parts = path.split('/'); + const filename = parts[parts.length - 1]; + return { + id: filename.replace('.jsonl', ''), + path, + modified: new Date(parseFloat(timestamp) * 1000).toISOString(), + }; + }); + + return c.json({ conversations: files, count: files.length }); + } catch (error) { + return c.json({ error: String(error) }, 500); + } +}); + +// GET /api/admin/conversations/:id - Get a specific conversation +adminApi.get('/conversations/:id', async (c) => { + const id = c.req.param('id'); + const sandbox = c.get('sandbox'); + + try { + await ensureMoltbotGateway(sandbox, c.env); + + // Find and read the session file + const proc = await sandbox.startProcess( + `find /root/.openclaw/agents -name "${id}.jsonl" -type f -exec cat {} \\; 2>/dev/null | head -100` + ); + await waitForProcess(proc, 10000); + + const logs = await proc.getLogs(); + const content = logs.stdout || ''; + + if (!content.trim()) { + return c.json({ error: 'Conversation not found' }, 404); + } + + // Parse JSONL format (one JSON object per line) + const messages = content + .split('\n') + 
.filter(Boolean) + .map(line => { + try { + return JSON.parse(line); + } catch { + return null; + } + }) + .filter(Boolean); + + return c.json({ id, messages, count: messages.length }); + } catch (error) { + return c.json({ error: String(error) }, 500); + } +}); + +// GET /api/admin/skills - List installed skills +adminApi.get('/skills', async (c) => { + const sandbox = c.get('sandbox'); + + try { + // Find skill definition files + const proc = await sandbox.startProcess( + 'find /root/clawd/skills -maxdepth 2 \\( -name "SKILL.md" -o -name "CLAUDE.md" -o -name "skill.json" \\) 2>/dev/null' + ); + await waitForProcess(proc, 10000); + + const logs = await proc.getLogs(); + const skillFiles = (logs.stdout || '').split('\n').filter(Boolean); + + // Extract skill names from paths + const skillsMap = new Map(); + for (const path of skillFiles) { + const parts = path.split('/'); + const skillDir = parts[parts.length - 2]; // parent directory name + if (!skillsMap.has(skillDir)) { + skillsMap.set(skillDir, { name: skillDir, files: [] }); + } + skillsMap.get(skillDir)!.files.push(parts[parts.length - 1]); + } + + const skills = Array.from(skillsMap.values()); + return c.json({ skills, count: skills.length }); + } catch (error) { + return c.json({ error: String(error) }, 500); + } +}); + // POST /api/admin/gateway/restart - Kill the current gateway and start a new one adminApi.post('/gateway/restart', async (c) => { const sandbox = c.get('sandbox'); @@ -295,6 +419,33 @@ adminApi.post('/gateway/restart', async (c) => { } }); +// POST /api/admin/container/reset - Kill ALL processes to force container recreation +adminApi.post('/container/reset', async (c) => { + const sandbox = c.get('sandbox'); + + try { + const processes = await sandbox.listProcesses(); + let killed = 0; + + for (const p of processes) { + if (p.status === 'running' || p.status === 'starting') { + try { + await p.kill(); + killed++; + } catch {} + } + } + + return c.json({ + success: true, + message: `Killed 
${killed} processes out of ${processes.length} total. Container will reset on next request.`, + }); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : 'Unknown error'; + return c.json({ error: errorMessage }, 500); + } +}); + // Mount admin API routes under /admin api.route('/admin', adminApi); diff --git a/src/routes/debug.ts b/src/routes/debug.ts index 8ffc05bfb..9a8942100 100644 --- a/src/routes/debug.ts +++ b/src/routes/debug.ts @@ -354,6 +354,67 @@ debug.get('/env', async (c) => { }); }); +// GET /debug/disk - Show disk usage +debug.get('/disk', async (c) => { + const sandbox = c.get('sandbox'); + + try { + const proc = await sandbox.startProcess( + 'df -h / && echo "---" && du -sh /root/.openclaw /root/clawd /data/moltbot 2>/dev/null || true' + ); + + let attempts = 0; + while (attempts < 20) { + await new Promise(r => setTimeout(r, 500)); + if (proc.status !== 'running') break; + attempts++; + } + + const logs = await proc.getLogs(); + return c.json({ + output: logs.stdout || '', + errors: logs.stderr || '', + }); + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : 'Unknown error'; + return c.json({ error: errorMessage }, 500); + } +}); + +// POST /debug/gc - Trigger garbage collection (cleanup old data) +debug.post('/gc', async (c) => { + const sandbox = c.get('sandbox'); + + try { + // Clean up old log files, tmp files, and stale session locks + const cleanupCmd = ` + find /root -name "*.log" -mtime +7 -delete 2>/dev/null || true; + find /tmp -type f -mtime +1 -delete 2>/dev/null || true; + find /root/.openclaw -name "*.lock" -mmin +30 -delete 2>/dev/null || true; + echo "Cleanup complete" + `; + + const proc = await sandbox.startProcess(cleanupCmd); + + let attempts = 0; + while (attempts < 60) { + await new Promise(r => setTimeout(r, 500)); + if (proc.status !== 'running') break; + attempts++; + } + + const logs = await proc.getLogs(); + return c.json({ + success: logs.stdout?.includes('Cleanup complete') || false, + output: logs.stdout || '', + errors: logs.stderr || '', + }); + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : 'Unknown error'; + return c.json({ error: errorMessage }, 500); + } +}); + // GET /debug/container-config - Read the moltbot config from inside the container debug.get('/container-config', async (c) => { const sandbox = c.get('sandbox'); diff --git a/src/routes/public.ts b/src/routes/public.ts index c2f769c7d..1f4835ef9 100644 --- a/src/routes/public.ts +++ b/src/routes/public.ts @@ -2,6 +2,7 @@ import { Hono } from 'hono'; import type { AppEnv } from '../types'; import { MOLTBOT_PORT } from '../config'; import { findExistingMoltbotProcess } from '../gateway'; +import { waitForProcess, runCommand } from '../gateway/utils'; /** * Public routes - NO Cloudflare Access authentication required @@ -57,6 +58,123 @@ publicRoutes.get('/api/status', async (c) => { } }); +// GET /api/liveness - Detailed health check with timing +publicRoutes.get('/api/liveness', async (c) => { + const sandbox = c.get('sandbox'); + const startTime = Date.now(); + + const health: { + timestamp: string; + totalLatency: number; + healthy: boolean; + checks: { + gateway: { status: string; latency: number }; + r2: { status: string; latency: number }; + memory?: { usage: string; latency: number }; + crons?: { status: string; registered: string[]; missing: string[]; latency: number }; + uptime?: { seconds: number; latency: number }; + lastSync?: { timestamp: string | null; latency: number }; + }; + } = { + timestamp: new Date().toISOString(), + totalLatency: 0, + healthy: false, + checks: { + gateway: { status: 'unknown', latency: 0 }, + r2: { status: 'unknown', latency: 0 }, + }, + }; + + // Check gateway + const gwStart = Date.now(); + try { + const process = await findExistingMoltbotProcess(sandbox); + if (process) { + await process.waitForPort(MOLTBOT_PORT, { mode: 'tcp', timeout: 5000 }); + health.checks.gateway.status = 'healthy'; + } else { + health.checks.gateway.status = 'not_running'; + } + } catch { + health.checks.gateway.status = 'unhealthy'; + } + 
health.checks.gateway.latency = Date.now() - gwStart; + + // Check R2 (rclone) configuration + const r2Start = Date.now(); + try { + const proc = await sandbox.startProcess(`test -f /tmp/.rclone-configured && echo "configured"`); + await waitForProcess(proc, 5000); + const logs = await proc.getLogs(); + health.checks.r2.status = logs.stdout?.includes('configured') ? 'configured' : 'not_configured'; + } catch { + health.checks.r2.status = 'error'; + } + health.checks.r2.latency = Date.now() - r2Start; + + // Check memory usage + const memStart = Date.now(); + try { + const proc = await sandbox.startProcess('free -h | grep Mem | awk \'{print $3 "/" $2}\''); + await waitForProcess(proc, 5000); + const logs = await proc.getLogs(); + health.checks.memory = { + usage: logs.stdout?.trim() || 'unknown', + latency: Date.now() - memStart, + }; + } catch { + health.checks.memory = { usage: 'error', latency: Date.now() - memStart }; + } + + // Check cron jobs + const cronStart = Date.now(); + try { + const tokenFlag = c.env.MOLTBOT_GATEWAY_TOKEN ? `--token ${c.env.MOLTBOT_GATEWAY_TOKEN}` : ''; + const result = await runCommand(sandbox, `openclaw cron list ${tokenFlag} 2>/dev/null || echo ""`, 10000); + const output = result.stdout; + const expected = ['auto-study', 'brain-memory', 'self-reflect']; + const registered = expected.filter(name => output.includes(name)); + const missing = expected.filter(name => !output.includes(name)); + health.checks.crons = { + status: missing.length === 0 ? 
'all_registered' : 'partial', + registered, + missing, + latency: Date.now() - cronStart, + }; + } catch { + health.checks.crons = { status: 'error', registered: [], missing: [], latency: Date.now() - cronStart }; + } + + // Check container uptime + const uptimeStart = Date.now(); + try { + const result = await runCommand(sandbox, 'cat /proc/uptime 2>/dev/null | cut -d" " -f1', 5000); + health.checks.uptime = { + seconds: parseFloat(result.stdout.trim()) || 0, + latency: Date.now() - uptimeStart, + }; + } catch { + health.checks.uptime = { seconds: 0, latency: Date.now() - uptimeStart }; + } + + // Check last R2 sync time + const syncStart = Date.now(); + try { + const result = await runCommand(sandbox, `cat /tmp/.last-sync 2>/dev/null || echo ""`, 5000); + health.checks.lastSync = { + timestamp: result.stdout.trim() || null, + latency: Date.now() - syncStart, + }; + } catch { + health.checks.lastSync = { timestamp: null, latency: Date.now() - syncStart }; + } + + health.totalLatency = Date.now() - startTime; + health.healthy = health.checks.gateway.status === 'healthy'; + + return c.json(health, health.healthy ? 
200 : 503); +}); + // GET /_admin/assets/* - Admin UI static assets (CSS, JS need to load for login redirect) // Assets are built to dist/client with base "/_admin/" publicRoutes.get('/_admin/assets/*', async (c) => { diff --git a/src/types.ts b/src/types.ts index a85d32da3..119ed862b 100644 --- a/src/types.ts +++ b/src/types.ts @@ -42,6 +42,25 @@ export interface MoltbotEnv { BROWSER?: Fetcher; CDP_SECRET?: string; // Shared secret for CDP endpoint authentication WORKER_URL?: string; // Public URL of the worker (for CDP endpoint) + BRAVE_API_KEY?: string; // Brave Search API key for web search + SERPER_API_KEY?: string; // Serper (Google Search) API key for web research + CLAUDE_ACCESS_TOKEN?: string; // Claude Max OAuth access token + CLAUDE_REFRESH_TOKEN?: string; // Claude Max OAuth refresh token + GITHUB_REPO_URL?: string; // GitHub repo URL to clone on startup + GITHUB_TOKEN?: string; // GitHub personal access token for private repos + GITHUB_PAT?: string; // GitHub personal access token (fallback for GITHUB_TOKEN) + TELEGRAM_OWNER_ID?: string; // Telegram user ID to auto-allowlist on startup + // Google Calendar OAuth 2.0 credentials + GOOGLE_CLIENT_ID?: string; + GOOGLE_CLIENT_SECRET?: string; + GOOGLE_REFRESH_TOKEN?: string; + GOOGLE_CALENDAR_ID?: string; // Calendar ID (defaults to 'primary' in skill script) + // Node host device identity for pre-seeded pairing (workaround for openclaw#4833) + NODE_DEVICE_ID?: string; // Device ID from node's ~/.openclaw/identity/device.json + NODE_DEVICE_PUBLIC_KEY?: string; // Base64url-encoded public key from device.json + NODE_DEVICE_DISPLAY_NAME?: string; // Display name for the node (default: "Node Host") + GITHUB_COPILOT_TOKEN?: string; // GitHub Copilot OAuth token (ghu_...) 
for OpenClaw model auth + GOOGLE_AI_API_KEY?: string; // Google AI API key for embeddings (memory_search) } /** diff --git a/start-openclaw.sh b/start-openclaw.sh index c862a80ce..1a547dd83 100644 --- a/start-openclaw.sh +++ b/start-openclaw.sh @@ -1,13 +1,18 @@ #!/bin/bash -# Startup script for OpenClaw in Cloudflare Sandbox -# This script: -# 1. Restores config/workspace/skills from R2 via rclone (if configured) -# 2. Runs openclaw onboard --non-interactive to configure from env vars -# 3. Patches config for features onboard doesn't cover (channels, gateway auth) -# 4. Starts a background sync loop (rclone, watches for file changes) -# 5. Starts the gateway +# OpenClaw Startup Script - merged upstream rclone + custom crons/auth +# Based on upstream start-openclaw.sh with custom additions: +# - GitHub repo clone (clawd-memory) with PAT auth +# - GitHub Copilot model auth (GITHUB_TOKEN from GITHUB_COPILOT_TOKEN) +# - Google AI embeddings (GEMINI_API_KEY from GOOGLE_AI_API_KEY) +# - Git credential helper for workspace push +# - Cron restoration (restore-crons.js + auto-study/brain-memory/self-reflect) +# - Device pairing auto-approve loop +# - Gateway restart loop (crash recovery) +# - Calendar instructions injection +# - Telegram owner allowlist set -e +trap 'echo "[ERROR] Script failed at line $LINENO: $BASH_COMMAND" >&2' ERR if pgrep -f "openclaw gateway" > /dev/null 2>&1; then echo "OpenClaw gateway is already running, exiting." 
@@ -21,12 +26,24 @@ SKILLS_DIR="/root/clawd/skills" RCLONE_CONF="/root/.config/rclone/rclone.conf" LAST_SYNC_FILE="/tmp/.last-sync" -echo "Config directory: $CONFIG_DIR" +# Port check using Node.js (nc/netcat not installed) +port_open() { + node -e "require('net').createConnection({port:$2,host:'$1',timeout:2000}).on('connect',function(){process.exit(0)}).on('error',function(){process.exit(1)})" 2>/dev/null +} + +echo "============================================" +echo "Starting OpenClaw (rclone + custom crons)" +echo "============================================" + +# Export OPENCLAW_GATEWAY_TOKEN from legacy env var +if [ -n "${CLAWDBOT_GATEWAY_TOKEN:-}" ]; then + export OPENCLAW_GATEWAY_TOKEN="$CLAWDBOT_GATEWAY_TOKEN" +fi mkdir -p "$CONFIG_DIR" # ============================================================ -# RCLONE SETUP +# RCLONE SETUP (from upstream) # ============================================================ r2_configured() { @@ -61,7 +78,6 @@ if r2_configured; then setup_rclone echo "Checking R2 for existing backup..." - # Check if R2 has an openclaw config backup if rclone ls "r2:${R2_BUCKET}/openclaw/openclaw.json" $RCLONE_FLAGS 2>/dev/null | grep -q openclaw.json; then echo "Restoring config from R2..." rclone copy "r2:${R2_BUCKET}/openclaw/" "$CONFIG_DIR/" $RCLONE_FLAGS -v 2>&1 || echo "WARNING: config restore failed with exit code $?" 
@@ -98,6 +114,90 @@ else echo "R2 not configured, starting fresh" fi +# ============================================================ +# GITHUB REPO CLONE (custom: clone clawd-memory repo) +# ============================================================ + +CLONE_DIR="" +if [ -n "$GITHUB_REPO_URL" ]; then + REPO_NAME=$(basename "$GITHUB_REPO_URL" .git) + CLONE_DIR="/root/clawd/$REPO_NAME" + + # Support private repos via GITHUB_PAT (GITHUB_TOKEN will be overwritten by Copilot token later) + EFFECTIVE_GITHUB_TOKEN="" + if [ -n "${GITHUB_PAT:-}" ]; then + EFFECTIVE_GITHUB_TOKEN="$GITHUB_PAT" + elif [ -n "${GITHUB_TOKEN:-}" ]; then + EFFECTIVE_GITHUB_TOKEN="$GITHUB_TOKEN" + fi + + if [ -n "$EFFECTIVE_GITHUB_TOKEN" ]; then + CLONE_URL=$(echo "$GITHUB_REPO_URL" | sed "s|https://github.com/|https://${EFFECTIVE_GITHUB_TOKEN}@github.com/|") + else + echo "[WARN] Neither GITHUB_PAT nor GITHUB_TOKEN is set. Private repos will fail to clone." + CLONE_URL="$GITHUB_REPO_URL" + fi + + if [ -d "$CLONE_DIR/.git" ]; then + echo "Repository already exists at $CLONE_DIR, updating remote and pulling latest..." + git -C "$CLONE_DIR" remote set-url origin "$CLONE_URL" + git -C "$CLONE_DIR" pull --ff-only || echo "[WARN] git pull failed, continuing with existing version" + else + echo "Cloning $GITHUB_REPO_URL into $CLONE_DIR..." 
+ git clone "$CLONE_URL" "$CLONE_DIR" || echo "[WARN] git clone failed, continuing without repo" + fi + echo "GitHub repo clone completed" +fi + +# Symlink repo contents into workspace +if [ -n "$CLONE_DIR" ] && [ -d "$CLONE_DIR" ]; then + for item in "$CLONE_DIR"/*; do + name=$(basename "$item") + [ "$name" = ".git" ] && continue + [ "$name" = "README.md" ] && continue + if [ -d "$item" ]; then + ln -sfn "$item" "/root/clawd/$name" + else + ln -sf "$item" "/root/clawd/$name" + fi + echo "Symlinked $name -> $item" + done + echo "All repo contents symlinked to workspace" +fi + +# Symlink skills-level bootstrap files into workspace root +for bootstrap in HOT-MEMORY.md CLAUDE.md; do + if [ -f "/root/clawd/skills/$bootstrap" ] && [ ! -f "/root/clawd/$bootstrap" ]; then + ln -sf "/root/clawd/skills/$bootstrap" "/root/clawd/$bootstrap" + echo "Symlinked $bootstrap -> skills/$bootstrap" + fi +done + +# Symlink TOOLS.md from moltworker to workspace root (for agent communication instructions) +if [ -f "/root/clawd/moltworker/TOOLS.md" ] && [ ! -f "/root/clawd/TOOLS.md" ]; then + ln -sf "/root/clawd/moltworker/TOOLS.md" "/root/clawd/TOOLS.md" + echo "Symlinked TOOLS.md -> moltworker/TOOLS.md" +fi + +# Inject Google Calendar instructions into TOOLS.md +if [ -f "/root/clawd/TOOLS.md" ]; then + cp -L "/root/clawd/TOOLS.md" "/root/clawd/TOOLS.md.real" + cat >> "/root/clawd/TOOLS.md.real" << 'CALEOF' + +## Google Calendar (๊ตฌ๊ธ€ ์บ˜๋ฆฐ๋”) +- ์ผ์ • ํ™•์ธํ•  ๋•Œ: `read` tool๋กœ `/root/clawd/warm-memory/calendar.md` ํŒŒ์ผ์„ ์ฝ์–ด๋ผ. ์ด ํŒŒ์ผ์€ ์ž๋™ ๋™๊ธฐํ™”๋จ. 
+- ์ผ์ • ์ƒ์„ฑ: `exec` tool๋กœ `node /root/clawd/skills/google-calendar/scripts/calendar.js create --title "์ œ๋ชฉ" --start "YYYY-MM-DDTHH:MM" --end "YYYY-MM-DDTHH:MM" --attendees "email1,email2"` ์‹คํ–‰ +- ๋‹ค๋ฅธ ์‚ฌ๋žŒ ์ผ์ • ํ™•์ธ: `exec` tool๋กœ `node /root/clawd/skills/google-calendar/scripts/calendar.js freebusy --start "YYYY-MM-DDTHH:MM" --end "YYYY-MM-DDTHH:MM" --emails "email1,email2"` ์‹คํ–‰ +- ๋ฏธํŒ… ์žก๊ธฐ: ๋จผ์ € freebusy๋กœ ์ฐธ์„์ž ๊ฐ€๋Šฅ ์‹œ๊ฐ„ ํ™•์ธ โ†’ ๋นˆ ์‹œ๊ฐ„์— create๋กœ ๋ฏธํŒ… ์ƒ์„ฑ (--attendees ํฌํ•จ) +- ์ผ์ • ๊ฒ€์ƒ‰: `exec` tool๋กœ `node /root/clawd/skills/google-calendar/scripts/calendar.js search --query "๊ฒ€์ƒ‰์–ด"` ์‹คํ–‰ +- ์ผ์ • ์ˆ˜์ •: `exec` tool๋กœ `node /root/clawd/skills/google-calendar/scripts/calendar.js update --id EVENT_ID` ์‹คํ–‰ +- ์ผ์ • ์‚ญ์ œ: `exec` tool๋กœ `node /root/clawd/skills/google-calendar/scripts/calendar.js delete --id EVENT_ID` ์‹คํ–‰ +- ์บ˜๋ฆฐ๋” ๊ด€๋ จ ์š”์ฒญ์— memory_search ์‚ฌ์šฉํ•˜์ง€ ๋งˆ๋ผ. ์œ„ ๋ฐฉ๋ฒ•๋งŒ ์‚ฌ์šฉ. +CALEOF + mv "/root/clawd/TOOLS.md.real" "/root/clawd/TOOLS.md" + echo "Calendar instructions appended to TOOLS.md" +fi + # ============================================================ # ONBOARD (only if no config exists yet) # ============================================================ @@ -133,11 +233,6 @@ fi # ============================================================ # PATCH CONFIG (channels, gateway auth, trusted proxies) # ============================================================ -# openclaw onboard handles provider/model config, but we need to patch in: -# - Channel config (Telegram, Discord, Slack) -# - Gateway token auth -# - Trusted proxies for sandbox networking -# - Base URL override for legacy AI Gateway path node << 'EOFPATCH' const fs = require('fs'); @@ -157,10 +252,11 @@ config.channels = config.channels || {}; // Gateway configuration config.gateway.port = 18789; config.gateway.mode = 'local'; -config.gateway.trustedProxies = ['10.1.0.0']; 
+config.gateway.trustedProxies = ['10.0.0.0/8']; if (process.env.OPENCLAW_GATEWAY_TOKEN) { config.gateway.auth = config.gateway.auth || {}; + config.gateway.auth.mode = 'token'; config.gateway.auth.token = process.env.OPENCLAW_GATEWAY_TOKEN; } @@ -169,17 +265,23 @@ if (process.env.OPENCLAW_DEV_MODE === 'true') { config.gateway.controlUi.allowInsecureAuth = true; } -// Legacy AI Gateway base URL override: -// ANTHROPIC_BASE_URL is picked up natively by the Anthropic SDK, -// so we don't need to patch the provider config. Writing a provider -// entry without a models array breaks OpenClaw's config validation. - -// AI Gateway model override (CF_AI_GATEWAY_MODEL=provider/model-id) -// Adds a provider entry for any AI Gateway provider and sets it as default model. -// Examples: -// workers-ai/@cf/meta/llama-3.3-70b-instruct-fp8-fast -// openai/gpt-4o -// anthropic/claude-sonnet-4-5 +// Agent defaults +config.agents = config.agents || {}; +config.agents.defaults = config.agents.defaults || {}; +config.agents.defaults.workspace = '/root/clawd'; +config.agents.defaults.contextPruning = { mode: 'cache-ttl', ttl: '1h' }; +config.agents.defaults.compaction = { mode: 'safeguard' }; +config.agents.defaults.heartbeat = { every: '30m' }; +config.agents.defaults.maxConcurrent = 4; +config.agents.defaults.subagents = { maxConcurrent: 4 }; + +// Node browser auto config +if (process.env.NODE_DEVICE_ID) { + config.gateway.nodes = config.gateway.nodes || {}; + config.gateway.nodes.browser = { mode: 'auto', node: process.env.NODE_DEVICE_ID }; +} + +// AI Gateway model override if (process.env.CF_AI_GATEWAY_MODEL) { const raw = process.env.CF_AI_GATEWAY_MODEL; const slashIdx = raw.indexOf('/'); @@ -210,20 +312,16 @@ if (process.env.CF_AI_GATEWAY_MODEL) { api: api, models: [{ id: modelId, name: modelId, contextWindow: 131072, maxTokens: 8192 }], }; - config.agents = config.agents || {}; - config.agents.defaults = config.agents.defaults || {}; config.agents.defaults.model = { primary: 
providerName + '/' + modelId }; console.log('AI Gateway model override: provider=' + providerName + ' model=' + modelId + ' via ' + baseUrl); } else { - console.warn('CF_AI_GATEWAY_MODEL set but missing required config (account ID, gateway ID, or API key)'); + console.warn('CF_AI_GATEWAY_MODEL set but missing required config'); } } // Telegram configuration -// Overwrite entire channel object to drop stale keys from old R2 backups -// that would fail OpenClaw's strict config validation (see #47) if (process.env.TELEGRAM_BOT_TOKEN) { - const dmPolicy = process.env.TELEGRAM_DM_POLICY || 'pairing'; + const dmPolicy = process.env.TELEGRAM_DM_POLICY || 'allowlist'; config.channels.telegram = { botToken: process.env.TELEGRAM_BOT_TOKEN, enabled: true, @@ -237,7 +335,6 @@ if (process.env.TELEGRAM_BOT_TOKEN) { } // Discord configuration -// Discord uses a nested dm object: dm.policy, dm.allowFrom (per DiscordDmConfig) if (process.env.DISCORD_BOT_TOKEN) { const dmPolicy = process.env.DISCORD_DM_POLICY || 'pairing'; const dm = { policy: dmPolicy }; @@ -265,7 +362,100 @@ console.log('Configuration patched successfully'); EOFPATCH # ============================================================ -# BACKGROUND SYNC LOOP +# CUSTOM: Telegram owner allowlist +# ============================================================ +if [ -n "$TELEGRAM_OWNER_ID" ]; then + mkdir -p "$CONFIG_DIR/credentials" + cat > "$CONFIG_DIR/credentials/telegram-allowFrom.json" << EOFALLOW +{ + "version": 1, + "allowFrom": [ + "$TELEGRAM_OWNER_ID" + ] +} +EOFALLOW + echo "Telegram allowlist set for owner ID: $TELEGRAM_OWNER_ID" +fi + +# ============================================================ +# CUSTOM: Pre-seed device pairing (workaround for openclaw#4833) +# ============================================================ +if [ -n "${NODE_DEVICE_ID:-}" ] && [ -n "${NODE_DEVICE_PUBLIC_KEY:-}" ]; then + mkdir -p "$CONFIG_DIR/devices" + PAIRED_FILE="$CONFIG_DIR/devices/paired.json" + NOW_MS=$(date +%s)000 + + if 
[ -f "$PAIRED_FILE" ]; then + EXISTING=$(cat "$PAIRED_FILE") + else + EXISTING="{}" + fi + + echo "$EXISTING" | node -e " + let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{ + const paired=JSON.parse(d||'{}'); + paired['${NODE_DEVICE_ID}']={ + deviceId:'${NODE_DEVICE_ID}', + publicKey:'${NODE_DEVICE_PUBLIC_KEY}', + displayName:'${NODE_DEVICE_DISPLAY_NAME:-Node Host}', + platform:'darwin', + clientId:'node-host', + clientMode:'node', + role:'node', + roles:['node'], + scopes:[], + tokens:{node:{token:'${CLAWDBOT_GATEWAY_TOKEN:-}',role:'node',scopes:[],createdAtMs:${NOW_MS}}}, + createdAtMs:${NOW_MS}, + approvedAtMs:${NOW_MS} + }; + process.stdout.write(JSON.stringify(paired,null,2)); + });" > "${PAIRED_FILE}.tmp" && mv "${PAIRED_FILE}.tmp" "$PAIRED_FILE" + echo "[PAIRING] Pre-seeded device pairing for node: ${NODE_DEVICE_ID:0:16}..." +else + echo "[PAIRING] NODE_DEVICE_ID or NODE_DEVICE_PUBLIC_KEY not set, skipping pre-seed" +fi + +# ============================================================ +# CUSTOM: Model & auth configuration +# ============================================================ + +# Set model (after config is written) +openclaw models set github-copilot/gpt-5-mini 2>/dev/null || true +echo "Models set: github-copilot/gpt-5-mini" + +# GitHub Copilot auth: export GITHUB_TOKEN so OpenClaw's github-copilot provider picks it up +if [ -n "${GITHUB_COPILOT_TOKEN:-}" ]; then + export GITHUB_TOKEN="$GITHUB_COPILOT_TOKEN" + echo "GitHub Copilot auth: GITHUB_TOKEN exported from GITHUB_COPILOT_TOKEN" +fi + +# Google AI API key for embeddings (memory_search semantic search) +if [ -n "${GOOGLE_AI_API_KEY:-}" ]; then + export GEMINI_API_KEY="$GOOGLE_AI_API_KEY" + echo "Google AI auth: GEMINI_API_KEY exported for embeddings" +fi + +# Git credential helper: use GITHUB_PAT for all github.com push/pull operations +if [ -n "${GITHUB_PAT:-}" ]; then + cat > /usr/local/bin/git-credential-pat << CREDEOF +#!/bin/sh +echo "protocol=https" +echo 
"host=github.com" +echo "username=x-access-token" +echo "password=${GITHUB_PAT}" +CREDEOF + chmod +x /usr/local/bin/git-credential-pat + git config --global credential.helper "/usr/local/bin/git-credential-pat" + echo "Git credential helper configured (GITHUB_PAT for github.com)" +fi + +# Clean up stale lock files +find /root/.openclaw -name "*.lock" -delete 2>/dev/null || true +rm -f /tmp/openclaw-gateway.lock 2>/dev/null || true +rm -f "$CONFIG_DIR/gateway.lock" 2>/dev/null || true + +# ============================================================ +# BACKGROUND SYNC LOOP (from upstream, rclone-based) # ============================================================ if r2_configured; then echo "Starting background R2 sync loop..." @@ -310,20 +500,251 @@ if r2_configured; then fi # ============================================================ -# START GATEWAY +# CUSTOM: Cron restoration (background, after gateway is ready) # ============================================================ -echo "Starting OpenClaw Gateway..." -echo "Gateway will be available on port 18789" +( + CRON_SCRIPT="/root/clawd/clawd-memory/scripts/restore-crons.js" + STUDY_SCRIPT="/root/clawd/skills/web-researcher/scripts/study-session.js" + BRAIN_SCRIPT="/root/clawd/skills/brain-memory/scripts/brain-memory-system.js" + REFLECT_SCRIPT="/root/clawd/skills/self-modify/scripts/reflect.js" + + # Helper: register a cron with retry (2 attempts) + register_cron() { + local label="$1"; shift + for attempt in 1 2; do + if openclaw cron add "$@" 2>&1; then + echo "[$label] Cron registered successfully" + return 0 + fi + echo "[$label] Attempt $attempt failed, retrying in 5s..." + sleep 5 + done + echo "[WARN] $label cron registration failed after 2 attempts" + return 1 + } + + # Wait for gateway to be ready + for i in $(seq 1 30); do + sleep 2 + if port_open 127.0.0.1 18789; then + sleep 3 + echo "[CRON] Gateway ready, starting cron restoration..." 
+ + TOKEN_FLAG="" + # Use operator token from device-auth.json (device pairing auth) + OPERATOR_TOKEN=$(node -e "try{const d=JSON.parse(require('fs').readFileSync('/root/.openclaw/identity/device-auth.json','utf8'));console.log(d.tokens.operator.token)}catch(e){}" 2>/dev/null) + if [ -n "$OPERATOR_TOKEN" ]; then + TOKEN_FLAG="--token $OPERATOR_TOKEN" + elif [ -n "$CLAWDBOT_GATEWAY_TOKEN" ]; then + TOKEN_FLAG="--token $CLAWDBOT_GATEWAY_TOKEN" + fi + + ALLOWED_MODEL="github-copilot/gpt-5-mini" + + # 1. Restore base crons from clawd-memory repo (if available) + if [ -f "$CRON_SCRIPT" ]; then + echo "[CRON] Running restore-crons.js..." + node "$CRON_SCRIPT" 2>&1 || echo "[WARN] Cron restore script failed" + fi + + # 1b. Validate all cron models + echo "[CRON] Validating cron model IDs..." + CRON_JSON=$(openclaw cron list --json $TOKEN_FLAG 2>/dev/null || echo '{"jobs":[]}') + BAD_CRONS=$(echo "$CRON_JSON" | node -e " + let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{ + try{ + const allowed=['$ALLOWED_MODEL']; + const jobs=JSON.parse(d).jobs||[]; + jobs.forEach(j=>{ + const m=j.payload&&j.payload.model||''; + if(m&&!allowed.includes(m)){ + console.log(j.id+'|'+j.name+'|'+m); + } + }); + }catch(e){console.error(e.message);} + });" 2>/dev/null) + + if [ -n "$BAD_CRONS" ]; then + echo "[CRON] Found crons with disallowed models, fixing..." + echo "$BAD_CRONS" | while IFS='|' read -r cid cname cmodel; do + echo "[CRON] Fixing $cname (was: $cmodel -> $ALLOWED_MODEL)" + openclaw cron remove "$cid" $TOKEN_FLAG 2>/dev/null || true + done + else + echo "[CRON] All cron models are valid" + fi + + # 2. auto-study + if [ -n "$SERPER_API_KEY" ] && [ -f "$STUDY_SCRIPT" ]; then + if ! 
openclaw cron list $TOKEN_FLAG 2>/dev/null | grep -qF "auto-study "; then + register_cron "STUDY" \ + --name "auto-study" \ + --every "24h" \ + --session isolated \ + --model "$ALLOWED_MODEL" \ + --thinking off \ + $TOKEN_FLAG \ + --message "Run: node /root/clawd/skills/web-researcher/scripts/study-session.js --compact โ€” Summarize findings. Save notable items to warm memory via: node /root/clawd/skills/self-modify/scripts/modify.js --file warm-memory/TOPIC.md --content SUMMARY --keywords KEYWORDS --reason auto-study" + fi + fi + + # 3. brain-memory + if [ -f "$BRAIN_SCRIPT" ]; then + if ! openclaw cron list $TOKEN_FLAG 2>/dev/null | grep -qF "brain-memory "; then + register_cron "BRAIN" \ + --name "brain-memory" \ + --every "24h" \ + --session isolated \ + --model "$ALLOWED_MODEL" \ + --thinking off \ + $TOKEN_FLAG \ + --message "Run: node /root/clawd/skills/brain-memory/scripts/brain-memory-system.js --compact โ€” Analyze output. Save daily summary to /root/clawd/brain-memory/daily/YYYY-MM-DD.md (today's date, mkdir -p if needed). If owner prefs or active context changed, update HOT-MEMORY.md via: node /root/clawd/skills/self-modify/scripts/modify.js --file HOT-MEMORY.md --content NEW_CONTENT --reason daily-update" + fi + fi + + # 4. self-reflect + if [ -f "$REFLECT_SCRIPT" ]; then + if ! openclaw cron list $TOKEN_FLAG 2>/dev/null | grep -qF "self-reflect "; then + register_cron "REFLECT" \ + --name "self-reflect" \ + --every "168h" \ + --session isolated \ + --model "$ALLOWED_MODEL" \ + --thinking off \ + $TOKEN_FLAG \ + --message "Run: node /root/clawd/skills/self-modify/scripts/reflect.js โ€” Analyze this reflection report. Do ALL of the following: 1) Find non-obvious patterns and insights across daily summaries. Save key insights to warm memory via modify.js. 2) Prune warm-memory topics not accessed in 14+ days (archive key facts, remove file, update memory-index.json). 3) If HOT-MEMORY.md > 450 tokens, compress it via modify.js. 
4) If study topics produce low-value results, consider adjusting via modify-cron.js. 5) Save a brief reflection to /root/clawd/brain-memory/reflections/YYYY-MM-DD.md" + fi + fi -rm -f /tmp/openclaw-gateway.lock 2>/dev/null || true -rm -f "$CONFIG_DIR/gateway.lock" 2>/dev/null || true + echo "[CRON] Cron restoration complete" + break + fi + done +) & +echo "Cron restore scheduled in background" + +# ============================================================ +# CUSTOM: Auto-approve device pairing (background) +# ============================================================ +( + for i in $(seq 1 60); do + sleep 3 + if port_open 127.0.0.1 18789; then + echo "[PAIRING] Gateway ready, starting auto-approve loop" + break + fi + done + + while true; do + devices_json=$(openclaw devices list --json --token "$CLAWDBOT_GATEWAY_TOKEN" --url ws://127.0.0.1:18789 --timeout 5000 2>/dev/null || true) + + if [ -n "$devices_json" ]; then + pending_ids=$(echo "$devices_json" | node -e " + let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{ + try{const j=JSON.parse(d);const p=j.pending||j.pendingRequests||j.requests||[]; + if(Array.isArray(p)){p.forEach(r=>{const id=r.requestId||r.id||'';if(id)console.log(id);})} + }catch(e){} + });" 2>/dev/null) + + if [ -n "$pending_ids" ]; then + echo "$pending_ids" | while IFS= read -r reqId; do + if [ -n "$reqId" ]; then + echo "[PAIRING] Auto-approving device pairing request: $reqId" + openclaw devices approve "$reqId" --token "$CLAWDBOT_GATEWAY_TOKEN" --url ws://127.0.0.1:18789 2>&1 || echo "[PAIRING] Approve failed for $reqId" + fi + done + fi + fi -echo "Dev mode: ${OPENCLAW_DEV_MODE:-false}" + sleep 10 + done +) & +echo "[PAIRING] Auto-approve loop started in background" -if [ -n "$OPENCLAW_GATEWAY_TOKEN" ]; then - echo "Starting gateway with token auth..." 
- exec openclaw gateway --port 18789 --verbose --allow-unconfigured --bind lan --token "$OPENCLAW_GATEWAY_TOKEN" -else - echo "Starting gateway with device pairing (no token)..." - exec openclaw gateway --port 18789 --verbose --allow-unconfigured --bind lan +# ============================================================ +# CUSTOM: Agent message bus watcher (background, every 30s) +# ============================================================ +MESSAGE_WATCHER="/root/clawd/moltworker/scripts/agent-comms/watch-messages.js" +if [ -f "$MESSAGE_WATCHER" ]; then + ( + for i in $(seq 1 60); do + sleep 3 + if port_open 127.0.0.1 18789; then + echo "[AGENT-COMMS] Gateway ready, starting message watcher loop" + break + fi + done + + while true; do + node "$MESSAGE_WATCHER" 2>&1 | head -20 || echo "[AGENT-COMMS] Watcher failed" + sleep 30 + done + ) & + echo "[AGENT-COMMS] Message watcher started in background (every 30s)" +fi + +# ============================================================ +# CUSTOM: Calendar sync (background, every 6h) +# ============================================================ +if [ -n "$GOOGLE_CLIENT_ID" ] && [ -n "$GOOGLE_REFRESH_TOKEN" ]; then + ( + while true; do + echo "[CALENDAR-SYNC] Syncing today's calendar events..." + node /root/clawd/skills/google-calendar/scripts/sync-today.js --days 1 2>&1 || echo "[CALENDAR-SYNC] sync failed" + sleep 21600 # 6 hours + done + ) & + echo "[CALENDAR-SYNC] Background sync started (every 6h)" fi + +# ============================================================ +# START GATEWAY (with restart loop for crash recovery) +# ============================================================ +echo "Starting OpenClaw Gateway..." + +set +e + +MAX_RETRIES=10 +RETRY_COUNT=0 +BACKOFF=5 +MAX_BACKOFF=120 +SUCCESS_THRESHOLD=60 + +while true; do + GATEWAY_START=$(date +%s) + echo "[GATEWAY] Starting openclaw gateway (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..." 
+ + if [ -n "$OPENCLAW_GATEWAY_TOKEN" ]; then + openclaw gateway --port 18789 --verbose --allow-unconfigured --bind lan --token "$OPENCLAW_GATEWAY_TOKEN" + else + openclaw gateway --port 18789 --verbose --allow-unconfigured --bind lan + fi + EXIT_CODE=$? + + GATEWAY_END=$(date +%s) + RUNTIME=$((GATEWAY_END - GATEWAY_START)) + + echo "[GATEWAY] Gateway exited with code $EXIT_CODE after ${RUNTIME}s" + + if [ "$RUNTIME" -ge "$SUCCESS_THRESHOLD" ]; then + echo "[GATEWAY] Ran ${RUNTIME}s (>= ${SUCCESS_THRESHOLD}s), resetting retry counter" + RETRY_COUNT=0 + BACKOFF=5 + else + RETRY_COUNT=$((RETRY_COUNT + 1)) + if [ "$RETRY_COUNT" -ge "$MAX_RETRIES" ]; then + echo "[GATEWAY] Max retries ($MAX_RETRIES) reached. Giving up." + break + fi + fi + + echo "[GATEWAY] Restarting in ${BACKOFF}s... (retry $RETRY_COUNT/$MAX_RETRIES)" + sleep "$BACKOFF" + + BACKOFF=$((BACKOFF * 2)) + if [ "$BACKOFF" -gt "$MAX_BACKOFF" ]; then + BACKOFF=$MAX_BACKOFF + fi +done + +echo "[GATEWAY] Gateway restart loop ended. Container will exit." diff --git a/wrangler.jsonc b/wrangler.jsonc index 7b2ce8d0b..89a945fa3 100644 --- a/wrangler.jsonc +++ b/wrangler.jsonc @@ -37,7 +37,7 @@ { "class_name": "Sandbox", "image": "./Dockerfile", - "instance_type": "standard-1", + "instance_type": "standard-4", "max_instances": 1, }, ], @@ -62,7 +62,12 @@ "bucket_name": "moltbot-data", }, ], - + // Cron trigger for health check every 10 minutes + "triggers": { + "crons": [ + "*/10 * * * *" + ], + }, // Browser Rendering binding for CDP shim "browser": { "binding": "BROWSER",