diff --git a/.github/.markdownlint.json b/.github/.markdownlint.json new file mode 100644 index 0000000..bc0a51f --- /dev/null +++ b/.github/.markdownlint.json @@ -0,0 +1,14 @@ +{ + "MD009": false, + "MD013": false, + "MD022": false, + "MD026": false, + "MD029": false, + "MD031": false, + "MD032": false, + "MD033": false, + "MD034": false, + "MD040": false, + "MD041": false, + "MD060": false +} diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 0000000..82f6693 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,29 @@ +commands: + - changed-files: + - any-glob-to-any-file: "commands/**" + +skills: + - changed-files: + - any-glob-to-any-file: "skills/**" + +agents: + - changed-files: + - any-glob-to-any-file: "agents/**" + +docs: + - changed-files: + - any-glob-to-any-file: + - "README.md" + - "CLAUDE.md" + - "examples/**" + +config: + - changed-files: + - any-glob-to-any-file: + - ".claude-plugin/**" + - ".mcp.json" + - ".github/**" + +examples: + - changed-files: + - any-glob-to-any-file: "examples/**" diff --git a/.github/scripts/check-internal-links.py b/.github/scripts/check-internal-links.py new file mode 100755 index 0000000..102526e --- /dev/null +++ b/.github/scripts/check-internal-links.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +"""Check that internal markdown links resolve to existing files.""" + +import re +import sys +from pathlib import Path + +# Matches [text](path) and ![alt](path) — excludes URLs +LINK_RE = re.compile(r"!?\[([^\]]*)\]\(([^)]+)\)") + + +def check_file_links(file_path: Path, root: Path) -> list[str]: + errors = [] + text = file_path.read_text() + file_dir = file_path.parent + + for match in LINK_RE.finditer(text): + target = match.group(2) + + # Skip external URLs + if target.startswith(("http://", "https://", "mailto:")): + continue + + # Skip anchor-only links + if target.startswith("#"): + continue + + # Strip anchor fragments from file paths + target_path = target.split("#")[0] + if not target_path: 
+ continue + + # Resolve relative to the file's directory + resolved = (file_dir / target_path).resolve() + if not resolved.exists(): + rel = file_path.relative_to(root) + errors.append(f"{rel}: Broken link '{target}' — file not found") + + return errors + + +def main(): + root = Path(__file__).resolve().parent.parent.parent + errors = [] + + for md_file in sorted(root.rglob("*.md")): + # Skip .git and node_modules + parts = md_file.relative_to(root).parts + if any(p.startswith(".git") or p == "node_modules" for p in parts): + continue + errors.extend(check_file_links(md_file, root)) + + if errors: + print("Link check failed:") + for e in errors: + print(f" ✗ {e}") + sys.exit(1) + else: + print("✓ Link check passed") + + +if __name__ == "__main__": + main() diff --git a/.github/scripts/validate-frontmatter.py b/.github/scripts/validate-frontmatter.py new file mode 100755 index 0000000..9c7c87f --- /dev/null +++ b/.github/scripts/validate-frontmatter.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 +"""Validate YAML frontmatter in commands, skills, and agents.""" + +import re +import sys +from pathlib import Path +from typing import Optional + +# PyYAML is not guaranteed on all runners, so parse simple YAML manually +FRONTMATTER_RE = re.compile(r"^---\s*\n(.*?)\n---", re.DOTALL) + +KNOWN_TOOLS = {"Bash", "Read", "Write", "Glob", "Grep", "mcp__postman__*"} + + +def parse_frontmatter(text: str) -> Optional[dict]: + match = FRONTMATTER_RE.match(text) + if not match: + return None + result = {} + for line in match.group(1).splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + if ":" in line: + key, _, value = line.partition(":") + key = key.strip() + value = value.strip().strip('"').strip("'") + result[key] = value + return result + + +def validate_commands(root: Path) -> list[str]: + errors = [] + commands_dir = root / "commands" + if not commands_dir.is_dir(): + return [f"{commands_dir}: Directory not found"] + + for f in 
sorted(commands_dir.glob("*.md")): + text = f.read_text() + fm = parse_frontmatter(text) + if fm is None: + errors.append(f"{f.name}: Missing YAML frontmatter") + continue + if "description" not in fm or not fm["description"]: + errors.append(f"{f.name}: Missing required field 'description'") + if "allowed-tools" in fm and fm["allowed-tools"]: + tools = [t.strip() for t in fm["allowed-tools"].split(",")] + for tool in tools: + if tool not in KNOWN_TOOLS: + errors.append(f"{f.name}: Unknown tool '{tool}' in allowed-tools") + + return errors + + +def validate_skills(root: Path) -> list[str]: + errors = [] + skills_dir = root / "skills" + if not skills_dir.is_dir(): + return [f"{skills_dir}: Directory not found"] + + for skill_dir in sorted(skills_dir.iterdir()): + if not skill_dir.is_dir(): + continue + skill_file = skill_dir / "SKILL.md" + if not skill_file.exists(): + errors.append(f"skills/{skill_dir.name}/: Missing SKILL.md") + continue + text = skill_file.read_text() + fm = parse_frontmatter(text) + if fm is None: + errors.append(f"skills/{skill_dir.name}/SKILL.md: Missing YAML frontmatter") + continue + if "name" not in fm or not fm["name"]: + errors.append(f"skills/{skill_dir.name}/SKILL.md: Missing required field 'name'") + if "description" not in fm or not fm["description"]: + errors.append(f"skills/{skill_dir.name}/SKILL.md: Missing required field 'description'") + + return errors + + +def validate_agents(root: Path) -> list[str]: + errors = [] + agents_dir = root / "agents" + if not agents_dir.is_dir(): + return [f"{agents_dir}: Directory not found"] + + required_fields = ["name", "description", "model", "allowed-tools"] + + for f in sorted(agents_dir.glob("*.md")): + text = f.read_text() + fm = parse_frontmatter(text) + if fm is None: + errors.append(f"agents/{f.name}: Missing YAML frontmatter") + continue + for field in required_fields: + if field not in fm or not fm[field]: + errors.append(f"agents/{f.name}: Missing required field '{field}'") + if 
"allowed-tools" in fm and fm["allowed-tools"]: + tools = [t.strip() for t in fm["allowed-tools"].split(",")] + for tool in tools: + if tool not in KNOWN_TOOLS: + errors.append(f"agents/{f.name}: Unknown tool '{tool}' in allowed-tools") + + return errors + + +def main(): + root = Path(__file__).resolve().parent.parent.parent + errors = [] + + errors.extend(validate_commands(root)) + errors.extend(validate_skills(root)) + errors.extend(validate_agents(root)) + + if errors: + print("Frontmatter validation failed:") + for e in errors: + print(f" ✗ {e}") + sys.exit(1) + else: + print("✓ Frontmatter validation passed") + + +if __name__ == "__main__": + main() diff --git a/.github/scripts/validate-json.py b/.github/scripts/validate-json.py new file mode 100755 index 0000000..19870a5 --- /dev/null +++ b/.github/scripts/validate-json.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +"""Validate JSON config files for the Claude Code plugin.""" + +import json +import sys +from pathlib import Path + + +def validate_plugin_json(path: Path) -> list[str]: + errors = [] + try: + with open(path) as f: + data = json.load(f) + except json.JSONDecodeError as e: + return [f"{path}: Invalid JSON — {e}"] + + required_fields = ["name", "version", "description"] + for field in required_fields: + if field not in data: + errors.append(f"{path}: Missing required field '{field}'") + elif not isinstance(data[field], str) or not data[field].strip(): + errors.append(f"{path}: Field '{field}' must be a non-empty string") + + if "version" in data and isinstance(data["version"], str): + parts = data["version"].split(".") + if len(parts) != 3 or not all(p.isdigit() for p in parts): + errors.append(f"{path}: Field 'version' must be semver (e.g. 
1.0.0)") + + return errors + + +def validate_mcp_json(path: Path) -> list[str]: + errors = [] + try: + with open(path) as f: + data = json.load(f) + except json.JSONDecodeError as e: + return [f"{path}: Invalid JSON — {e}"] + + if "mcpServers" not in data: + errors.append(f"{path}: Missing required key 'mcpServers'") + elif not isinstance(data["mcpServers"], dict): + errors.append(f"{path}: 'mcpServers' must be an object") + else: + for name, config in data["mcpServers"].items(): + if "type" not in config: + errors.append(f"{path}: Server '{name}' missing 'type'") + if "url" not in config: + errors.append(f"{path}: Server '{name}' missing 'url'") + + return errors + + +def main(): + root = Path(__file__).resolve().parent.parent.parent + errors = [] + + plugin_json = root / ".claude-plugin" / "plugin.json" + if plugin_json.exists(): + errors.extend(validate_plugin_json(plugin_json)) + else: + errors.append(f"{plugin_json}: File not found") + + mcp_json = root / ".mcp.json" + if mcp_json.exists(): + errors.extend(validate_mcp_json(mcp_json)) + else: + errors.append(f"{mcp_json}: File not found") + + if errors: + print("JSON validation failed:") + for e in errors: + print(f" ✗ {e}") + sys.exit(1) + else: + print("✓ JSON validation passed") + + +if __name__ == "__main__": + main() diff --git a/.github/scripts/validate-structure.py b/.github/scripts/validate-structure.py new file mode 100755 index 0000000..f9e9e96 --- /dev/null +++ b/.github/scripts/validate-structure.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +"""Validate plugin structure integrity — cross-reference components.""" + +import json +import re +import sys +from pathlib import Path + +FRONTMATTER_RE = re.compile(r"^---\s*\n(.*?)\n---", re.DOTALL) + + +def has_frontmatter(path: Path) -> bool: + text = path.read_text() + return FRONTMATTER_RE.match(text) is not None + + +def main(): + root = Path(__file__).resolve().parent.parent.parent + errors = [] + + # 1. 
plugin.json must exist and be valid + plugin_json = root / ".claude-plugin" / "plugin.json" + if not plugin_json.exists(): + errors.append(".claude-plugin/plugin.json: File not found") + else: + try: + json.loads(plugin_json.read_text()) + except json.JSONDecodeError: + errors.append(".claude-plugin/plugin.json: Invalid JSON") + + # 2. .mcp.json must exist + mcp_json = root / ".mcp.json" + if not mcp_json.exists(): + errors.append(".mcp.json: File not found") + + # 3. Every commands/*.md must have frontmatter + commands_dir = root / "commands" + if commands_dir.is_dir(): + command_files = sorted(commands_dir.glob("*.md")) + if not command_files: + errors.append("commands/: No command files found") + for f in command_files: + if not has_frontmatter(f): + errors.append(f"commands/{f.name}: Missing YAML frontmatter") + else: + errors.append("commands/: Directory not found") + + # 4. Every skills/*/ directory must have a SKILL.md + skills_dir = root / "skills" + if skills_dir.is_dir(): + skill_dirs = sorted([d for d in skills_dir.iterdir() if d.is_dir()]) + if not skill_dirs: + errors.append("skills/: No skill directories found") + for d in skill_dirs: + skill_file = d / "SKILL.md" + if not skill_file.exists(): + errors.append(f"skills/{d.name}/: Missing SKILL.md") + elif not has_frontmatter(skill_file): + errors.append(f"skills/{d.name}/SKILL.md: Missing YAML frontmatter") + else: + errors.append("skills/: Directory not found") + + # 5. Every agents/*.md must have frontmatter + agents_dir = root / "agents" + if agents_dir.is_dir(): + agent_files = sorted(agents_dir.glob("*.md")) + if not agent_files: + errors.append("agents/: No agent files found") + for f in agent_files: + if not has_frontmatter(f): + errors.append(f"agents/{f.name}: Missing YAML frontmatter") + else: + errors.append("agents/: Directory not found") + + # 6. 
Check for stray markdown files in root (not README, CLAUDE, LICENSE, or examples) + expected_root_md = {"README.md", "CLAUDE.md", "LICENSE"} + for f in sorted(root.glob("*.md")): + if f.name not in expected_root_md: + errors.append(f"{f.name}: Unexpected markdown file in repo root") + + if errors: + print("Structure validation failed:") + for e in errors: + print(f" ✗ {e}") + sys.exit(1) + else: + print("✓ Structure validation passed") + + +if __name__ == "__main__": + main() diff --git a/.github/workflows/plugin-checks.yml b/.github/workflows/plugin-checks.yml new file mode 100644 index 0000000..bd76f16 --- /dev/null +++ b/.github/workflows/plugin-checks.yml @@ -0,0 +1,59 @@ +name: Plugin Checks + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + json-validation: + name: JSON Validation + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - run: python .github/scripts/validate-json.py + + frontmatter-validation: + name: Frontmatter Validation + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - run: python .github/scripts/validate-frontmatter.py + + markdown-lint: + name: Markdown Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: "20" + - run: npm install -g markdownlint-cli + - run: markdownlint "**/*.md" --config .github/.markdownlint.json + + link-check: + name: Link Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - run: python .github/scripts/check-internal-links.py + + structure-integrity: + name: Structure Integrity + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - run: python .github/scripts/validate-structure.py diff --git 
a/.github/workflows/pr-labeler.yml b/.github/workflows/pr-labeler.yml new file mode 100644 index 0000000..9accf59 --- /dev/null +++ b/.github/workflows/pr-labeler.yml @@ -0,0 +1,18 @@ +name: PR Labeler + +on: + pull_request: + types: [opened, synchronize, reopened] + +permissions: + contents: read + pull-requests: write + +jobs: + label: + name: Label PR + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v5 + with: + configuration-path: .github/labeler.yml diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..969c30a --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,66 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## What This Is + +The Postman Plugin for Claude Code — a pure-markdown, configuration-driven plugin that provides full API lifecycle management via the Postman MCP Server. No compiled code, no runtime dependencies, no build step. + +## Repository Structure + +``` +.claude-plugin/plugin.json # Plugin manifest (name, version, metadata) +.mcp.json # MCP server auto-config (Postman MCP at mcp.postman.com) +commands/*.md # 11 slash commands (/postman:) +skills/*/SKILL.md # 7 skills (routing, knowledge, agent-ready APIs, CLI, send-request, generate-spec, run-collection) +agents/readiness-analyzer.md # Sub-agent for API readiness analysis +examples/ # Sample output (readiness report) +assets/ # GIFs for README +``` + +## How the Plugin Works + +- Claude Code discovers components via `.claude-plugin/plugin.json` manifest +- `.mcp.json` auto-configures the Postman MCP server, providing `mcp__postman__*` tools (111 tools) +- MCP commands use the cloud Postman MCP Server — requires `POSTMAN_API_KEY` environment variable +- CLI commands use the locally installed Postman CLI (`npm install -g postman-cli`) — requires `postman login` +- Plugin is loaded with `claude --plugin-dir /path/to/postman-claude-code-plugin` + +## Component Conventions + +**Commands** 
(`commands/*.md`): YAML front matter with `description` and `allowed-tools`. Each defines a structured workflow invoked as `/postman:`. +- MCP commands: setup, sync, codegen, search, test, mock, docs, security +- CLI commands: request, generate-spec, run-collection + +**Skills** (`skills/*/SKILL.md`): YAML front matter with `name`, `description`, `user-invocable`. Auto-injected context, not directly invoked. `postman-routing` routes requests to commands; `postman-knowledge` provides MCP tool guidance; `agent-ready-apis` provides readiness criteria; `postman-cli` provides CLI and git sync file structure knowledge. + +**Agent** (`agents/readiness-analyzer.md`): YAML front matter with `name`, `description`, `model`, `allowed-tools`. Runs as a sub-agent (sonnet model) for deep API readiness analysis (8 pillars, 48 checks). + +## Key MCP Limitations + +These are documented in `skills/postman-knowledge/mcp-limitations.md` and must be respected in all commands: + +- `searchPostmanElements` searches PUBLIC network only — use `getWorkspaces` + `getCollections` for private content +- `generateCollection` and `syncCollectionWithSpec` return HTTP 202 — must poll for completion +- `syncCollectionWithSpec` supports OpenAPI 3.0 only — use `updateSpecFile` + `generateCollection` for Swagger 2.0 or OpenAPI 3.1 +- `createCollection` creates flat collections — nest via `createCollectionFolder` + `createCollectionRequest` +- `createSpec` struggles with specs >50KB — decompose into collection items instead + +## Postman CLI Commands + +Three commands use the Postman CLI instead of MCP. They require `postman-cli` installed locally (`npm install -g postman-cli`) and authenticated (`postman login`). If CLI is not found, show install instructions and stop. 
+ +- `/postman:request` — Send HTTP requests via `postman request ` +- `/postman:generate-spec` — Scan code for API routes, generate OpenAPI 3.0 YAML, validate with `postman spec lint` +- `/postman:run-collection` — Run collection tests via `postman collection run ` using cloud IDs from `.postman/resources.yaml` + +CLI commands work with Postman's git sync structure: `postman/collections/` (v3 folder format), `postman/environments/`, `postman/specs/`, and `.postman/resources.yaml` for cloud ID mapping. + +## Development Notes + +- There is no build, lint, or test suite — all "code" is instructional markdown +- Changes are purely editing markdown files with YAML front matter +- When adding a new command, follow the existing front matter pattern in `commands/` +- When adding a new skill, create `skills//SKILL.md` with proper front matter +- The `allowed-tools` field in front matter controls what tools a command/agent can use +- CLI commands need `Bash` in `allowed-tools`; MCP commands need `mcp__postman__*` diff --git a/commands/generate-spec.md b/commands/generate-spec.md new file mode 100644 index 0000000..032456b --- /dev/null +++ b/commands/generate-spec.md @@ -0,0 +1,55 @@ +--- +description: Generate or update an OpenAPI spec from your codebase +--- + +Generate or update an OpenAPI 3.0 specification by analyzing API routes in your codebase. + +## Step 1: Check for Existing Spec + +```bash +ls postman/specs/**/*.yaml postman/specs/**/*.yml postman/specs/**/*.json 2>/dev/null +ls openapi.yaml openapi.yml swagger.yaml swagger.yml 2>/dev/null +``` + +If a spec exists, read it to understand current state. You'll update it rather than replace it. 
+ +## Step 2: Discover API Endpoints + +Scan the project for route definitions based on the framework: + +- **Express/Node**: `app.get()`, `router.post()`, `@Get()` (NestJS) +- **Python**: `@app.route()`, `@router.get()` (FastAPI), `path()` (Django) +- **Go**: `http.HandleFunc()`, `r.GET()` (Gin/Echo) +- **Java**: `@GetMapping`, `@PostMapping`, `@RequestMapping` +- **Ruby**: `get`, `post`, `resources` in routes.rb + +Read the route files and extract: methods, paths, parameters, request bodies, response schemas, auth requirements. + +## Step 3: Generate or Update the Spec + +Write a valid OpenAPI 3.0.3 YAML spec including: +- `info` with title, version, description +- `servers` with local dev URL +- `paths` with all discovered endpoints +- `components/schemas` with models derived from code (types, models, structs) +- `components/securitySchemes` if auth is used + +**When updating**: Add new endpoints, update changed ones, remove endpoints no longer in code. Preserve existing descriptions and examples. + +**When creating**: Write to `postman/specs/openapi.yaml`. + +## Step 4: Validate + +```bash +postman spec lint ./postman/specs/openapi.yaml +``` + +Fix any validation errors and re-run until clean. + +## Step 5: Report + +Show what was created or changed: +- Endpoints documented (count and list) +- Schemas defined +- Changes from previous spec (if updating) +- Validation result diff --git a/commands/run-collection.md b/commands/run-collection.md new file mode 100644 index 0000000..b344a1f --- /dev/null +++ b/commands/run-collection.md @@ -0,0 +1,55 @@ +--- +description: Run Postman collection tests using the CLI +--- + +Run Postman collection tests to verify your API endpoints. + +## Step 1: Find Collections and IDs + +List collection folders and look up their cloud IDs: + +```bash +ls postman/collections/ +cat .postman/resources.yaml +``` + +The `cloudResources.collections` section maps local collection paths to cloud IDs. 
+ +If no collections found, tell the user and stop. +If one collection, use it directly. +If multiple, list them and ask which to run. + +## Step 2: Run the Collection + +Run by **collection ID** (from `.postman/resources.yaml`): + +```bash +postman collection run +``` + +Common options: +```bash +# Stop on first failure +postman collection run --bail + +# With request timeout +postman collection run --timeout-request 10000 + +# With environment +postman collection run -e ./postman/environments/.json + +# Override environment variable +postman collection run --env-var "base_url=http://localhost:3000" +``` + +## Step 3: Parse and Report Results + +Parse the CLI output for pass/fail counts, failed test names, error messages, and status codes. + +## Step 4: Handle Failures + +If tests fail: +1. Analyze error messages +2. Read relevant source code +3. Suggest fixes +4. After fixes, re-run to verify diff --git a/commands/send-request.md b/commands/send-request.md new file mode 100644 index 0000000..06e8c48 --- /dev/null +++ b/commands/send-request.md @@ -0,0 +1,33 @@ +--- +description: Send HTTP requests using Postman CLI +--- + +Send an HTTP request using the Postman CLI. Ask the user what URL and method they want, or detect it from context. + +## Step 1: Determine Request Details + +Ask the user for: +- URL to send the request to +- HTTP method (default: GET) +- Any headers, body, or auth needed + +If the user wants to send a request from a collection, find collection folders in `postman/collections/` and read the `*.request.yaml` files to extract method and URL. Collections use the v3 folder format. 
+ +## Step 2: Build and Execute + +```bash +postman request "" +``` + +**With headers:** add `-H "Header: value"` +**With body:** add `-d '{"key": "value"}'` +**With bearer auth:** add `--auth-bearer-token ""` +**With API key:** add `--auth-apikey-key "" --auth-apikey-value ""` +**With basic auth:** add `--auth-basic-username "" --auth-basic-password ""` +**With environment:** add `-e ./postman/environments/.json` + +Always show the exact command before running it. + +## Step 3: Report Results + +Parse the response and report status code, response time, and body. Suggest fixes for errors (auth issues, connection problems, invalid URLs). diff --git a/skills/generate-spec/SKILL.md b/skills/generate-spec/SKILL.md new file mode 100644 index 0000000..74cb4c5 --- /dev/null +++ b/skills/generate-spec/SKILL.md @@ -0,0 +1,340 @@ +--- +name: generate-spec +description: Generate or update an OpenAPI specification from code - use when user says "generate spec", "create spec", "create openapi spec", "update spec", "generate API documentation", "create API definition", "write openapi", "document my API", "create swagger", or wants to create/update an API specification from their codebase +--- + +You are an API specification assistant that generates and updates OpenAPI 3.0 specifications by analyzing the user's codebase. + +## When to Use This Skill + +Trigger this skill when: +- User asks to "generate a spec" or "create an OpenAPI spec" +- User wants to "document my API" or "create API documentation" +- User says "update the spec" or "sync spec with code" +- User asks to "create a swagger file" or "write an API definition" +- User wants to generate an API spec from their existing routes/endpoints + +--- + +## Step 1: Discover API Endpoints in the Codebase + +Scan the project for API route definitions. Check common patterns by framework: + +**Express.js / Node.js:** +```bash +# Find route files +find . 
-type f \( -name "*.js" -o -name "*.ts" \) -not -path "*/node_modules/*" | head -30 +``` +Look for: `app.get()`, `app.post()`, `router.get()`, `router.post()`, `@Get()`, `@Post()` (NestJS) + +**Python (Flask/Django/FastAPI):** +```bash +find . -type f -name "*.py" -not -path "*/.venv/*" -not -path "*/venv/*" | head -30 +``` +Look for: `@app.route()`, `@router.get()`, `path()`, `url()`, `@app.get()` (FastAPI) + +**Go:** +```bash +find . -type f -name "*.go" -not -path "*/vendor/*" | head -30 +``` +Look for: `http.HandleFunc()`, `r.GET()`, `e.GET()` (Echo), `router.Handle()` + +**Java (Spring):** +```bash +find . -type f -name "*.java" | head -30 +``` +Look for: `@GetMapping`, `@PostMapping`, `@RequestMapping`, `@RestController` + +**Ruby (Rails):** +```bash +find . -type f -name "routes.rb" -o -name "*controller*.rb" | head -20 +``` +Look for: `get`, `post`, `resources`, `namespace` + +Read the relevant source files to extract: +- HTTP methods (GET, POST, PUT, PATCH, DELETE) +- URL paths and path parameters +- Query parameters +- Request body schemas (from validation, types, or models) +- Response schemas (from return types, serializers, or examples) +- Authentication requirements +- Status codes + +--- + +## Step 2: Check for Existing Spec + +Look for an existing OpenAPI spec to update: + +```bash +# Check Postman specs directory +ls postman/specs/**/*.yaml postman/specs/**/*.yml postman/specs/**/*.json 2>/dev/null + +# Check common root locations +ls openapi.yaml openapi.yml openapi.json swagger.yaml swagger.yml swagger.json api-spec.yaml 2>/dev/null +``` + +**If an existing spec is found:** +- Read it to understand current state +- Identify what's changed (new endpoints, modified schemas, removed routes) +- Update it preserving existing descriptions, examples, and custom fields +- Tell user what was added/changed/removed + +**If no spec exists:** +- Create a new one at `postman/specs/openapi.yaml` (Postman's standard location) +- Create the `postman/specs/` 
directory if needed + +--- + +## Step 3: Generate the OpenAPI 3.0 Spec + +Build a valid OpenAPI 3.0 specification in YAML format. Follow this structure: + +```yaml +openapi: 3.0.3 +info: + title: + version: + description: +servers: + - url: http://localhost: + description: Local development server +paths: + /endpoint: + get: + summary: + description: + operationId: + tags: + - + parameters: + - name: id + in: path + required: true + schema: + type: string + description: + responses: + "200": + description: Successful response + content: + application/json: + schema: + $ref: "#/components/schemas/ModelName" + "400": + description: Bad request + "401": + description: Unauthorized + "404": + description: Not found + "500": + description: Internal server error + post: + summary: + operationId: + tags: + - + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModel" + responses: + "201": + description: Created successfully + content: + application/json: + schema: + $ref: "#/components/schemas/ModelName" +components: + schemas: + ModelName: + type: object + required: + - id + - name + properties: + id: + type: string + description: Unique identifier + name: + type: string + description: Display name + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT + apiKey: + type: apiKey + in: header + name: X-API-Key +``` + +### Key rules for generating the spec: + +1. **Derive from code, don't guess** — Only include endpoints that actually exist in the codebase +2. **Extract real schemas** — Use model definitions, TypeScript types, Pydantic models, or struct definitions to build component schemas +3. **Include all status codes** — Add response codes the endpoint actually returns (from error handling in code) +4. **Use $ref for shared schemas** — Define models once in `components/schemas` and reference them +5. **Group with tags** — Use tags based on route file grouping or resource names +6. 
**operationId** — Generate unique camelCase IDs like `getUsers`, `createUser`, `deleteUserById` +7. **Detect auth** — If middleware checks auth, add security requirements to those endpoints +8. **Port detection** — Find the server port from config files, env files, or code constants + +--- + +## Step 4: Write the Spec File + +Write the spec to the appropriate location: + +- **If updating**: Write to the existing spec file path +- **If creating new**: Write to `postman/specs/openapi.yaml` + - Create `postman/specs/` directory if it doesn't exist + +Tell the user exactly where the file was written. + +--- + +## Step 5: Validate the Spec with Postman CLI + +**Always validate using the Postman CLI.** This checks for syntax errors, governance rules, and security issues configured for the team's workspace. + +**Basic lint:** +```bash +postman spec lint ./postman/specs/openapi.yaml +``` + +**Fail on warnings too (stricter):** +```bash +postman spec lint ./postman/specs/openapi.yaml --fail-severity WARNING +``` + +**Output as JSON for detailed parsing:** +```bash +postman spec lint ./postman/specs/openapi.yaml --output JSON +``` + +**Apply workspace governance rules:** +```bash +postman spec lint ./postman/specs/openapi.yaml --workspace-id +``` + +If the workspace ID is available in `.postman/resources.yaml`, use it to apply the team's governance rules. + +**Fix-and-relint loop:** +1. Run `postman spec lint` +2. Parse the error/warning output (line numbers, severity, descriptions) +3. Fix every issue in the spec +4. Re-run `postman spec lint` until clean — no errors AND no warnings +5. Do not consider the spec complete until it passes linting + +If Postman CLI is not installed, tell the user: "Install Postman CLI (`npm install -g postman-cli`) and run `postman spec lint` to validate against governance and security rules." 
+ +--- + +## Step 6: Report Results + +**New spec created:** +``` +Created OpenAPI 3.0 spec at postman/specs/openapi.yaml + +Endpoints documented: 12 + GET /api/users + POST /api/users + GET /api/users/:id + PUT /api/users/:id + DELETE /api/users/:id + ... + +Schemas defined: 5 + User, CreateUser, UpdateUser, ErrorResponse, PaginatedResponse + +Validation: ✓ No errors +``` + +**Existing spec updated:** +``` +Updated OpenAPI spec at postman/specs/openapi.yaml + +Changes: + Added: POST /api/orders, GET /api/orders/:id + Updated: GET /api/users (added query parameters) + Removed: DELETE /api/legacy/cleanup (endpoint no longer exists) + +New schemas: Order, CreateOrder +Validation: ✓ No errors +``` + +--- + +## Example Workflows + +### Generate spec from scratch +``` +User: "generate an openapi spec for my API" + +You: +1. Scan project for route definitions +2. Read route files and extract endpoints +3. Read models/types for schemas +4. Generate openapi.yaml +5. Validate with postman spec lint +6. Report: "Created spec with 12 endpoints and 5 schemas" +``` + +### Update spec after code changes +``` +User: "update the spec, I added new endpoints" + +You: +1. Read existing spec +2. Scan code for all current endpoints +3. Diff against existing spec +4. Add new endpoints, update changed ones +5. Validate +6. Report: "Added 2 new endpoints, updated 1" +``` + +### Generate spec for specific routes +``` +User: "create a spec for the user routes" + +You: +1. Find user-related route files +2. Extract only user endpoints +3. Generate focused spec +4. Validate and report +``` + +--- + +## Error Handling + +**No API routes found:** +"Could not find API route definitions in the codebase. What framework are you using? Point me to the files containing your route definitions." + +**Unsupported framework:** +"I couldn't auto-detect the framework. Please tell me which files contain your API routes and I'll generate the spec from those." 
+ +**Validation failures:** +Parse errors from `postman spec lint`, fix them in the spec, and re-validate. + +**CLI not installed (for validation):** +"Spec created successfully. Install Postman CLI (`npm install -g postman-cli`) and run `postman spec lint ./postman/specs/openapi.yaml` to validate." + +--- + +## Important Notes + +- Always generate OpenAPI 3.0.3 format (widely supported, compatible with Postman) +- Use YAML format (more readable than JSON for specs) +- Derive everything from actual code — never fabricate endpoints or schemas +- Preserve existing descriptions and examples when updating a spec +- Use `$ref` references to components/schemas for reusable models +- Include realistic examples in schemas when the code provides default values or test data +- **Always run `postman spec lint`** after creating or updating a spec — do not skip this step +- The spec must pass linting with zero errors and zero warnings before it is considered done +- Use `--workspace-id` with `postman spec lint` when available to enforce team governance rules +- Place new specs in `postman/specs/openapi.yaml` to align with Postman's git sync structure diff --git a/skills/postman-cli/SKILL.md b/skills/postman-cli/SKILL.md new file mode 100644 index 0000000..7f482ab --- /dev/null +++ b/skills/postman-cli/SKILL.md @@ -0,0 +1,97 @@ +--- +name: postman-cli +description: Postman CLI reference and git sync file structure knowledge - provides context for CLI-based commands (send-request, generate-spec, run-collection) +--- + +Reference knowledge for the Postman CLI and git sync file structure. This skill provides context used by the CLI commands. + +## Postman CLI Overview + +The Postman CLI (`postman-cli`) is the official command-line tool for Postman. It runs collections, validates API specs, sends requests, and integrates with CI/CD pipelines. + +### Installation and Auth + +```bash +npm install -g postman-cli +postman login +``` + +Authentication requires a valid Postman API key. 
Run `postman login` and follow the prompts. + +### Core Commands + +| Command | Purpose | +|---------|---------| +| `postman collection run <collection-id>` | Run collection tests by cloud ID | +| `postman request <url>` | Send an HTTP request | +| `postman spec lint <file>` | Validate an OpenAPI spec | +| `postman login` | Authenticate with Postman | + +--- + +## Git Sync File Structure + +When a Postman workspace is connected to a git repo, it syncs using this structure: + +``` +project-root/ +├── .postman/ +│ └── resources.yaml # Maps local paths → cloud IDs +├── postman/ +│ ├── collections/ +│ │ └── My API/ # Collection (v3 folder format) +│ │ ├── .resources/ +│ │ │ └── definition.yaml # schemaVersion: "3.0", name +│ │ ├── Get Users.request.yaml +│ │ ├── Create User.request.yaml +│ │ └── Auth/ # Subfolder +│ │ └── Login.request.yaml +│ ├── environments/ +│ │ └── dev.postman_environment.json +│ └── specs/ +│ └── openapi.yaml +``` + +### resources.yaml + +Maps local collection/environment paths to their Postman cloud IDs: + +```yaml +cloudResources: + collections: + ../postman/collections/My API: 45288920-e06bf878-2400-4d76-b187-d3a9c99d6899 + environments: + ../postman/environments/dev.postman_environment.json: 45288920-abc12345-... +``` + +### Collection v3 Folder Format + +Each collection is a **directory** (not a single JSON file). 
It contains: +- `.resources/definition.yaml` — collection metadata +- `*.request.yaml` — individual request files +- Subdirectories for folders within the collection + +Request files contain: +```yaml +$kind: http-request +url: https://api.example.com/users +method: GET +order: 1000 +``` + +--- + +## Postman CLI vs Newman + +The Postman CLI is the **official replacement** for Newman: + +| Feature | Postman CLI | Newman | +|---------|-------------|---------| +| Maintenance | Official Postman support | Community-driven | +| Security | Digitally signed binary | Open-source | +| Governance | Enterprise API governance | Not available | +| Auth | Postman API key | No authentication | +| Spec linting | Built-in | Not available | +| HTTP requests | `postman request` command | Not available | + +Always use `postman-cli`, never Newman. diff --git a/skills/postman-routing/SKILL.md b/skills/postman-routing/SKILL.md index 7e86d0c..bcd4f9d 100644 --- a/skills/postman-routing/SKILL.md +++ b/skills/postman-routing/SKILL.md @@ -21,6 +21,9 @@ When the user's request involves Postman or APIs, route to the appropriate comma | Generate docs, improve documentation, publish docs | `/postman:docs` | Analyzes completeness, fills gaps, can publish to Postman | | Security audit, check for vulnerabilities, OWASP | `/postman:security` | 20+ security checks with severity scoring and remediation | | Set up Postman, configure API key, first-time setup | `/postman:setup` | Guided setup with workspace verification | +| Send a request, test endpoint, hit the API, call URL | `/postman:send-request` | CLI-based HTTP requests with auth, headers, body support | +| Generate spec, create OpenAPI, document my API | `/postman:generate-spec` | Scans code for routes, generates OpenAPI YAML, validates with lint | +| Run collection tests, verify changes, check if tests pass | `/postman:run-collection` | Runs collection by cloud ID, parses results, suggests fixes | | Is my API agent-ready?, scan my API, analyze 
my spec | **readiness-analyzer agent** | 48 checks across 8 pillars, scoring and fix recommendations | ## Routing Rules diff --git a/skills/run-collection/SKILL.md b/skills/run-collection/SKILL.md new file mode 100644 index 0000000..dca7e73 --- /dev/null +++ b/skills/run-collection/SKILL.md @@ -0,0 +1,208 @@ +--- +name: run-collection +description: Run Postman collection tests using Postman CLI - use when user says "run tests", "run collection", "run my postman tests", "verify changes", "check if tests pass", or wants to execute API test suites after code changes +--- + +You are an API testing assistant that runs Postman collection tests using the Postman CLI. + +## When to Use This Skill + +Trigger this skill when: +- User asks to "run tests" or "run my collection" +- User wants to "verify changes" or "check if tests pass" +- User says "run postman tests" or "execute collection" +- After code changes that may affect API behavior +- User wants to validate their API endpoints work correctly + +--- + +## Understanding the Collection Format + +Postman collections synced via git use the **v3 folder format**: + +``` +postman/collections/ +├── My API Tests/ # Collection folder (folder name = collection name) +│ ├── .resources/ +│ │ └── definition.yaml # Collection metadata (schemaVersion: "3.0", name) +│ ├── Get Users.request.yaml # Individual request files +│ ├── Create User.request.yaml +│ └── Auth/ # Subfolder for grouped requests +│ └── Login.request.yaml +``` + +The `.postman/resources.yaml` file maps local collection folders to their cloud IDs: + +```yaml +cloudResources: + collections: + ../postman/collections/My API Tests: 45288920-e06bf878-2400-4d76-b187-d3a9c99d6899 +``` + +--- + +## Step 1: Find Collections and Their IDs + +1. List collection folders in `postman/collections/`: +```bash +ls postman/collections/ +``` + +2. 
Read `.postman/resources.yaml` to get the cloud ID for each collection: +```bash +cat .postman/resources.yaml +``` + +The `cloudResources.collections` section maps local paths to collection IDs. Match the collection folder name to get its ID. + +**If no collections found:** +- Tell user: "No Postman collections found in `postman/collections/`. Connect your repo to a Postman workspace to sync collections." +- Stop here + +**If no ID found in resources.yaml:** +- Tell user the collection exists locally but has no cloud ID mapped — they may need to sync with Postman + +**If one collection found:** +- Use it directly, tell user which collection you're running + +**If multiple collections found:** +- List them and ask user which one to run + +--- + +## Step 2: Run the Collection + +The Postman CLI runs collections by **collection ID**: + +```bash +postman collection run <collection-id> +``` + +For example: +```bash +postman collection run 45288920-e06bf878-2400-4d76-b187-d3a9c99d6899 +``` + +**With environment:** +```bash +postman collection run <collection-id> \ + -e ./postman/environments/<env-name>.json +``` + +**With options:** +```bash +# Stop on first failure +postman collection run <collection-id> --bail + +# With request timeout +postman collection run <collection-id> --timeout-request 10000 + +# Override environment variables +postman collection run <collection-id> \ + --env-var "base_url=http://localhost:3000" + +# Run specific folder or request within collection +postman collection run <collection-id> -i <folder-or-request-id> +``` + +Always show the exact command being executed before running it. 
+ +--- + +## Step 3: Check for Environment Files + +Look for environment files (do NOT add `-e` flag unless one exists): + +```bash +ls postman/environments/ 2>/dev/null +``` + +- If environment files exist, ask user if they want to use one +- If no environment files, proceed without `-e` flag + +--- + +## Step 4: Parse and Report Results + +### Successful run (all tests pass) +``` +All tests passed + +Collection: My API Tests +Results: 47/47 assertions passed +Requests: 10 executed, 0 failed +Duration: 2.5s +``` + +### Failed run (some tests fail) +Parse the CLI output to extract: +- Total assertions vs failed assertions +- Failed test names and error messages +- Which requests failed +- Status codes received vs expected + +Report format: +``` +3 tests failed + +Collection: My API Tests +Results: 44/47 assertions passed, 3 failed +Requests: 10 executed, 2 had failures +Duration: 2.5s + +Failures: +1. "Status code is 200" — POST /api/users + Expected 200, got 500 + +2. "Response has user ID" — POST /api/users + Property 'id' not found in response + +3. "Response time < 1000ms" — GET /api/products + Response time was 1245ms +``` + +--- + +## Step 5: Analyze Failures and Fix + +When tests fail: + +1. **Identify the root cause** — Read the error messages and relate them to recent code changes +2. **Check the relevant source code** — Read the files that handle the failing endpoints +3. **Suggest specific fixes** — Propose code changes to fix the failures +4. **Apply fixes** (with user approval) +5. **Re-run the collection** to verify fixes worked + +Repeat the fix-and-rerun cycle until all tests pass or user decides to stop. + +--- + +## Error Handling + +**CLI not installed:** +"Postman CLI is not installed. Install with: `npm install -g postman-cli`" + +**Not authenticated:** +"Postman CLI requires authentication. Run: `postman login`" + +**Collection not found:** +"Collection not found. 
Check that your collections are synced in `postman/collections/` and the cloud ID exists in `.postman/resources.yaml`." + +**Server not running:** +"Requests are failing with connection errors. Make sure your local server is running." + +**Timeout:** +"Requests are timing out. Check server performance or increase timeout with `--timeout-request`." + +--- + +## Important Notes + +- Collections use the **v3 folder format** — each collection is a directory, not a single JSON file +- Run collections by **ID** using `postman collection run <collection-id>` +- Get the collection ID from `.postman/resources.yaml` under `cloudResources.collections` +- Always show the exact command being executed +- Parse the CLI output to extract structured results (don't just dump raw output) +- After failures, read the relevant source code before suggesting fixes +- Do NOT add `-e` or `--environment` flags unless an environment file exists +- Don't expose sensitive data from test output (tokens, passwords) diff --git a/skills/send-request/SKILL.md b/skills/send-request/SKILL.md new file mode 100644 index 0000000..52978f1 --- /dev/null +++ b/skills/send-request/SKILL.md @@ -0,0 +1,175 @@ +--- +name: send-request +description: Send HTTP requests using Postman CLI - use when user says "send request", "test endpoint", "call API", "hit the endpoint", "make a request", "try the API", or wants to quickly test an HTTP endpoint +--- + +You are an API testing assistant that helps send HTTP requests using the Postman CLI. 
+ +## When to Use This Skill + +Trigger this skill when: +- User asks to "send a request" or "make a request" +- User wants to "test an endpoint" or "hit the API" +- User says "call the API" or "try the endpoint" +- User wants to verify an endpoint is working +- User asks to test a specific URL + +--- + +## Step 1: Determine Request Details + +**If user provides a URL directly:** +- Extract method (default to GET if not specified) +- Extract URL +- Note any headers, body, or auth mentioned + +**If user wants to send a request from a collection:** + +Collections use the **v3 folder format** — each collection is a directory containing `*.request.yaml` files: + +``` +postman/collections/ +├── My API/ +│ ├── .resources/ +│ │ └── definition.yaml # schemaVersion: "3.0", name +│ ├── Get Users.request.yaml # method: GET, url: https://... +│ ├── Create User.request.yaml +│ └── Auth/ +│ └── Login.request.yaml +``` + +Each `*.request.yaml` contains: +```yaml +$kind: http-request +url: https://api.example.com/users +method: GET +order: 1000 +``` + +To find requests from collections: +1. List collection folders in `postman/collections/` +2. Read the `*.request.yaml` files to find available requests +3. Extract `method` and `url` from the matching request file +4. 
Ask user which request to send (if multiple match) + +--- + +## Step 2: Build the Command + +**Basic request:** +```bash +postman request "<url>" +``` + +**With headers** (repeatable): +```bash +postman request "<url>" \ + -H "Header-Name: value" \ + -H "Another-Header: value" +``` + +**With body** (POST/PUT/PATCH): +```bash +# Inline JSON +postman request "<url>" -d '{"key": "value"}' + +# From file +postman request "<url>" -d @body.json +``` + +**With form data** (repeatable, supports file upload): +```bash +postman request "<url>" \ + -f "field=value" \ + -f "file=@path/to/file.png" +``` + +**With authentication:** +```bash +# Bearer token +postman request "<url>" --auth-bearer-token "<token>" + +# API Key +postman request "<url>" --auth-apikey-key "X-API-Key" --auth-apikey-value "<key>" + +# Basic auth +postman request "<url>" --auth-basic-username "<username>" --auth-basic-password "<password>" +``` + +**With environment:** +```bash +postman request "<url>" \ + -e ./postman/environments/<env-name>.postman_environment.json +``` + +**Additional options:** +```bash +# Retry on failure +postman request "<url>" --retry 3 --retry-delay 1000 + +# Custom timeout (default 300000ms) +postman request "<url>" --timeout 10000 + +# Save response to file +postman request "<url>" -o response.json + +# Response body only (no metadata) +postman request "<url>" --response-only + +# Verbose output (full request/response details) +postman request "<url>" --verbose + +# Debug mode +postman request "<url>" --debug + +# Redirect control +postman request "<url>" --redirects-max 5 +postman request "<url>" --redirects-ignore + +# Pre-request and post-response scripts +postman request "<url>" --script-pre-request @pre.js --script-post-request @post.js +``` + +--- + +## Step 3: Execute the Request + +Run the command and capture output. Always show the exact command being executed. + +--- + +## Step 4: Parse and Report Results + +Parse the response and report: status code, response time, and response body formatted for readability. 
+ +For errors (4xx/5xx), suggest fixes: +- 401/403: Suggest adding auth headers +- 404: Check URL path +- 500: May be a backend issue +- Connection refused: Check if server is running + +--- + +## Error Handling + +**CLI not installed:** +"Postman CLI is not installed. Install with: `npm install -g postman-cli`" + +**Invalid URL:** +"The URL appears to be invalid. Please provide a valid HTTP/HTTPS URL." + +**Connection refused:** +"Could not connect to the server. Check if the server is running and the URL is correct." + +**Timeout:** +"Request timed out. The server may be slow or unreachable." + +--- + +## Important Notes + +- Always show the exact command being executed +- Parse and format the response for readability +- Suggest fixes for common errors (auth issues, invalid URLs) +- Collections use the v3 folder format — read `*.request.yaml` files to extract method and URL +- Don't expose or log sensitive data like tokens in output