17 changes: 17 additions & 0 deletions INSTALLATION.md
@@ -103,6 +103,23 @@ GITHUB_TOKEN=your_github_token_here # Optional for npx, required for web UI
- Select `repo` scope for private repositories
- Select `public_repo` for public repositories only

### Using non-Gemini providers

AsyncReview uses DSPy/LiteLLM model prefixes, so provider-specific environment
variables are picked up automatically when you choose a matching model.

```bash
# OpenAI
export OPENAI_API_KEY=your_openai_api_key
asyncreview review --url https://github.com/org/repo/pull/123 \
-q "Review this" --model openai/gpt-4o-mini

# Local Ollama
ollama serve
asyncreview review --url https://github.com/org/repo/pull/123 \
-q "Review this" --model ollama_chat/qwen3:4b
```
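
Under the hood the `--model` flag is a plain DSPy/LiteLLM model string. As an illustrative sketch (not code from this repo), the same provider routing can be done directly in Python; `dspy.LM` and `dspy.configure` are the calls AsyncReview itself uses, and the model names here are examples:

```python
# Minimal sketch of the provider routing, assuming the same DSPy/LiteLLM
# setup AsyncReview uses internally. Model names are illustrative examples.
import dspy

lm = dspy.LM("openai/gpt-4o-mini")    # LiteLLM reads OPENAI_API_KEY from the environment
# lm = dspy.LM("ollama_chat/qwen3:4b")  # talks to a local `ollama serve` instance
dspy.configure(lm=lm)
```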

## Running AsyncReview Locally

### Option 1: Using the API Server + Web UI
21 changes: 19 additions & 2 deletions README.md
@@ -64,13 +64,30 @@ npx skills add AsyncFuncAI/AsyncReview

### Public Repositories

For public repos, you only need a Gemini API key.
For public repos, you only need a Gemini API key by default.

```bash
export GEMINI_API_KEY="your-key"
npx asyncreview review --url https://github.com/org/repo/pull/123 -q "Review this"
```

### Using non-Gemini providers

AsyncReview passes model names directly to DSPy/LiteLLM, so you can use any
LiteLLM-compatible provider prefix. Gemini remains the default.

```bash
# OpenAI
export OPENAI_API_KEY="your-key"
npx asyncreview review --url https://github.com/org/repo/pull/123 \
-q "Review this" --model openai/gpt-4o-mini

# Local Ollama
ollama serve
npx asyncreview review --url https://github.com/org/repo/pull/123 \
-q "Review this" --model ollama_chat/qwen3:4b
```
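
If your Ollama instance is not on the default local endpoint, LiteLLM honors `OLLAMA_API_BASE` (mentioned in the config comments added by this PR). A minimal sketch, assuming a local deployment; the URL is an example:

```python
# Sketch: point LiteLLM at a non-default Ollama host before starting a review.
# The URL below is an assumed example; adjust it to your own deployment.
import os

os.environ.setdefault("OLLAMA_API_BASE", "http://127.0.0.1:11434")
```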

### Private Repositories

For private repos, you also need a GitHub token.
@@ -92,7 +109,7 @@ For private repos, you also need a GitHub token.
## Configuration

**Required:**
- **Gemini API Key:** Get one from Google AI Studio. Set as `GEMINI_API_KEY`.
- **LLM API Key:** Gemini uses `GEMINI_API_KEY`; other LiteLLM providers use their own env vars, such as `OPENAI_API_KEY` or `ANTHROPIC_API_KEY`.

**Optional:**
- **GitHub Token:** Required for private repositories to access file contents. Set as `GITHUB_TOKEN`.
5 changes: 4 additions & 1 deletion cli/main.py
@@ -160,7 +160,10 @@ def main():
"--model", "-m",
type=str,
default=None,
help="Model to use (e.g. gemini-3.0-pro-preview)",
help=(
"Model to use (e.g. gemini/gemini-3-pro-preview, "
"openai/gpt-4o, anthropic/claude-3-5-sonnet, ollama_chat/qwen3:4b)"
),
)

args = parser.parse_args()
17 changes: 10 additions & 7 deletions cli/virtual_runner.py
@@ -16,6 +16,13 @@
)


def _normalize_model_name(model_name: str) -> str:
"""Keep legacy bare Gemini names working while allowing LiteLLM prefixes."""
if model_name.startswith("gemini-"):
return f"gemini/{model_name}"
return model_name
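
For illustration (not part of the diff), the normalization behaves as follows; the model names are arbitrary examples:

```python
# Illustrative behavior of _normalize_model_name (example names, not from the PR):
assert _normalize_model_name("gemini-2.0-flash-exp") == "gemini/gemini-2.0-flash-exp"
assert _normalize_model_name("openai/gpt-4o-mini") == "openai/gpt-4o-mini"          # prefixed names pass through
assert _normalize_model_name("gemini/gemini-3-pro-preview") == "gemini/gemini-3-pro-preview"
```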


class VirtualReviewRunner:
"""Run RLM code reviews on GitHub PRs without a local repository.

@@ -31,7 +38,7 @@ def __init__(
"""Initialize the virtual runner.

Args:
model: Override model (e.g. "gemini-3.0-pro-preview")
model: Override model (e.g. "gemini/gemini-3-pro-preview" or "openai/gpt-4o")
quiet: If True, suppress progress output
on_step: Optional callback for RLM step updates
"""
@@ -59,11 +66,7 @@ def _ensure_configured(self):
logging.getLogger(name).setLevel(logging.WARNING)

# Configure DSPy with specified model
model_name = self.model
if not model_name.startswith("gemini/"):
model_name = f"gemini/{model_name}"

dspy.configure(lm=dspy.LM(model_name))
dspy.configure(lm=dspy.LM(_normalize_model_name(self.model)))

# Create RLM with custom interpreter that has Deno 2.x fix
from dspy.primitives.python_interpreter import PythonInterpreter
@@ -76,7 +79,7 @@
signature="context, question -> answer, sources",
max_iterations=MAX_ITERATIONS,
max_llm_calls=MAX_LLM_CALLS,
sub_lm=dspy.LM(f"gemini/{SUB_MODEL}" if not SUB_MODEL.startswith("gemini/") else SUB_MODEL),
sub_lm=dspy.LM(_normalize_model_name(SUB_MODEL)),
verbose=not self.quiet,
interpreter=interpreter,
)
4 changes: 3 additions & 1 deletion cr/config.py
@@ -7,6 +7,9 @@
load_dotenv()

# LLM Configuration
# LiteLLM reads provider-specific keys such as OPENAI_API_KEY, ANTHROPIC_API_KEY,
# GROQ_API_KEY, and OLLAMA_API_BASE directly based on the model prefix.
# GEMINI_API_KEY remains supported for the default Gemini models.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
MAIN_MODEL = os.getenv("MAIN_MODEL", "gemini/gemini-3-pro-preview")
SUB_MODEL = os.getenv("SUB_MODEL", "gemini/gemini-3-flash-preview")
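
Since both defaults are read with `os.getenv`, any LiteLLM-prefixed model can be swapped in through the environment. A minimal sketch, assuming the variables are set before `cr.config` is first imported; the model names are examples:

```python
# Sketch: override the default models via the environment (values are examples).
import os

os.environ["MAIN_MODEL"] = "openai/gpt-4o"
os.environ["SUB_MODEL"] = "openai/gpt-4o-mini"

import cr.config  # os.getenv picks up the overrides at import time
```
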
@@ -137,4 +140,3 @@
"test/**",
"spec/**",
]

5 changes: 4 additions & 1 deletion npx/python/cli/main.py
@@ -292,7 +292,10 @@ def main():
"--model", "-m",
type=str,
default=None,
help="Model to use (e.g. gemini-3.0-pro-preview)",
help=(
"Model to use (e.g. gemini/gemini-3-pro-preview, "
"openai/gpt-4o, anthropic/claude-3-5-sonnet, ollama_chat/qwen3:4b)"
),
)
review_parser.add_argument(
"--submit",
18 changes: 10 additions & 8 deletions npx/python/cli/virtual_runner.py
@@ -25,6 +25,13 @@



def _normalize_model_name(model_name: str) -> str:
"""Keep legacy bare Gemini names working while allowing LiteLLM prefixes."""
if model_name.startswith("gemini-"):
return f"gemini/{model_name}"
return model_name


class VirtualReviewRunner:
"""Run RLM code reviews on GitHub PRs and local directories.

@@ -41,7 +48,7 @@ def __init__(
"""Initialize the virtual runner.

Args:
model: Override model (e.g. "gemini-3.0-pro-preview")
model: Override model (e.g. "gemini/gemini-3-pro-preview" or "openai/gpt-4o")
quiet: If True, suppress progress output
on_step: Optional callback for RLM step updates
"""
@@ -199,23 +206,18 @@ def _ensure_configured(self):
logging.getLogger(name).setLevel(logging.WARNING)

# Configure DSPy with specified model (cache=False to prevent disk caching)
model_name = self.model
if not model_name.startswith("gemini/"):
model_name = f"gemini/{model_name}"

self._lm = dspy.LM(model_name, cache=False)
self._lm = dspy.LM(_normalize_model_name(self.model), cache=False)

# Create RLM with custom interpreter that has Deno 2.x fix
deno_command = build_deno_command()
interpreter = PythonInterpreter(deno_command=deno_command)

# Standard signature
sub_model = f"gemini/{SUB_MODEL}" if not SUB_MODEL.startswith("gemini/") else SUB_MODEL
self._rlm = dspy.RLM(
signature="context, question -> answer, sources",
max_iterations=MAX_ITERATIONS,
max_llm_calls=MAX_LLM_CALLS,
sub_lm=dspy.LM(sub_model, cache=False),
sub_lm=dspy.LM(_normalize_model_name(SUB_MODEL), cache=False),
verbose=not self.quiet,
interpreter=interpreter,
tools=self._create_tool_functions(),
4 changes: 3 additions & 1 deletion npx/python/cr/config.py
@@ -7,6 +7,9 @@
load_dotenv()

# LLM Configuration
# LiteLLM reads provider-specific keys such as OPENAI_API_KEY, ANTHROPIC_API_KEY,
# GROQ_API_KEY, and OLLAMA_API_BASE directly based on the model prefix.
# GEMINI_API_KEY remains supported for the default Gemini models.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
MAIN_MODEL = os.getenv("MAIN_MODEL", "gemini/gemini-3-pro-preview")
SUB_MODEL = os.getenv("SUB_MODEL", "gemini/gemini-3-flash-preview")
@@ -137,4 +140,3 @@
"test/**",
"spec/**",
]

56 changes: 30 additions & 26 deletions npx/python/tests/test_e2e_virtual_runner.py
@@ -1,4 +1,4 @@
"""E2E tests for VirtualReviewRunner with real Gemini API and GitHub.
"""E2E tests for VirtualReviewRunner with real LLM API and GitHub.

These tests verify:
1. FETCH_FILE tool interception works across iterations
@@ -7,8 +7,8 @@
4. Multi-turn RLM conversations handle state correctly

Requirements:
- GEMINI_API_KEY environment variable must be set
- Internet connection for GitHub and Gemini API
- GEMINI_API_KEY, OPENAI_API_KEY, or ANTHROPIC_API_KEY environment variable must be set
- Internet connection for GitHub and the selected LLM provider
- Deno must be installed and in PATH
"""

@@ -17,19 +17,23 @@
import pytest
from cli.virtual_runner import VirtualReviewRunner

LLM_API_KEY_VARS = ("GEMINI_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY")
TEST_MODEL = os.getenv("ASYNCREVIEW_TEST_MODEL", "gemini-2.0-flash-exp")
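
The suite can be pointed at another provider through `ASYNCREVIEW_TEST_MODEL`. A hedged sketch; the model name is an example, and the variable must be set before this module is imported:

```python
# Sketch: run the E2E suite against a non-default model (example value).
# Shell equivalent: ASYNCREVIEW_TEST_MODEL=openai/gpt-4o-mini pytest tests/
import os

os.environ["ASYNCREVIEW_TEST_MODEL"] = "openai/gpt-4o-mini"
```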


# Require explicit API key for E2E tests
@pytest.fixture(scope="module")
def gemini_api_key():
"""Ensure GEMINI_API_KEY is set for E2E tests."""
key = os.getenv("GEMINI_API_KEY")
if not key:
pytest.skip("GEMINI_API_KEY not set, skipping E2E tests")
return key
def llm_api_key():
"""Ensure an LLM provider API key is set for E2E tests."""
for name in LLM_API_KEY_VARS:
key = os.getenv(name)
if key:
return key
pytest.skip("No LLM API key set, skipping E2E tests")
Review comment on lines +26 to +32 (severity: medium):

The llm_api_key fixture returns the first key found in LLM_API_KEY_VARS. However, TEST_MODEL defaults to a Gemini model. If a user has OPENAI_API_KEY set but not GEMINI_API_KEY, the test will attempt to run a Gemini model with an OpenAI key, leading to failure. The fixture should return the key that corresponds to the TEST_MODEL:

```python
def llm_api_key():
    """Ensure the correct LLM provider API key is set for E2E tests based on TEST_MODEL."""
    provider = TEST_MODEL.split('/')[0] if '/' in TEST_MODEL else 'gemini'
    if TEST_MODEL.startswith('gemini-'):
        provider = 'gemini'

    env_var = f"{provider.upper()}_API_KEY"
    key = os.getenv(env_var)
    if key:
        return key

    pytest.skip(f"{env_var} not set, skipping E2E tests for {TEST_MODEL}")
```



@pytest.mark.asyncio
async def test_fetch_file_interception(gemini_api_key):
async def test_fetch_file_interception(llm_api_key):
"""Test that FETCH_FILE reliably populates repo_files across iterations.

This is the core test for the variable rebuild fix. It verifies that when
@@ -40,7 +44,7 @@ async def test_fetch_file_interception(gemini_api_key):
url = "https://github.com/stanfordnlp/dspy/pull/9240"
question = "What is in dspy/predict/rlm.py? Please fetch and analyze the complete contents of this file."

runner = VirtualReviewRunner(model="gemini-3-flash-preview", quiet=False)
runner = VirtualReviewRunner(model=TEST_MODEL, quiet=False)

# Intercept to verify state propagation
original_acall = None
@@ -81,12 +85,12 @@ async def intercepted_acall(*args, **kwargs):


@pytest.mark.asyncio
async def test_search_code_tool(gemini_api_key):
async def test_search_code_tool(llm_api_key):
"""Test SEARCH_CODE tool integration."""
url = "https://github.com/stanfordnlp/dspy/pull/9240"
question = "Use SEARCH_CODE to find all files related to 'DataFrame'. List the paths you find."

runner = VirtualReviewRunner(model="gemini-2.0-flash-exp", quiet=True)
runner = VirtualReviewRunner(model=TEST_MODEL, quiet=True)

answer, sources, metadata = await runner.review(url, question)

@@ -97,12 +101,12 @@ def test_search_code_tool(gemini_api_key):


@pytest.mark.asyncio
async def test_list_directory_tool(gemini_api_key):
async def test_list_directory_tool(llm_api_key):
"""Test LIST_DIR tool integration."""
url = "https://github.com/stanfordnlp/dspy/pull/9240"
question = "Use LIST_DIR to list the contents of the 'dspy/predict/' directory."

runner = VirtualReviewRunner(model="gemini-2.0-flash-exp", quiet=True)
runner = VirtualReviewRunner(model=TEST_MODEL, quiet=True)

answer, sources, metadata = await runner.review(url, question)

@@ -113,13 +117,13 @@ async def test_list_directory_tool(gemini_api_key):


@pytest.mark.asyncio
async def test_multi_file_fetch(gemini_api_key):
async def test_multi_file_fetch(llm_api_key):
"""Test fetching multiple files in sequence."""
url = "https://github.com/stanfordnlp/dspy/pull/9240"
question = ("Find and fetch both dspy/predict/rlm.py and any test file related to RLM. "
"Compare their contents briefly.")

runner = VirtualReviewRunner(model="gemini-2.0-flash-exp", quiet=True)
runner = VirtualReviewRunner(model=TEST_MODEL, quiet=True)

answer, sources, metadata = await runner.review(url, question)

@@ -132,12 +136,12 @@ async def test_multi_file_fetch(gemini_api_key):


@pytest.mark.asyncio
async def test_error_handling_invalid_path(gemini_api_key):
async def test_error_handling_invalid_path(llm_api_key):
"""Test that invalid file paths are handled gracefully."""
url = "https://github.com/stanfordnlp/dspy/pull/9240"
question = "Try to fetch the file 'nonexistent/fake/path.py' and report what happens."

runner = VirtualReviewRunner(model="gemini-2.0-flash-exp", quiet=True)
runner = VirtualReviewRunner(model=TEST_MODEL, quiet=True)

# Should not raise, even with invalid path
answer, sources, metadata = await runner.review(url, question)
@@ -150,28 +154,28 @@ async def test_error_handling_invalid_path(gemini_api_key):


@pytest.mark.asyncio
async def test_issue_review(gemini_api_key):
async def test_issue_review(llm_api_key):
"""Test reviewing a GitHub issue (not just PRs)."""
# Use a known issue
url = "https://github.com/stanfordnlp/dspy/issues/100"
question = "Summarize what this issue is about."

runner = VirtualReviewRunner(model="gemini-2.0-flash-exp", quiet=True)
runner = VirtualReviewRunner(model=TEST_MODEL, quiet=True)

answer, sources, metadata = await runner.review(url, question)

assert answer, "Should return an answer for issues"
assert metadata.get("type") == "issue", "Should identify as issue type"


@pytest.mark.asyncio
async def test_context_preservation(gemini_api_key):
async def test_context_preservation(llm_api_key):
"""Test that PR context (diff, description) is preserved alongside tool results."""
url = "https://github.com/stanfordnlp/dspy/pull/9240"
question = ("Based on the PR description and the actual code in dspy/predict/rlm.py, "
"explain how the DataFrame feature is implemented.")

runner = VirtualReviewRunner(model="gemini-2.0-flash-exp", quiet=True)
runner = VirtualReviewRunner(model=TEST_MODEL, quiet=True)

answer, sources, metadata = await runner.review(url, question)

@@ -187,9 +191,9 @@ async def test_context_preservation(gemini_api_key):
# Allow running tests directly with: python test_e2e_virtual_runner.py
import sys

api_key = os.getenv("GEMINI_API_KEY")
api_key = next((os.getenv(name) for name in LLM_API_KEY_VARS if os.getenv(name)), None)
if not api_key:
print("ERROR: GEMINI_API_KEY not set")
print("ERROR: no LLM API key set")
sys.exit(1)

print("Running E2E tests...")