
fixed python 3.9 tests #7

Workflow file for this run

name: test
on:
  push:
    branches: ["*"]
  pull_request:
    branches: ["main"]
jobs:
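  # Static checks: black formatting, ruff lint, and mypy type checking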
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install uv
        uses: astral-sh/setup-uv@v2
      - name: Create an environment
        run: uv sync
      - name: Install dependencies
        run: uv pip install -e ".[dev,test]"
      - name: Run black
        run: uv run black --check src/ tests/
      - name: Run ruff
        run: uv run ruff check src/ tests/
      - name: Run mypy
        run: uv run mypy src/
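  # Unit tests across all supported Python versions; coverage is uploaded once, from the 3.12 run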
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        python-version: ["3.9", "3.10", "3.11", "3.12"]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install uv
        uses: astral-sh/setup-uv@v2
      - name: Set up Python ${{ matrix.python-version }}
        run: uv python install ${{ matrix.python-version }}
      - name: Create an environment
        run: uv venv --python ${{ matrix.python-version }}
      - name: Install dependencies
        run: uv pip install -e ".[test]"
      - name: Run pytest
        run: uv run pytest tests/unit/ -v --cov --cov-report=xml --cov-report=term-missing
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
        if: matrix.python-version == '3.12'
        with:
          file: ./coverage.xml
          fail_ci_if_error: false
        continue-on-error: true
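  # Integration tests against the live Groq API; non-blocking and skipped when GROQ_API_KEY is not configured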
  integration-tests:
    runs-on: ubuntu-latest
    # Don't block PRs if integration tests fail (API may be down, rate limits, etc.)
    continue-on-error: true
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install uv
        uses: astral-sh/setup-uv@v2
      - name: Set up Python
        run: uv python install 3.12
      - name: Create an environment
        run: uv venv --python 3.12
      - name: Install dependencies
        run: uv pip install -e ".[test]"
      - name: Run integration tests
        env:
          GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
        run: |
          if [ -n "$GROQ_API_KEY" ]; then
            uv run pytest tests/integration/ -m integration -v
          else
            echo "Skipping integration tests - GROQ_API_KEY not configured"
          fi
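  # Performance benchmarks: run pytest-benchmark, compare against a cached baseline, and publish the results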
  performance:
    runs-on: ubuntu-latest
    # Run on main branch commits and PRs
    if: github.event_name == 'push' || github.event_name == 'pull_request'
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          # Fetch enough history to compare with previous commit
          fetch-depth: 2
      - name: Install uv
        uses: astral-sh/setup-uv@v2
      - name: Set up Python
        run: uv python install 3.12
      - name: Create an environment
        run: uv venv --python 3.12
      - name: Install dependencies
        run: uv pip install -e ".[test]"
      - name: Create benchmarks directory
        run: mkdir -p .benchmarks
      - name: Run performance benchmarks
        run: |
          uv run pytest tests/performance/test_benchmarks.py \
            --benchmark-only \
            --benchmark-json=.benchmarks/current.json \
            --benchmark-columns=min,max,mean,stddev,median,ops \
            --benchmark-sort=name
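      # Restore the most recent baseline (preferring this ref, then main) so the comparison step has something to diff against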
      - name: Download previous benchmark results
        uses: actions/cache@v4
        with:
          path: .benchmarks/baseline.json
          key: benchmark-baseline-${{ github.ref }}-${{ github.sha }}
          restore-keys: |
            benchmark-baseline-${{ github.ref }}-
            benchmark-baseline-refs/heads/main-
      - name: Compare with baseline
        if: hashFiles('.benchmarks/baseline.json') != ''
        run: |
          uv run pytest tests/performance/test_benchmarks.py \
            --benchmark-only \
            --benchmark-compare=.benchmarks/baseline.json \
            --benchmark-compare-fail=mean:20% || true
      - name: Store benchmark results as baseline
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: |
          cp .benchmarks/current.json .benchmarks/baseline.json
      - name: Upload benchmark results
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: .benchmarks/current.json
          retention-days: 90
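      # Post the benchmark table as a PR comment; failures here never fail the job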
      - name: Comment PR with benchmark results
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
        continue-on-error: true
        with:
          script: |
            const fs = require('fs');
            const results = JSON.parse(fs.readFileSync('.benchmarks/current.json', 'utf8'));
            let comment = '## 📊 Performance Benchmark Results\n\n';
            comment += '| Benchmark | Mean | Ops/sec |\n';
            comment += '|-----------|------|----------|\n';
            results.benchmarks.forEach(b => {
              const name = b.name.replace('test_benchmark_', '').replace('test_', '');
              const mean = (b.stats.mean * 1000000).toFixed(2); // Convert seconds to µs
              const ops = (b.stats.ops / 1000).toFixed(2); // Convert to K ops/sec
              comment += `| ${name} | ${mean} µs | ${ops}K |\n`;
            });
            comment += '\n*All benchmarks run with pytest-benchmark (100 iterations, 10 rounds)*';
            await github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: comment
            });