Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
55 changes: 55 additions & 0 deletions .github/workflows/benchmarks.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# Continuous benchmarking via CodSpeed: runs the pytest-codspeed suite on
# every push to main and on pull requests.
name: Benchmarks

on:
  push:
    branches:
      - "main"
  pull_request:
  # `workflow_dispatch` allows CodSpeed to trigger backtest
  # performance analysis in order to generate initial data.
  workflow_dispatch:

permissions:
  contents: read
  # NOTE(review): presumably required for CodSpeed's OIDC-based upload
  # authentication — confirm against the CodSpeed action docs.
  id-token: write

# Cancel a still-running benchmark run when a newer commit arrives on the
# same ref, so only the latest results are measured.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  benchmarks:

    runs-on: ubuntu-latest
    timeout-minutes: 30

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python 3.13
        uses: actions/setup-python@v5
        with:
          python-version: '3.13'

      - name: Set up uv
        uses: astral-sh/setup-uv@v7
        with:
          enable-cache: true

      - name: Install dependencies
        shell: bash
        run: uv pip install --system -r requirements_dev.txt

      - name: Install the library
        shell: bash
        run: uv pip install --system .

      # Installed separately (unpinned) on top of requirements_dev.txt,
      # which already pins pytest-codspeed — this may override the pin.
      - name: Install pytest-codspeed
        shell: bash
        run: uv pip install --system pytest-codspeed

      - name: Run benchmarks
        uses: CodSpeedHQ/action@v4
        with:
          mode: simulation
          run: pytest tests/benchmarks/test_benchmarks.py --codspeed
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -20,3 +20,4 @@ planning_features.md
coverage.xml
.qwen
uv.lock
.codspeed
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
[![Checked with mypy](http://www.mypy-lang.org/static/mypy_badge.svg)](http://mypy-lang.org/)
[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
[![DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/mutating/suby)
[![CodSpeed](https://img.shields.io/endpoint?url=https://codspeed.io/badge.json)](https://codspeed.io/mutating/suby?utm_source=badge)

</details>

Expand Down
4 changes: 3 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "suby"
version = "0.0.5"
version = "0.0.6"
authors = [
{ name="Evgeniy Blinov", email="zheni-b@yandex.ru" },
]
Expand All @@ -14,6 +14,7 @@ requires-python = ">=3.8"
dependencies = [
'emptylog>=0.0.12',
'cantok>=0.0.36',
'microbenchmark>=0.0.2',
]
classifiers = [
"Operating System :: OS Independent",
Expand Down Expand Up @@ -58,6 +59,7 @@ source = ["suby"]

[tool.pytest.ini_options]
norecursedirs = ["build", "mutants"]
testpaths = ["tests/documentation", "tests/typing", "tests/units"]

[tool.ruff]
lint.ignore = ['E501', 'E712', 'PTH123', 'PTH118', 'PLR2004', 'PTH107', 'SIM105', 'SIM102', 'RET503', 'PLR0912', 'C901', 'E731', 'F821']
Expand Down
2 changes: 2 additions & 0 deletions requirements_dev.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
pytest==8.3.5
pytest-codspeed==2.2.1; python_version < '3.9'
pytest-codspeed==4.3.0; python_version >= '3.9'
pytest-xdist==3.6.1; python_version < '3.9'
pytest-xdist==3.8.0; python_version >= '3.9'
coverage==7.6.1
Expand Down
191 changes: 191 additions & 0 deletions suby/benchmarks.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,191 @@
from __future__ import annotations

import sys
from pathlib import Path
from tempfile import TemporaryDirectory
from time import time_ns

from cantok import ConditionToken, SimpleToken
from microbenchmark import Scenario, a

from suby import run

# Default iteration counts for benchmark scenarios: fast scenarios run the
# full count, scenarios that sleep or spawn-and-cancel use the short count.
ITERATIONS = 100
SHORT_ITERATIONS = 20
# The interpreter currently running the benchmarks, as a pathlib.Path, so
# subprocess scenarios exercise a known-present executable.
PYTHON = Path(sys.executable)


def run_with_delayed_condition_token_cancellation() -> None:
    """Start a subprocess and cancel it shortly after it reports startup.

    The child process touches a marker file as its first action and then
    sleeps.  The ``ConditionToken`` polls for that marker and requests
    cancellation once at least 10 milliseconds have elapsed since the
    marker's mtime, so the cancellation path after subprocess startup is
    what gets measured.  All exceptions and output are swallowed by
    ``catch_exceptions``/``catch_output`` — the call itself is the benchmark.
    """
    with TemporaryDirectory() as temporary_directory:
        marker_file = Path(temporary_directory) / 'subprocess-started'
        subprocess_started_at_ns = None

        def should_cancel() -> bool:
            nonlocal subprocess_started_at_ns

            # Once the start timestamp is cached, avoid touching the
            # filesystem again on every poll.
            if subprocess_started_at_ns is None:
                try:
                    # EAFP: a single stat() both detects the marker and
                    # reads its mtime, closing the exists()/stat() race
                    # present when the two are checked separately.
                    subprocess_started_at_ns = marker_file.stat().st_mtime_ns
                except FileNotFoundError:
                    # Subprocess has not created the marker yet.
                    return False
            return time_ns() - subprocess_started_at_ns >= 10_000_000

        run(
            PYTHON,
            '-c',
            (
                'import sys\n'
                'import time\n'
                'from pathlib import Path\n'
                'Path(sys.argv[1]).touch()\n'
                'time.sleep(1)'
            ),
            marker_file,
            split=False,
            token=ConditionToken(should_cancel),
            catch_exceptions=True,
            catch_output=True,
        )


# Benchmark scenario catalogue.  Each Scenario wraps `suby.run` with a fixed
# argument set (built with `a(...)`) and an iteration count; the semantics of
# Scenario/`a` come from the `microbenchmark` package.
simple_success = Scenario(
    run,
    a(PYTHON, '-c', 'pass'),
    name='simple_success',
    doc='Runs a minimal successful Python subprocess.',
    number=ITERATIONS,
)

python_version_output = Scenario(
    run,
    a(PYTHON, '-VV', catch_output=True),
    name='python_version_output',
    doc='Runs the current Python executable as a pathlib.Path and prints its detailed version.',
    number=ITERATIONS,
)

string_executable = Scenario(
    run,
    a(sys.executable, '-c', 'pass'),
    name='string_executable',
    doc='Runs a minimal command where the executable is supplied as a string.',
    number=ITERATIONS,
)

# NOTE(review): unlike simple_success, the scenarios below pass `-c "..."` as a
# single string argument, presumably relying on suby's argument splitting to
# separate the flag from the code — confirm the embedded quotes survive that
# split as intended.
path_argument = Scenario(
    run,
    a(PYTHON, '-c "import sys; print(sys.argv[1])"', Path(__file__), catch_output=True),
    name='path_argument',
    doc='Runs a command with a pathlib.Path supplied as one of the subprocess arguments.',
    number=ITERATIONS,
)

multi_line_stdout = Scenario(
    run,
    a(PYTHON, '-c "for i in range(10): print(i)"', catch_output=True),
    name='multi_line_stdout',
    doc='Runs a successful command that writes several short stdout lines.',
    number=ITERATIONS,
)

large_stdout = Scenario(
    run,
    a(PYTHON, '-c "print(\'x\' * 10000)"', catch_output=True),
    name='large_stdout',
    doc='Runs a successful command that writes one larger stdout payload.',
    number=ITERATIONS,
)

stderr_output = Scenario(
    run,
    a(PYTHON, '-c "import sys; sys.stderr.write(\'error line\\\\n\')"', catch_output=True),
    name='stderr_output',
    doc='Runs a successful command that writes to stderr.',
    number=ITERATIONS,
)

mixed_stdout_stderr = Scenario(
    run,
    a(PYTHON, '-c "import sys; print(\'out\'); sys.stderr.write(\'err\\\\n\')"', catch_output=True),
    name='mixed_stdout_stderr',
    doc='Runs a successful command that writes to both stdout and stderr.',
    number=ITERATIONS,
)

many_short_lines = Scenario(
    run,
    a(PYTHON, '-c "for i in range(1000): print(i)"', catch_output=True),
    name='many_short_lines',
    doc='Runs a command that emits many small stdout lines for stream-reading overhead.',
    number=ITERATIONS,
)

moderate_python_work = Scenario(
    run,
    a(PYTHON, '-c "sum(range(100000))"'),
    name='moderate_python_work',
    doc='Runs a subprocess that performs a small amount of CPU work before exiting.',
    number=ITERATIONS,
)

# Slower scenarios (sleeps, cancellations) use the reduced iteration count.
short_sleep = Scenario(
    run,
    a(PYTHON, '-c "import time; time.sleep(0.01)"'),
    name='short_sleep',
    doc='Runs a subprocess that stays alive briefly without producing output.',
    number=SHORT_ITERATIONS,
)

simple_token_success = Scenario(
    run,
    a(PYTHON, '-c', 'pass', token=SimpleToken()),
    name='simple_token_success',
    doc='Runs a minimal subprocess while checking a non-cancelled SimpleToken.',
    number=ITERATIONS,
)

condition_token_success = Scenario(
    run,
    a(PYTHON, '-c', 'pass', token=ConditionToken(lambda: False)),
    name='condition_token_success',
    doc='Runs a minimal subprocess while polling a ConditionToken that remains active.',
    number=ITERATIONS,
)

cancelled_token_before_start = Scenario(
    run,
    a(
        PYTHON,
        '-c "import time; time.sleep(1)"',
        token=SimpleToken().cancel(),
        catch_exceptions=True,
        catch_output=True,
    ),
    name='cancelled_token_before_start',
    doc='Runs a subprocess with an already-cancelled token and catches the cancellation result.',
    number=SHORT_ITERATIONS,
)

condition_token_cancel_after_start = Scenario(
    run_with_delayed_condition_token_cancellation,
    name='condition_token_cancel_after_start',
    doc='Starts a subprocess and cancels it with a ConditionToken shortly after the subprocess reports startup.',
    number=SHORT_ITERATIONS,
)

# Combined suite of every scenario above.  The `+` operator presumably merges
# Scenario objects (microbenchmark API) — the name deliberately shadows the
# `all` builtin at module scope, hence the noqa.
all = ( # noqa: A001
    simple_success
    + python_version_output
    + string_executable
    + path_argument
    + multi_line_stdout
    + large_stdout
    + stderr_output
    + mixed_stdout_stderr
    + many_short_lines
    + moderate_python_work
    + short_sleep
    + simple_token_success
    + condition_token_success
    + cancelled_token_before_start
    + condition_token_cancel_after_start
)
Empty file added tests/benchmarks/__init__.py
Empty file.
27 changes: 27 additions & 0 deletions tests/benchmarks/test_benchmarks.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import pytest

from suby import benchmarks

# Every benchmark scenario exposed by suby.benchmarks, resolved by name so
# the list of scenario identifiers reads as a single flat tuple.
SCENARIOS = [
    getattr(benchmarks, scenario_name)
    for scenario_name in (
        'simple_success',
        'python_version_output',
        'string_executable',
        'path_argument',
        'multi_line_stdout',
        'large_stdout',
        'stderr_output',
        'mixed_stdout_stderr',
        'many_short_lines',
        'moderate_python_work',
        'short_sleep',
        'simple_token_success',
        'condition_token_success',
        'cancelled_token_before_start',
        'condition_token_cancel_after_start',
    )
]


@pytest.mark.benchmark
@pytest.mark.parametrize('scenario', SCENARIOS, ids=[scenario.name for scenario in SCENARIOS])
def test_benchmark_scenario(benchmark, scenario):
    """Measure each suby benchmark scenario under the pytest-codspeed `benchmark` fixture."""
    # NOTE(review): `_call_once` is a private microbenchmark attribute —
    # presumably it executes one iteration of the scenario; confirm it is
    # stable across microbenchmark versions before relying on it here.
    benchmark(scenario._call_once)
Loading
Loading