From f0dddaa16a1372aca83689615a473f947b5cf358 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Mon, 2 Feb 2026 15:22:03 +0000 Subject: [PATCH 1/5] feat: add asv-codspeed package for airspeed velocity support Add a new `asv-codspeed` workspace package that enables running ASV (airspeed velocity) benchmarks with CodSpeed instrumentation. The package discovers `time_*` benchmarks, runs them through the existing pytest-codspeed instruments (walltime and valgrind), and reports results in the same format so the CodSpeed platform treats them identically. Co-Authored-By: Claude Opus 4.5 --- asv-codspeed/pyproject.toml | 29 ++ asv-codspeed/src/asv_codspeed/__init__.py | 2 + asv-codspeed/src/asv_codspeed/__main__.py | 98 +++++ asv-codspeed/src/asv_codspeed/discovery.py | 347 +++++++++++++++++ asv-codspeed/src/asv_codspeed/runner.py | 197 ++++++++++ asv-codspeed/tests/__init__.py | 0 asv-codspeed/tests/conftest.py | 26 ++ .../tests/sample_benchmarks/bench_fibo.py | 11 + .../sample_benchmarks/bench_foo_bar_baz.py | 17 + .../tests/sample_benchmarks/bench_simple.py | 40 ++ asv-codspeed/tests/test_cli.py | 140 +++++++ asv-codspeed/tests/test_discovery.py | 219 +++++++++++ asv-codspeed/tests/test_runner.py | 355 ++++++++++++++++++ pyproject.toml | 9 +- uv.lock | 25 +- 15 files changed, 1512 insertions(+), 3 deletions(-) create mode 100644 asv-codspeed/pyproject.toml create mode 100644 asv-codspeed/src/asv_codspeed/__init__.py create mode 100644 asv-codspeed/src/asv_codspeed/__main__.py create mode 100644 asv-codspeed/src/asv_codspeed/discovery.py create mode 100644 asv-codspeed/src/asv_codspeed/runner.py create mode 100644 asv-codspeed/tests/__init__.py create mode 100644 asv-codspeed/tests/conftest.py create mode 100644 asv-codspeed/tests/sample_benchmarks/bench_fibo.py create mode 100644 asv-codspeed/tests/sample_benchmarks/bench_foo_bar_baz.py create mode 100644 asv-codspeed/tests/sample_benchmarks/bench_simple.py create mode 100644 asv-codspeed/tests/test_cli.py create mode 100644 asv-codspeed/tests/test_discovery.py create mode 100644 asv-codspeed/tests/test_runner.py diff --git a/asv-codspeed/pyproject.toml b/asv-codspeed/pyproject.toml new file mode 100644 index 0000000..dcd32f9 --- /dev/null +++ b/asv-codspeed/pyproject.toml @@ -0,0 +1,29 @@ +[project] +name = "asv-codspeed" +version = "0.1.0" +description = "Run ASV (airspeed velocity) benchmarks with CodSpeed instrumentation" +license = "MIT" +requires-python = ">=3.9" +authors = [{ name = "Arthur Pastel", email = "arthur@codspeed.io" }] +keywords = ["codspeed", "benchmark", "performance", "asv", "airspeed-velocity"] +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Topic :: Software Development :: Testing", + "Topic :: System :: Benchmark", +] +dependencies = [ + "pytest-codspeed>=4.2.0", + "rich>=13.8.1", +] + +[project.scripts] +asv-codspeed = "asv_codspeed.__main__:main" + +[build-system] +requires = ["setuptools >= 61"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.packages.find] +where = ["src"] diff --git a/asv-codspeed/src/asv_codspeed/__init__.py b/asv-codspeed/src/asv_codspeed/__init__.py new file mode 100644 index 0000000..bea1b07 --- /dev/null +++ b/asv-codspeed/src/asv_codspeed/__init__.py @@ -0,0 +1,2 @@ +__version__ = "0.1.0" +__semver_version__ = "0.1.0" diff --git a/asv-codspeed/src/asv_codspeed/__main__.py b/asv-codspeed/src/asv_codspeed/__main__.py new file mode 100644 index 0000000..7352f5b --- /dev/null +++ 
b/asv-codspeed/src/asv_codspeed/__main__.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +import argparse +import sys +from pathlib import Path + +from asv_codspeed import __version__ +from asv_codspeed.runner import run_benchmarks + +from pytest_codspeed.instruments import MeasurementMode + + +def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser( + prog="asv-codspeed", + description="Run ASV benchmarks with CodSpeed instrumentation", + ) + parser.add_argument( + "--version", + action="version", + version=f"asv-codspeed {__version__}", + ) + + subparsers = parser.add_subparsers(dest="command", required=True) + + # 'run' subcommand + run_parser = subparsers.add_parser("run", help="Run ASV benchmarks") + run_parser.add_argument( + "benchmark_dir", + type=Path, + nargs="?", + default=Path("benchmarks"), + help="Path to benchmark directory (default: benchmarks/)", + ) + run_parser.add_argument( + "--mode", + choices=[m.value for m in MeasurementMode], + default=None, + help="Measurement mode (default: walltime locally, simulation in CI)", + ) + run_parser.add_argument( + "--warmup-time", + type=float, + default=None, + help="Warmup time in seconds (walltime mode only)", + ) + run_parser.add_argument( + "--max-time", + type=float, + default=None, + help="Maximum benchmark time in seconds", + ) + run_parser.add_argument( + "--max-rounds", + type=int, + default=None, + help="Maximum number of benchmark rounds", + ) + run_parser.add_argument( + "--bench", + type=str, + default=None, + help="Regex pattern to filter benchmarks", + ) + + args = parser.parse_args(argv) + + if args.command == "run": + import os + + # Determine mode + if args.mode: + mode = MeasurementMode(args.mode) + elif os.environ.get("CODSPEED_ENV") is not None: + if os.environ.get("CODSPEED_RUNNER_MODE") == "walltime": + mode = MeasurementMode.WallTime + else: + mode = MeasurementMode.Simulation + else: + mode = MeasurementMode.WallTime + + profile_folder = os.environ.get("CODSPEED_PROFILE_FOLDER") + + return run_benchmarks( + benchmark_dir=args.benchmark_dir, + mode=mode, + warmup_time=args.warmup_time, + max_time=args.max_time, + max_rounds=args.max_rounds, + bench_filter=args.bench, + profile_folder=Path(profile_folder) if profile_folder else None, + ) + + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/asv-codspeed/src/asv_codspeed/discovery.py b/asv-codspeed/src/asv_codspeed/discovery.py new file mode 100644 index 0000000..96cb7e0 --- /dev/null +++ b/asv-codspeed/src/asv_codspeed/discovery.py @@ -0,0 +1,347 @@ +from __future__ import annotations + +import importlib +import importlib.util +import re +import sys +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Callable + + +# ASV benchmark name patterns +TIME_PATTERN = re.compile(r"^(Time[A-Z_].+)|(time_.+)$") +TRACK_PATTERN = re.compile(r"^(Track[A-Z_].+)|(track_.+)$") +MEM_PATTERN = re.compile(r"^(Mem[A-Z_].+)|(mem_.+)$") +PEAKMEM_PATTERN = re.compile(r"^(PeakMem[A-Z_].+)|(peakmem_.+)$") +TIMERAW_PATTERN = re.compile(r"^(Timeraw[A-Z_].+)|(timeraw_.+)$") + +# Method-level patterns (for class methods) +TIME_METHOD = re.compile(r"^time_") +TRACK_METHOD = re.compile(r"^track_") +MEM_METHOD = re.compile(r"^mem_") +PEAKMEM_METHOD = re.compile(r"^peakmem_") +TIMERAW_METHOD = re.compile(r"^timeraw_") + + +@dataclass +class ASVBenchmark: + """Represents a single ASV benchmark to run.""" + + name: str + func: Callable + type: str # "time", "track", "mem", "peakmem", "timeraw" + 
setup: Callable | None = None + teardown: Callable | None = None + params: list[list[Any]] = field(default_factory=list) + param_names: list[str] = field(default_factory=list) + current_param_values: tuple[Any, ...] = field(default_factory=tuple) + timeout: float = 60.0 + + +def _get_benchmark_type_from_name(name: str) -> str | None: + """Determine benchmark type from function/class name.""" + if TIMERAW_PATTERN.match(name): + return "timeraw" + if TIME_PATTERN.match(name): + return "time" + if TRACK_PATTERN.match(name): + return "track" + if MEM_PATTERN.match(name): + return "mem" + if PEAKMEM_PATTERN.match(name): + return "peakmem" + return None + + +def _get_method_type(method_name: str) -> str | None: + """Determine benchmark type from method name.""" + if TIMERAW_METHOD.match(method_name): + return "timeraw" + if TIME_METHOD.match(method_name): + return "time" + if TRACK_METHOD.match(method_name): + return "track" + if MEM_METHOD.match(method_name): + return "mem" + if PEAKMEM_METHOD.match(method_name): + return "peakmem" + return None + + +def _import_module_from_path(module_name: str, file_path: Path): + """Import a Python module from a file path.""" + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None or spec.loader is None: + return None + module = importlib.util.module_from_spec(spec) + sys.modules[module_name] = module + try: + spec.loader.exec_module(module) + except Exception: + del sys.modules[module_name] + return None + return module + + +def _discover_from_module( + module, module_name: str +) -> list[ASVBenchmark]: + """Discover benchmarks from a single module.""" + benchmarks = [] + + for attr_name in dir(module): + if attr_name.startswith("_"): + continue + + attr = getattr(module, attr_name, None) + if attr is None: + continue + + # Check if it's a benchmark class + if isinstance(attr, type): + btype = _get_benchmark_type_from_name(attr_name) + if btype is not None: + # Class-level benchmark: all methods matching the type + benchmarks.extend( + _discover_from_class(attr, f"{module_name}.{attr_name}", btype) + ) + else: + # Check individual methods + benchmarks.extend( + _discover_from_class(attr, f"{module_name}.{attr_name}", None) + ) + + # Check if it's a benchmark function + elif callable(attr): + btype = _get_method_type(attr_name) + if btype is not None: + bench = _create_benchmark( + name=f"{module_name}.{attr_name}", + func=attr, + btype=btype, + source=module, + ) + benchmarks.append(bench) + + return benchmarks + + +def _discover_from_class( + cls, class_full_name: str, class_btype: str | None +) -> list[ASVBenchmark]: + """Discover benchmarks from a class.""" + benchmarks = [] + instance = None + + # Get class-level params + params = getattr(cls, "params", []) + param_names = getattr(cls, "param_names", []) + timeout = getattr(cls, "timeout", 60.0) + + for attr_name in dir(cls): + if attr_name.startswith("_"): + continue + + btype = _get_method_type(attr_name) + if btype is None: + continue + + # Lazy-instantiate the class + if instance is None: + try: + instance = cls() + except Exception: + break + + method = getattr(instance, attr_name, None) + if method is None or not callable(method): + continue + + setup_method = getattr(instance, "setup", None) + teardown_method = getattr(instance, "teardown", None) + + bench_name = f"{class_full_name}.{attr_name}" + + if params: + # Expand parameterized benchmarks + param_combos = _expand_params(params) + for i, combo in enumerate(param_combos): + param_suffix = 
_format_param_suffix(combo, param_names) + benchmarks.append( + ASVBenchmark( + name=f"{bench_name}({param_suffix})", + func=method, + type=btype, + setup=setup_method, + teardown=teardown_method, + params=params if isinstance(params[0], list) else [params], + param_names=param_names, + current_param_values=combo, + timeout=timeout, + ) + ) + else: + benchmarks.append( + ASVBenchmark( + name=bench_name, + func=method, + type=btype, + setup=setup_method, + teardown=teardown_method, + timeout=timeout, + ) + ) + + return benchmarks + + +def _create_benchmark( + name: str, func: Callable, btype: str, source: Any +) -> ASVBenchmark: + """Create a benchmark from a function, inheriting attributes from its source.""" + setup = getattr(source, "setup", None) + teardown = getattr(source, "teardown", None) + params = getattr(func, "params", getattr(source, "params", [])) + param_names = getattr(func, "param_names", getattr(source, "param_names", [])) + timeout = getattr(func, "timeout", getattr(source, "timeout", 60.0)) + + if params: + # For now, create one benchmark per param combo + benchmarks = [] + param_combos = _expand_params(params) + if len(param_combos) == 1: + return ASVBenchmark( + name=f"{name}({_format_param_suffix(param_combos[0], param_names)})", + func=func, + type=btype, + setup=setup, + teardown=teardown, + params=params if isinstance(params[0], list) else [params], + param_names=param_names, + current_param_values=param_combos[0], + timeout=timeout, + ) + # Return first for now - parameterized will be expanded in discover_benchmarks + return ASVBenchmark( + name=name, + func=func, + type=btype, + setup=setup, + teardown=teardown, + params=params if isinstance(params[0], list) else [params], + param_names=param_names, + timeout=timeout, + ) + + return ASVBenchmark( + name=name, + func=func, + type=btype, + setup=setup, + teardown=teardown, + timeout=timeout, + ) + + +def _expand_params(params: list) -> list[tuple]: + """Expand parameter lists into all combinations.""" + if not params: + return [()] + + # If params is a list of lists, compute cartesian product + if isinstance(params[0], (list, tuple)): + import itertools + + return list(itertools.product(*params)) + else: + # Single parameter list + return [(p,) for p in params] + + +def _format_param_suffix(values: tuple, names: list[str]) -> str: + """Format parameter values into a suffix string.""" + parts = [] + for i, v in enumerate(values): + if i < len(names) and names[i]: + parts.append(f"{v}") + else: + parts.append(f"{v}") + return ", ".join(parts) + + +def _discover_benchmark_files(benchmark_dir: Path) -> list[Path]: + """Find all Python files in the benchmark directory.""" + files = [] + for py_file in sorted(benchmark_dir.rglob("*.py")): + if py_file.name.startswith("_"): + continue + files.append(py_file) + return files + + +def discover_benchmarks( + benchmark_dir: Path, + filter_pattern: str | None = None, +) -> list[ASVBenchmark]: + """Discover all ASV benchmarks in the given directory. 
+ + Args: + benchmark_dir: Path to the benchmark directory + filter_pattern: Optional regex to filter benchmark names + + Returns: + List of discovered benchmarks + """ + if not benchmark_dir.exists(): + return [] + + # Add benchmark dir to sys.path for imports + benchmark_dir_str = str(benchmark_dir.resolve()) + if benchmark_dir_str not in sys.path: + sys.path.insert(0, benchmark_dir_str) + + benchmarks: list[ASVBenchmark] = [] + + for py_file in _discover_benchmark_files(benchmark_dir): + # Build module name from relative path + rel_path = py_file.relative_to(benchmark_dir) + module_parts = list(rel_path.parts) + module_parts[-1] = module_parts[-1].removesuffix(".py") + module_name = ".".join(module_parts) + + module = _import_module_from_path(module_name, py_file) + if module is None: + continue + + benchmarks.extend(_discover_from_module(module, module_name)) + + # Expand parameterized benchmarks that haven't been expanded yet + expanded = [] + for bench in benchmarks: + if bench.params and not bench.current_param_values: + param_combos = _expand_params(bench.params) + for combo in param_combos: + param_suffix = _format_param_suffix(combo, bench.param_names) + expanded.append( + ASVBenchmark( + name=f"{bench.name}({param_suffix})", + func=bench.func, + type=bench.type, + setup=bench.setup, + teardown=bench.teardown, + params=bench.params, + param_names=bench.param_names, + current_param_values=combo, + timeout=bench.timeout, + ) + ) + else: + expanded.append(bench) + + # Apply filter + if filter_pattern: + pattern = re.compile(filter_pattern) + expanded = [b for b in expanded if pattern.search(b.name)] + + return expanded diff --git a/asv-codspeed/src/asv_codspeed/runner.py b/asv-codspeed/src/asv_codspeed/runner.py new file mode 100644 index 0000000..ec0b89b --- /dev/null +++ b/asv-codspeed/src/asv_codspeed/runner.py @@ -0,0 +1,197 @@ +from __future__ import annotations + +import gc +import json +import os +import random +from pathlib import Path +from time import time +from typing import TYPE_CHECKING, Any + +from asv_codspeed.discovery import discover_benchmarks + +from pytest_codspeed import __semver_version__ as pytest_codspeed_version +from pytest_codspeed.instruments import MeasurementMode, get_instrument_from_mode +from pytest_codspeed.utils import get_git_relative_path + +if TYPE_CHECKING: + from asv_codspeed.discovery import ASVBenchmark + from pytest_codspeed.instruments import Instrument + + +def get_environment_metadata() -> dict[str, dict]: + """Report as pytest-codspeed so the CodSpeed platform treats results identically.""" + import importlib.metadata as importlib_metadata + import sysconfig + + return { + "creator": { + "name": "pytest-codspeed", + "version": pytest_codspeed_version, + "pid": os.getpid(), + }, + "python": { + "sysconfig": sysconfig.get_config_vars(), + "dependencies": { + d.name: d.version for d in importlib_metadata.distributions() + }, + }, + } + + +def _get_uri(benchmark_name: str, benchmark_dir: Path) -> str: + """Build a CodSpeed-compatible URI for a benchmark.""" + git_relative = get_git_relative_path(benchmark_dir.resolve()) + return f"{git_relative}::{benchmark_name}" + + +def run_benchmarks( + benchmark_dir: Path, + mode: MeasurementMode, + warmup_time: float | None = None, + max_time: float | None = None, + max_rounds: int | None = None, + bench_filter: str | None = None, + profile_folder: Path | None = None, +) -> int: + """Discover and run ASV benchmarks with CodSpeed instrumentation. + + Returns exit code (0 for success, 1 for failure). 
+ """ + from rich.console import Console + + console = Console() + + # Discover benchmarks + benchmarks = discover_benchmarks(benchmark_dir, bench_filter) + if not benchmarks: + console.print("[yellow]No benchmarks found[/yellow]") + return 1 + + time_benchmarks = [b for b in benchmarks if b.type == "time"] + if not time_benchmarks: + console.print( + "[yellow]No time benchmarks found (only time_* supported)[/yellow]" + ) + return 1 + + console.print( + f"[bold]asv-codspeed[/bold]: {len(time_benchmarks)} time benchmark(s) found, " + f"mode: {mode.value}" + ) + + # Build CodSpeed config + from pytest_codspeed.config import BenchmarkMarkerOptions, CodSpeedConfig + + codspeed_config = CodSpeedConfig( + warmup_time_ns=( + int(warmup_time * 1_000_000_000) if warmup_time is not None else None + ), + max_time_ns=int(max_time * 1_000_000_000) if max_time is not None else None, + max_rounds=max_rounds, + ) + + # Create instrument + instrument_cls = get_instrument_from_mode(mode) + instrument = instrument_cls(codspeed_config) + config_str, warns = instrument.get_instrument_config_str_and_warns() + console.print(f" {config_str}") + for w in warns: + console.print(f" [yellow]{w}[/yellow]") + + # Run benchmarks + marker_options = BenchmarkMarkerOptions() + passed = 0 + failed = 0 + + for bench in time_benchmarks: + uri = _get_uri(bench.name, benchmark_dir) + name = bench.name + console.print(f" Running: {name}...", end=" ") + try: + _run_single_benchmark(instrument, marker_options, bench, name, uri) + console.print("[green]OK[/green]") + passed += 1 + except Exception as e: + console.print(f"[red]FAILED: {e}[/red]") + failed += 1 + + # Report results + _report(console, instrument, mode, passed, failed) + + # Save results + if profile_folder is None: + profile_folder_env = os.environ.get("CODSPEED_PROFILE_FOLDER") + if profile_folder_env: + profile_folder = Path(profile_folder_env) + + if profile_folder: + result_path = profile_folder / "results" / f"{os.getpid()}.json" + else: + result_path = benchmark_dir / f".codspeed/results_{time() * 1000:.0f}.json" + + data = {**get_environment_metadata(), **instrument.get_result_dict()} + result_path.parent.mkdir(parents=True, exist_ok=True) + result_path.write_text(json.dumps(data, indent=2)) + console.print(f"Results saved to: {result_path}") + + return 1 if failed > 0 else 0 + + +def _report( + console: Any, + instrument: Instrument, + mode: MeasurementMode, + passed: int, + failed: int, +) -> None: + """Print a summary report of benchmark results.""" + from pytest_codspeed.instruments.walltime import WallTimeInstrument + + if isinstance(instrument, WallTimeInstrument) and instrument.benchmarks: + instrument._print_benchmark_table() + + total = passed + failed + status = "passed" if failed == 0 else "with failures" + console.print(f"\n[bold]===== {total} benchmarked ({passed} passed, {failed} failed) =====[/bold]") + + +def _run_single_benchmark( + instrument: Instrument, + marker_options: Any, + bench: ASVBenchmark, + name: str, + uri: str, +) -> None: + """Run a single ASV benchmark through the CodSpeed instrument.""" + # Setup + if bench.setup is not None: + if bench.params and bench.current_param_values: + bench.setup(*bench.current_param_values) + else: + bench.setup() + + try: + random.seed(0) + is_gc_enabled = gc.isenabled() + if is_gc_enabled: + gc.collect() + gc.disable() + try: + fn = bench.func + if bench.params and bench.current_param_values: + args = bench.current_param_values + else: + args = () + + instrument.measure(marker_options, name, uri, 
fn, *args) + finally: + if is_gc_enabled: + gc.enable() + finally: + # Teardown + if bench.teardown is not None: + if bench.params and bench.current_param_values: + bench.teardown(*bench.current_param_values) + else: + bench.teardown() diff --git a/asv-codspeed/tests/__init__.py b/asv-codspeed/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/asv-codspeed/tests/conftest.py b/asv-codspeed/tests/conftest.py new file mode 100644 index 0000000..4c5cdc2 --- /dev/null +++ b/asv-codspeed/tests/conftest.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest + +SAMPLE_BENCHMARKS_DIR = Path(__file__).parent / "sample_benchmarks" + + +@pytest.fixture +def sample_benchmarks_dir(): + return SAMPLE_BENCHMARKS_DIR + + +@pytest.fixture +def tmp_benchmarks(tmp_path): + """Create a temporary directory with benchmark files.""" + + def _create(files: dict[str, str]) -> Path: + bench_dir = tmp_path / "benchmarks" + bench_dir.mkdir() + for name, content in files.items(): + (bench_dir / name).write_text(content) + return bench_dir + + return _create diff --git a/asv-codspeed/tests/sample_benchmarks/bench_fibo.py b/asv-codspeed/tests/sample_benchmarks/bench_fibo.py new file mode 100644 index 0000000..9fffb3d --- /dev/null +++ b/asv-codspeed/tests/sample_benchmarks/bench_fibo.py @@ -0,0 +1,11 @@ +"""Recursive Fibonacci benchmark for asv-codspeed.""" + + +def _fibo(n): + if n <= 1: + return n + return _fibo(n - 1) + _fibo(n - 2) + + +def time_recursive_fibo_10(): + _fibo(10) diff --git a/asv-codspeed/tests/sample_benchmarks/bench_foo_bar_baz.py b/asv-codspeed/tests/sample_benchmarks/bench_foo_bar_baz.py new file mode 100644 index 0000000..5aad239 --- /dev/null +++ b/asv-codspeed/tests/sample_benchmarks/bench_foo_bar_baz.py @@ -0,0 +1,17 @@ +"""Foo / bar / baz benchmarks for asv-codspeed.""" + + +def time_foo(): + total = 0 + for i in range(500): + total += i + return total + + +def time_bar(): + data = list(range(200)) + return sorted(data, reverse=True) + + +def time_baz(): + return {k: k * k for k in range(300)} diff --git a/asv-codspeed/tests/sample_benchmarks/bench_simple.py b/asv-codspeed/tests/sample_benchmarks/bench_simple.py new file mode 100644 index 0000000..1fe8c4d --- /dev/null +++ b/asv-codspeed/tests/sample_benchmarks/bench_simple.py @@ -0,0 +1,40 @@ +"""Sample ASV benchmarks for testing asv-codspeed.""" + + +def time_sum(): + """Benchmark summing a list.""" + total = sum(range(1000)) + return total + + +def time_list_comprehension(): + """Benchmark list comprehension.""" + result = [i**2 for i in range(100)] + return result + + +class TimeSorting: + """Benchmark sorting operations.""" + + def setup(self): + self.data = list(range(1000, 0, -1)) + + def time_sort(self): + sorted(self.data) + + def time_reverse(self): + list(reversed(self.data)) + + +class TimeParameterized: + """Parameterized benchmarks.""" + + params = [10, 100, 1000] + param_names = ["n"] + + def time_range(self, n): + for i in range(n): + pass + + def time_sum(self, n): + sum(range(n)) diff --git a/asv-codspeed/tests/test_cli.py b/asv-codspeed/tests/test_cli.py new file mode 100644 index 0000000..5dfb6f4 --- /dev/null +++ b/asv-codspeed/tests/test_cli.py @@ -0,0 +1,140 @@ +"""Tests for the asv-codspeed CLI.""" +from __future__ import annotations + +import json +import subprocess +import sys +from pathlib import Path + +import pytest + + +def test_cli_version(): + """Test that --version works.""" + result = subprocess.run( + [sys.executable, "-m", "asv_codspeed", 
"--version"], + capture_output=True, + text=True, + ) + assert result.returncode == 0 + assert "asv-codspeed" in result.stdout + + +def test_cli_run_help(): + """Test that 'run --help' works.""" + result = subprocess.run( + [sys.executable, "-m", "asv_codspeed", "run", "--help"], + capture_output=True, + text=True, + ) + assert result.returncode == 0 + assert "benchmark_dir" in result.stdout + assert "--mode" in result.stdout + + +def test_cli_run_benchmarks(tmp_path): + """Test running benchmarks via CLI.""" + bench_dir = tmp_path / "benchmarks" + bench_dir.mkdir() + (bench_dir / "bench_test.py").write_text( + """ +def time_hello(): + return "hello" +""" + ) + + result = subprocess.run( + [ + sys.executable, + "-m", + "asv_codspeed", + "run", + str(bench_dir), + "--mode", + "walltime", + "--warmup-time", + "0", + "--max-rounds", + "2", + ], + capture_output=True, + text=True, + ) + assert result.returncode == 0 + assert "time_hello" in result.stdout + assert "Results saved to" in result.stdout + + +def test_cli_run_with_filter(tmp_path): + """Test running with --bench filter.""" + bench_dir = tmp_path / "benchmarks" + bench_dir.mkdir() + (bench_dir / "bench_test.py").write_text( + """ +def time_foo(): + pass + +def time_bar(): + pass +""" + ) + + result = subprocess.run( + [ + sys.executable, + "-m", + "asv_codspeed", + "run", + str(bench_dir), + "--mode", + "walltime", + "--warmup-time", + "0", + "--max-rounds", + "2", + "--bench", + "foo", + ], + capture_output=True, + text=True, + ) + assert result.returncode == 0 + assert "time_foo" in result.stdout + + # Verify only filtered benchmark ran + result_files = list(bench_dir.glob(".codspeed/results_*.json")) + assert len(result_files) == 1 + data = json.loads(result_files[0].read_text()) + assert len(data["benchmarks"]) == 1 + assert "foo" in data["benchmarks"][0]["name"] + + +def test_cli_run_empty_dir(tmp_path): + """Test running with no benchmarks found returns non-zero.""" + bench_dir = tmp_path / "empty" + bench_dir.mkdir() + + result = subprocess.run( + [ + sys.executable, + "-m", + "asv_codspeed", + "run", + str(bench_dir), + "--mode", + "walltime", + ], + capture_output=True, + text=True, + ) + assert result.returncode == 1 + + +def test_cli_no_command(): + """Test that running without a command shows an error.""" + result = subprocess.run( + [sys.executable, "-m", "asv_codspeed"], + capture_output=True, + text=True, + ) + assert result.returncode == 2 # argparse exits with 2 for missing args diff --git a/asv-codspeed/tests/test_discovery.py b/asv-codspeed/tests/test_discovery.py new file mode 100644 index 0000000..c7c66c8 --- /dev/null +++ b/asv-codspeed/tests/test_discovery.py @@ -0,0 +1,219 @@ +"""Tests for ASV benchmark discovery.""" +from __future__ import annotations + +from pathlib import Path + +import pytest + +from asv_codspeed.discovery import discover_benchmarks + + +def test_discover_simple_functions(tmp_benchmarks): + """Test discovery of module-level time_* functions.""" + bench_dir = tmp_benchmarks( + { + "bench_basic.py": """ +def time_addition(): + return 1 + 1 + +def time_multiplication(): + return 2 * 3 + +def not_a_benchmark(): + pass +""" + } + ) + benchmarks = discover_benchmarks(bench_dir) + names = [b.name for b in benchmarks] + assert "bench_basic.time_addition" in names + assert "bench_basic.time_multiplication" in names + assert all("not_a_benchmark" not in n for n in names) + assert all(b.type == "time" for b in benchmarks) + + +def test_discover_class_methods(tmp_benchmarks): + """Test discovery of 
time_* methods inside classes.""" + bench_dir = tmp_benchmarks( + { + "bench_class.py": """ +class TimeSuite: + def time_method_one(self): + pass + + def time_method_two(self): + pass + + def helper(self): + pass +""" + } + ) + benchmarks = discover_benchmarks(bench_dir) + names = [b.name for b in benchmarks] + assert len(benchmarks) == 2 + assert any("time_method_one" in n for n in names) + assert any("time_method_two" in n for n in names) + assert all("helper" not in n for n in names) + + +def test_discover_class_with_setup_teardown(tmp_benchmarks): + """Test that setup and teardown are properly attached.""" + bench_dir = tmp_benchmarks( + { + "bench_lifecycle.py": """ +class TimeSuite: + def setup(self): + self.data = [1, 2, 3] + + def teardown(self): + del self.data + + def time_process(self): + sum(self.data) +""" + } + ) + benchmarks = discover_benchmarks(bench_dir) + assert len(benchmarks) == 1 + bench = benchmarks[0] + assert bench.setup is not None + assert bench.teardown is not None + + +def test_discover_parameterized(tmp_benchmarks): + """Test discovery of parameterized benchmarks.""" + bench_dir = tmp_benchmarks( + { + "bench_params.py": """ +class TimeParams: + params = [10, 100, 1000] + param_names = ["n"] + + def time_range(self, n): + for i in range(n): + pass +""" + } + ) + benchmarks = discover_benchmarks(bench_dir) + assert len(benchmarks) == 3 + names = [b.name for b in benchmarks] + assert any("10" in n for n in names) + assert any("100" in n and "1000" not in n for n in names) + assert any("1000" in n for n in names) + # Verify params are set correctly + for bench in benchmarks: + assert bench.current_param_values is not None + assert len(bench.current_param_values) == 1 + + +def test_discover_multi_param(tmp_benchmarks): + """Test discovery of multi-parameter benchmarks.""" + bench_dir = tmp_benchmarks( + { + "bench_multi.py": """ +class TimeMultiParam: + params = [[10, 100], ["a", "b"]] + param_names = ["n", "label"] + + def time_combo(self, n, label): + pass +""" + } + ) + benchmarks = discover_benchmarks(bench_dir) + # 2 x 2 = 4 combinations + assert len(benchmarks) == 4 + + +def test_discover_empty_directory(tmp_path): + """Test discovery in an empty directory.""" + bench_dir = tmp_path / "benchmarks" + bench_dir.mkdir() + benchmarks = discover_benchmarks(bench_dir) + assert benchmarks == [] + + +def test_discover_nonexistent_directory(tmp_path): + """Test discovery in a nonexistent directory.""" + benchmarks = discover_benchmarks(tmp_path / "nonexistent") + assert benchmarks == [] + + +def test_discover_skips_underscore_files(tmp_benchmarks): + """Test that files starting with _ are skipped.""" + bench_dir = tmp_benchmarks( + { + "_helper.py": """ +def time_should_not_be_found(): + pass +""", + "bench_real.py": """ +def time_real(): + pass +""", + } + ) + benchmarks = discover_benchmarks(bench_dir) + names = [b.name for b in benchmarks] + assert len(benchmarks) == 1 + assert "bench_real.time_real" in names + + +def test_discover_filter_pattern(tmp_benchmarks): + """Test filtering benchmarks by regex pattern.""" + bench_dir = tmp_benchmarks( + { + "bench_filter.py": """ +def time_foo(): + pass + +def time_bar(): + pass + +def time_baz(): + pass +""" + } + ) + benchmarks = discover_benchmarks(bench_dir, filter_pattern="foo|bar") + names = [b.name for b in benchmarks] + assert len(benchmarks) == 2 + assert any("foo" in n for n in names) + assert any("bar" in n for n in names) + assert all("baz" not in n for n in names) + + +def 
test_discover_different_benchmark_types(tmp_benchmarks): + """Test that different benchmark types are identified correctly.""" + bench_dir = tmp_benchmarks( + { + "bench_types.py": """ +def time_something(): + pass + +def track_value(): + return 42 + +def mem_object(): + return [0] * 1000 + +def peakmem_process(): + pass +""" + } + ) + benchmarks = discover_benchmarks(bench_dir) + types = {b.name: b.type for b in benchmarks} + assert types["bench_types.time_something"] == "time" + assert types["bench_types.track_value"] == "track" + assert types["bench_types.mem_object"] == "mem" + assert types["bench_types.peakmem_process"] == "peakmem" + + +def test_discover_sample_benchmarks(sample_benchmarks_dir): + """Test discovery of the sample benchmark files.""" + benchmarks = discover_benchmarks(sample_benchmarks_dir) + time_benchmarks = [b for b in benchmarks if b.type == "time"] + assert len(time_benchmarks) >= 4 # At least the 4 non-parameterized ones diff --git a/asv-codspeed/tests/test_runner.py b/asv-codspeed/tests/test_runner.py new file mode 100644 index 0000000..e101868 --- /dev/null +++ b/asv-codspeed/tests/test_runner.py @@ -0,0 +1,355 @@ +"""Tests for the ASV benchmark runner.""" +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + +from asv_codspeed.runner import run_benchmarks + +from pytest_codspeed.instruments import MeasurementMode + + +def test_run_simple_benchmarks_walltime(tmp_benchmarks): + """Test running simple benchmarks in walltime mode.""" + bench_dir = tmp_benchmarks( + { + "bench_simple.py": """ +def time_addition(): + return 1 + 1 + +def time_loop(): + for i in range(100): + pass +""" + } + ) + result = run_benchmarks( + benchmark_dir=bench_dir, + mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + ) + assert result == 0 + + # Verify results file was created + result_files = list(bench_dir.glob(".codspeed/results_*.json")) + assert len(result_files) == 1 + + data = json.loads(result_files[0].read_text()) + assert data["creator"]["name"] == "pytest-codspeed" + assert data["instrument"]["type"] == "walltime" + assert len(data["benchmarks"]) == 2 + + +def test_run_with_setup_teardown(tmp_benchmarks): + """Test running benchmarks with setup and teardown.""" + bench_dir = tmp_benchmarks( + { + "bench_lifecycle.py": """ +class TimeSuite: + def setup(self): + self.data = list(range(100)) + + def teardown(self): + pass + + def time_sort(self): + sorted(self.data) +""" + } + ) + result = run_benchmarks( + benchmark_dir=bench_dir, + mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + ) + assert result == 0 + + +def test_run_parameterized_benchmarks(tmp_benchmarks): + """Test running parameterized benchmarks.""" + bench_dir = tmp_benchmarks( + { + "bench_params.py": """ +class TimeRange: + params = [10, 100] + param_names = ["n"] + + def time_loop(self, n): + for i in range(n): + pass +""" + } + ) + result = run_benchmarks( + benchmark_dir=bench_dir, + mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + ) + assert result == 0 + + result_files = list(bench_dir.glob(".codspeed/results_*.json")) + assert len(result_files) == 1 + + data = json.loads(result_files[0].read_text()) + assert len(data["benchmarks"]) == 2 # 2 parameter values + + +def test_run_no_benchmarks_found(tmp_path): + """Test running when no benchmarks are found.""" + bench_dir = tmp_path / "empty_benchmarks" + bench_dir.mkdir() + (bench_dir / "no_bench.py").write_text("def helper(): pass\n") + + result = run_benchmarks( + 
benchmark_dir=bench_dir, + mode=MeasurementMode.WallTime, + ) + assert result == 1 + + +def test_run_nonexistent_dir(tmp_path): + """Test running with a nonexistent benchmark directory.""" + result = run_benchmarks( + benchmark_dir=tmp_path / "nonexistent", + mode=MeasurementMode.WallTime, + ) + assert result == 1 + + +def test_run_with_filter(tmp_benchmarks): + """Test running with a benchmark filter.""" + bench_dir = tmp_benchmarks( + { + "bench_filter.py": """ +def time_included(): + return 1 + 1 + +def time_excluded(): + return 2 + 2 +""" + } + ) + result = run_benchmarks( + benchmark_dir=bench_dir, + mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + bench_filter="included", + ) + assert result == 0 + + result_files = list(bench_dir.glob(".codspeed/results_*.json")) + data = json.loads(result_files[0].read_text()) + assert len(data["benchmarks"]) == 1 + assert "included" in data["benchmarks"][0]["name"] + + +def test_result_json_format(tmp_benchmarks): + """Test that the result JSON has the expected structure.""" + bench_dir = tmp_benchmarks( + { + "bench_format.py": """ +def time_test(): + return sum(range(100)) +""" + } + ) + run_benchmarks( + benchmark_dir=bench_dir, + mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + ) + + result_files = list(bench_dir.glob(".codspeed/results_*.json")) + data = json.loads(result_files[0].read_text()) + + # Verify top-level structure + assert "creator" in data + assert "python" in data + assert "instrument" in data + assert "benchmarks" in data + + # Verify creator + assert data["creator"]["name"] == "pytest-codspeed" + assert "version" in data["creator"] + assert "pid" in data["creator"] + + # Verify python metadata + assert "sysconfig" in data["python"] + assert "dependencies" in data["python"] + + # Verify instrument + assert data["instrument"]["type"] == "walltime" + assert "clock_info" in data["instrument"] + + # Verify benchmark entry + bench = data["benchmarks"][0] + assert "name" in bench + assert "uri" in bench + assert "config" in bench + assert "stats" in bench + + stats = bench["stats"] + assert "min_ns" in stats + assert "max_ns" in stats + assert "mean_ns" in stats + assert "stdev_ns" in stats + assert "rounds" in stats + + +def test_run_sample_benchmarks(sample_benchmarks_dir): + """Test running the sample benchmarks.""" + result = run_benchmarks( + benchmark_dir=sample_benchmarks_dir, + mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + ) + assert result == 0 + + # Clean up + import shutil + + codspeed_dir = sample_benchmarks_dir / ".codspeed" + if codspeed_dir.exists(): + shutil.rmtree(codspeed_dir) + + +def test_run_to_profile_folder(tmp_benchmarks, tmp_path): + """Test running with a custom profile folder.""" + bench_dir = tmp_benchmarks( + { + "bench_profile.py": """ +def time_test(): + return 1 + 1 +""" + } + ) + profile_folder = tmp_path / "profiles" + + result = run_benchmarks( + benchmark_dir=bench_dir, + mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + profile_folder=profile_folder, + ) + assert result == 0 + + result_files = list(profile_folder.glob("results/*.json")) + assert len(result_files) == 1 + + +def test_run_recursive_fibo_10(tmp_benchmarks): + """Test running a recursive Fibonacci(10) benchmark.""" + bench_dir = tmp_benchmarks( + { + "bench_fibo.py": """ +def _fibo(n): + if n <= 1: + return n + return _fibo(n - 1) + _fibo(n - 2) + +def time_recursive_fibo_10(): + _fibo(10) +""" + } + ) + result = run_benchmarks( + benchmark_dir=bench_dir, + 
mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + ) + assert result == 0 + + result_files = list(bench_dir.glob(".codspeed/results_*.json")) + assert len(result_files) == 1 + + data = json.loads(result_files[0].read_text()) + assert len(data["benchmarks"]) == 1 + assert data["benchmarks"][0]["name"] == "bench_fibo.time_recursive_fibo_10" + assert data["benchmarks"][0]["stats"]["mean_ns"] > 0 + + +def test_run_foo_bar_baz(tmp_benchmarks): + """Test running foo, bar, and baz benchmarks.""" + bench_dir = tmp_benchmarks( + { + "bench_foo_bar_baz.py": """ +def time_foo(): + total = 0 + for i in range(500): + total += i + return total + +def time_bar(): + data = list(range(200)) + return sorted(data, reverse=True) + +def time_baz(): + return {k: k * k for k in range(300)} +""" + } + ) + result = run_benchmarks( + benchmark_dir=bench_dir, + mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + ) + assert result == 0 + + result_files = list(bench_dir.glob(".codspeed/results_*.json")) + data = json.loads(result_files[0].read_text()) + assert len(data["benchmarks"]) == 3 + + names = {b["name"] for b in data["benchmarks"]} + assert "bench_foo_bar_baz.time_foo" in names + assert "bench_foo_bar_baz.time_bar" in names + assert "bench_foo_bar_baz.time_baz" in names + + # All should report as pytest-codspeed + assert data["creator"]["name"] == "pytest-codspeed" + + +def test_run_recursive_fibo_10_sample(sample_benchmarks_dir): + """Test running the fibo sample benchmark from sample_benchmarks.""" + result = run_benchmarks( + benchmark_dir=sample_benchmarks_dir, + mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + bench_filter="fibo", + ) + assert result == 0 + + import shutil + + codspeed_dir = sample_benchmarks_dir / ".codspeed" + if codspeed_dir.exists(): + shutil.rmtree(codspeed_dir) + + +def test_run_foo_bar_baz_sample(sample_benchmarks_dir): + """Test running the foo/bar/baz sample benchmarks from sample_benchmarks.""" + result = run_benchmarks( + benchmark_dir=sample_benchmarks_dir, + mode=MeasurementMode.WallTime, + warmup_time=0, + max_rounds=2, + bench_filter="foo_bar_baz", + ) + assert result == 0 + + import shutil + + codspeed_dir = sample_benchmarks_dir / ".codspeed" + if codspeed_dir.exists(): + shutil.rmtree(codspeed_dir) diff --git a/pyproject.toml b/pyproject.toml index a97fcb6..f19b392 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,14 +47,19 @@ compat = [ [tool.uv] # Python builds change with uv versions, and we are quite susceptible to that. # We pin uv to to make sure reproducibility is maintained for any contributor. 
-required-version = "0.9.5" +required-version = ">=0.9.5" + +[tool.uv.workspace] +members = [".", "asv-codspeed"] [tool.uv.sources] pytest-codspeed = { workspace = true } +asv-codspeed = { workspace = true } [dependency-groups] dev = [ "pytest-codspeed", + "asv-codspeed", "mypy ~= 1.18.2", "ruff ~= 0.11.12", "pytest ~= 7.0", @@ -127,7 +132,7 @@ force_grid_wrap = 0 float_to_top = true [tool.pytest.ini_options] -addopts = "--ignore=tests/benchmarks --ignore=tests/examples --ignore=tests/benchmarks/TheAlgorithms" +addopts = "--ignore=tests/benchmarks --ignore=tests/examples --ignore=tests/benchmarks/TheAlgorithms --ignore=asv-codspeed" filterwarnings = ["ignore::DeprecationWarning:pytest_benchmark.utils.*:"] pythonpath = ["tests/benchmarks/TheAlgorithms", "./scripts"] diff --git a/uv.lock b/uv.lock index 32198a5..0deb95b 100644 --- a/uv.lock +++ b/uv.lock @@ -1,7 +1,28 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.9" +[manifest] +members = [ + "asv-codspeed", + "pytest-codspeed", +] + +[[package]] +name = "asv-codspeed" +version = "0.1.0" +source = { editable = "asv-codspeed" } +dependencies = [ + { name = "pytest-codspeed" }, + { name = "rich" }, +] + +[package.metadata] +requires-dist = [ + { name = "pytest-codspeed", editable = "." }, + { name = "rich", specifier = ">=13.8.1" }, +] + [[package]] name = "cffi" version = "1.17.1" @@ -376,6 +397,7 @@ compat = [ [package.dev-dependencies] dev = [ + { name = "asv-codspeed" }, { name = "mypy" }, { name = "pytest" }, { name = "pytest-codspeed" }, @@ -397,6 +419,7 @@ provides-extras = ["compat"] [package.metadata.requires-dev] dev = [ + { name = "asv-codspeed", editable = "asv-codspeed" }, { name = "mypy", specifier = "~=1.18.2" }, { name = "pytest", specifier = "~=7.0" }, { name = "pytest-codspeed", editable = "." }, From c44216ef57d92b990f374f08cee9e059a92e0dbf Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Mon, 2 Feb 2026 22:44:49 +0000 Subject: [PATCH 2/5] feat: extract shared codspeed core package with C bindings Restructure the repository into a three-package monorepo: - codspeed: shared core with CFFI/C bindings, instruments (walltime, valgrind), config, and utilities. Owns the instrument-hooks submodule. - pytest-codspeed: pure Python pytest plugin, depends on codspeed - asv-codspeed: ASV benchmark runner, depends on codspeed (no longer depends on pytest-codspeed or pytest) Also adds asv-codspeed benchmarks job to the CodSpeed CI workflow. 
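As a quick orientation for reviewers, this is roughly how a downstream runner wires into the extracted core after this change. It is a minimal sketch using only names introduced in the diff below; the "0.1.0" version string is illustrative (the real caller passes codspeed.__semver_version__), and it is not the canonical API surface:

    # Build the shared config and pick an instrument from the core package.
    from codspeed.config import CodSpeedConfig
    from codspeed.instruments import MeasurementMode, get_instrument_from_mode

    config = CodSpeedConfig(warmup_time_ns=0, max_rounds=2)
    instrument_cls = get_instrument_from_mode(MeasurementMode.WallTime)
    # Integrations now identify themselves explicitly; asv-codspeed reports
    # as "pytest-codspeed" so the CodSpeed platform treats results identically.
    instrument = instrument_cls(
        config,
        integration_name="pytest-codspeed",
        integration_version="0.1.0",
    )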
Co-Authored-By: Claude Opus 4.5 --- .github/workflows/ci.yml | 6 +- .github/workflows/codspeed.yml | 37 ++++++- .github/workflows/release.yml | 52 ++++++--- .gitmodules | 4 +- asv-codspeed/pyproject.toml | 2 +- asv-codspeed/src/asv_codspeed/__main__.py | 2 +- asv-codspeed/src/asv_codspeed/runner.py | 50 ++++----- asv-codspeed/tests/test_runner.py | 2 +- codspeed/pyproject.toml | 47 ++++++++ setup.py => codspeed/setup.py | 10 +- codspeed/src/codspeed/__init__.py | 5 + .../src/codspeed}/config.py | 43 +------- .../src/codspeed}/instruments/__init__.py | 11 +- .../codspeed}/instruments/hooks/__init__.py | 2 +- .../src/codspeed}/instruments/hooks/build.py | 4 +- .../hooks/dist_instrument_hooks.pyi | 0 .../instruments/hooks/instrument-hooks | 0 .../src/codspeed}/instruments/valgrind.py | 35 +++--- .../src/codspeed}/instruments/walltime.py | 49 ++++----- .../src/codspeed}/py.typed | 0 codspeed/src/codspeed/utils.py | 45 ++++++++ pyproject.toml | 102 ++---------------- pytest-codspeed/pyproject.toml | 90 ++++++++++++++++ .../src}/pytest_codspeed/__init__.py | 0 .../src}/pytest_codspeed/plugin.py | 89 +++++++++++++-- .../src/pytest_codspeed/py.typed | 0 .../src}/pytest_codspeed/utils.py | 40 +------ .../tests}/benchmarks/TheAlgorithms | 0 .../TheAlgorithms_bench}/__init__.py | 0 .../TheAlgorithms_bench/bit_manipulation.py | 0 .../test_bench_audio_filters.py | 0 .../test_bench_backtracking.py | 0 .../tests/benchmarks}/__init__.py | 0 .../tests}/benchmarks/test_bench_doc.py | 0 .../tests}/benchmarks/test_bench_fibo.py | 0 .../tests}/benchmarks/test_bench_misc.py | 0 .../tests}/benchmarks/test_bench_syscalls.py | 0 .../benchmarks/test_bench_various_noop.py | 0 {tests => pytest-codspeed/tests}/conftest.py | 2 +- pytest-codspeed/tests/examples/__init__.py | 0 .../tests}/examples/test_addition_fixture.py | 0 .../tests}/test_format_time.py | 2 +- .../tests}/test_pytest_plugin.py | 0 .../test_pytest_plugin_cpu_instrumentation.py | 2 +- .../tests}/test_pytest_plugin_walltime.py | 2 +- .../tests}/test_utils.py | 3 +- uv.lock | 66 ++++++------ 47 files changed, 462 insertions(+), 342 deletions(-) create mode 100644 codspeed/pyproject.toml rename setup.py => codspeed/setup.py (79%) create mode 100644 codspeed/src/codspeed/__init__.py rename {src/pytest_codspeed => codspeed/src/codspeed}/config.py (61%) rename {src/pytest_codspeed => codspeed/src/codspeed}/instruments/__init__.py (81%) rename {src/pytest_codspeed => codspeed/src/codspeed}/instruments/hooks/__init__.py (98%) rename {src/pytest_codspeed => codspeed/src/codspeed}/instruments/hooks/build.py (91%) rename {src/pytest_codspeed => codspeed/src/codspeed}/instruments/hooks/dist_instrument_hooks.pyi (100%) rename {src/pytest_codspeed => codspeed/src/codspeed}/instruments/hooks/instrument-hooks (100%) rename {src/pytest_codspeed => codspeed/src/codspeed}/instruments/valgrind.py (81%) rename {src/pytest_codspeed => codspeed/src/codspeed}/instruments/walltime.py (90%) rename {src/pytest_codspeed => codspeed/src/codspeed}/py.typed (100%) create mode 100644 codspeed/src/codspeed/utils.py create mode 100644 pytest-codspeed/pyproject.toml rename {src => pytest-codspeed/src}/pytest_codspeed/__init__.py (100%) rename {src => pytest-codspeed/src}/pytest_codspeed/plugin.py (80%) rename tests/benchmarks/TheAlgorithms_bench/__init__.py => pytest-codspeed/src/pytest_codspeed/py.typed (100%) rename {src => pytest-codspeed/src}/pytest_codspeed/utils.py (52%) rename {tests => pytest-codspeed/tests}/benchmarks/TheAlgorithms (100%) rename {tests/benchmarks => 
pytest-codspeed/tests/benchmarks/TheAlgorithms_bench}/__init__.py (100%) rename {tests => pytest-codspeed/tests}/benchmarks/TheAlgorithms_bench/bit_manipulation.py (100%) rename {tests => pytest-codspeed/tests}/benchmarks/TheAlgorithms_bench/test_bench_audio_filters.py (100%) rename {tests => pytest-codspeed/tests}/benchmarks/TheAlgorithms_bench/test_bench_backtracking.py (100%) rename {tests/examples => pytest-codspeed/tests/benchmarks}/__init__.py (100%) rename {tests => pytest-codspeed/tests}/benchmarks/test_bench_doc.py (100%) rename {tests => pytest-codspeed/tests}/benchmarks/test_bench_fibo.py (100%) rename {tests => pytest-codspeed/tests}/benchmarks/test_bench_misc.py (100%) rename {tests => pytest-codspeed/tests}/benchmarks/test_bench_syscalls.py (100%) rename {tests => pytest-codspeed/tests}/benchmarks/test_bench_various_noop.py (100%) rename {tests => pytest-codspeed/tests}/conftest.py (97%) create mode 100644 pytest-codspeed/tests/examples/__init__.py rename {tests => pytest-codspeed/tests}/examples/test_addition_fixture.py (100%) rename {tests => pytest-codspeed/tests}/test_format_time.py (94%) rename {tests => pytest-codspeed/tests}/test_pytest_plugin.py (100%) rename {tests => pytest-codspeed/tests}/test_pytest_plugin_cpu_instrumentation.py (99%) rename {tests => pytest-codspeed/tests}/test_pytest_plugin_walltime.py (97%) rename {tests => pytest-codspeed/tests}/test_utils.py (90%) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0132b3f..728fc8d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -73,8 +73,10 @@ jobs: - if: matrix.config == 'pytest-benchmark-5' name: Install pytest-benchmark 5.0.0 run: uv pip install pytest-benchmark~=5.0.0 - - name: Run tests - run: uv run --no-sync pytest -vs + - name: Run pytest-codspeed tests + run: uv run --no-sync pytest pytest-codspeed/tests/ --ignore=pytest-codspeed/tests/benchmarks --ignore=pytest-codspeed/tests/examples -vs + - name: Run asv-codspeed tests + run: uv run --no-sync pytest asv-codspeed/tests/ -vs all-checks: runs-on: ubuntu-latest diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml index 66dfef9..f80b017 100644 --- a/.github/workflows/codspeed.yml +++ b/.github/workflows/codspeed.yml @@ -11,13 +11,13 @@ env: SHARDS: 4 jobs: - benchmarks: + pytest-benchmarks: strategy: matrix: shard: [1, 2, 3, 4] mode: ["instrumentation", "walltime"] - name: "Run ${{ matrix.mode }} benchmarks (Shard #${{ matrix.shard }})" + name: "pytest ${{ matrix.mode }} benchmarks (Shard #${{ matrix.shard }})" runs-on: ${{ matrix.mode == 'instrumentation' && 'ubuntu-24.04' || 'codspeed-macro' }} steps: - uses: actions/checkout@v5 @@ -37,7 +37,35 @@ jobs: uses: CodSpeedHQ/action@main with: mode: ${{ matrix.mode }} - run: uv run pytest tests/benchmarks/ --codspeed --test-group=${{ matrix.shard }} --test-group-count=${{ env.SHARDS }} + run: uv run pytest pytest-codspeed/tests/benchmarks/ --codspeed --test-group=${{ matrix.shard }} --test-group-count=${{ env.SHARDS }} + token: ${{ secrets.CODSPEED_TOKEN }} + + asv-benchmarks: + strategy: + matrix: + mode: ["instrumentation", "walltime"] + + name: "asv ${{ matrix.mode }} benchmarks" + runs-on: ${{ matrix.mode == 'instrumentation' && 'ubuntu-24.04' || 'codspeed-macro' }} + steps: + - uses: actions/checkout@v5 + with: + submodules: "recursive" + - uses: astral-sh/setup-uv@v7 + - uses: actions/setup-python@v6 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get 
install valgrind -y + uv sync --dev + sudo apt-get remove valgrind -y + - name: Run ASV benchmarks + uses: CodSpeedHQ/action@main + with: + mode: ${{ matrix.mode }} + run: uv run asv-codspeed run asv-codspeed/tests/sample_benchmarks token: ${{ secrets.CODSPEED_TOKEN }} all-checks: @@ -45,4 +73,5 @@ jobs: steps: - run: echo "All CI checks passed." needs: - - benchmarks + - pytest-benchmarks + - asv-benchmarks diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0a1b68a..9e706c0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -11,7 +11,7 @@ permissions: contents: write jobs: - build-wheels: + build-codspeed-wheels: strategy: matrix: platform: @@ -25,19 +25,20 @@ jobs: - uses: actions/checkout@v5 with: submodules: true - - name: Build wheels + - name: Build codspeed wheels uses: pypa/cibuildwheel@v3.2.1 env: CIBW_ARCHS: ${{ matrix.platform.arch }} with: + package-dir: codspeed output-dir: wheelhouse - uses: actions/upload-artifact@v4 with: - name: wheels-${{ matrix.platform.arch }} + name: codspeed-wheels-${{ matrix.platform.arch }} path: wheelhouse/*.whl - build-py3-none-any: + build-codspeed-py3-none-any: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v5 @@ -47,17 +48,17 @@ jobs: - uses: actions/setup-python@v6 with: python-version: "3.14" - - name: Build py3-none-any wheel + - name: Build codspeed py3-none-any wheel env: - PYTEST_CODSPEED_SKIP_EXTENSION_BUILD: "1" - run: uv build --wheel --out-dir dist/ + CODSPEED_SKIP_EXTENSION_BUILD: "1" + run: uv build --wheel --out-dir dist/ --package codspeed - uses: actions/upload-artifact@v4 with: - name: wheels-py3-none-any + name: codspeed-wheels-py3-none-any path: dist/*.whl - build-sdist: + build-pytest-codspeed: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v5 @@ -67,19 +68,40 @@ jobs: - uses: actions/setup-python@v6 with: python-version: "3.14" - - name: Build the source dist - run: uv build --sdist --out-dir dist/ + - name: Build pytest-codspeed wheel + run: uv build --wheel --out-dir dist/ --package pytest-codspeed + - name: Build pytest-codspeed sdist + run: uv build --sdist --out-dir dist/ --package pytest-codspeed - uses: actions/upload-artifact@v4 with: - name: sdist + name: pytest-codspeed-dist + path: dist/* + + build-codspeed-sdist: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + with: + submodules: true + - uses: astral-sh/setup-uv@v7 + - uses: actions/setup-python@v6 + with: + python-version: "3.14" + - name: Build the codspeed source dist + run: uv build --sdist --out-dir dist/ --package codspeed + + - uses: actions/upload-artifact@v4 + with: + name: codspeed-sdist path: dist/*.tar.gz publish: needs: - - build-wheels - - build-py3-none-any - - build-sdist + - build-codspeed-wheels + - build-codspeed-py3-none-any + - build-codspeed-sdist + - build-pytest-codspeed runs-on: ubuntu-24.04 steps: diff --git a/.gitmodules b/.gitmodules index ad23fac..42a8996 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,6 @@ [submodule "tests/benchmarks/TheAlgorithms"] - path = tests/benchmarks/TheAlgorithms + path = pytest-codspeed/tests/benchmarks/TheAlgorithms url = git@github.com:TheAlgorithms/Python.git [submodule "src/pytest_codspeed/instruments/hooks/instrument-hooks"] - path = src/pytest_codspeed/instruments/hooks/instrument-hooks + path = codspeed/src/codspeed/instruments/hooks/instrument-hooks url = https://github.com/CodSpeedHQ/instrument-hooks diff --git a/asv-codspeed/pyproject.toml b/asv-codspeed/pyproject.toml index dcd32f9..103bddf 100644 
--- a/asv-codspeed/pyproject.toml +++ b/asv-codspeed/pyproject.toml @@ -14,7 +14,7 @@ classifiers = [ "Topic :: System :: Benchmark", ] dependencies = [ - "pytest-codspeed>=4.2.0", + "codspeed>=0.1.0", "rich>=13.8.1", ] diff --git a/asv-codspeed/src/asv_codspeed/__main__.py b/asv-codspeed/src/asv_codspeed/__main__.py index 7352f5b..85fbe71 100644 --- a/asv-codspeed/src/asv_codspeed/__main__.py +++ b/asv-codspeed/src/asv_codspeed/__main__.py @@ -7,7 +7,7 @@ from asv_codspeed import __version__ from asv_codspeed.runner import run_benchmarks -from pytest_codspeed.instruments import MeasurementMode +from codspeed.instruments import MeasurementMode def main(argv: list[str] | None = None) -> int: diff --git a/asv-codspeed/src/asv_codspeed/runner.py b/asv-codspeed/src/asv_codspeed/runner.py index ec0b89b..2c050aa 100644 --- a/asv-codspeed/src/asv_codspeed/runner.py +++ b/asv-codspeed/src/asv_codspeed/runner.py @@ -10,33 +10,13 @@ from asv_codspeed.discovery import discover_benchmarks -from pytest_codspeed import __semver_version__ as pytest_codspeed_version -from pytest_codspeed.instruments import MeasurementMode, get_instrument_from_mode -from pytest_codspeed.utils import get_git_relative_path +from codspeed.config import BenchmarkMarkerOptions, CodSpeedConfig +from codspeed.instruments import MeasurementMode, get_instrument_from_mode +from codspeed.utils import get_environment_metadata, get_git_relative_path if TYPE_CHECKING: from asv_codspeed.discovery import ASVBenchmark - from pytest_codspeed.instruments import Instrument - - -def get_environment_metadata() -> dict[str, dict]: - """Report as pytest-codspeed so the CodSpeed platform treats results identically.""" - import importlib.metadata as importlib_metadata - import sysconfig - - return { - "creator": { - "name": "pytest-codspeed", - "version": pytest_codspeed_version, - "pid": os.getpid(), - }, - "python": { - "sysconfig": sysconfig.get_config_vars(), - "dependencies": { - d.name: d.version for d in importlib_metadata.distributions() - }, - }, - } + from codspeed.instruments import Instrument def _get_uri(benchmark_name: str, benchmark_dir: Path) -> str: @@ -81,8 +61,6 @@ def run_benchmarks( ) # Build CodSpeed config - from pytest_codspeed.config import BenchmarkMarkerOptions, CodSpeedConfig - codspeed_config = CodSpeedConfig( warmup_time_ns=( int(warmup_time * 1_000_000_000) if warmup_time is not None else None @@ -91,9 +69,16 @@ def run_benchmarks( max_rounds=max_rounds, ) - # Create instrument + # Create instrument, reporting as "pytest-codspeed" so the CodSpeed platform + # treats results identically + from codspeed import __semver_version__ as codspeed_version + instrument_cls = get_instrument_from_mode(mode) - instrument = instrument_cls(codspeed_config) + instrument = instrument_cls( + codspeed_config, + integration_name="pytest-codspeed", + integration_version=codspeed_version, + ) config_str, warns = instrument.get_instrument_config_str_and_warns() console.print(f" {config_str}") for w in warns: @@ -130,7 +115,10 @@ def run_benchmarks( else: result_path = benchmark_dir / f".codspeed/results_{time() * 1000:.0f}.json" - data = {**get_environment_metadata(), **instrument.get_result_dict()} + data = { + **get_environment_metadata("pytest-codspeed", codspeed_version), + **instrument.get_result_dict(), + } result_path.parent.mkdir(parents=True, exist_ok=True) result_path.write_text(json.dumps(data, indent=2)) console.print(f"Results saved to: {result_path}") @@ -146,10 +134,10 @@ def _report( failed: int, ) -> None: """Print a 
summary report of benchmark results.""" - from pytest_codspeed.instruments.walltime import WallTimeInstrument + from codspeed.instruments.walltime import WallTimeInstrument if isinstance(instrument, WallTimeInstrument) and instrument.benchmarks: - instrument._print_benchmark_table() + instrument.print_benchmark_table() total = passed + failed status = "passed" if failed == 0 else "with failures" diff --git a/asv-codspeed/tests/test_runner.py b/asv-codspeed/tests/test_runner.py index e101868..6817704 100644 --- a/asv-codspeed/tests/test_runner.py +++ b/asv-codspeed/tests/test_runner.py @@ -8,7 +8,7 @@ from asv_codspeed.runner import run_benchmarks -from pytest_codspeed.instruments import MeasurementMode +from codspeed.instruments import MeasurementMode def test_run_simple_benchmarks_walltime(tmp_benchmarks): diff --git a/codspeed/pyproject.toml b/codspeed/pyproject.toml new file mode 100644 index 0000000..b31143e --- /dev/null +++ b/codspeed/pyproject.toml @@ -0,0 +1,47 @@ +[project] +name = "codspeed" +version = "0.1.0" +description = "Core CodSpeed instrumentation library with C bindings" +license = "MIT" +requires-python = ">=3.9" +authors = [{ name = "Arthur Pastel", email = "arthur@codspeed.io" }] +keywords = ["codspeed", "benchmark", "performance", "instrumentation"] +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Topic :: Software Development :: Testing", + "Topic :: System :: Benchmark", + "Typing :: Typed", +] +dependencies = [ + "cffi >= 1.17.1", + "rich>=13.8.1", + "importlib-metadata>=8.5.0; python_version < '3.10'", +] + +[build-system] +requires = ["setuptools >= 61", "cffi >= 1.17.1"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.dynamic] +version = { attr = "codspeed.__version__" } + +[tool.setuptools.packages.find] +where = ["src"] + +[tool.cibuildwheel] +build = "cp*manylinux*" +test-command = "python -c \"from codspeed.instruments.hooks.dist_instrument_hooks import lib; print('OK')\"" + +[tool.cibuildwheel.linux] +environment = { CODSPEED_FORCE_EXTENSION_BUILD = "1" } +manylinux-x86_64-image = "manylinux_2_28" +manylinux-aarch64-image = "manylinux_2_28" +before-all = "yum -y install valgrind-devel" diff --git a/setup.py b/codspeed/setup.py similarity index 79% rename from setup.py rename to codspeed/setup.py index e0657bb..06e6152 100644 --- a/setup.py +++ b/codspeed/setup.py @@ -5,7 +5,7 @@ from setuptools import setup -build_path = Path(__file__).parent / "src/pytest_codspeed/instruments/hooks/build.py" +build_path = Path(__file__).parent / "src/codspeed/instruments/hooks/build.py" spec = importlib.util.spec_from_file_location("build", build_path) assert spec is not None, "The spec should be initialized" @@ -24,11 +24,13 @@ ] IS_EXTENSION_REQUIRED = ( - os.environ.get("PYTEST_CODSPEED_FORCE_EXTENSION_BUILD") is not None + os.environ.get("CODSPEED_FORCE_EXTENSION_BUILD") is not None + or os.environ.get("PYTEST_CODSPEED_FORCE_EXTENSION_BUILD") is not None ) SKIP_EXTENSION_BUILD = ( - os.environ.get("PYTEST_CODSPEED_SKIP_EXTENSION_BUILD") is not None + os.environ.get("CODSPEED_SKIP_EXTENSION_BUILD") is not None + or os.environ.get("PYTEST_CODSPEED_SKIP_EXTENSION_BUILD") is not None ) if SKIP_EXTENSION_BUILD and 
IS_EXTENSION_REQUIRED: @@ -49,7 +51,7 @@ setup( package_data={ - "pytest_codspeed": [ + "codspeed": [ "instruments/hooks/instrument-hooks/includes/*.h", "instruments/hooks/instrument-hooks/dist/*.c", ] diff --git a/codspeed/src/codspeed/__init__.py b/codspeed/src/codspeed/__init__.py new file mode 100644 index 0000000..0190918 --- /dev/null +++ b/codspeed/src/codspeed/__init__.py @@ -0,0 +1,5 @@ +__version__ = "0.1.0" +# We also have the semver version since __version__ is not semver compliant +__semver_version__ = "0.1.0" + +__all__ = ["__version__", "__semver_version__"] diff --git a/src/pytest_codspeed/config.py b/codspeed/src/codspeed/config.py similarity index 61% rename from src/pytest_codspeed/config.py rename to codspeed/src/codspeed/config.py index 1932a8e..24765a0 100644 --- a/src/pytest_codspeed/config.py +++ b/codspeed/src/codspeed/config.py @@ -9,34 +9,17 @@ if TYPE_CHECKING: from typing import Any, Callable - import pytest - @dataclass(frozen=True) class CodSpeedConfig: """ - The configuration for the codspeed plugin. - Usually created from the command line arguments. + The configuration for CodSpeed instrumentation. """ warmup_time_ns: int | None = None max_time_ns: int | None = None max_rounds: int | None = None - @classmethod - def from_pytest_config(cls, config: pytest.Config) -> CodSpeedConfig: - warmup_time = config.getoption("--codspeed-warmup-time", None) - warmup_time_ns = ( - int(warmup_time * 1_000_000_000) if warmup_time is not None else None - ) - max_time = config.getoption("--codspeed-max-time", None) - max_time_ns = int(max_time * 1_000_000_000) if max_time is not None else None - return cls( - warmup_time_ns=warmup_time_ns, - max_rounds=config.getoption("--codspeed-max-rounds", None), - max_time_ns=max_time_ns, - ) - @dataclass(frozen=True) class BenchmarkMarkerOptions: @@ -58,30 +41,6 @@ class BenchmarkMarkerOptions: Takes precedence over max_time. Only available in walltime mode. """ - @classmethod - def from_pytest_item(cls, item: pytest.Item) -> BenchmarkMarkerOptions: - marker = item.get_closest_marker( - "codspeed_benchmark" - ) or item.get_closest_marker("benchmark") - if marker is None: - return cls() - if len(marker.args) > 0: - raise ValueError( - "Positional arguments are not allowed in the benchmark marker" - ) - kwargs = marker.kwargs - - unknown_kwargs = set(kwargs.keys()) - { - field.name for field in dataclasses.fields(cls) - } - if unknown_kwargs: - raise ValueError( - "Unknown kwargs passed to benchmark marker: " - + ", ".join(sorted(unknown_kwargs)) - ) - - return cls(**kwargs) - @dataclass(frozen=True) class PedanticOptions(Generic[T]): diff --git a/src/pytest_codspeed/instruments/__init__.py b/codspeed/src/codspeed/instruments/__init__.py similarity index 81% rename from src/pytest_codspeed/instruments/__init__.py rename to codspeed/src/codspeed/instruments/__init__.py index 08dace3..8e411ce 100644 --- a/src/pytest_codspeed/instruments/__init__.py +++ b/codspeed/src/codspeed/instruments/__init__.py @@ -7,11 +7,9 @@ if TYPE_CHECKING: from typing import Any, Callable, ClassVar, TypeVar - import pytest from typing_extensions import ParamSpec - from pytest_codspeed.config import BenchmarkMarkerOptions, PedanticOptions - from pytest_codspeed.plugin import CodSpeedConfig + from codspeed.config import BenchmarkMarkerOptions, CodSpeedConfig, PedanticOptions T = TypeVar("T") P = ParamSpec("P") @@ -46,9 +44,6 @@ def measure_pedantic( uri: str, ) -> T: ... - @abstractmethod - def report(self, session: pytest.Session) -> None: ... 
- @abstractmethod def get_result_dict( self, @@ -68,10 +63,10 @@ def _missing_(cls, value: object): def get_instrument_from_mode(mode: MeasurementMode) -> type[Instrument]: - from pytest_codspeed.instruments.valgrind import ( + from codspeed.instruments.valgrind import ( ValgrindInstrument, ) - from pytest_codspeed.instruments.walltime import WallTimeInstrument + from codspeed.instruments.walltime import WallTimeInstrument if mode == MeasurementMode.Simulation: return ValgrindInstrument diff --git a/src/pytest_codspeed/instruments/hooks/__init__.py b/codspeed/src/codspeed/instruments/hooks/__init__.py similarity index 98% rename from src/pytest_codspeed/instruments/hooks/__init__.py rename to codspeed/src/codspeed/instruments/hooks/__init__.py index 3a852d6..627e401 100644 --- a/src/pytest_codspeed/instruments/hooks/__init__.py +++ b/codspeed/src/codspeed/instruments/hooks/__init__.py @@ -5,7 +5,7 @@ import warnings from typing import TYPE_CHECKING -from pytest_codspeed.utils import SUPPORTS_PERF_TRAMPOLINE +from codspeed.utils import SUPPORTS_PERF_TRAMPOLINE if TYPE_CHECKING: from .dist_instrument_hooks import InstrumentHooksPointer, LibType diff --git a/src/pytest_codspeed/instruments/hooks/build.py b/codspeed/src/codspeed/instruments/hooks/build.py similarity index 91% rename from src/pytest_codspeed/instruments/hooks/build.py rename to codspeed/src/codspeed/instruments/hooks/build.py index 992ee01..0991ad9 100644 --- a/src/pytest_codspeed/instruments/hooks/build.py +++ b/codspeed/src/codspeed/instruments/hooks/build.py @@ -37,12 +37,12 @@ """) ffibuilder.set_source( - "pytest_codspeed.instruments.hooks.dist_instrument_hooks", + "codspeed.instruments.hooks.dist_instrument_hooks", """ #include "core.h" """, sources=[ - "src/pytest_codspeed/instruments/hooks/instrument-hooks/dist/core.c", + "src/codspeed/instruments/hooks/instrument-hooks/dist/core.c", ], include_dirs=[str(includes_dir)], ) diff --git a/src/pytest_codspeed/instruments/hooks/dist_instrument_hooks.pyi b/codspeed/src/codspeed/instruments/hooks/dist_instrument_hooks.pyi similarity index 100% rename from src/pytest_codspeed/instruments/hooks/dist_instrument_hooks.pyi rename to codspeed/src/codspeed/instruments/hooks/dist_instrument_hooks.pyi diff --git a/src/pytest_codspeed/instruments/hooks/instrument-hooks b/codspeed/src/codspeed/instruments/hooks/instrument-hooks similarity index 100% rename from src/pytest_codspeed/instruments/hooks/instrument-hooks rename to codspeed/src/codspeed/instruments/hooks/instrument-hooks diff --git a/src/pytest_codspeed/instruments/valgrind.py b/codspeed/src/codspeed/instruments/valgrind.py similarity index 81% rename from src/pytest_codspeed/instruments/valgrind.py rename to codspeed/src/codspeed/instruments/valgrind.py index b6667f8..eff979d 100644 --- a/src/pytest_codspeed/instruments/valgrind.py +++ b/codspeed/src/codspeed/instruments/valgrind.py @@ -4,30 +4,34 @@ import warnings from typing import TYPE_CHECKING -from pytest_codspeed import __semver_version__ -from pytest_codspeed.instruments import Instrument -from pytest_codspeed.instruments.hooks import InstrumentHooks -from pytest_codspeed.utils import SUPPORTS_PERF_TRAMPOLINE +from codspeed.instruments import Instrument +from codspeed.instruments.hooks import InstrumentHooks +from codspeed.utils import SUPPORTS_PERF_TRAMPOLINE if TYPE_CHECKING: from typing import Any, Callable - from pytest import Session - - from pytest_codspeed.config import PedanticOptions - from pytest_codspeed.instruments import P, T - from pytest_codspeed.plugin 
import BenchmarkMarkerOptions, CodSpeedConfig + from codspeed.config import CodSpeedConfig, PedanticOptions + from codspeed.instruments import P, T + from codspeed.config import BenchmarkMarkerOptions class ValgrindInstrument(Instrument): instrument = "valgrind" instrument_hooks: InstrumentHooks | None - def __init__(self, config: CodSpeedConfig) -> None: + def __init__( + self, + config: CodSpeedConfig, + integration_name: str = "pytest-codspeed", + integration_version: str = "0.0.0", + ) -> None: self.benchmark_count = 0 try: self.instrument_hooks = InstrumentHooks() - self.instrument_hooks.set_integration("pytest-codspeed", __semver_version__) + self.instrument_hooks.set_integration( + integration_name, integration_version + ) except RuntimeError as e: if os.environ.get("CODSPEED_ENV") is not None: raise Exception( @@ -128,15 +132,6 @@ def __codspeed_root_frame__(*args, **kwargs) -> T: return out - def report(self, session: Session) -> None: - reporter = session.config.pluginmanager.get_plugin("terminalreporter") - assert reporter is not None, "terminalreporter not found" - count_suffix = "benchmarked" if self.should_measure else "benchmark tested" - reporter.write_sep( - "=", - f"{self.benchmark_count} {count_suffix}", - ) - def get_result_dict(self) -> dict[str, Any]: return { "instrument": {"type": self.instrument}, diff --git a/src/pytest_codspeed/instruments/walltime.py b/codspeed/src/codspeed/instruments/walltime.py similarity index 90% rename from src/pytest_codspeed/instruments/walltime.py rename to codspeed/src/codspeed/instruments/walltime.py index f85f857..3797897 100644 --- a/src/pytest_codspeed/instruments/walltime.py +++ b/codspeed/src/codspeed/instruments/walltime.py @@ -13,19 +13,15 @@ from rich.table import Table from rich.text import Text -from pytest_codspeed import __semver_version__ -from pytest_codspeed.instruments import Instrument -from pytest_codspeed.instruments.hooks import InstrumentHooks -from pytest_codspeed.utils import SUPPORTS_PERF_TRAMPOLINE +from codspeed.instruments import Instrument +from codspeed.instruments.hooks import InstrumentHooks +from codspeed.utils import SUPPORTS_PERF_TRAMPOLINE if TYPE_CHECKING: from typing import Any, Callable - from pytest import Session - - from pytest_codspeed.config import PedanticOptions - from pytest_codspeed.instruments import P, T - from pytest_codspeed.plugin import BenchmarkMarkerOptions, CodSpeedConfig + from codspeed.config import BenchmarkMarkerOptions, CodSpeedConfig, PedanticOptions + from codspeed.instruments import P, T DEFAULT_WARMUP_TIME_NS = 1_000_000_000 DEFAULT_MAX_TIME_NS = 3_000_000_000 @@ -159,10 +155,17 @@ class WallTimeInstrument(Instrument): instrument = "walltime" instrument_hooks: InstrumentHooks | None - def __init__(self, config: CodSpeedConfig) -> None: + def __init__( + self, + config: CodSpeedConfig, + integration_name: str = "pytest-codspeed", + integration_version: str = "0.0.0", + ) -> None: try: self.instrument_hooks = InstrumentHooks() - self.instrument_hooks.set_integration("pytest-codspeed", __semver_version__) + self.instrument_hooks.set_integration( + integration_name, integration_version + ) except RuntimeError as e: if os.environ.get("CODSPEED_ENV") is not None: warnings.warn( @@ -324,23 +327,7 @@ def __codspeed_root_frame__(*args, **kwargs) -> T: ) return out - def report(self, session: Session) -> None: - reporter = session.config.pluginmanager.get_plugin("terminalreporter") - assert reporter is not None, "terminalreporter not found" - - if len(self.benchmarks) == 0: - 
reporter.write_sep( - "=", - f"{len(self.benchmarks)} benchmarked", - ) - return - self._print_benchmark_table() - reporter.write_sep( - "=", - f"{len(self.benchmarks)} benchmarked", - ) - - def _print_benchmark_table(self) -> None: + def print_benchmark_table(self) -> None: table = Table(title="Benchmark Results") table.add_column("Benchmark", justify="right", style="cyan", no_wrap=True) @@ -386,13 +373,13 @@ def format_time(time_ns: float) -> str: time_ns: Time in nanoseconds Returns: - Formatted string with appropriate unit (ns, µs, ms, or s) + Formatted string with appropriate unit (ns, us, ms, or s) Examples: >>> format_time(123) '123ns' >>> format_time(1_234) - '1.23µs' + '1.23us' >>> format_time(76_126_625) '76.1ms' >>> format_time(2_500_000_000) @@ -403,7 +390,7 @@ def format_time(time_ns: float) -> str: return f"{time_ns:.0f}ns" elif time_ns < 1_000_000: # Less than 1 millisecond - show in microseconds - return f"{time_ns / 1_000:.2f}µs" + return f"{time_ns / 1_000:.2f}\u00b5s" elif time_ns < 1_000_000_000: # Less than 1 second - show in milliseconds return f"{time_ns / 1_000_000:.1f}ms" diff --git a/src/pytest_codspeed/py.typed b/codspeed/src/codspeed/py.typed similarity index 100% rename from src/pytest_codspeed/py.typed rename to codspeed/src/codspeed/py.typed diff --git a/codspeed/src/codspeed/utils.py b/codspeed/src/codspeed/utils.py new file mode 100644 index 0000000..0a48b31 --- /dev/null +++ b/codspeed/src/codspeed/utils.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +import os +import sysconfig +from pathlib import Path + +if __import__("sys").version_info < (3, 10): + import importlib_metadata as importlib_metadata +else: + import importlib.metadata as importlib_metadata + + +SUPPORTS_PERF_TRAMPOLINE = sysconfig.get_config_var("PY_HAVE_PERF_TRAMPOLINE") == 1 + + +def get_git_relative_path(abs_path: Path) -> Path: + """Get the path relative to the git root directory. If the path is not + inside a git repository, the original path itself is returned. 
+ """ + git_path = Path(abs_path).resolve() + while ( + git_path != git_path.parent + ): # stops at root since parent of root is root itself + if (git_path / ".git").exists(): + return abs_path.resolve().relative_to(git_path) + git_path = git_path.parent + return abs_path + + +def get_environment_metadata( + creator_name: str, creator_version: str +) -> dict[str, dict]: + return { + "creator": { + "name": creator_name, + "version": creator_version, + "pid": os.getpid(), + }, + "python": { + "sysconfig": sysconfig.get_config_vars(), + "dependencies": { + d.name: d.version for d in importlib_metadata.distributions() + }, + }, + } diff --git a/pyproject.toml b/pyproject.toml index f19b392..099ed53 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,63 +1,19 @@ -[project.urls] -Homepage = "https://codspeed.io/" -Documentation = "https://codspeed.io/docs/reference/pytest-codspeed" -Source = "https://github.com/CodSpeedHQ/pytest-codspeed" - -[project] -name = "pytest-codspeed" -dynamic = ["version"] -description = "Pytest plugin to create CodSpeed benchmarks" -readme = "README.md" -license = { file = "LICENSE" } -requires-python = ">=3.9" -authors = [{ name = "Arthur Pastel", email = "arthur@codspeed.io" }] -keywords = ["codspeed", "benchmark", "performance", "pytest"] -classifiers = [ - "Development Status :: 5 - Production/Stable", - "Framework :: Pytest", - "Intended Audience :: Developers", - "Intended Audience :: Information Technology", - "License :: OSI Approved :: MIT License", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Programming Language :: Python :: 3.14", - "Topic :: Software Development :: Testing", - "Topic :: System :: Benchmark", - "Topic :: Utilities", - "Typing :: Typed", -] -dependencies = [ - "cffi >= 1.17.1", - "pytest>=3.8", - "rich>=13.8.1", - "importlib-metadata>=8.5.0; python_version < '3.10'", -] - -[project.optional-dependencies] -compat = [ - "pytest-benchmark ~= 5.0.0", - "pytest-xdist ~= 3.6.1", - # "pytest-speed>=0.3.5", -] - [tool.uv] # Python builds change with uv versions, and we are quite susceptible to that. # We pin uv to to make sure reproducibility is maintained for any contributor. 
required-version = ">=0.9.5" [tool.uv.workspace] -members = [".", "asv-codspeed"] +members = ["codspeed", "pytest-codspeed", "asv-codspeed"] [tool.uv.sources] +codspeed = { workspace = true } pytest-codspeed = { workspace = true } asv-codspeed = { workspace = true } [dependency-groups] dev = [ + "codspeed", "pytest-codspeed", "asv-codspeed", "mypy ~= 1.18.2", @@ -67,52 +23,6 @@ dev = [ "pytest-test-groups>=1.1.0", ] -[project.entry-points] -pytest11 = { codspeed = "pytest_codspeed.plugin" } - -[build-system] -requires = ["setuptools >= 61", "cffi >= 1.17.1"] -build-backend = "setuptools.build_meta" - -[tool.setuptools] -license-files = [] # Workaround of https://github.com/astral-sh/uv/issues/9513 - -[tool.setuptools.dynamic] -version = { attr = "pytest_codspeed.__version__" } - - -[tool.bumpver] -current_version = "4.2.0" -version_pattern = "MAJOR.MINOR.PATCH[-TAG[NUM]]" -commit_message = "Release v{new_version} 🚀" -tag_message = "Release v{new_version} 🚀" -tag_scope = "default" -allow_dirty = false -pre_commit_hook = "./scripts/pre-release.sh" -post_commit_hook = "./scripts/post-release.sh" -commit = true -tag = false -push = false - - -[tool.bumpver.file_patterns] -"pyproject.toml" = ['current_version = "{version}"'] -"src/pytest_codspeed/__init__.py" = [ - '__version__ = "{pep440_version}"', - '__semver_version__ = "{version}"', -] - -[tool.cibuildwheel] -build = "cp*manylinux*" -test-extras = ["build", "test", "compat"] -test-command = "pytest -v --ignore={project}/tests/benchmarks {project}/tests" - -[tool.cibuildwheel.linux] -environment = { PYTEST_CODSPEED_FORCE_EXTENSION_BUILD = "1", PYTEST_CODSPEED_FORCE_VALGRIND_TESTS = "1" } -manylinux-x86_64-image = "manylinux_2_28" -manylinux-aarch64-image = "manylinux_2_28" -before-all = "yum -y install valgrind-devel" - [tool.mypy] python_version = "3.12" @@ -132,14 +42,14 @@ force_grid_wrap = 0 float_to_top = true [tool.pytest.ini_options] -addopts = "--ignore=tests/benchmarks --ignore=tests/examples --ignore=tests/benchmarks/TheAlgorithms --ignore=asv-codspeed" +addopts = "--ignore=pytest-codspeed/tests/benchmarks --ignore=pytest-codspeed/tests/examples" filterwarnings = ["ignore::DeprecationWarning:pytest_benchmark.utils.*:"] -pythonpath = ["tests/benchmarks/TheAlgorithms", "./scripts"] +pythonpath = ["pytest-codspeed/tests/benchmarks/TheAlgorithms", "scripts"] [tool.coverage.run] branch = true [tool.coverage.report] -include = ["src/*", "tests/*"] +include = ["*/src/*", "*/tests/*"] omit = ["**/conftest.py"] exclude_lines = [ "pragma: no cover", diff --git a/pytest-codspeed/pyproject.toml b/pytest-codspeed/pyproject.toml new file mode 100644 index 0000000..cc67d9a --- /dev/null +++ b/pytest-codspeed/pyproject.toml @@ -0,0 +1,90 @@ +[project.urls] +Homepage = "https://codspeed.io/" +Documentation = "https://codspeed.io/docs/reference/pytest-codspeed" +Source = "https://github.com/CodSpeedHQ/pytest-codspeed" + +[project] +name = "pytest-codspeed" +dynamic = ["version"] +description = "Pytest plugin to create CodSpeed benchmarks" +license = "MIT" +requires-python = ">=3.9" +authors = [{ name = "Arthur Pastel", email = "arthur@codspeed.io" }] +keywords = ["codspeed", "benchmark", "performance", "pytest"] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Framework :: Pytest", + "Intended Audience :: Developers", + "Intended Audience :: Information Technology", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 
3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Topic :: Software Development :: Testing", + "Topic :: System :: Benchmark", + "Topic :: Utilities", + "Typing :: Typed", +] +dependencies = [ + "codspeed>=0.1.0", + "pytest>=3.8", +] + +[project.optional-dependencies] +compat = [ + "pytest-benchmark ~= 5.0.0", + "pytest-xdist ~= 3.6.1", + # "pytest-speed>=0.3.5", +] + +[project.entry-points] +pytest11 = { codspeed = "pytest_codspeed.plugin" } + +[build-system] +requires = ["setuptools >= 61"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.dynamic] +version = { attr = "pytest_codspeed.__version__" } + +[tool.setuptools.packages.find] +where = ["src"] + +[tool.bumpver] +current_version = "4.2.0" +version_pattern = "MAJOR.MINOR.PATCH[-TAG[NUM]]" +commit_message = "Release v{new_version}" +tag_message = "Release v{new_version}" +tag_scope = "default" +allow_dirty = false +pre_commit_hook = "../scripts/pre-release.sh" +post_commit_hook = "../scripts/post-release.sh" +commit = true +tag = false +push = false + +[tool.bumpver.file_patterns] +"pyproject.toml" = ['current_version = "{version}"'] +"src/pytest_codspeed/__init__.py" = [ + '__version__ = "{pep440_version}"', + '__semver_version__ = "{version}"', +] + +[tool.cibuildwheel] +build = "cp*manylinux*" +test-extras = ["build", "test", "compat"] +test-command = "pytest -v --ignore={project}/tests/benchmarks {project}/tests" + +[tool.cibuildwheel.linux] +environment = { PYTEST_CODSPEED_FORCE_EXTENSION_BUILD = "1", PYTEST_CODSPEED_FORCE_VALGRIND_TESTS = "1" } +manylinux-x86_64-image = "manylinux_2_28" +manylinux-aarch64-image = "manylinux_2_28" +before-all = "yum -y install valgrind-devel" + +[tool.pytest.ini_options] +addopts = "--ignore=tests/benchmarks --ignore=tests/examples --ignore=tests/benchmarks/TheAlgorithms" +filterwarnings = ["ignore::DeprecationWarning:pytest_benchmark.utils.*:"] +pythonpath = ["tests/benchmarks/TheAlgorithms", "../scripts"] diff --git a/src/pytest_codspeed/__init__.py b/pytest-codspeed/src/pytest_codspeed/__init__.py similarity index 100% rename from src/pytest_codspeed/__init__.py rename to pytest-codspeed/src/pytest_codspeed/__init__.py diff --git a/src/pytest_codspeed/plugin.py b/pytest-codspeed/src/pytest_codspeed/plugin.py similarity index 80% rename from src/pytest_codspeed/plugin.py rename to pytest-codspeed/src/pytest_codspeed/plugin.py index 0d2853b..62d4902 100644 --- a/src/pytest_codspeed/plugin.py +++ b/pytest-codspeed/src/pytest_codspeed/plugin.py @@ -1,5 +1,6 @@ from __future__ import annotations +import dataclasses import functools import gc import json @@ -13,12 +14,13 @@ import pytest from _pytest.fixtures import FixtureManager -from pytest_codspeed.config import ( +from codspeed.config import ( BenchmarkMarkerOptions, CodSpeedConfig, PedanticOptions, ) -from pytest_codspeed.instruments import MeasurementMode, get_instrument_from_mode +from codspeed.instruments import MeasurementMode, get_instrument_from_mode + from pytest_codspeed.utils import ( BEFORE_PYTEST_8_1_1, IS_PYTEST_BENCHMARK_INSTALLED, @@ -27,17 +29,57 @@ get_git_relative_uri_and_name, ) -from . import __version__ +from . 
import __semver_version__, __version__ if TYPE_CHECKING: from typing import Any, Callable, ParamSpec, TypeVar - from pytest_codspeed.instruments import Instrument + from codspeed.instruments import Instrument T = TypeVar("T") P = ParamSpec("P") +def _marker_options_from_pytest_item(item: pytest.Item) -> BenchmarkMarkerOptions: + """Extract BenchmarkMarkerOptions from a pytest item's markers.""" + marker = item.get_closest_marker( + "codspeed_benchmark" + ) or item.get_closest_marker("benchmark") + if marker is None: + return BenchmarkMarkerOptions() + if len(marker.args) > 0: + raise ValueError( + "Positional arguments are not allowed in the benchmark marker" + ) + kwargs = marker.kwargs + + unknown_kwargs = set(kwargs.keys()) - { + f.name for f in dataclasses.fields(BenchmarkMarkerOptions) + } + if unknown_kwargs: + raise ValueError( + "Unknown kwargs passed to benchmark marker: " + + ", ".join(sorted(unknown_kwargs)) + ) + + return BenchmarkMarkerOptions(**kwargs) + + +def _codspeed_config_from_pytest(config: pytest.Config) -> CodSpeedConfig: + """Build CodSpeedConfig from pytest command-line options.""" + warmup_time = config.getoption("--codspeed-warmup-time", None) + warmup_time_ns = ( + int(warmup_time * 1_000_000_000) if warmup_time is not None else None + ) + max_time = config.getoption("--codspeed-max-time", None) + max_time_ns = int(max_time * 1_000_000_000) if max_time is not None else None + return CodSpeedConfig( + warmup_time_ns=warmup_time_ns, + max_rounds=config.getoption("--codspeed-max-rounds", None), + max_time_ns=max_time_ns, + ) + + @pytest.hookimpl(trylast=True) def pytest_addoption(parser: pytest.Parser): group = parser.getgroup("CodSpeed benchmarking") @@ -120,7 +162,7 @@ def pytest_configure(config: pytest.Config): default_mode = MeasurementMode.WallTime.value mode = MeasurementMode(config.getoption("--codspeed-mode", None) or default_mode) - instrument = get_instrument_from_mode(mode) + instrument_cls = get_instrument_from_mode(mode) disabled_plugins: list[str] = [] if is_codspeed_enabled: if IS_PYTEST_BENCHMARK_INSTALLED: @@ -136,13 +178,17 @@ def pytest_configure(config: pytest.Config): profile_folder = os.environ.get("CODSPEED_PROFILE_FOLDER") - codspeed_config = CodSpeedConfig.from_pytest_config(config) + codspeed_config = _codspeed_config_from_pytest(config) plugin = CodSpeedPlugin( disabled_plugins=tuple(disabled_plugins), is_codspeed_enabled=is_codspeed_enabled, mode=mode, - instrument=instrument(codspeed_config), + instrument=instrument_cls( + codspeed_config, + integration_name="pytest-codspeed", + integration_version=__semver_version__, + ), config=codspeed_config, profile_folder=Path(profile_folder) if profile_folder else None, ) @@ -241,7 +287,7 @@ def _measure( args: tuple[Any, ...], kwargs: dict[str, Any], ) -> T: - marker_options = BenchmarkMarkerOptions.from_pytest_item(node) + marker_options = _marker_options_from_pytest_item(node) random.seed(0) is_gc_enabled = gc.isenabled() if is_gc_enabled: @@ -292,11 +338,36 @@ def pytest_runtest_protocol(item: pytest.Item, nextitem: pytest.Item | None): return None +def _report_instrument(instrument: Instrument, session: pytest.Session) -> None: + """Handle instrument-specific reporting to the pytest terminal.""" + from codspeed.instruments.valgrind import ValgrindInstrument + from codspeed.instruments.walltime import WallTimeInstrument + + reporter = session.config.pluginmanager.get_plugin("terminalreporter") + assert reporter is not None, "terminalreporter not found" + + if isinstance(instrument, 
WallTimeInstrument): + if instrument.benchmarks: + instrument.print_benchmark_table() + reporter.write_sep( + "=", + f"{len(instrument.benchmarks)} benchmarked", + ) + elif isinstance(instrument, ValgrindInstrument): + count_suffix = ( + "benchmarked" if instrument.should_measure else "benchmark tested" + ) + reporter.write_sep( + "=", + f"{instrument.benchmark_count} {count_suffix}", + ) + + @pytest.hookimpl() def pytest_sessionfinish(session: pytest.Session, exitstatus): plugin = get_plugin(session.config) if plugin.is_codspeed_enabled: - plugin.instrument.report(session) + _report_instrument(plugin.instrument, session) if plugin.profile_folder: result_path = plugin.profile_folder / "results" / f"{os.getpid()}.json" else: diff --git a/tests/benchmarks/TheAlgorithms_bench/__init__.py b/pytest-codspeed/src/pytest_codspeed/py.typed similarity index 100% rename from tests/benchmarks/TheAlgorithms_bench/__init__.py rename to pytest-codspeed/src/pytest_codspeed/py.typed diff --git a/src/pytest_codspeed/utils.py b/pytest-codspeed/src/pytest_codspeed/utils.py similarity index 52% rename from src/pytest_codspeed/utils.py rename to pytest-codspeed/src/pytest_codspeed/utils.py index 6c24143..1b1b164 100644 --- a/src/pytest_codspeed/utils.py +++ b/pytest-codspeed/src/pytest_codspeed/utils.py @@ -1,39 +1,19 @@ from __future__ import annotations import importlib.util -import os -import sys -import sysconfig from pathlib import Path import pytest -from pytest_codspeed import __semver_version__ +from codspeed.utils import get_environment_metadata as _get_environment_metadata +from codspeed.utils import get_git_relative_path -if sys.version_info < (3, 10): - import importlib_metadata as importlib_metadata -else: - import importlib.metadata as importlib_metadata +from pytest_codspeed import __semver_version__ IS_PYTEST_BENCHMARK_INSTALLED = importlib.util.find_spec("pytest_benchmark") is not None IS_PYTEST_SPEED_INSTALLED = importlib.util.find_spec("pytest_speed") is not None BEFORE_PYTEST_8_1_1 = pytest.version_tuple < (8, 1, 1) -SUPPORTS_PERF_TRAMPOLINE = sysconfig.get_config_var("PY_HAVE_PERF_TRAMPOLINE") == 1 - - -def get_git_relative_path(abs_path: Path) -> Path: - """Get the path relative to the git root directory. If the path is not - inside a git repository, the original path itself is returned. 
- """ - git_path = Path(abs_path).resolve() - while ( - git_path != git_path.parent - ): # stops at root since parent of root is root itself - if (git_path / ".git").exists(): - return abs_path.resolve().relative_to(git_path) - git_path = git_path.parent - return abs_path def get_git_relative_uri_and_name(nodeid: str, pytest_rootdir: Path) -> tuple[str, str]: @@ -57,16 +37,4 @@ def get_git_relative_uri_and_name(nodeid: str, pytest_rootdir: Path) -> tuple[st def get_environment_metadata() -> dict[str, dict]: - return { - "creator": { - "name": "pytest-codspeed", - "version": __semver_version__, - "pid": os.getpid(), - }, - "python": { - "sysconfig": sysconfig.get_config_vars(), - "dependencies": { - d.name: d.version for d in importlib_metadata.distributions() - }, - }, - } + return _get_environment_metadata("pytest-codspeed", __semver_version__) diff --git a/tests/benchmarks/TheAlgorithms b/pytest-codspeed/tests/benchmarks/TheAlgorithms similarity index 100% rename from tests/benchmarks/TheAlgorithms rename to pytest-codspeed/tests/benchmarks/TheAlgorithms diff --git a/tests/benchmarks/__init__.py b/pytest-codspeed/tests/benchmarks/TheAlgorithms_bench/__init__.py similarity index 100% rename from tests/benchmarks/__init__.py rename to pytest-codspeed/tests/benchmarks/TheAlgorithms_bench/__init__.py diff --git a/tests/benchmarks/TheAlgorithms_bench/bit_manipulation.py b/pytest-codspeed/tests/benchmarks/TheAlgorithms_bench/bit_manipulation.py similarity index 100% rename from tests/benchmarks/TheAlgorithms_bench/bit_manipulation.py rename to pytest-codspeed/tests/benchmarks/TheAlgorithms_bench/bit_manipulation.py diff --git a/tests/benchmarks/TheAlgorithms_bench/test_bench_audio_filters.py b/pytest-codspeed/tests/benchmarks/TheAlgorithms_bench/test_bench_audio_filters.py similarity index 100% rename from tests/benchmarks/TheAlgorithms_bench/test_bench_audio_filters.py rename to pytest-codspeed/tests/benchmarks/TheAlgorithms_bench/test_bench_audio_filters.py diff --git a/tests/benchmarks/TheAlgorithms_bench/test_bench_backtracking.py b/pytest-codspeed/tests/benchmarks/TheAlgorithms_bench/test_bench_backtracking.py similarity index 100% rename from tests/benchmarks/TheAlgorithms_bench/test_bench_backtracking.py rename to pytest-codspeed/tests/benchmarks/TheAlgorithms_bench/test_bench_backtracking.py diff --git a/tests/examples/__init__.py b/pytest-codspeed/tests/benchmarks/__init__.py similarity index 100% rename from tests/examples/__init__.py rename to pytest-codspeed/tests/benchmarks/__init__.py diff --git a/tests/benchmarks/test_bench_doc.py b/pytest-codspeed/tests/benchmarks/test_bench_doc.py similarity index 100% rename from tests/benchmarks/test_bench_doc.py rename to pytest-codspeed/tests/benchmarks/test_bench_doc.py diff --git a/tests/benchmarks/test_bench_fibo.py b/pytest-codspeed/tests/benchmarks/test_bench_fibo.py similarity index 100% rename from tests/benchmarks/test_bench_fibo.py rename to pytest-codspeed/tests/benchmarks/test_bench_fibo.py diff --git a/tests/benchmarks/test_bench_misc.py b/pytest-codspeed/tests/benchmarks/test_bench_misc.py similarity index 100% rename from tests/benchmarks/test_bench_misc.py rename to pytest-codspeed/tests/benchmarks/test_bench_misc.py diff --git a/tests/benchmarks/test_bench_syscalls.py b/pytest-codspeed/tests/benchmarks/test_bench_syscalls.py similarity index 100% rename from tests/benchmarks/test_bench_syscalls.py rename to pytest-codspeed/tests/benchmarks/test_bench_syscalls.py diff --git a/tests/benchmarks/test_bench_various_noop.py 
b/pytest-codspeed/tests/benchmarks/test_bench_various_noop.py similarity index 100% rename from tests/benchmarks/test_bench_various_noop.py rename to pytest-codspeed/tests/benchmarks/test_bench_various_noop.py diff --git a/tests/conftest.py b/pytest-codspeed/tests/conftest.py similarity index 97% rename from tests/conftest.py rename to pytest-codspeed/tests/conftest.py index e8a5625..9aa4c60 100644 --- a/tests/conftest.py +++ b/pytest-codspeed/tests/conftest.py @@ -9,7 +9,7 @@ import pytest -from pytest_codspeed.instruments import MeasurementMode +from codspeed.instruments import MeasurementMode from pytest_codspeed.utils import IS_PYTEST_BENCHMARK_INSTALLED if TYPE_CHECKING: diff --git a/pytest-codspeed/tests/examples/__init__.py b/pytest-codspeed/tests/examples/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/examples/test_addition_fixture.py b/pytest-codspeed/tests/examples/test_addition_fixture.py similarity index 100% rename from tests/examples/test_addition_fixture.py rename to pytest-codspeed/tests/examples/test_addition_fixture.py diff --git a/tests/test_format_time.py b/pytest-codspeed/tests/test_format_time.py similarity index 94% rename from tests/test_format_time.py rename to pytest-codspeed/tests/test_format_time.py index e374871..de622e4 100644 --- a/tests/test_format_time.py +++ b/pytest-codspeed/tests/test_format_time.py @@ -2,7 +2,7 @@ import pytest -from pytest_codspeed.instruments.walltime import format_time +from codspeed.instruments.walltime import format_time @pytest.mark.parametrize( diff --git a/tests/test_pytest_plugin.py b/pytest-codspeed/tests/test_pytest_plugin.py similarity index 100% rename from tests/test_pytest_plugin.py rename to pytest-codspeed/tests/test_pytest_plugin.py diff --git a/tests/test_pytest_plugin_cpu_instrumentation.py b/pytest-codspeed/tests/test_pytest_plugin_cpu_instrumentation.py similarity index 99% rename from tests/test_pytest_plugin_cpu_instrumentation.py rename to pytest-codspeed/tests/test_pytest_plugin_cpu_instrumentation.py index ee0eca8..20d1233 100644 --- a/tests/test_pytest_plugin_cpu_instrumentation.py +++ b/pytest-codspeed/tests/test_pytest_plugin_cpu_instrumentation.py @@ -9,7 +9,7 @@ skip_without_valgrind, ) -from pytest_codspeed.instruments import MeasurementMode +from codspeed.instruments import MeasurementMode @skip_without_valgrind diff --git a/tests/test_pytest_plugin_walltime.py b/pytest-codspeed/tests/test_pytest_plugin_walltime.py similarity index 97% rename from tests/test_pytest_plugin_walltime.py rename to pytest-codspeed/tests/test_pytest_plugin_walltime.py index 510ab30..4ff56bb 100644 --- a/tests/test_pytest_plugin_walltime.py +++ b/pytest-codspeed/tests/test_pytest_plugin_walltime.py @@ -1,7 +1,7 @@ import pytest from conftest import run_pytest_codspeed_with_mode -from pytest_codspeed.instruments import MeasurementMode +from codspeed.instruments import MeasurementMode def test_bench_enabled_header_with_perf( diff --git a/tests/test_utils.py b/pytest-codspeed/tests/test_utils.py similarity index 90% rename from tests/test_utils.py rename to pytest-codspeed/tests/test_utils.py index c9d2e01..4995e5a 100644 --- a/tests/test_utils.py +++ b/pytest-codspeed/tests/test_utils.py @@ -2,7 +2,8 @@ from contextlib import contextmanager from pathlib import Path -from pytest_codspeed.utils import get_git_relative_path, get_git_relative_uri_and_name +from codspeed.utils import get_git_relative_path +from pytest_codspeed.utils import get_git_relative_uri_and_name @contextmanager diff --git a/uv.lock 
b/uv.lock index 0deb95b..fb79c41 100644 --- a/uv.lock +++ b/uv.lock @@ -5,21 +5,34 @@ requires-python = ">=3.9" [manifest] members = [ "asv-codspeed", + "codspeed", "pytest-codspeed", ] +[manifest.dependency-groups] +dev = [ + { name = "asv-codspeed", editable = "asv-codspeed" }, + { name = "codspeed", editable = "codspeed" }, + { name = "mypy", specifier = "~=1.18.2" }, + { name = "pytest", specifier = "~=7.0" }, + { name = "pytest-codspeed", editable = "pytest-codspeed" }, + { name = "pytest-cov", specifier = "~=4.0.0" }, + { name = "pytest-test-groups", specifier = ">=1.1.0" }, + { name = "ruff", specifier = "~=0.11.12" }, +] + [[package]] name = "asv-codspeed" version = "0.1.0" source = { editable = "asv-codspeed" } dependencies = [ - { name = "pytest-codspeed" }, + { name = "codspeed" }, { name = "rich" }, ] [package.metadata] requires-dist = [ - { name = "pytest-codspeed", editable = "." }, + { name = "codspeed", editable = "codspeed" }, { name = "rich", specifier = ">=13.8.1" }, ] @@ -92,6 +105,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8c/52/b08750ce0bce45c143e1b5d7357ee8c55341b52bdef4b0f081af1eb248c2/cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", size = 181290, upload-time = "2024-09-04T20:45:20.226Z" }, ] +[[package]] +name = "codspeed" +version = "0.1.0" +source = { editable = "codspeed" } +dependencies = [ + { name = "cffi" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "rich" }, +] + +[package.metadata] +requires-dist = [ + { name = "cffi", specifier = ">=1.17.1" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'", specifier = ">=8.5.0" }, + { name = "rich", specifier = ">=13.8.1" }, +] + [[package]] name = "colorama" version = "0.4.6" @@ -381,12 +411,10 @@ wheels = [ [[package]] name = "pytest-codspeed" -source = { editable = "." } +source = { editable = "pytest-codspeed" } dependencies = [ - { name = "cffi" }, - { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "codspeed" }, { name = "pytest" }, - { name = "rich" }, ] [package.optional-dependencies] @@ -395,39 +423,15 @@ compat = [ { name = "pytest-xdist" }, ] -[package.dev-dependencies] -dev = [ - { name = "asv-codspeed" }, - { name = "mypy" }, - { name = "pytest" }, - { name = "pytest-codspeed" }, - { name = "pytest-cov" }, - { name = "pytest-test-groups" }, - { name = "ruff" }, -] - [package.metadata] requires-dist = [ - { name = "cffi", specifier = ">=1.17.1" }, - { name = "importlib-metadata", marker = "python_full_version < '3.10'", specifier = ">=8.5.0" }, + { name = "codspeed", editable = "codspeed" }, { name = "pytest", specifier = ">=3.8" }, { name = "pytest-benchmark", marker = "extra == 'compat'", specifier = "~=5.0.0" }, { name = "pytest-xdist", marker = "extra == 'compat'", specifier = "~=3.6.1" }, - { name = "rich", specifier = ">=13.8.1" }, ] provides-extras = ["compat"] -[package.metadata.requires-dev] -dev = [ - { name = "asv-codspeed", editable = "asv-codspeed" }, - { name = "mypy", specifier = "~=1.18.2" }, - { name = "pytest", specifier = "~=7.0" }, - { name = "pytest-codspeed", editable = "." 
},
-    { name = "pytest-cov", specifier = "~=4.0.0" },
-    { name = "pytest-test-groups", specifier = ">=1.1.0" },
-    { name = "ruff", specifier = "~=0.11.12" },
-]
-
 [[package]]
 name = "pytest-cov"
 version = "4.0.0"

From 422b1b7d3ab7b737a3a261478eece6f87f332ec8 Mon Sep 17 00:00:00 2001
From: Arthur Pastel
Date: Mon, 2 Feb 2026 22:56:17 +0000
Subject: [PATCH 3/5] fix: ignore TheAlgorithms submodule in CodSpeed benchmark workflow

The TheAlgorithms submodule contains test files that import numpy/requests
which aren't installed in CI, causing collection errors.

Co-Authored-By: Claude Opus 4.5
---
 .github/workflows/codspeed.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml
index f80b017..3787a9f 100644
--- a/.github/workflows/codspeed.yml
+++ b/.github/workflows/codspeed.yml
@@ -37,7 +37,7 @@ jobs:
         uses: CodSpeedHQ/action@main
         with:
           mode: ${{ matrix.mode }}
-          run: uv run pytest pytest-codspeed/tests/benchmarks/ --codspeed --test-group=${{ matrix.shard }} --test-group-count=${{ env.SHARDS }}
+          run: uv run pytest pytest-codspeed/tests/benchmarks/ --codspeed --ignore=pytest-codspeed/tests/benchmarks/TheAlgorithms --test-group=${{ matrix.shard }} --test-group-count=${{ env.SHARDS }}
           token: ${{ secrets.CODSPEED_TOKEN }}

   asv-benchmarks:

From 7f56309474ebe36e2dcbb6c0103ea4cd6d326a1b Mon Sep 17 00:00:00 2001
From: Arthur Pastel
Date: Mon, 2 Feb 2026 23:03:46 +0000
Subject: [PATCH 4/5] fix: resolve static analysis and mypy failures

- Add integration_name/integration_version params to Instrument base class
- Move type-only imports to TYPE_CHECKING blocks (TC001/TC003)
- Remove unused variables (F841)
- Fix line length violations (E501)
- Exclude TheAlgorithms submodule from ruff and mypy
- Apply ruff format to all files

Co-Authored-By: Claude Opus 4.5
---
 asv-codspeed/src/asv_codspeed/__main__.py     |  1 -
 asv-codspeed/src/asv_codspeed/discovery.py    | 13 ++++++-------
 asv-codspeed/src/asv_codspeed/runner.py       | 15 +++++++++------
 asv-codspeed/tests/test_cli.py                |  4 +---
 asv-codspeed/tests/test_discovery.py          |  5 +----
 asv-codspeed/tests/test_runner.py             |  4 +---
 codspeed/src/codspeed/config.py               |  1 -
 codspeed/src/codspeed/instruments/__init__.py |  7 ++++++-
 codspeed/src/codspeed/instruments/valgrind.py |  7 ++-----
 codspeed/src/codspeed/instruments/walltime.py |  4 +---
 pyproject.toml                                |  2 ++
 pytest-codspeed/src/pytest_codspeed/plugin.py | 11 ++++-------
 pytest-codspeed/src/pytest_codspeed/utils.py  |  2 --
 pytest-codspeed/tests/conftest.py             |  2 +-
 pytest-codspeed/tests/test_utils.py           |  3 ++-
 15 files changed, 36 insertions(+), 45 deletions(-)

diff --git a/asv-codspeed/src/asv_codspeed/__main__.py b/asv-codspeed/src/asv_codspeed/__main__.py
index 85fbe71..13573ed 100644
--- a/asv-codspeed/src/asv_codspeed/__main__.py
+++ b/asv-codspeed/src/asv_codspeed/__main__.py
@@ -6,7 +6,6 @@
 from asv_codspeed import __version__
 from asv_codspeed.runner import run_benchmarks
-
 from codspeed.instruments import MeasurementMode
diff --git a/asv-codspeed/src/asv_codspeed/discovery.py b/asv-codspeed/src/asv_codspeed/discovery.py
index 96cb7e0..ecbbe21 100644
--- a/asv-codspeed/src/asv_codspeed/discovery.py
+++ b/asv-codspeed/src/asv_codspeed/discovery.py
@@ -5,8 +5,11 @@
 import re
 import sys
 from dataclasses import dataclass, field
-from pathlib import Path
-from typing import Any, Callable
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from pathlib import Path
+    from typing import Any, Callable


 # ASV benchmark name patterns
@@ -84,9 +87,7 @@ def
_import_module_from_path(module_name: str, file_path: Path): return module -def _discover_from_module( - module, module_name: str -) -> list[ASVBenchmark]: +def _discover_from_module(module, module_name: str) -> list[ASVBenchmark]: """Discover benchmarks from a single module.""" benchmarks = [] @@ -207,8 +208,6 @@ def _create_benchmark( timeout = getattr(func, "timeout", getattr(source, "timeout", 60.0)) if params: - # For now, create one benchmark per param combo - benchmarks = [] param_combos = _expand_params(params) if len(param_combos) == 1: return ASVBenchmark( diff --git a/asv-codspeed/src/asv_codspeed/runner.py b/asv-codspeed/src/asv_codspeed/runner.py index 2c050aa..df1e413 100644 --- a/asv-codspeed/src/asv_codspeed/runner.py +++ b/asv-codspeed/src/asv_codspeed/runner.py @@ -6,17 +6,18 @@ import random from pathlib import Path from time import time -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING from asv_codspeed.discovery import discover_benchmarks - from codspeed.config import BenchmarkMarkerOptions, CodSpeedConfig -from codspeed.instruments import MeasurementMode, get_instrument_from_mode +from codspeed.instruments import get_instrument_from_mode from codspeed.utils import get_environment_metadata, get_git_relative_path if TYPE_CHECKING: + from typing import Any + from asv_codspeed.discovery import ASVBenchmark - from codspeed.instruments import Instrument + from codspeed.instruments import Instrument, MeasurementMode def _get_uri(benchmark_name: str, benchmark_dir: Path) -> str: @@ -140,8 +141,10 @@ def _report( instrument.print_benchmark_table() total = passed + failed - status = "passed" if failed == 0 else "with failures" - console.print(f"\n[bold]===== {total} benchmarked ({passed} passed, {failed} failed) =====[/bold]") + console.print( + f"\n[bold]===== {total} benchmarked " + f"({passed} passed, {failed} failed) =====[/bold]" + ) def _run_single_benchmark( diff --git a/asv-codspeed/tests/test_cli.py b/asv-codspeed/tests/test_cli.py index 5dfb6f4..613b0ee 100644 --- a/asv-codspeed/tests/test_cli.py +++ b/asv-codspeed/tests/test_cli.py @@ -1,12 +1,10 @@ """Tests for the asv-codspeed CLI.""" + from __future__ import annotations import json import subprocess import sys -from pathlib import Path - -import pytest def test_cli_version(): diff --git a/asv-codspeed/tests/test_discovery.py b/asv-codspeed/tests/test_discovery.py index c7c66c8..7eac5e7 100644 --- a/asv-codspeed/tests/test_discovery.py +++ b/asv-codspeed/tests/test_discovery.py @@ -1,9 +1,6 @@ """Tests for ASV benchmark discovery.""" -from __future__ import annotations - -from pathlib import Path -import pytest +from __future__ import annotations from asv_codspeed.discovery import discover_benchmarks diff --git a/asv-codspeed/tests/test_runner.py b/asv-codspeed/tests/test_runner.py index 6817704..104d60e 100644 --- a/asv-codspeed/tests/test_runner.py +++ b/asv-codspeed/tests/test_runner.py @@ -1,10 +1,8 @@ """Tests for the ASV benchmark runner.""" + from __future__ import annotations import json -from pathlib import Path - -import pytest from asv_codspeed.runner import run_benchmarks diff --git a/codspeed/src/codspeed/config.py b/codspeed/src/codspeed/config.py index 24765a0..4d2e49e 100644 --- a/codspeed/src/codspeed/config.py +++ b/codspeed/src/codspeed/config.py @@ -1,6 +1,5 @@ from __future__ import annotations -import dataclasses from dataclasses import dataclass, field from typing import TYPE_CHECKING, Generic, TypeVar diff --git a/codspeed/src/codspeed/instruments/__init__.py 
b/codspeed/src/codspeed/instruments/__init__.py index 8e411ce..3ca4076 100644 --- a/codspeed/src/codspeed/instruments/__init__.py +++ b/codspeed/src/codspeed/instruments/__init__.py @@ -19,7 +19,12 @@ class Instrument(metaclass=ABCMeta): instrument: ClassVar[str] @abstractmethod - def __init__(self, config: CodSpeedConfig): ... + def __init__( + self, + config: CodSpeedConfig, + integration_name: str = "pytest-codspeed", + integration_version: str = "0.0.0", + ): ... @abstractmethod def get_instrument_config_str_and_warns(self) -> tuple[str, list[str]]: ... diff --git a/codspeed/src/codspeed/instruments/valgrind.py b/codspeed/src/codspeed/instruments/valgrind.py index eff979d..f5d5aa0 100644 --- a/codspeed/src/codspeed/instruments/valgrind.py +++ b/codspeed/src/codspeed/instruments/valgrind.py @@ -11,9 +11,8 @@ if TYPE_CHECKING: from typing import Any, Callable - from codspeed.config import CodSpeedConfig, PedanticOptions + from codspeed.config import BenchmarkMarkerOptions, CodSpeedConfig, PedanticOptions from codspeed.instruments import P, T - from codspeed.config import BenchmarkMarkerOptions class ValgrindInstrument(Instrument): @@ -29,9 +28,7 @@ def __init__( self.benchmark_count = 0 try: self.instrument_hooks = InstrumentHooks() - self.instrument_hooks.set_integration( - integration_name, integration_version - ) + self.instrument_hooks.set_integration(integration_name, integration_version) except RuntimeError as e: if os.environ.get("CODSPEED_ENV") is not None: raise Exception( diff --git a/codspeed/src/codspeed/instruments/walltime.py b/codspeed/src/codspeed/instruments/walltime.py index 3797897..7f97991 100644 --- a/codspeed/src/codspeed/instruments/walltime.py +++ b/codspeed/src/codspeed/instruments/walltime.py @@ -163,9 +163,7 @@ def __init__( ) -> None: try: self.instrument_hooks = InstrumentHooks() - self.instrument_hooks.set_integration( - integration_name, integration_version - ) + self.instrument_hooks.set_integration(integration_name, integration_version) except RuntimeError as e: if os.environ.get("CODSPEED_ENV") is not None: warnings.warn( diff --git a/pyproject.toml b/pyproject.toml index 099ed53..8e27d2e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,9 +25,11 @@ dev = [ [tool.mypy] python_version = "3.12" +exclude = ["pytest-codspeed/tests/benchmarks/TheAlgorithms"] [tool.ruff] target-version = "py37" +extend-exclude = ["pytest-codspeed/tests/benchmarks/TheAlgorithms"] [tool.ruff.lint] select = ["E", "F", "I", "C", "TCH", "FA", "UP"] diff --git a/pytest-codspeed/src/pytest_codspeed/plugin.py b/pytest-codspeed/src/pytest_codspeed/plugin.py index 62d4902..6644ff1 100644 --- a/pytest-codspeed/src/pytest_codspeed/plugin.py +++ b/pytest-codspeed/src/pytest_codspeed/plugin.py @@ -20,7 +20,6 @@ PedanticOptions, ) from codspeed.instruments import MeasurementMode, get_instrument_from_mode - from pytest_codspeed.utils import ( BEFORE_PYTEST_8_1_1, IS_PYTEST_BENCHMARK_INSTALLED, @@ -42,15 +41,13 @@ def _marker_options_from_pytest_item(item: pytest.Item) -> BenchmarkMarkerOptions: """Extract BenchmarkMarkerOptions from a pytest item's markers.""" - marker = item.get_closest_marker( - "codspeed_benchmark" - ) or item.get_closest_marker("benchmark") + marker = item.get_closest_marker("codspeed_benchmark") or item.get_closest_marker( + "benchmark" + ) if marker is None: return BenchmarkMarkerOptions() if len(marker.args) > 0: - raise ValueError( - "Positional arguments are not allowed in the benchmark marker" - ) + raise ValueError("Positional arguments are not allowed in the 
benchmark marker")
     kwargs = marker.kwargs

     unknown_kwargs = set(kwargs.keys()) - {
diff --git a/pytest-codspeed/src/pytest_codspeed/utils.py b/pytest-codspeed/src/pytest_codspeed/utils.py
index 1b1b164..469193f 100644
--- a/pytest-codspeed/src/pytest_codspeed/utils.py
+++ b/pytest-codspeed/src/pytest_codspeed/utils.py
@@ -7,10 +7,8 @@
 from codspeed.utils import get_environment_metadata as _get_environment_metadata
 from codspeed.utils import get_git_relative_path
-
 from pytest_codspeed import __semver_version__
-
 IS_PYTEST_BENCHMARK_INSTALLED = importlib.util.find_spec("pytest_benchmark") is not None
 IS_PYTEST_SPEED_INSTALLED = importlib.util.find_spec("pytest_speed") is not None
 BEFORE_PYTEST_8_1_1 = pytest.version_tuple < (8, 1, 1)
diff --git a/pytest-codspeed/tests/conftest.py b/pytest-codspeed/tests/conftest.py
index 9aa4c60..2548ec8 100644
--- a/pytest-codspeed/tests/conftest.py
+++ b/pytest-codspeed/tests/conftest.py
@@ -8,9 +8,9 @@
 from typing import TYPE_CHECKING

 import pytest
+from pytest_codspeed.utils import IS_PYTEST_BENCHMARK_INSTALLED

 from codspeed.instruments import MeasurementMode
-from pytest_codspeed.utils import IS_PYTEST_BENCHMARK_INSTALLED

 if TYPE_CHECKING:
     from _pytest.pytester import RunResult
diff --git a/pytest-codspeed/tests/test_utils.py b/pytest-codspeed/tests/test_utils.py
index 4995e5a..6945dcc 100644
--- a/pytest-codspeed/tests/test_utils.py
+++ b/pytest-codspeed/tests/test_utils.py
@@ -2,9 +2,10 @@
 from contextlib import contextmanager
 from pathlib import Path

-from codspeed.utils import get_git_relative_path
 from pytest_codspeed.utils import get_git_relative_uri_and_name

+from codspeed.utils import get_git_relative_path
+

 @contextmanager
 def TemporaryGitRepo():

From 47a9500078858b0eaeb3c8c02ea74c19505c2d1f Mon Sep 17 00:00:00 2001
From: Arthur Pastel
Date: Mon, 2 Feb 2026 23:11:38 +0000
Subject: [PATCH 5/5] fix: remove unnecessary ruff/mypy excludes for TheAlgorithms

Pre-commit naturally skips git submodule files, so explicit exclude config
is not needed.

Co-Authored-By: Claude Opus 4.5
---
 pyproject.toml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 8e27d2e..099ed53 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -25,11 +25,9 @@ dev = [

 [tool.mypy]
 python_version = "3.12"
-exclude = ["pytest-codspeed/tests/benchmarks/TheAlgorithms"]

 [tool.ruff]
 target-version = "py37"
-extend-exclude = ["pytest-codspeed/tests/benchmarks/TheAlgorithms"]

 [tool.ruff.lint]
 select = ["E", "F", "I", "C", "TCH", "FA", "UP"]
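
Note (illustrative only, not part of the patch series): after this series the
shared `codspeed` workspace package carries the instrumentation core, and both
pytest-codspeed and asv-codspeed drive it the same way. The sketch below
mirrors what asv_codspeed/runner.py and the pytest plugin now do. It assumes
the `codspeed` package from this series is installed; the warmup value and the
results.json path are hypothetical placeholders, and the actual benchmark
measurement calls are elided.

    import json
    from pathlib import Path

    from codspeed import __semver_version__ as codspeed_version
    from codspeed.config import CodSpeedConfig
    from codspeed.instruments import MeasurementMode, get_instrument_from_mode
    from codspeed.utils import get_environment_metadata

    # Instrumentation settings; durations are expressed in nanoseconds.
    config = CodSpeedConfig(warmup_time_ns=1_000_000_000, max_time_ns=None, max_rounds=None)

    # Pick the instrument for the desired mode and identify the integration,
    # as both the pytest plugin and the asv-codspeed runner now do.
    instrument_cls = get_instrument_from_mode(MeasurementMode.WallTime)
    instrument = instrument_cls(
        config,
        integration_name="pytest-codspeed",  # name reported to the CodSpeed platform
        integration_version=codspeed_version,
    )

    # Describe the active configuration and surface any warnings.
    config_str, warns = instrument.get_instrument_config_str_and_warns()
    print(config_str)
    for w in warns:
        print(w)

    # ... run benchmark callables through the instrument here ...

    # Serialize results in the shape the CodSpeed platform already consumes.
    data = {
        **get_environment_metadata("pytest-codspeed", codspeed_version),
        **instrument.get_result_dict(),
    }
    Path("results.json").write_text(json.dumps(data, indent=2))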