diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a402b70..dc7b06c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -46,8 +46,11 @@ jobs: with: python-version: "3.12" + - name: Install dependencies + run: uv sync --all-extras --group dev + - name: Type check - run: uvx mypy --strict protest + run: uv run mypy protest test: needs: lint @@ -70,6 +73,11 @@ jobs: - os: windows-latest python-version: "3.12" runs-on: ${{ matrix.os }} + env: + # Force uv to honor the matrix Python version. Without this, uv picks + # the newest interpreter satisfying `requires-python` (often the system + # 3.12), making the matrix cosmetic. + UV_PYTHON: ${{ matrix.python-version }} steps: - uses: actions/checkout@v6 @@ -87,6 +95,9 @@ jobs: - name: Install dependencies run: uv sync --dev + - name: Verify Python version + run: uv run python -c "import sys; v = '${{ matrix.python-version }}'; assert sys.version.startswith(v), f'expected {v}, got {sys.version}'" + - name: Run tests if: matrix.os != 'ubuntu-latest' || matrix.python-version != '3.12' run: uv run pytest -vv @@ -103,7 +114,7 @@ jobs: files: coverage.xml fail_ci_if_error: false -c docs: + docs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 diff --git a/README.md b/README.md index 32af39d..41b04c2 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,24 @@ CODES = ForEach([200, 201]) def test_status(code: Annotated[int, From(CODES)]): ... ``` +### Native LLM Evals + +Score model outputs alongside your tests — same fixtures, same parallelism, same `protest` CLI. Cases get pass/fail + numeric metrics, persisted to JSONL for run-over-run comparison. + +```python +@chatbot_suite.eval(evaluators=[contains_keywords(keywords=["paris"])]) +async def chatbot(case: Annotated[EvalCase, From(cases)]) -> str: + return await my_agent(case.inputs) +``` + +```bash +protest eval evals.session:session +protest history --runs # recent runs +protest history --compare # current vs previous +``` + +See [Evals docs](https://renaudcepre.github.io/protest/evals/) for evaluators, judges, history tracking. + --- ## Quick Start @@ -120,6 +138,7 @@ protest run module:session --ctrf-output r.json # CTRF report for CI/CD - **Plugin system** - Custom reporters, filters - **Last-failed mode** - Re-run only failed tests with `--lf` - **CTRF reports** - Standardized JSON for CI/CD integration +- **Native LLM evals** - Scored cases, JSONL history, `protest eval` (see [evals docs](https://renaudcepre.github.io/protest/evals/)) ## Why Not pytest? diff --git a/docs/cli.md b/docs/cli.md index 7495ae5..910701d 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -13,6 +13,8 @@ protest [options] | Command | Description | |---------|-------------| | `run` | Run tests | +| `eval` | Run evaluations | +| `history` | Browse run history (tests and evals) | | `live` | Start live reporter server | | `tags list` | List tags in a session | @@ -276,6 +278,175 @@ protest run tests:session --- +## protest eval + +Run evaluations from a session. + +`protest eval` is the eval-suite counterpart of `protest run`. It shares +the same target format, filters, capture flags and reporting options as +`run`; the differences are listed below. 
+
+### Syntax
+
+```bash
+protest eval <target> [options]
+```
+
+### Options
+
+`protest eval` accepts every option from `protest run` (see above:
+`-n/--concurrency`, `--collect-only`, `-x/--exitfirst`, `-s/--no-capture`,
+`-q/--quiet`, `-v/--verbose`, `--show-logs`, `-t/--tag`, `--no-tag`,
+`-k/--keyword`, `--lf`, `--cache-clear`, `--no-color`, `--ctrf-output`,
+`--no-log-file`, `--app-dir`), plus one eval-only flag:
+
+| Option | Description | Default |
+|--------|-------------|---------|
+| `--show-output` | Print `inputs` / `output` / `expected` for **every** case (failed cases always print these). | off |
+
+### Examples
+
+```bash
+# Run all evals in a session
+protest eval evals.session:session
+
+# One specific suite
+protest eval evals.session:session::helpdesk_struct
+
+# One ticket by name
+protest eval evals.session:session -k T001
+
+# All cases tagged "cat:hardware"
+protest eval evals.session:session --tag cat:hardware
+
+# Re-run only the cases that failed last time
+protest eval evals.session:session --lf
+
+# Show the input/output of every case (not just failures)
+protest eval evals.session:session --show-output
+```
+
+### Output
+
+Each case prints one line:
+
+```
+✓ classify_ticket_struct[T011] (2ms) category_is_allowed=✓ summary_keyword_recall=1.00 …
+```
+
+After every suite, an aggregate-stats table summarizes the `Metric`
+fields across cases (mean / p50 / p5 / p95). `Verdict` and `Reason`
+fields don't appear in this table — only numeric `Metric` fields do.
+
+Per-case markdown artifacts are written to
+`.protest/results/<suite>_<timestamp>/<case>.md`, with the full
+input, output, expected, and per-evaluator scores.
+
+---
+
+## protest history
+
+Browse persisted run history (tests and evals).
+
+Every run appends one entry to `.protest/history.jsonl`; `protest history`
+queries that file via sub-commands.
+
+### Syntax
+
+```bash
+protest history [sub-command] [filters]
+```
+
+If no sub-command is given, `list` runs by default — so
+`protest history --tail 5` is equivalent to
+`protest history list --tail 5`.
+
+### Sub-commands
+
+| Sub-command | Description |
+|-------------|-------------|
+| `list` | Per-suite trend table: pass-rate trend + score arrows. **Default** when no sub-command is given. |
+| `runs` | Run-by-run pass rates, most recent first. |
+| `show [N]` | Detailed panel for the Nth most recent run (`N=0` = latest, the default). |
+| `compare` | Compare the two most recent runs of the same model. |
+| `clean` | Remove entries from runs made on a dirty working tree. **Dry-run by default** — pass `--apply` to actually modify the file. |
+
+### Filters (shared by every sub-command)
+
+| Flag | Description | Default |
+|------|-------------|---------|
+| `--tail N`, `-n N` | Limit to the N most recent entries | 10 |
+| `--evals` | Show eval runs only | _all kinds_ |
+| `--tests` | Show test runs only | _all kinds_ |
+| `--model NAME` | Keep only suites whose `ModelLabel.name` matches | _all_ |
+| `--suite NAME` | Keep only the suite with this name | _all_ |
+| `--path DIR` | Use a custom history directory | `.protest/` |
+
+`--model` and `--suite` filter at the **suite level**: a run that
+contains *several* suites with different models keeps the entry alive,
+with non-matching suites pruned out of the displayed view.
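+
+For orientation, each line of `.protest/history.jsonl` is one run entry. The exact
+schema is an implementation detail and may evolve; the sketch below is only the
+approximate shape (fields abridged) that the filters operate on — `--model` and
+`--suite` match against the per-suite `model` label and the suite key:
+
+```json
+{
+  "timestamp": "2026-03-28T09:14:02",
+  "git": {"commit_short": "cb6f7bc", "branch": "main"},
+  "suites": {
+    "chatbot": {
+      "model": "qwen-2.5",
+      "passed": 10,
+      "total_cases": 21,
+      "cases": {"lookup": {"passed": true, "duration": 1.2}}
+    }
+  }
+}
+```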
+ +### Reading `--compare` + +`--compare` reports four kinds of change between the two most recent +runs of the same model: + +| Marker | Label | Meaning | +|--------|-------|---------| +| `+` | Fixed | Case was failing in the previous run, passes now | +| `-` | Regressions | Case was passing in the previous run, fails now | +| `⟳` | Modified | Case is recognizable (same name) but its content changed | +| `*` | New | Case did not exist in the previous run | +| `✗` | Deleted | Case existed in the previous run, gone now | + +The `Modified` line tells you **what** changed by suffixing the case +name: + +- `T001 (case modified)` — `inputs` or `expected` changed (`case_hash` + diff) +- `T001 (scoring modified)` — only the evaluator configuration changed + (`eval_hash` diff). Inputs and expected output are intact; you've + edited an evaluator or its parameters. + +### Examples + +```bash +# Per-suite trend across last 10 eval runs (default sub-command: list) +protest history --evals + +# Run-by-run breakdown of the last 5 eval runs +protest history runs --evals --tail 5 + +# Detailed panel for the most recent eval run +protest history show --evals + +# Detailed panel for the run before that (1 = next-most-recent) +protest history show 1 --evals + +# Compare the two most recent runs of the same model +protest history compare --evals + +# Filter to one model — only suites with this model are shown +protest history list --evals --model qwen-2.5 + +# Preview which entries `clean` would remove (no file changes) +protest history clean --evals + +# Actually remove dirty entries +protest history clean --apply +``` + +### Notes + +- When the project is not a git repo, the per-run commit / dirty + columns display `?`. `clean` is a no-op in that case. +- `--evals` and `--tests` are mutually exclusive; omit both to see + every kind. +- Per-case detail (input, output, expected, evaluator scores) lives in + `.protest/results/`, not in the history file. + +--- + ## protest live Start a persistent live reporter server for real-time test visualization. diff --git a/docs/core-concepts/console.md b/docs/core-concepts/console.md new file mode 100644 index 0000000..b172246 --- /dev/null +++ b/docs/core-concepts/console.md @@ -0,0 +1,49 @@ +# Console Output + +Print progress and debug messages that bypass test capture. + +## The Problem + +`print()` inside tests and fixtures is captured by ProTest. During long-running fixtures (pipeline imports, graph seeding), there's no visible feedback. + +## `console.print` + +```python +from protest import console + +@fixture() +async def pipeline(): + for i, scene in enumerate(scenes): + console.print(f"[cyan]pipeline:[/] importing {scene.name} ({i+1}/{len(scenes)})") + await import_scene(scene) + return driver +``` + +Messages appear inline in the reporter output, between test results. + +## Rich Markup + +`console.print` supports Rich markup. The Rich reporter renders colors; the ASCII reporter strips tags. + +```python +console.print(f"[bold green]done[/] in {duration:.1f}s") +console.print(f"[yellow]warning:[/] slow query ({elapsed:.2f}s)") +``` + +## Raw Mode + +Skip markup processing with `raw=True`: + +```python +console.print("debug: raw bytes here", raw=True) +``` + +The message is passed as-is to both reporters. + +## How It Works + +`console.print` sends a `USER_PRINT` event through the event bus. The reporter receives it and writes to the real stdout (bypassing test capture). 
This means: + +- Messages appear immediately, not buffered until test end +- Works with `-n 4` (concurrent tests) — the event bus serializes per plugin +- No interference with test capture or `result.output` diff --git a/docs/core-concepts/dependency-injection.md b/docs/core-concepts/dependency-injection.md index 2aba9c2..3e3d3e3 100644 --- a/docs/core-concepts/dependency-injection.md +++ b/docs/core-concepts/dependency-injection.md @@ -24,6 +24,24 @@ async def test_query(db: Annotated[Database, Use(database)]): The `Use` marker takes a **function reference**, not a string. This makes dependencies explicit and enables IDE navigation. +### `Type` is a hint, not a runtime check + +In `Annotated[Type, Use(fixture)]`, `Type` is a **type hint for your IDE and static checkers** — ProTest does not validate at runtime that `fixture()` actually returns a `Type`. This matches FastAPI's behavior with `Annotated[Type, Depends(fn)]`: the type is taken on faith, not enforced. + +```python +@fixture() +def returns_str() -> str: + return "hello" + +@session.test() +def test_mismatch(value: Annotated[int, Use(returns_str)]): + # `value` is actually a `str` at runtime — ProTest will not warn. + # The mismatch surfaces only when `value` is used as an `int`. + ... +``` + +In practice this is rarely a problem: keep your fixture return types and your call-site annotations aligned, and rely on `mypy`/`pyright` for the static check on the fixture itself. + ## Why Function References? Using function references instead of string names has benefits: diff --git a/docs/evals.md b/docs/evals.md new file mode 100644 index 0000000..4e22920 --- /dev/null +++ b/docs/evals.md @@ -0,0 +1,698 @@ +# Evals + +Evaluate LLM outputs with scored metrics and historical tracking. + +## Contents + +- [What is an Eval?](#what-is-an-eval) +- [Quick Start](#quick-start) +- [How It Works](#how-it-works) +- [EvalSuite](#evalsuite) +- [EvalCase](#evalcase) +- [Evaluators](#evaluators) +- [Fixtures](#fixtures) +- [ModelLabel](#modelinfo) +- [Judge](#judge) +- [TaskResult (SUT Usage Tracking)](#taskresult-sut-usage-tracking) +- [Usage Display](#usage-display) +- [Evaluator Errors](#evaluator-errors) +- [Name Collisions](#name-collisions) +- [Multi-Model Sessions](#multi-model-sessions) +- [CLI](#cli) +- [Output](#output) +- [History](#history) +- [Progress Output](#progress-output) + +## What is an Eval? + +A test produces **pass/fail**. An eval produces **scores** — numeric values (0.0–1.0) that measure output quality. Scores are aggregated across cases, tracked over time, and compared between runs. + +ProTest evals use the same infrastructure as tests: fixtures, DI, parallelism, tags. An eval is a test that returns a value, scored by evaluators. + +!!! tip "First-run expectations: don't expect 100% green" + + Unlike tests, evals are **expected to have failing cases** — that's + the signal you're measuring. `protest eval` still exits 1 when any + case fails a `Verdict` (so CI surfaces regressions), but the + failures are not bugs, they're data points. The aggregate-stats + table and `protest history` are designed for this — you watch the + metrics drift over time, and use `--compare` to flag actual + regressions between runs. If you want a CI gate that only fails on + infrastructure errors (fixture / evaluator crashes) and not on + case-level scoring, run `protest eval || true` followed by + `protest history --compare` to assert no regression. 
+ +## Quick Start + +```python +# evals/session.py +from typing import Annotated + +from protest import ForEach, From, ProTestSession +from protest.evals import EvalCase, ModelLabel, evaluator +from protest.evals.evaluators import contains_keywords +from protest.evals import EvalSuite + +cases = ForEach([ + EvalCase(inputs="Who is Marie?", expected="Marie, Resistance", name="lookup"), + EvalCase(inputs="What is 2+2?", expected="4", name="math"), +]) + +session = ProTestSession() + +chatbot_suite = EvalSuite("chatbot", model=ModelLabel(name="gpt-4o-mini")) +session.add_suite(chatbot_suite) + +@chatbot_suite.eval(evaluators=[contains_keywords(keywords=["Marie"])]) +async def chatbot(case: Annotated[EvalCase, From(cases)]) -> str: + return await my_agent(case.inputs) +``` + +```bash +protest eval evals.session:session +``` + +## How It Works + +`@suite.eval()` wraps a function to run evaluators on its return value: + +1. Your function receives case data via `ForEach`/`From` (same as parameterized tests) +2. It returns the output (string, object, anything) +3. ProTest passes the output to evaluators → scores +4. Bool verdicts determine pass/fail +5. Aggregated stats appear in the terminal + +The rest of the pipeline — fixtures, DI, parallelism, reporters — works identically to tests. + +## EvalSuite + +`EvalSuite` groups eval cases. It's the eval equivalent of `ProTestSuite` — it forces `kind=EVAL` and carries model/judge configuration. Model and judge are suite-level config: each suite declares which model produced its results and which judge scores them. + +```python +from protest.evals import EvalSuite +from protest.evals import ModelLabel + +chatbot_suite = EvalSuite("chatbot", model=ModelLabel(name="gpt-4o-mini")) +session.add_suite(chatbot_suite) + +@chatbot_suite.eval(evaluators=[my_scorer]) +async def chatbot(case: Annotated[EvalCase, From(cases)]) -> str: + return await my_agent(case.inputs) +``` + +## EvalCase + +Typed dataclass for eval case data. All eval cases **must** use `EvalCase` — plain dicts are not supported. + +```python +from protest.evals import EvalCase + +cases = ForEach([ + EvalCase(inputs="What is 2+2?", expected="4", name="math"), + EvalCase(inputs="Who is Napoleon?", expected="emperor, France", name="history"), +]) +``` + +| Field | Type | Description | +|-------|------|-------------| +| `inputs` | `Any` | Input to your task function | +| `expected` | `Any` | Expected output (passed to evaluators as `ctx.expected_output`) | +| `name` | `str` | Case identifier (used in test IDs and history) | +| `evaluators` | `list` | Per-case evaluators (added to suite-level ones) | +| `tags` | `list[str]` | First-class tags — flow to `protest eval --tag …` (see below) | +| `metadata` | `dict` | Arbitrary metadata, opaque to the framework | + +### Why `EvalCase` and not a dict? + +The runtime reads case data via attribute access (`case.expected`, `case.metadata`, `case.evaluators`), not by string key. A plain dict would compile fine but blow up at runtime, and you'd lose the IDE refactor/Ctrl+Click affordances. Making `EvalCase` a typed dataclass surfaces typos at import time and keeps the contract one obvious place — same trade-off as `Annotated[T, Use(fn)]` over pytest's name-based fixture lookup. + +### Per-case `tags` + +`EvalCase.tags` is a first-class field. Tags flow through the test collector and become first-class on the resulting `TestItem`, so `protest eval --tag slow` works out of the box. Use `metadata` for any other free-form annotation the framework should ignore. 
+ +```python +EvalCase( + inputs="Long doc to summarize…", + expected="…", + name="long_doc_case", + tags=["slow", "summarization"], + metadata={"source_dataset": "v3"}, # opaque to the framework +) +``` + +```bash +protest eval evals.session:session --tag slow +protest eval evals.session:session --no-tag slow +``` + +## Evaluators + +An evaluator is a function decorated with `@evaluator` that receives an `EvalContext` and returns a verdict. The decorator is mandatory: passing a plain function in `evaluators=[...]` raises `TypeError` at registration. The wrapping is what gives the evaluator its identity (used for hashing, history, reporting) and a typed `run(ctx)` method — there's no implicit conversion. + +!!! info "If your eval task returns a non-string output" + + The built-in evaluators (`contains_keywords`, `not_empty`, `max_length`, + `matches_regex`, `json_valid`, `word_overlap`) assume `ctx.output` is a + string and call methods like `.lower()` on it. They drop in cleanly for + summarization, chatbot replies, single-string completions, etc. + + For a structured output (`dict`, `dataclass`, `pydantic.BaseModel`, list + of objects, …), the path is to write **custom evaluators** that + pick the field they care about. A typical pattern: + + ```python + @evaluator + def category_matches_expected(ctx: EvalContext) -> CategoryMatch: + expected = (ctx.expected_output or {}).get("category") + actual = ctx.output.get("category") + return CategoryMatch(category_matches=(expected == actual), ...) + ``` + + See *Structured Evaluator* below and *EvalContext* for the data + you can read off `ctx`. + +### Return Types + +Evaluators return `bool` (simple verdict) or a `dataclass` (structured result). In dataclasses, annotate fields to tell the framework what each one is: + +```python +from typing import Annotated +from protest.evals import Metric, Verdict, Reason +``` + +| Annotation | Role | +|------------|------| +| `Annotated[bool, Verdict]` | Verdict — pass/fail (`all(verdicts)`) | +| `Annotated[float, Metric]` | Metric — aggregated in stats (mean/p50/p95) | +| `Annotated[int, Metric]` | Metric — converted to float | +| `Annotated[str, Reason]` | Reason — displayed on failure, stored in history | + +Unannotated fields are ignored by the runner — free metadata. + +Returning `float`, `dict`, or any other non-dataclass/non-bool type raises `TypeError`. + +### Tracking-Only Evaluators + +A dataclass with `Metric` fields but no `Verdict` is tracking-only. The case always passes for this evaluator — it measures without gating. + +```python +@dataclass +class OverlapMetrics: + overlap: Annotated[float, Metric] + +@evaluator +def word_overlap(ctx: EvalContext) -> OverlapMetrics: + ... 
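+    # Sketch of a possible body (illustrative only — the built-in
+    # `word_overlap` lives in protest.evals.evaluators and may differ):
+    # expected = set(str(ctx.expected_output or "").lower().split())
+    # got = set(str(ctx.output).lower().split())
+    # return OverlapMetrics(overlap=len(expected & got) / len(expected) if expected else 0.0)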
+``` + +In the terminal, tracking evaluators show with `·` instead of `✓`/`✗`: + +``` +✓ chatbot[lookup] (1.2s) keyword_recall=0.95 all_present=✓ +· chatbot[lookup] overlap=0.80 +``` + +### Simple Evaluator + +```python +@evaluator +def not_empty(ctx: EvalContext) -> bool: + return bool(ctx.output.strip()) +``` + +### Structured Evaluator + +```python +from dataclasses import dataclass +from typing import Annotated +from protest.evals import Metric, Verdict, Reason + +@dataclass +class KeywordScores: + keyword_recall: Annotated[float, Metric] + all_present: Annotated[bool, Verdict] + detail: Annotated[str, Reason] = "" + +@evaluator +def keyword_check(ctx: EvalContext, keywords: list[str], min_recall: float = 0.5) -> KeywordScores: + found = [k for k in keywords if k.lower() in ctx.output.lower()] + recall = len(found) / len(keywords) + return KeywordScores( + keyword_recall=recall, + all_present=recall >= min_recall, + detail=f"found {len(found)}/{len(keywords)}", + ) +``` + +The threshold (`min_recall`) is a parameter of the evaluator, not a framework concept. The evaluator decides the verdict. + +### Async (LLM Judge) + +Use `ctx.judge()` for structured LLM evaluation (requires `judge=` on `EvalSuite`): + +```python +@dataclass +class JudgeResult: + accuracy: Annotated[float, Metric] + accurate_enough: Annotated[bool, Verdict] + reason: Annotated[str, Reason] = "" + +@evaluator +async def llm_judge(ctx: EvalContext, rubric: str = "", min_score: float = 0.7) -> JudgeResult: + return await ctx.judge( + f"Evaluate this response on a 0-1 scale.\n\n" + f"Response: {ctx.output}\nCriteria: {rubric}", + JudgeResult, + ) +``` + +The judge handles structured output — no text parsing needed. See [Judge](#judge) for setup. + +### Per-Case Thresholds + +Different thresholds per case = different evaluator bindings: + +```python +EvalCase(name="easy_lookup", inputs="easy lookup", evaluators=[keyword_check(keywords=["paris"], min_recall=0.9)]), +EvalCase(name="hard_causal", inputs="hard causal", evaluators=[keyword_check(keywords=["paris"], min_recall=0.3)]), +``` + +### ShortCircuit + +Skip expensive evaluators (LLM judges) when cheap ones already fail: + +```python +from protest.evals import ShortCircuit + +evaluators=[ + not_empty, # always runs + ShortCircuit([ + contains_keywords(keywords=["paris"], min_recall=0.5), # 0ms — if fail → stop + llm_judge(rubric="factual accuracy"), # 3s — skipped if above fails + ]), +] +``` + +`ShortCircuit` is a group of ordered evaluators. The first `Verdict=False` stops the group. Evaluators outside the `ShortCircuit` always run. + +Execution order — `evaluators=[a, ShortCircuit([b, c]), d]`: + +``` +a ← always runs +├─ pass → continue +└─ fail → continue (a is outside the group, doesn't gate b/c) + +[ShortCircuit group ──────────────────────────────────┐ + b ← always runs (first in group) │ + ├─ pass → c │ + └─ fail → c skipped (Verdict=False stops group) │ + c ← runs only if b passed │ +└─────────────────────────────────────────────────────┘ + +d ← always runs (outside the group) +``` + +The list `evaluators=[…]` is sequential at the top level; a `ShortCircuit` is just a sub-group that may stop early. Use it to gate expensive evaluators (LLM judges) behind cheap ones (keyword/regex checks). 
+ +### Using Evaluators + +```python +# No params → use directly +evaluators=[not_empty] + +# With params → call to bind +evaluators=[contains_keywords(keywords=["python", "async"], min_recall=0.75)] + +# Per-case evaluators (added to suite-level) +EvalCase(name="factual_accuracy_case", inputs="...", evaluators=[llm_judge(rubric="Check factual accuracy")]) +``` + +### EvalContext + +| Field / Method | Type | Description | +|----------------|------|-------------| +| `name` | `str` | Case name | +| `inputs` | `I` | Case inputs | +| `output` | `O` | Task return value | +| `expected_output` | `O \| None` | From `EvalCase.expected` | +| `metadata` | `Any` | From `EvalCase.metadata` | +| `duration` | `float` | Task execution time (seconds) | +| `judge(prompt, type)` | `async` | Call the configured LLM judge (see [Judge](#judge)) | +| `judge_call_count` | `int` | Number of judge calls made | + +### Built-in Evaluators + +| Evaluator | Params | Returns | +|-----------|--------|---------| +| `contains_keywords` | `keywords, min_recall=1.0` | `keyword_recall: float`, `all_keywords_present: bool` | +| `contains_expected` | `case_sensitive=False` | `bool` | +| `does_not_contain` | `forbidden` | `no_forbidden_words: bool` | +| `not_empty` | — | `bool` | +| `max_length` | `max_chars=500` | `conciseness: float`, `within_limit: bool` | +| `min_length` | `min_chars=1` | `bool` | +| `matches_regex` | `pattern` | `bool` | +| `json_valid` | `required_keys=[]` | `valid_json: bool`, `has_required_keys: bool` | +| `word_overlap` | — | `overlap: float` (tracking-only) | + +## Fixtures + +Evals use the same fixture system as tests. Expensive setup (database, pipeline, graph) runs once and is shared across all cases. + +```python +@fixture() +async def pipeline(): + driver = await build_pipeline() # 3 minutes, once + yield driver + await driver.close() + +session.bind(pipeline) + +pipeline_suite = EvalSuite("pipeline") +session.add_suite(pipeline_suite) + +@pipeline_suite.eval(evaluators=[my_scorer]) +async def pipeline_eval( + case: Annotated[EvalCase, From(cases)], + driver: Annotated[AsyncDriver, Use(pipeline)], +) -> QueryResult: + return await query(driver, case.inputs) +``` + +## ModelLabel + +`ModelLabel` is a **passive label** that ProTest stores in the history alongside each run, so you can attribute results to a specific model and compare runs side-by-side. It does not route requests, set a temperature, pick a provider, or otherwise touch any LLM — the actual model wiring happens inside *your* task function (or the agent / SDK it calls). + +```python +suite = EvalSuite("pipeline", model=ModelLabel(name="qwen-2.5")) +``` + +## Judge + +A `Judge` is a protocol for LLM-as-judge evaluators. ProTest owns the interface — you plug in your LLM library. + +### The Protocol + +```python +class Judge(Protocol): + async def judge(self, prompt: str, output_type: type[T]) -> JudgeResponse[T]: ... +``` + +Minimal contract: takes a prompt and a return type, returns a `JudgeResponse` wrapping the typed result with optional usage stats. All configuration (model, temperature, system prompt, max_tokens) lives in your implementation's constructor, not in the protocol. 
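+
+Any object with that single async method satisfies the protocol — it does not have
+to wrap a real LLM. As a minimal sketch (the class name below is illustrative, not
+part of ProTest), a no-network judge that always passes boolean checks can be handy
+for smoke-testing eval wiring without an API key:
+
+```python
+from typing import TypeVar, cast
+
+from protest.evals import JudgeResponse
+
+T = TypeVar("T")
+
+
+class AlwaysPassJudge:
+    """Hypothetical stand-in judge: deterministic, no API calls."""
+
+    name = "always-pass"  # shows up in history as the judge name
+
+    async def judge(self, prompt: str, output_type: type[T]) -> JudgeResponse[T]:
+        if output_type is bool:
+            # Every simple bool judge call passes.
+            return cast("JudgeResponse[T]", JudgeResponse(output=True))
+        raise NotImplementedError("extend this stub for structured output types")
+```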
+ +### Writing a Judge + +The `judge()` method returns a `JudgeResponse[T]` that wraps the output with optional usage stats: + +```python +from pydantic_ai import Agent +from protest.evals import JudgeResponse + +class PydanticAIJudge: + name = "gpt-4o-mini" # used in history + provider = "openai" # optional, used in history + + def __init__(self, model: str = "gpt-4o-mini", temperature: float = 0): + self.model = model + self.temperature = temperature + + async def judge(self, prompt: str, output_type: type[T]) -> JudgeResponse[T]: + agent = Agent(self.model, output_type=output_type) + result = await agent.run(prompt) + usage = result.usage() + return JudgeResponse( + output=result.output, + input_tokens=usage.request_tokens, + output_tokens=usage.response_tokens, + cost=usage.request_tokens * 0.15/1e6 + usage.response_tokens * 0.60/1e6, + ) +``` + +Tokens and cost are optional — omit them if your provider doesn't expose usage data: + +```python +return JudgeResponse(output=result.output) # tokens/cost = None, that's fine +``` + +### Configuring the Judge + +```python +suite = EvalSuite( + "pipeline", + model=ModelLabel(name="qwen-2.5"), + judge=PydanticAIJudge(model="gpt-4o-mini", temperature=0), +) +``` + +`JudgeInfo` (name, provider) is derived automatically from the instance for history tracking. + +### Using the Judge in Evaluators + +Evaluators access the judge via `ctx.judge()`: + +```python +@dataclass +class JudgeResult: + accurate: Annotated[bool, Verdict] + reason: Annotated[str, Reason] = "" + +@evaluator +async def llm_rubric(ctx: EvalContext, rubric: str = "") -> JudgeResult: + return await ctx.judge( + f"Evaluate this response.\n\nResponse: {ctx.output}\nCriteria: {rubric}", + JudgeResult, # structured output — no text parsing + ) +``` + +For simple verdicts, use `bool` or `str` as `output_type`: + +```python +@evaluator +async def simple_judge(ctx: EvalContext) -> bool: + return await ctx.judge(f"Is this a valid answer? {ctx.output}", bool) +``` + +### No Judge Configured + +If an evaluator calls `ctx.judge()` and no judge was passed to `EvalSuite`, a `RuntimeError` is raised. This is treated as an **infrastructure error** (not a test failure), same as a fixture crash. + +### Usage Tracking + +Each call to `ctx.judge()` is counted. Tokens and cost from `JudgeResponse` are accumulated per case and flow to `EvalPayload`: + +| Field | Description | +|-------|-------------| +| `judge_call_count` | Number of judge calls | +| `judge_input_tokens` | Total input tokens | +| `judge_output_tokens` | Total output tokens | +| `judge_cost` | Total cost (user-computed) | + +These are available in history, letting you track LLM usage across runs. + +## TaskResult (SUT Usage Tracking) + +If your eval task calls an LLM, you can report usage by returning `TaskResult` instead of a plain value: + +```python +from protest.evals import TaskResult + +@chatbot_suite.eval(evaluators=[my_scorer]) +async def chatbot(case: Annotated[EvalCase, From(cases)]) -> TaskResult[str]: + result = await agent.run(case.inputs) + usage = result.usage() + return TaskResult( + output=result.output, + input_tokens=usage.request_tokens, + output_tokens=usage.response_tokens, + cost=usage.request_tokens * 0.10/1e6 + usage.response_tokens * 0.30/1e6, + ) +``` + +This is **opt-in** — returning a plain `str` still works. ProTest unwraps `TaskResult` transparently: evaluators see the plain output, usage stats flow to the reporter and history. 
+ +## Usage Display + +When task or judge usage data is available, ProTest shows a summary after the eval stats: + +``` + Passed: 16/26 (61.5%) + Task: 45.2k in / 27.1k out, $0.0142 + Judge: 5 calls, 800 in / 400 out, $0.0030 +``` + +Lines only appear when there is data. No `TaskResult` = no Task line. No judge configured = no Judge line. + +## Evaluator Errors + +If an evaluator raises an exception (e.g. LLM judge timeout), the case is marked as **error** (not fail). The stack trace appears in the output. + +> **Tip:** For non-deterministic evaluators (LLM judges), catch exceptions in the evaluator and return a verdict indicating failure rather than letting them propagate. + +## Name Collisions + +Each `Verdict` / `Metric` / `Reason` field name from a dataclass evaluator +becomes a key in the per-case score dict (and in the history file). **Names +must be unique across all evaluators that run on the same case.** + +If two evaluators emit a score under the same name (e.g. both have a +`detail` field), ProTest raises `ScoreNameCollisionError` at runtime so the +collision is loud instead of silently overwriting the duplicate. Rename the +colliding field — typically by prefixing with the evaluator's concept: + +```python +@dataclass +class SummaryShape: + summary_well_formed: Annotated[bool, Verdict] + summary_detail: Annotated[str, Reason] = "" # not just "detail" + +@dataclass +class CategoryMatch: + category_matches: Annotated[bool, Verdict] + category_match_detail: Annotated[str, Reason] = "" # not just "detail" +``` + +Why no auto-prefix? An evaluator's score name is what users grep for in +history, scripts, and the markdown artifacts. Auto-prefixing would mean the +same evaluator's `accuracy` field changes name (`fact_check.accuracy` vs +plain `accuracy`) depending on which other evaluators are wired in alongside +it — silently breaking downstream consumers when a new evaluator is added. +Failing loud and asking you to pick a stable, unique name keeps the score +identifiers stable across configurations. + +## Multi-Model Sessions + +Track which model produced each eval suite's results. Each `EvalSuite` can have its own model: + +```python +session = ProTestSession() + +pipeline_suite = EvalSuite("pipeline", model=ModelLabel(name="qwen-2.5")) +chatbot_suite = EvalSuite("chatbot", model=ModelLabel(name="mistral-7b")) + +session.add_suite(pipeline_suite) +session.add_suite(chatbot_suite) + +@pipeline_suite.eval(evaluators=[...]) +async def pipeline_eval(case, driver) -> str: ... + +@chatbot_suite.eval(evaluators=[...]) +async def chatbot_eval(case, deps) -> str: ... +``` + +`protest history --runs` shows the model per suite: + +``` +#1 2026-03-28T09:14 57/81 (70%) cb6f7bc + pipeline 29/39 (74%) qwen-2.5 + chatbot 10/21 (48%) mistral-7b +``` + +## CLI + +```bash +# Run evals +protest eval evals.session:session + +# Parallelism +protest eval evals.session:session -n 4 + +# Filter by tag +protest eval evals.session:session --tag chatbot + +# Filter by name +protest eval evals.session:session -k "lookup" + +# Re-run failures only +protest eval evals.session:session --last-failed + +# Verbosity: scores inline +protest eval evals.session:session -v + +# Show eval inputs/output/expected on passing cases +protest eval evals.session:session --show-output + +# Show captured log records +protest eval evals.session:session --show-logs +protest eval evals.session:session --show-logs=DEBUG +``` + +Flags are independent and combinable: `-v --show-output --show-logs`. 
+
+> **Note:** Failed eval cases always show inputs/output/expected — no flag needed.
+
+## Output
+
+### Default
+
+```
+  ✓ chatbot[lookup] (1.2s) keyword_recall=1.00 all_keywords_present=✓
+  ✗ chatbot[math]: all_keywords_present=False
+    │ inputs:   What is 2+2?
+    │ output:   The answer is 4.
+    │ expected: 4
+    │ detail:   found 0/1
+
+           Eval: chatbot (2 cases)
+┏━━━━━━━━━━━━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┓
+┃ Score           ┃ mean ┃ p50  ┃ p5   ┃ p95  ┃
+┡━━━━━━━━━━━━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━┩
+│ keyword_recall  │ 0.50 │ 0.50 │ 0.00 │ 1.00 │
+└─────────────────┴──────┴──────┴──────┴──────┘
+  Passed: 1/2 (50.0%)
+  Results: .protest/results/chatbot_20260329_091422
+```
+
+### Per-Case Results
+
+Each eval case writes a markdown file to `.protest/results/<suite>_<timestamp>/`:
+
+```
+.protest/results/chatbot_20260329_091422/
+├── lookup.md
+├── causal.md
+└── negative.md
+```
+
+## History
+
+Eval results are persisted as JSONL in `.protest/history.jsonl`. Track trends across runs.
+
+```bash
+# Run list with per-suite breakdown
+protest history runs --evals
+
+# Detailed view of latest run
+protest history show --evals
+
+# Compare last two runs (fixed/regressed/new)
+# Requires --model NAME if your history mixes multiple model labels
+# (e.g. one suite per rules version) — comparing across labels is rejected
+# to avoid phantom regressions where a case "fails" only because the two
+# runs being diffed used different models.
+protest history compare --evals --model rules_v1
+```
+
+### Integrity Hashes
+
+Each case in history carries two hashes:
+
+- **`case_hash`** — hash of inputs + expected output. Changes when the test data changes.
+- **`eval_hash`** — hash of evaluators. Changes when the scoring criteria change.
+
+`protest history compare` uses these hashes to detect modified cases vs regressions. If a case's `eval_hash` changed between runs, it's reported as "scoring modified" rather than a real regression.
+
+## Progress Output
+
+For long-running fixtures, use `console.print` to show progress without polluting test capture:
+
+```python
+from protest import console
+
+@fixture()
+async def pipeline():
+    for i, scene in enumerate(scenes):
+        console.print(f"[cyan]pipeline:[/] importing {scene.name} ({i+1}/{len(scenes)})")
+        await import_scene(scene)
+    return driver
+```
+
+Messages appear inline in the reporter output. Rich markup is supported (stripped for ASCII).
diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md
index e885d05..05f5cd6 100644
--- a/docs/getting-started/installation.md
+++ b/docs/getting-started/installation.md
@@ -31,3 +31,29 @@ ProTest automatically uses [Rich](https://rich.readthedocs.io/) for better termi
 ```bash
 uv add rich
 ```
+
+## IDE / type checker setup
+
+ProTest ships a `py.typed` marker, so Pyright, mypy and Pylance pick up
+its type hints once it is installed in the project's virtual env.
+
+If your editor reports `Import "protest" could not be resolved`, point
+your type checker at the right interpreter:
+
+- **VS Code / Pylance**: open the command palette → *Python: Select
+  Interpreter* → choose `.venv/bin/python` (the one `uv` created).
+- **Pyright (CLI/standalone)**: add a `pyrightconfig.json` next to your
+  `pyproject.toml`:
+
+    ```json
+    {
+      "venvPath": ".",
+      "venv": ".venv"
+    }
+    ```
+
+- **mypy**: run via `uv run mypy ...` so it inherits the same
+  interpreter, or set `python_executable` in `mypy.ini`.
+
+Once configured, no extra stub package or plugin is needed — protest
+exposes its own types directly.
diff --git a/examples/yorkshire/app/chatbot.py b/examples/yorkshire/app/chatbot.py new file mode 100644 index 0000000..82ca519 --- /dev/null +++ b/examples/yorkshire/app/chatbot.py @@ -0,0 +1,93 @@ +"""Yorkshire Terrier Expert Chatbot — fake LLM for eval demos. + +Simulates a RAG chatbot with realistic imperfections: +- Sometimes misses keywords (simulates retrieval failures) +- Occasionally adds irrelevant info (simulates hallucination) +- Response quality varies (simulates LLM non-determinism) +""" + +from __future__ import annotations + +import random + +# Knowledge base — what a real RAG system would retrieve +YORKSHIRE_FACTS = { + "size": "Yorkshire Terriers typically weigh between 2-3 kg. They come in teacup, mini, and standard sizes.", + "grooming": "Yorkies with long coats need daily brushing. Seniors over 6 years need extra grooming care. Regular baths every 2-3 weeks.", + "temperament": "Yorkies are bold, confident, and affectionate. Despite their small size, they are courageous and sometimes stubborn.", + "health": "Common health issues include dental problems, patellar luxation, and tracheal collapse. Regular vet checkups recommended.", + "training": "Yorkies are intelligent but can be stubborn. Positive reinforcement works best. Start training early for best results.", + "diet": "Small breed formula recommended. Feed 2-3 small meals per day. Avoid chocolate, grapes, and onions.", + "exercise": "30 minutes of daily exercise is sufficient. Short walks and indoor play. Avoid extreme temperatures.", + "jobs": "Historically bred as ratters. Modern Yorkies excel as therapy dogs, influencers, and loyal companions.", + "puppies": "Yorkshire puppies need extra care until 12 months. Socialization is critical in the first 6 months.", + "seniors": "Senior Yorkies (8+ years) may slow down. Adjust exercise and diet. More frequent vet visits recommended.", +} + + +def yorkshire_chatbot(question: str) -> str: # noqa: PLR0912 + """Fake chatbot that answers questions about Yorkshire Terriers. + + Simulates a RAG pipeline: keyword matching → fact retrieval → response generation. + No LLM calls — pure string matching for deterministic eval testing. + """ + question_lower = question.lower() + + # Find relevant facts by keyword matching + relevant_facts: list[str] = [] + for topic, fact in YORKSHIRE_FACTS.items(): + if topic in question_lower or any( + word in question_lower for word in topic.split() + ): + relevant_facts.append(fact) + + # Check for specific question patterns + if "weight" in question_lower or "how heavy" in question_lower: + relevant_facts.append(YORKSHIRE_FACTS["size"]) + if "brush" in question_lower or "coat" in question_lower: + relevant_facts.append(YORKSHIRE_FACTS["grooming"]) + if "eat" in question_lower or "food" in question_lower or "feed" in question_lower: + relevant_facts.append(YORKSHIRE_FACTS["diet"]) + if "walk" in question_lower or "active" in question_lower: + relevant_facts.append(YORKSHIRE_FACTS["exercise"]) + if "old" in question_lower or "aging" in question_lower: + relevant_facts.append(YORKSHIRE_FACTS["seniors"]) + if ( + "puppy" in question_lower + or "baby" in question_lower + or "young" in question_lower + ): + relevant_facts.append(YORKSHIRE_FACTS["puppies"]) + + # Deduplicate while preserving order + seen: set[str] = set() + unique_facts = [] + for fact in relevant_facts: + if fact not in seen: + seen.add(fact) + unique_facts.append(fact) + + if not unique_facts: + return "I'm not sure about that. I specialize in Yorkshire Terrier care and health." 
+ + response = " ".join(unique_facts) + + # Simulate LLM imperfections + # ~20% chance: drop a sentence (simulates retrieval miss) + if random.random() < 0.2 and ". " in response: # noqa: S311, PLR2004 + sentences = response.split(". ") + drop_idx = random.randint(0, len(sentences) - 1) # noqa: S311 + sentences.pop(drop_idx) + response = ". ".join(sentences) + + # ~10% chance: add irrelevant filler (simulates rambling) + if random.random() < 0.1: # noqa: S311, PLR2004 + response += " By the way, Yorkshire Terriers were originally bred in Yorkshire, England during the 19th century." + + # ~5% chance: return a vague non-answer (simulates confusion) + if random.random() < 0.05: # noqa: S311, PLR2004 + response = ( + "That's a great question about Yorkies! There are many factors to consider." + ) + + return response diff --git a/examples/yorkshire/evals/__init__.py b/examples/yorkshire/evals/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/yorkshire/evals/cases.py b/examples/yorkshire/evals/cases.py new file mode 100644 index 0000000..f50eae9 --- /dev/null +++ b/examples/yorkshire/evals/cases.py @@ -0,0 +1,121 @@ +"""Eval cases for the Yorkshire chatbot.""" + +from __future__ import annotations + +from protest import ForEach +from protest.evals import EvalCase +from protest.evals.evaluators import ( + contains_keywords, + does_not_contain, + max_length, + not_empty, +) + +yorkshire_cases = ForEach( + [ + # --- Factual recall --- + EvalCase( + name="weight_question", + inputs="How much does a Yorkshire Terrier weigh?", + expected="2-3 kg", + tags=["factual", "size"], + evaluators=[ + contains_keywords(keywords=["2-3 kg", "teacup", "mini", "standard"]) + ], + ), + EvalCase( + name="grooming_basics", + inputs="How often should I brush my Yorkie?", + expected="daily brushing for long coats", + tags=["factual", "grooming"], + evaluators=[contains_keywords(keywords=["daily", "brushing", "long"])], + ), + EvalCase( + name="diet_advice", + inputs="What should I feed my Yorkshire Terrier?", + expected="small breed formula, 2-3 meals", + tags=["factual", "diet"], + evaluators=[contains_keywords(keywords=["small breed", "meals", "avoid"])], + ), + EvalCase( + name="exercise_needs", + inputs="How much exercise does a Yorkie need?", + expected="30 minutes daily", + tags=["factual", "exercise"], + evaluators=[contains_keywords(keywords=["30 minutes", "walk"])], + ), + # --- Temperament --- + EvalCase( + name="personality", + inputs="What is the temperament of a Yorkshire Terrier?", + expected="bold, confident, affectionate", + tags=["factual", "temperament"], + evaluators=[ + contains_keywords(keywords=["bold", "confident", "affectionate"]) + ], + ), + # --- Age-specific --- + EvalCase( + name="puppy_care", + inputs="How do I care for a Yorkshire puppy?", + expected="extra care, socialization", + tags=["factual", "puppies"], + evaluators=[contains_keywords(keywords=["12 months", "socialization"])], + ), + EvalCase( + name="senior_care", + inputs="My Yorkie is getting old, what should I change?", + expected="adjust exercise, more vet visits", + tags=["factual", "seniors"], + evaluators=[contains_keywords(keywords=["senior", "exercise", "vet"])], + ), + # --- Hallucination checks --- + EvalCase( + name="no_cat_advice", + inputs="Tell me about Yorkshire Terrier health", + expected="dental problems, patellar luxation", + tags=["safety"], + evaluators=[ + does_not_contain(forbidden=["cat", "feline", "persian"]), + contains_keywords(keywords=["dental", "health"]), + ], + ), + 
EvalCase( + name="no_made_up_breeds", + inputs="What jobs can a Yorkie do?", + expected="therapy dogs, companions", + tags=["safety"], + evaluators=[ + does_not_contain(forbidden=["labrador", "golden retriever", "poodle"]), + contains_keywords(keywords=["therapy", "companion"]), + ], + ), + # --- Edge cases --- + EvalCase( + name="unknown_topic", + inputs="What is the GDP of France?", + expected="I'm not sure", + tags=["edge_case"], + evaluators=[contains_keywords(keywords=["not sure", "specialize"])], + ), + EvalCase( + name="empty_question", + inputs="", + expected="I'm not sure", + tags=["edge_case"], + evaluators=[contains_keywords(keywords=["not sure"])], + ), + # --- Known weak spot (chatbot doesn't know about training treats) --- + EvalCase( + name="training_treats", + inputs="What treats are best for training a Yorkie?", + expected="small soft treats, positive reinforcement", + tags=["factual", "training"], + evaluators=[ + contains_keywords(keywords=["treats", "small", "soft", "reward"]) + ], + ), + ] +) + +suite_evaluators = [not_empty, max_length(max_chars=500)] diff --git a/examples/yorkshire/evals/evaluators.py b/examples/yorkshire/evals/evaluators.py new file mode 100644 index 0000000..1008c22 --- /dev/null +++ b/examples/yorkshire/evals/evaluators.py @@ -0,0 +1,65 @@ +"""Yorkshire-specific evaluators. + +Generic evaluators come from protest.evals.evaluators. +Only project-specific ones live here. + +These also demonstrate how EvalContext generics document +what an evaluator expects as input/output types. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Annotated, Any + +from protest.evals import EvalContext, Metric, Verdict, evaluator + +# --- Text evaluator: EvalContext[Any, str] --------------------------------- +# Most evaluators work on text output. The first type param (inputs) is Any +# because evaluators don't usually care about the input shape. + + +@dataclass(frozen=True, slots=True) +class MentionsBreedResult: + breed_mentioned: Annotated[bool, Verdict] + + +@evaluator +def mentions_breed( + ctx: EvalContext[Any, str], breed: str = "Yorkshire" +) -> MentionsBreedResult: + """Check that the output mentions a specific breed.""" + return MentionsBreedResult(breed_mentioned=breed.lower() in ctx.output.lower()) + + +# --- Numeric evaluator: EvalContext[str, float] ---------------------------- +# An evaluator for a task that returns a numeric score (e.g. a classifier +# confidence, a similarity metric). The output is a float, not a string. + + +@dataclass(frozen=True, slots=True) +class ConfidenceResult: + confidence: Annotated[float, Metric] + above_threshold: Annotated[bool, Verdict] + + +@evaluator +def confidence_above( + ctx: EvalContext[str, float], threshold: float = 0.8 +) -> ConfidenceResult: + """Check that a numeric output (e.g. classifier confidence) meets a threshold.""" + return ConfidenceResult( + confidence=ctx.output, + above_threshold=ctx.output >= threshold, + ) + + +# --- Binary evaluator: EvalContext[str, bytes] ----------------------------- +# An evaluator for a task that returns raw bytes (e.g. image generation, +# audio synthesis). The evaluator checks basic properties of the output. + + +@evaluator +def output_not_empty_bytes(ctx: EvalContext[str, bytes]) -> bool: + """Check that a binary output (e.g. 
generated image) is not empty.""" + return len(ctx.output) > 0 diff --git a/examples/yorkshire/evals/session.py b/examples/yorkshire/evals/session.py new file mode 100644 index 0000000..e23f1d7 --- /dev/null +++ b/examples/yorkshire/evals/session.py @@ -0,0 +1,35 @@ +"""Yorkshire Chatbot Evals — evaluate the fake Yorkshire expert chatbot. + +Run with: + protest eval examples.yorkshire.evals.session:session + protest eval examples.yorkshire.evals.session:session -n 4 + protest eval examples.yorkshire.evals.session:session --tag safety + protest eval examples.yorkshire.evals.session:session --last-failed + protest history --evals --show +""" + +from typing import Annotated + +from examples.yorkshire.app.chatbot import yorkshire_chatbot +from examples.yorkshire.evals.cases import ( + suite_evaluators, + yorkshire_cases, +) +from protest import From, ProTestSession +from protest.evals import EvalCase, ModelLabel +from protest.evals.suite import EvalSuite + +session = ProTestSession( + metadata={"version": "1.0", "type": "keyword-matching"}, +) + +yorkshire_suite = EvalSuite( + "yorkshire_eval", + model=ModelLabel(name="yorkshire-chatbot-v1", provider="local"), +) +session.add_suite(yorkshire_suite) + + +@yorkshire_suite.eval(evaluators=suite_evaluators) +def yorkshire_eval(case: Annotated[EvalCase, From(yorkshire_cases)]) -> str: + return yorkshire_chatbot(case.inputs) diff --git a/examples/yorkshire/session.py b/examples/yorkshire/session.py new file mode 100644 index 0000000..f1347b7 --- /dev/null +++ b/examples/yorkshire/session.py @@ -0,0 +1,58 @@ +"""Yorkshire Terrier Unified Session — tests + evals in one session. + +Run all (tests + evals): + protest run examples.yorkshire.session:session + +Run only tests: + protest run examples.yorkshire.session:session + (protest run filters to kind=test) + +Run only evals: + protest eval examples.yorkshire.session:session +""" + +from typing import Annotated + +from examples.yorkshire.app.chatbot import yorkshire_chatbot +from examples.yorkshire.evals.cases import suite_evaluators, yorkshire_cases +from examples.yorkshire.tests.fixtures import ( + configure_kennel_logging, + kennel, + yorkshire, +) +from examples.yorkshire.tests.plugins import BarkPlugin +from examples.yorkshire.tests.suites.adults import adults_suite +from examples.yorkshire.tests.suites.custom_factory import custom_factory_suite +from examples.yorkshire.tests.suites.legacy.suite import legacy_suite +from examples.yorkshire.tests.suites.puppies.suite import puppies_suite +from examples.yorkshire.tests.suites.rate_limited import rate_limited_suite +from examples.yorkshire.tests.suites.seniors.suite import seniors_suite +from examples.yorkshire.tests.suites.showcase.suite import showcase_suite +from protest import From, ProTestSession +from protest.evals import EvalCase, ModelLabel +from protest.evals.suite import EvalSuite + +session = ProTestSession(concurrency=4, history=True) +session.use(BarkPlugin) +session.bind(configure_kennel_logging, autouse=True) +session.bind(kennel) +session.bind(yorkshire) + +session.add_suite(puppies_suite) +session.add_suite(adults_suite) +session.add_suite(seniors_suite) +session.add_suite(legacy_suite) +session.add_suite(showcase_suite) +session.add_suite(rate_limited_suite) +session.add_suite(custom_factory_suite) + +yorkshire_suite = EvalSuite( + "yorkshire_eval", + model=ModelLabel(name="yorkshire-chatbot-v1", provider="local"), +) +session.add_suite(yorkshire_suite) + + +@yorkshire_suite.eval(evaluators=suite_evaluators) +def 
yorkshire_eval(case: Annotated[EvalCase, From(yorkshire_cases)]) -> str: + return yorkshire_chatbot(case.inputs) diff --git a/justfile b/justfile index ddce526..9ddfe7b 100644 --- a/justfile +++ b/justfile @@ -7,7 +7,7 @@ @lint: ruff format . ruff check --fix . - mypy --strict protest + uv run mypy protest @fullcheck: ruff format --check . && ruff check . # lint diff --git a/mkdocs.yml b/mkdocs.yml index 93864db..a643afe 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -65,6 +65,8 @@ nav: - Tags: core-concepts/tags.md - Dependency Injection: core-concepts/dependency-injection.md - Reporters: core-concepts/reporters.md + - Console Output: core-concepts/console.md + - Evals: evals.md - Guides: - Best Practices: best-practices.md - Project Organization: guides/project-organization.md diff --git a/protest/__init__.py b/protest/__init__.py index 61ab9e6..97221b9 100644 --- a/protest/__init__.py +++ b/protest/__init__.py @@ -1,3 +1,4 @@ +from protest import console from protest.api import collect_tests, list_tags, run_session from protest.assertions import ExceptionInfo, RaisesContext, raises, warns from protest.core.session import ProTestSession @@ -41,6 +42,7 @@ "__version__", "caplog", "collect_tests", + "console", "factory", "fixture", "list_tags", diff --git a/protest/api.py b/protest/api.py index a6c6f79..7b1e169 100644 --- a/protest/api.py +++ b/protest/api.py @@ -14,21 +14,28 @@ def test_example(): assert True success = run_session(session) - -Note: - This module uses lazy imports (PLC0415) to optimize startup time. - Users importing `from protest.api import run_session` shouldn't pay - the cost of loading the entire framework until they actually call it. """ from __future__ import annotations +import asyncio from typing import TYPE_CHECKING +from protest.core.collector import Collector +from protest.core.runner import TestRunner +from protest.core.suite import ( + ProTestSuite, # noqa: TC001 — used at runtime in list_tags +) +from protest.events.types import Event +from protest.filters.keyword import KeywordFilterPlugin +from protest.filters.kind import KindFilterPlugin +from protest.filters.suite import SuiteFilterPlugin +from protest.plugin import PluginBase, PluginContext +from protest.tags.plugin import TagFilterPlugin + if TYPE_CHECKING: from protest.core.session import ProTestSession from protest.entities import RunResult, TestItem - from protest.plugin import PluginContext def run_session( # noqa: PLR0913 - public API with many optional params @@ -69,10 +76,6 @@ def run_session( # noqa: PLR0913 - public API with many optional params Returns: RunResult with success status and interrupted flag. """ - from protest.core.runner import ( # noqa: PLC0415 - lazy import for startup perf - TestRunner, - ) - # Apply session-level settings from ctx or params if ctx is not None: if ctx.get("concurrency") is not None: @@ -91,10 +94,6 @@ def run_session( # noqa: PLR0913 - public API with many optional params # Build context from parameters if not provided if ctx is None: - from protest.plugin import ( # noqa: PLC0415 - lazy import for startup perf - PluginContext, - ) - ctx = PluginContext( args={ "last_failed": last_failed, @@ -136,16 +135,6 @@ def collect_tests( # noqa: PLR0913 - public API with many optional params Returns: List of collected TestItem objects. 
""" - # Lazy imports for startup performance - only load when function is called - import asyncio # noqa: PLC0415 - - from protest.core.collector import Collector # noqa: PLC0415 - from protest.events.types import Event # noqa: PLC0415 - from protest.filters.keyword import KeywordFilterPlugin # noqa: PLC0415 - from protest.filters.suite import SuiteFilterPlugin # noqa: PLC0415 - from protest.plugin import PluginBase, PluginContext # noqa: PLC0415 - from protest.tags.plugin import TagFilterPlugin # noqa: PLC0415 - # Build context from parameters if not provided if ctx is None: ctx = PluginContext( @@ -162,6 +151,7 @@ def collect_tests( # noqa: PLR0913 - public API with many optional params TagFilterPlugin, SuiteFilterPlugin, KeywordFilterPlugin, + KindFilterPlugin, ] for plugin_class in filter_plugins: instance = plugin_class.activate(ctx) @@ -182,10 +172,6 @@ def list_tags(session: ProTestSession) -> set[str]: Returns: Set of all tag names declared on fixtures, suites, and tests. """ - from protest.core.suite import ( # noqa: PLC0415, TC001 - lazy import for startup perf - ProTestSuite, - ) - all_tags: set[str] = set() for fixture_reg in session.fixtures: diff --git a/protest/cli/history.py b/protest/cli/history.py new file mode 100644 index 0000000..19b6a97 --- /dev/null +++ b/protest/cli/history.py @@ -0,0 +1,623 @@ +"""CLI command: protest history — browse run history.""" + +from __future__ import annotations + +import argparse +import sys +from pathlib import Path +from typing import Any + +from protest.history.storage import clean_dirty, count_dirty_entries, load_history + + +def _make_common_parser() -> argparse.ArgumentParser: + """Filters shared by every `protest history` sub-command.""" + common = argparse.ArgumentParser(add_help=False) + common.add_argument( + "--tail", + "-n", + type=int, + default=10, + help="Limit to the N most recent entries (default: 10)", + ) + common.add_argument("--model", type=str, default=None, help="Filter by model name") + common.add_argument("--suite", type=str, default=None, help="Filter by suite name") + kind_group = common.add_mutually_exclusive_group() + kind_group.add_argument("--evals", action="store_true", help="Eval runs only") + kind_group.add_argument("--tests", action="store_true", help="Test runs only") + common.add_argument( + "--path", + type=str, + default=None, + help="History directory (default: .protest/)", + ) + return common + + +def handle_history_command(argv: list[str]) -> None: + """Entry point for `protest history`. + + Sub-commands: + + - ``list`` (default): per-suite trend table. + - ``runs``: run-by-run pass rates, most recent first. + - ``show [N]``: detailed panel for the Nth most recent run (0=latest). + - ``compare``: compare the two most recent runs. + - ``clean``: remove entries from runs made on a dirty working tree + (dry-run by default; pass ``--apply`` to actually modify the file). 
+ """ + parser = argparse.ArgumentParser( + prog="protest history", + description="Browse run history", + ) + sub = parser.add_subparsers(dest="action") + common = _make_common_parser() + + sub.add_parser("list", parents=[common], help="Per-suite trend (default)") + sub.add_parser("runs", parents=[common], help="Run-by-run breakdown") + show_p = sub.add_parser("show", parents=[common], help="Detailed panel for one run") + show_p.add_argument( + "nth", + type=int, + nargs="?", + default=0, + help="Nth most recent run (0=latest, default: 0)", + ) + sub.add_parser("compare", parents=[common], help="Compare 2 most recent runs") + clean_p = sub.add_parser("clean", parents=[common], help="Remove dirty entries") + clean_p.add_argument( + "--apply", + action="store_true", + help="Actually modify the history file (default: dry-run, no changes).", + ) + + # Default to `list` when no sub-command is given (so users can still + # write `protest history --tail 5` without typing `list`). + # `--help` / `-h` go to the parent so users see the sub-command list, + # not list-specific options. + if not argv: + argv = ["list"] + elif argv[0].startswith("-") and argv[0] not in ("--help", "-h"): + argv = ["list", *argv] + args = parser.parse_args(argv) + + history_dir = Path(args.path) if args.path else None + + if args.action == "clean": + _run_clean(history_dir=history_dir, apply=args.apply) + sys.exit(0) + + entries = load_history( + history_dir=history_dir, + model=args.model, + suite=args.suite, + evals_only=args.evals, + tests_only=args.tests, + ) + if not entries: + print("No history found.") + sys.exit(0) + + # Apply --tail to entries before any aggregation so the trend view + # actually narrows to the requested window (otherwise the per-suite + # trend would still cover the full file even with --tail). + entries = entries[-args.tail :] + _dispatch_view(args.action, getattr(args, "nth", 0), entries) + + +def _run_clean(history_dir: Path | None, *, apply: bool) -> None: + if apply: + removed = clean_dirty(history_dir=history_dir) + print( + f"Removed {removed} dirty entries." + if removed + else "No dirty entries to clean." + ) + return + count = count_dirty_entries(history_dir=history_dir) + if count: + print( + f"Would remove {count} dirty entries. " + f"Re-run with --apply to actually modify the history file." + ) + else: + print("No dirty entries to clean.") + + +def _dispatch_view(action: str, nth: int, entries: list[dict[str, Any]]) -> None: + out = _get_output() + if action == "compare": + if len(entries) < 2: + print("Need at least 2 runs to compare.") + sys.exit(1) + # Refuse to compare across multiple models silently. When two runs + # contain suites with several distinct model labels (e.g. rules_v1 + # and rules_v2 in the same multi-model session), the case-name diff + # would conflate the two contexts and emit phantom regressions. + # Force the user to disambiguate via --model NAME or --suite NAME. + models = _models_in_entries([entries[-1], entries[-2]]) + if len(models) > 1: + print( + "Cannot compare runs that contain multiple models: " + f"{sorted(models)}. Pass --model NAME to compare runs of " + "the same model, or --suite NAME to focus on one suite." 
+ ) + sys.exit(1) + out.compare(entries[-1], entries[-2]) + elif action == "show": + if nth >= len(entries): + print(f"Only {len(entries)} entries available.") + sys.exit(1) + out.detail(entries[-(nth + 1)]) + elif action == "runs": + out.runs(entries) + else: # "list" (default) + out.stats(entries) + + +def _models_in_entries(entries: list[dict[str, Any]]) -> set[str]: + """Collect distinct, non-empty model labels across the given entries.""" + models: set[str] = set() + for entry in entries: + for sdata in entry.get("suites", {}).values(): + if isinstance(sdata, dict): + model = sdata.get("model") + if model: + models.add(model) + return models + + +# --------------------------------------------------------------------------- +# Output abstraction — Rich if available, plain text fallback +# --------------------------------------------------------------------------- + + +class _Output: + """Base output — plain text.""" + + def stats(self, entries: list[dict[str, Any]]) -> None: + suites = _aggregate_suites(entries) + if not suites: + print("No suite data found.") + return + print(f"\n {'Suite':<22} {'Kind':<6} {'Runs':>4} {'Pass rate':<16} {'Flaky'}") + for name in sorted(suites): + s = suites[name] + rate_str = _format_rate(s["pass_rates"]) + flaky_n = len(s["flaky"]) + print( + f" {name:<22} {s['kind']:<6} {s['n_runs']:>4} {rate_str:<16} {flaky_n or ''}" + ) + print() + + def runs(self, entries: list[dict[str, Any]]) -> None: + # Display most-recent first (git log convention). `entries` arrives + # sorted oldest→newest from storage, so we reverse for display. + for i, e in enumerate(reversed(entries)): + p, t, r = _entry_stats(e) + git = (e.get("git") or {}).get("commit_short", "?") + ts = e.get("timestamp", "?")[:16] + print(f"\n #{i + 1:<3} {ts} {p}/{t} ({r * 100:.0f}%) {git}") + for sn, sd in e.get("suites", {}).items(): + if not isinstance(sd, dict): + continue + sp = sd.get("passed", 0) + st = sd.get("total_cases", 0) + sr = sp / st * 100 if st else 0 + model = sd.get("model") or "-" + print(f" {sn:<20} {sp}/{st} ({sr:.0f}%) {model}") + print() + + def detail(self, entry: dict[str, Any]) -> None: + kind = "EVAL" if entry.get("evals") else "TEST" + git = entry.get("git") or {} + ts = entry.get("timestamp", "?")[:19] + print( + f"\n {kind} run {ts} {git.get('commit_short', '?')} @ {git.get('branch', '?')}" + ) + for sn, sd in entry.get("suites", {}).items(): + if not isinstance(sd, dict): + continue + suite_model = sd.get("model") + model_str = f" [{suite_model}]" if suite_model else "" + print( + f"\n Suite: {sn} {sd.get('passed', 0)}/{sd.get('total_cases', 0)}{model_str}" + ) + for cn, cd in sd.get("cases", {}).items(): + if not isinstance(cd, dict): + continue + m = "+" if cd.get("passed") else "-" + print(f" {m} {cn} ({_fmt_dur(cd.get('duration', 0))})") + print() + + def compare(self, current: dict[str, Any], previous: dict[str, Any]) -> None: + cm = _get_display_model(current) + pm = _get_display_model(previous) + _, _, cr = _entry_stats(current) + _, _, pr = _entry_stats(previous) + if cm == pm: + print(f"\n Model: {cm}") + else: + print(f"\n Model: {pm} → {cm}") + print(f" Pass rate: {pr * 100:.0f}% → {cr * 100:.0f}%") + changes = _classify_changes(_all_cases(current), _all_cases(previous)) + _print_changes(changes) + + +class _RichOutput(_Output): + """Rich output with colors, tables, panels.""" + + def __init__(self) -> None: + from rich.console import Console # noqa: PLC0415 — optional dep + + self.console = Console(highlight=False) + + def stats(self, entries: 
list[dict[str, Any]]) -> None: + from rich.table import Table # noqa: PLC0415 — optional dep + + suites = _aggregate_suites(entries) + if not suites: + self.console.print("No suite data found.") + return + table = Table(show_header=True, header_style="bold", box=None, pad_edge=False) + table.add_column("Suite", min_width=12, no_wrap=True) + table.add_column("Kind", width=5) + table.add_column("Runs", justify="right", width=4) + table.add_column("Pass rate", min_width=14, no_wrap=True) + table.add_column("Scores", no_wrap=True) + table.add_column("Flaky", width=5) + + for name in sorted(suites): + s = suites[name] + kind = s["kind"] + kind_color = "cyan" if kind == "eval" else "blue" + rate_str = _rich_rate(s["pass_rates"]) + score_arrows = _rich_score_arrows(s.get("score_values", {})) + flaky_n = len(s["flaky"]) + flaky_str = f"[yellow]{flaky_n}[/]" if flaky_n else "" + table.add_row( + name, + f"[{kind_color}]{kind}[/]", + str(s["n_runs"]), + rate_str, + score_arrows, + flaky_str, + ) + + self.console.print() + self.console.print(table) + self.console.print() + + def runs(self, entries: list[dict[str, Any]]) -> None: + self.console.print() + # Display most-recent first (git log convention). + for i, e in enumerate(reversed(entries)): + p, t, r = _entry_stats(e) + git = (e.get("git") or {}).get("commit_short", "?") + ts = e.get("timestamp", "?")[:16] + rate_color = "green" if r >= 0.8 else "yellow" if r >= 0.5 else "red" + self.console.print( + f" [dim]#{i + 1:<3}[/] {ts} " + f"[{rate_color}]{p}/{t} ({r * 100:.0f}%)[/] [dim]{git}[/]" + ) + for sn, sd in e.get("suites", {}).items(): + if not isinstance(sd, dict): + continue + sp = sd.get("passed", 0) + st = sd.get("total_cases", 0) + sr = sp / st * 100 if st else 0 + sc = "green" if sr >= 80 else "yellow" if sr >= 50 else "red" + model = sd.get("model") or "-" + self.console.print( + f" {sn:<20} [{sc}]{sp}/{st} ({sr:.0f}%)[/] [cyan]{model}[/]" + ) + self.console.print() + + def detail(self, entry: dict[str, Any]) -> None: + from rich.panel import Panel # noqa: PLC0415 — optional dep + from rich.text import Text # noqa: PLC0415 — optional dep + + kind = "EVAL" if entry.get("evals") else "TEST" + git = entry.get("git") or {} + ts = entry.get("timestamp", "?")[:19] + evals_info = entry.get("evals") or {} + + lines = Text() + lines.append(f"{kind} run", style="bold") + lines.append(f" {ts} ", style="dim") + lines.append( + f"{git.get('commit_short', '?')} @ {git.get('branch', '?')}\n", style="dim" + ) + + # Scores summary + for sn, stats in evals_info.get("scores_summary", {}).items(): + mean = stats.get("mean", 0) + color = "green" if mean >= 0.8 else "yellow" if mean >= 0.5 else "red" + lines.append(f" {sn}: ", style="dim") + lines.append(f"mean={mean:.2f}", style=color) + lines.append( + f" p50={stats.get('median', 0):.2f} p95={stats.get('p95', 0):.2f}\n", + style="dim", + ) + + for sn, sd in entry.get("suites", {}).items(): + if not isinstance(sd, dict): + continue + p, t = sd.get("passed", 0), sd.get("total_cases", 0) + lines.append("\nSuite: ", style="bold") + lines.append(sn) + pc = "green" if p == t else "yellow" if p >= t * 0.5 else "red" + lines.append(f" {p}/{t}", style=pc) + suite_model = sd.get("model") + if suite_model: + lines.append(f" [{suite_model}]", style="cyan") + lines.append(f" {_fmt_dur(sd.get('duration', 0))}\n", style="dim") + for cn, cd in sd.get("cases", {}).items(): + if not isinstance(cd, dict): + continue + if cd.get("passed"): + lines.append(" + ", style="green") + else: + lines.append(" - ", style="red") + 
lines.append(cn) + lines.append(f" ({_fmt_dur(cd.get('duration', 0))})\n", style="dim") + + self.console.print() + self.console.print( + Panel( + lines, + title="[bold]Run Detail[/] [dim]([green]+[/] pass · [red]-[/] fail)[/]", + border_style="cyan", + ) + ) + + def compare(self, current: dict[str, Any], previous: dict[str, Any]) -> None: + from rich.panel import Panel # noqa: PLC0415 — optional dep + from rich.text import Text # noqa: PLC0415 — optional dep + + cm = _get_display_model(current) + pm = _get_display_model(previous) + _, _, cr = _entry_stats(current) + _, _, pr = _entry_stats(previous) + delta = cr - pr + + lines = Text() + if cm == pm: + lines.append(f"Model: {cm}\n", style="cyan") + else: + lines.append(f"Model: {pm} → {cm}\n", style="cyan") + + lines.append("Pass rate: ") + lines.append(f"{pr * 100:.0f}%", style="dim") + lines.append(" → ") + rc = "green" if delta > 0 else "red" if delta < 0 else "" + lines.append(f"{cr * 100:.0f}%", style=rc) + if abs(delta) >= 0.001: + lines.append(f" ({delta * 100:+.0f}%)", style=rc) + lines.append("\n\n") + + changes = _classify_changes(_all_cases(current), _all_cases(previous)) + labels = [ + ("fixed", "Fixed", "green", "+"), + ("regressed", "Regressions", "red", "-"), + ("modified", "Modified", "yellow", "⟳"), + ("new", "New", "cyan", "*"), + ("deleted", "Deleted", "red", "✗"), + ] + has_any = False + for key, label, color, marker in labels: + items = changes[key] + if items: + has_any = True + lines.append(f"{label} ({len(items)}):\n", style=color) + for n in items: + lines.append(f" {marker} {n}\n") + lines.append("\n") + if not has_any: + lines.append("No changes.\n", style="dim") + + self.console.print() + self.console.print( + Panel(lines, title="[bold]Run Comparison[/]", border_style="cyan") + ) + + +def _get_output() -> _Output: + try: + return _RichOutput() + except ImportError: + return _Output() + + +# --------------------------------------------------------------------------- +# Rich helpers +# --------------------------------------------------------------------------- + + +def _rich_rate(rates: list[float]) -> str: + if len(rates) >= 2: + first, last = rates[0], rates[-1] + delta = last - first + if delta > 0.01: + return f"[dim]{first * 100:.0f}%[/] [green]↗ {last * 100:.0f}%[/]" + if delta < -0.01: + return f"[dim]{first * 100:.0f}%[/] [red]↘ {last * 100:.0f}%[/]" + return f"{last * 100:.0f}%" + if rates: + return f"{rates[0] * 100:.0f}%" + return "-" + + +def _rich_score_arrows(score_values: dict[str, list[float]]) -> str: + """Score trend arrows: ↗↘→ per score.""" + parts: list[str] = [] + for _name, values in sorted(score_values.items()): + if len(values) >= 2: + d = values[-1] - values[0] + if d > 0.01: + parts.append("[green]↗[/]") + elif d < -0.01: + parts.append("[red]↘[/]") + else: + parts.append("[dim]→[/]") + return "".join(parts) + + +# --------------------------------------------------------------------------- +# Data helpers +# --------------------------------------------------------------------------- + + +def _format_rate(rates: list[float]) -> str: + if len(rates) >= 2: + first, last = rates[0], rates[-1] + delta = last - first + arrow = "↗" if delta > 0.01 else "↘" if delta < -0.01 else "→" + return f"{first * 100:.0f}% {arrow} {last * 100:.0f}%" + if rates: + return f"{rates[0] * 100:.0f}%" + return "-" + + +def _aggregate_suites(entries: list[dict[str, Any]]) -> dict[str, dict[str, Any]]: + suites: dict[str, dict[str, Any]] = {} + for entry in entries: + for name, data in entry.get("suites", 
{}).items(): + if not isinstance(data, dict): + continue + if name not in suites: + suites[name] = { + "kind": data.get("kind", "test"), + "n_runs": 0, + "pass_rates": [], + "flaky": {}, + "cases_seen": {}, + "score_values": {}, + } + s = suites[name] + errored = data.get("errored", 0) + total = data.get("total_cases", 0) + passed = data.get("passed", 0) + # Skip error-only runs (fixture crashes) from stats + if errored and errored >= total: + continue + s["n_runs"] += 1 + if total: + s["pass_rates"].append(passed / total) + _track_cases(s, data.get("cases", {})) + + for s in suites.values(): + s["flaky"] = { + cn: cs["fails"] + for cn, cs in s["cases_seen"].items() + if 0 < cs["fails"] < cs["runs"] + } + return suites + + +def _track_cases(suite: dict[str, Any], cases: dict[str, Any]) -> None: + """Track per-case pass/fail and scores for a suite.""" + for cn, cd in cases.items(): + if not isinstance(cd, dict): + continue + # Skip errored cases (fixture crashes) from stats + if cd.get("is_error"): + continue + if cn not in suite["cases_seen"]: + suite["cases_seen"][cn] = {"runs": 0, "fails": 0} + suite["cases_seen"][cn]["runs"] += 1 + if not cd.get("passed", True): + suite["cases_seen"][cn]["fails"] += 1 + for sn, sv in cd.get("scores", {}).items(): + if isinstance(sv, (int, float)): + if sn not in suite["score_values"]: + suite["score_values"][sn] = [] + suite["score_values"][sn].append(float(sv)) + + +def _get_display_model(entry: dict[str, Any]) -> str: + """Get display model: per-suite models if they differ, global otherwise.""" + suite_models: set[str] = { + sd["model"] + for sd in entry.get("suites", {}).values() + if isinstance(sd, dict) and sd.get("model") + } + if len(suite_models) > 1: + return ", ".join(sorted(suite_models)) + if suite_models: + return next(iter(suite_models)) + return (entry.get("evals") or {}).get("model") or "-" + + +def _entry_stats(entry: dict[str, Any]) -> tuple[int, int, float]: + total = passed = 0 + for data in entry.get("suites", {}).values(): + if isinstance(data, dict): + total += data.get("total_cases", 0) + passed += data.get("passed", 0) + return passed, total, passed / total if total else 0 + + +def _all_cases(entry: dict[str, Any]) -> dict[str, Any]: + cases: dict[str, Any] = {} + for data in entry.get("suites", {}).values(): + if isinstance(data, dict): + cases.update(data.get("cases", {})) + return cases + + +def _classify_changes( + curr_cases: dict[str, Any], + prev_cases: dict[str, Any], +) -> dict[str, list[str]]: + result: dict[str, list[str]] = { + "fixed": [], + "regressed": [], + "modified": [], + "new": [], + "deleted": [], + } + for name, curr in curr_cases.items(): + prev = prev_cases.get(name) + if prev is None: + result["new"].append(name) + elif curr.get("case_hash") and curr["case_hash"] != prev.get("case_hash"): + result["modified"].append(f"{name} (case modified)") + elif curr.get("eval_hash") and curr["eval_hash"] != prev.get("eval_hash"): + result["modified"].append(f"{name} (scoring modified)") + elif curr.get("passed") and not prev.get("passed"): + result["fixed"].append(name) + elif not curr.get("passed") and prev.get("passed"): + result["regressed"].append(name) + for name in prev_cases: + if name not in curr_cases: + result["deleted"].append(name) + return result + + +def _print_changes(changes: dict[str, list[str]]) -> None: + labels = { + "fixed": ("Fixed", "+"), + "regressed": ("Regressions", "-"), + "modified": ("Modified", "⟳"), + "new": ("New", "*"), + "deleted": ("Deleted", "✗"), + } + has_any = False + for key, 
(label, marker) in labels.items(): + if changes[key]: + has_any = True + print(f"\n {label} ({len(changes[key])}):") + for n in changes[key]: + print(f" {marker} {n}") + if not has_any: + print(" No changes.") + print() + + +def _fmt_dur(seconds: float) -> str: + if seconds < 1: + return f"{seconds * 1000:.0f}ms" + if seconds < 60: + return f"{seconds:.1f}s" + return f"{int(seconds // 60)}m{seconds % 60:.0f}s" diff --git a/protest/cli/main.py b/protest/cli/main.py index a913e7f..2fcc5b1 100644 --- a/protest/cli/main.py +++ b/protest/cli/main.py @@ -1,13 +1,18 @@ from __future__ import annotations import argparse +import functools import sys -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any + +from protest.api import collect_tests, list_tags, run_session +from protest.core.session import ProTestSession +from protest.loader import LoadError, load_session, parse_target +from protest.plugin import PluginContext +from protest.reporting.verbosity import Verbosity if TYPE_CHECKING: - from protest.core.session import ProTestSession from protest.entities import TestItem - from protest.plugin import PluginContext HELP_EPILOG = """ Examples: @@ -19,6 +24,13 @@ protest run demo:session --collect-only List tests without running protest run demo:session --tag slow Run tests with 'slow' tag protest run demo:session -s Disable capture (show print output) + protest eval demo:session Run all evaluations + protest eval demo:session --show-output Show inputs/output/expected per case + protest history --evals Show eval suite trends + protest history runs --evals Run-by-run breakdown + protest history compare --evals Compare 2 most recent runs + protest history clean Preview removable dirty entries + protest live Start live reporter server protest tags list demo:session List all available tags """ @@ -56,9 +68,6 @@ def _handle_tags_command() -> None: def _list_tags(target: str, app_dir: str, recursive: bool = False) -> None: """List all tags in a session.""" - from protest.api import collect_tests, list_tags - from protest.loader import LoadError, load_session - try: session = load_session(target, app_dir) except LoadError as exc: @@ -103,19 +112,21 @@ def main() -> None: _print_help() return - if command == "tags": - _handle_tags_command() - return - - if command == "run": - _handle_run_command() - return - - if command == "live": - _handle_live_command() + commands: dict[str, Any] = { + "tags": _handle_tags_command, + "run": functools.partial(_handle_run_command, kind_filter="test"), + "eval": functools.partial(_handle_run_command, kind_filter="eval"), + "history": _handle_history_command, + "live": _handle_live_command, + } + + handler = commands.get(command) + if handler: + handler() return - print(f"Error: Unknown command '{command}'. Use 'run', 'tags', or 'live'.") + valid = ", ".join(f"'{c}'" for c in commands) + print(f"Error: Unknown command '{command}'. 
Use {valid}.") sys.exit(1) @@ -134,7 +145,7 @@ def _handle_live_command() -> None: ) args = parser.parse_args(sys.argv[2:]) - from protest.reporting.web import run_live_server + from protest.reporting.web import run_live_server # noqa: PLC0415 — optional dep run_live_server(port=args.port) @@ -143,9 +154,11 @@ def _print_help() -> None: """Print main help.""" print("ProTest - Async-first Python test framework\n") print("Commands:") - print(" run Run tests") - print(" live Start live reporter server") - print(" tags Tag inspection commands") + print(" run Run tests") + print(" eval Run evaluations") + print(" history Browse run history") + print(" live Start live reporter server") + print(" tags Tag inspection commands") print(HELP_EPILOG) @@ -169,11 +182,19 @@ def _create_base_parser() -> argparse.ArgumentParser: return parser -def _create_run_parser() -> argparse.ArgumentParser: - """Base parser with core run options. Plugin options added dynamically.""" +def _create_run_parser( + *, + include_eval_options: bool = False, +) -> argparse.ArgumentParser: + """Base parser with core run options. Plugin options added dynamically. + + `include_eval_options=True` adds eval-only flags (e.g. ``--show-output``). + Set when building the parser for ``protest eval``; left False for + ``protest run`` so the eval-only flags don't pollute the test help/parsing. + """ parser = argparse.ArgumentParser( - prog="protest run", - description="Run tests", + prog="protest eval" if include_eval_options else "protest run", + description="Run evals" if include_eval_options else "Run tests", ) parser.add_argument( "target", @@ -225,14 +246,44 @@ def _create_run_parser() -> argparse.ArgumentParser: default=0, help="Increase verbosity (-v for lifecycle, -vv for fixtures)", ) + parser.add_argument( + "--show-logs", + dest="show_logs", + nargs="?", + const="INFO", + default=None, + metavar="LEVEL", + help="Show captured log records (default: INFO+)", + ) + if include_eval_options: + parser.add_argument( + "--show-output", + dest="show_output", + action="store_true", + help="Show eval inputs/output/expected per case", + ) + parser.add_argument( + "--short", + dest="short", + action="store_true", + help="Compact eval output: only print scores that failed per case", + ) return parser -def _handle_run_command() -> None: - """Handle 'protest run' subcommand with two-phase parsing.""" - from protest.loader import LoadError, load_session, parse_target +def _handle_history_command() -> None: + """Handle 'protest history' subcommand.""" + from protest.cli.history import ( # noqa: PLC0415 — heavy module + handle_history_command, + ) + + handle_history_command(sys.argv[2:]) + +def _handle_run_command(kind_filter: str | None = None) -> None: + """Handle 'protest run' / 'protest eval' with two-phase parsing.""" argv = sys.argv[2:] + include_eval_options = kind_filter == "eval" # Phase 1: Parse base args to get target base_parser = _create_base_parser() @@ -240,16 +291,14 @@ def _handle_run_command() -> None: # If --help without target, show full help with all plugin options if ("--help" in remaining or "-h" in remaining) and not base_args.target: - from protest.core.session import ProTestSession - - full_parser = _create_run_parser() + full_parser = _create_run_parser(include_eval_options=include_eval_options) for plugin_class in ProTestSession.default_plugin_classes(): plugin_class.add_cli_options(full_parser) full_parser.parse_args(["--help"]) return if not base_args.target: - _create_run_parser().print_help() + 
_create_run_parser(include_eval_options=include_eval_options).print_help() sys.exit(1) # Phase 2: Load session and register default plugins @@ -263,7 +312,7 @@ def _handle_run_command() -> None: session.register_default_plugins() # Phase 3: Build full parser with plugin options - full_parser = _create_run_parser() + full_parser = _create_run_parser(include_eval_options=include_eval_options) for plugin_class in session.plugin_classes: plugin_class.add_cli_options(full_parser) @@ -271,17 +320,15 @@ def _handle_run_command() -> None: args = full_parser.parse_args(argv) # Phase 5: Build context - from protest.plugin import PluginContext - from protest.reporting.verbosity import Verbosity - effective_verbosity = Verbosity.QUIET if args.quiet else args.verbosity - ctx = PluginContext( - args={ - **vars(args), - "target_suite": suite_filter, - "verbosity": effective_verbosity, - } - ) + ctx_args: dict[str, Any] = { + **vars(args), + "target_suite": suite_filter, + "verbosity": effective_verbosity, + } + if kind_filter: + ctx_args["kind_filter"] = kind_filter + ctx = PluginContext(args=ctx_args) # Phase 6: Run tests (api.run_session handles plugin activation) run_tests(session, ctx, collect_only=args.collect_only) @@ -292,8 +339,6 @@ def run_tests( ctx: PluginContext, collect_only: bool = False, ) -> None: - from protest.api import collect_tests, run_session - if collect_only: items = collect_tests(session, ctx=ctx) print(f"Collected {len(items)} test(s):\n") diff --git a/protest/console.py b/protest/console.py new file mode 100644 index 0000000..f200d9d --- /dev/null +++ b/protest/console.py @@ -0,0 +1,85 @@ +"""protest.console — progress output that bypasses test capture. + +Usage:: + + from protest import console + + @fixture() + async def pipeline(): + for i, scene in enumerate(scenes): + console.print(f"[bold]pipeline:[/] importing {scene.name} ({i+1}/{len(scenes)})") + await import_scene(scene) + + # Raw mode — no markup processing + console.print("debug: raw bytes here", raw=True) + + # Section mode — no per-test prefix (use for suite/session-level lines) + console.print(f" Results: {run_dir}", prefix=False) + +Messages go through the event bus → reporters display them inline. +If no event bus is available (outside a protest session), falls back to stderr. +""" + +from __future__ import annotations + +import contextlib +import re + +from protest.events.types import Event +from protest.execution.capture import get_event_bus, real_stderr + + +def print(msg: str, *, raw: bool = False, prefix: bool = True) -> None: + """Print a message that bypasses test capture. + + Goes through the event bus so reporters display it at the right place. + Supports Rich markup (stripped for ASCII reporter). + + Args: + msg: The message to print. Supports Rich markup unless raw=True. + raw: If True, no markup processing — message passed as-is. + prefix: If False, omit the per-test indent/bar prefix. Use for + suite-level or session-level lines (e.g. "Results: ") that + visually belong outside any single case's output block. + """ + bus = get_event_bus() + if bus is None: + _fallback_print(msg, raw) + return + + # Intentional private access to `bus._handlers`: we need sync dispatch + # so messages appear immediately (not after the test). An earlier public + # `EventBus.emit_sync` was removed (commit e14ffd5) because its signal- + # handler use case was async-signal-unsafe, and we don't want to offer + # that API to users. 
Kept private here — the framework itself is the + # only caller, and console.print is never invoked from a signal handler. + for handler_entry in bus._handlers.get(Event.USER_PRINT, []): + try: + handler_entry.func((msg, raw, prefix)) + except Exception as exc: + # Surface handler failures (typically: malformed Rich markup) on + # real stderr so users don't conclude `console.print` is silently + # broken. Wrapped in suppress() to guarantee the loop continues + # even if the fallback write itself raises. + with contextlib.suppress(Exception): + stream = real_stderr() + stream.write(f"console.print: handler raised {exc!r}\n") + stream.flush() + + +def _fallback_print(msg: str, raw: bool) -> None: + """Fallback when no event bus — write to real stderr (bypassing capture).""" + text = msg if raw else strip_markup(msg) + stream = real_stderr() + stream.write(text + "\n") + stream.flush() + + +def strip_markup(msg: str) -> str: + """Strip Rich markup tags from a string. + + Handles escaped brackets (``\\[text]`` → ``[text]``). + """ + msg = msg.replace("\\[", "\x00") + msg = re.sub(r"\[/?[^\]]*\]", "", msg) + return msg.replace("\x00", "[") diff --git a/protest/core/collector.py b/protest/core/collector.py index 74dd75d..565cb71 100644 --- a/protest/core/collector.py +++ b/protest/core/collector.py @@ -2,12 +2,14 @@ from inspect import signature from itertools import groupby, product -from typing import TYPE_CHECKING, Annotated, Any, get_args, get_origin, get_type_hints +from typing import TYPE_CHECKING, Annotated, Any, get_args, get_origin from protest.di.decorators import get_fixture_marker, unwrap_fixture +from protest.di.hints import get_type_hints_compat from protest.di.markers import Use from protest.di.validation import _extract_from_params from protest.entities import FixtureCallable, SuitePath, TestItem, TestRegistration +from protest.evals.evaluator import EvalCase if TYPE_CHECKING: from collections.abc import Callable @@ -18,10 +20,7 @@ def _extract_use_fixtures(func: Callable[..., Any]) -> list[FixtureCallable]: """Extract fixtures referenced via Use() markers in function parameters.""" - try: - type_hints = get_type_hints(func, include_extras=True) - except Exception: - type_hints = {} + type_hints = get_type_hints_compat(func) fixtures: list[FixtureCallable] = [] for param_name in signature(func).parameters: @@ -164,6 +163,7 @@ def _expand_registration( xfail=test_reg.xfail, timeout=test_reg.timeout, retry=test_reg.retry, + is_eval=test_reg.is_eval, ) ] @@ -177,17 +177,23 @@ def _expand_registration( sources[index].get_id(value) for index, value in enumerate(combination) ] + item_tags = tags.copy() + for value in combination: + if isinstance(value, EvalCase) and value.tags: + item_tags.update(value.tags) + items.append( TestItem( func=test_reg.func, suite=suite, - tags=tags.copy(), + tags=item_tags, case_kwargs=case_kwargs, case_ids=case_ids, skip=test_reg.skip, xfail=test_reg.xfail, timeout=test_reg.timeout, retry=test_reg.retry, + is_eval=test_reg.is_eval, ) ) diff --git a/protest/core/execution/test_executor.py b/protest/core/execution/test_executor.py index 8fa92a3..10921b9 100644 --- a/protest/core/execution/test_executor.py +++ b/protest/core/execution/test_executor.py @@ -7,11 +7,12 @@ import time from contextlib import AsyncExitStack, asynccontextmanager from inspect import signature -from typing import TYPE_CHECKING, Any, get_type_hints +from typing import TYPE_CHECKING, Any from protest.core.collector import get_transitive_fixtures from protest.core.outcome import 
OutcomeBuilder, TestExecutionResult from protest.di.container import FixtureContainer +from protest.di.hints import get_type_hints_compat from protest.entities import ( FixtureCallable, TestItem, @@ -20,11 +21,13 @@ TestStartInfo, TestTeardownInfo, ) +from protest.entities.events import EvalPayload from protest.events.types import Event from protest.exceptions import FixtureError from protest.execution.async_bridge import ensure_async from protest.execution.capture import ( CaptureCurrentTest, + get_current_log_records, reset_current_node_id, set_current_node_id, ) @@ -112,8 +115,6 @@ async def _run_test( # noqa: PLR0912 - complex test execution flow, refactoring ) ) - start = time.perf_counter() - try: kwargs = await self._resolve_test_kwargs(item, ctx) except Exception as exc: @@ -122,13 +123,15 @@ async def _run_test( # noqa: PLR0912 - complex test execution flow, refactoring test_name=test_name, node_id=node_id, suite_path=item.suite_path, - duration=time.perf_counter() - start, + duration=0, output=buffer.getvalue(), error=exc, is_fixture_error=True, ) ) + start = time.perf_counter() + # Conditional skip (callable) - evaluated AFTER fixture resolution if item.skip and item.skip.is_conditional: try: @@ -162,26 +165,37 @@ async def _run_test( # noqa: PLR0912 - complex test execution flow, refactoring previous_errors: list[Exception] = [] error: Exception | None = None is_fixture_error = False + eval_payload: EvalPayload | None = None attempt = 1 # Initialized here; always overwritten by loop for attempt in range(1, max_attempts + 1): error = None is_fixture_error = False + eval_payload = None try: if item.timeout is not None: try: - await asyncio.wait_for( + return_value = await asyncio.wait_for( ensure_async(item.func, **kwargs), timeout=item.timeout, ) except asyncio.TimeoutError: - # Only wrap timeout from wait_for, not from test code - raise asyncio.TimeoutError( + # Raise the builtin TimeoutError, not asyncio.TimeoutError. + # On Python 3.11+ they are aliases, but on 3.10 they are + # distinct classes and reporters/tests check isinstance + # against the builtin. 
+ raise TimeoutError( f"Test exceeded timeout of {item.timeout}s" ) from None else: - await ensure_async(item.func, **kwargs) + return_value = await ensure_async(item.func, **kwargs) + + # For eval items: capture EvalPayload and determine pass/fail + if item.is_eval and isinstance(return_value, EvalPayload): + eval_payload = return_value + if not eval_payload.passed: + error = _build_eval_error(eval_payload) except FixtureError as exc: error = exc.original is_fixture_error = True @@ -231,6 +245,9 @@ async def _run_test( # noqa: PLR0912 - complex test execution flow, refactoring attempt=attempt, max_attempts=max_attempts, previous_errors=tuple(previous_errors), + is_eval=item.is_eval, + eval_payload=eval_payload, + log_records=tuple(get_current_log_records()), ) ) @@ -243,10 +260,7 @@ async def _resolve_test_kwargs( func_signature = signature(item.func) kwargs: dict[str, Any] = dict(item.case_kwargs) - try: - type_hints = get_type_hints(item.func, include_extras=True) - except Exception: - type_hints = {} + type_hints = get_type_hints_compat(item.func) for param_name, param in func_signature.parameters.items(): if param_name in kwargs: @@ -346,3 +360,14 @@ async def _acquire_fixture_semaphores( for _, sem in sems_sorted: await stack.enter_async_context(_semaphore_context(sem)) yield + + +def _build_eval_error(payload: EvalPayload) -> AssertionError: + """Build a descriptive AssertionError from failed eval scores.""" + parts = [] + for name, entry in payload.scores.items(): + if entry.skipped: + parts.append(f"{name}=⊘") + elif not entry.passed: + parts.append(f"{name}={entry.value}") + return AssertionError(f"{', '.join(parts)}") diff --git a/protest/core/outcome.py b/protest/core/outcome.py index b89a7bb..2563d95 100644 --- a/protest/core/outcome.py +++ b/protest/core/outcome.py @@ -1,11 +1,17 @@ """Test outcome classification and building.""" +from __future__ import annotations + from dataclasses import dataclass from enum import Enum, auto +from typing import TYPE_CHECKING, Any from protest.entities import SuitePath, TestCounts, TestOutcome, TestResult from protest.events.types import Event +if TYPE_CHECKING: + from protest.entities.events import EvalPayload + class OutcomeType(Enum): """Classification of test execution outcomes.""" @@ -35,13 +41,16 @@ class TestExecutionResult: attempt: int = 1 max_attempts: int = 1 previous_errors: tuple[Exception, ...] = () + is_eval: bool = False + eval_payload: EvalPayload | None = None + log_records: tuple[Any, ...] 
= () class OutcomeBuilder: """Builds TestOutcome from test execution results.""" def build(self, exec_result: TestExecutionResult) -> TestOutcome: - """Build a TestOutcome from execution result by classifying and constructing.""" + """Build a TestOutcome from execution result.""" outcome_type = self._classify(exec_result) match outcome_type: @@ -59,7 +68,6 @@ def build(self, exec_result: TestExecutionResult) -> TestOutcome: return self._build_fail(exec_result) def _classify(self, exec_result: TestExecutionResult) -> OutcomeType: - """Classify execution result into outcome type.""" match ( exec_result.skip_reason, exec_result.error, @@ -79,91 +87,51 @@ def _classify(self, exec_result: TestExecutionResult) -> OutcomeType: case _: return OutcomeType.FAIL - def _build_skip(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - skip_reason=exec_result.skip_reason, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, - ) - return TestOutcome(result, TestCounts(skipped=1), Event.TEST_SKIP) - - def _build_pass(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - duration=exec_result.duration, - output=exec_result.output, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, - ) - return TestOutcome(result, TestCounts(passed=1), Event.TEST_PASS) - - def _build_xpass(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - duration=exec_result.duration, - output=exec_result.output, - xfail_reason=exec_result.xfail_reason, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, + def _base_kwargs(self, er: TestExecutionResult) -> dict[str, object]: + """Common TestResult kwargs from an execution result.""" + return { + "name": er.test_name, + "node_id": er.node_id, + "suite_path": er.suite_path, + "duration": er.duration, + "output": er.output, + "timeout": er.timeout, + "attempt": er.attempt, + "max_attempts": er.max_attempts, + "previous_errors": er.previous_errors, + "is_eval": er.is_eval, + "eval_payload": er.eval_payload, + "log_records": er.log_records, + } + + def _build_skip(self, er: TestExecutionResult) -> TestOutcome: + kw = self._base_kwargs(er) + kw.update(duration=0, output="", skip_reason=er.skip_reason) + return TestOutcome(TestResult(**kw), TestCounts(skipped=1), Event.TEST_SKIP) # type: ignore[arg-type] + + def _build_pass(self, er: TestExecutionResult) -> TestOutcome: + return TestOutcome( + TestResult(**self._base_kwargs(er)), # type: ignore[arg-type] + TestCounts(passed=1), + Event.TEST_PASS, ) - return TestOutcome(result, TestCounts(xpassed=1), Event.TEST_XPASS) - - def _build_error(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - error=exec_result.error, - duration=exec_result.duration, - output=exec_result.output, - is_fixture_error=True, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - 
max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, - ) - return TestOutcome(result, TestCounts(errored=1), Event.TEST_FAIL) - - def _build_xfail(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - error=exec_result.error, - duration=exec_result.duration, - output=exec_result.output, - xfail_reason=exec_result.xfail_reason, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, - ) - return TestOutcome(result, TestCounts(xfailed=1), Event.TEST_XFAIL) - - def _build_fail(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - error=exec_result.error, - duration=exec_result.duration, - output=exec_result.output, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, - ) - return TestOutcome(result, TestCounts(failed=1), Event.TEST_FAIL) + + def _build_xpass(self, er: TestExecutionResult) -> TestOutcome: + kw = self._base_kwargs(er) + kw["xfail_reason"] = er.xfail_reason + return TestOutcome(TestResult(**kw), TestCounts(xpassed=1), Event.TEST_XPASS) # type: ignore[arg-type] + + def _build_error(self, er: TestExecutionResult) -> TestOutcome: + kw = self._base_kwargs(er) + kw.update(error=er.error, is_fixture_error=True) + return TestOutcome(TestResult(**kw), TestCounts(errored=1), Event.TEST_FAIL) # type: ignore[arg-type] + + def _build_xfail(self, er: TestExecutionResult) -> TestOutcome: + kw = self._base_kwargs(er) + kw.update(error=er.error, xfail_reason=er.xfail_reason) + return TestOutcome(TestResult(**kw), TestCounts(xfailed=1), Event.TEST_XFAIL) # type: ignore[arg-type] + + def _build_fail(self, er: TestExecutionResult) -> TestOutcome: + kw = self._base_kwargs(er) + kw["error"] = er.error + return TestOutcome(TestResult(**kw), TestCounts(failed=1), Event.TEST_FAIL) # type: ignore[arg-type] diff --git a/protest/core/runner.py b/protest/core/runner.py index 0347c2d..d25fb47 100644 --- a/protest/core/runner.py +++ b/protest/core/runner.py @@ -1,12 +1,15 @@ """Test runner orchestration.""" +from __future__ import annotations + import asyncio import time +from typing import TYPE_CHECKING, Any from protest.core.collector import Collector from protest.core.execution import ParallelExecutor, SuiteManager, TestExecutor from protest.core.outcome import OutcomeBuilder -from protest.core.session import ProTestSession +from protest.core.session import ProTestSession # noqa: TC001 — used at runtime from protest.core.tracker import SuiteTracker from protest.entities import ( RunResult, @@ -14,14 +17,20 @@ SessionSetupInfo, TestCounts, ) +from protest.evals.types import EvalCaseResult, EvalSuiteReport from protest.events.types import Event from protest.execution.capture import ( GlobalCapturePatch, + reset_event_bus, + set_event_bus, set_session_setup_capture, ) from protest.execution.context import cancellation_event from protest.execution.interrupt import InterruptHandler +if TYPE_CHECKING: + from protest.entities.events import TestResult + class TestRunner: """Executes tests with parallel support and fixture lifecycle management. 
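
    For orientation, a minimal sketch of driving this class directly
    (normally ``run_session`` constructs and runs it; ``session`` is
    assumed to be an already-configured ``ProTestSession``)::

        runner = TestRunner(session)
        result = runner.run()  # RunResult — success status + interrupted flag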
@@ -36,6 +45,7 @@ def __init__(self, session: ProTestSession) -> None: self._interrupt_handler = InterruptHandler() self._interrupted = False self._force_interrupt_emitted = False + self._eval_results: dict[str, list[EvalCaseResult]] = {} # Extracted components self._suite_manager = SuiteManager(session) @@ -61,10 +71,23 @@ def run(self) -> RunResult: self._interrupt_handler.uninstall() loop.close() - async def _main_loop(self) -> bool: + def _collect_eval_result(self, result: TestResult) -> None: + """Internal handler: collect eval results from TEST_PASS/FAIL events.""" + if not result.is_eval or result.eval_payload is None: + return + suite_name = result.suite_path.root_name if result.suite_path else "evals" + case_result = EvalCaseResult.from_test_result(result) + self._eval_results.setdefault(suite_name, []).append(case_result) + + async def _main_loop(self) -> bool: # noqa: PLR0915 """The main async loop for running tests.""" session_start = time.perf_counter() + # Register internal eval collector before tests run + self._eval_results.clear() + self._session.events.on(Event.TEST_PASS, self._collect_eval_result) + self._session.events.on(Event.TEST_FAIL, self._collect_eval_result) + collector = Collector() items = collector.collect(self._session) @@ -82,6 +105,7 @@ async def _main_loop(self) -> bool: cancel_token = cancellation_event.set( self._interrupt_handler.force_teardown_event ) + bus_token = set_event_bus(self._session.events) try: with GlobalCapturePatch(show_output=not self._session.capture): async with self._session: @@ -112,6 +136,8 @@ async def _main_loop(self) -> bool: ): suite_result = self._suite_manager.build_result(suite_path) await self._session.events.emit(Event.SUITE_END, suite_result) + # Emit EVAL_SUITE_END for eval suites + await self._emit_eval_suite_end(suite_path) await self._session.events.emit(Event.SESSION_TEARDOWN_START) finally: @@ -124,6 +150,7 @@ async def _main_loop(self) -> bool: await self._session.events.emit(Event.SESSION_INTERRUPTED, True) self._force_interrupt_emitted = True cancellation_event.reset(cancel_token) + reset_event_bus(bus_token) if self._interrupt_handler.should_stop_new_tests: self._interrupted = True @@ -151,8 +178,29 @@ async def _main_loop(self) -> bool: await self._session.events.wait_pending() await self._session.events.emit(Event.SESSION_COMPLETE, session_result) + # Unregister eval collector + self._session.events.off(Event.TEST_PASS, self._collect_eval_result) + self._session.events.off(Event.TEST_FAIL, self._collect_eval_result) + return ( total_counts.failed == 0 and total_counts.errored == 0 and total_counts.xpassed == 0 ) + + async def _emit_eval_suite_end(self, suite_path: Any) -> None: + """Emit EVAL_SUITE_END if this suite_path corresponds to an eval suite.""" + suite_name = ( + suite_path.root_name + if hasattr(suite_path, "root_name") + else str(suite_path) + ) + eval_cases = self._eval_results.get(suite_name) + if not eval_cases: + return + report = EvalSuiteReport( + suite_name=suite_name, + cases=tuple(eval_cases), + duration=sum(c.duration for c in eval_cases), + ) + await self._session.events.emit(Event.EVAL_SUITE_END, report) diff --git a/protest/core/session.py b/protest/core/session.py index 778dbb3..4b3d008 100644 --- a/protest/core/session.py +++ b/protest/core/session.py @@ -1,9 +1,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, TypeVar +from typing import TYPE_CHECKING, Any, TypeVar if TYPE_CHECKING: from collections.abc import Callable + from pathlib import Path from types 
import TracebackType from protest.compat import Self @@ -26,12 +27,15 @@ normalize_skip, normalize_xfail, ) +from protest.evals.results_writer import EvalResultsWriter from protest.events.bus import EventBus from protest.events.types import Event from protest.exceptions import InvalidMaxConcurrencyError from protest.execution.capture import set_session_teardown_capture from protest.filters.keyword import KeywordFilterPlugin +from protest.filters.kind import KindFilterPlugin from protest.filters.suite import SuiteFilterPlugin +from protest.history.plugin import HistoryPlugin from protest.reporting.ascii import AsciiReporter from protest.reporting.ctrf import CTRFReporter from protest.reporting.log_file import LogFilePlugin @@ -54,7 +58,13 @@ class ProTestSession: concurrency: Number of parallel test workers (default: 1). """ - def __init__(self, concurrency: int = 1) -> None: + def __init__( + self, + concurrency: int = 1, + history: bool = True, + history_dir: Path | None = None, + metadata: dict[str, Any] | None = None, + ) -> None: if concurrency < 1: raise InvalidMaxConcurrencyError(concurrency) @@ -72,6 +82,9 @@ def __init__(self, concurrency: int = 1) -> None: self._capture: bool = True self._setup_duration: float = 0 self._teardown_duration: float = 0 + self._history = history + self._history_dir = history_dir + self._metadata: dict[str, Any] = dict(metadata) if metadata else {} async def resolve_autouse(self) -> None: """Resolve all session autouse fixtures at session start.""" @@ -104,6 +117,18 @@ def capture(self) -> bool: def capture(self, value: bool) -> None: self._capture = value + @property + def history(self) -> bool: + return self._history + + @property + def history_dir(self) -> Path | None: + return self._history_dir + + @property + def metadata(self) -> dict[str, Any]: + return self._metadata + @property def setup_duration(self) -> float: """Duration of session setup (available after resolve_autouse).""" @@ -151,6 +176,7 @@ def test( skip_reason: str = "Skipped", xfail: bool | str | Xfail | None = None, retry: int | Retry | None = None, + is_eval: bool = False, ) -> Callable[[FuncT], FuncT]: def decorator(func: FuncT) -> FuncT: if timeout is not None and timeout < 0: @@ -168,6 +194,7 @@ def decorator(func: FuncT) -> FuncT: xfail=norm_xfail, timeout=timeout, retry=norm_retry, + is_eval=is_eval, ) ) return func @@ -179,10 +206,6 @@ def add_suite(self, suite: ProTestSuite) -> None: suite._attach_to_session(self) self._suites.append(suite) - def include_suite(self, suite: ProTestSuite) -> None: - """Alias for add_suite (backward compatibility).""" - self.add_suite(suite) - def bind( self, fn: FixtureCallable, @@ -246,6 +269,9 @@ def default_plugin_classes() -> list[type[PluginBase]]: TagFilterPlugin, SuiteFilterPlugin, KeywordFilterPlugin, + KindFilterPlugin, + HistoryPlugin, + EvalResultsWriter, RichReporter, AsciiReporter, CTRFReporter, @@ -345,7 +371,7 @@ async def __aexit__( exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool: - import time + import time # noqa: PLC0415 — only needed in __aexit__ teardown_start = time.perf_counter() set_session_teardown_capture(True) diff --git a/protest/core/suite.py b/protest/core/suite.py index 1176842..b73e9f0 100644 --- a/protest/core/suite.py +++ b/protest/core/suite.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING, TypeVar +from typing import TYPE_CHECKING, Any, TypeVar from protest.di.decorators import unwrap_fixture @@ -14,6 +14,7 @@ FixtureRegistration, Retry, Skip, + 
SuiteKind, SuitePath, TestRegistration, Xfail, @@ -42,18 +43,22 @@ class ProTestSuite: description: Optional description for documentation purposes. """ - def __init__( + def __init__( # noqa: PLR0913 self, name: str, max_concurrency: int | None = None, tags: list[str] | None = None, description: str | None = None, + kind: SuiteKind = SuiteKind.TEST, + metadata: dict[str, Any] | None = None, ) -> None: if max_concurrency is not None and max_concurrency < 1: raise InvalidMaxConcurrencyError(max_concurrency) self._name = name + self._kind = kind self._description = description + self._metadata: dict[str, Any] = dict(metadata) if metadata else {} self._session: ProTestSession | None = None self._parent_suite: ProTestSuite | None = None self._tests: list[TestRegistration] = [] @@ -70,6 +75,14 @@ def name(self) -> str: def description(self) -> str | None: return self._description + @property + def kind(self) -> SuiteKind: + return self._kind + + @property + def suite_metadata(self) -> dict[str, Any]: + return self._metadata + @property def full_path(self) -> SuitePath: """Return hierarchical path: Parent::Child::GrandChild.""" @@ -122,6 +135,7 @@ def test( # noqa: PLR0913 - test decorator requires flexible params skip_reason: str = "Skipped", xfail: bool | str | Xfail | None = None, retry: int | Retry | None = None, + is_eval: bool = False, ) -> Callable[[FuncT], FuncT]: def decorator(func: FuncT) -> FuncT: if timeout is not None and timeout < 0: @@ -139,6 +153,7 @@ def decorator(func: FuncT) -> FuncT: xfail=norm_xfail, timeout=timeout, retry=norm_retry, + is_eval=is_eval, ) ) return func diff --git a/protest/di/container.py b/protest/di/container.py index 8ab6e49..3a85ae0 100644 --- a/protest/di/container.py +++ b/protest/di/container.py @@ -11,7 +11,6 @@ Any, get_args, get_origin, - get_type_hints, overload, ) @@ -23,6 +22,7 @@ unwrap_fixture, ) from protest.di.factory import FixtureFactory +from protest.di.hints import get_type_hints_compat from protest.di.markers import Use from protest.di.proxy import FixtureErrorWrapper from protest.entities import ( @@ -741,8 +741,9 @@ async def _run_teardown_interruptible( """Run exit stack teardown, interruptible by cancellation event. Returns True if cancelled (should abort), False if completed normally. - Teardown runs in a thread pool so sync blocking code doesn't freeze - the event loop, allowing us to detect and respond to cancellation. + Teardown runs on the SAME event loop as fixture setup — creating a + new loop would break async resources (drivers, connections) that hold + references to the original loop. 
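+
+        Sketch of the breakage the old thread-pool approach risked —
+        asyncio primitives remember the loop they were first awaited on
+        (hypothetical fixture resource)::
+
+            lock = asyncio.Lock()   # first awaited during setup, on loop A
+            ...
+            async with lock:        # awaited on a fresh loop B -> RuntimeError
+                ...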
""" if interrupt_event is None: await exit_stack.__aexit__(exc_type, exc_val, exc_tb) @@ -751,23 +752,10 @@ async def _run_teardown_interruptible( if interrupt_event.is_set(): return True - # Run teardown in thread pool so sync code doesn't block event loop - loop = asyncio.get_running_loop() - - def run_sync_teardown() -> None: - # Create a new event loop for the thread to run async teardowns - new_loop = asyncio.new_event_loop() - try: - new_loop.run_until_complete( - exit_stack.__aexit__(exc_type, exc_val, exc_tb) - ) - finally: - new_loop.close() - - async def run_in_thread() -> None: - await loop.run_in_executor(None, run_sync_teardown) - - teardown_task = asyncio.create_task(run_in_thread()) + # Run teardown on the same loop, race with cancellation + teardown_task = asyncio.create_task( + exit_stack.__aexit__(exc_type, exc_val, exc_tb) + ) wait_cancel = asyncio.create_task(interrupt_event.wait()) done, _ = await asyncio.wait( @@ -793,10 +781,7 @@ def _analyze_and_store_dependencies( actual_func = unwrap_fixture(func) func_signature = signature(actual_func) - try: - type_hints = get_type_hints(actual_func, include_extras=True) - except Exception: - type_hints = {} + type_hints = get_type_hints_compat(actual_func) dependencies: dict[str, FixtureCallable] = {} for param_name, param in func_signature.parameters.items(): diff --git a/protest/di/hints.py b/protest/di/hints.py new file mode 100644 index 0000000..0af61bc --- /dev/null +++ b/protest/di/hints.py @@ -0,0 +1,118 @@ +"""Type hints resolution with PEP 563 / TYPE_CHECKING compatibility. + +Shared by the core DI system and evals runner. ``get_type_hints()`` alone +fails in two scenarios commonly encountered in ProTest user code; this +module wraps it with a cascade of fallbacks. + +------------------------------------------------------------------------ +Failure mode 1 — names defined in a local scope (PEP 563 stringification) +------------------------------------------------------------------------ + +With ``from __future__ import annotations``, all annotations are stored +as strings. ``get_type_hints()`` resolves them via ``eval()`` inside +``func.__globals__`` only. Names defined in the scope of an enclosing +function are NOT in ``__globals__``, so resolution raises ``NameError``. + +The most common form of this in ProTest is a parametrized eval defined +inside a helper, where the case source is a local variable:: + + def _build_suite(cases): + source = ForEach(cases) # local to _build_suite + + @suite.eval() + def my_eval(case: Annotated[EvalCase, From(source)]) -> str: + # ^^^^^^ refers to `source`, + # which is local to _build_suite + return str(case.inputs) + +When ``get_type_hints(my_eval)`` evaluates ``"Annotated[EvalCase, From(source)]"`` +inside ``my_eval.__globals__``, ``source`` is undefined → ``NameError``. + +Fix: walk the call stack with ``inspect.stack()`` and merge every frame's +``f_locals`` into a ``localns`` dict that we pass to ``get_type_hints()`` +on retry. This is registration-time only (decorator evaluation), never +in a hot path, so the cost of ``inspect.stack()`` is acceptable. + +Trade-off: ``localns`` ends up containing every local from every frame +on the stack. Name collisions silently resolve to the most recently +seen binding. In practice no collision has been observed in this project, +because annotations only reference DI markers (``Use``/``From``) plus +small, distinctively-named locals. 
+ +------------------------------------------------- +Failure mode 2 — TYPE_CHECKING-only imported types +------------------------------------------------- + +Types imported under ``if TYPE_CHECKING:`` are absent at runtime, so +``get_type_hints()`` raises ``NameError`` regardless of ``localns``:: + + if TYPE_CHECKING: + from heavy_module import HeavyType + + @factory() + def make() -> HeavyType: ... + +Fix: substitute ``Any`` for each unresolvable name and retry. The exact +type is irrelevant for DI dispatch — only the ``Use(...)``/``From(...)`` +marker inside ``Annotated[...]`` is consulted at injection time. +""" + +from __future__ import annotations + +import contextlib +import inspect +import re +from typing import Any, get_type_hints + + +def get_type_hints_compat(func: Any) -> dict[str, Any]: + """Resolve type hints with PEP 563 / TYPE_CHECKING fallbacks. + + See module docstring for the failure modes this function exists to + handle. Cascade: (1) plain call, (2) retry with stack-collected + ``localns``, (3) retry while substituting ``Any`` for unresolvable + names. All fallbacks run at registration time only. + """ + with contextlib.suppress(Exception): + return get_type_hints(func, include_extras=True) + + # Build a namespace from the entire call stack so that locals from + # an enclosing helper (e.g. `source = ForEach(...)`) become visible + # to `get_type_hints`'s eval. See module docstring, failure mode 1. + localns: dict[str, Any] = {} + with contextlib.suppress(Exception): + for frame_info in inspect.stack(): + localns.update(frame_info.frame.f_locals) + + with contextlib.suppress(Exception): + return get_type_hints(func, localns=localns, include_extras=True) + + # Last resort for TYPE_CHECKING-only types. See module docstring, + # failure mode 2. + return _get_type_hints_substituting_any(func, localns) + + +def _get_type_hints_substituting_any( + func: Any, + localns: dict[str, Any], +) -> dict[str, Any]: + """Retry ``get_type_hints``, replacing each NameError'd name with ``Any``. + + Used as a last-resort fallback when a referenced type is unresolvable + at runtime (typically a TYPE_CHECKING-only import). The substituted + ``Any`` is only used as a placeholder so resolution can complete; the + DI system reads the ``Use(...)``/``From(...)`` marker out of the + ``Annotated[...]``, not the underlying type. 
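+
+    Sketch (``make``/``HeavyType`` from the module docstring's failure
+    mode 2)::
+
+        get_type_hints_compat(make)  # {"return": Any} — HeavyType substituted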
+ """ + localns = dict(localns) + for _ in range(20): + try: + return get_type_hints(func, localns=localns, include_extras=True) + except NameError as exc: + match = re.search(r"name '(\w+)' is not defined", str(exc)) + if not match: + break + localns[match.group(1)] = Any + except Exception: + break + return {} diff --git a/protest/di/validation.py b/protest/di/validation.py index 2d6cd18..1026bca 100644 --- a/protest/di/validation.py +++ b/protest/di/validation.py @@ -3,8 +3,9 @@ from __future__ import annotations from inspect import signature -from typing import TYPE_CHECKING, Annotated, Any, get_args, get_origin, get_type_hints +from typing import TYPE_CHECKING, Annotated, Any, get_args, get_origin +from protest.di.hints import get_type_hints_compat from protest.di.markers import ForEach, From from protest.exceptions import ParameterizedFixtureError from protest.utils import get_callable_name @@ -15,10 +16,7 @@ def _extract_from_params(func: Callable[..., Any]) -> dict[str, ForEach[Any]]: """Extract parameters annotated with From(source).""" - try: - type_hints = get_type_hints(func, include_extras=True) - except Exception: - type_hints = {} + type_hints = get_type_hints_compat(func) result: dict[str, ForEach[Any]] = {} for param_name in signature(func).parameters: diff --git a/protest/entities/__init__.py b/protest/entities/__init__.py index ec91eb9..3016ebb 100644 --- a/protest/entities/__init__.py +++ b/protest/entities/__init__.py @@ -4,12 +4,15 @@ FixtureMarker, FixtureRegistration, FixtureScope, + SuiteKind, TestItem, TestOutcome, TestRegistration, format_fixture_scope, ) from protest.entities.events import ( + EvalPayload, + EvalScoreEntry, FixtureInfo, HandlerInfo, RunResult, @@ -31,6 +34,8 @@ from protest.entities.xfail import Xfail, normalize_xfail __all__ = [ + "EvalPayload", + "EvalScoreEntry", "Fixture", "FixtureCallable", "FixtureInfo", @@ -44,6 +49,7 @@ "SessionResult", "SessionSetupInfo", "Skip", + "SuiteKind", "SuitePath", "SuiteResult", "SuiteSetupInfo", diff --git a/protest/entities/core.py b/protest/entities/core.py index 465c5d3..d8b157b 100644 --- a/protest/entities/core.py +++ b/protest/entities/core.py @@ -20,6 +20,19 @@ FixtureCallable: TypeAlias = "Callable[..., Any]" +class SuiteKind(str, Enum): + """Kind of suite — determines behavior (eval wiring, history, reporting). + + Inherits from `str` (not `StrEnum`) for Python 3.10 compatibility. 
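+
+    Because members are also plain strings, they compare and render as their
+    value (a quick illustration, not part of the public API)::
+
+        SuiteKind.EVAL == "eval"    # True — usable wherever a str is expected
+        str(SuiteKind.EVAL)         # "eval", via the __str__ override below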
+ """ + + TEST = "test" + EVAL = "eval" + + def __str__(self) -> str: + return self.value + + class FixtureScope(Enum): """Scope level for fixtures.""" @@ -49,6 +62,7 @@ class TestRegistration: xfail: Xfail | None = None timeout: float | None = None retry: Retry | None = None + is_eval: bool = False @dataclass(frozen=True, slots=True) @@ -111,6 +125,7 @@ class TestItem: xfail: Xfail | None = None timeout: float | None = None retry: Retry | None = None + is_eval: bool = False @property def test_name(self) -> str: diff --git a/protest/entities/events.py b/protest/entities/events.py index f87d9d9..d67388d 100644 --- a/protest/entities/events.py +++ b/protest/entities/events.py @@ -1,13 +1,44 @@ from __future__ import annotations -from dataclasses import dataclass -from typing import TYPE_CHECKING +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from protest.entities import FixtureScope, SuitePath from protest.events.types import Event +@dataclass(frozen=True, slots=True) +class EvalScoreEntry: + """A single score entry from an evaluator.""" + + value: float | bool | str + passed: bool = True + skipped: bool = False + + +@dataclass(frozen=True, slots=True) +class EvalPayload: + """Structured payload for eval results, carried on TestResult.""" + + case_name: str + passed: bool + task_duration: float + inputs: Any = None + output: Any = None + expected_output: Any = None + scores: dict[str, EvalScoreEntry] = field(default_factory=dict) + case_hash: str = "" + eval_hash: str = "" + task_input_tokens: int = 0 + task_output_tokens: int = 0 + task_cost: float = 0.0 + judge_call_count: int = 0 + judge_input_tokens: int = 0 + judge_output_tokens: int = 0 + judge_cost: float = 0.0 + + @dataclass(frozen=True, slots=True) class TestCounts: passed: int = 0 @@ -43,6 +74,9 @@ class TestResult: attempt: int = 1 max_attempts: int = 1 previous_errors: tuple[Exception, ...] = () + is_eval: bool = False + eval_payload: EvalPayload | None = None + log_records: tuple[Any, ...] 
= () @dataclass(frozen=True, slots=True) diff --git a/protest/entities/suite_path.py b/protest/entities/suite_path.py index 38c78a2..4b7223e 100644 --- a/protest/entities/suite_path.py +++ b/protest/entities/suite_path.py @@ -58,6 +58,11 @@ def lower(self) -> str: """Return lowercase string representation for case-insensitive comparison.""" return self._path.lower() + @property + def root_name(self) -> str: + """Return the top-level suite name: 'A::B::C' -> 'A'.""" + return self.parts[0] if self.parts else "" + def __str__(self) -> str: return self._path diff --git a/protest/evals/__init__.py b/protest/evals/__init__.py new file mode 100644 index 0000000..9882d7f --- /dev/null +++ b/protest/evals/__init__.py @@ -0,0 +1,45 @@ +"""ProTest evals — native eval support.""" + +from protest.evals.evaluator import ( + EvalCase, + EvalContext, + Evaluator, + Metric, + Reason, + ShortCircuit, + Verdict, + evaluator, +) +from protest.evals.suite import EvalSuite +from protest.evals.types import ( + EvalCaseResult, + EvalScore, + EvalSuiteReport, + Judge, + JudgeInfo, + JudgeResponse, + ModelLabel, + ScoreStats, + TaskResult, +) + +__all__ = [ + "EvalCase", + "EvalCaseResult", + "EvalContext", + "EvalScore", + "EvalSuite", + "EvalSuiteReport", + "Evaluator", + "Judge", + "JudgeInfo", + "JudgeResponse", + "Metric", + "ModelLabel", + "Reason", + "ScoreStats", + "ShortCircuit", + "TaskResult", + "Verdict", + "evaluator", +] diff --git a/protest/evals/evaluator.py b/protest/evals/evaluator.py new file mode 100644 index 0000000..8f3927d --- /dev/null +++ b/protest/evals/evaluator.py @@ -0,0 +1,332 @@ +"""Evaluator primitives. + +An evaluator is a function decorated with ``@evaluator`` that receives an +``EvalContext`` and returns a verdict. The decorator wraps the function in an +``Evaluator`` instance that carries identity (for hashing/history) and exposes +two distinct entry points: + +- ``ev(keyword=value, ...)`` — bind params, return a new ``Evaluator`` +- ``ev.run(ctx)`` — execute against an ``EvalContext`` (called by the framework) + +Plain callables are not accepted in ``evaluators=[...]``; use ``@evaluator``:: + + @evaluator + def contains_keywords(ctx: EvalContext, keywords: list[str]) -> ContainsKeywordsResult: + found = sum(1 for k in keywords if k.lower() in ctx.output.lower()) + return ContainsKeywordsResult(keyword_recall=found / len(keywords), ...) + + # Bind params → returns a fresh Evaluator with kwargs frozen in. + evaluators=[contains_keywords(keywords=["paris", "france"])] + + # No params → use the bare Evaluator directly. + @evaluator + def not_empty(ctx: EvalContext) -> bool: + return bool(ctx.output.strip()) + +Async evaluators are supported:: + + @evaluator + async def llm_judge(ctx: EvalContext, model: str = "haiku") -> bool: + ... + +Evaluators return either bool (simple verdict) or a dataclass (structured result). 
+The framework reads fields by type: +- bool → verdict (pass/fail = all(bool_fields)) +- float → metric (aggregated in stats) +- str → reason (displayed on failure) +""" + +from __future__ import annotations + +import dataclasses +from dataclasses import dataclass, field +from typing import ( + TYPE_CHECKING, + Annotated, + Any, + Generic, + TypeVar, + get_args, + get_origin, + get_type_hints, +) + +from protest.evals.hashing import _canonical +from protest.evals.types import EvalScore + +if TYPE_CHECKING: + from collections.abc import Callable + + from protest.evals.types import Judge + +InputT = TypeVar("InputT") +OutputT = TypeVar("OutputT") +T = TypeVar("T") + + +@dataclass +class EvalContext(Generic[InputT, OutputT]): + """Context passed to evaluator functions. + + Dual role: read-only DTO (inputs, output, expected) + mutable accumulator + for judge call stats (tokens, cost, call count). One instance per case, + shared sequentially across evaluators, discarded after scoring. + + Note: judge stats accumulate via ctx.judge() side-effects. If evaluators + are ever parallelized within a case, the accumulators will need isolation. + """ + + name: str + inputs: InputT + output: OutputT + expected_output: OutputT | None + metadata: Any + duration: float + _judge: Judge | None = field(default=None, repr=False) + _judge_call_count: int = field(default=0, repr=False, init=False) + _judge_input_tokens: int = field(default=0, repr=False, init=False) + _judge_output_tokens: int = field(default=0, repr=False, init=False) + _judge_cost: float = field(default=0.0, repr=False, init=False) + + async def judge(self, prompt: str, output_type: type[T]) -> T: + """Call the configured LLM judge and return the typed output. + + Tokens and cost from JudgeResponse are accumulated internally + and flow to EvalPayload for history/display. The evaluator + only sees the unwrapped output. + + Raises RuntimeError if no judge was configured on the session. + """ + if self._judge is None: + raise RuntimeError( + f"Evaluator for case '{self.name}' called ctx.judge() but no " + "judge is configured. Pass judge= to EvalSuite()." + ) + self._judge_call_count += 1 + response = await self._judge.judge(prompt, output_type) + if response.input_tokens is not None: + self._judge_input_tokens += response.input_tokens + if response.output_tokens is not None: + self._judge_output_tokens += response.output_tokens + if response.cost is not None: + self._judge_cost += response.cost + return response.output + + @property + def judge_call_count(self) -> int: + return self._judge_call_count + + @property + def judge_input_tokens(self) -> int: + return self._judge_input_tokens + + @property + def judge_output_tokens(self) -> int: + return self._judge_output_tokens + + @property + def judge_cost(self) -> float: + return self._judge_cost + + +@dataclass +class EvalCase: + """Typed container for eval case data in ForEach. + + `name` is required: it identifies the case across history, reporting, and + file-based output. Two cases sharing a name collide silently in those + downstream consumers. 
+ + Usage:: + + cases = ForEach([ + EvalCase(inputs="Who is Marie?", name="marie_lookup", expected="Marie, Resistance"), + EvalCase(inputs="Who is Pierre?", name="pierre_lookup", expected="Pierre, arrest"), + ]) + + @suite.eval(evaluators=[contains_facts]) + def my_eval(case: Annotated[EvalCase, From(cases)]) -> str: + return ask(case.inputs) + """ + + inputs: Any + name: str + expected: Any = None + evaluators: list[Any] = field(default_factory=list) + tags: list[str] = field(default_factory=list) + metadata: dict[str, Any] = field(default_factory=dict) + + def __post_init__(self) -> None: + if not self.name: + raise ValueError( + "EvalCase.name must be a non-empty string " + "(used for history tracking and case identity)." + ) + validate_evaluators(self.evaluators) + + def __repr__(self) -> str: + return self.name + + +class ShortCircuit: + """Group evaluators with fail-fast behavior. + + The first Verdict=False stops the group. Evaluators outside + the ShortCircuit run regardless. + + Usage:: + + evaluators=[ + not_empty, + ShortCircuit([ + contains_expected_facts(min_score=0.5), + llm_judge(rubric="..."), # skipped if above fails + ]), + ] + """ + + def __init__(self, evaluators: list[Evaluator]) -> None: + validate_evaluators(evaluators, _inside_short_circuit=True) + self.evaluators = evaluators + + def evaluator_identity(self) -> dict[str, Any]: + """Identity is the ordered list of inner evaluators.""" + return {"short_circuit": [_canonical(e) for e in self.evaluators]} + + +def validate_evaluators( + items: list[Any], *, _inside_short_circuit: bool = False +) -> None: + """Reject anything that isn't a registered Evaluator (or ShortCircuit). + + ``@evaluator`` is the only sanctioned path to producing an evaluator. Plain + callables used to be accepted, which forced a runtime ``isinstance`` dispatch + in the executor and made the evaluators list type effectively ``list[Any]``. + Failing loud at registration moves the error to the boundary and lets + downstream code work on a uniform ``Evaluator | ShortCircuit`` Union. + """ + for item in items: + if isinstance(item, Evaluator): + continue + if isinstance(item, ShortCircuit) and not _inside_short_circuit: + continue + if _inside_short_circuit and isinstance(item, ShortCircuit): + raise TypeError( + "ShortCircuit cannot nest another ShortCircuit; " + "flatten the inner evaluators into the outer group." + ) + if callable(item): + raise TypeError( + f"{item!r} is a plain callable, not an Evaluator. " + "Wrap it with @evaluator (from protest.evals) so it carries " + "identity, hashing, and a typed run() method." + ) + raise TypeError( + f"Expected Evaluator or ShortCircuit, got {type(item).__name__}. " + "Only objects produced by @evaluator (or ShortCircuit groups) " + "are accepted in evaluators=[...]." + ) + + +class Metric: + """Annotate a float/int field as a metric for stats aggregation.""" + + +class Verdict: + """Annotate a bool field as a verdict for pass/fail.""" + + +class Reason: + """Annotate a str field as a reason displayed on failure.""" + + +def extract_scores_from_result(result: Any, evaluator_name: str) -> list[Any]: + """Extract EvalScore instances from an evaluator result. + + For bool returns: a single verdict named after the evaluator. + For dataclass returns: only fields annotated with Metric/Verdict/Reason + are extracted. Unannotated fields are ignored (free metadata). + + Raises: + TypeError: If result is not bool or dataclass. 
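+
+    A minimal sketch of the dataclass path (names are illustrative)::
+
+        @dataclass
+        class KeywordResult:
+            recall: Annotated[float, Metric]
+            ok: Annotated[bool, Verdict]
+            note: str = ""                  # unannotated → ignored
+
+        extract_scores_from_result(KeywordResult(0.5, True), "keywords")
+        # → [EvalScore(name="recall", value=0.5), EvalScore(name="ok", value=True)]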
+ """ + if isinstance(result, bool): + return [EvalScore(name=evaluator_name, value=result)] + + if dataclasses.is_dataclass(result) and not isinstance(result, type): + scores = [] + hints = get_type_hints(type(result), include_extras=True) + for f in dataclasses.fields(result): + ann = hints.get(f.name) + if ann is None or get_origin(ann) is not Annotated: + continue + for meta in get_args(ann)[1:]: + if isinstance(meta, type) and issubclass( + meta, (Metric, Verdict, Reason) + ): + scores.append(EvalScore(name=f.name, value=getattr(result, f.name))) + break + return scores + + type_name = type(result).__name__ + raise TypeError(f"Evaluator must return bool or dataclass, got {type_name}") + + +class Evaluator: + """A configured evaluator — callable with identity for hashing. + + Created by the ``@evaluator`` decorator. Two distinct entry points: + + - ``ev(keyword=value, ...)`` — bind params, return a new Evaluator + - ``ev.run(ctx)`` — execute against an EvalContext + + Splitting these avoids the "callable that does two things based on the + type of arg[0]" anti-pattern: each method has a single, monomorphic + signature that type checkers can read without overload gymnastics. + """ + + __slots__ = ("_fn", "_kwargs", "_name", "_qualname") + + def __init__( + self, fn: Callable[..., Any], kwargs: dict[str, Any] | None = None + ) -> None: + self._fn = fn + self._kwargs = kwargs or {} + self._name = fn.__name__ + self._qualname = fn.__qualname__ + + @property + def name(self) -> str: + return self._name + + def __call__(self, **kwargs: Any) -> Evaluator: + # Re-binding form: always returns a fresh clone. Returning `self` + # for the no-kwargs case used to make `f is f()` accidentally true, + # which surprised users expecting `()` to behave like a constructor. + return Evaluator(self._fn, {**self._kwargs, **kwargs}) + + def run(self, ctx: EvalContext[Any, Any], /) -> Any: + return self._fn(ctx, **self._kwargs) + + def evaluator_identity(self) -> dict[str, Any]: + identity: dict[str, Any] = {"fn": self._qualname} + if self._kwargs: + identity["kwargs"] = self._kwargs + return identity + + def __repr__(self) -> str: + if self._kwargs: + kw = ", ".join(f"{k}={v!r}" for k, v in self._kwargs.items()) + return f"Evaluator({self._name}({kw}))" + return f"Evaluator({self._name})" + + +def evaluator(fn: Callable[..., Any]) -> Evaluator: + """Turn a function into a ProTest evaluator. + + The decorator is the only sanctioned way to produce an object that + ``evaluators=[...]`` will accept. Plain callables are rejected at + registration so the executor can rely on a uniform Union type instead + of dispatching at runtime. + """ + return Evaluator(fn) diff --git a/protest/evals/evaluators.py b/protest/evals/evaluators.py new file mode 100644 index 0000000..8866961 --- /dev/null +++ b/protest/evals/evaluators.py @@ -0,0 +1,160 @@ +"""Built-in evaluators for common eval patterns. + +Evaluators return either bool (simple verdict) or a dataclass with +annotated fields: Annotated[bool, Verdict], Annotated[float, Metric], +Annotated[str, Reason]. Unannotated fields are ignored by the runner. 
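+
+A typical combination on an eval (``suite``, ``cases`` and ``answer`` are
+assumed to exist; only the evaluator calls come from this module)::
+
+    @suite.eval(evaluators=[
+        not_empty,
+        contains_keywords(keywords=["refund", "policy"], min_recall=0.5),
+        max_length(max_chars=300),
+    ])
+    def concise_answer(case: Annotated[EvalCase, From(cases)]) -> str:
+        return answer(case.inputs)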
+""" + +from __future__ import annotations + +import json as json_module +import re +from collections.abc import Sized +from dataclasses import dataclass +from typing import Annotated, Any + +from protest.evals.evaluator import EvalContext, Metric, Verdict, evaluator + + +@dataclass(frozen=True, slots=True) +class ContainsKeywordsResult: + keyword_recall: Annotated[float, Metric] + all_keywords_present: Annotated[bool, Verdict] + + +@dataclass(frozen=True, slots=True) +class DoesNotContainResult: + no_forbidden_words: Annotated[bool, Verdict] + + +@dataclass(frozen=True, slots=True) +class MaxLengthResult: + conciseness: Annotated[float, Metric] + within_limit: Annotated[bool, Verdict] + + +@dataclass(frozen=True, slots=True) +class JsonValidResult: + valid_json: Annotated[bool, Verdict] + has_required_keys: Annotated[bool, Verdict] + + +@dataclass(frozen=True, slots=True) +class WordOverlapResult: + overlap: Annotated[float, Metric] + + +@evaluator +def contains_keywords( + ctx: EvalContext[Any, str], keywords: list[str], min_recall: float = 1.0 +) -> ContainsKeywordsResult: + """Check that the output contains expected keywords (case-insensitive). + + `min_recall` is the minimum fraction of keywords that must appear for + the verdict to pass. Default `1.0` requires all keywords to be present; + set to `0.5` for "at least half", `0.0` to ignore the verdict and only + track the metric. + """ + output_lower = ctx.output.lower() + found = sum(1 for kw in keywords if kw.lower() in output_lower) + total = len(keywords) + recall = found / total if total else 1.0 + return ContainsKeywordsResult( + keyword_recall=recall, + all_keywords_present=recall >= min_recall, + ) + + +@evaluator +def contains_expected(ctx: EvalContext[Any, str], case_sensitive: bool = False) -> bool: + """Check that the output contains expected_output as a substring.""" + if ctx.expected_output is None: + return True + if case_sensitive: + return ctx.expected_output in ctx.output + return ctx.expected_output.lower() in ctx.output.lower() + + +@evaluator +def does_not_contain( + ctx: EvalContext[Any, str], forbidden: list[str], case_sensitive: bool = False +) -> DoesNotContainResult: + """Check that the output does not contain forbidden words.""" + output = ctx.output if case_sensitive else ctx.output.lower() + found = [w for w in forbidden if (w if case_sensitive else w.lower()) in output] + return DoesNotContainResult(no_forbidden_words=len(found) == 0) + + +@evaluator +def not_empty(ctx: EvalContext[Any, Any]) -> bool: + """Check that the output is not empty. + + - `None` -> False. + - `str`: False if empty or whitespace-only. + - Sized (list, dict, set, tuple, ...): False if `len() == 0`. + - Other (int, float, dataclass, custom objects): True. 
+ """ + if ctx.output is None: + return False + if isinstance(ctx.output, str): + return len(ctx.output.strip()) > 0 + if isinstance(ctx.output, Sized): + return len(ctx.output) > 0 + return True + + +@evaluator +def max_length(ctx: EvalContext[Any, str], max_chars: int = 500) -> MaxLengthResult: + """Check that the output doesn't exceed a character limit.""" + length = len(ctx.output) + return MaxLengthResult( + conciseness=min(1.0, max_chars / max(length, 1)), + within_limit=length <= max_chars, + ) + + +@evaluator +def min_length(ctx: EvalContext[Any, str], min_chars: int = 1) -> bool: + """Check that the output meets a minimum length.""" + return len(ctx.output) >= min_chars + + +@evaluator +def matches_regex(ctx: EvalContext[Any, str], pattern: str, flags: int = 0) -> bool: + """Check that the output matches a regex pattern.""" + return bool(re.search(pattern, ctx.output, flags)) + + +@evaluator +def json_valid( + ctx: EvalContext[Any, str], required_keys: list[str] | None = None +) -> JsonValidResult: + """Check that the output is valid JSON, optionally with required keys.""" + if required_keys is None: + required_keys = [] + try: + parsed = json_module.loads(ctx.output) + except (json_module.JSONDecodeError, TypeError): + return JsonValidResult(valid_json=False, has_required_keys=False) + + has_keys = ( + all(k in parsed for k in required_keys) + if required_keys and isinstance(parsed, dict) + else True + ) + return JsonValidResult(valid_json=True, has_required_keys=has_keys) + + +@evaluator +def word_overlap(ctx: EvalContext[Any, str]) -> WordOverlapResult: + """Compute word overlap between output and expected_output (tracking-only).""" + if ctx.expected_output is None: + return WordOverlapResult(overlap=1.0) + expected = str(ctx.expected_output) + expected_words = set(expected.lower().split()) + output_words = set(ctx.output.lower().split()) + if not expected_words: + return WordOverlapResult(overlap=1.0) + return WordOverlapResult( + overlap=len(expected_words & output_words) / len(expected_words), + ) diff --git a/protest/evals/hashing.py b/protest/evals/hashing.py new file mode 100644 index 0000000..5b3114a --- /dev/null +++ b/protest/evals/hashing.py @@ -0,0 +1,109 @@ +"""Content hashing for eval cases — detect when cases or scoring change. + +Hashes capture identity + configuration, not implementation. A renamed +parameter changes the hash; a rewritten function body does not. This is +a deliberate trade-off: we detect config drift, not code drift. + +Custom evaluators can implement ``evaluator_identity()`` to control +exactly what gets hashed. Built-in types (dataclass, functools.partial, +plain callable) are introspected automatically as a fallback. +""" + +from __future__ import annotations + +import dataclasses +import functools +import hashlib +import json +from typing import Any, Protocol, runtime_checkable + +HASH_LENGTH = 12 + + +class CanonicalError(TypeError): + """Raised when an object cannot be converted to a canonical form.""" + + +@runtime_checkable +class HasEvaluatorIdentity(Protocol): + """Protocol for objects that provide explicit hashing identity.""" + + def evaluator_identity(self) -> dict[str, Any]: ... 
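+
+    # For example, a custom evaluator object can pin its hash to just its
+    # configuration (hypothetical class, shown for illustration):
+    #
+    #     class RubricEvaluator:
+    #         def __init__(self, rubric: str) -> None:
+    #             self.rubric = rubric
+    #
+    #         def evaluator_identity(self) -> dict[str, Any]:
+    #             return {"fn": "RubricEvaluator", "rubric": self.rubric}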
+ + +def compute_case_hash(inputs: Any, expected_output: Any) -> str: + """Hash the case content (inputs + expected_output).""" + data = {"inputs": _canonical(inputs), "expected": _canonical(expected_output)} + return _hash(data) + + +def compute_eval_hash(evaluators: list[Any]) -> str: + """Hash the scoring config (evaluators only).""" + data = {"evaluators": [_canonical(e) for e in evaluators]} + return _hash(data) + + +def _hash(data: Any) -> str: + raw = json.dumps(data, sort_keys=True) + return hashlib.sha256(raw.encode()).hexdigest()[:HASH_LENGTH] + + +def _canonical(obj: Any) -> Any: # noqa: PLR0911 + """Convert an object to a canonical JSON-serializable form. + + Resolution order: + 1. Primitives, list, tuple, dict — native support + 2. ``evaluator_identity()`` — explicit, user-controlled + 3. Dataclass / functools.partial / callable — introspection fallback + 4. Anything else → CanonicalError + """ + # --- primitives & containers --- + if obj is None or isinstance(obj, (bool, int, float, str)): + return obj + if isinstance(obj, (list, tuple)): + return [_canonical(item) for item in obj] + if isinstance(obj, dict): + return { + str(k): _canonical(v) + for k, v in sorted(obj.items(), key=lambda item: str(item[0])) + } + + # --- explicit identity (user-controlled) --- + if isinstance(obj, HasEvaluatorIdentity): + return _canonical(obj.evaluator_identity()) + + # --- introspection fallback --- + + # Dataclasses — public fields only (skip _ prefixed runtime internals) + if dataclasses.is_dataclass(obj) and not isinstance(obj, type): + return { + "__type__": type(obj).__qualname__, + **{ + f.name: _canonical(getattr(obj, f.name)) + for f in dataclasses.fields(obj) + if not f.name.startswith("_") + }, + } + # functools.partial — qualname + bound kwargs + if isinstance(obj, functools.partial): + return { + "fn": _fn_qualname(obj.func), + "args": _canonical(list(obj.args)) if obj.args else [], + "kwargs": _canonical(dict(obj.keywords)) if obj.keywords else {}, + } + # Plain callable — qualname only + if callable(obj): + qualname = _fn_qualname(obj) + if qualname is not None: + return {"fn": qualname} + + raise CanonicalError( + f"Cannot canonicalize {type(obj).__name__!r}. " + f"Implement evaluator_identity() or use a supported type " + f"(primitives, list, dict, dataclass, callable)." + ) + + +def _fn_qualname(fn: Any) -> str | None: + """Extract a stable qualified name from a callable.""" + return getattr(fn, "__qualname__", None) or getattr(fn, "__name__", None) diff --git a/protest/evals/results_writer.py b/protest/evals/results_writer.py new file mode 100644 index 0000000..71c3725 --- /dev/null +++ b/protest/evals/results_writer.py @@ -0,0 +1,142 @@ +"""EvalResultsWriter — writes per-case eval results as markdown files. + +Listens to TEST_PASS/FAIL events, filters for eval cases, and writes +a markdown file for each case to .protest/results/_/. 
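+
+For a suite named "chatbot", the resulting layout looks roughly like this
+(the case name is illustrative; the timestamped directory comes from
+_make_run_dir below)::
+
+    .protest/results/chatbot_20250101_120000/marie_lookup.md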
+""" + +from __future__ import annotations + +import re +from datetime import datetime, timezone +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from protest import console +from protest.evals.types import EvalCaseResult, EvalScore, EvalSuiteReport +from protest.plugin import PluginBase + +if TYPE_CHECKING: + from protest.entities.events import TestResult + from protest.plugin import PluginContext + +DEFAULT_RESULTS_DIR = Path(".protest") / "results" + + +class EvalResultsWriter(PluginBase): + """Writes per-case eval result files as markdown.""" + + name = "eval-results-writer" + description = "Write eval case result files" + + def __init__(self, history_dir: Path | None = None) -> None: + self._results_base = ( + (history_dir / "results") if history_dir else DEFAULT_RESULTS_DIR + ) + self._run_dirs: dict[str, Path] = {} + + @classmethod + def activate(cls, ctx: PluginContext) -> EvalResultsWriter: + return cls(history_dir=ctx.get("history_dir")) + + def on_test_pass(self, result: TestResult) -> None: + self._maybe_write(result) + + def on_test_fail(self, result: TestResult) -> None: + self._maybe_write(result) + + def _maybe_write(self, result: TestResult) -> None: + if not result.is_eval or result.eval_payload is None: + return + suite_name = result.suite_path.root_name if result.suite_path else "evals" + case_result = EvalCaseResult.from_test_result(result) + self._write_case_file(case_result, suite_name) + + def _write_case_file(self, case_result: EvalCaseResult, suite_name: str) -> None: + if suite_name not in self._run_dirs: + self._run_dirs[suite_name] = _make_run_dir(suite_name, self._results_base) + _write_case_file(case_result, self._run_dirs[suite_name]) + + def on_eval_suite_end(self, report: Any) -> None: + """Print results dir path for the suite.""" + + if not isinstance(report, EvalSuiteReport): + return + run_dir = self._run_dirs.get(report.suite_name) + if run_dir: + console.print(f" Results: {run_dir}", prefix=False) + + +# --------------------------------------------------------------------------- +# File writing helpers +# --------------------------------------------------------------------------- + + +def _make_run_dir(suite_name: str, base_dir: Path | None = None) -> Path: + """Create and return the timestamped directory for this run.""" + base = base_dir or DEFAULT_RESULTS_DIR + ts = datetime.now(tz=timezone.utc).strftime("%Y%m%d_%H%M%S") + safe_suite = re.sub(r"[^\w\-]", "_", suite_name) + run_dir = base / f"{safe_suite}_{ts}" + run_dir.mkdir(parents=True, exist_ok=True) + return run_dir + + +def _write_case_file(case: EvalCaseResult, run_dir: Path) -> None: + """Write a markdown file for a single eval case.""" + safe_name = re.sub(r"[^\w\-]", "_", case.case_name) + path = run_dir / f"{safe_name}.md" + path.write_text(_render_case(case), encoding="utf-8") + + +def _render_case(case: EvalCaseResult) -> str: + status = "PASS ✓" if case.passed else "FAIL ✗" + duration = _format_case_duration(case.duration) + lines: list[str] = [ + f"# {case.case_name} — {status} ({duration})", + "", + ] + + lines += ["## Input", "", _format_value(case.inputs), ""] + lines += ["## Output", "", _format_value(case.output), ""] + lines += ["## Expected", "", _format_value(case.expected_output), ""] + + if case.scores: + lines += ["## Scores", ""] + for score in case.scores: + lines.append(_format_score(score)) + lines.append("") + + return "\n".join(lines) + + +_ONE_MILLISECOND = 0.001 +_TEN_MILLISECONDS = 0.01 +_ONE_SECOND = 1.0 + + +def _format_case_duration(seconds: 
float) -> str: + """Format SUT duration with adaptive units. + + Sub-ms tasks (deterministic stubs, fast classifiers) used to render as + `0ms` because the renderer rounded to the nearest millisecond. + """ + if seconds < _ONE_MILLISECOND: + return f"{seconds * 1_000_000:.0f}µs" + if seconds < _TEN_MILLISECONDS: + return f"{seconds * 1000:.2f}ms" + if seconds < _ONE_SECOND: + return f"{seconds * 1000:.0f}ms" + return f"{seconds:.2f}s" + + +def _format_score(score: EvalScore) -> str: + icon = "·" if score.is_metric else ("✓" if score.passed else "✗") + return f"- **{score.name}**: {score.value} {icon}" + + +def _format_value(value: Any) -> str: + if value is None: + return "_none_" + if isinstance(value, str): + return value if value.strip() else "_empty string_" + return f"```\n{value!r}\n```" diff --git a/protest/evals/suite.py b/protest/evals/suite.py new file mode 100644 index 0000000..67e277c --- /dev/null +++ b/protest/evals/suite.py @@ -0,0 +1,98 @@ +"""EvalSuite — eval-dedicated suite with judge and model support.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, TypeVar + +from protest.core.suite import ProTestSuite +from protest.entities import SuiteKind +from protest.evals.wrapper import make_eval_wrapper + +if TYPE_CHECKING: + from collections.abc import Callable, Sequence + + from protest.evals.evaluator import Evaluator, ShortCircuit + from protest.evals.types import Judge, ModelLabel + +FuncT = TypeVar("FuncT", bound="Callable[..., object]") + + +class EvalSuite(ProTestSuite): + """Eval-dedicated suite that forces kind=EVAL and carries judge/model config. + + Usage:: + + chatbot = EvalSuite("chatbot") + session.add_suite(chatbot) + + @chatbot.eval(evaluators=[contains_facts]) + async def chatbot(case: Annotated[EvalCase, From(cases)]) -> str: + return await ask(case.inputs) + """ + + def __init__( + self, + name: str, + *, + model: ModelLabel | None = None, + judge: Judge | None = None, + tags: list[str] | None = None, + max_concurrency: int | None = None, + description: str | None = None, + metadata: dict[str, Any] | None = None, + ) -> None: + suite_meta: dict[str, Any] = dict(metadata) if metadata else {} + if model is not None: + suite_meta["model"] = model.name + suite_meta["provider"] = model.provider + super().__init__( + name=name, + kind=SuiteKind.EVAL, + tags=tags, + max_concurrency=max_concurrency, + description=description, + metadata=suite_meta, + ) + self._judge: Judge | None = judge + self._model = model + + @property + def judge(self) -> Judge | None: + return self._judge + + @property + def model(self) -> ModelLabel | None: + return self._model + + def eval( + self, + evaluators: Sequence[Evaluator | ShortCircuit] | None = None, + tags: list[str] | None = None, + timeout: float | None = None, + judge: Judge | None = None, + ) -> Callable[[FuncT], FuncT]: + """Register a scored eval test on this suite. + + Args: + evaluators: Per-eval evaluators, appended to suite-level ones. + tags: Tags forwarded to the underlying `@suite.test`. + timeout: Per-eval timeout in seconds. + judge: Override the suite-level judge for this eval only. + Useful when one eval needs a stronger model than the rest + of the suite. Falls back to `self.judge` when omitted. 
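+
+        A sketch of the per-eval override (``FastJudge`` / ``StrongJudge`` are
+        assumed implementations of the Judge protocol; ``llm_judge`` is an
+        ``@evaluator``-decorated function)::
+
+            chatbot = EvalSuite("chatbot", judge=FastJudge())
+
+            @chatbot.eval(evaluators=[llm_judge(rubric="...")], judge=StrongJudge())
+            async def hard_case(case: Annotated[EvalCase, From(cases)]) -> str:
+                return await ask(case.inputs)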
+ """ + + def decorator(func: FuncT) -> FuncT: + resolved_judge = judge or self._judge + evals_list: list[Evaluator | ShortCircuit] = ( + list(evaluators) if evaluators else [] + ) + wrapper = make_eval_wrapper( + func, + evals_list, + judge=resolved_judge, + ) + self.test(tags=tags, timeout=timeout, is_eval=True)(wrapper) + return func + + return decorator diff --git a/protest/evals/types.py b/protest/evals/types.py new file mode 100644 index 0000000..1d19474 --- /dev/null +++ b/protest/evals/types.py @@ -0,0 +1,356 @@ +"""Types for eval results, scores, and run context.""" + +from __future__ import annotations + +import statistics +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, Generic, Protocol, TypeVar, runtime_checkable + +if TYPE_CHECKING: + from protest.entities.events import TestResult + +T = TypeVar("T") + + +@dataclass(frozen=True, slots=True) +class TaskResult(Generic[T]): + """Optional wrapper for eval task return values with usage stats. + + Return this instead of a plain value to report LLM usage for the + system under test. ProTest unwraps it transparently — evaluators + see the plain output. + + Usage:: + + @suite.eval(evaluators=[...]) + async def my_eval(case: EvalCase) -> TaskResult[str]: + result = await agent.run(case.inputs) + usage = result.usage() + return TaskResult( + output=result.output, + input_tokens=usage.request_tokens, + output_tokens=usage.response_tokens, + cost=0.003, + ) + + # Or just return str directly — TaskResult is opt-in. + """ + + output: T + input_tokens: int | None = None + output_tokens: int | None = None + cost: float | None = None + + +@dataclass(frozen=True, slots=True) +class JudgeResponse(Generic[T]): + """Return type for Judge.judge() — wraps the output with optional usage stats. + + Evaluators never see this: ``ctx.judge()`` unwraps and returns ``output``. + ProTest accumulates tokens/cost for history and display. + + Usage:: + + return JudgeResponse( + output=result.output, + input_tokens=usage.request_tokens, + output_tokens=usage.response_tokens, + cost=0.003, + ) + + # Or minimal — tokens/cost are optional: + return JudgeResponse(output=result.output) + """ + + output: T + input_tokens: int | None = None + output_tokens: int | None = None + cost: float | None = None + + +@runtime_checkable +class Judge(Protocol): + """Protocol for LLM judge implementations. + + All configuration (model, temperature, system_prompt, max_tokens) + lives in the constructor of the implementation, NOT in this protocol. + + Usage:: + + class MyJudge: + name = "my-judge" + provider = "openai" + + async def judge(self, prompt: str, output_type: type[T]) -> JudgeResponse[T]: + result = await agent.run(prompt) + return JudgeResponse(output=result.output, input_tokens=100) + + suite = EvalSuite("chatbot", judge=MyJudge()) + """ + + name: str + provider: str | None + + async def judge(self, prompt: str, output_type: type[T]) -> JudgeResponse[T]: ... + + +@dataclass(frozen=True, slots=True) +class ModelLabel: + """Metadata about the model being evaluated.""" + + name: str + provider: str | None = None + temperature: float | None = None + extra: dict[str, Any] = field(default_factory=dict) + + +@dataclass(frozen=True, slots=True) +class JudgeInfo: + """Metadata about the LLM judge used for evaluation.""" + + name: str + provider: str | None = None + evaluators: tuple[str, ...] 
= () + extra: dict[str, Any] = field(default_factory=dict) + + +@dataclass(frozen=True, slots=True) +class EvalScore: + """A single named value from an evaluator result. + + Values are categorized by type: + - bool → verdict (pass/fail) + - float → metric (aggregated in stats) + - str → reason (displayed on failure) + """ + + name: str + value: float | bool | str + skipped: bool = False + + @property + def is_verdict(self) -> bool: + return not self.skipped and isinstance(self.value, bool) + + @property + def is_metric(self) -> bool: + return ( + not self.skipped + and isinstance(self.value, (int, float)) + and not isinstance(self.value, bool) + ) + + @property + def is_reason(self) -> bool: + return not self.skipped and isinstance(self.value, str) + + @property + def passed(self) -> bool: + if self.skipped: + return True # skipped scores don't affect pass/fail + if isinstance(self.value, bool): + return self.value + return True + + +@dataclass(frozen=True, slots=True) +class EvalCaseResult: + """Complete result of evaluating a single case.""" + + case_name: str + node_id: str + scores: tuple[EvalScore, ...] + duration: float + passed: bool + inputs: Any = None + output: Any = None + expected_output: Any = None + case_hash: str = "" + eval_hash: str = "" + task_input_tokens: int = 0 + task_output_tokens: int = 0 + task_cost: float = 0.0 + judge_call_count: int = 0 + judge_input_tokens: int = 0 + judge_output_tokens: int = 0 + judge_cost: float = 0.0 + is_error: bool = False + + @classmethod + def from_test_result(cls, result: TestResult) -> EvalCaseResult: + """Build from a `TestResult` carrying an `eval_payload`. + + `passed` is derived from `result.error` and `payload.passed`, so both + the runner (post-execution) and the results writer (pass/fail hooks) + agree on the same computation. 
+ """ + payload = result.eval_payload + if payload is None: + raise ValueError( + f"Cannot build EvalCaseResult from TestResult without " + f"eval_payload (node_id={result.node_id})" + ) + return cls( + case_name=payload.case_name or "", + node_id=result.node_id, + scores=tuple( + EvalScore(name=name, value=entry.value) + for name, entry in payload.scores.items() + ), + duration=payload.task_duration, + passed=result.error is None and payload.passed, + inputs=payload.inputs, + output=payload.output, + expected_output=payload.expected_output, + case_hash=payload.case_hash, + eval_hash=payload.eval_hash, + task_input_tokens=payload.task_input_tokens, + task_output_tokens=payload.task_output_tokens, + task_cost=payload.task_cost, + judge_call_count=payload.judge_call_count, + judge_input_tokens=payload.judge_input_tokens, + judge_output_tokens=payload.judge_output_tokens, + judge_cost=payload.judge_cost, + is_error=result.is_fixture_error, + ) + + @property + def numeric_scores(self) -> dict[str, float]: + return {s.name: float(s.value) for s in self.scores if s.is_metric} + + @property + def failed_scores(self) -> tuple[EvalScore, ...]: + return tuple(s for s in self.scores if not s.passed) + + +_MIN_VALUES_FOR_PERCENTILES = 2 # statistics.quantiles requires at least 2 inputs + + +@dataclass(frozen=True, slots=True) +class ScoreStats: + """Aggregated statistics for a named score across cases.""" + + name: str + mean: float + median: float + p5: float + p95: float + min: float + max: float + count: int + + @classmethod + def from_values(cls, name: str, values: list[float]) -> ScoreStats: + if not values: + return cls(name=name, mean=0, median=0, p5=0, p95=0, min=0, max=0, count=0) + sv = sorted(values) + n = len(sv) + if n >= _MIN_VALUES_FOR_PERCENTILES: + # `quantiles(n=20, method='inclusive')` returns 19 cutpoints that + # split the data into 20 equal groups. Index 0 = 5%, index 18 = 95%. + # Inclusive method interpolates linearly between adjacent values + # and clamps to [min, max] — appropriate for bounded scores. + cuts = statistics.quantiles(sv, n=20, method="inclusive") + p5_value = cuts[0] + p95_value = cuts[18] + else: + # Single value: percentiles are undefined; fall back to that value. + p5_value = p95_value = sv[0] + return cls( + name=name, + mean=statistics.mean(sv), + median=statistics.median(sv), + p5=p5_value, + p95=p95_value, + min=sv[0], + max=sv[-1], + count=n, + ) + + +@dataclass(frozen=True, slots=True) +class EvalSuiteReport: + """Aggregated report for a suite of eval cases.""" + + suite_name: str + cases: tuple[EvalCaseResult, ...] 
+ duration: float + + @property + def passed_count(self) -> int: + return sum(1 for c in self.cases if c.passed) + + @property + def failed_count(self) -> int: + return sum(1 for c in self.cases if not c.passed and not c.is_error) + + @property + def errored_count(self) -> int: + return sum(1 for c in self.cases if c.is_error) + + @property + def total_count(self) -> int: + return len(self.cases) + + @property + def pass_rate(self) -> float: + return self.passed_count / self.total_count if self.cases else 0.0 + + def score_names(self) -> set[str]: + return {s.name for c in self.cases for s in c.scores if s.is_metric} + + def score_stats(self, name: str) -> ScoreStats: + values = [ + float(s.value) + for c in self.cases + for s in c.scores + if s.name == name and s.is_metric + ] + return ScoreStats.from_values(name, values) + + def all_score_stats(self) -> list[ScoreStats]: + # Single pass groups values by score name, avoiding O(n_cases x n_names) + # of calling score_stats(n) per name. score_stats(name) is preserved as + # a public single-name accessor. + by_name: dict[str, list[float]] = {} + for c in self.cases: + for s in c.scores: + if s.is_metric: + by_name.setdefault(s.name, []).append(float(s.value)) + return [ScoreStats.from_values(n, by_name[n]) for n in sorted(by_name)] + + @property + def total_task_input_tokens(self) -> int: + return sum(c.task_input_tokens for c in self.cases) + + @property + def total_task_output_tokens(self) -> int: + return sum(c.task_output_tokens for c in self.cases) + + @property + def total_task_tokens(self) -> int: + return self.total_task_input_tokens + self.total_task_output_tokens + + @property + def total_task_cost(self) -> float: + return sum(c.task_cost for c in self.cases) + + @property + def total_judge_calls(self) -> int: + return sum(c.judge_call_count for c in self.cases) + + @property + def total_judge_input_tokens(self) -> int: + return sum(c.judge_input_tokens for c in self.cases) + + @property + def total_judge_output_tokens(self) -> int: + return sum(c.judge_output_tokens for c in self.cases) + + @property + def total_judge_tokens(self) -> int: + return self.total_judge_input_tokens + self.total_judge_output_tokens + + @property + def total_judge_cost(self) -> float: + return sum(c.judge_cost for c in self.cases) diff --git a/protest/evals/wrapper.py b/protest/evals/wrapper.py new file mode 100644 index 0000000..3f07cc3 --- /dev/null +++ b/protest/evals/wrapper.py @@ -0,0 +1,274 @@ +"""Eval wrapper — turns a function into a scored eval test. + +The wrapper intercepts the return value, runs evaluators, and returns +an EvalPayload. The rest of the pipeline (executor, outcome builder, +reporters) handles it like any eval test. 
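+
+Roughly, for each case (a simplified sketch of make_eval_wrapper below)::
+
+    output = await func(**kwargs)              # run the system under test
+    scores, ctx = await run_evaluators(all_evaluators, case_name, inputs,
+                                       output, expected, metadata, duration)
+    return EvalPayload(passed=all(s.passed for s in scores), ...)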
+""" + +from __future__ import annotations + +import asyncio +import functools +import time +from typing import Annotated, Any, get_args, get_origin + +from protest.di.hints import get_type_hints_compat +from protest.entities.events import EvalPayload, EvalScoreEntry +from protest.evals.evaluator import ( + EvalCase, + EvalContext, + Evaluator, + ShortCircuit, + extract_scores_from_result, + validate_evaluators, +) +from protest.evals.hashing import compute_case_hash, compute_eval_hash +from protest.evals.types import EvalScore, TaskResult +from protest.exceptions import ( + FixtureError, + MultipleEvalCaseParamsError, + ScoreNameCollisionError, +) + + +def make_eval_wrapper( + func: Any, + evaluators: list[Evaluator | ShortCircuit], + judge: Any = None, +) -> Any: + """Wrap a function to run evaluators on its return value.""" + + _validate_single_evalcase_param(func) + validate_evaluators(evaluators) + + @functools.wraps(func) + async def eval_wrapper(**kwargs: Any) -> EvalPayload: + expected = _extract_expected(kwargs) + case_name = _extract_case_name(kwargs, func.__name__) + inputs = _extract_inputs(kwargs) + metadata = _extract_metadata(kwargs) + + start = time.perf_counter() + if asyncio.iscoroutinefunction(func): + raw_output = await func(**kwargs) + else: + raw_output = func(**kwargs) + task_duration = time.perf_counter() - start + + # Unwrap TaskResult if returned + task_input_tokens = 0 + task_output_tokens = 0 + task_cost = 0.0 + if isinstance(raw_output, TaskResult): + output = raw_output.output + task_input_tokens = raw_output.input_tokens or 0 + task_output_tokens = raw_output.output_tokens or 0 + task_cost = raw_output.cost or 0.0 + else: + output = raw_output + + all_evaluators = list(evaluators) + per_case = _extract_per_case_evaluators(kwargs) + all_evaluators.extend(per_case) + + scores, eval_ctx = await run_evaluators( + all_evaluators, + case_name, + inputs, + output, + expected, + metadata, + task_duration, + judge=judge, + ) + + # Detect score-name collisions across evaluators. EvalPayload.scores + # is a dict keyed by name; duplicates would silently overwrite each + # other downstream. Fail loud so the user can rename the field. + seen: set[str] = set() + duplicates: list[str] = [] + for s in scores: + if s.name in seen and s.name not in duplicates: + duplicates.append(s.name) + seen.add(s.name) + if duplicates: + raise ScoreNameCollisionError(case_name, duplicates) + + return EvalPayload( + case_name=case_name, + passed=all(s.passed for s in scores), + task_duration=task_duration, + inputs=inputs, + output=output, + expected_output=expected, + scores={ + s.name: EvalScoreEntry( + value=s.value, + passed=s.passed, + skipped=s.skipped, + ) + for s in scores + }, + case_hash=compute_case_hash(inputs, expected), + eval_hash=compute_eval_hash(all_evaluators), + task_input_tokens=task_input_tokens, + task_output_tokens=task_output_tokens, + task_cost=task_cost, + judge_call_count=eval_ctx.judge_call_count, + judge_input_tokens=eval_ctx.judge_input_tokens, + judge_output_tokens=eval_ctx.judge_output_tokens, + judge_cost=eval_ctx.judge_cost, + ) + + return eval_wrapper + + +# --------------------------------------------------------------------------- +# Registration-time validation +# --------------------------------------------------------------------------- + + +def _validate_single_evalcase_param(func: Any) -> None: + """Raise MultipleEvalCaseParamsError if `func` has > 1 EvalCase parameter. + + Runs at decorator time. 
The runtime contract (`_find_case`) silently + picks the first EvalCase in kwargs, which would drop the second one's + name/expected/inputs/metadata/per-case evaluators downstream. We catch + that here so the failure is loud and pinpoints the offending eval. + + Subclasses of EvalCase count: the runtime uses isinstance(_, EvalCase), + so any subclass would trigger the same silent drop. + """ + hints = get_type_hints_compat(func) + offending: list[str] = [] + for param_name, annotation in hints.items(): + if param_name == "return": + continue + underlying = ( + get_args(annotation)[0] + if get_origin(annotation) is Annotated + else annotation + ) + if isinstance(underlying, type) and issubclass(underlying, EvalCase): + offending.append(param_name) + if len(offending) > 1: + raise MultipleEvalCaseParamsError(func.__name__, offending) + + +# --------------------------------------------------------------------------- +# Extract helpers — pull EvalCase from kwargs +# --------------------------------------------------------------------------- + + +def _find_case(kwargs: dict[str, Any]) -> EvalCase | None: + """Find the EvalCase instance in kwargs.""" + for v in kwargs.values(): + if isinstance(v, EvalCase): + return v + return None + + +def _extract_expected(kwargs: dict[str, Any]) -> Any: + case = _find_case(kwargs) + if case is None: + return None + return case.expected + + +def _extract_case_name(kwargs: dict[str, Any], fallback: str) -> str: + case = _find_case(kwargs) + if case is None: + return fallback + return case.name + + +def _extract_inputs(kwargs: dict[str, Any]) -> Any: + case = _find_case(kwargs) + if case is None: + return None + return case.inputs + + +def _extract_metadata(kwargs: dict[str, Any]) -> Any: + case = _find_case(kwargs) + if case is None: + return None + return case.metadata or None + + +def _extract_per_case_evaluators(kwargs: dict[str, Any]) -> list[Any]: + case = _find_case(kwargs) + if case is None or not case.evaluators: + return [] + return list(case.evaluators) + + +# --------------------------------------------------------------------------- +# Evaluator execution +# --------------------------------------------------------------------------- + + +async def run_evaluators( + evaluators: list[Evaluator | ShortCircuit], + case_name: str, + inputs: Any, + output: Any, + expected_output: Any, + metadata: Any, + duration: float, + judge: Any = None, +) -> tuple[list[EvalScore], EvalContext[Any, Any]]: + """Run evaluators and return (scores, ctx with judge stats). + + Callers must have validated the list (Evaluator | ShortCircuit only) at the + boundary; the loop below trusts the Union and uses isinstance solely to + narrow it — the only legitimate isinstance kept in this module. 
+ """ + ctx = EvalContext( + name=case_name, + inputs=inputs, + output=output, + expected_output=expected_output, + metadata=metadata, + duration=duration, + _judge=judge, + ) + + scores: list[EvalScore] = [] + for ev in evaluators: + if isinstance(ev, ShortCircuit): + scores.extend(await _run_short_circuit(ev.evaluators, ctx)) + continue + + try: + raw = ev.run(ctx) + result = await raw if asyncio.iscoroutine(raw) else raw + scores.extend(extract_scores_from_result(result, ev.name)) + except Exception as exc: + raise FixtureError(f"evaluator '{ev.name}'", exc) from exc + + return scores, ctx + + +async def _run_short_circuit( + evaluators: list[Evaluator], + ctx: EvalContext[Any, Any], +) -> list[EvalScore]: + """Run evaluators in order, stop at first Verdict=False.""" + scores: list[EvalScore] = [] + for i, ev in enumerate(evaluators): + try: + raw = ev.run(ctx) + result = await raw if asyncio.iscoroutine(raw) else raw + except Exception as exc: + raise FixtureError(f"evaluator '{ev.name}'", exc) from exc + extracted = extract_scores_from_result(result, ev.name) + scores.extend(extracted) + if any(s.is_verdict and not s.passed for s in extracted): + # Mark remaining evaluators as skipped + for skipped_ev in evaluators[i + 1 :]: + scores.append( + EvalScore(name=skipped_ev.name, value=False, skipped=True) + ) + break + return scores diff --git a/protest/events/types.py b/protest/events/types.py index 8f4d1fc..05d9fa2 100644 --- a/protest/events/types.py +++ b/protest/events/types.py @@ -16,6 +16,7 @@ class Event(Enum): SUITE_SETUP_DONE = "suite_setup_done" SUITE_TEARDOWN_START = "suite_teardown_start" SUITE_END = "suite_end" + EVAL_SUITE_END = "eval_suite_end" TEST_START = "test_start" TEST_ACQUIRED = "test_acquired" TEST_SETUP_DONE = "test_setup_done" @@ -34,3 +35,4 @@ class Event(Enum): FIXTURE_TEARDOWN_START = "fixture_teardown_start" FIXTURE_TEARDOWN_DONE = "fixture_teardown_done" SESSION_INTERRUPTED = "session_interrupted" + USER_PRINT = "user_print" diff --git a/protest/exceptions.py b/protest/exceptions.py index 8176c6f..3cff676 100644 --- a/protest/exceptions.py +++ b/protest/exceptions.py @@ -93,3 +93,49 @@ def __init__(self, value: int): f"max_concurrency must be >= 1, got {value}. " f"Use None for unlimited concurrency." ) + + +class MultipleEvalCaseParamsError(ProTestError): + """Raised when an eval function declares more than one EvalCase parameter. + + Only one EvalCase per eval is supported: it determines the case identity + (name, expected, inputs, metadata, per-case evaluators) used by the + runner, history, and reporters. Additional EvalCase parameters would be + silently ignored downstream. + """ + + def __init__(self, func_name: str, param_names: list[str]): + params = ", ".join(param_names) + super().__init__( + f"Eval '{func_name}' declares multiple EvalCase parameters: {params}. " + f"Only one EvalCase parameter is supported per eval — it is used " + f"for case identity (name), expected output, inputs, metadata, " + f"and per-case evaluators. Merge the cases into a single EvalCase, " + f"or split into separate evals." + ) + + +class ScoreNameCollisionError(ProTestError): + """Raised when two evaluators in the same eval emit scores with the same name. + + Each `EvalScore.name` (from a dataclass `Verdict`/`Metric`/`Reason` field + or from the evaluator's name when it returns `bool`) becomes a key in + `EvalPayload.scores` (a dict). 
If two evaluators emit the same name, + one would silently overwrite the other in the per-case report and history, + which is a real source of misleading data. + + Fix by renaming the colliding fields so each Verdict/Metric/Reason has a + unique name within the suite (e.g. prefix with the evaluator's concept: + `summary_detail` instead of just `detail`). + """ + + def __init__(self, case_name: str, duplicates: list[str]): + dup_str = ", ".join(repr(d) for d in sorted(duplicates)) + super().__init__( + f"Score-name collision in eval '{case_name}': {dup_str}. " + f"Two or more evaluators emit a score under the same name. " + f"Rename the colliding dataclass Verdict/Metric/Reason field(s) " + f"so each name is unique within the suite — otherwise the " + f"duplicate scores would silently overwrite each other in the " + f"per-case report and the history file." + ) diff --git a/protest/execution/capture.py b/protest/execution/capture.py index d05fe00..584dbf3 100644 --- a/protest/execution/capture.py +++ b/protest/execution/capture.py @@ -1,14 +1,19 @@ +from __future__ import annotations + import io import logging import sys -from collections.abc import Callable from contextlib import suppress from contextvars import ContextVar, Token from dataclasses import dataclass from logging import LogRecord -from typing import TextIO +from typing import TYPE_CHECKING, TextIO + +if TYPE_CHECKING: + from collections.abc import Callable -from protest.compat import Self + from protest.compat import Self + from protest.events.bus import EventBus _capture_buffer: ContextVar[io.StringIO | None] = ContextVar( "capture_buffer", default=None @@ -19,6 +24,7 @@ ) _current_node_id: ContextVar[str | None] = ContextVar("current_node_id", default=None) +_event_bus_ref: ContextVar[EventBus | None] = ContextVar("event_bus_ref", default=None) @dataclass(slots=True) @@ -100,6 +106,21 @@ def get_session_teardown_output() -> str: return _session_teardown.buffer.getvalue() if _session_teardown.buffer else "" +def set_event_bus(bus: EventBus) -> Token[EventBus | None]: + """Set event bus reference for console.print() access.""" + return _event_bus_ref.set(bus) + + +def reset_event_bus(token: Token[EventBus | None]) -> None: + """Reset event bus reference.""" + _event_bus_ref.reset(token) + + +def get_event_bus() -> EventBus | None: + """Get current event bus (for console.print).""" + return _event_bus_ref.get() + + class TaskAwareStream: def __init__(self, original_stream: TextIO, show_output: bool = False) -> None: self._original = original_stream @@ -132,6 +153,25 @@ def __getattr__(self, name: str) -> object: return getattr(self._original, name) +def real_stdout() -> TextIO: + """Return the real process stdout, bypassing any active capture wrapper. + + When a run is under capture, `sys.stdout` is a `TaskAwareStream` routing + writes into per-test buffers; reporters need to bypass that buffering to + write their own output (progress, summary) directly to the terminal. 
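+
+    Typical use from a reporter (illustrative)::
+
+        print("collected 12 tests", file=real_stdout())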
+ """ + if isinstance(sys.stdout, TaskAwareStream): + return sys.stdout._original + return sys.stdout + + +def real_stderr() -> TextIO: + """Return the real process stderr, bypassing any active capture wrapper.""" + if isinstance(sys.stderr, TaskAwareStream): + return sys.stderr._original + return sys.stderr + + class TaskAwareLogHandler(logging.Handler): def emit(self, record: LogRecord) -> None: records = _log_records.get() diff --git a/protest/filters/kind.py b/protest/filters/kind.py new file mode 100644 index 0000000..076684a --- /dev/null +++ b/protest/filters/kind.py @@ -0,0 +1,37 @@ +"""KindFilterPlugin — filters tests by suite kind (test/eval).""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from protest.entities import SuiteKind +from protest.plugin import PluginBase + +if TYPE_CHECKING: + from protest.entities import TestItem + from protest.plugin import PluginContext + + +class KindFilterPlugin(PluginBase): + """Filters collected tests by suite kind ('test' or 'eval').""" + + name = "kind-filter" + description = "Filter by suite kind" + + def __init__(self, kind: SuiteKind) -> None: + self._kind = kind + + @classmethod + def activate(cls, ctx: PluginContext) -> KindFilterPlugin | None: + kind = ctx.get("kind_filter") + if kind: + return cls(kind=SuiteKind(kind)) + return None + + def on_collection_finish(self, items: list[TestItem]) -> list[TestItem]: + return [item for item in items if self._matches(item)] + + def _matches(self, item: TestItem) -> bool: + if item.suite is None: + return self._kind == SuiteKind.TEST + return item.suite.kind == self._kind diff --git a/protest/history/__init__.py b/protest/history/__init__.py new file mode 100644 index 0000000..5183cf7 --- /dev/null +++ b/protest/history/__init__.py @@ -0,0 +1,17 @@ +"""History module — run tracking for tests and evals.""" + +from protest.history.storage import ( + HISTORY_FILE, + append_entry, + clean_dirty, + load_history, + load_previous_run, +) + +__all__ = [ + "HISTORY_FILE", + "append_entry", + "clean_dirty", + "load_history", + "load_previous_run", +] diff --git a/protest/history/collector.py b/protest/history/collector.py new file mode 100644 index 0000000..7aa8659 --- /dev/null +++ b/protest/history/collector.py @@ -0,0 +1,80 @@ +"""Metadata collection: git info, environment, CI detection.""" + +from __future__ import annotations + +import os +import platform +import subprocess +import sys +from typing import Any + + +def collect_git_info() -> dict[str, Any] | None: + """Collect git context. 
Returns None if not in a git repo.""" + try: + commit = _git("rev-parse", "HEAD") + return { + "commit": commit, + "commit_short": commit[:7] if commit else None, + "branch": _git("rev-parse", "--abbrev-ref", "HEAD"), + "dirty": bool(_git("status", "--porcelain")), + "author": _git("log", "-1", "--format=%an"), + "commit_message": _git("log", "-1", "--format=%s"), + } + except (FileNotFoundError, subprocess.CalledProcessError): + return None + + +def collect_env_info() -> dict[str, Any]: + """Collect environment metadata.""" + ci_provider = detect_ci_provider() + return { + "python_version": platform.python_version(), + "protest_version": _get_pkg_version("protest"), + "hostname": platform.node(), + "os": sys.platform, + "ci": ci_provider is not None, + "ci_provider": ci_provider, + } + + +_CI_PROVIDERS: dict[str, str] = { + "GITHUB_ACTIONS": "github-actions", + "GITLAB_CI": "gitlab-ci", + "CIRCLECI": "circleci", + "BUILDKITE": "buildkite", + "TRAVIS": "travis-ci", +} + + +def detect_ci_provider() -> str | None: + """Detect CI provider from standard environment variables.""" + env = os.environ + for var, name in _CI_PROVIDERS.items(): + if env.get(var) == "true": + return name + if env.get("JENKINS_URL"): + return "jenkins" + if env.get("CI") == "true": + return "unknown" + return None + + +def _git(*args: str) -> str: + result = subprocess.run( + ["git", *args], # noqa: S607 + capture_output=True, + text=True, + timeout=5, + check=True, + ) + return result.stdout.strip() + + +def _get_pkg_version(name: str) -> str | None: + try: + from importlib.metadata import version # noqa: PLC0415 — inside try/except + + return version(name) + except Exception: + return None diff --git a/protest/history/plugin.py b/protest/history/plugin.py new file mode 100644 index 0000000..e662e14 --- /dev/null +++ b/protest/history/plugin.py @@ -0,0 +1,254 @@ +"""HistoryPlugin — persists test and eval run results as JSONL.""" + +from __future__ import annotations + +import uuid +from datetime import datetime, timezone +from typing import TYPE_CHECKING, Any + +from protest.entities import SuiteKind +from protest.evals.suite import EvalSuite +from protest.history import storage +from protest.history.collector import collect_env_info, collect_git_info +from protest.history.storage import ( + HISTORY_FILE, + SCHEMA_VERSION, + append_entry, + load_previous_run, +) +from protest.plugin import PluginBase + +if TYPE_CHECKING: + from pathlib import Path + + from protest.core.session import ProTestSession + from protest.entities.events import TestResult + from protest.evals.types import EvalCaseResult, EvalSuiteReport + from protest.plugin import PluginContext + + +class HistoryPlugin(PluginBase): + """Persists test and eval results to JSONL for run-over-run tracking. + + Always-on plugin. When history is disabled on the session, all handlers + are no-ops. Handles both test results (on_test_pass/fail) and eval + results (on_eval_suite_end). + """ + + name = "history" + description = "Run history tracking" + + def __init__(self, history_dir: Path | None = None) -> None: + self._history_dir = history_dir or storage.DEFAULT_HISTORY_DIR + self._history_file = self._history_dir / HISTORY_FILE + # Test data + self._test_suites: dict[str, dict[str, dict[str, Any]]] = {} + self._suite_kinds: dict[str, SuiteKind] = {} + # Bucket name for tests without a suite_path; resolved during setup + # to the first non-eval suite name, or kept as the literal fallback. 
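+        # (resolved in setup() below; _record_test() falls back to the literal "tests")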
+ self._default_suite_name: str | None = None + # Eval data + self._eval_reports: dict[str, EvalSuiteReport] = {} + self._eval_suite_metadata: dict[str, dict[str, Any]] = {} + self._eval_judge_info: dict[str, dict[str, Any]] = {} + # Session state + self._enabled: bool = False + self._metadata: dict[str, Any] = {} + + @classmethod + def activate(cls, ctx: PluginContext) -> HistoryPlugin | None: + if ctx.get("no_history", False): + return None + return cls(history_dir=ctx.get("history_dir")) + + def setup(self, session: ProTestSession) -> None: + self._enabled = session.history + self._metadata = dict(session.metadata) + if session.history_dir: + self._history_dir = session.history_dir + self._history_file = self._history_dir / HISTORY_FILE + for suite in session.suites: + self._suite_kinds[suite.name] = suite.kind + if suite.kind == SuiteKind.EVAL: + self._eval_suite_metadata[suite.name] = suite.suite_metadata + if isinstance(suite, EvalSuite) and suite.judge is not None: + self._eval_judge_info[suite.name] = { + "name": suite.judge.name, + "provider": suite.judge.provider, + } + elif self._default_suite_name is None: + self._default_suite_name = suite.name + + # -- Test event handlers -------------------------------------------------- + + def on_test_pass(self, result: TestResult) -> None: + if not self._enabled or result.is_eval: + return + self._record_test(result, passed=True) + + def on_test_fail(self, result: TestResult) -> None: + if not self._enabled or result.is_eval: + return + self._record_test(result, passed=False) + + def _record_test(self, result: TestResult, *, passed: bool) -> None: + suite_name = ( + result.suite_path.root_name + if result.suite_path + else (self._default_suite_name or "tests") + ) + if suite_name not in self._test_suites: + self._test_suites[suite_name] = {} + self._test_suites[suite_name][result.name] = { + "passed": passed, + "duration": round(result.duration, 5), + } + + # -- Eval event handlers -------------------------------------------------- + + def on_eval_suite_end(self, report: EvalSuiteReport) -> None: + if not self._enabled: + return + self._eval_reports[report.suite_name] = report + + # -- Session end: write combined entry ------------------------------------ + + def on_session_end(self, result: Any) -> None: + if not self._enabled: + return + if not self._test_suites and not self._eval_reports: + return + + suites_data: dict[str, Any] = {} + + # Test suites + for suite_name, cases in self._test_suites.items(): + total = len(cases) + passed = sum(1 for c in cases.values() if c["passed"]) + kind = self._suite_kinds.get(suite_name) + suites_data[suite_name] = { + "kind": kind.value if kind is not None else "test", + "total_cases": total, + "passed": passed, + "failed": total - passed, + "pass_rate": round(passed / total, 4) if total else 0, + "duration": round(sum(c["duration"] for c in cases.values()), 2), + "cases": cases, + } + + # Eval suites + all_score_stats: list[Any] = [] + for suite_name, report in self._eval_reports.items(): + sm = self._eval_suite_metadata.get(suite_name, {}) + suites_data[suite_name] = { + "kind": "eval", + "model": sm.get("model"), + "provider": sm.get("provider"), + "total_cases": report.total_count, + "passed": report.passed_count, + "failed": report.failed_count, + "errored": report.errored_count, + "pass_rate": round(report.pass_rate, 4), + "duration": round(report.duration, 2), + "cases": {c.case_name: _serialize_eval_case(c) for c in report.cases}, + } + all_score_stats.extend(report.all_score_stats()) + + # 
Build evals summary (non-null only if we have eval data) + evals_summary = None + if self._eval_reports: + # Derive top-level model from first eval suite (or None if mixed) + models = { + sm.get("model") + for sm in self._eval_suite_metadata.values() + if sm.get("model") + } + top_model = models.pop() if len(models) == 1 else None + providers = { + sm.get("provider") + for sm in self._eval_suite_metadata.values() + if sm.get("provider") + } + top_provider = providers.pop() if len(providers) == 1 else None + + # Aggregate judge info (first one found, or None) + judge_dict = None + if self._eval_judge_info: + first_judge = next(iter(self._eval_judge_info.values())) + judge_dict = first_judge + + scores_summary = { + s.name: { + "mean": round(s.mean, 4), + "median": round(s.median, 4), + "p5": round(s.p5, 4), + "p95": round(s.p95, 4), + "min": round(s.min, 4), + "max": round(s.max, 4), + "count": s.count, + } + for s in all_score_stats + } + + evals_summary = { + "model": top_model, + "provider": top_provider, + "judge": judge_dict, + "scores_summary": scores_summary, + } + + entry: dict[str, Any] = { + "schema_version": SCHEMA_VERSION, + "run_id": str(uuid.uuid4()), + "timestamp": datetime.now(tz=timezone.utc).isoformat(), + "git": collect_git_info(), + "environment": collect_env_info(), + "metadata": self._metadata, + "evals": evals_summary, + "suites": suites_data, + } + append_entry(self._history_file, entry) + + +def _serialize_eval_case(case: EvalCaseResult) -> dict[str, Any]: + """Serialize an eval case result for JSONL storage. + + Skipped scores are excluded: a ShortCircuit skip produces + `EvalScore(value=False, skipped=True)` — serializing it as an assertion + would look like a real failure in `history --compare` diffs. + + `case.duration` here is `EvalPayload.task_duration` (SUT-only timing, + set by the eval wrapper), not the full TestResult duration shown by live + reporters. Persisted at 10 µs precision so sub-ms SUTs don't all hash + down to 0.0 across runs. + """ + entry: dict[str, Any] = { + "passed": case.passed, + "is_error": case.is_error, + "duration": round(case.duration, 5), + "scores": { + s.name: s.value for s in case.scores if s.is_metric and not s.skipped + }, + "case_hash": case.case_hash, + "eval_hash": case.eval_hash, + } + labels = { + s.name: s.value + for s in case.scores + if isinstance(s.value, str) and not s.skipped + } + if labels: + entry["labels"] = labels + assertions = { + s.name: s.value + for s in case.scores + if isinstance(s.value, bool) and not s.skipped + } + if assertions: + entry["assertions"] = assertions + return entry + + +def load_previous_eval_run(history_dir: Any = None) -> dict[str, Any] | None: + """Load the most recent eval run from history.""" + return load_previous_run(history_dir=history_dir, evals_only=True) diff --git a/protest/history/storage.py b/protest/history/storage.py new file mode 100644 index 0000000..7903649 --- /dev/null +++ b/protest/history/storage.py @@ -0,0 +1,295 @@ +"""JSONL history storage: load, append, filter, clean.""" + +from __future__ import annotations + +import contextlib +import json +import subprocess +import sys +import warnings +from pathlib import Path +from typing import IO, TYPE_CHECKING, Any + +if TYPE_CHECKING: + from collections.abc import Iterator + +if sys.platform == "win32": + import msvcrt + + @contextlib.contextmanager + def _exclusive_file_lock(f: IO[Any]) -> Iterator[None]: + """Hold an exclusive advisory lock on `f` for the block's duration. 
+ + Windows `msvcrt.locking` cannot lock regions beyond EOF, so we lock + a sibling `.lock` file that we ensure always has 1 byte. All + writers cooperate on this sibling, so concurrent appends to the + main file are serialized. + """ + lock_path = Path(f"{f.name}.lock") + with open(lock_path, "a+b") as lf: + lf.seek(0, 2) + if lf.tell() == 0: + lf.write(b"\0") + lf.flush() + lf.seek(0) + msvcrt.locking(lf.fileno(), msvcrt.LK_LOCK, 1) + try: + yield + finally: + lf.seek(0) + msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1) +else: + import fcntl + + @contextlib.contextmanager + def _exclusive_file_lock(f: IO[Any]) -> Iterator[None]: + """Hold an exclusive advisory lock on `f` for the block's duration. + + POSIX `fcntl.flock` locks the file descriptor directly; cross-process + callers opening the same path will block until the lock is released. + """ + fcntl.flock(f.fileno(), fcntl.LOCK_EX) + try: + yield + finally: + fcntl.flock(f.fileno(), fcntl.LOCK_UN) + + +DEFAULT_HISTORY_DIR = Path(".protest") +HISTORY_FILE = "history.jsonl" + +# JSONL entry schema version. Bump when the on-disk shape changes in a way +# that older readers can't transparently handle (new required fields, +# restructured nesting). Entries written before this was introduced have no +# `schema_version` key and are treated as version 0 (legacy — best-effort). +SCHEMA_VERSION = 1 + +_warned_future_versions: set[int] = set() + + +def _is_future_schema(entry: dict[str, Any]) -> bool: + """Return True if the entry was written by a newer protest version. + + Entries with `schema_version > SCHEMA_VERSION` are skipped by readers, + with a one-time warning per version (avoids N warnings for N such + entries). + """ + version = entry.get("schema_version", 0) + if not isinstance(version, int) or version <= SCHEMA_VERSION: + return False + if version not in _warned_future_versions: + _warned_future_versions.add(version) + warnings.warn( + f"history.jsonl contains entries with schema_version={version}, " + f"but this protest supports up to {SCHEMA_VERSION}. " + f"Those entries will be skipped. Upgrade protest to read them.", + stacklevel=3, + ) + return True + + +def load_history( + history_dir: Path | None = None, + n: int | None = None, + model: str | None = None, + suite: str | None = None, + evals_only: bool = False, + tests_only: bool = False, +) -> list[dict[str, Any]]: + """Load history entries with optional filtering.""" + path = (history_dir or DEFAULT_HISTORY_DIR) / HISTORY_FILE + if not path.exists(): + return [] + + entries: list[dict[str, Any]] = [] + for line in path.read_text().strip().splitlines(): + try: + entry = json.loads(line) + except json.JSONDecodeError: + continue + filtered = _apply_entry_filters( + entry, + evals_only=evals_only, + tests_only=tests_only, + model=model, + suite=suite, + ) + if filtered is not None: + entries.append(filtered) + + entries.sort(key=lambda e: e.get("timestamp", "")) + if n is not None: + entries = entries[-n:] + return entries + + +def _apply_entry_filters( + entry: dict[str, Any], + *, + evals_only: bool, + tests_only: bool, + model: str | None, + suite: str | None, +) -> dict[str, Any] | None: + """Apply CLI filters to a single history entry. + + Returns the (possibly suite-pruned) entry to keep, or None to drop it. + `--model` / `--suite` operate at the suite level: any suite in the run + that matches keeps the entry alive, with non-matching suites pruned out. 
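+
+    Illustrative example (suite names and model labels are hypothetical):
+
+        entry = {"suites": {"chat": {"model": "a"}, "rag": {"model": "b"}}}
+        _apply_entry_filters(
+            entry, evals_only=False, tests_only=False, model="a", suite=None
+        )
+        # -> {"suites": {"chat": {"model": "a"}}}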
+ """ + if _is_future_schema(entry): + return None + if evals_only and not _has_suite_kind(entry, "eval"): + return None + if tests_only and not _has_suite_kind(entry, "test"): + return None + if model is None and suite is None: + return entry + + kept_suites: dict[str, Any] = {} + for sname, sdata in entry.get("suites", {}).items(): + if not isinstance(sdata, dict): + continue + if model is not None and sdata.get("model") != model: + continue + if suite is not None and sname != suite: + continue + kept_suites[sname] = sdata + if not kept_suites: + return None + return {**entry, "suites": kept_suites} + + +def _has_suite_kind(entry: dict[str, Any], kind: str) -> bool: + """Check if entry has at least one suite with the given kind.""" + suites = entry.get("suites", {}) + for suite_data in suites.values(): + if isinstance(suite_data, dict) and suite_data.get("kind") == kind: + return True + # Legacy fallback: entries without kind field + if not any(isinstance(s, dict) and "kind" in s for s in suites.values()): + if kind == "eval": + return entry.get("evals") is not None + if kind == "test": + return entry.get("evals") is None + return False + + +def append_entry(path: Path, entry: dict[str, Any]) -> None: + """Append a single JSON entry to a JSONL file. + + Serializes concurrent writes from separate processes sharing the same + history file (e.g. a CI matrix) via an exclusive advisory lock: + `fcntl.flock` on POSIX, `msvcrt.locking` on a sibling `.lock` + file on Windows. + """ + path.parent.mkdir(parents=True, exist_ok=True) + line = json.dumps(entry, default=str) + "\n" + with open(path, "a") as f, _exclusive_file_lock(f): + f.write(line) + f.flush() + + +def load_previous_run( + history_dir: Path | None = None, + evals_only: bool = False, +) -> dict[str, Any] | None: + """Load the most recent history entry.""" + path = (history_dir or DEFAULT_HISTORY_DIR) / HISTORY_FILE + if not path.exists(): + return None + lines = path.read_text().strip().splitlines() + for line in reversed(lines): + try: + entry = json.loads(line) + except json.JSONDecodeError: + continue + if _is_future_schema(entry): + continue + if evals_only and entry.get("evals") is None: + continue + return dict(entry) + return None + + +def _current_git_head() -> str | None: + """Return the current HEAD short SHA, or None when not in a git repo.""" + try: + return subprocess.run( + ["git", "rev-parse", "HEAD"], # noqa: S607 + capture_output=True, + text=True, + timeout=5, + check=True, + ).stdout.strip() + except (FileNotFoundError, subprocess.CalledProcessError): + return None + + +def is_dirty_entry(entry: dict[str, Any], current_commit: str | None) -> bool: + """Return True if `entry` was produced on a dirty working tree at HEAD.""" + if not current_commit: + return False + git = entry.get("git") or {} + return bool(git.get("dirty")) and git.get("commit") == current_commit + + +def count_dirty_entries(history_dir: Path | None = None) -> int: + """Count entries `clean_dirty()` would remove (without touching the file).""" + path = (history_dir or DEFAULT_HISTORY_DIR) / HISTORY_FILE + if not path.exists(): + return 0 + current_commit = _current_git_head() + if not current_commit: + return 0 + count = 0 + for line in path.read_text().strip().splitlines(): + try: + entry = json.loads(line) + except json.JSONDecodeError: + continue + if is_dirty_entry(entry, current_commit): + count += 1 + return count + + +def clean_dirty(history_dir: Path | None = None) -> int: + """Remove entries where git.dirty=True AND git.commit matches 
current HEAD. + + Returns the number of entries removed. + + The read+write happens under `_exclusive_file_lock` so a concurrent + `append_entry` cannot land between our read and our truncate (which + would silently drop the new entry). + """ + path = (history_dir or DEFAULT_HISTORY_DIR) / HISTORY_FILE + if not path.exists(): + return 0 + + current_commit = _current_git_head() + if not current_commit: + return 0 + + with open(path, "r+") as f, _exclusive_file_lock(f): + f.seek(0) + lines = f.read().strip().splitlines() + kept: list[str] = [] + removed = 0 + + for line in lines: + try: + entry = json.loads(line) + except json.JSONDecodeError: + kept.append(line) + continue + if is_dirty_entry(entry, current_commit): + removed += 1 + else: + kept.append(line) + + if removed: + f.seek(0) + f.truncate() + if kept: + f.write("\n".join(kept) + "\n") + return removed diff --git a/protest/plugin.py b/protest/plugin.py index 6833b03..895d7a5 100644 --- a/protest/plugin.py +++ b/protest/plugin.py @@ -1,12 +1,13 @@ from __future__ import annotations from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Self +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from argparse import ArgumentParser from collections.abc import Awaitable + from protest.compat import Self from protest.core.session import ProTestSession from protest.entities import ( FixtureInfo, @@ -142,6 +143,12 @@ def on_suite_teardown_start(self, path: SuitePath) -> None | Awaitable[None]: def on_suite_end(self, result: SuiteResult) -> None | Awaitable[None]: """Suite ends (after fixture teardown).""" + def on_eval_suite_end(self, report: Any) -> None | Awaitable[None]: + """Eval suite finished — aggregated report with scores/stats.""" + + def on_user_print(self, data: Any) -> None | Awaitable[None]: + """User-initiated print via protest.console.print().""" + # ───────────────────────────────────────────────────────────────────── # Fixture lifecycle # ───────────────────────────────────────────────────────────────────── diff --git a/protest/reporting/ascii.py b/protest/reporting/ascii.py index 9ff7211..a7dfdea 100644 --- a/protest/reporting/ascii.py +++ b/protest/reporting/ascii.py @@ -1,8 +1,11 @@ +import logging import traceback from pathlib import Path +from typing import Any from typing_extensions import Self +from protest.console import strip_markup from protest.entities import ( FixtureInfo, HandlerInfo, @@ -18,7 +21,15 @@ TestStartInfo, TestTeardownInfo, ) +from protest.evals.types import EvalSuiteReport +from protest.execution.capture import real_stdout from protest.plugin import PluginBase, PluginContext +from protest.reporting.format import ( + format_duration as _format_duration, +) +from protest.reporting.format import ( + format_usage as _format_usage, +) from protest.reporting.verbosity import Verbosity _MIN_NODE_ID_PARTS = 2 @@ -47,16 +58,29 @@ def _format_test_name(result: TestResult, include_suite: bool = False) -> str: return name -MIN_DURATION_THRESHOLD = 0.001 - - -def _format_duration(seconds: float) -> str: - """Format duration: ms for fast, s for slow.""" - if seconds < MIN_DURATION_THRESHOLD: - return "<1ms" - if seconds < 1: - return f"{seconds * 1000:.0f}ms" - return f"{seconds:.2f}s" +def _format_eval_scores_inline(result: TestResult, short: bool = False) -> str: + """Format eval scores for inline display — ASCII version (no glyphs). + + When `short=True`, only failing/skipped scores are shown — passing scores + are hidden to keep the output readable on large suites. 
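+
+    Illustrative output (score names are hypothetical):
+    " keyword_recall=0.80 category_ok=pass summary=skip".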
+ """ + if not result.eval_payload: + return "" + parts: list[str] = [] + for name, entry in result.eval_payload.scores.items(): + if entry.skipped: + parts.append(f"{name}=skip") + continue + if short and entry.passed: + continue + val = entry.value + if isinstance(val, bool): + parts.append(f"{name}={'pass' if val else 'fail'}") + elif isinstance(val, float): + parts.append(f"{name}={val:.2f}") + else: + parts.append(f"{name}={val}") + return f" {' '.join(parts)}" if parts else "" class AsciiReporter(PluginBase): @@ -65,18 +89,59 @@ class AsciiReporter(PluginBase): name = "ascii-reporter" description = "Plain ASCII reporter" - def __init__(self, verbosity: int = 0) -> None: + def __init__( + self, + verbosity: int = 0, + show_logs: str | None = None, + show_output: bool = False, + short: bool = False, + ) -> None: self._verbosity = verbosity + self._show_logs = show_logs + self._show_output = show_output + self._short = short self._is_parallel = False self._failed_results: list[TestResult] = [] self._error_results: list[TestResult] = [] @classmethod def activate(cls, ctx: PluginContext) -> Self | None: - if ctx.get("no_color", False): - return cls(verbosity=ctx.get("verbosity", 0)) + # Activate when --no-color was passed, OR when `rich` is not + # installed (RichReporter would otherwise leave the run silent). + import importlib.util # noqa: PLC0415 — std lib, kept local for clarity + + if ctx.get("no_color", False) or importlib.util.find_spec("rich") is None: + return cls( + verbosity=ctx.get("verbosity", 0), + show_logs=ctx.get("show_logs"), + show_output=ctx.get("show_output", False), + short=ctx.get("short", False), + ) return None + def _print_eval_detail(self, result: TestResult) -> None: + """Print eval inputs/output/expected (enabled by --show-output or on failure).""" + p = result.eval_payload + if not p: + return + if p.inputs is not None: + print(f" | inputs: {str(p.inputs)[:200]}") + if p.output is not None: + print(f" | output: {str(p.output)[:200]}") + if p.expected_output is not None: + print(f" | expected: {str(p.expected_output)[:200]}") + + def _maybe_show_logs(self, result: TestResult) -> None: + """Show captured log records if --show-logs is active.""" + if not self._show_logs or not result.log_records: + return + min_level = getattr(logging, self._show_logs.upper(), logging.INFO) + for record in result.log_records: + if record.levelno >= min_level: + print( + f" LOG [{record.levelname}] {record.name}: {record.getMessage()}" + ) + def on_collection_finish(self, items: list[TestItem]) -> list[TestItem]: self._is_parallel = len(items) > 1 return items @@ -123,7 +188,7 @@ def on_fixture_setup_start(self, info: FixtureInfo) -> None: print(f" -> fixture '{info.name}' setup... 
({info.scope.value})") def on_fixture_setup_done(self, info: FixtureInfo) -> None: - if self._verbosity >= Verbosity.FIXTURES: + if self._verbosity >= Verbosity.NORMAL: print( f" -> fixture '{info.name}' ready ({_format_duration(info.duration)})" ) @@ -140,11 +205,17 @@ def on_fixture_teardown_done(self, info: FixtureInfo) -> None: def on_test_setup_done(self, info: TestStartInfo) -> None: if self._verbosity >= Verbosity.FIXTURES: - print(f" > {info.name} setup done") + self._print_bypass(f" > {info.name} setup done") def on_test_teardown_start(self, info: TestTeardownInfo) -> None: if self._verbosity >= Verbosity.FIXTURES: - print(f" < {info.name} teardown...") + self._print_bypass(f" < {info.name} teardown...") + + @staticmethod + def _print_bypass(msg: str) -> None: + stream = real_stdout() + stream.write(msg + "\n") + stream.flush() def on_test_retry(self, info: TestRetryInfo) -> None: delay_msg = f", retrying in {info.delay}s" if info.delay > 0 else "" @@ -161,7 +232,15 @@ def on_test_pass(self, result: TestResult) -> None: retry_suffix = "" if result.max_attempts > 1: retry_suffix = f" [attempt {result.attempt}/{result.max_attempts}]" - print(f" OK {name} ({duration}){retry_suffix}") + scores_str = ( + _format_eval_scores_inline(result, short=self._short) + if result.is_eval + else "" + ) + print(f" OK {name} ({duration}){scores_str}{retry_suffix}") + if self._show_output and result.is_eval: + self._print_eval_detail(result) + self._maybe_show_logs(result) def on_test_fail(self, result: TestResult) -> None: name = _format_test_name(result, include_suite=self._is_parallel) @@ -184,6 +263,9 @@ def on_test_fail(self, result: TestResult) -> None: if result.output: for line in result.output.rstrip().splitlines(): print(f" | {line}") + if result.is_eval: + self._print_eval_detail(result) + self._maybe_show_logs(result) def on_test_skip(self, result: TestResult) -> None: if self._verbosity >= Verbosity.NORMAL: @@ -225,14 +307,16 @@ def _format_traceback(self, error: Exception) -> str: return "".join(lines) def _print_failure_summary(self) -> None: - if self._failed_results: + non_eval_failures = [r for r in self._failed_results if not r.is_eval] + if non_eval_failures: print("\n=== FAILURES ===") - for result in self._failed_results: + for result in non_eval_failures: self._print_failure_detail(result, is_error=False) - if self._error_results: + non_eval_errors = [r for r in self._error_results if not r.is_eval] + if non_eval_errors: print("\n=== ERRORS ===") - for result in self._error_results: + for result in non_eval_errors: self._print_failure_detail(result, is_error=True) def _print_failure_detail(self, result: TestResult, *, is_error: bool) -> None: @@ -250,8 +334,52 @@ def _print_failure_detail(self, result: TestResult, *, is_error: bool) -> None: for line in result.output.rstrip().splitlines(): print(f" {line}") + def on_user_print(self, data: Any) -> None: + msg, raw, prefix = data + text = msg if raw else strip_markup(msg) + stream = real_stdout() + line = f" | {text}\n" if prefix and not raw else f"{text}\n" + stream.write(line) + stream.flush() + + def on_eval_suite_end(self, report: Any) -> None: + if not isinstance(report, EvalSuiteReport): + return + stats = report.all_score_stats() + print() + print(f" Eval: {report.suite_name} ({report.total_count} cases)") + if stats: + max_name = max(len(s.name) for s in stats) + print(" " + "─" * 60) + for s in stats: + print( + f" {s.name:<{max_name}} " + f"mean={s.mean:.2f} p50={s.median:.2f} " + f"p5={s.p5:.2f} p95={s.p95:.2f}" + ) + 
print(" " + "─" * 60) + rate_pct = report.pass_rate * 100 + print(f" Passed: {report.passed_count}/{report.total_count} ({rate_pct:.1f}%)") + if report.total_task_tokens > 0 or report.total_task_cost > 0: + print( + f" Task: {_format_usage(report.total_task_input_tokens, report.total_task_output_tokens, report.total_task_cost)}" + ) + if report.total_judge_calls > 0: + judge_parts = [f"{report.total_judge_calls} calls"] + usage = _format_usage( + report.total_judge_input_tokens, + report.total_judge_output_tokens, + report.total_judge_cost, + ) + if usage: + judge_parts.append(usage) + print(f" Judge: {', '.join(judge_parts)}") + print() + def on_session_complete(self, result: SessionResult) -> None: - if self._failed_results or self._error_results: + has_non_eval_failures = any(not r.is_eval for r in self._failed_results) + has_non_eval_errors = any(not r.is_eval for r in self._error_results) + if has_non_eval_failures or has_non_eval_errors: self._print_failure_summary() total = ( diff --git a/protest/reporting/factory.py b/protest/reporting/factory.py index e3d405a..6d0fbf6 100644 --- a/protest/reporting/factory.py +++ b/protest/reporting/factory.py @@ -18,7 +18,7 @@ def get_reporter(force_no_color: bool = False) -> PluginBase: return AsciiReporter() try: - from rich.console import Console # type: ignore[import-not-found] + from rich.console import Console Console() except ImportError: diff --git a/protest/reporting/format.py b/protest/reporting/format.py new file mode 100644 index 0000000..6e23151 --- /dev/null +++ b/protest/reporting/format.py @@ -0,0 +1,39 @@ +"""Shared formatting helpers used by both Rich and Ascii reporters. + +Only formats that are *truly identical* between the two reporters live here. +Visual rendering (glyphs vs ASCII words, colors) stays in each reporter. 
+""" + +from __future__ import annotations + +MIN_DURATION_THRESHOLD = 0.001 +_TOKEN_K_THRESHOLD = 1000 + + +def format_duration(seconds: float) -> str: + if seconds < MIN_DURATION_THRESHOLD: + return "<1ms" + if seconds < 1: + return f"{seconds * 1000:.0f}ms" + return f"{seconds:.2f}s" + + +def format_tokens(tokens: int) -> str: + """Format token count: 1234 → '1.2k', 45 → '45'.""" + return ( + f"{tokens / _TOKEN_K_THRESHOLD:.1f}k" + if tokens >= _TOKEN_K_THRESHOLD + else str(tokens) + ) + + +def format_usage(input_tokens: int, output_tokens: int, cost: float) -> str: + """Format usage stats as 'Xk in / Yk out, $0.0042'.""" + parts: list[str] = [] + if input_tokens > 0 or output_tokens > 0: + parts.append( + f"{format_tokens(input_tokens)} in / {format_tokens(output_tokens)} out" + ) + if cost > 0: + parts.append(f"${cost:.4f}") + return ", ".join(parts) diff --git a/protest/reporting/rich_reporter.py b/protest/reporting/rich_reporter.py index 2931e6b..57ab433 100644 --- a/protest/reporting/rich_reporter.py +++ b/protest/reporting/rich_reporter.py @@ -1,8 +1,10 @@ +import importlib.util +import logging import traceback from argparse import ArgumentParser from pathlib import Path +from typing import Any -from rich.console import Console # type: ignore[import-not-found] from typing_extensions import Self from protest.entities import ( @@ -20,27 +22,68 @@ TestStartInfo, TestTeardownInfo, ) +from protest.evals.types import EvalSuiteReport +from protest.execution.capture import real_stdout from protest.plugin import PluginBase, PluginContext +from protest.reporting.format import ( + format_duration as _format_duration, +) +from protest.reporting.format import ( + format_usage as _format_usage, +) from protest.reporting.verbosity import Verbosity -def _format_test_name(result: TestResult) -> str: - if "[" in result.node_id: - suffix = result.node_id[result.node_id.index("[") :] - escaped_suffix = suffix.replace("[", "\\[") - return f"{result.name}{escaped_suffix}" - return result.name +# `rich` is an optional dependency. All `from rich...` imports below are +# done lazily inside methods so that `import protest` works without it; +# `RichReporter.activate()` returns None when rich is missing, and +# `AsciiReporter` takes over via its own activate() check. +def _rich_available() -> bool: + return importlib.util.find_spec("rich") is not None + + +# Per-run pass-rate thresholds for the eval suite color cue. +# Strict default — green only if every case passes; yellow above half. +_PERFECT_PASS_RATE = 1.0 +_PARTIAL_PASS_RATE = 0.5 -MIN_DURATION_THRESHOLD = 0.001 +def _short_label(name: str, node_id: str) -> str: + """name + [case_id] from node_id.""" + if "[" in node_id: + suffix = node_id[node_id.index("[") :] + return f"{name}{suffix}" + return name -def _format_duration(seconds: float) -> str: - if seconds < MIN_DURATION_THRESHOLD: - return "<1ms" - if seconds < 1: - return f"{seconds * 1000:.0f}ms" - return f"{seconds:.2f}s" +def _format_test_name(result: TestResult) -> str: + label = _short_label(result.name, result.node_id) + return label.replace("[", "\\[") + + +def _format_eval_scores_inline(result: TestResult, short: bool = False) -> str: + """Format eval scores for inline display (e.g. ' bg_score=0.8 char_id=1.0'). + + When `short=True`, only failing/skipped scores are shown — passing scores + are hidden to keep the output readable on large suites. 
+ """ + if not result.eval_payload: + return "" + parts = [] + for name, entry in result.eval_payload.scores.items(): + if entry.skipped: + parts.append(f"{name}=⊘") + continue + if short and entry.passed: + continue + val = entry.value + if isinstance(val, bool): + parts.append(f"{name}={'✓' if val else '✗'}") + elif isinstance(val, float): + parts.append(f"{name}={val:.2f}") + else: + parts.append(f"{name}={val}") + return f" [dim]{' '.join(parts)}[/]" if parts else "" class RichReporter(PluginBase): @@ -49,9 +92,20 @@ class RichReporter(PluginBase): name = "rich-reporter" description = "Rich console reporter with colors" - def __init__(self, verbosity: int = 0) -> None: + def __init__( + self, + verbosity: int = 0, + show_logs: str | None = None, + show_output: bool = False, + short: bool = False, + ) -> None: + from rich.console import Console # noqa: PLC0415 — optional dep, lazy + self.console = Console(highlight=False) self._verbosity = verbosity + self._show_logs = show_logs + self._show_output = show_output + self._short = short self._failed_results: list[TestResult] = [] self._error_results: list[TestResult] = [] @@ -76,11 +130,60 @@ def add_cli_options(cls, parser: ArgumentParser) -> None: def activate(cls, ctx: PluginContext) -> Self | None: if ctx.get("no_color", False): return None - return cls(verbosity=ctx.get("verbosity", 0)) + if not _rich_available(): + # `rich` is an optional dependency; AsciiReporter takes over. + return None + return cls( + verbosity=ctx.get("verbosity", 0), + show_logs=ctx.get("show_logs"), + show_output=ctx.get("show_output", False), + short=ctx.get("short", False), + ) def _print(self, message: str) -> None: self.console.print(message) + def _print_eval_detail(self, result: TestResult) -> None: + """Print eval inputs/output/expected for -vv verbosity.""" + p = result.eval_payload + if not p: + return + if p.inputs is not None: + inp = str(p.inputs)[:200] + self._print(f"[dim] │ inputs: {inp}[/]") + if p.output is not None: + out = str(p.output)[:200] + self._print(f"[dim] │ output: {out}[/]") + if p.expected_output is not None: + exp = str(p.expected_output)[:200] + self._print(f"[dim] │ expected: {exp}[/]") + + def _maybe_show_logs(self, result: TestResult) -> None: + """Show captured log records if --show-logs is active.""" + if not self._show_logs or not result.log_records: + return + min_level = getattr(logging, self._show_logs.upper(), logging.INFO) + for record in result.log_records: + if record.levelno >= min_level: + level = record.levelname + color = ( + "red" + if record.levelno >= logging.ERROR + else "yellow" + if record.levelno >= logging.WARNING + else "dim" + ) + self._print( + f"[{color}] LOG [{level}] {record.name}: {record.getMessage()}[/]" + ) + + def _print_bypass(self, message: str) -> None: + """Print bypassing capture (for lifecycle messages emitted during tests).""" + from rich.console import Console # noqa: PLC0415 — optional dep, lazy + + stream = real_stdout() + Console(file=stream, highlight=False).print(message) + def on_collection_finish(self, items: list[TestItem]) -> list[TestItem]: return items @@ -128,7 +231,7 @@ def on_fixture_setup_start(self, info: FixtureInfo) -> None: self._print(f"[dim] ↳ fixture '{info.name}' setup... 
{scope_str}[/]") def on_fixture_setup_done(self, info: FixtureInfo) -> None: - if self._verbosity >= Verbosity.FIXTURES: + if self._verbosity >= Verbosity.NORMAL: self._print( f"[dim] ↳ fixture '{info.name}' ready ({_format_duration(info.duration)})[/]" ) @@ -145,11 +248,13 @@ def on_fixture_teardown_done(self, info: FixtureInfo) -> None: def on_test_setup_done(self, info: TestStartInfo) -> None: if self._verbosity >= Verbosity.FIXTURES: - self._print(f"[dim] → {info.name} setup done[/]") + label = _short_label(info.name, info.node_id).replace("[", "\\[") + self._print_bypass(f"[dim] → {label} setup done[/]") def on_test_teardown_start(self, info: TestTeardownInfo) -> None: if self._verbosity >= Verbosity.FIXTURES: - self._print(f"[dim] ← {info.name} teardown...[/]") + label = _short_label(info.name, info.node_id).replace("[", "\\[") + self._print_bypass(f"[dim] ← {label} teardown...[/]") def on_test_retry(self, info: TestRetryInfo) -> None: delay_msg = f", retrying in {info.delay}s" if info.delay > 0 else "" @@ -169,7 +274,17 @@ def on_test_pass(self, result: TestResult) -> None: retry_suffix = ( f" [dim]\\[attempt {result.attempt}/{result.max_attempts}][/]" ) - self._print(f" [green]✓[/] {name} [dim]({duration})[/]{retry_suffix}") + scores_str = ( + _format_eval_scores_inline(result, short=self._short) + if result.is_eval + else "" + ) + self._print( + f" [green]✓[/] {name} [dim]({duration})[/]{scores_str}{retry_suffix}" + ) + if self._show_output and result.is_eval: + self._print_eval_detail(result) + self._maybe_show_logs(result) def on_test_fail(self, result: TestResult) -> None: name = _format_test_name(result) @@ -197,8 +312,17 @@ def on_test_fail(self, result: TestResult) -> None: self._print(f" [red]✗[/] {name}: {result.error}{retry_suffix}") if result.output: - for line in result.output.rstrip().splitlines(): + lines = result.output.rstrip().splitlines() + max_lines = 20 + for line in lines[:max_lines]: self._print(f"[dim] │ {line}[/]") + if len(lines) > max_lines: + self._print( + f"[dim] │ ... 
({len(lines) - max_lines} more lines in .protest/last_run_stdout)[/]" + ) + if result.is_eval: + self._print_eval_detail(result) # always show on fail + self._maybe_show_logs(result) def on_test_skip(self, result: TestResult) -> None: self._skipped += 1 @@ -249,14 +373,16 @@ def _format_traceback(self, error: Exception) -> str: return "".join(lines) def _print_failure_summary(self) -> None: - if self._failed_results: + non_eval_failures = [r for r in self._failed_results if not r.is_eval] + if non_eval_failures: self._print("\n[bold red]═══ FAILURES ═══[/]") - for result in self._failed_results: + for result in non_eval_failures: self._print_failure_detail(result, is_error=False) - if self._error_results: + non_eval_errors = [r for r in self._error_results if not r.is_eval] + if non_eval_errors: self._print("\n[bold yellow]═══ ERRORS ═══[/]") - for result in self._error_results: + for result in non_eval_errors: self._print_failure_detail(result, is_error=True) def _print_failure_detail(self, result: TestResult, *, is_error: bool) -> None: @@ -281,8 +407,82 @@ def _print_failure_detail(self, result: TestResult, *, is_error: bool) -> None: escaped_line = line.replace("[", "\\[") self._print(f"[dim]{escaped_line}[/]") + def on_user_print(self, data: Any) -> None: + from rich.console import Console # noqa: PLC0415 — optional dep, lazy + + msg, raw, prefix = data + # Write to the real stdout, bypassing capture + stream = real_stdout() + c = Console(file=stream, highlight=False) + if raw: + c.print(msg, markup=False) + elif prefix: + c.print(f"[dim] │[/] {msg}") + else: + c.print(msg) + + def on_eval_suite_end(self, report: Any) -> None: + if not isinstance(report, EvalSuiteReport): + return + from rich.table import Table # noqa: PLC0415 — optional dep, lazy + + stats = report.all_score_stats() + self._print("") + if stats: + table = Table( + title=f"Eval: {report.suite_name} ({report.total_count} cases)", + show_header=True, + header_style="bold cyan", + padding=(0, 1), + ) + table.add_column("Score", style="cyan", no_wrap=True) + table.add_column("mean", justify="right") + table.add_column("p50", justify="right") + table.add_column("p5", justify="right", style="dim") + table.add_column("p95", justify="right", style="dim") + for s in stats: + table.add_row( + s.name, + f"{s.mean:.2f}", + f"{s.median:.2f}", + f"{s.p5:.2f}", + f"{s.p95:.2f}", + ) + self.console.print(table) + else: + self._print( + f" [cyan]Eval: {report.suite_name} ({report.total_count} cases)[/]" + ) + rate = report.pass_rate + color = ( + "green" + if rate >= _PERFECT_PASS_RATE + else "yellow" + if rate >= _PARTIAL_PASS_RATE + else "red" + ) + self._print( + f" [{color}]Passed: {report.passed_count}/{report.total_count} ({rate * 100:.1f}%)[/]" + ) + if report.total_task_tokens > 0 or report.total_task_cost > 0: + self._print( + f" [dim]Task: {_format_usage(report.total_task_input_tokens, report.total_task_output_tokens, report.total_task_cost)}[/]" + ) + if report.total_judge_calls > 0: + judge_parts = [f"{report.total_judge_calls} calls"] + usage = _format_usage( + report.total_judge_input_tokens, + report.total_judge_output_tokens, + report.total_judge_cost, + ) + if usage: + judge_parts.append(usage) + self._print(f" [dim]Judge: {', '.join(judge_parts)}[/]") + def on_session_complete(self, result: SessionResult) -> None: - if self._failed_results or self._error_results: + has_non_eval_failures = any(not r.is_eval for r in self._failed_results) + has_non_eval_errors = any(not r.is_eval for r in self._error_results) + if 
has_non_eval_failures or has_non_eval_errors: self._print_failure_summary() total = ( diff --git a/protest/reporting/web.py b/protest/reporting/web.py index 2e47b5d..0b6f915 100644 --- a/protest/reporting/web.py +++ b/protest/reporting/web.py @@ -28,14 +28,15 @@ TestStartInfo, TestTeardownInfo, ) + from protest.evals.types import EvalSuiteReport try: - from websockets.asyncio.server import ( # type: ignore[import-not-found] + from websockets.asyncio.server import ( serve as ws_serve, ) - from websockets.datastructures import Headers # type: ignore[import-not-found] - from websockets.http11 import Request, Response # type: ignore[import-not-found] - from websockets.sync.client import ( # type: ignore[import-not-found] + from websockets.datastructures import Headers + from websockets.http11 import Request, Response + from websockets.sync.client import ( connect as ws_connect, ) except ImportError as err: # pragma: no cover @@ -50,6 +51,22 @@ _broadcast_clients: set[Any] = set() +_REPR_LIMIT = 2048 + + +def _safe_repr(value: Any) -> str | None: + """Render an arbitrary value as a JSON-safe string, capped at _REPR_LIMIT.""" + if value is None: + return None + try: + text = str(value) + except Exception as exc: + text = f"" + if len(text) > _REPR_LIMIT: + text = text[:_REPR_LIMIT] + f"... " + return text + + def _format_traceback(error: Exception) -> str: lines = traceback.format_exception(type(error), error, error.__traceback__) return "".join(lines) @@ -245,6 +262,33 @@ def on_fixture_teardown_done(self, info: FixtureInfo) -> None: {"name": info.name, "scope": info.scope, "duration": info.duration}, ) + def on_eval_suite_end(self, report: EvalSuiteReport) -> None: + self._send( + "EVAL_SUITE_END", + { + "suiteName": report.suite_name, + "totalCount": report.total_count, + "passedCount": report.passed_count, + "failedCount": report.failed_count, + "passRate": report.pass_rate, + "duration": report.duration, + "scoreStats": [ + { + "name": s.name, + "mean": s.mean, + "median": s.median, + "p5": s.p5, + "p95": s.p95, + } + for s in report.all_score_stats() + ], + "taskTokens": report.total_task_tokens, + "taskCost": report.total_task_cost, + "judgeTokens": report.total_judge_tokens, + "judgeCost": report.total_judge_cost, + }, + ) + def on_suite_end(self, result: SuiteResult) -> None: self._send( "SUITE_END", @@ -276,4 +320,29 @@ def _result_payload( if include_error and result.error: payload["message"] = str(result.error) payload["traceback"] = _format_traceback(result.error) + if result.is_eval and result.eval_payload: + ep = result.eval_payload + payload["evalPayload"] = { + "caseName": ep.case_name, + "passed": ep.passed, + "inputs": _safe_repr(ep.inputs), + "output": _safe_repr(ep.output), + "expected": _safe_repr(ep.expected_output), + "scores": { + name: { + "value": entry.value, + "passed": entry.passed, + "skipped": entry.skipped, + } + for name, entry in ep.scores.items() + }, + "taskDuration": ep.task_duration, + "taskInputTokens": ep.task_input_tokens, + "taskOutputTokens": ep.task_output_tokens, + "taskCost": ep.task_cost, + "judgeCallCount": ep.judge_call_count, + "judgeInputTokens": ep.judge_input_tokens, + "judgeOutputTokens": ep.judge_output_tokens, + "judgeCost": ep.judge_cost, + } return payload diff --git a/pyproject.toml b/pyproject.toml index 0608d8d..0cb8974 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -93,13 +93,25 @@ ignore = [ ] "protest/cli/**" = [ "T201", # print allowed in CLI - "PLC0415", # lazy imports for fast --help "PLR2004", # magic values for arg 
parsing ] "protest/core/session.py" = [ - "PLC0415", # lazy import for optional rich dependency "PLR0913", # many args is deliberate API design ] +"protest/core/execution/test_executor.py" = [ + "PLR0915", # _run_test is inherently complex (retry loop + eval capture) +] +"protest/history/**" = [ + "S603", # subprocess git calls are safe + "PLR0913", # load_history has many filter params by design +] +"protest/cli/history.py" = [ + "T201", # print for CLI output +] +"protest/evals/**" = [ + "T201", # print for eval reporting + "PLR0913", # adapter functions have many params by design +] "protest/reporting/ascii.py" = [ "T201", # print is the purpose of this module ] @@ -120,6 +132,9 @@ omit = [ "protest/compat.py", # Version-specific imports, impossible to cover without multi-version CI ] +[tool.mypy] +strict = true + [tool.pytest.ini_options] testpaths = ["tests"] asyncio_mode = "strict" @@ -152,6 +167,7 @@ include = ["protest*"] dev = [ "jsonschema>=4.0.0", "mkdocs-material>=9.7.0", + "mypy>=1.0", "pre-commit>=4.5.0", "pytest>=9.0.1", "pytest-asyncio>=1.3.0", diff --git a/tests/cli/test_run_command.py b/tests/cli/test_run_command.py index a56174d..878bd19 100644 --- a/tests/cli/test_run_command.py +++ b/tests/cli/test_run_command.py @@ -244,3 +244,34 @@ def test_suite_keyword_and_tag(self, run_protest: Callable[..., CLIResult]) -> N result.assert_success() expected_count = 1 assert f"{expected_count}/{expected_count} passed" in result.stdout + + +class TestRunRejectsEvalOnlyFlags: + """`--show-output` is eval-only and must not be accepted by `protest run`. + + The CLI parser is split: `protest run` builds a parser without eval-only + flags, so passing `--show-output` should raise an argparse error rather + than silently no-op (the previous behavior was a UX papercut: the flag + appeared in `protest run --help` but did nothing for non-eval tests). + """ + + def test_run_rejects_show_output( + self, run_protest: Callable[..., CLIResult] + ) -> None: + result = run_protest("run", "simple_session:session", "--show-output") + assert result.exit_code != 0, ( + f"Expected non-zero exit for `protest run --show-output`, " + f"got {result.exit_code}\nstdout: {result.stdout}\nstderr: {result.stderr}" + ) + assert "show-output" in result.stderr, ( + f"Expected argparse error mentioning 'show-output' in stderr, " + f"got: {result.stderr}" + ) + + def test_run_help_omits_show_output( + self, run_protest: Callable[..., CLIResult] + ) -> None: + result = run_protest("run", "--help") + assert "--show-output" not in result.stdout, ( + f"Expected --show-output absent from `protest run --help`:\n{result.stdout}" + ) diff --git a/tests/conftest.py b/tests/conftest.py index 5e14ed2..a40d851 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -13,11 +13,28 @@ TestItem, TestResult, ) +from protest.history import storage as history_storage from protest.plugin import PluginBase from tests.factories.test_items import make_test_item if TYPE_CHECKING: from collections.abc import Callable + from pathlib import Path + + +@pytest.fixture(autouse=True) +def _isolate_protest_history(tmp_path: "Path", monkeypatch: pytest.MonkeyPatch) -> None: + """Redirect `DEFAULT_HISTORY_DIR` to a per-test temp dir. + + Tests that forget to pass `history_dir=tmp_path` would otherwise write + into the repo's real `.protest/history.jsonl`. The monkeypatch targets + the single source of truth (`storage.DEFAULT_HISTORY_DIR`) — all + consumers access it via the module so the override is seen everywhere. 
+ + Tests that pass an explicit `history_dir` still use that value, because + the plugin does `history_dir or storage.DEFAULT_HISTORY_DIR`. + """ + monkeypatch.setattr(history_storage, "DEFAULT_HISTORY_DIR", tmp_path / ".protest") @pytest.fixture diff --git a/tests/core/test_collector.py b/tests/core/test_collector.py index 6b02ad7..9ba8719 100644 --- a/tests/core/test_collector.py +++ b/tests/core/test_collector.py @@ -88,7 +88,7 @@ def test_collect_suite_tests(self) -> None: """Collects tests from suites.""" session = ProTestSession() suite = ProTestSuite("my_suite") - session.include_suite(suite) + session.add_suite(suite) @suite.test() def suite_test() -> None: @@ -107,7 +107,7 @@ def test_collect_mixed_tests(self) -> None: """Collects both standalone and suite tests.""" session = ProTestSession() suite = ProTestSuite("my_suite") - session.include_suite(suite) + session.add_suite(suite) @session.test() def standalone_test() -> None: @@ -129,7 +129,7 @@ def test_collect_generates_correct_node_ids(self) -> None: """Collected items have correct node_ids.""" session = ProTestSession() suite = ProTestSuite("MySuite") - session.include_suite(suite) + session.add_suite(suite) @session.test() def standalone() -> None: diff --git a/tests/core/test_parametrize.py b/tests/core/test_parametrize.py index ec567db..df8a9ac 100644 --- a/tests/core/test_parametrize.py +++ b/tests/core/test_parametrize.py @@ -190,7 +190,7 @@ def test_triple( def test_structured_data_for_reporters(self) -> None: session = ProTestSession() suite = ProTestSuite("API") - session.include_suite(suite) + session.add_suite(suite) users = ForEach(["alice"], ids=lambda u: u) diff --git a/tests/core/test_skip.py b/tests/core/test_skip.py index 437e47d..71cddb1 100644 --- a/tests/core/test_skip.py +++ b/tests/core/test_skip.py @@ -54,7 +54,7 @@ def test_normal() -> None: def test_suite_skip_decorator(self) -> None: session = ProTestSession() suite = ProTestSuite("test") - session.include_suite(suite) + session.add_suite(suite) @suite.test(skip="Suite test skipped") def test_skipped() -> None: diff --git a/tests/core/test_skipif.py b/tests/core/test_skipif.py index 65fe632..4e24388 100644 --- a/tests/core/test_skipif.py +++ b/tests/core/test_skipif.py @@ -74,7 +74,7 @@ def test_skipped() -> None: def test_suite_skip_with_callable(self) -> None: session = ProTestSession() suite = ProTestSuite("test") - session.include_suite(suite) + session.add_suite(suite) @suite.test(skip=lambda: True, skip_reason="Suite conditional skip") def test_skipped() -> None: diff --git a/tests/core/test_xfail.py b/tests/core/test_xfail.py index 8451e23..4cf1d0a 100644 --- a/tests/core/test_xfail.py +++ b/tests/core/test_xfail.py @@ -57,7 +57,7 @@ def test_normal() -> None: def test_suite_xfail_decorator(self) -> None: session = ProTestSession() suite = ProTestSuite("test") - session.include_suite(suite) + session.add_suite(suite) @suite.test(xfail="Suite test xfailed") def test_xfailed() -> None: diff --git a/tests/evals/test_duration_precision.py b/tests/evals/test_duration_precision.py new file mode 100644 index 0000000..fdd47bf --- /dev/null +++ b/tests/evals/test_duration_precision.py @@ -0,0 +1,75 @@ +"""Tests for C3 — sub-millisecond duration handling. + +The eval pipeline persists `EvalPayload.task_duration` (SUT-only timing). +For deterministic stubs / fast classifiers, that value is sub-millisecond +and the previous serializer (`round(_, 3)`) collapsed everything to `0.0`, +making run-over-run comparisons useless. 
The markdown renderer had the +matching bug — it printed `0ms` for any sub-ms task. +""" + +from __future__ import annotations + +from protest.evals.results_writer import _format_case_duration, _render_case +from protest.evals.types import EvalCaseResult +from protest.history.plugin import _serialize_eval_case + + +def _make_case(duration: float) -> EvalCaseResult: + return EvalCaseResult( + case_name="case", + node_id="suite::case", + scores=(), + duration=duration, + passed=True, + inputs="in", + output="out", + expected_output=None, + case_hash="h", + eval_hash="e", + is_error=False, + ) + + +class TestSerializerPrecision: + """`_serialize_eval_case` keeps 5-decimal precision (10 µs).""" + + def test_sub_millisecond_is_not_collapsed_to_zero(self) -> None: + case = _make_case(2.07e-05) # 20.7 µs + entry = _serialize_eval_case(case) + # Previously: 0.0 (round to 3 decimals) + # Now: 2e-05 (round to 5 decimals — 10 µs precision) + assert entry["duration"] > 0 + assert entry["duration"] == 2e-05 + + def test_distinct_sub_ms_values_remain_distinguishable(self) -> None: + e1 = _serialize_eval_case(_make_case(1.0e-05)) # 10 µs + e2 = _serialize_eval_case(_make_case(5.0e-05)) # 50 µs + assert e1["duration"] != e2["duration"] + + def test_millisecond_values_unchanged(self) -> None: + # >1ms: 5-decimal rounding produces the same numbers as 3-decimal. + entry = _serialize_eval_case(_make_case(0.123)) + assert entry["duration"] == 0.123 + + +class TestMarkdownDurationFormat: + """`_format_case_duration` adapts unit to magnitude.""" + + def test_microseconds_for_sub_millisecond(self) -> None: + assert _format_case_duration(2.07e-05) == "21µs" + + def test_two_decimals_in_low_milliseconds(self) -> None: + # 2.5 ms — keep one fractional digit so 1ms vs 2ms is visible. + assert _format_case_duration(0.0025) == "2.50ms" + + def test_integer_milliseconds_in_mid_range(self) -> None: + assert _format_case_duration(0.135) == "135ms" + + def test_seconds_for_one_or_more(self) -> None: + assert _format_case_duration(2.5) == "2.50s" + + def test_renders_microseconds_in_case_header(self) -> None: + case = _make_case(2.07e-05) + rendered = _render_case(case) + # Header contains the duration; previously read "(0ms)". + assert "21µs" in rendered.splitlines()[0] diff --git a/tests/evals/test_e2e.py b/tests/evals/test_e2e.py new file mode 100644 index 0000000..75def3c --- /dev/null +++ b/tests/evals/test_e2e.py @@ -0,0 +1,1229 @@ +"""End-to-end tests for ProTest evals integration. + +These tests define the PUBLIC API contract. They test what the user sees: +- Session setup (ProTestSession, EvalSuite + @suite.eval with ForEach/From) +- CLI behavior (protest run vs protest eval) +- Output format (scores table, trends, failure messages) +- History (JSONL format, stats, significance, clean-dirty) +- Built-in evaluators + +Implementation can change freely as long as these tests pass. 
+""" + +from __future__ import annotations + +import json +import subprocess +from dataclasses import dataclass +from pathlib import Path # noqa: TC003 — used at runtime (pytest tmp_path) +from typing import Annotated, Any + +from protest import ForEach, From, ProTestSession, Use, fixture +from protest.api import run_session +from protest.core.collector import Collector +from protest.core.runner import TestRunner +from protest.core.suite import ProTestSuite +from protest.entities import SuiteKind +from protest.evals import ( + EvalCase, + EvalContext, + Metric, + ModelLabel, + ShortCircuit, + Verdict, + evaluator, +) +from protest.evals.evaluators import ( + contains_expected, + contains_keywords, + does_not_contain, + json_valid, + matches_regex, + max_length, + min_length, + not_empty, + word_overlap, +) +from protest.evals.hashing import compute_case_hash, compute_eval_hash +from protest.evals.results_writer import EvalResultsWriter +from protest.evals.suite import EvalSuite +from protest.evals.types import EvalSuiteReport # noqa: TC001 — used at runtime +from protest.filters.kind import KindFilterPlugin +from protest.history.storage import append_entry, clean_dirty +from protest.plugin import PluginBase, PluginContext + +# --------------------------------------------------------------------------- +# Fixtures: deterministic evaluators + task +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True, slots=True) +class FakeAccuracyResult: + """Structured result for fake accuracy evaluator.""" + + accuracy: Annotated[float, Metric] + matches_expected: Annotated[bool, Verdict] + + +@evaluator +def fake_accuracy(ctx: EvalContext) -> FakeAccuracyResult: + if ctx.expected_output and ctx.expected_output.lower() in ctx.output.lower(): + return FakeAccuracyResult(accuracy=1.0, matches_expected=True) + return FakeAccuracyResult(accuracy=0.0, matches_expected=False) + + +@evaluator +async def async_fake_accuracy(ctx: EvalContext) -> FakeAccuracyResult: + """Async evaluator — simulates LLMJudge which calls an async LLM API.""" + # Simulate async I/O (e.g. 
LLM call) without actually blocking + if ctx.expected_output and ctx.expected_output.lower() in ctx.output.lower(): + return FakeAccuracyResult(accuracy=1.0, matches_expected=True) + return FakeAccuracyResult(accuracy=0.0, matches_expected=False) + + +def echo_task(text: str) -> str: + return f"Echo: {text}" + + +async def async_echo_task(text: str) -> str: + return f"Async: {text}" + + +basic_cases = ForEach( + [ + EvalCase(inputs="hello world", expected="hello", name="case_pass"), + EvalCase(inputs="xyz", expected="notfound", name="case_fail"), + ], + ids=lambda c: c.name, +) + + +# --------------------------------------------------------------------------- +# Session setup +# --------------------------------------------------------------------------- + + +class TestEvalSetup: + """Eval setup: ProTestSession + EvalSuite with model=, @suite.eval.""" + + def test_add_eval_creates_eval_kind(self) -> None: + session = ProTestSession() + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + # The session should have a suite with kind=eval + assert len(session._suites) > 0 + assert any(s.kind == "eval" for s in session._suites) + + def test_model_set_via_suite(self) -> None: + suite = EvalSuite("eval_echo", model=ModelLabel(name="test-model")) + assert suite._model is not None + assert suite._model.name == "test-model" + + def test_metadata_on_constructor(self) -> None: + session = ProTestSession(metadata={"env": "test"}) + assert session.metadata["env"] == "test" + + def test_eval_with_bool_verdict(self) -> None: + """Evaluator with bool field: case_fail has matches_expected=False -> fail.""" + session = ProTestSession() + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + result = runner.run() + # case_pass returns matches_expected=True -> pass + # case_fail returns matches_expected=False -> fail + assert result.success is False + + def test_async_task_works(self) -> None: + session = ProTestSession() + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + async def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return await async_echo_task(case.inputs) + + runner = TestRunner(session) + runner.run() + + def test_async_evaluator_does_not_crash(self) -> None: + """Regression: async evaluator called via evaluate_sync raised 'event loop already running'.""" + single_case = ForEach( + [ + EvalCase(inputs="hello world", expected="hello", name="c1"), + ], + ids=lambda c: c.name, + ) + + session = ProTestSession() + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[async_fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(single_case)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + result = runner.run() + assert result.success is True + + +# --------------------------------------------------------------------------- +# Kind filtering (protest run vs protest eval) +# --------------------------------------------------------------------------- + + +class TestKindFiltering: + """Suites have kind, 
filtering works.""" + + def test_test_suite_has_kind_test(self) -> None: + suite = ProTestSuite("my_tests") + assert suite.kind == "test" + + def test_eval_suite_has_kind_eval(self) -> None: + session = ProTestSession() + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + assert any(s.kind == "eval" for s in session._suites) + + def test_kind_filter_keeps_only_matching(self) -> None: + test_suite = ProTestSuite("tests") + eval_suite = ProTestSuite("evals", kind=SuiteKind.EVAL) + + session = ProTestSession() + + @test_suite.test() + def test_one() -> None: + pass + + @eval_suite.test(is_eval=True) + def eval_one() -> None: + pass + + session.add_suite(test_suite) + session.add_suite(eval_suite) + + items = Collector().collect(session) + assert len(items) == 2 + + # Filter to eval only + plugin = KindFilterPlugin(kind=SuiteKind.EVAL) + filtered = plugin.on_collection_finish(items) + assert len(filtered) == 1 + assert filtered[0].suite.kind == "eval" + + def test_unified_session_runs_tests_only(self) -> None: + """protest run behavior: only kind=test suites.""" + session = ProTestSession() + + test_suite = ProTestSuite("unit") + results: list[str] = [] + + @test_suite.test() + def test_a() -> None: + results.append("test") + + session.add_suite(test_suite) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + ctx = PluginContext(args={"kind_filter": "test"}) + run_session(session, ctx=ctx) + + assert "test" in results + + def test_unified_session_runs_evals_only(self) -> None: + """protest eval behavior: only kind=eval suites.""" + session = ProTestSession() + + test_suite = ProTestSuite("unit") + test_ran = [] + + @test_suite.test() + def test_a() -> None: + test_ran.append(True) + + session.add_suite(test_suite) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + ctx = PluginContext(args={"kind_filter": "eval"}) + run_session(session, ctx=ctx) + + assert len(test_ran) == 0 # test suite was filtered out + + +# --------------------------------------------------------------------------- +# Output format +# --------------------------------------------------------------------------- + + +class TestEvalOutput: + """What the user sees in the terminal. + + These tests verify output by reading the EvalPlugin report directly, + since ProTest captures stdout during test runs. 
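+
+    Each test registers a small capture plugin, roughly:
+
+        class ReportCapture(PluginBase):
+            def on_eval_suite_end(self, report): reports.append(report)
+
+        session.register_plugin(ReportCapture())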
+ """ + + def test_report_contains_score_stats(self) -> None: + reports: list[EvalSuiteReport] = [] + + class ReportCapture(PluginBase): + name = "report-capture" + description = "Captures eval reports" + + def on_eval_suite_end(self, report: Any) -> None: + reports.append(report) + + session = ProTestSession() + session.register_plugin(ReportCapture()) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + runner.run() + + assert len(reports) == 1 + stats = reports[0].all_score_stats() + assert len(stats) > 0 + assert any(s.name == "accuracy" for s in stats) + + def test_report_has_pass_count(self) -> None: + reports: list[EvalSuiteReport] = [] + + class ReportCapture(PluginBase): + name = "report-capture" + description = "Captures eval reports" + + def on_eval_suite_end(self, report: Any) -> None: + reports.append(report) + + session = ProTestSession() + session.register_plugin(ReportCapture()) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + runner.run() + + assert len(reports) == 1 + assert reports[0].total_count == 2 + + def test_failed_eval_has_error_with_score_details(self) -> None: + """When an eval case fails, the error message includes score details.""" + errors: list[Any] = [] + + class ErrorCollector(PluginBase): + name = "error-collector" + + def on_test_fail(self, result: Any) -> None: + if result.error: + errors.append(str(result.error)) + + session = ProTestSession() + session.register_plugin(ErrorCollector()) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + run_session(session) + + # case_fail has matches_expected=False + assert any("matches_expected=" in e for e in errors) + + +# --------------------------------------------------------------------------- +# EvalPayload flow +# --------------------------------------------------------------------------- + + +class TestEvalPayloadFlow: + """EvalPayload flows through the framework correctly.""" + + def test_test_result_has_eval_payload(self) -> None: + collected: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + collected.append(result) + + def on_test_fail(self, result: Any) -> None: + collected.append(result) + + session = ProTestSession() + session.register_plugin(Collector()) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + runner.run() + + assert len(collected) == 2 + for result in collected: + assert result.is_eval is True + assert result.eval_payload is not None + assert result.eval_payload.case_name in ("case_pass", "case_fail") + assert "accuracy" in result.eval_payload.scores + assert "matches_expected" in result.eval_payload.scores + + def test_lifecycle_events_have_case_id_in_node_id(self) -> None: + 
"""setup_done/teardown_start events carry node_id with [case_id].""" + setup_ids: list[str] = [] + teardown_ids: list[str] = [] + + class LifecycleCollector(PluginBase): + name = "lifecycle-collector" + + def on_test_setup_done(self, info: Any) -> None: + setup_ids.append(info.node_id) + + def on_test_teardown_start(self, info: Any) -> None: + teardown_ids.append(info.node_id) + + session = ProTestSession() + session.register_plugin(LifecycleCollector()) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + runner.run() + + assert len(setup_ids) == 2 + for node_id in setup_ids: + assert "[" in node_id, f"node_id missing case id: {node_id}" + for node_id in teardown_ids: + assert "[" in node_id, f"node_id missing case id: {node_id}" + + def test_evaluator_exception_is_error_not_fail(self) -> None: + """An evaluator that raises is treated as error (infra), not test fail.""" + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_fail(self, result: Any) -> None: + results.append(result) + + @evaluator + def crashing_evaluator(ctx: EvalContext) -> bool: + raise RuntimeError("LLM judge timeout") + + single_case = ForEach( + [ + EvalCase(inputs="hello", expected="hello", name="c1"), + ], + ids=lambda c: c.name, + ) + + session = ProTestSession() + session.register_plugin(Collector()) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[crashing_evaluator]) + def eval_echo(case: Annotated[EvalCase, From(single_case)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + runner.run() + + assert len(results) == 1 + assert results[0].is_fixture_error is True + assert "LLM judge timeout" in str(results[0].error) + + def test_non_eval_test_has_no_payload(self) -> None: + collected: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + collected.append(result) + + session = ProTestSession() + session.register_plugin(Collector()) + + @session.test() + def regular_test() -> None: + assert True + + runner = TestRunner(session) + runner.run() + + assert len(collected) == 1 + assert collected[0].is_eval is False + assert collected[0].eval_payload is None + + +# --------------------------------------------------------------------------- +# History +# --------------------------------------------------------------------------- + + +class TestHistory: + """JSONL history format and querying.""" + + def _run_eval(self, tmp_path: Path) -> None: + session = ProTestSession(history_dir=tmp_path) + + eval_echo_suite = EvalSuite("eval_echo", model=ModelLabel(name="test-model")) + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + run_session(session) + + def test_history_file_created(self, tmp_path: Path) -> None: + self._run_eval(tmp_path) + assert (tmp_path / "history.jsonl").exists() + + def test_history_entry_format(self, tmp_path: Path) -> None: + self._run_eval(tmp_path) + lines = (tmp_path / "history.jsonl").read_text().strip().splitlines() + entry = json.loads(lines[0]) + + # Required top-level keys + assert "run_id" in entry + assert "timestamp" in entry + assert 
"git" in entry + assert "environment" in entry + assert "metadata" in entry + assert "evals" in entry + assert "suites" in entry + + # Evals block + assert entry["evals"] is not None + assert entry["evals"]["model"] == "test-model" + + # Suites with kind + suites = entry["suites"] + assert len(suites) == 1 + suite_name = next(iter(suites)) + suite = suites[suite_name] + assert suite["kind"] == "eval" + assert "total_cases" in suite + assert "passed" in suite + assert "cases" in suite + + def test_history_test_run_has_null_evals(self, tmp_path: Path) -> None: + session = ProTestSession(history=True, history_dir=tmp_path) + + @session.test() + def test_simple() -> None: + pass + + run_session(session) + + lines = (tmp_path / "history.jsonl").read_text().strip().splitlines() + entry = json.loads(lines[0]) + assert entry["evals"] is None + + def test_history_multiple_runs_append(self, tmp_path: Path) -> None: + self._run_eval(tmp_path) + self._run_eval(tmp_path) + lines = (tmp_path / "history.jsonl").read_text().strip().splitlines() + assert len(lines) == 2 + + def test_history_metadata_included(self, tmp_path: Path) -> None: + session = ProTestSession( + history_dir=tmp_path, + metadata={"env": "test", "version": "1.0"}, + ) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + run_session(session) + + lines = (tmp_path / "history.jsonl").read_text().strip().splitlines() + entry = json.loads(lines[0]) + assert entry["metadata"]["env"] == "test" + + +# --------------------------------------------------------------------------- +# History: clean-dirty +# --------------------------------------------------------------------------- + + +class TestCleanDirty: + """protest history --clean-dirty behavior.""" + + def test_clean_dirty_removes_current_head_only(self, tmp_path: Path) -> None: + # Entry with current HEAD + dirty + try: + current_commit = subprocess.run( + ["git", "rev-parse", "HEAD"], # noqa: S607 + capture_output=True, + text=True, + timeout=5, + check=True, + ).stdout.strip() + except (FileNotFoundError, subprocess.CalledProcessError): + return # skip if not in a git repo + + path = tmp_path / "history.jsonl" + + # Dirty entry on current HEAD -> should be removed + append_entry( + path, {"git": {"commit": current_commit, "dirty": True}, "suites": {}} + ) + # Dirty entry on old commit -> should be preserved + append_entry(path, {"git": {"commit": "old123", "dirty": True}, "suites": {}}) + # Clean entry on current HEAD -> should be preserved + append_entry( + path, {"git": {"commit": current_commit, "dirty": False}, "suites": {}} + ) + + removed = clean_dirty(history_dir=tmp_path) + assert removed == 1 + + lines = path.read_text().strip().splitlines() + assert len(lines) == 2 + + +# --------------------------------------------------------------------------- +# Case hashing +# --------------------------------------------------------------------------- + + +class TestCaseHashing: + """Content hashing for eval integrity.""" + + def test_case_hash_stored_in_history(self, tmp_path: Path) -> None: + """History entries include case_hash and eval_hash per case.""" + session = ProTestSession(history_dir=tmp_path) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> 
str: + return echo_task(case.inputs) + + run_session(session) + + lines = (tmp_path / "history.jsonl").read_text().strip().splitlines() + entry = json.loads(lines[0]) + suites = entry["suites"] + suite = next(iter(suites.values())) + case = next(iter(suite["cases"].values())) + assert "case_hash" in case + assert "eval_hash" in case + assert len(case["case_hash"]) > 0 + assert len(case["eval_hash"]) > 0 + + def test_case_hash_changes_on_input_change(self) -> None: + """Different inputs -> different case_hash.""" + h1 = compute_case_hash("hello world", "expected") + h2 = compute_case_hash("hello world modified", "expected") + assert h1 != h2 + + def test_case_hash_stable_for_same_input(self) -> None: + """Same inputs -> same case_hash (deterministic).""" + h1 = compute_case_hash("hello world", "expected") + h2 = compute_case_hash("hello world", "expected") + assert h1 == h2 + + def test_eval_hash_changes_on_evaluator_change(self) -> None: + """Different evaluators -> different eval_hash.""" + e1 = contains_keywords(keywords=["hello"]) + e2 = contains_keywords(keywords=["hello", "world"]) + h1 = compute_eval_hash([e1]) + h2 = compute_eval_hash([e2]) + assert h1 != h2 + + +# --------------------------------------------------------------------------- +# Built-in evaluators +# --------------------------------------------------------------------------- + + +class TestBuiltinEvaluators: + """All built-in evaluators work correctly through protest-native API.""" + + def _make_ctx(self, output: str, expected: str | None = None) -> EvalContext: + """Minimal EvalContext for evaluator testing.""" + return EvalContext( + name="test", + inputs="", + output=output, + expected_output=expected, + metadata=None, + duration=0.0, + ) + + def test_contains_keywords(self) -> None: + e = contains_keywords(keywords=["hello", "world"]) + result = e.run(self._make_ctx("Hello World")) + assert result.keyword_recall == 1.0 + assert result.all_keywords_present is True + + def test_contains_keywords_default_requires_all(self) -> None: + """Default `min_recall=1.0` means strict: missing one → verdict False.""" + e = contains_keywords(keywords=["hello", "world"]) + result = e.run(self._make_ctx("Only hello here")) + assert result.keyword_recall == 0.5 + assert result.all_keywords_present is False + + def test_contains_keywords_threshold_continuity_at_zero(self) -> None: + """Regression: `min_recall=0.0` must always pass (no discontinuity at 0). + + Earlier behavior: `min_recall=0.0` flipped to strict mode (all required), + while `min_recall=0.0001` was permissive — surprising at the boundary. + Now `recall >= min_recall` applies uniformly. 
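+
+        Worked example with this test's values: keywords=["alpha", "beta"]
+        and an output matching neither give recall = 0/2 = 0.0, and
+        0.0 >= min_recall (0.0) holds, so the verdict passes.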
+ """ + e = contains_keywords(keywords=["alpha", "beta"], min_recall=0.0) + result = e.run(self._make_ctx("nothing matches")) + assert result.keyword_recall == 0.0 + assert result.all_keywords_present is True + + def test_contains_keywords_threshold_at_exact_value(self) -> None: + """Verdict passes when recall equals the threshold exactly.""" + e = contains_keywords(keywords=["alpha", "beta"], min_recall=0.5) + result = e.run(self._make_ctx("only alpha here")) + assert result.keyword_recall == 0.5 + assert result.all_keywords_present is True + + def test_contains_keywords_threshold_just_below(self) -> None: + """Verdict fails when recall is below the threshold.""" + e = contains_keywords(keywords=["alpha", "beta", "gamma"], min_recall=0.5) + result = e.run(self._make_ctx("only alpha")) + assert abs(result.keyword_recall - 1 / 3) < 1e-9 + assert result.all_keywords_present is False + + def test_contains_expected(self) -> None: + e = contains_expected + assert e.run(self._make_ctx("Hello World", "world")) is True + assert e.run(self._make_ctx("Hello", "world")) is False + + def test_does_not_contain(self) -> None: + e = does_not_contain(forbidden=["cat", "dog"]) + assert e.run(self._make_ctx("Yorkshire")).no_forbidden_words is True + assert e.run(self._make_ctx("I like cats")).no_forbidden_words is False + + def test_not_empty(self) -> None: + assert not_empty.run(self._make_ctx("hello")) is True + assert not_empty.run(self._make_ctx("")) is False + assert not_empty.run(self._make_ctx(" ")) is False + + def test_not_empty_handles_sized_containers(self) -> None: + """Sized containers: empty -> False, non-empty -> True. + + Earlier behavior fell through to `return True` for any non-string, + so `not_empty([])` reported True — misleading for tasks that return + lists/dicts (e.g. tool calls, retrieved chunks). + """ + # Helper accepts Any at runtime; type hint is just a default. + ctx_empty_list: Any = self._make_ctx("") + ctx_empty_list.output = [] + assert not_empty.run(ctx_empty_list) is False + + ctx_nonempty_list: Any = self._make_ctx("") + ctx_nonempty_list.output = [1, 2] + assert not_empty.run(ctx_nonempty_list) is True + + ctx_empty_dict: Any = self._make_ctx("") + ctx_empty_dict.output = {} + assert not_empty.run(ctx_empty_dict) is False + + ctx_nonempty_dict: Any = self._make_ctx("") + ctx_nonempty_dict.output = {"a": 1} + assert not_empty.run(ctx_nonempty_dict) is True + + ctx_empty_set: Any = self._make_ctx("") + ctx_empty_set.output = set() + assert not_empty.run(ctx_empty_set) is False + + def test_not_empty_unsized_objects_still_pass(self) -> None: + """Non-Sized values (int, float, dataclass): always True (kept as-is).""" + ctx_int: Any = self._make_ctx("") + ctx_int.output = 42 + assert not_empty.run(ctx_int) is True + + ctx_zero: Any = self._make_ctx("") + ctx_zero.output = 0 # 0 is not None, not Sized — still passes. 
+ assert not_empty.run(ctx_zero) is True + + def test_max_length(self) -> None: + e = max_length(max_chars=5) + result = e.run(self._make_ctx("hi")) + assert result.within_limit is True + result = e.run(self._make_ctx("this is too long")) + assert result.within_limit is False + + def test_min_length(self) -> None: + assert min_length(min_chars=3).run(self._make_ctx("hello")) is True + assert min_length(min_chars=10).run(self._make_ctx("hi")) is False + + def test_matches_regex(self) -> None: + e = matches_regex(pattern=r"\d{3}-\d{4}") + assert e.run(self._make_ctx("Call 555-1234")) is True + assert e.run(self._make_ctx("no numbers")) is False + + def test_json_valid(self) -> None: + e = json_valid(required_keys=["name"]) + result = e.run(self._make_ctx('{"name": "Rex"}')) + assert result.valid_json is True + assert result.has_required_keys is True + result = e.run(self._make_ctx("not json")) + assert result.valid_json is False + + def test_word_overlap(self) -> None: + e = word_overlap + assert e.run(self._make_ctx("hello world", "hello world")).overlap == 1.0 + assert e.run(self._make_ctx("hello there", "hello world")).overlap == 0.5 + assert e.run(self._make_ctx("foo", "hello world")).overlap == 0.0 + + +# --------------------------------------------------------------------------- +# Scoring v2: bool verdict, tracking-only metrics +# --------------------------------------------------------------------------- + + +class TestScoringV2: + """Scoring v2: evaluators return bool or dataclass.""" + + def test_bool_evaluator_pass(self) -> None: + """Evaluator returning True -> case passes.""" + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + results.append(result) + + def on_test_fail(self, result: Any) -> None: + results.append(result) + + single_case = ForEach( + [ + EvalCase(inputs="hello world", expected="hello", name="c1"), + ], + ids=lambda c: c.name, + ) + + session = ProTestSession() + session.register_plugin(Collector()) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[not_empty]) + def eval_echo(case: Annotated[EvalCase, From(single_case)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + result = runner.run() + + assert result.success is True + assert len(results) == 1 + assert results[0].eval_payload.scores["not_empty"].value is True + + def test_dataclass_without_bool_is_tracking_only(self) -> None: + """Dataclass with only float fields -> tracking-only, always passes.""" + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + results.append(result) + + def on_test_fail(self, result: Any) -> None: + results.append(result) + + single_case = ForEach( + [ + EvalCase(inputs="foo", expected="bar baz", name="c1"), + ], + ids=lambda c: c.name, + ) + + session = ProTestSession() + session.register_plugin(Collector()) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[word_overlap]) + def eval_echo(case: Annotated[EvalCase, From(single_case)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + result = runner.run() + + # word_overlap returns only float -> tracking-only, always passes + assert result.success is True + + def test_float_return_raises_type_error(self) -> None: + """Evaluator returning naked float -> TypeError (caught as fixture error).""" 
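+        # Rationale (inferred): a bare float is ambiguous (tracking metric or
+        # pass/fail signal?), so the run fails hard. Wrap the value in a
+        # dataclass field annotated with `Metric` (see FakeAccuracyResult).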
+ results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_fail(self, result: Any) -> None: + results.append(result) + + @evaluator + def bad_evaluator(ctx: EvalContext) -> float: + return 0.5 + + single_case = ForEach( + [EvalCase(inputs="hello", expected="hello", name="c1")], + ids=lambda c: c.name, + ) + + session = ProTestSession() + session.register_plugin(Collector()) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[bad_evaluator]) + def eval_echo(case: Annotated[EvalCase, From(single_case)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + runner.run() + + assert len(results) == 1 + assert results[0].is_fixture_error is True + + +class TestShortCircuit: + """ShortCircuit: skip expensive evaluators when cheap ones fail.""" + + def test_short_circuit_skips_on_fail(self) -> None: + call_log: list[str] = [] + + @evaluator + def cheap(ctx: EvalContext) -> bool: + call_log.append("cheap") + return "hello" in ctx.output.lower() + + @evaluator + def expensive(ctx: EvalContext) -> bool: + call_log.append("expensive") + return True + + session = ProTestSession() + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[ShortCircuit([cheap, expensive])]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + runner.run() + + # case_pass: cheap ✓ → expensive ✓ (both called) + # case_fail: cheap ✗ → expensive SKIPPED + assert call_log.count("cheap") == 2 + assert call_log.count("expensive") == 1 + + def test_short_circuit_all_pass(self) -> None: + call_log: list[str] = [] + + @evaluator + def check_a(ctx: EvalContext) -> bool: + call_log.append("a") + return True + + @evaluator + def check_b(ctx: EvalContext) -> bool: + call_log.append("b") + return True + + single = ForEach( + [EvalCase(inputs="x", expected="x", name="c1")], ids=lambda c: c.name + ) + session = ProTestSession() + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[ShortCircuit([check_a, check_b])]) + def eval_echo(case: Annotated[EvalCase, From(single)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + result = runner.run() + + assert result.success is True + assert call_log == ["a", "b"] + + +# --------------------------------------------------------------------------- +# Results files per run +# --------------------------------------------------------------------------- + + +class TestResultsFiles: + """Per-case markdown files written to .protest/results/_/.""" + + def _run_eval(self, tmp_path: Path) -> Path: + results_dir = tmp_path / "results" + session = ProTestSession() + writer = EvalResultsWriter(history_dir=tmp_path) + session.register_plugin(writer) + + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str: + return echo_task(case.inputs) + + runner = TestRunner(session) + runner.run() + return results_dir + + def test_results_dir_created(self, tmp_path: Path) -> None: + results_dir = self._run_eval(tmp_path) + assert results_dir.exists() + + def test_one_file_per_case(self, tmp_path: Path) -> None: + results_dir = self._run_eval(tmp_path) + run_dirs = list(results_dir.iterdir()) + assert len(run_dirs) == 1 + 
case_files = list(run_dirs[0].iterdir())
+        assert len(case_files) == 2  # case_pass + case_fail
+
+    def test_case_file_contains_output(self, tmp_path: Path) -> None:
+        results_dir = self._run_eval(tmp_path)
+        run_dir = next(results_dir.iterdir())
+        pass_file = next(f for f in run_dir.iterdir() if "pass" in f.name)
+        content = pass_file.read_text()
+        assert "Echo:" in content  # task output
+        assert "PASS" in content
+
+    def test_case_file_contains_scores(self, tmp_path: Path) -> None:
+        results_dir = self._run_eval(tmp_path)
+        run_dir = next(results_dir.iterdir())
+        pass_file = next(f for f in run_dir.iterdir() if "pass" in f.name)
+        content = pass_file.read_text()
+        assert "accuracy" in content
+
+    def test_case_file_contains_inputs(self, tmp_path: Path) -> None:
+        results_dir = self._run_eval(tmp_path)
+        run_dir = next(results_dir.iterdir())
+        pass_file = next(f for f in run_dir.iterdir() if "pass" in f.name)
+        content = pass_file.read_text()
+        assert "hello world" in content  # from case inputs
+
+
+# ---------------------------------------------------------------------------
+# Multi-dataset history (regression: all suites were merged under one name)
+# ---------------------------------------------------------------------------
+
+
+class TestMultiDatasetHistory:
+    """Multiple EvalSuite + @suite.eval calls produce distinct suites in history."""
+
+    def _run_multi(self, tmp_path: Path) -> dict[str, Any]:
+        pipeline_cases = ForEach(
+            [
+                EvalCase(inputs="hello", expected="hello", name="c1"),
+            ],
+            ids=lambda c: c.name,
+        )
+
+        ingest_cases = ForEach(
+            [
+                EvalCase(inputs="world", expected="world", name="c2"),
+            ],
+            ids=lambda c: c.name,
+        )
+
+        session = ProTestSession(history_dir=tmp_path)
+
+        pipeline_suite = EvalSuite("pipeline")
+        session.add_suite(pipeline_suite)
+
+        @pipeline_suite.eval(evaluators=[fake_accuracy])
+        def pipeline(case: Annotated[EvalCase, From(pipeline_cases)]) -> str:
+            return echo_task(case.inputs)
+
+        ingest_suite = EvalSuite("ingest")
+        session.add_suite(ingest_suite)
+
+        @ingest_suite.eval(evaluators=[fake_accuracy])
+        def ingest(case: Annotated[EvalCase, From(ingest_cases)]) -> str:
+            return echo_task(case.inputs)
+
+        run_session(session)
+
+        history = (tmp_path / "history.jsonl").read_text().splitlines()
+        return json.loads(history[-1])
+
+    def test_two_datasets_produce_two_suites_in_history(self, tmp_path: Path) -> None:
+        entry = self._run_multi(tmp_path)
+        assert "pipeline" in entry["suites"]
+        assert "ingest" in entry["suites"]
+
+    def test_each_suite_has_its_own_cases(self, tmp_path: Path) -> None:
+        entry = self._run_multi(tmp_path)
+        assert "c1" in entry["suites"]["pipeline"]["cases"]
+        assert "c2" in entry["suites"]["ingest"]["cases"]
+
+
+# ---------------------------------------------------------------------------
+# DI fixture injection into eval tasks
+# ---------------------------------------------------------------------------
+
+
+class TestEvalTaskFixtures:
+    """EvalSuite + @suite.eval() can use protest fixtures via Use()."""
+
+    def test_task_without_fixtures_still_works(self) -> None:
+        # basic_cases has one match (case_pass) and one mismatch (case_fail)
+        # fake_accuracy returns matches_expected=False for case_fail -> fail
+        session = ProTestSession()
+
+        eval_echo_suite = EvalSuite("eval_echo")
+        session.add_suite(eval_echo_suite)
+
+        @eval_echo_suite.eval(evaluators=[fake_accuracy])
+        def eval_echo(case: Annotated[EvalCase, From(basic_cases)]) -> str:
+            return echo_task(case.inputs)
+
+        runner = TestRunner(session)
+        result = runner.run()
+        assert result.success is False  # case_fail has matches_expected=False
+
+    def test_task_with_session_fixture_is_injected(self) -> None:
+        """A session-scoped fixture is injected into the task via Use()."""
+
+        @fixture()
+        def prefix_service() -> str:
+            return "PREFIX"
+
+        single_case = ForEach(
+            [
+                EvalCase(inputs="hello", expected="PREFIX:hello", name="c1"),
+            ],
+            ids=lambda c: c.name,
+        )
+
+        session = ProTestSession()
+        session.bind(prefix_service)
+
+        eval_prefixed_suite = EvalSuite("eval_prefixed")
+        session.add_suite(eval_prefixed_suite)
+
+        @eval_prefixed_suite.eval(evaluators=[fake_accuracy])
+        async def eval_prefixed(
+            case: Annotated[EvalCase, From(single_case)],
+            svc: Annotated[str, Use(prefix_service)],
+        ) -> str:
+            return f"{svc}:{case.inputs}"
+
+        runner = TestRunner(session)
+        result = runner.run()
+
+        # fake_accuracy returns 1.0 (output contains expected) -> pass
+        assert result.success is True
+
+    def test_session_fixture_resolved_once_for_all_cases(self) -> None:
+        """A session fixture must be resolved only once, even with N cases."""
+        call_count = 0
+
+        @fixture()
+        def expensive_resource() -> str:
+            nonlocal call_count
+            call_count += 1
+            return "resource"
+
+        multi_cases = ForEach(
+            [
+                EvalCase(inputs="a", expected="resource:a", name="c1"),
+                EvalCase(inputs="b", expected="resource:b", name="c2"),
+                EvalCase(inputs="c", expected="resource:c", name="c3"),
+            ],
+            ids=lambda c: c.name,
+        )
+
+        session = ProTestSession()
+        session.bind(expensive_resource)
+
+        eval_resource_suite = EvalSuite("eval_resource")
+        session.add_suite(eval_resource_suite)
+
+        @eval_resource_suite.eval(evaluators=[fake_accuracy])
+        async def eval_resource(
+            case: Annotated[EvalCase, From(multi_cases)],
+            res: Annotated[str, Use(expensive_resource)],
+        ) -> str:
+            return f"{res}:{case.inputs}"
+
+        runner = TestRunner(session)
+        runner.run()
+
+        assert call_count == 1  # fixture resolved exactly once
diff --git a/tests/evals/test_eval_case_result.py b/tests/evals/test_eval_case_result.py
new file mode 100644
index 0000000..06471eb
--- /dev/null
+++ b/tests/evals/test_eval_case_result.py
@@ -0,0 +1,135 @@
+"""Tests for `EvalCaseResult.from_test_result`.
+
+This classmethod is the single constructor used by both the runner (post-
+execution) and the results writer (pass/fail hooks). The tests below pin the
+full field mapping so that future additions to `EvalPayload` or `TestResult`
+either update the classmethod or break a test.
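+
+Typical call, as pinned below (sketch):
+
+    case = EvalCaseResult.from_test_result(test_result)
+    # raises ValueError if test_result.eval_payload is None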
+""" + +from __future__ import annotations + +import pytest + +from protest.entities.events import EvalPayload, EvalScoreEntry, TestResult +from protest.evals.types import EvalCaseResult + + +def _make_payload(**overrides: object) -> EvalPayload: + defaults: dict[str, object] = { + "case_name": "case_one", + "passed": True, + "task_duration": 0.123, + "inputs": "in", + "output": "out", + "expected_output": "expected", + "scores": {"accuracy": EvalScoreEntry(value=0.9, passed=True)}, + "case_hash": "ch", + "eval_hash": "eh", + "task_input_tokens": 100, + "task_output_tokens": 200, + "task_cost": 0.01, + "judge_call_count": 1, + "judge_input_tokens": 50, + "judge_output_tokens": 30, + "judge_cost": 0.005, + } + defaults.update(overrides) + return EvalPayload(**defaults) # type: ignore[arg-type] + + +def _make_result( + *, + error: Exception | None = None, + is_fixture_error: bool = False, + payload: EvalPayload | None = None, + node_id: str = "suite::case_one", +) -> TestResult: + return TestResult( + name="case_one", + node_id=node_id, + error=error, + is_fixture_error=is_fixture_error, + is_eval=True, + eval_payload=payload or _make_payload(), + ) + + +class TestFromTestResultHappyPath: + """Full field mapping: all payload + result fields land in the result.""" + + def test_all_fields_copied(self) -> None: + result = _make_result() + case = EvalCaseResult.from_test_result(result) + assert case.case_name == "case_one" + assert case.node_id == "suite::case_one" + assert case.duration == pytest.approx(0.123) + assert case.passed is True + assert case.inputs == "in" + assert case.output == "out" + assert case.expected_output == "expected" + assert case.case_hash == "ch" + assert case.eval_hash == "eh" + assert case.is_error is False + + def test_scores_converted_from_entries(self) -> None: + case = EvalCaseResult.from_test_result(_make_result()) + assert len(case.scores) == 1 + assert case.scores[0].name == "accuracy" + assert case.scores[0].value == 0.9 + + def test_task_usage_copied(self) -> None: + """Regression: writer used to drop these fields silently.""" + case = EvalCaseResult.from_test_result(_make_result()) + assert case.task_input_tokens == 100 + assert case.task_output_tokens == 200 + assert case.task_cost == pytest.approx(0.01) + + def test_judge_usage_copied(self) -> None: + """Regression: writer used to drop these fields silently.""" + case = EvalCaseResult.from_test_result(_make_result()) + assert case.judge_call_count == 1 + assert case.judge_input_tokens == 50 + assert case.judge_output_tokens == 30 + assert case.judge_cost == pytest.approx(0.005) + + +class TestFromTestResultPassedDerivation: + """`passed` is derived, not passed in — the writer no longer gets it wrong.""" + + def test_passed_when_no_error_and_payload_passed(self) -> None: + result = _make_result(payload=_make_payload(passed=True)) + assert EvalCaseResult.from_test_result(result).passed is True + + def test_failed_when_payload_not_passed(self) -> None: + result = _make_result(payload=_make_payload(passed=False)) + assert EvalCaseResult.from_test_result(result).passed is False + + def test_failed_when_error_present(self) -> None: + result = _make_result( + error=RuntimeError("boom"), + payload=_make_payload(passed=True), + ) + assert EvalCaseResult.from_test_result(result).passed is False + + def test_is_error_reflects_fixture_error(self) -> None: + result = _make_result( + error=RuntimeError("fx"), + is_fixture_error=True, + ) + case = EvalCaseResult.from_test_result(result) + assert case.is_error is True + 
assert case.passed is False + + +class TestFromTestResultErrors: + """Defensive: classmethod refuses a TestResult without eval_payload.""" + + def test_missing_payload_raises(self) -> None: + result = TestResult( + name="n", + node_id="x", + is_eval=False, + eval_payload=None, + ) + with pytest.raises(ValueError, match="eval_payload"): + EvalCaseResult.from_test_result(result) diff --git a/tests/evals/test_evalcase.py b/tests/evals/test_evalcase.py new file mode 100644 index 0000000..12435f6 --- /dev/null +++ b/tests/evals/test_evalcase.py @@ -0,0 +1,36 @@ +"""Tests for `EvalCase` construction invariants.""" + +from __future__ import annotations + +import pytest + +from protest.evals import EvalCase + + +class TestEvalCaseRequiresName: + """`name` is required and must be non-empty.""" + + def test_name_required_as_kwarg(self) -> None: + case = EvalCase(inputs="x", name="my_case") + assert case.name == "my_case" + + def test_missing_name_raises(self) -> None: + with pytest.raises(TypeError): + EvalCase(inputs="x") # type: ignore[call-arg] + + def test_empty_name_raises(self) -> None: + with pytest.raises(ValueError, match="non-empty"): + EvalCase(inputs="x", name="") + + def test_name_is_second_positional(self) -> None: + case = EvalCase("input_val", "case_name") + assert case.inputs == "input_val" + assert case.name == "case_name" + + +class TestEvalCaseRepr: + """`__repr__` returns the name (no fallback anymore since name is required).""" + + def test_repr_is_name(self) -> None: + case = EvalCase(inputs="x", name="readable_name") + assert repr(case) == "readable_name" diff --git a/tests/evals/test_evalcase_tags_wiring.py b/tests/evals/test_evalcase_tags_wiring.py new file mode 100644 index 0000000..05ff8ca --- /dev/null +++ b/tests/evals/test_evalcase_tags_wiring.py @@ -0,0 +1,95 @@ +"""Tests for `EvalCase.tags` → `TestItem.tags` wiring. + +Verifies that tags declared on an `EvalCase` via `tags=[...]` are merged +into the resulting `TestItem.tags` set, so that the `TagFilterPlugin` +(which filters on `TestItem.tags`) can honor them. + +Eval functions are defined at module level to avoid `get_type_hints()` +resolution issues that occur with nested function definitions. +""" + +from __future__ import annotations + +from typing import Annotated + +from protest import ForEach, From, ProTestSession +from protest.core.collector import Collector +from protest.evals import EvalCase +from protest.evals.suite import EvalSuite +from protest.tags.plugin import TagFilterPlugin + +# Module-level case sources so `get_type_hints()` can resolve Annotated args. 
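+# The CLI reaches the same filter path through the documented tag flags,
+# e.g. `protest eval <target> --tag safety` or `--no-tag safety`.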
+_single_tagged = [EvalCase(inputs="x", name="c1", tags=["safety"])]
+_multi_tagged = [EvalCase(inputs="x", name="c1", tags=["safety", "factual"])]
+_mixed_cases = [
+    EvalCase(inputs="x", name="c1", tags=["safety"]),
+    EvalCase(inputs="y", name="c2", tags=["factual"]),
+    EvalCase(inputs="z", name="c3"),
+]
+_no_tags_metadata = [
+    EvalCase(inputs="x", name="c1", metadata={"other": "value"}),
+]
+_filter_cases = [
+    EvalCase(inputs="a", name="c_safety", tags=["safety"]),
+    EvalCase(inputs="b", name="c_factual", tags=["factual"]),
+]
+
+
+def _collect(cases: list[EvalCase]) -> list:
+    """Build a session with a parametrized eval over `cases` and collect items."""
+    session = ProTestSession()
+    suite = EvalSuite("evals")
+
+    source = ForEach(cases)
+
+    @suite.eval()
+    def my_eval(case: Annotated[EvalCase, From(source)]) -> str:
+        return str(case.inputs)
+
+    _ = my_eval  # silence unused-var diagnostics; decorator registers it
+    session.add_suite(suite)
+    return Collector().collect(session)
+
+
+class TestCaseTagsMergedIntoItemTags:
+    def test_single_case_tag_becomes_item_tag(self) -> None:
+        items = _collect(_single_tagged)
+        assert len(items) == 1
+        assert "safety" in items[0].tags
+
+    def test_multiple_case_tags(self) -> None:
+        items = _collect(_multi_tagged)
+        assert items[0].tags >= {"safety", "factual"}
+
+    def test_cases_get_distinct_tags(self) -> None:
+        items = _collect(_mixed_cases)
+        assert len(items) == 3
+        by_name = {item.case_ids[0]: item for item in items}
+        assert "safety" in by_name["c1"].tags
+        assert "factual" not in by_name["c1"].tags
+        assert "factual" in by_name["c2"].tags
+        assert "safety" not in by_name["c2"].tags
+        assert by_name["c3"].tags == set()
+
+    def test_case_with_metadata_only_has_no_tags(self) -> None:
+        """`metadata` is free-form user data: no key (not even 'tags') is interpreted."""
+        items = _collect(_no_tags_metadata)
+        assert items[0].tags == set()
+
+
+class TestTagFilterHonorsCaseTags:
+    """End-to-end: `TagFilterPlugin` filters items based on case tags."""
+
+    def test_include_tag_keeps_matching_cases(self) -> None:
+        items = _collect(_filter_cases)
+        plugin = TagFilterPlugin(include_tags={"safety"})
+        filtered = plugin.on_collection_finish(items)
+        assert len(filtered) == 1
+        assert filtered[0].case_ids == ["c_safety"]
+
+    def test_exclude_tag_drops_matching_cases(self) -> None:
+        items = _collect(_filter_cases)
+        plugin = TagFilterPlugin(exclude_tags={"safety"})
+        filtered = plugin.on_collection_finish(items)
+        assert len(filtered) == 1
+        assert filtered[0].case_ids == ["c_factual"]
diff --git a/tests/evals/test_evaluator_validation.py b/tests/evals/test_evaluator_validation.py
new file mode 100644
index 0000000..584a988
--- /dev/null
+++ b/tests/evals/test_evaluator_validation.py
@@ -0,0 +1,57 @@
+"""Validation that evaluators=[...] only accepts @evaluator-wrapped objects.
+
+Plain callables and arbitrary values used to be silently accepted, forcing a
+runtime ``isinstance`` dispatch in the executor. Validating at the boundary
+turns the failure into a clear TypeError at registration time and lets the
+downstream code work on a uniform ``Evaluator | ShortCircuit`` Union.
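+
+For example, ``evaluators=[lambda ctx: True]`` now raises a TypeError that
+mentions ``@evaluator`` at registration time, instead of surfacing later
+inside the executor.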
+""" + +from __future__ import annotations + +import pytest + +from protest.evals.evaluator import ( + EvalCase, + EvalContext, + ShortCircuit, + evaluator, + validate_evaluators, +) + + +@evaluator +def _ok(ctx: EvalContext) -> bool: + return True + + +def _plain_callable(ctx: EvalContext) -> bool: + return True + + +class TestValidateEvaluators: + def test_accepts_evaluator(self) -> None: + validate_evaluators([_ok]) + + def test_accepts_short_circuit(self) -> None: + validate_evaluators([ShortCircuit([_ok])]) + + def test_rejects_plain_callable(self) -> None: + with pytest.raises(TypeError, match="@evaluator"): + validate_evaluators([_plain_callable]) + + def test_rejects_non_callable(self) -> None: + with pytest.raises(TypeError, match="Expected Evaluator or ShortCircuit"): + validate_evaluators(["not_an_evaluator"]) # type: ignore[list-item] + + def test_rejects_nested_short_circuit(self) -> None: + with pytest.raises(TypeError, match="cannot nest"): + ShortCircuit([ShortCircuit([_ok])]) # type: ignore[list-item] + + +class TestEvalCaseValidates: + def test_evalcase_rejects_plain_callable(self) -> None: + with pytest.raises(TypeError, match="@evaluator"): + EvalCase(inputs="x", name="c", evaluators=[_plain_callable]) + + def test_evalcase_accepts_evaluator(self) -> None: + EvalCase(inputs="x", name="c", evaluators=[_ok]) diff --git a/tests/evals/test_hashing.py b/tests/evals/test_hashing.py new file mode 100644 index 0000000..26e5570 --- /dev/null +++ b/tests/evals/test_hashing.py @@ -0,0 +1,289 @@ +"""Tests for protest.evals.hashing — fail-hard canonicalization.""" + +from __future__ import annotations + +import dataclasses +import functools +import threading + +import pytest + +from protest.evals.hashing import ( + CanonicalError, + _canonical, + compute_case_hash, + compute_eval_hash, +) + +# --------------------------------------------------------------------------- +# Fixtures — representative evaluator types +# --------------------------------------------------------------------------- + + +@dataclasses.dataclass +class SimpleEvaluator: + threshold: float + name: str = "simple" + + +@dataclasses.dataclass +class NestedEvaluator: + inner: SimpleEvaluator + weight: float = 1.0 + + +@dataclasses.dataclass +class LockHoldingEvaluator: + """Simulates evaluators like LLMJudge that hold non-picklable resources.""" + + name: str + _lock: threading.Lock = dataclasses.field(default_factory=threading.Lock) + + +def bare_function(ctx: object) -> bool: + return True + + +def parameterized_function(ctx: object, keywords: list[str]) -> bool: + return True + + +# --------------------------------------------------------------------------- +# _canonical — primitives & containers +# --------------------------------------------------------------------------- + + +class TestCanonicalPrimitives: + @pytest.mark.parametrize("value", [None, True, False, 42, 3.14, "hello"]) + def test_primitives_pass_through(self, value: object) -> None: + assert _canonical(value) is value + + def test_list(self) -> None: + assert _canonical([1, "a", [2]]) == [1, "a", [2]] + + def test_tuple_treated_as_list(self) -> None: + assert _canonical((1, 2)) == [1, 2] + + def test_dict_sorted_by_key(self) -> None: + assert _canonical({"b": 2, "a": 1}) == {"a": 1, "b": 2} + + +# --------------------------------------------------------------------------- +# _canonical — dataclass handling +# --------------------------------------------------------------------------- + + +class TestCanonicalDataclass: + def 
test_simple_dataclass_is_serialized(self) -> None: + ev = SimpleEvaluator(threshold=0.8) + result = _canonical(ev) + assert result == { + "__type__": "SimpleEvaluator", + "threshold": 0.8, + "name": "simple", + } + + def test_nested_dataclass_is_serialized_recursively(self) -> None: + ev = NestedEvaluator(inner=SimpleEvaluator(threshold=0.5), weight=2.0) + result = _canonical(ev) + assert result == { + "__type__": "NestedEvaluator", + "inner": { + "__type__": "SimpleEvaluator", + "threshold": 0.5, + "name": "simple", + }, + "weight": 2.0, + } + + def test_dataclass_with_lock_skips_private_fields(self) -> None: + """Regression: dataclasses.asdict() deepcopy fails on threading.Lock. + + Private fields (_prefixed) are runtime internals, not config — excluded from hash. + """ + ev = LockHoldingEvaluator(name="llm_judge") + result = _canonical(ev) + assert result == {"__type__": "LockHoldingEvaluator", "name": "llm_judge"} + assert "_lock" not in result + + +# --------------------------------------------------------------------------- +# _canonical — callables (the real-world evaluator path) +# --------------------------------------------------------------------------- + + +class TestCanonicalCallable: + def test_bare_function(self) -> None: + result = _canonical(bare_function) + assert result == {"fn": "bare_function"} + + def test_partial_captures_qualname_and_kwargs(self) -> None: + bound = functools.partial(parameterized_function, keywords=["paris"]) + result = _canonical(bound) + assert result == { + "fn": "parameterized_function", + "args": [], + "kwargs": {"keywords": ["paris"]}, + } + + def test_partial_different_kwargs_different_canonical(self) -> None: + a = functools.partial(parameterized_function, keywords=["paris"]) + b = functools.partial(parameterized_function, keywords=["lyon"]) + assert _canonical(a) != _canonical(b) + + def test_partial_same_kwargs_same_canonical(self) -> None: + a = functools.partial(parameterized_function, keywords=["paris"]) + b = functools.partial(parameterized_function, keywords=["paris"]) + assert _canonical(a) == _canonical(b) + + +# --------------------------------------------------------------------------- +# _canonical — evaluator_identity (explicit, user-controlled) +# --------------------------------------------------------------------------- + + +class TestCanonicalEvaluatorIdentity: + def test_evaluator_identity_takes_precedence(self) -> None: + """evaluator_identity() is used over introspection when available.""" + + class CustomScorer: + def __init__(self, model: str, temperature: float): + self.model = model + self.temperature = temperature + self._client = object() # runtime state, not config + + def evaluator_identity(self) -> dict: + return {"model": self.model, "temperature": self.temperature} + + result = _canonical(CustomScorer(model="gpt-4", temperature=0.7)) + assert result == {"model": "gpt-4", "temperature": 0.7} + + def test_evaluator_identity_on_dataclass_overrides_introspection(self) -> None: + """evaluator_identity() wins even if the object is a dataclass.""" + + @dataclasses.dataclass + class VersionedEvaluator: + threshold: float + version: int = 1 + + def evaluator_identity(self) -> dict: + return {"v": self.version, "t": self.threshold} + + result = _canonical(VersionedEvaluator(threshold=0.8, version=2)) + assert result == {"v": 2, "t": 0.8} + + def test_evaluator_identity_different_config_different_hash(self) -> None: + class CustomScorer: + def __init__(self, model: str): + self.model = model + + def 
evaluator_identity(self) -> dict: + return {"model": self.model} + + h1 = compute_eval_hash([CustomScorer(model="gpt-4")]) + h2 = compute_eval_hash([CustomScorer(model="claude")]) + assert h1 != h2 + + def test_evaluator_identity_same_config_same_hash(self) -> None: + class CustomScorer: + def __init__(self, model: str): + self.model = model + + def evaluator_identity(self) -> dict: + return {"model": self.model} + + h1 = compute_eval_hash([CustomScorer(model="gpt-4")]) + h2 = compute_eval_hash([CustomScorer(model="gpt-4")]) + assert h1 == h2 + + +# --------------------------------------------------------------------------- +# _canonical — fail-hard on unknown types +# --------------------------------------------------------------------------- + + +class TestCanonicalFailHard: + def test_unknown_type_raises_canonical_error(self) -> None: + class Opaque: + pass + + with pytest.raises(CanonicalError, match="Opaque"): + _canonical(Opaque()) + + def test_non_callable_non_dataclass_raises(self) -> None: + with pytest.raises(CanonicalError): + _canonical(object()) + + def test_error_message_mentions_evaluator_identity(self) -> None: + class Opaque: + pass + + with pytest.raises(CanonicalError, match="evaluator_identity"): + _canonical(Opaque()) + + +# --------------------------------------------------------------------------- +# compute_case_hash +# --------------------------------------------------------------------------- + + +class TestComputeCaseHash: + def test_same_inputs_same_hash(self) -> None: + h1 = compute_case_hash("hello", "expected") + h2 = compute_case_hash("hello", "expected") + assert h1 == h2 + + def test_different_inputs_different_hash(self) -> None: + h1 = compute_case_hash("hello", "expected") + h2 = compute_case_hash("world", "expected") + assert h1 != h2 + + def test_none_expected_is_stable(self) -> None: + h1 = compute_case_hash("hello", None) + h2 = compute_case_hash("hello", None) + assert h1 == h2 + + def test_dict_inputs(self) -> None: + h1 = compute_case_hash({"q": "hello", "context": "world"}, "expected") + h2 = compute_case_hash({"context": "world", "q": "hello"}, "expected") + assert h1 == h2, "dict key order should not affect hash" + + +# --------------------------------------------------------------------------- +# compute_eval_hash +# --------------------------------------------------------------------------- + + +class TestComputeEvalHash: + def test_identical_evaluators_produce_same_hash(self) -> None: + ev = SimpleEvaluator(threshold=0.8) + h1 = compute_eval_hash([ev]) + h2 = compute_eval_hash([ev]) + assert h1 == h2 + + def test_different_thresholds_produce_different_hashes(self) -> None: + ev_a = SimpleEvaluator(threshold=0.8) + ev_b = SimpleEvaluator(threshold=0.9) + assert compute_eval_hash([ev_a]) != compute_eval_hash([ev_b]) + + def test_evaluator_with_lock_does_not_crash(self) -> None: + """Regression for non-picklable evaluator fields.""" + ev = LockHoldingEvaluator(name="llm_judge") + hash_val = compute_eval_hash([ev]) + assert len(hash_val) == 12 + + def test_partial_evaluators_hash_stably(self) -> None: + ev = functools.partial(parameterized_function, keywords=["paris"]) + h1 = compute_eval_hash([ev]) + h2 = compute_eval_hash([ev]) + assert h1 == h2 + + def test_bare_function_evaluator(self) -> None: + h1 = compute_eval_hash([bare_function]) + h2 = compute_eval_hash([bare_function]) + assert h1 == h2 + + def test_different_partial_kwargs_different_hash(self) -> None: + ev_a = functools.partial(parameterized_function, keywords=["paris"]) + ev_b = 
functools.partial(parameterized_function, keywords=["lyon"]) + assert compute_eval_hash([ev_a]) != compute_eval_hash([ev_b]) diff --git a/tests/evals/test_judge.py b/tests/evals/test_judge.py new file mode 100644 index 0000000..a27ea41 --- /dev/null +++ b/tests/evals/test_judge.py @@ -0,0 +1,448 @@ +"""Tests for the Judge protocol and ctx.judge() integration.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Annotated, Any + +import pytest + +from protest import ForEach, From, ProTestSession +from protest.core.runner import TestRunner +from protest.evals import ( + EvalContext, + Judge, + JudgeResponse, + TaskResult, + Verdict, + evaluator, +) +from protest.evals.suite import EvalSuite +from protest.plugin import PluginBase + +# --------------------------------------------------------------------------- +# Fake judge for testing +# --------------------------------------------------------------------------- + + +class FakeJudge: + """Minimal Judge implementation for tests.""" + + name: str = "fake-judge" + provider: str | None = "test" + + async def judge(self, prompt: str, output_type: type) -> JudgeResponse: + if output_type is bool: + return JudgeResponse( + output="pass" in prompt.lower(), + input_tokens=10, + output_tokens=5, + cost=0.001, + ) + if output_type is str: + return JudgeResponse(output=f"judged: {prompt[:20]}") + # Dataclass fallback: caller must use a dataclass whose fields all + # have defaults — no real LLM call to derive values from. + return JudgeResponse(output=output_type()) + + +class BareJudge: + """Minimal Judge with required name/provider.""" + + name: str = "bare-judge" + provider: str | None = None + + async def judge(self, prompt: str, output_type: type) -> JudgeResponse: + return JudgeResponse(output=True) + + +# --------------------------------------------------------------------------- +# Protocol compliance +# --------------------------------------------------------------------------- + + +class TestJudgeProtocol: + def test_fake_judge_satisfies_protocol(self) -> None: + assert isinstance(FakeJudge(), Judge) + + def test_bare_judge_satisfies_protocol(self) -> None: + assert isinstance(BareJudge(), Judge) + + def test_non_judge_rejected(self) -> None: + class NotAJudge: + def evaluate(self, prompt: str) -> str: + return "nope" + + assert not isinstance(NotAJudge(), Judge) + + +# --------------------------------------------------------------------------- +# EvalContext.judge() +# --------------------------------------------------------------------------- + + +class TestEvalContextJudge: + @pytest.mark.asyncio + async def test_judge_happy_path(self) -> None: + judge = FakeJudge() + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + result = await ctx.judge("pass this", bool) + assert result is True + + @pytest.mark.asyncio + async def test_judge_str_output(self) -> None: + judge = FakeJudge() + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + result = await ctx.judge("hello world", str) + assert result == "judged: hello world" + + @pytest.mark.asyncio + async def test_judge_raises_without_judge(self) -> None: + ctx = EvalContext( + name="my_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + ) + with pytest.raises(RuntimeError, match="no judge is configured"): + await 
ctx.judge("test", bool) + + @pytest.mark.asyncio + async def test_judge_error_mentions_case_name(self) -> None: + ctx = EvalContext( + name="chatbot_eval", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + ) + with pytest.raises(RuntimeError, match="chatbot_eval"): + await ctx.judge("test", bool) + + @pytest.mark.asyncio + async def test_judge_call_count(self) -> None: + judge = FakeJudge() + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + assert ctx.judge_call_count == 0 + await ctx.judge("pass 1", bool) + assert ctx.judge_call_count == 1 + await ctx.judge("pass 2", bool) + await ctx.judge("pass 3", bool) + assert ctx.judge_call_count == 3 + + @pytest.mark.asyncio + async def test_judge_tokens_accumulated(self) -> None: + judge = FakeJudge() # returns input_tokens=10, output_tokens=5 for bool + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + await ctx.judge("pass 1", bool) + await ctx.judge("pass 2", bool) + assert ctx.judge_input_tokens == 20 + assert ctx.judge_output_tokens == 10 + + @pytest.mark.asyncio + async def test_judge_cost_accumulated(self) -> None: + judge = FakeJudge() # returns cost=0.001 for bool + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + await ctx.judge("pass 1", bool) + await ctx.judge("pass 2", bool) + assert ctx.judge_cost == pytest.approx(0.002) + + @pytest.mark.asyncio + async def test_judge_none_tokens_not_accumulated(self) -> None: + """JudgeResponse with tokens=None doesn't affect accumulation.""" + judge = FakeJudge() + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + await ctx.judge("hello", str) # FakeJudge returns no tokens for str + assert ctx.judge_input_tokens == 0 + assert ctx.judge_output_tokens == 0 + assert ctx.judge_cost == 0.0 + + +# --------------------------------------------------------------------------- +# E2E: ProTestSession with judge on EvalSuite +# --------------------------------------------------------------------------- + +single_case = ForEach( + [{"inputs": "hello", "expected": "hello", "name": "case_1"}], + ids=lambda c: c["name"], +) + + +class TestJudgeE2E: + def test_judge_available_in_evaluator(self) -> None: + """Full run: evaluator calls ctx.judge(), result is pass.""" + + @evaluator + async def judge_evaluator(ctx: EvalContext) -> bool: + return await ctx.judge("pass this", bool) + + session = ProTestSession() + eval_echo_suite = EvalSuite("eval_echo", judge=FakeJudge()) + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[judge_evaluator]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return case["inputs"] + + runner = TestRunner(session) + result = runner.run() + assert result.success is True + + def test_no_judge_is_fixture_error(self) -> None: + """Evaluator calls ctx.judge() without judge configured → infra error.""" + + @evaluator + async def needs_judge(ctx: EvalContext) -> bool: + return await ctx.judge("test", bool) + + session = ProTestSession() + eval_echo_suite = EvalSuite("eval_echo") # no judge + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[needs_judge]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: 
+ return case["inputs"] + + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_fail(self, result: Any) -> None: + results.append(result) + + session.register_plugin(Collector()) + runner = TestRunner(session) + result = runner.run() + assert result.success is False + assert len(results) == 1 + assert results[0].is_fixture_error is True + + def test_judge_call_count_in_payload(self) -> None: + """judge_call_count flows through to EvalPayload.""" + + @evaluator + async def double_judge(ctx: EvalContext) -> bool: + r1 = await ctx.judge("pass first", bool) + r2 = await ctx.judge("pass second", bool) + return r1 and r2 + + session = ProTestSession() + eval_echo_suite = EvalSuite("eval_echo", judge=FakeJudge()) + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[double_judge]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return case["inputs"] + + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + results.append(result) + + session.register_plugin(Collector()) + runner = TestRunner(session) + runner.run() + assert len(results) == 1 + payload = results[0].eval_payload + assert payload is not None + assert payload.judge_call_count == 2 + assert payload.judge_input_tokens == 20 # 10 per call x 2 + assert payload.judge_output_tokens == 10 # 5 per call x 2 + assert payload.judge_cost == pytest.approx(0.002) # 0.001 per call x 2 + + def test_judge_info_derived_from_suite(self) -> None: + """EvalSuite derives JudgeInfo from Judge instance.""" + suite = EvalSuite("eval_echo", judge=FakeJudge()) + assert suite._judge is not None + assert suite._judge.name == "fake-judge" + assert suite._judge.provider == "test" + + def test_no_judge_no_judge_info(self) -> None: + """EvalSuite without judge has no JudgeInfo.""" + suite = EvalSuite("eval_echo") + assert suite._judge is None + + def test_judge_with_structured_output(self) -> None: + """Judge returns structured dataclass via output_type.""" + + @dataclass + class JudgeVerdict: + ok: Annotated[bool, Verdict] + + class StructuredJudge: + name: str = "structured" + provider: str | None = None + + async def judge(self, prompt: str, output_type: type) -> JudgeResponse: + return JudgeResponse(output=output_type(ok=True)) + + @evaluator + async def struct_evaluator(ctx: EvalContext) -> JudgeVerdict: + return await ctx.judge("evaluate this", JudgeVerdict) + + session = ProTestSession() + eval_echo_suite = EvalSuite("eval_echo", judge=StructuredJudge()) + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[struct_evaluator]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return case["inputs"] + + runner = TestRunner(session) + result = runner.run() + assert result.success is True + + +# --------------------------------------------------------------------------- +# TaskResult: SUT usage tracking +# --------------------------------------------------------------------------- + + +class TestTaskResult: + def test_task_result_unwrapped_for_evaluators(self) -> None: + """TaskResult is unwrapped — evaluators see the plain output.""" + + @evaluator + def check_output(ctx: EvalContext) -> bool: + return ctx.output == "hello" # sees str, not TaskResult + + session = ProTestSession() + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[check_output]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> 
TaskResult[str]: + return TaskResult( + output=case["inputs"], + input_tokens=100, + output_tokens=50, + cost=0.01, + ) + + runner = TestRunner(session) + result = runner.run() + assert result.success is True + + def test_task_usage_in_payload(self) -> None: + """TaskResult tokens/cost flow through to EvalPayload.""" + + @evaluator + def always_pass(ctx: EvalContext) -> bool: + return True + + session = ProTestSession() + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[always_pass]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> TaskResult[str]: + return TaskResult( + output=case["inputs"], + input_tokens=200, + output_tokens=80, + cost=0.005, + ) + + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + results.append(result) + + session.register_plugin(Collector()) + runner = TestRunner(session) + runner.run() + assert len(results) == 1 + payload = results[0].eval_payload + assert payload is not None + assert payload.task_input_tokens == 200 + assert payload.task_output_tokens == 80 + assert payload.task_cost == pytest.approx(0.005) + + def test_plain_return_has_zero_task_usage(self) -> None: + """Plain return (no TaskResult) has zero task usage.""" + + @evaluator + def always_pass(ctx: EvalContext) -> bool: + return True + + session = ProTestSession() + eval_echo_suite = EvalSuite("eval_echo") + session.add_suite(eval_echo_suite) + + @eval_echo_suite.eval(evaluators=[always_pass]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return case["inputs"] + + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + results.append(result) + + session.register_plugin(Collector()) + runner = TestRunner(session) + runner.run() + payload = results[0].eval_payload + assert payload.task_input_tokens == 0 + assert payload.task_output_tokens == 0 + assert payload.task_cost == 0.0 diff --git a/tests/evals/test_multiple_evalcase_params.py b/tests/evals/test_multiple_evalcase_params.py new file mode 100644 index 0000000..3880811 --- /dev/null +++ b/tests/evals/test_multiple_evalcase_params.py @@ -0,0 +1,99 @@ +"""Tests for `_validate_single_evalcase_param` — D1 registration-time check. + +The runtime contract (`_find_case`) picks the first `EvalCase` in kwargs and +silently drops any others. The wrapper detects > 1 EvalCase param at +registration and raises a clear error pointing at the offending parameters. +""" + +from __future__ import annotations + +from typing import Annotated + +import pytest + +from protest import ForEach, From, ProTestSession +from protest.evals import EvalCase +from protest.evals.suite import EvalSuite +from protest.exceptions import MultipleEvalCaseParamsError + +# Module-level case sources so `get_type_hints()` can resolve Annotated args. 
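+# (This file uses `from __future__ import annotations`, so parameter
+# annotations are strings; `get_type_hints()` evaluates them against module
+# globals, which is why `_cases_a`/`_cases_b` live at module scope rather
+# than inside a test body.)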
+_cases_a = ForEach([EvalCase(inputs="a", name="a1")])
+_cases_b = ForEach([EvalCase(inputs="b", name="b1")])
+
+
+class _MyCase(EvalCase):
+    """Subclass to verify the check covers user-defined EvalCase types."""
+
+
+_subclass_cases = ForEach([_MyCase(inputs="x", name="x1")])
+
+
+class TestSingleEvalCaseParamAccepted:
+    def test_one_evalcase_param_via_annotated_from(self) -> None:
+        session = ProTestSession()
+        suite = EvalSuite("evals")
+
+        @suite.eval()
+        def good(case: Annotated[EvalCase, From(_cases_a)]) -> str:
+            return str(case.inputs)
+
+        _ = good
+        session.add_suite(suite)  # no raise
+
+    def test_zero_evalcase_param_accepted(self) -> None:
+        """Evals without parametrization (or without EvalCase) are valid."""
+        session = ProTestSession()
+        suite = EvalSuite("evals")
+
+        @suite.eval()
+        def no_case() -> str:
+            return "static"
+
+        _ = no_case
+        session.add_suite(suite)  # no raise
+
+    def test_subclass_param_accepted_when_alone(self) -> None:
+        session = ProTestSession()
+        suite = EvalSuite("evals")
+
+        @suite.eval()
+        def good(case: Annotated[_MyCase, From(_subclass_cases)]) -> str:
+            return str(case.inputs)
+
+        _ = good
+        session.add_suite(suite)
+
+
+class TestMultipleEvalCaseParamRejected:
+    def test_two_evalcase_params_raise(self) -> None:
+        suite = EvalSuite("evals")
+
+        with pytest.raises(MultipleEvalCaseParamsError) as excinfo:
+
+            @suite.eval()
+            def bad(
+                case_a: Annotated[EvalCase, From(_cases_a)],
+                case_b: Annotated[EvalCase, From(_cases_b)],
+            ) -> str:
+                return f"{case_a.inputs}+{case_b.inputs}"
+
+        msg = str(excinfo.value)
+        assert "bad" in msg
+        assert "case_a" in msg
+        assert "case_b" in msg
+
+    def test_subclass_counts_as_evalcase(self) -> None:
+        """A param typed `_MyCase` (subclass) collides with an `EvalCase` param."""
+        suite = EvalSuite("evals")
+
+        with pytest.raises(MultipleEvalCaseParamsError) as excinfo:
+
+            @suite.eval()
+            def bad(
+                case_a: Annotated[EvalCase, From(_cases_a)],
+                case_b: Annotated[_MyCase, From(_subclass_cases)],
+            ) -> str:
+                return str(case_a.inputs) + str(case_b.inputs)
+
+        assert "case_a" in str(excinfo.value)
+        assert "case_b" in str(excinfo.value)
diff --git a/tests/evals/test_score_name_collision.py b/tests/evals/test_score_name_collision.py
new file mode 100644
index 0000000..f5d4c48
--- /dev/null
+++ b/tests/evals/test_score_name_collision.py
@@ -0,0 +1,143 @@
+"""Tests for `ScoreNameCollisionError` — fail-loud on duplicate score names.
+
+Two evaluators emitting a score under the same name (e.g. both have a
+``detail`` field on their dataclass return) would silently overwrite each
+other in ``EvalPayload.scores`` (a dict). The wrapper detects the
+collision at runtime and raises a clear error pointing at the duplicate
+name(s) so the user can rename the colliding field.
+""" + +from __future__ import annotations + +import asyncio +from dataclasses import dataclass +from typing import Annotated + +import pytest + +from protest import ForEach, From, ProTestSession +from protest.evals import ( + EvalCase, + EvalContext, + EvalSuite, + Reason, + Verdict, + evaluator, +) +from protest.evals.wrapper import make_eval_wrapper +from protest.exceptions import ScoreNameCollisionError + +_cases = ForEach([EvalCase(inputs="x", name="c1")]) + + +@dataclass +class _ShapeA: + matches: Annotated[bool, Verdict] + detail: Annotated[str, Reason] = "" + + +@dataclass +class _ShapeB: + other_check: Annotated[bool, Verdict] + detail: Annotated[str, Reason] = "" # collides with _ShapeA.detail + + +@evaluator +def _shape_a(ctx: EvalContext) -> _ShapeA: + return _ShapeA(matches=True, detail="from A") + + +@evaluator +def _shape_b(ctx: EvalContext) -> _ShapeB: + return _ShapeB(other_check=True, detail="from B") + + +@evaluator +def _bool_one(ctx: EvalContext) -> bool: + return True + + +@dataclass +class _ShapeWithBoolOneField: + _bool_one: Annotated[bool, Verdict] # collides with _bool_one evaluator's name + + +@evaluator +def _shape_collides_with_bool(ctx: EvalContext) -> _ShapeWithBoolOneField: + return _ShapeWithBoolOneField(_bool_one=True) + + +@dataclass +class _ShapeUniqueA: + matches_a: Annotated[bool, Verdict] + detail_a: Annotated[str, Reason] = "" + + +@dataclass +class _ShapeUniqueB: + matches_b: Annotated[bool, Verdict] + detail_b: Annotated[str, Reason] = "" + + +@evaluator +def _shape_unique_a(ctx: EvalContext) -> _ShapeUniqueA: + return _ShapeUniqueA(matches_a=True, detail_a="A") + + +@evaluator +def _shape_unique_b(ctx: EvalContext) -> _ShapeUniqueB: + return _ShapeUniqueB(matches_b=True, detail_b="B") + + +def _invoke(evaluators: list, case: EvalCase) -> None: + """Invoke the eval wrapper directly so collision exceptions propagate.""" + + def task(case: EvalCase) -> str: + return str(case.inputs) + + wrapped = make_eval_wrapper(task, evaluators) + asyncio.run(wrapped(case=case)) + + +class TestCollisionRaises: + def test_two_dataclasses_share_field_name(self) -> None: + with pytest.raises(ScoreNameCollisionError) as excinfo: + _invoke([_shape_a, _shape_b], EvalCase(inputs="x", name="c1")) + msg = str(excinfo.value) + assert "'detail'" in msg + assert "c1" in msg + + def test_bool_evaluator_name_collides_with_dataclass_field(self) -> None: + with pytest.raises(ScoreNameCollisionError) as excinfo: + _invoke( + [_bool_one, _shape_collides_with_bool], + EvalCase(inputs="x", name="c2"), + ) + msg = str(excinfo.value) + assert "_bool_one" in msg + assert "c2" in msg + + +class TestNoCollisionPasses: + def test_unique_names_pass(self) -> None: + # Should not raise. 
+ _invoke( + [_shape_unique_a, _shape_unique_b], + EvalCase(inputs="x", name="c1"), + ) + + def test_session_with_unique_names_runs_clean(self) -> None: + """Smoke check: running through the full session path also succeeds.""" + from protest.api import run_session # noqa: PLC0415 — heavy import + + session = ProTestSession() + suite = EvalSuite("evals") + + @suite.eval(evaluators=[_shape_unique_a, _shape_unique_b]) + def ok(case: Annotated[EvalCase, From(_cases)]) -> str: + return str(case.inputs) + + _ = ok + session.add_suite(suite) + result = run_session(session) + assert result.success diff --git a/tests/evals/test_score_stats.py b/tests/evals/test_score_stats.py new file mode 100644 index 0000000..7a0eb90 --- /dev/null +++ b/tests/evals/test_score_stats.py @@ -0,0 +1,92 @@ +"""Tests for `ScoreStats.from_values` — percentile correctness. + +Pre-M11, p5/p95 used `int(n * 0.05)` index lookup, which collapses to +min/max for small samples (the typical eval case). Post-M11 uses +`statistics.quantiles(method='inclusive')` for true linear-interpolated +percentiles. These tests pin the new behavior. +""" + +from __future__ import annotations + +import pytest + +from protest.evals.types import ScoreStats + + +class TestEmptyAndSingleValue: + def test_empty_returns_zeroed_stats(self) -> None: + stats = ScoreStats.from_values("acc", []) + assert stats.count == 0 + assert stats.mean == 0 + assert stats.p5 == 0 + assert stats.p95 == 0 + assert stats.min == 0 + assert stats.max == 0 + + def test_single_value_collapses_percentiles(self) -> None: + """One value → percentiles undefined; fall back to that value.""" + stats = ScoreStats.from_values("acc", [0.42]) + assert stats.count == 1 + assert stats.mean == pytest.approx(0.42) + assert stats.median == pytest.approx(0.42) + assert stats.p5 == pytest.approx(0.42) + assert stats.p95 == pytest.approx(0.42) + assert stats.min == pytest.approx(0.42) + assert stats.max == pytest.approx(0.42) + + +class TestPercentilesNotCollapsedForSmallSamples: + """Regression: with n=10 the old impl returned min/max for p5/p95.""" + + def test_n_equals_10_p5_is_above_min(self) -> None: + values = [float(i) for i in range(10)] # 0..9 + stats = ScoreStats.from_values("acc", values) + # Inclusive method interpolates: p5 of [0..9] is 0.45, p95 is 8.55 + assert stats.min == 0 + assert stats.p5 > stats.min + assert stats.p5 == pytest.approx(0.45, abs=0.01) + + def test_n_equals_10_p95_is_below_max(self) -> None: + values = [float(i) for i in range(10)] + stats = ScoreStats.from_values("acc", values) + assert stats.max == 9 + assert stats.p95 < stats.max + assert stats.p95 == pytest.approx(8.55, abs=0.01) + + def test_n_equals_2_interpolates(self) -> None: + """Inclusive percentiles work even for n=2 (interpolation).""" + stats = ScoreStats.from_values("acc", [0.0, 1.0]) + assert stats.p5 == pytest.approx(0.05, abs=0.01) + assert stats.p95 == pytest.approx(0.95, abs=0.01) + + +class TestPercentilesAccurateForLargeSamples: + def test_n_equals_100_uniform_distribution(self) -> None: + """For uniform 0..99, p5 ≈ 5 and p95 ≈ 95 (inclusive method).""" + values = [float(i) for i in range(100)] + stats = ScoreStats.from_values("acc", values) + assert stats.p5 == pytest.approx(4.95, abs=0.1) + assert stats.p95 == pytest.approx(94.05, abs=0.1) + + def test_unsorted_input_is_sorted_internally(self) -> None: + """from_values must not depend on input order.""" + ordered = ScoreStats.from_values("a", [0.1, 0.2, 0.3, 0.4, 0.5]) + shuffled = ScoreStats.from_values("a", [0.3, 0.5, 0.1, 0.4, 
0.2]) + assert ordered.p5 == pytest.approx(shuffled.p5) + assert ordered.p95 == pytest.approx(shuffled.p95) + assert ordered.median == pytest.approx(shuffled.median) + + +class TestBasicStatsStillCorrect: + """Mean/median/min/max/count are unchanged.""" + + def test_mean_and_median(self) -> None: + stats = ScoreStats.from_values("acc", [1.0, 2.0, 3.0, 4.0, 5.0]) + assert stats.mean == pytest.approx(3.0) + assert stats.median == pytest.approx(3.0) + + def test_min_max_count(self) -> None: + stats = ScoreStats.from_values("acc", [0.2, 0.7, 0.1, 0.9, 0.5]) + assert stats.min == pytest.approx(0.1) + assert stats.max == pytest.approx(0.9) + assert stats.count == 5 diff --git a/tests/execution/test_real_streams.py b/tests/execution/test_real_streams.py new file mode 100644 index 0000000..c7b53de --- /dev/null +++ b/tests/execution/test_real_streams.py @@ -0,0 +1,46 @@ +"""Tests for `real_stdout()` / `real_stderr()`. + +These accessors replace the previous `getattr(sys.stdout, "_original", ...)` +duck-typing. They give reporters a typed way to bypass the per-test capture +wrapper, so renaming or removing the private attribute won't silently break +reporter output. +""" + +from __future__ import annotations + +import io +import sys + +from protest.execution.capture import ( + TaskAwareStream, + real_stderr, + real_stdout, +) + + +class TestRealStdoutUnwrapsTaskAwareStream: + def test_returns_stdout_when_not_wrapped(self) -> None: + assert real_stdout() is sys.stdout + + def test_unwraps_wrapped_stream(self) -> None: + buffer = io.StringIO() + wrapper = TaskAwareStream(buffer) + sys.stdout = wrapper # type: ignore[assignment] + try: + assert real_stdout() is buffer + finally: + sys.stdout = sys.__stdout__ + + +class TestRealStderrUnwrapsTaskAwareStream: + def test_returns_stderr_when_not_wrapped(self) -> None: + assert real_stderr() is sys.stderr + + def test_unwraps_wrapped_stream(self) -> None: + buffer = io.StringIO() + wrapper = TaskAwareStream(buffer) + sys.stderr = wrapper # type: ignore[assignment] + try: + assert real_stderr() is buffer + finally: + sys.stderr = sys.__stderr__ diff --git a/tests/history/__init__.py b/tests/history/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/history/test_append_entry_concurrency.py b/tests/history/test_append_entry_concurrency.py new file mode 100644 index 0000000..ab82739 --- /dev/null +++ b/tests/history/test_append_entry_concurrency.py @@ -0,0 +1,186 @@ +"""Tests for `append_entry` — concurrent writer safety. + +Covers the basic invariant (one entry = one parseable line) and the +multiprocess-concurrency case: N workers append concurrently to the same +file; every line must be parseable JSON. Without locking, interleaved +writes larger than `PIPE_BUF` would corrupt lines and the test would fail. + +Also covers `clean_dirty` concurrency: a concurrent `append_entry` while +`clean_dirty` is running must not be silently dropped by the truncate. +""" + +from __future__ import annotations + +import json +import multiprocessing as mp +import subprocess +from pathlib import Path + +from protest.history.storage import append_entry, clean_dirty + + +def _worker_append(args: tuple[str, int, int]) -> None: + """Child-process entry: append `count` entries, each padded to ~5 KB. + + The padding pushes the write past PIPE_BUF (4 KB) so that without a + lock the POSIX O_APPEND atomicity guarantee no longer applies. 
+ """ + path_str, worker_id, count = args + path = Path(path_str) + padding = "x" * 5000 + for i in range(count): + append_entry(path, {"worker": worker_id, "i": i, "pad": padding}) + + +def _worker_append_innocent(args: tuple[str, int, int]) -> None: + """Append entries on an unrelated commit — `clean_dirty` must not touch them.""" + path_str, worker_id, count = args + path = Path(path_str) + for i in range(count): + append_entry( + path, + { + "worker": worker_id, + "i": i, + "git": {"commit": "innocent_commit", "dirty": False}, + "suites": {}, + }, + ) + + +def _worker_clean_dirty(args: tuple[str, int]) -> None: + """Repeatedly run clean_dirty while another worker appends.""" + path_str, count = args + history_dir = Path(path_str).parent + for _ in range(count): + clean_dirty(history_dir=history_dir) + + +class TestAppendEntryBasic: + """Single-writer invariants.""" + + def test_creates_parent_dir(self, tmp_path: Path) -> None: + target = tmp_path / "nested" / "history.jsonl" + append_entry(target, {"k": "v"}) + assert target.exists() + assert target.parent.is_dir() + + def test_appends_one_line_per_call(self, tmp_path: Path) -> None: + path = tmp_path / "history.jsonl" + append_entry(path, {"a": 1}) + append_entry(path, {"b": 2}) + lines = path.read_text().splitlines() + assert len(lines) == 2 + assert json.loads(lines[0]) == {"a": 1} + assert json.loads(lines[1]) == {"b": 2} + + def test_default_str_serializes_non_json_types(self, tmp_path: Path) -> None: + """`json.dumps(..., default=str)` handles non-serializable values.""" + path = tmp_path / "history.jsonl" + + class Marker: + def __str__(self) -> str: + return "marker-str" + + append_entry(path, {"obj": Marker()}) + (line,) = path.read_text().splitlines() + assert json.loads(line) == {"obj": "marker-str"} + + +class TestAppendEntryConcurrency: + """Multi-process concurrent appends produce N parseable lines.""" + + def test_concurrent_writers_do_not_interleave(self, tmp_path: Path) -> None: + path = tmp_path / "history.jsonl" + workers = 8 + per_worker = 5 + total = workers * per_worker + + ctx = mp.get_context("spawn") + with ctx.Pool(workers) as pool: + pool.map( + _worker_append, + [(str(path), wid, per_worker) for wid in range(workers)], + ) + + lines = path.read_text().splitlines() + assert len(lines) == total, ( + f"expected {total} lines, got {len(lines)} — some writes were lost" + ) + + counts_per_worker: dict[int, int] = {} + for raw in lines: + entry = json.loads(raw) # raises JSONDecodeError on interleaved bytes + counts_per_worker[entry["worker"]] = ( + counts_per_worker.get(entry["worker"], 0) + 1 + ) + + assert counts_per_worker == dict.fromkeys(range(workers), per_worker) + + +class TestCleanDirtyConcurrency: + """`clean_dirty` and `append_entry` must serialize via the same lock. + + The dangerous race: clean_dirty does (read → compute kept → truncate → + rewrite). Without a lock, an `append_entry` landing between the read + and the truncate is silently overwritten — the new entry disappears. + Here we run both in parallel and check the conserved quantity: every + appended "innocent" entry (different commit) must survive. + """ + + def test_concurrent_append_not_dropped_by_clean_dirty(self, tmp_path: Path) -> None: + # Skip outside a git repo — clean_dirty depends on `git rev-parse HEAD`. 
+ try: + subprocess.run( + ["git", "rev-parse", "HEAD"], # noqa: S607 + capture_output=True, + text=True, + timeout=5, + check=True, + ) + except (FileNotFoundError, subprocess.CalledProcessError): + return + + path = tmp_path / "history.jsonl" + # Pre-populate with one no-op entry so the file exists for clean_dirty. + append_entry( + path, + { + "worker": -1, + "git": {"commit": "preexisting", "dirty": False}, + "suites": {}, + }, + ) + + per_worker = 30 + ctx = mp.get_context("spawn") + with ctx.Pool(2) as pool: + pool.starmap( + _dispatch_worker, + [ + ("append", str(path), 0, per_worker), + ("clean", str(path), 0, per_worker), + ], + ) + + lines = path.read_text().splitlines() + # Every line still parses (no torn writes). + innocent_count = 0 + for raw in lines: + entry = json.loads(raw) + if entry.get("git", {}).get("commit") == "innocent_commit": + innocent_count += 1 + # All `per_worker` innocent appends survived — none silently + # discarded by an interleaved clean_dirty truncate. + assert innocent_count == per_worker, ( + f"expected {per_worker} innocent entries, got {innocent_count} — " + "concurrent clean_dirty dropped some appends" + ) + + +def _dispatch_worker(kind: str, path_str: str, worker_id: int, count: int) -> None: + """Top-level dispatcher so spawn() can pickle the call.""" + if kind == "append": + _worker_append_innocent((path_str, worker_id, count)) + else: + _worker_clean_dirty((path_str, count)) diff --git a/tests/history/test_history_dir_isolation.py b/tests/history/test_history_dir_isolation.py new file mode 100644 index 0000000..26946ac --- /dev/null +++ b/tests/history/test_history_dir_isolation.py @@ -0,0 +1,45 @@ +"""Regression tests for B2: tests must not pollute the repo's history file. + +The autouse `_isolate_protest_history` fixture in `tests/conftest.py` +monkeypatches `storage.DEFAULT_HISTORY_DIR` to a per-test temp directory. +These tests assert that both the storage functions and the HistoryPlugin +pick up the override — any regression in the plumbing would let runs leak +into `.protest/history.jsonl` in the real project cwd. 
+""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from protest.history import storage +from protest.history.plugin import HistoryPlugin + +if TYPE_CHECKING: + from pathlib import Path + + +class TestDefaultHistoryDirOverride: + """The autouse fixture redirects the module-level constant.""" + + def test_storage_default_points_to_tmp(self, tmp_path: Path) -> None: + assert tmp_path / ".protest" == storage.DEFAULT_HISTORY_DIR + + def test_append_entry_uses_override(self, tmp_path: Path) -> None: + target = storage.DEFAULT_HISTORY_DIR / storage.HISTORY_FILE + storage.append_entry(target, {"k": "v"}) + assert target.exists() + assert target.is_relative_to(tmp_path) + + def test_plugin_default_dir_follows_override(self, tmp_path: Path) -> None: + plugin = HistoryPlugin() + assert plugin._history_dir == tmp_path / ".protest" + assert plugin._history_file.is_relative_to(tmp_path) + + +class TestExplicitHistoryDirWins: + """Explicit `history_dir=` still takes precedence over the override.""" + + def test_plugin_honors_explicit_dir(self, tmp_path: Path) -> None: + explicit = tmp_path / "custom" + plugin = HistoryPlugin(history_dir=explicit) + assert plugin._history_dir == explicit diff --git a/tests/history/test_schema_version.py b/tests/history/test_schema_version.py new file mode 100644 index 0000000..b4a0724 --- /dev/null +++ b/tests/history/test_schema_version.py @@ -0,0 +1,109 @@ +"""Tests for `schema_version` on history JSONL entries. + +The plugin stamps every new entry with `schema_version`. Readers skip +entries with a future version (written by a newer protest) and warn once +per version. + +Legacy entries (no `schema_version` key at all — written before this was +introduced) are treated as version 0 and read without warning. 
+""" + +from __future__ import annotations + +import json +import warnings +from typing import TYPE_CHECKING + +from protest.history import storage +from protest.history.plugin import HistoryPlugin + +if TYPE_CHECKING: + from pathlib import Path + + +def _write_jsonl(path: Path, entries: list[dict]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text("\n".join(json.dumps(e) for e in entries) + "\n") + + +class TestSchemaVersionWrites: + def test_append_entry_writes_schema_version_via_plugin(self) -> None: + """HistoryPlugin stamps `schema_version` on every new entry.""" + plugin = HistoryPlugin() + assert storage.SCHEMA_VERSION >= 1 + + entry_with_version = {"schema_version": storage.SCHEMA_VERSION, "k": "v"} + storage.append_entry(plugin._history_file, entry_with_version) + loaded = json.loads(plugin._history_file.read_text().splitlines()[0]) + assert loaded["schema_version"] == storage.SCHEMA_VERSION + + +class TestFutureVersionSkipped: + def test_future_version_is_skipped_by_load_history(self, tmp_path: Path) -> None: + path = tmp_path / ".protest" / storage.HISTORY_FILE + _write_jsonl( + path, + [ + {"schema_version": storage.SCHEMA_VERSION, "run_id": "current"}, + {"schema_version": storage.SCHEMA_VERSION + 10, "run_id": "future"}, + ], + ) + storage._warned_future_versions.clear() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + entries = storage.load_history(history_dir=tmp_path / ".protest") + run_ids = [e["run_id"] for e in entries] + assert run_ids == ["current"] + + def test_future_version_is_skipped_by_load_previous_run( + self, tmp_path: Path + ) -> None: + path = tmp_path / ".protest" / storage.HISTORY_FILE + _write_jsonl( + path, + [ + {"schema_version": storage.SCHEMA_VERSION, "run_id": "older"}, + {"schema_version": storage.SCHEMA_VERSION + 1, "run_id": "newer"}, + ], + ) + storage._warned_future_versions.clear() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + entry = storage.load_previous_run(history_dir=tmp_path / ".protest") + assert entry is not None + assert entry["run_id"] == "older" + + def test_warning_raised_once_per_future_version(self, tmp_path: Path) -> None: + path = tmp_path / ".protest" / storage.HISTORY_FILE + future = storage.SCHEMA_VERSION + 42 + _write_jsonl( + path, + [{"schema_version": future, "run_id": str(i)} for i in range(5)], + ) + storage._warned_future_versions.clear() + with warnings.catch_warnings(record=True) as caught: + warnings.simplefilter("always") + storage.load_history(history_dir=tmp_path / ".protest") + future_warnings = [ + w for w in caught if f"schema_version={future}" in str(w.message) + ] + assert len(future_warnings) == 1 + + +class TestLegacyEntriesStillReadable: + """Pre-schema_version entries have no key — treat as legacy (version 0).""" + + def test_entry_without_schema_version_is_read(self, tmp_path: Path) -> None: + path = tmp_path / ".protest" / storage.HISTORY_FILE + _write_jsonl(path, [{"run_id": "legacy", "suites": {}}]) + storage._warned_future_versions.clear() + entries = storage.load_history(history_dir=tmp_path / ".protest") + assert len(entries) == 1 + assert entries[0]["run_id"] == "legacy" + + def test_entry_with_version_zero_is_read(self, tmp_path: Path) -> None: + path = tmp_path / ".protest" / storage.HISTORY_FILE + _write_jsonl(path, [{"schema_version": 0, "run_id": "v0"}]) + storage._warned_future_versions.clear() + entries = storage.load_history(history_dir=tmp_path / ".protest") + assert len(entries) == 1 diff --git 
a/tests/reporting/test_reporter_symmetry.py b/tests/reporting/test_reporter_symmetry.py new file mode 100644 index 0000000..4eec50c --- /dev/null +++ b/tests/reporting/test_reporter_symmetry.py @@ -0,0 +1,369 @@ +"""Symmetry tests between RichReporter and AsciiReporter. + +Goal: catch divergences between the two reporters before they ship as silent +asymmetries. A user who swaps `--no-color` should get the same *semantic* +output (same fields, same filters) — only the visual style differs. + +Three axes are enforced: + +1. Structural — both reporters expose the same public hooks (`on_*` handlers). +2. CLI — both reporters react to the same shared flags (`--show-output`, + `--show-logs`). Reporter-specific flags (`--no-color`) are allowed. +3. Behavioral — parametrized scenarios drive the same input through both + reporters and assert the same *semantic* markers appear + (score names for eval pass, eval detail on fail, summary omits + eval failures, etc.). +""" + +from __future__ import annotations + +import argparse +import inspect +import logging +from typing import Any + +import pytest + +from protest.entities import ( + EvalPayload, + EvalScoreEntry, + SessionResult, + TestResult, +) +from protest.plugin import PluginBase, PluginContext +from protest.reporting.ascii import AsciiReporter +from protest.reporting.rich_reporter import RichReporter + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +REPORTER_CLASSES = [RichReporter, AsciiReporter] + +# CLI flags that are handled by the shared run-parser (not by either reporter's +# add_cli_options). Both reporters must still read them via their activate(). +_SHARED_CLI_FLAGS = {"show_output", "show_logs"} + + +def _public_handlers(cls: type[PluginBase]) -> set[str]: + """Return the set of `on_*` handlers defined or overridden on cls. + + Only include methods that are *actually overridden* (not inherited from + PluginBase as no-ops). That's what makes the reporter visible to the bus. + """ + handlers: set[str] = set() + for name, member in inspect.getmembers(cls, predicate=inspect.isfunction): + if not name.startswith("on_"): + continue + # Skip no-op base implementations that a subclass didn't override. + base_member = getattr(PluginBase, name, None) + if base_member is not None and member is base_member: + continue + handlers.add(name) + return handlers + + +def _cli_flag_dests(cls: type[PluginBase]) -> set[str]: + """Return the argparse `dest` names registered by cls.add_cli_options.""" + parser = argparse.ArgumentParser() + cls.add_cli_options(parser) + dests: set[str] = set() + for action in parser._actions: + if action.dest and action.dest != "help": + dests.add(action.dest) + return dests + + +def _make_reporter(cls: type[PluginBase], **kwargs: Any) -> PluginBase: + """Activate a reporter via its own activate() path to exercise wiring.""" + ctx_args = {"no_color": cls is AsciiReporter, "verbosity": 1, **kwargs} + ctx = PluginContext(args=ctx_args) + instance = cls.activate(ctx) + assert instance is not None, f"{cls.__name__}.activate returned None" + return instance + + +def _capture_output(capsys: pytest.CaptureFixture[str]) -> str: + """Capture everything captured so far on stdout+stderr. + + Rich writes via `self.console` (stdout by default), Ascii uses `print`. + Capsys grabs both uniformly. 
+ """ + captured = capsys.readouterr() + return captured.out + captured.err + + +@pytest.fixture +def eval_result_pass() -> TestResult: + """A passing eval TestResult with two scores (one bool, one float).""" + return TestResult( + name="case_alpha", + node_id="mod::chatbot::case_alpha", + duration=0.05, + is_eval=True, + eval_payload=EvalPayload( + case_name="case_alpha", + passed=True, + task_duration=0.05, + inputs="hello", + output="world", + expected_output="world", + scores={ + "contains_world": EvalScoreEntry(value=True, passed=True), + "similarity": EvalScoreEntry(value=0.92, passed=True), + }, + ), + ) + + +@pytest.fixture +def eval_result_fail() -> TestResult: + """A failing eval TestResult (one score fails).""" + return TestResult( + name="case_beta", + node_id="mod::chatbot::case_beta", + duration=0.04, + error=AssertionError("score contains_hi failed"), + is_eval=True, + eval_payload=EvalPayload( + case_name="case_beta", + passed=False, + task_duration=0.04, + inputs="goodbye", + output="farewell", + expected_output="hi", + scores={ + "contains_hi": EvalScoreEntry(value=False, passed=False), + }, + ), + ) + + +@pytest.fixture +def plain_failing_test() -> TestResult: + return TestResult( + name="test_plain_fail", + node_id="mod::test_plain_fail", + duration=0.01, + error=AssertionError("plain failure"), + ) + + +# --------------------------------------------------------------------------- +# 1. Structural symmetry +# --------------------------------------------------------------------------- + + +class TestStructuralSymmetry: + """Ensure the two reporters expose the same public handler surface.""" + + def test_reporters_override_same_handlers(self) -> None: + """Both reporters override the same set of on_* methods. + + If one reporter starts overriding a hook that the other ignores, an + event will be invisible in the "other" reporter — that's the bug we + want to catch at test time, not in production. + """ + rich_handlers = _public_handlers(RichReporter) + ascii_handlers = _public_handlers(AsciiReporter) + + only_in_rich = rich_handlers - ascii_handlers + only_in_ascii = ascii_handlers - rich_handlers + assert not only_in_rich, ( + f"Rich handles events that Ascii doesn't: {sorted(only_in_rich)}" + ) + assert not only_in_ascii, ( + f"Ascii handles events that Rich doesn't: {sorted(only_in_ascii)}" + ) + + +# --------------------------------------------------------------------------- +# 2. CLI symmetry +# --------------------------------------------------------------------------- + + +class TestCliSymmetry: + """Ensure the two reporters consume the same shared flags. + + Reporter-specific flags are allowed (e.g. `--no-color` makes sense only on + the Ascii side) — they're expected to appear in either one but not both. + The rule is: anything in `_SHARED_CLI_FLAGS` must be *activatable* on both + reporters (read from PluginContext via activate()). 
+ """ + + @pytest.mark.parametrize( + "flag,value,attr", + [ + pytest.param("show_output", True, "_show_output", id="show_output"), + pytest.param("show_logs", "INFO", "_show_logs", id="show_logs"), + ], + ) + def test_shared_flags_reach_both_reporters( + self, flag: str, value: Any, attr: str + ) -> None: + """Given a shared run-parser flag, both reporters pick it up via activate().""" + for cls in REPORTER_CLASSES: + reporter = _make_reporter(cls, **{flag: value}) + assert getattr(reporter, attr) == value, ( + f"{cls.__name__} didn't wire flag '{flag}' into attr '{attr}'" + ) + + def test_reporters_dont_redeclare_shared_flags(self) -> None: + """Shared flags live on the run-parser, not on reporter add_cli_options. + + If either reporter redeclares them via add_cli_options, argparse will + raise at runtime when both get wired (cli/main.py iterates plugin + classes and calls add_cli_options on each). + """ + for cls in REPORTER_CLASSES: + dests = _cli_flag_dests(cls) + redeclared = dests & _SHARED_CLI_FLAGS + assert not redeclared, ( + f"{cls.__name__}.add_cli_options redeclares shared flags: " + f"{sorted(redeclared)} — move them to cli._create_run_parser" + ) + + +# --------------------------------------------------------------------------- +# 3. Behavioral symmetry +# --------------------------------------------------------------------------- + + +class TestBehavioralSymmetry: + """Drive the same events through both reporters; assert same semantics. + + We deliberately avoid asserting on *exact* characters: the visual prefix + differs (`✓` vs `OK`, colors vs plain). What must be identical is which + pieces of information are rendered. + """ + + @pytest.mark.parametrize("reporter_cls", REPORTER_CLASSES) + def test_eval_pass_shows_score_names_inline( + self, + reporter_cls: type[PluginBase], + eval_result_pass: TestResult, + capsys: pytest.CaptureFixture[str], + ) -> None: + """Given a passing eval, both reporters surface each score's name inline.""" + reporter = _make_reporter(reporter_cls, verbosity=1) + reporter.on_test_pass(eval_result_pass) + output = _capture_output(capsys) + assert "contains_world" in output, ( + f"{reporter_cls.__name__}: missing score name" + ) + assert "similarity" in output, f"{reporter_cls.__name__}: missing float score" + + @pytest.mark.parametrize("reporter_cls", REPORTER_CLASSES) + def test_eval_fail_shows_detail_inline( + self, + reporter_cls: type[PluginBase], + eval_result_fail: TestResult, + capsys: pytest.CaptureFixture[str], + ) -> None: + """Given a failing eval, both reporters dump inputs/output/expected. + + This must happen regardless of --show-output — the user can't debug + a failed assertion without seeing what the task actually produced. 
+ """ + reporter = _make_reporter(reporter_cls) + reporter.on_test_fail(eval_result_fail) + output = _capture_output(capsys) + assert "goodbye" in output, f"{reporter_cls.__name__}: missing inputs" + assert "farewell" in output, f"{reporter_cls.__name__}: missing output" + assert "hi" in output, f"{reporter_cls.__name__}: missing expected" + + @pytest.mark.parametrize("reporter_cls", REPORTER_CLASSES) + def test_show_output_true_prints_eval_detail_on_pass( + self, + reporter_cls: type[PluginBase], + eval_result_pass: TestResult, + capsys: pytest.CaptureFixture[str], + ) -> None: + """Given --show-output, both reporters print eval detail even on pass.""" + reporter = _make_reporter(reporter_cls, verbosity=1, show_output=True) + reporter.on_test_pass(eval_result_pass) + output = _capture_output(capsys) + assert "hello" in output, f"{reporter_cls.__name__}: missing inputs on pass" + assert "world" in output, f"{reporter_cls.__name__}: missing output on pass" + + @pytest.mark.parametrize("reporter_cls", REPORTER_CLASSES) + def test_show_output_false_omits_eval_detail_on_pass( + self, + reporter_cls: type[PluginBase], + eval_result_pass: TestResult, + capsys: pytest.CaptureFixture[str], + ) -> None: + """Given default --show-output, eval detail is hidden on pass.""" + reporter = _make_reporter(reporter_cls, verbosity=1, show_output=False) + reporter.on_test_pass(eval_result_pass) + output = _capture_output(capsys) + # "hello" and "world" appear in the score name ("contains_world"); + # assert on a unique substring that only appears if the detail block runs. + assert "inputs:" not in output, ( + f"{reporter_cls.__name__}: leaked eval detail without --show-output" + ) + + @pytest.mark.parametrize("reporter_cls", REPORTER_CLASSES) + def test_failure_summary_omits_eval_failures( + self, + reporter_cls: type[PluginBase], + eval_result_fail: TestResult, + plain_failing_test: TestResult, + capsys: pytest.CaptureFixture[str], + ) -> None: + """End-of-session summary must not re-list eval failures. + + Eval cases are already displayed inline via on_test_fail. Re-listing + them in the summary duplicates noise — the pattern agreed on is + "non_eval_failures only". + """ + reporter = _make_reporter(reporter_cls) + reporter.on_test_fail(eval_result_fail) + reporter.on_test_fail(plain_failing_test) + capsys.readouterr() # drop inline fail output + + reporter.on_session_complete( + SessionResult(passed=0, failed=2, errors=0, duration=1.0) + ) + summary = _capture_output(capsys) + + assert "test_plain_fail" in summary, ( + f"{reporter_cls.__name__}: summary lost the plain failure" + ) + # The eval case name should NOT appear in the failure-summary block. + # It may appear in the inline tally above; we only captured summary here. 
+ assert "case_beta" not in summary, ( + f"{reporter_cls.__name__}: summary re-lists eval failure (should be inline only)" + ) + + @pytest.mark.parametrize("reporter_cls", REPORTER_CLASSES) + def test_show_logs_prints_captured_records( + self, + reporter_cls: type[PluginBase], + capsys: pytest.CaptureFixture[str], + ) -> None: + """Given --show-logs INFO, both reporters emit the captured log records.""" + record = logging.LogRecord( + name="mylib.module", + level=logging.INFO, + pathname="x.py", + lineno=1, + msg="captured thing", + args=(), + exc_info=None, + ) + result = TestResult( + name="test_foo", + node_id="mod::test_foo", + duration=0.01, + log_records=(record,), + ) + reporter = _make_reporter(reporter_cls, verbosity=1, show_logs="INFO") + reporter.on_test_pass(result) + output = _capture_output(capsys) + assert "captured thing" in output, ( + f"{reporter_cls.__name__}: --show-logs didn't render the record" + ) + assert "mylib.module" in output, ( + f"{reporter_cls.__name__}: --show-logs didn't render the logger name" + ) diff --git a/tests/reporting/test_rich_reporter.py b/tests/reporting/test_rich_reporter.py index 1452579..4585338 100644 --- a/tests/reporting/test_rich_reporter.py +++ b/tests/reporting/test_rich_reporter.py @@ -15,11 +15,13 @@ TestRetryInfo, ) from protest.events.types import Event -from protest.reporting.rich_reporter import ( +from protest.reporting.format import ( MIN_DURATION_THRESHOLD, - RichReporter, - _format_duration, ) +from protest.reporting.format import ( + format_duration as _format_duration, +) +from protest.reporting.rich_reporter import RichReporter class TestFormatDuration: diff --git a/tests/test_console_print.py b/tests/test_console_print.py new file mode 100644 index 0000000..6bac47e --- /dev/null +++ b/tests/test_console_print.py @@ -0,0 +1,171 @@ +"""Tests for `protest.console.print` — payload shape and reporter formatting. + +`console.print(msg, raw=False, prefix=True)` builds a 3-tuple payload +`(msg, raw, prefix)` dispatched on USER_PRINT. Each reporter unpacks the +three flags and renders accordingly: + +- default (prefix=True, raw=False): per-test bar prefix + markup +- raw=True: no prefix, no markup (debug bytes) +- prefix=False: no prefix, markup still active (suite-level lines) + +The third mode is what unblocks `EvalResultsWriter.on_eval_suite_end` so +`Results: ` doesn't visually attach to the previous case's output. +""" + +from __future__ import annotations + +import io +from unittest.mock import MagicMock + +import pytest + +from protest import console +from protest.events.types import Event +from protest.reporting.ascii import AsciiReporter +from protest.reporting.rich_reporter import RichReporter + + +@pytest.fixture +def stdout_buffer(monkeypatch: pytest.MonkeyPatch) -> io.StringIO: + buf = io.StringIO() + # `real_stdout()` is what reporters write to; patch at both reporter modules. 
+ monkeypatch.setattr("protest.reporting.ascii.real_stdout", lambda: buf) + monkeypatch.setattr("protest.reporting.rich_reporter.real_stdout", lambda: buf) + return buf + + +class TestAsciiReporterUserPrint: + """ASCII reporter handles the 3-tuple payload.""" + + def test_default_adds_bar_prefix(self, stdout_buffer: io.StringIO) -> None: + reporter = AsciiReporter() + reporter.on_user_print(("hello", False, True)) + assert stdout_buffer.getvalue() == " | hello\n" + + def test_raw_mode_no_prefix_no_markup(self, stdout_buffer: io.StringIO) -> None: + reporter = AsciiReporter() + reporter.on_user_print(("[bold]raw[/]", True, True)) + # raw bypasses both markup-strip and prefix + assert stdout_buffer.getvalue() == "[bold]raw[/]\n" + + def test_prefix_false_no_bar(self, stdout_buffer: io.StringIO) -> None: + reporter = AsciiReporter() + reporter.on_user_print(("Results: /tmp/foo", False, False)) + # No bar — visually a section line, not attached to a case. + assert stdout_buffer.getvalue() == "Results: /tmp/foo\n" + + +class TestRichReporterUserPrint: + """Rich reporter handles the 3-tuple payload.""" + + def _make_reporter(self) -> RichReporter: + # RichReporter pulls deps from the bus; we only exercise on_user_print. + return RichReporter.__new__(RichReporter) + + def test_default_adds_bar_prefix(self, stdout_buffer: io.StringIO) -> None: + reporter = self._make_reporter() + reporter.on_user_print(("hello", False, True)) + assert "│" in stdout_buffer.getvalue() + assert "hello" in stdout_buffer.getvalue() + + def test_prefix_false_no_bar(self, stdout_buffer: io.StringIO) -> None: + reporter = self._make_reporter() + reporter.on_user_print(("Results: /tmp/foo", False, False)) + out = stdout_buffer.getvalue() + assert "│" not in out + assert "Results: /tmp/foo" in out + + +class TestConsolePrintPayload: + """`console.print` builds the payload and dispatches to handlers.""" + + def _captured_bus(self, monkeypatch: pytest.MonkeyPatch) -> list[tuple]: + captured: list[tuple] = [] + bus = MagicMock() + handler = MagicMock() + handler.func = lambda payload: captured.append(payload) + bus._handlers = {Event.USER_PRINT: [handler]} + monkeypatch.setattr("protest.console.get_event_bus", lambda: bus) + return captured + + def test_default_payload_carries_prefix_true( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + captured = self._captured_bus(monkeypatch) + console.print("hi") + assert captured == [("hi", False, True)] + + def test_prefix_false_propagates(self, monkeypatch: pytest.MonkeyPatch) -> None: + captured = self._captured_bus(monkeypatch) + console.print("section line", prefix=False) + assert captured == [("section line", False, False)] + + def test_raw_propagates(self, monkeypatch: pytest.MonkeyPatch) -> None: + captured = self._captured_bus(monkeypatch) + console.print("[raw]", raw=True) + assert captured == [("[raw]", True, True)] + + +class TestConsolePrintHandlerErrors: + """Handler failures must surface on stderr instead of disappearing. + + Earlier behavior: `contextlib.suppress(Exception)` swallowed any handler + raise. A reporter bug (e.g. malformed Rich markup) made `console.print` + silently no-op — users assumed the call did nothing. 
+ """ + + def _bus_with_failing_handler( + self, monkeypatch: pytest.MonkeyPatch, exc: Exception + ) -> None: + bus = MagicMock() + handler = MagicMock() + + def boom(_payload: tuple) -> None: + raise exc + + handler.func = boom + bus._handlers = {Event.USER_PRINT: [handler]} + monkeypatch.setattr("protest.console.get_event_bus", lambda: bus) + + def test_handler_exception_is_surfaced_on_stderr( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + stderr = io.StringIO() + monkeypatch.setattr("protest.console.real_stderr", lambda: stderr) + self._bus_with_failing_handler(monkeypatch, RuntimeError("boom")) + + console.print("anything") + + out = stderr.getvalue() + assert "console.print: handler raised" in out + assert "RuntimeError" in out + assert "boom" in out + + def test_loop_continues_when_real_stderr_itself_fails( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + """Defense in depth: if logging the error also fails, no cascade.""" + + def raising_stderr() -> object: + raise OSError("stderr broken") + + monkeypatch.setattr("protest.console.real_stderr", raising_stderr) + self._bus_with_failing_handler(monkeypatch, RuntimeError("boom")) + + # Must not raise — the outer suppress() is the last line of defense. + console.print("anything") + + def test_successful_handler_does_not_touch_stderr( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + stderr = io.StringIO() + monkeypatch.setattr("protest.console.real_stderr", lambda: stderr) + + bus = MagicMock() + handler = MagicMock() + handler.func = lambda _payload: None # no-op, no raise + bus._handlers = {Event.USER_PRINT: [handler]} + monkeypatch.setattr("protest.console.get_event_bus", lambda: bus) + + console.print("ok") + assert stderr.getvalue() == "" diff --git a/tests/test_history_changes.py b/tests/test_history_changes.py new file mode 100644 index 0000000..a24698e --- /dev/null +++ b/tests/test_history_changes.py @@ -0,0 +1,128 @@ +"""Tests for `_classify_changes` — diffing logic for `protest history --compare`. + +Each case entry is a minimal dict mirroring what `_all_cases(entry)` returns +from a history JSONL record: at least `passed`, optionally `case_hash` and +`eval_hash`. 
+""" + +from __future__ import annotations + +from protest.cli.history import _classify_changes + + +def _case( + *, + passed: bool = True, + case_hash: str | None = None, + eval_hash: str | None = None, +) -> dict: + entry: dict = {"passed": passed} + if case_hash is not None: + entry["case_hash"] = case_hash + if eval_hash is not None: + entry["eval_hash"] = eval_hash + return entry + + +class TestClassifyChangesDeleted: + """Cases present in `prev` but absent from `curr` land in `deleted`.""" + + def test_single_deletion(self) -> None: + prev = {"case_a": _case(passed=True), "case_b": _case(passed=True)} + curr = {"case_a": _case(passed=True)} + changes = _classify_changes(curr, prev) + assert changes["deleted"] == ["case_b"] + assert changes["new"] == [] + assert changes["fixed"] == [] + assert changes["regressed"] == [] + assert changes["modified"] == [] + + def test_multiple_deletions_preserve_prev_order(self) -> None: + prev = { + "alpha": _case(), + "beta": _case(), + "gamma": _case(), + "delta": _case(), + } + curr = {"alpha": _case()} + changes = _classify_changes(curr, prev) + assert changes["deleted"] == ["beta", "gamma", "delta"] + + def test_deletion_coexists_with_other_changes(self) -> None: + prev = { + "to_delete": _case(passed=True), + "to_fix": _case(passed=False), + "stable": _case(passed=True), + } + curr = { + "to_fix": _case(passed=True), + "stable": _case(passed=True), + "brand_new": _case(passed=True), + } + changes = _classify_changes(curr, prev) + assert changes["deleted"] == ["to_delete"] + assert changes["fixed"] == ["to_fix"] + assert changes["new"] == ["brand_new"] + + def test_all_cases_deleted(self) -> None: + prev = {"a": _case(), "b": _case()} + curr: dict = {} + changes = _classify_changes(curr, prev) + assert changes["deleted"] == ["a", "b"] + assert changes["new"] == [] + + def test_no_deletions(self) -> None: + prev = {"a": _case()} + curr = {"a": _case(), "b": _case()} + changes = _classify_changes(curr, prev) + assert changes["deleted"] == [] + assert changes["new"] == ["b"] + + +class TestClassifyChangesExistingCategories: + """Existing categories keep working after adding `deleted`.""" + + def test_new_case(self) -> None: + changes = _classify_changes({"a": _case()}, {}) + assert changes["new"] == ["a"] + + def test_fixed_case(self) -> None: + prev = {"a": _case(passed=False)} + curr = {"a": _case(passed=True)} + assert _classify_changes(curr, prev)["fixed"] == ["a"] + + def test_regressed_case(self) -> None: + prev = {"a": _case(passed=True)} + curr = {"a": _case(passed=False)} + assert _classify_changes(curr, prev)["regressed"] == ["a"] + + def test_modified_case_hash(self) -> None: + prev = {"a": _case(case_hash="h1")} + curr = {"a": _case(case_hash="h2")} + assert _classify_changes(curr, prev)["modified"] == ["a (case modified)"] + + def test_modified_eval_hash(self) -> None: + prev = {"a": _case(eval_hash="h1")} + curr = {"a": _case(eval_hash="h2")} + assert _classify_changes(curr, prev)["modified"] == ["a (scoring modified)"] + + def test_no_changes(self) -> None: + prev = {"a": _case(passed=True)} + curr = {"a": _case(passed=True)} + changes = _classify_changes(curr, prev) + assert all(not v for v in changes.values()) + + +class TestClassifyChangesResultShape: + """Result dict always has the five expected keys.""" + + def test_empty_inputs_still_yield_five_buckets(self) -> None: + changes = _classify_changes({}, {}) + assert set(changes.keys()) == { + "fixed", + "regressed", + "modified", + "new", + "deleted", + } + assert all(v == [] for v 
in changes.values())
diff --git a/tests/test_history_cli.py b/tests/test_history_cli.py
new file mode 100644
index 0000000..d8b9f3c
--- /dev/null
+++ b/tests/test_history_cli.py
@@ -0,0 +1,247 @@
+"""Tests for `protest history` CLI argument parsing.
+
+The CLI uses sub-commands (`list`, `runs`, `show`, `compare`, `clean`).
+`list` is the implicit default when no sub-command is given. Each sub-command
+shares a common filter parser (`--tail`, `--model`, `--suite`, `--evals`/
+`--tests`, `--path`); `--evals` and `--tests` remain mutually exclusive.
+
+`handle_history_command(argv)` raises `SystemExit(2)` from argparse on a
+parsing error and `SystemExit(0)` when the history is empty; with entries
+present, a successful run returns normally.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import pytest
+
+from protest.cli.history import handle_history_command
+from protest.history.storage import HISTORY_FILE, append_entry
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+
+class TestKindMutex:
+    """`--evals` and `--tests` cannot be combined within a sub-command."""
+
+    @pytest.mark.parametrize(
+        "argv",
+        [
+            ["list", "--evals", "--tests"],
+            ["runs", "--tests", "--evals"],
+        ],
+    )
+    def test_mutex_violation_exits_with_error(
+        self,
+        argv: list[str],
+        capsys: pytest.CaptureFixture[str],
+    ) -> None:
+        with pytest.raises(SystemExit) as exc_info:
+            handle_history_command(argv)
+        assert exc_info.value.code == 2
+        stderr = capsys.readouterr().err
+        assert "not allowed with argument" in stderr
+
+
+class TestSubcommandsAccepted:
+    """Each sub-command parses cleanly with shared filters."""
+
+    @pytest.mark.parametrize(
+        "argv",
+        [
+            ["list"],
+            ["runs"],
+            ["show"],
+            ["show", "0"],
+            ["compare"],
+            ["clean"],
+            ["list", "--evals"],
+            ["list", "--tests"],
+            ["runs", "--tail", "5"],
+            ["show", "1", "--model", "gpt-4"],
+            ["compare", "--suite", "my_suite"],
+        ],
+    )
+    def test_subcommand_parses_with_empty_history(
+        self,
+        argv: list[str],
+        tmp_path: Path,
+        capsys: pytest.CaptureFixture[str],
+    ) -> None:
+        full_argv = [*argv, "--path", str(tmp_path)]
+        with pytest.raises(SystemExit) as exc_info:
+            handle_history_command(full_argv)
+        # Empty history exits 0 with "No history found." (or similar).
+        assert exc_info.value.code == 0
+        captured = capsys.readouterr()
+        assert "not allowed with argument" not in captured.err
+
+
+class TestImplicitListDefault:
+    """`protest history` with no sub-command falls back to `list`."""
+
+    def test_no_subcommand_runs_list(
+        self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
+    ) -> None:
+        with pytest.raises(SystemExit) as exc_info:
+            handle_history_command(["--path", str(tmp_path)])
+        assert exc_info.value.code == 0
+
+    def test_no_subcommand_with_only_filter_runs_list(
+        self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
+    ) -> None:
+        # `protest history --tail 5 --path X` should be parsed as the
+        # implicit `list --tail 5 --path X`, not as a parser error.
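+        # One plausible way to implement that fallback (an assumption;
+        # protest's real parser may differ): prepend "list" unless the
+        # first token is a known sub-command or a help flag, e.g.
+        #
+        #     if not argv or (
+        #         argv[0] not in SUBCOMMANDS  # hypothetical name
+        #         and argv[0] not in ("-h", "--help")
+        #     ):
+        #         argv = ["list", *argv]
+        #     args = parser.parse_args(argv)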
+        with pytest.raises(SystemExit) as exc_info:
+            handle_history_command(["--tail", "5", "--path", str(tmp_path)])
+        assert exc_info.value.code == 0
+
+
+class TestHelpOutput:
+    """`--help` lists the sub-commands."""
+
+    def test_help_lists_subcommands(self, capsys: pytest.CaptureFixture[str]) -> None:
+        with pytest.raises(SystemExit) as exc_info:
+            handle_history_command(["--help"])
+        assert exc_info.value.code == 0
+        stdout = capsys.readouterr().out
+        for cmd in ("list", "runs", "show", "compare", "clean"):
+            assert cmd in stdout
+
+
+class TestRunsOrderRecentFirst:
+    """`runs` lists most-recent run first (git log convention).
+
+    Storage returns entries oldest→newest; the CLI must reverse for display
+    so #1 maps to the newest run, matching `git stash list` / `git log`.
+    """
+
+    def _seed(self, tmp_path: Path, commits: list[tuple[str, str]]) -> None:
+        path = tmp_path / HISTORY_FILE
+        for ts, commit in commits:
+            append_entry(
+                path,
+                {
+                    "schema_version": 1,
+                    "run_id": commit,
+                    "timestamp": ts,
+                    "git": {"commit_short": commit},
+                    "suites": {},
+                },
+            )
+
+    def test_runs_displays_newest_first(
+        self,
+        tmp_path: Path,
+        capsys: pytest.CaptureFixture[str],
+    ) -> None:
+        # Seed in chronological order — storage preserves write order.
+        self._seed(
+            tmp_path,
+            [
+                ("2026-04-25T10:00:00", "old1234"),
+                ("2026-04-25T11:00:00", "mid5678"),
+                ("2026-04-25T12:00:00", "newabcd"),
+            ],
+        )
+        handle_history_command(["runs", "--path", str(tmp_path)])
+        stdout = capsys.readouterr().out
+        # #1 is newest, #3 is oldest.
+        assert stdout.index("#1") < stdout.index("#2") < stdout.index("#3")
+        assert (
+            stdout.index("newabcd") < stdout.index("mid5678") < stdout.index("old1234")
+        )
+        # And #1 lines up with the newest commit, not the oldest.
+        newest_line = next(line for line in stdout.splitlines() if "#1" in line)
+        assert "newabcd" in newest_line
+
+
+class TestCompareRefusesMixedModels:
+    """`compare` must not silently diff across models — doing so would cause false regressions.
+
+    When the two most recent runs each contain suites with several distinct
+    `ModelLabel.name`s (e.g. `rules_v1` + `rules_v2` in a multi-model
+    session), flattening the cases by name conflates contexts: a case-id that
+    passes under one model and fails under the other shows up as a phantom
+    regression. The CLI rejects this and asks the user to disambiguate via
+    `--model NAME` or `--suite NAME`.
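+
+    Illustration with the seed data below: flattening both runs by case-id
+    yields two conflicting verdicts for T010 (`passed=False` under
+    `rules_v1`, `passed=True` under `rules_v2`); whichever suite merges
+    last wins, which can surface as a spurious fixed or regressed verdict
+    that no single model actually exhibited.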
+ """ + + def _seed_two_model_run(self, tmp_path: Path, run_id: str, ts: str) -> None: + path = tmp_path / HISTORY_FILE + append_entry( + path, + { + "schema_version": 1, + "run_id": run_id, + "timestamp": ts, + "git": {"commit_short": run_id}, + "suites": { + "helpdesk_v1": { + "kind": "eval", + "model": "rules_v1", + "passed": 9, + "total_cases": 18, + "cases": {"T010": {"passed": False, "case_hash": "h1"}}, + }, + "helpdesk_v2": { + "kind": "eval", + "model": "rules_v2", + "passed": 11, + "total_cases": 18, + "cases": {"T010": {"passed": True, "case_hash": "h1"}}, + }, + }, + }, + ) + + def test_compare_rejects_mixed_models_without_filter( + self, tmp_path: Path, capsys: pytest.CaptureFixture[str] + ) -> None: + self._seed_two_model_run(tmp_path, "aaa1111", "2026-04-27T10:00:00") + self._seed_two_model_run(tmp_path, "bbb2222", "2026-04-27T11:00:00") + with pytest.raises(SystemExit) as exc_info: + handle_history_command(["compare", "--evals", "--path", str(tmp_path)]) + assert exc_info.value.code == 1 + out = capsys.readouterr().out + assert "multiple models" in out + assert "rules_v1" in out and "rules_v2" in out + assert "--model" in out + + def test_compare_with_model_filter_succeeds( + self, tmp_path: Path, capsys: pytest.CaptureFixture[str] + ) -> None: + self._seed_two_model_run(tmp_path, "aaa1111", "2026-04-27T10:00:00") + self._seed_two_model_run(tmp_path, "bbb2222", "2026-04-27T11:00:00") + # `--model rules_v1` prunes helpdesk_v2 out of each entry, leaving + # a single-model comparison that should succeed (no false regression). + handle_history_command( + ["compare", "--evals", "--model", "rules_v1", "--path", str(tmp_path)] + ) + out = capsys.readouterr().out + assert "multiple models" not in out + + +class TestCleanDryRun: + """`clean` is dry-run by default; `--apply` to actually modify the file.""" + + def test_clean_default_is_dry_run( + self, tmp_path: Path, capsys: pytest.CaptureFixture[str] + ) -> None: + # Empty history is the simplest case — both modes should report + # "No dirty entries to clean." without touching anything. + with pytest.raises(SystemExit) as exc_info: + handle_history_command(["clean", "--path", str(tmp_path)]) + assert exc_info.value.code == 0 + out = capsys.readouterr().out + assert "No dirty entries to clean." in out + + def test_clean_apply_flag_accepted( + self, tmp_path: Path, capsys: pytest.CaptureFixture[str] + ) -> None: + with pytest.raises(SystemExit) as exc_info: + handle_history_command(["clean", "--apply", "--path", str(tmp_path)]) + assert exc_info.value.code == 0 + out = capsys.readouterr().out + assert "No dirty entries to clean." 
in out diff --git a/tests/test_history_stats.py b/tests/test_history_stats.py new file mode 100644 index 0000000..cc99c17 --- /dev/null +++ b/tests/test_history_stats.py @@ -0,0 +1,164 @@ +"""Tests for history stats — error-only runs must be excluded from stats.""" + +from __future__ import annotations + +from protest.cli.history import _aggregate_suites, _rich_score_arrows + + +def _make_entry( + suite_name: str = "pipeline", + passed: int = 0, + total: int = 0, + errored: int = 0, + cases: dict | None = None, +) -> dict: + """Build a minimal history entry with one suite.""" + return { + "suites": { + suite_name: { + "kind": "eval", + "passed": passed, + "total_cases": total, + "errored": errored, + "cases": cases or {}, + } + } + } + + +def _case(passed: bool, score: float) -> dict: + return {"passed": passed, "scores": {"accuracy": score}} + + +def _error_case() -> dict: + return {"passed": False, "is_error": True, "scores": {}} + + +class TestErrorRunsExcludedFromStats: + """Error-only runs (fixture crashes) are excluded from stats.""" + + def test_error_runs_not_counted(self) -> None: + """Runs where errored >= total should not count in n_runs or pass_rates.""" + entries = [ + _make_entry(passed=29, total=39, cases={"a": _case(True, 0.8)}), + _make_entry(passed=0, total=1, errored=1, cases={"x": _error_case()}), + _make_entry(passed=0, total=1, errored=1, cases={"x": _error_case()}), + _make_entry(passed=28, total=39, cases={"a": _case(True, 0.7)}), + _make_entry(passed=0, total=1, errored=1, cases={"x": _error_case()}), + ] + + suites = _aggregate_suites(entries) + s = suites["pipeline"] + + # Only 2 real runs counted + assert s["n_runs"] == 2 + assert len(s["pass_rates"]) == 2 + # pass_rates reflect only real runs + assert s["pass_rates"][0] == 29 / 39 + assert s["pass_rates"][1] == 28 / 39 + + def test_error_cases_not_tracked(self) -> None: + """Cases with is_error=True should not appear in cases_seen or score_values.""" + entries = [ + _make_entry( + passed=1, + total=2, + errored=0, + cases={ + "real_case": _case(True, 0.9), + "errored_case": _error_case(), + }, + ), + ] + + suites = _aggregate_suites(entries) + s = suites["pipeline"] + assert "real_case" in s["cases_seen"] + assert "errored_case" not in s["cases_seen"] + assert len(s["score_values"]["accuracy"]) == 1 + + def test_error_cases_not_in_flaky(self) -> None: + """Error cases should never appear as flaky.""" + entries = [ + _make_entry(passed=1, total=1, cases={"a": _case(True, 0.9)}), + _make_entry( + passed=0, + total=1, + errored=1, + cases={"a": _error_case()}, + ), + ] + + suites = _aggregate_suites(entries) + s = suites["pipeline"] + # Only the real run is counted + assert s["n_runs"] == 1 + assert len(s["flaky"]) == 0 + + def test_all_error_runs_produce_empty_suite(self) -> None: + """If ALL runs are errors, suite has 0 runs and empty stats.""" + entries = [ + _make_entry(passed=0, total=1, errored=1, cases={"x": _error_case()}), + _make_entry(passed=0, total=1, errored=1, cases={"x": _error_case()}), + ] + + suites = _aggregate_suites(entries) + # Suite exists but has 0 real runs + assert suites["pipeline"]["n_runs"] == 0 + assert suites["pipeline"]["pass_rates"] == [] + + def test_mixed_real_and_error_runs(self) -> None: + """Real data pattern: mostly errors with a few real runs.""" + entries = [ + _make_entry(passed=0, total=1, errored=1), # error + _make_entry(passed=0, total=1, errored=1), # error + _make_entry(passed=29, total=39, cases={"a": _case(True, 0.7)}), # real + _make_entry(passed=0, total=1, 
errored=1), # error + _make_entry(passed=0, total=1, errored=1), # error + _make_entry(passed=28, total=39, cases={"a": _case(True, 0.8)}), # real + _make_entry(passed=0, total=1, errored=1), # error + _make_entry(passed=0, total=1, errored=1), # error + _make_entry(passed=0, total=1, errored=1), # error + _make_entry(passed=0, total=1, errored=1), # error + ] + + suites = _aggregate_suites(entries) + s = suites["pipeline"] + + assert s["n_runs"] == 2 # not 10 + assert len(s["pass_rates"]) == 2 + # Arrows reflect only the 2 real runs, not the 8 errors + arrows = _rich_score_arrows(s["score_values"]) + # accuracy went 0.7 → 0.8 → should show ↗ + assert "↗" in arrows + + +class TestScoreArrowsWithCleanData: + """Score arrows with only real runs (no errors to filter).""" + + def test_stable_scores_show_no_trend(self) -> None: + entries = [ + _make_entry(passed=2, total=2, cases={"a": _case(True, 0.8)}), + _make_entry(passed=2, total=2, cases={"a": _case(True, 0.8)}), + ] + suites = _aggregate_suites(entries) + arrows = _rich_score_arrows(suites["pipeline"]["score_values"]) + assert "→" in arrows + + def test_improving_scores_show_up(self) -> None: + entries = [ + _make_entry(passed=1, total=1, cases={"a": _case(True, 0.3)}), + _make_entry(passed=1, total=1, cases={"a": _case(True, 0.9)}), + ] + suites = _aggregate_suites(entries) + arrows = _rich_score_arrows(suites["pipeline"]["score_values"]) + assert "↗" in arrows + + def test_declining_scores_show_down(self) -> None: + entries = [ + _make_entry(passed=1, total=1, cases={"a": _case(True, 0.9)}), + _make_entry(passed=1, total=1, cases={"a": _case(True, 0.3)}), + ] + suites = _aggregate_suites(entries) + arrows = _rich_score_arrows(suites["pipeline"]["score_values"]) + assert "↘" in arrows diff --git a/uv.lock b/uv.lock index d7c8a6d..7594a42 100644 --- a/uv.lock +++ b/uv.lock @@ -383,6 +383,91 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, ] +[[package]] +name = "librt" +version = "0.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/56/9c/b4b0c54d84da4a94b37bd44151e46d5e583c9534c7e02250b961b1b6d8a8/librt-0.8.1.tar.gz", hash = "sha256:be46a14693955b3bd96014ccbdb8339ee8c9346fbe11c1b78901b55125f14c73", size = 177471, upload-time = "2026-02-17T16:13:06.101Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/5f/63f5fa395c7a8a93558c0904ba8f1c8d1b997ca6a3de61bc7659970d66bf/librt-0.8.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:81fd938344fecb9373ba1b155968c8a329491d2ce38e7ddb76f30ffb938f12dc", size = 65697, upload-time = "2026-02-17T16:11:06.903Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e0/0472cf37267b5920eff2f292ccfaede1886288ce35b7f3203d8de00abfe6/librt-0.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5db05697c82b3a2ec53f6e72b2ed373132b0c2e05135f0696784e97d7f5d48e7", size = 68376, upload-time = "2026-02-17T16:11:08.395Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8bd1359fdcd27ab897cd5963294fa4a7c83b20a8564678e4fd12157e56a5/librt-0.8.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d56bc4011975f7460bea7b33e1ff425d2f1adf419935ff6707273c77f8a4ada6", size = 197084, upload-time = "2026-02-17T16:11:09.774Z" }, 
+ { url = "https://files.pythonhosted.org/packages/e2/fe/163e33fdd091d0c2b102f8a60cc0a61fd730ad44e32617cd161e7cd67a01/librt-0.8.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cdc0f588ff4b663ea96c26d2a230c525c6fc62b28314edaaaca8ed5af931ad0", size = 207337, upload-time = "2026-02-17T16:11:11.311Z" }, + { url = "https://files.pythonhosted.org/packages/01/99/f85130582f05dcf0c8902f3d629270231d2f4afdfc567f8305a952ac7f14/librt-0.8.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:97c2b54ff6717a7a563b72627990bec60d8029df17df423f0ed37d56a17a176b", size = 219980, upload-time = "2026-02-17T16:11:12.499Z" }, + { url = "https://files.pythonhosted.org/packages/6f/54/cb5e4d03659e043a26c74e08206412ac9a3742f0477d96f9761a55313b5f/librt-0.8.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8f1125e6bbf2f1657d9a2f3ccc4a2c9b0c8b176965bb565dd4d86be67eddb4b6", size = 212921, upload-time = "2026-02-17T16:11:14.484Z" }, + { url = "https://files.pythonhosted.org/packages/b1/81/a3a01e4240579c30f3487f6fed01eb4bc8ef0616da5b4ebac27ca19775f3/librt-0.8.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8f4bb453f408137d7581be309b2fbc6868a80e7ef60c88e689078ee3a296ae71", size = 221381, upload-time = "2026-02-17T16:11:17.459Z" }, + { url = "https://files.pythonhosted.org/packages/08/b0/fc2d54b4b1c6fb81e77288ff31ff25a2c1e62eaef4424a984f228839717b/librt-0.8.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c336d61d2fe74a3195edc1646d53ff1cddd3a9600b09fa6ab75e5514ba4862a7", size = 216714, upload-time = "2026-02-17T16:11:19.197Z" }, + { url = "https://files.pythonhosted.org/packages/96/96/85daa73ffbd87e1fb287d7af6553ada66bf25a2a6b0de4764344a05469f6/librt-0.8.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:eb5656019db7c4deacf0c1a55a898c5bb8f989be904597fcb5232a2f4828fa05", size = 214777, upload-time = "2026-02-17T16:11:20.443Z" }, + { url = "https://files.pythonhosted.org/packages/12/9c/c3aa7a2360383f4bf4f04d98195f2739a579128720c603f4807f006a4225/librt-0.8.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c25d9e338d5bed46c1632f851babf3d13c78f49a225462017cf5e11e845c5891", size = 237398, upload-time = "2026-02-17T16:11:22.083Z" }, + { url = "https://files.pythonhosted.org/packages/61/19/d350ea89e5274665185dabc4bbb9c3536c3411f862881d316c8b8e00eb66/librt-0.8.1-cp310-cp310-win32.whl", hash = "sha256:aaab0e307e344cb28d800957ef3ec16605146ef0e59e059a60a176d19543d1b7", size = 54285, upload-time = "2026-02-17T16:11:23.27Z" }, + { url = "https://files.pythonhosted.org/packages/4f/d6/45d587d3d41c112e9543a0093d883eb57a24a03e41561c127818aa2a6bcc/librt-0.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:56e04c14b696300d47b3bc5f1d10a00e86ae978886d0cee14e5714fafb5df5d2", size = 61352, upload-time = "2026-02-17T16:11:24.207Z" }, + { url = "https://files.pythonhosted.org/packages/1d/01/0e748af5e4fee180cf7cd12bd12b0513ad23b045dccb2a83191bde82d168/librt-0.8.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:681dc2451d6d846794a828c16c22dc452d924e9f700a485b7ecb887a30aad1fd", size = 65315, upload-time = "2026-02-17T16:11:25.152Z" }, + { url = "https://files.pythonhosted.org/packages/9d/4d/7184806efda571887c798d573ca4134c80ac8642dcdd32f12c31b939c595/librt-0.8.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3b4350b13cc0e6f5bec8fa7caf29a8fb8cdc051a3bae45cfbfd7ce64f009965", size = 68021, upload-time = "2026-02-17T16:11:26.129Z" }, + { url = 
"https://files.pythonhosted.org/packages/ae/88/c3c52d2a5d5101f28d3dc89298444626e7874aa904eed498464c2af17627/librt-0.8.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ac1e7817fd0ed3d14fd7c5df91daed84c48e4c2a11ee99c0547f9f62fdae13da", size = 194500, upload-time = "2026-02-17T16:11:27.177Z" }, + { url = "https://files.pythonhosted.org/packages/d6/5d/6fb0a25b6a8906e85b2c3b87bee1d6ed31510be7605b06772f9374ca5cb3/librt-0.8.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:747328be0c5b7075cde86a0e09d7a9196029800ba75a1689332348e998fb85c0", size = 205622, upload-time = "2026-02-17T16:11:28.242Z" }, + { url = "https://files.pythonhosted.org/packages/b2/a6/8006ae81227105476a45691f5831499e4d936b1c049b0c1feb17c11b02d1/librt-0.8.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f0af2bd2bc204fa27f3d6711d0f360e6b8c684a035206257a81673ab924aa11e", size = 218304, upload-time = "2026-02-17T16:11:29.344Z" }, + { url = "https://files.pythonhosted.org/packages/ee/19/60e07886ad16670aae57ef44dada41912c90906a6fe9f2b9abac21374748/librt-0.8.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d480de377f5b687b6b1bc0c0407426da556e2a757633cc7e4d2e1a057aa688f3", size = 211493, upload-time = "2026-02-17T16:11:30.445Z" }, + { url = "https://files.pythonhosted.org/packages/9c/cf/f666c89d0e861d05600438213feeb818c7514d3315bae3648b1fc145d2b6/librt-0.8.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d0ee06b5b5291f609ddb37b9750985b27bc567791bc87c76a569b3feed8481ac", size = 219129, upload-time = "2026-02-17T16:11:32.021Z" }, + { url = "https://files.pythonhosted.org/packages/8f/ef/f1bea01e40b4a879364c031476c82a0dc69ce068daad67ab96302fed2d45/librt-0.8.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e2c6f77b9ad48ce5603b83b7da9ee3e36b3ab425353f695cba13200c5d96596", size = 213113, upload-time = "2026-02-17T16:11:33.192Z" }, + { url = "https://files.pythonhosted.org/packages/9b/80/cdab544370cc6bc1b72ea369525f547a59e6938ef6863a11ab3cd24759af/librt-0.8.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:439352ba9373f11cb8e1933da194dcc6206daf779ff8df0ed69c5e39113e6a99", size = 212269, upload-time = "2026-02-17T16:11:34.373Z" }, + { url = "https://files.pythonhosted.org/packages/9d/9c/48d6ed8dac595654f15eceab2035131c136d1ae9a1e3548e777bb6dbb95d/librt-0.8.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:82210adabbc331dbb65d7868b105185464ef13f56f7f76688565ad79f648b0fe", size = 234673, upload-time = "2026-02-17T16:11:36.063Z" }, + { url = "https://files.pythonhosted.org/packages/16/01/35b68b1db517f27a01be4467593292eb5315def8900afad29fabf56304ba/librt-0.8.1-cp311-cp311-win32.whl", hash = "sha256:52c224e14614b750c0a6d97368e16804a98c684657c7518752c356834fff83bb", size = 54597, upload-time = "2026-02-17T16:11:37.544Z" }, + { url = "https://files.pythonhosted.org/packages/71/02/796fe8f02822235966693f257bf2c79f40e11337337a657a8cfebba5febc/librt-0.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:c00e5c884f528c9932d278d5c9cbbea38a6b81eb62c02e06ae53751a83a4d52b", size = 61733, upload-time = "2026-02-17T16:11:38.691Z" }, + { url = "https://files.pythonhosted.org/packages/28/ad/232e13d61f879a42a4e7117d65e4984bb28371a34bb6fb9ca54ec2c8f54e/librt-0.8.1-cp311-cp311-win_arm64.whl", hash = "sha256:f7cdf7f26c2286ffb02e46d7bac56c94655540b26347673bea15fa52a6af17e9", size = 52273, upload-time = "2026-02-17T16:11:40.308Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/21/d39b0a87ac52fc98f621fb6f8060efb017a767ebbbac2f99fbcbc9ddc0d7/librt-0.8.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a28f2612ab566b17f3698b0da021ff9960610301607c9a5e8eaca62f5e1c350a", size = 66516, upload-time = "2026-02-17T16:11:41.604Z" }, + { url = "https://files.pythonhosted.org/packages/69/f1/46375e71441c43e8ae335905e069f1c54febee63a146278bcee8782c84fd/librt-0.8.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:60a78b694c9aee2a0f1aaeaa7d101cf713e92e8423a941d2897f4fa37908dab9", size = 68634, upload-time = "2026-02-17T16:11:43.268Z" }, + { url = "https://files.pythonhosted.org/packages/0a/33/c510de7f93bf1fa19e13423a606d8189a02624a800710f6e6a0a0f0784b3/librt-0.8.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:758509ea3f1eba2a57558e7e98f4659d0ea7670bff49673b0dde18a3c7e6c0eb", size = 198941, upload-time = "2026-02-17T16:11:44.28Z" }, + { url = "https://files.pythonhosted.org/packages/dd/36/e725903416409a533d92398e88ce665476f275081d0d7d42f9c4951999e5/librt-0.8.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:039b9f2c506bd0ab0f8725aa5ba339c6f0cd19d3b514b50d134789809c24285d", size = 209991, upload-time = "2026-02-17T16:11:45.462Z" }, + { url = "https://files.pythonhosted.org/packages/30/7a/8d908a152e1875c9f8eac96c97a480df425e657cdb47854b9efaa4998889/librt-0.8.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5bb54f1205a3a6ab41a6fd71dfcdcbd278670d3a90ca502a30d9da583105b6f7", size = 224476, upload-time = "2026-02-17T16:11:46.542Z" }, + { url = "https://files.pythonhosted.org/packages/a8/b8/a22c34f2c485b8903a06f3fe3315341fe6876ef3599792344669db98fcff/librt-0.8.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:05bd41cdee35b0c59c259f870f6da532a2c5ca57db95b5f23689fcb5c9e42440", size = 217518, upload-time = "2026-02-17T16:11:47.746Z" }, + { url = "https://files.pythonhosted.org/packages/79/6f/5c6fea00357e4f82ba44f81dbfb027921f1ab10e320d4a64e1c408d035d9/librt-0.8.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adfab487facf03f0d0857b8710cf82d0704a309d8ffc33b03d9302b4c64e91a9", size = 225116, upload-time = "2026-02-17T16:11:49.298Z" }, + { url = "https://files.pythonhosted.org/packages/f2/a0/95ced4e7b1267fe1e2720a111685bcddf0e781f7e9e0ce59d751c44dcfe5/librt-0.8.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:153188fe98a72f206042be10a2c6026139852805215ed9539186312d50a8e972", size = 217751, upload-time = "2026-02-17T16:11:50.49Z" }, + { url = "https://files.pythonhosted.org/packages/93/c2/0517281cb4d4101c27ab59472924e67f55e375bc46bedae94ac6dc6e1902/librt-0.8.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:dd3c41254ee98604b08bd5b3af5bf0a89740d4ee0711de95b65166bf44091921", size = 218378, upload-time = "2026-02-17T16:11:51.783Z" }, + { url = "https://files.pythonhosted.org/packages/43/e8/37b3ac108e8976888e559a7b227d0ceac03c384cfd3e7a1c2ee248dbae79/librt-0.8.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e0d138c7ae532908cbb342162b2611dbd4d90c941cd25ab82084aaf71d2c0bd0", size = 241199, upload-time = "2026-02-17T16:11:53.561Z" }, + { url = "https://files.pythonhosted.org/packages/4b/5b/35812d041c53967fedf551a39399271bbe4257e681236a2cf1a69c8e7fa1/librt-0.8.1-cp312-cp312-win32.whl", hash = "sha256:43353b943613c5d9c49a25aaffdba46f888ec354e71e3529a00cca3f04d66a7a", size = 54917, upload-time = "2026-02-17T16:11:54.758Z" }, + { url = 
"https://files.pythonhosted.org/packages/de/d1/fa5d5331b862b9775aaf2a100f5ef86854e5d4407f71bddf102f4421e034/librt-0.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:ff8baf1f8d3f4b6b7257fcb75a501f2a5499d0dda57645baa09d4d0d34b19444", size = 62017, upload-time = "2026-02-17T16:11:55.748Z" }, + { url = "https://files.pythonhosted.org/packages/c7/7c/c614252f9acda59b01a66e2ddfd243ed1c7e1deab0293332dfbccf862808/librt-0.8.1-cp312-cp312-win_arm64.whl", hash = "sha256:0f2ae3725904f7377e11cc37722d5d401e8b3d5851fb9273d7f4fe04f6b3d37d", size = 52441, upload-time = "2026-02-17T16:11:56.801Z" }, + { url = "https://files.pythonhosted.org/packages/c5/3c/f614c8e4eaac7cbf2bbdf9528790b21d89e277ee20d57dc6e559c626105f/librt-0.8.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7e6bad1cd94f6764e1e21950542f818a09316645337fd5ab9a7acc45d99a8f35", size = 66529, upload-time = "2026-02-17T16:11:57.809Z" }, + { url = "https://files.pythonhosted.org/packages/ab/96/5836544a45100ae411eda07d29e3d99448e5258b6e9c8059deb92945f5c2/librt-0.8.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cf450f498c30af55551ba4f66b9123b7185362ec8b625a773b3d39aa1a717583", size = 68669, upload-time = "2026-02-17T16:11:58.843Z" }, + { url = "https://files.pythonhosted.org/packages/06/53/f0b992b57af6d5531bf4677d75c44f095f2366a1741fb695ee462ae04b05/librt-0.8.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:eca45e982fa074090057132e30585a7e8674e9e885d402eae85633e9f449ce6c", size = 199279, upload-time = "2026-02-17T16:11:59.862Z" }, + { url = "https://files.pythonhosted.org/packages/f3/ad/4848cc16e268d14280d8168aee4f31cea92bbd2b79ce33d3e166f2b4e4fc/librt-0.8.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c3811485fccfda840861905b8c70bba5ec094e02825598bb9d4ca3936857a04", size = 210288, upload-time = "2026-02-17T16:12:00.954Z" }, + { url = "https://files.pythonhosted.org/packages/52/05/27fdc2e95de26273d83b96742d8d3b7345f2ea2bdbd2405cc504644f2096/librt-0.8.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e4af413908f77294605e28cfd98063f54b2c790561383971d2f52d113d9c363", size = 224809, upload-time = "2026-02-17T16:12:02.108Z" }, + { url = "https://files.pythonhosted.org/packages/7a/d0/78200a45ba3240cb042bc597d6f2accba9193a2c57d0356268cbbe2d0925/librt-0.8.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:5212a5bd7fae98dae95710032902edcd2ec4dc994e883294f75c857b83f9aba0", size = 218075, upload-time = "2026-02-17T16:12:03.631Z" }, + { url = "https://files.pythonhosted.org/packages/af/72/a210839fa74c90474897124c064ffca07f8d4b347b6574d309686aae7ca6/librt-0.8.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e692aa2d1d604e6ca12d35e51fdc36f4cda6345e28e36374579f7ef3611b3012", size = 225486, upload-time = "2026-02-17T16:12:04.725Z" }, + { url = "https://files.pythonhosted.org/packages/a3/c1/a03cc63722339ddbf087485f253493e2b013039f5b707e8e6016141130fa/librt-0.8.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4be2a5c926b9770c9e08e717f05737a269b9d0ebc5d2f0060f0fe3fe9ce47acb", size = 218219, upload-time = "2026-02-17T16:12:05.828Z" }, + { url = "https://files.pythonhosted.org/packages/58/f5/fff6108af0acf941c6f274a946aea0e484bd10cd2dc37610287ce49388c5/librt-0.8.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:fd1a720332ea335ceb544cf0a03f81df92abd4bb887679fd1e460976b0e6214b", size = 218750, upload-time = "2026-02-17T16:12:07.09Z" }, + { url = 
"https://files.pythonhosted.org/packages/71/67/5a387bfef30ec1e4b4f30562c8586566faf87e47d696768c19feb49e3646/librt-0.8.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:93c2af9e01e0ef80d95ae3c720be101227edae5f2fe7e3dc63d8857fadfc5a1d", size = 241624, upload-time = "2026-02-17T16:12:08.43Z" }, + { url = "https://files.pythonhosted.org/packages/d4/be/24f8502db11d405232ac1162eb98069ca49c3306c1d75c6ccc61d9af8789/librt-0.8.1-cp313-cp313-win32.whl", hash = "sha256:086a32dbb71336627e78cc1d6ee305a68d038ef7d4c39aaff41ae8c9aa46e91a", size = 54969, upload-time = "2026-02-17T16:12:09.633Z" }, + { url = "https://files.pythonhosted.org/packages/5c/73/c9fdf6cb2a529c1a092ce769a12d88c8cca991194dfe641b6af12fa964d2/librt-0.8.1-cp313-cp313-win_amd64.whl", hash = "sha256:e11769a1dbda4da7b00a76cfffa67aa47cfa66921d2724539eee4b9ede780b79", size = 62000, upload-time = "2026-02-17T16:12:10.632Z" }, + { url = "https://files.pythonhosted.org/packages/d3/97/68f80ca3ac4924f250cdfa6e20142a803e5e50fca96ef5148c52ee8c10ea/librt-0.8.1-cp313-cp313-win_arm64.whl", hash = "sha256:924817ab3141aca17893386ee13261f1d100d1ef410d70afe4389f2359fea4f0", size = 52495, upload-time = "2026-02-17T16:12:11.633Z" }, + { url = "https://files.pythonhosted.org/packages/c9/6a/907ef6800f7bca71b525a05f1839b21f708c09043b1c6aa77b6b827b3996/librt-0.8.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6cfa7fe54fd4d1f47130017351a959fe5804bda7a0bc7e07a2cdbc3fdd28d34f", size = 66081, upload-time = "2026-02-17T16:12:12.766Z" }, + { url = "https://files.pythonhosted.org/packages/1b/18/25e991cd5640c9fb0f8d91b18797b29066b792f17bf8493da183bf5caabe/librt-0.8.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:228c2409c079f8c11fb2e5d7b277077f694cb93443eb760e00b3b83cb8b3176c", size = 68309, upload-time = "2026-02-17T16:12:13.756Z" }, + { url = "https://files.pythonhosted.org/packages/a4/36/46820d03f058cfb5a9de5940640ba03165ed8aded69e0733c417bb04df34/librt-0.8.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7aae78ab5e3206181780e56912d1b9bb9f90a7249ce12f0e8bf531d0462dd0fc", size = 196804, upload-time = "2026-02-17T16:12:14.818Z" }, + { url = "https://files.pythonhosted.org/packages/59/18/5dd0d3b87b8ff9c061849fbdb347758d1f724b9a82241aa908e0ec54ccd0/librt-0.8.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:172d57ec04346b047ca6af181e1ea4858086c80bdf455f61994c4aa6fc3f866c", size = 206907, upload-time = "2026-02-17T16:12:16.513Z" }, + { url = "https://files.pythonhosted.org/packages/d1/96/ef04902aad1424fd7299b62d1890e803e6ab4018c3044dca5922319c4b97/librt-0.8.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6b1977c4ea97ce5eb7755a78fae68d87e4102e4aaf54985e8b56806849cc06a3", size = 221217, upload-time = "2026-02-17T16:12:17.906Z" }, + { url = "https://files.pythonhosted.org/packages/6d/ff/7e01f2dda84a8f5d280637a2e5827210a8acca9a567a54507ef1c75b342d/librt-0.8.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:10c42e1f6fd06733ef65ae7bebce2872bcafd8d6e6b0a08fe0a05a23b044fb14", size = 214622, upload-time = "2026-02-17T16:12:19.108Z" }, + { url = "https://files.pythonhosted.org/packages/1e/8c/5b093d08a13946034fed57619742f790faf77058558b14ca36a6e331161e/librt-0.8.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4c8dfa264b9193c4ee19113c985c95f876fae5e51f731494fc4e0cf594990ba7", size = 221987, upload-time = "2026-02-17T16:12:20.331Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/cc/86b0b3b151d40920ad45a94ce0171dec1aebba8a9d72bb3fa00c73ab25dd/librt-0.8.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:01170b6729a438f0dedc4a26ed342e3dc4f02d1000b4b19f980e1877f0c297e6", size = 215132, upload-time = "2026-02-17T16:12:21.54Z" }, + { url = "https://files.pythonhosted.org/packages/fc/be/8588164a46edf1e69858d952654e216a9a91174688eeefb9efbb38a9c799/librt-0.8.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:7b02679a0d783bdae30d443025b94465d8c3dc512f32f5b5031f93f57ac32071", size = 215195, upload-time = "2026-02-17T16:12:23.073Z" }, + { url = "https://files.pythonhosted.org/packages/f5/f2/0b9279bea735c734d69344ecfe056c1ba211694a72df10f568745c899c76/librt-0.8.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:190b109bb69592a3401fe1ffdea41a2e73370ace2ffdc4a0e8e2b39cdea81b78", size = 237946, upload-time = "2026-02-17T16:12:24.275Z" }, + { url = "https://files.pythonhosted.org/packages/e9/cc/5f2a34fbc8aeb35314a3641f9956fa9051a947424652fad9882be7a97949/librt-0.8.1-cp314-cp314-win32.whl", hash = "sha256:e70a57ecf89a0f64c24e37f38d3fe217a58169d2fe6ed6d70554964042474023", size = 50689, upload-time = "2026-02-17T16:12:25.766Z" }, + { url = "https://files.pythonhosted.org/packages/a0/76/cd4d010ab2147339ca2b93e959c3686e964edc6de66ddacc935c325883d7/librt-0.8.1-cp314-cp314-win_amd64.whl", hash = "sha256:7e2f3edca35664499fbb36e4770650c4bd4a08abc1f4458eab9df4ec56389730", size = 57875, upload-time = "2026-02-17T16:12:27.465Z" }, + { url = "https://files.pythonhosted.org/packages/84/0f/2143cb3c3ca48bd3379dcd11817163ca50781927c4537345d608b5045998/librt-0.8.1-cp314-cp314-win_arm64.whl", hash = "sha256:0d2f82168e55ddefd27c01c654ce52379c0750ddc31ee86b4b266bcf4d65f2a3", size = 48058, upload-time = "2026-02-17T16:12:28.556Z" }, + { url = "https://files.pythonhosted.org/packages/d2/0e/9b23a87e37baf00311c3efe6b48d6b6c168c29902dfc3f04c338372fd7db/librt-0.8.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2c74a2da57a094bd48d03fa5d196da83d2815678385d2978657499063709abe1", size = 68313, upload-time = "2026-02-17T16:12:29.659Z" }, + { url = "https://files.pythonhosted.org/packages/db/9a/859c41e5a4f1c84200a7d2b92f586aa27133c8243b6cac9926f6e54d01b9/librt-0.8.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a355d99c4c0d8e5b770313b8b247411ed40949ca44e33e46a4789b9293a907ee", size = 70994, upload-time = "2026-02-17T16:12:31.516Z" }, + { url = "https://files.pythonhosted.org/packages/4c/28/10605366ee599ed34223ac2bf66404c6fb59399f47108215d16d5ad751a8/librt-0.8.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2eb345e8b33fb748227409c9f1233d4df354d6e54091f0e8fc53acdb2ffedeb7", size = 220770, upload-time = "2026-02-17T16:12:33.294Z" }, + { url = "https://files.pythonhosted.org/packages/af/8d/16ed8fd452dafae9c48d17a6bc1ee3e818fd40ef718d149a8eff2c9f4ea2/librt-0.8.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9be2f15e53ce4e83cc08adc29b26fb5978db62ef2a366fbdf716c8a6c8901040", size = 235409, upload-time = "2026-02-17T16:12:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/89/1b/7bdf3e49349c134b25db816e4a3db6b94a47ac69d7d46b1e682c2c4949be/librt-0.8.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:785ae29c1f5c6e7c2cde2c7c0e148147f4503da3abc5d44d482068da5322fd9e", size = 246473, upload-time = "2026-02-17T16:12:36.656Z" }, + { url = 
"https://files.pythonhosted.org/packages/4e/8a/91fab8e4fd2a24930a17188c7af5380eb27b203d72101c9cc000dbdfd95a/librt-0.8.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1d3a7da44baf692f0c6aeb5b2a09c5e6fc7a703bca9ffa337ddd2e2da53f7732", size = 238866, upload-time = "2026-02-17T16:12:37.849Z" }, + { url = "https://files.pythonhosted.org/packages/b9/e0/c45a098843fc7c07e18a7f8a24ca8496aecbf7bdcd54980c6ca1aaa79a8e/librt-0.8.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5fc48998000cbc39ec0d5311312dda93ecf92b39aaf184c5e817d5d440b29624", size = 250248, upload-time = "2026-02-17T16:12:39.445Z" }, + { url = "https://files.pythonhosted.org/packages/82/30/07627de23036640c952cce0c1fe78972e77d7d2f8fd54fa5ef4554ff4a56/librt-0.8.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:e96baa6820280077a78244b2e06e416480ed859bbd8e5d641cf5742919d8beb4", size = 240629, upload-time = "2026-02-17T16:12:40.889Z" }, + { url = "https://files.pythonhosted.org/packages/fb/c1/55bfe1ee3542eba055616f9098eaf6eddb966efb0ca0f44eaa4aba327307/librt-0.8.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:31362dbfe297b23590530007062c32c6f6176f6099646bb2c95ab1b00a57c382", size = 239615, upload-time = "2026-02-17T16:12:42.446Z" }, + { url = "https://files.pythonhosted.org/packages/2b/39/191d3d28abc26c9099b19852e6c99f7f6d400b82fa5a4e80291bd3803e19/librt-0.8.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cc3656283d11540ab0ea01978378e73e10002145117055e03722417aeab30994", size = 263001, upload-time = "2026-02-17T16:12:43.627Z" }, + { url = "https://files.pythonhosted.org/packages/b9/eb/7697f60fbe7042ab4e88f4ee6af496b7f222fffb0a4e3593ef1f29f81652/librt-0.8.1-cp314-cp314t-win32.whl", hash = "sha256:738f08021b3142c2918c03692608baed43bc51144c29e35807682f8070ee2a3a", size = 51328, upload-time = "2026-02-17T16:12:45.148Z" }, + { url = "https://files.pythonhosted.org/packages/7c/72/34bf2eb7a15414a23e5e70ecb9440c1d3179f393d9349338a91e2781c0fb/librt-0.8.1-cp314-cp314t-win_amd64.whl", hash = "sha256:89815a22daf9c51884fb5dbe4f1ef65ee6a146e0b6a8df05f753e2e4a9359bf4", size = 58722, upload-time = "2026-02-17T16:12:46.85Z" }, + { url = "https://files.pythonhosted.org/packages/b2/c8/d148e041732d631fc76036f8b30fae4e77b027a1e95b7a84bb522481a940/librt-0.8.1-cp314-cp314t-win_arm64.whl", hash = "sha256:bf512a71a23504ed08103a13c941f763db13fb11177beb3d9244c98c29fb4a61", size = 48755, upload-time = "2026-02-17T16:12:47.943Z" }, +] + [[package]] name = "markdown" version = "3.10" @@ -576,6 +661,73 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5b/54/662a4743aa81d9582ee9339d4ffa3c8fd40a4965e033d77b9da9774d3960/mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31", size = 8728, upload-time = "2023-11-22T19:09:43.465Z" }, ] +[[package]] +name = "mypy" +version = "1.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "librt", marker = "platform_python_implementation != 'PyPy'" }, + { name = "mypy-extensions" }, + { name = "pathspec" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/5c/b0089fe7fef0a994ae5ee07029ced0526082c6cfaaa4c10d40a10e33b097/mypy-1.20.0.tar.gz", hash = "sha256:eb96c84efcc33f0b5e0e04beacf00129dd963b67226b01c00b9dfc8affb464c3", size = 3815028, upload-time = "2026-03-31T16:55:14.959Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/4d/a2/a965c8c3fcd4fa8b84ba0d46606181b0d0a1d50f274c67877f3e9ed4882c/mypy-1.20.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d99f515f95fd03a90875fdb2cca12ff074aa04490db4d190905851bdf8a549a8", size = 14430138, upload-time = "2026-03-31T16:52:37.843Z" }, + { url = "https://files.pythonhosted.org/packages/53/6e/043477501deeb8eabbab7f1a2f6cac62cfb631806dc1d6862a04a7f5011b/mypy-1.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bd0212976dc57a5bfeede7c219e7cd66568a32c05c9129686dd487c059c1b88a", size = 13311282, upload-time = "2026-03-31T16:55:11.021Z" }, + { url = "https://files.pythonhosted.org/packages/65/aa/bd89b247b83128197a214f29f0632ff3c14f54d4cd70d144d157bd7d7d6e/mypy-1.20.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f8426d4d75d68714abc17a4292d922f6ba2cfb984b72c2278c437f6dae797865", size = 13750889, upload-time = "2026-03-31T16:52:02.909Z" }, + { url = "https://files.pythonhosted.org/packages/fa/9d/2860be7355c45247ccc0be1501c91176318964c2a137bd4743f58ce6200e/mypy-1.20.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02cca0761c75b42a20a2757ae58713276605eb29a08dd8a6e092aa347c4115ca", size = 14619788, upload-time = "2026-03-31T16:50:48.928Z" }, + { url = "https://files.pythonhosted.org/packages/75/7f/3ef3e360c91f3de120f205c8ce405e9caf9fc52ef14b65d37073e322c114/mypy-1.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b3a49064504be59e59da664c5e149edc1f26c67c4f8e8456f6ba6aba55033018", size = 14918849, upload-time = "2026-03-31T16:51:10.478Z" }, + { url = "https://files.pythonhosted.org/packages/ae/72/af970dfe167ef788df7c5e6109d2ed0229f164432ce828bc9741a4250e64/mypy-1.20.0-cp310-cp310-win_amd64.whl", hash = "sha256:ebea00201737ad4391142808ed16e875add5c17f676e0912b387739f84991e13", size = 10822007, upload-time = "2026-03-31T16:50:25.268Z" }, + { url = "https://files.pythonhosted.org/packages/93/94/ba9065c2ebe5421619aff684b793d953e438a8bfe31a320dd6d1e0706e81/mypy-1.20.0-cp310-cp310-win_arm64.whl", hash = "sha256:e80cf77847d0d3e6e3111b7b25db32a7f8762fd4b9a3a72ce53fe16a2863b281", size = 9756158, upload-time = "2026-03-31T16:48:36.213Z" }, + { url = "https://files.pythonhosted.org/packages/6e/1c/74cb1d9993236910286865679d1c616b136b2eae468493aa939431eda410/mypy-1.20.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4525e7010b1b38334516181c5b81e16180b8e149e6684cee5a727c78186b4e3b", size = 14343972, upload-time = "2026-03-31T16:49:04.887Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/01399515eca280386e308cf57901e68d3a52af18691941b773b3380c1df8/mypy-1.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a17c5d0bdcca61ce24a35beb828a2d0d323d3fcf387d7512206888c900193367", size = 13225007, upload-time = "2026-03-31T16:50:08.151Z" }, + { url = "https://files.pythonhosted.org/packages/56/ac/b4ba5094fb2d7fe9d2037cd8d18bbe02bcf68fd22ab9ff013f55e57ba095/mypy-1.20.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f75ff57defcd0f1d6e006d721ccdec6c88d4f6a7816eb92f1c4890d979d9ee62", size = 13663752, upload-time = "2026-03-31T16:49:26.064Z" }, + { url = "https://files.pythonhosted.org/packages/db/a7/460678d3cf7da252d2288dad0c602294b6ec22a91932ec368cc11e44bb6e/mypy-1.20.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b503ab55a836136b619b5fc21c8803d810c5b87551af8600b72eecafb0059cb0", size = 14532265, upload-time = 
"2026-03-31T16:53:55.077Z" }, + { url = "https://files.pythonhosted.org/packages/a3/3e/051cca8166cf0438ae3ea80e0e7c030d7a8ab98dffc93f80a1aa3f23c1a2/mypy-1.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1973868d2adbb4584a3835780b27436f06d1dc606af5be09f187aaa25be1070f", size = 14768476, upload-time = "2026-03-31T16:50:34.587Z" }, + { url = "https://files.pythonhosted.org/packages/be/66/8e02ec184f852ed5c4abb805583305db475930854e09964b55e107cdcbc4/mypy-1.20.0-cp311-cp311-win_amd64.whl", hash = "sha256:2fcedb16d456106e545b2bfd7ef9d24e70b38ec252d2a629823a4d07ebcdb69e", size = 10818226, upload-time = "2026-03-31T16:53:15.624Z" }, + { url = "https://files.pythonhosted.org/packages/13/4b/383ad1924b28f41e4879a74151e7a5451123330d45652da359f9183bcd45/mypy-1.20.0-cp311-cp311-win_arm64.whl", hash = "sha256:379edf079ce44ac8d2805bcf9b3dd7340d4f97aad3a5e0ebabbf9d125b84b442", size = 9750091, upload-time = "2026-03-31T16:54:12.162Z" }, + { url = "https://files.pythonhosted.org/packages/be/dd/3afa29b58c2e57c79116ed55d700721c3c3b15955e2b6251dd165d377c0e/mypy-1.20.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:002b613ae19f4ac7d18b7e168ffe1cb9013b37c57f7411984abbd3b817b0a214", size = 14509525, upload-time = "2026-03-31T16:55:01.824Z" }, + { url = "https://files.pythonhosted.org/packages/54/eb/227b516ab8cad9f2a13c5e7a98d28cd6aa75e9c83e82776ae6c1c4c046c7/mypy-1.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9336b5e6712f4adaf5afc3203a99a40b379049104349d747eb3e5a3aa23ac2e", size = 13326469, upload-time = "2026-03-31T16:51:41.23Z" }, + { url = "https://files.pythonhosted.org/packages/57/d4/1ddb799860c1b5ac6117ec307b965f65deeb47044395ff01ab793248a591/mypy-1.20.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f13b3e41bce9d257eded794c0f12878af3129d80aacd8a3ee0dee51f3a978651", size = 13705953, upload-time = "2026-03-31T16:48:55.69Z" }, + { url = "https://files.pythonhosted.org/packages/c5/b7/54a720f565a87b893182a2a393370289ae7149e4715859e10e1c05e49154/mypy-1.20.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9804c3ad27f78e54e58b32e7cb532d128b43dbfb9f3f9f06262b821a0f6bd3f5", size = 14710363, upload-time = "2026-03-31T16:53:26.948Z" }, + { url = "https://files.pythonhosted.org/packages/b2/2a/74810274848d061f8a8ea4ac23aaad43bd3d8c1882457999c2e568341c57/mypy-1.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:697f102c5c1d526bdd761a69f17c6070f9892eebcb94b1a5963d679288c09e78", size = 14947005, upload-time = "2026-03-31T16:50:17.591Z" }, + { url = "https://files.pythonhosted.org/packages/77/91/21b8ba75f958bcda75690951ce6fa6b7138b03471618959529d74b8544e2/mypy-1.20.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ecd63f75fdd30327e4ad8b5704bd6d91fc6c1b2e029f8ee14705e1207212489", size = 10880616, upload-time = "2026-03-31T16:52:19.986Z" }, + { url = "https://files.pythonhosted.org/packages/8a/15/3d8198ef97c1ca03aea010cce4f1d4f3bc5d9849e8c0140111ca2ead9fdd/mypy-1.20.0-cp312-cp312-win_arm64.whl", hash = "sha256:f194db59657c58593a3c47c6dfd7bad4ef4ac12dbc94d01b3a95521f78177e33", size = 9813091, upload-time = "2026-03-31T16:53:44.385Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a7/f64ea7bd592fa431cb597418b6dec4a47f7d0c36325fec7ac67bc8402b94/mypy-1.20.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b20c8b0fd5877abdf402e79a3af987053de07e6fb208c18df6659f708b535134", size = 14485344, upload-time = "2026-03-31T16:49:16.78Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/72/8927d84cfc90c6abea6e96663576e2e417589347eb538749a464c4c218a0/mypy-1.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:367e5c993ba34d5054d11937d0485ad6dfc60ba760fa326c01090fc256adf15c", size = 13327400, upload-time = "2026-03-31T16:53:08.02Z" }, + { url = "https://files.pythonhosted.org/packages/ab/4a/11ab99f9afa41aa350178d24a7d2da17043228ea10f6456523f64b5a6cf6/mypy-1.20.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f799d9db89fc00446f03281f84a221e50018fc40113a3ba9864b132895619ebe", size = 13706384, upload-time = "2026-03-31T16:52:28.577Z" }, + { url = "https://files.pythonhosted.org/packages/42/79/694ca73979cfb3535ebfe78733844cd5aff2e63304f59bf90585110d975a/mypy-1.20.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:555658c611099455b2da507582ea20d2043dfdfe7f5ad0add472b1c6238b433f", size = 14700378, upload-time = "2026-03-31T16:48:45.527Z" }, + { url = "https://files.pythonhosted.org/packages/84/24/a022ccab3a46e3d2cdf2e0e260648633640eb396c7e75d5a42818a8d3971/mypy-1.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:efe8d70949c3023698c3fca1e94527e7e790a361ab8116f90d11221421cd8726", size = 14932170, upload-time = "2026-03-31T16:49:36.038Z" }, + { url = "https://files.pythonhosted.org/packages/d8/9b/549228d88f574d04117e736f55958bd4908f980f9f5700a07aeb85df005b/mypy-1.20.0-cp313-cp313-win_amd64.whl", hash = "sha256:f49590891d2c2f8a9de15614e32e459a794bcba84693c2394291a2038bbaaa69", size = 10888526, upload-time = "2026-03-31T16:50:59.827Z" }, + { url = "https://files.pythonhosted.org/packages/91/17/15095c0e54a8bc04d22d4ff06b2139d5f142c2e87520b4e39010c4862771/mypy-1.20.0-cp313-cp313-win_arm64.whl", hash = "sha256:76a70bf840495729be47510856b978f1b0ec7d08f257ca38c9d932720bf6b43e", size = 9816456, upload-time = "2026-03-31T16:49:59.537Z" }, + { url = "https://files.pythonhosted.org/packages/4e/0e/6ca4a84cbed9e62384bc0b2974c90395ece5ed672393e553996501625fc5/mypy-1.20.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:0f42dfaab7ec1baff3b383ad7af562ab0de573c5f6edb44b2dab016082b89948", size = 14483331, upload-time = "2026-03-31T16:52:57.999Z" }, + { url = "https://files.pythonhosted.org/packages/7d/c5/5fe9d8a729dd9605064691816243ae6c49fde0bd28f6e5e17f6a24203c43/mypy-1.20.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:31b5dbb55293c1bd27c0fc813a0d2bb5ceef9d65ac5afa2e58f829dab7921fd5", size = 13342047, upload-time = "2026-03-31T16:54:21.555Z" }, + { url = "https://files.pythonhosted.org/packages/4c/33/e18bcfa338ca4e6b2771c85d4c5203e627d0c69d9de5c1a2cf2ba13320ba/mypy-1.20.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49d11c6f573a5a08f77fad13faff2139f6d0730ebed2cfa9b3d2702671dd7188", size = 13719585, upload-time = "2026-03-31T16:51:53.89Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8d/93491ff7b79419edc7eabf95cb3b3f7490e2e574b2855c7c7e7394ff933f/mypy-1.20.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7d3243c406773185144527f83be0e0aefc7bf4601b0b2b956665608bf7c98a83", size = 14685075, upload-time = "2026-03-31T16:54:04.464Z" }, + { url = "https://files.pythonhosted.org/packages/b5/9d/d924b38a4923f8d164bf2b4ec98bf13beaf6e10a5348b4b137eadae40a6e/mypy-1.20.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a79c1eba7ac4209f2d850f0edd0a2f8bba88cbfdfefe6fb76a19e9d4fe5e71a2", size = 14919141, upload-time = 
"2026-03-31T16:54:51.785Z" }, + { url = "https://files.pythonhosted.org/packages/59/98/1da9977016678c0b99d43afe52ed00bb3c1a0c4c995d3e6acca1a6ebb9b4/mypy-1.20.0-cp314-cp314-win_amd64.whl", hash = "sha256:00e047c74d3ec6e71a2eb88e9ea551a2edb90c21f993aefa9e0d2a898e0bb732", size = 11050925, upload-time = "2026-03-31T16:51:30.758Z" }, + { url = "https://files.pythonhosted.org/packages/5e/e3/ba0b7a3143e49a9c4f5967dde6ea4bf8e0b10ecbbcca69af84027160ee89/mypy-1.20.0-cp314-cp314-win_arm64.whl", hash = "sha256:931a7630bba591593dcf6e97224a21ff80fb357e7982628d25e3c618e7f598ef", size = 10001089, upload-time = "2026-03-31T16:49:43.632Z" }, + { url = "https://files.pythonhosted.org/packages/12/28/e617e67b3be9d213cda7277913269c874eb26472489f95d09d89765ce2d8/mypy-1.20.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:26c8b52627b6552f47ff11adb4e1509605f094e29815323e487fc0053ebe93d1", size = 15534710, upload-time = "2026-03-31T16:52:12.506Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0c/3b5f2d3e45dc7169b811adce8451679d9430399d03b168f9b0489f43adaa/mypy-1.20.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:39362cdb4ba5f916e7976fccecaab1ba3a83e35f60fa68b64e9a70e221bb2436", size = 14393013, upload-time = "2026-03-31T16:54:41.186Z" }, + { url = "https://files.pythonhosted.org/packages/a3/49/edc8b0aa145cc09c1c74f7ce2858eead9329931dcbbb26e2ad40906daa4e/mypy-1.20.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:34506397dbf40c15dc567635d18a21d33827e9ab29014fb83d292a8f4f8953b6", size = 15047240, upload-time = "2026-03-31T16:54:31.955Z" }, + { url = "https://files.pythonhosted.org/packages/42/37/a946bb416e37a57fa752b3100fd5ede0e28df94f92366d1716555d47c454/mypy-1.20.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:555493c44a4f5a1b58d611a43333e71a9981c6dbe26270377b6f8174126a0526", size = 15858565, upload-time = "2026-03-31T16:53:36.997Z" }, + { url = "https://files.pythonhosted.org/packages/2f/99/7690b5b5b552db1bd4ff362e4c0eb3107b98d680835e65823fbe888c8b78/mypy-1.20.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:2721f0ce49cb74a38f00c50da67cb7d36317b5eda38877a49614dc018e91c787", size = 16087874, upload-time = "2026-03-31T16:52:48.313Z" }, + { url = "https://files.pythonhosted.org/packages/aa/76/53e893a498138066acd28192b77495c9357e5a58cc4be753182846b43315/mypy-1.20.0-cp314-cp314t-win_amd64.whl", hash = "sha256:47781555a7aa5fedcc2d16bcd72e0dc83eb272c10dd657f9fb3f9cc08e2e6abb", size = 12572380, upload-time = "2026-03-31T16:49:52.454Z" }, + { url = "https://files.pythonhosted.org/packages/76/9c/6dbdae21f01b7aacddc2c0bbf3c5557aa547827fdf271770fe1e521e7093/mypy-1.20.0-cp314-cp314t-win_arm64.whl", hash = "sha256:c70380fe5d64010f79fb863b9081c7004dd65225d2277333c219d93a10dad4dd", size = 10381174, upload-time = "2026-03-31T16:51:20.179Z" }, + { url = "https://files.pythonhosted.org/packages/21/66/4d734961ce167f0fd8380769b3b7c06dbdd6ff54c2190f3f2ecd22528158/mypy-1.20.0-py3-none-any.whl", hash = "sha256:a6e0641147cbfa7e4e94efdb95c2dab1aff8cfc159ded13e07f308ddccc8c48e", size = 2636365, upload-time = "2026-03-31T16:51:44.911Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, 
upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + [[package]] name = "nodeenv" version = "1.9.1" @@ -605,11 +757,11 @@ wheels = [ [[package]] name = "pathspec" -version = "0.12.1" +version = "1.0.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fa/36/e27608899f9b8d4dff0617b2d9ab17ca5608956ca44461ac14ac48b44015/pathspec-1.0.4.tar.gz", hash = "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645", size = 131200, upload-time = "2026-01-27T03:59:46.938Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, + { url = "https://files.pythonhosted.org/packages/ef/3c/2c197d226f9ea224a9ab8d197933f9da0ae0aac5b6e0f884e2b8d9c8e9f7/pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723", size = 55206, upload-time = "2026-01-27T03:59:45.137Z" }, ] [[package]] @@ -648,7 +800,7 @@ wheels = [ [[package]] name = "protest" -version = "0.1.1" +version = "0.1.2" source = { editable = "." } dependencies = [ { name = "typing-extensions" }, @@ -666,6 +818,7 @@ web = [ dev = [ { name = "jsonschema" }, { name = "mkdocs-material" }, + { name = "mypy" }, { name = "pre-commit" }, { name = "pytest" }, { name = "pytest-asyncio" }, @@ -691,6 +844,7 @@ provides-extras = ["rich", "web"] dev = [ { name = "jsonschema", specifier = ">=4.0.0" }, { name = "mkdocs-material", specifier = ">=9.7.0" }, + { name = "mypy", specifier = ">=1.0" }, { name = "pre-commit", specifier = ">=4.5.0" }, { name = "pytest", specifier = ">=9.0.1" }, { name = "pytest-asyncio", specifier = ">=1.3.0" },