Evaluation

EvalCase

Bases: BaseModel

A single evaluation test case.

Defines what to send to the agent and what to expect back.

Example

case = EvalCase(
    name="weather_lookup",
    prompt="What's the weather in NYC?",
    expected_tools=["get_weather"],
    expected_output_contains=["temperature", "New York"],
    max_iterations=5,
)

EvalResult

Bases: BaseModel

Result from evaluating a single case.
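The field set for EvalResult isn't expanded on this page, but the attributes read by EvalReport.summary() below imply a shape along these lines. This is a sketch inferred from that usage, not the actual class definition:

from pydantic import BaseModel


class EvalResult(BaseModel):
    """Inferred sketch: fields as consumed by EvalReport.summary() below."""

    case_name: str              # name of the EvalCase that produced this result
    passed: bool                # overall pass/fail for the case
    score: float                # aggregate score for the case
    duration_ms: float          # wall-clock time spent on the case
    checks: dict[str, bool]     # per-check outcomes; the key names are not shown on this page
    error: str | None = None    # error message if the run raised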

EvalReport

Bases: BaseModel

Aggregated report from running an eval suite.
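The constructor call at the end of EvalRunner.run() below fixes the report's fields; continuing the inferred sketch above (the summary() method, shown next, is omitted):

class EvalReport(BaseModel):
    """Inferred sketch: fields as constructed by EvalRunner.run() below."""

    results: list[EvalResult]    # per-case results, in input order
    total_cases: int             # number of cases submitted
    passed: int                  # count of passing cases
    failed: int                  # count of failing cases (total_cases - passed)
    avg_score: float             # mean of per-case scores, 0.0 when no cases ran
    total_duration_ms: float     # sum of per-case durations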

summary

summary() -> str

Generate a human-readable summary.

Source code in src/locus/evaluation/framework.py
def summary(self) -> str:
    """Generate a human-readable summary."""
    lines = [
        f"Eval Report: {self.passed}/{self.total_cases} passed "
        f"(avg score: {self.avg_score:.2f})",
        f"Total duration: {self.total_duration_ms:.0f}ms",
        "",
    ]
    for r in self.results:
        status = "PASS" if r.passed else "FAIL"
        lines.append(
            f"  [{status}] {r.case_name} (score: {r.score:.2f}, {r.duration_ms:.0f}ms)"
        )
        if not r.passed:
            for check_name, check_passed in r.checks.items():
                if not check_passed:
                    lines.append(f"         - {check_name}: FAILED")
            if r.error:
                lines.append(f"         - error: {r.error}")
    return "\n".join(lines)

EvalRunner

EvalRunner(agent: Any)

Run evaluation cases against an agent.

Example

runner = EvalRunner(agent=my_agent)
report = runner.run(
    [
        EvalCase(
            name="basic", prompt="Hello", expected_output_contains=["hello"]
        ),
        EvalCase(
            name="tool_use", prompt="Search for X", expected_tools=["search"]
        ),
    ]
)
print(report.summary())

Source code in src/locus/evaluation/framework.py
def __init__(self, agent: Any) -> None:
    self.agent = agent

run

run(cases: list[EvalCase]) -> EvalReport

Run all eval cases and produce a report.

Source code in src/locus/evaluation/framework.py
def run(self, cases: list[EvalCase]) -> EvalReport:
    """Run all eval cases and produce a report."""
    results: list[EvalResult] = []

    for case in cases:
        result = self._run_case(case)
        results.append(result)

    passed = sum(1 for r in results if r.passed)
    scores = [r.score for r in results]
    total_duration = sum(r.duration_ms for r in results)

    return EvalReport(
        results=results,
        total_cases=len(cases),
        passed=passed,
        failed=len(cases) - passed,
        avg_score=sum(scores) / len(scores) if scores else 0.0,
        total_duration_ms=total_duration,
    )
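
A typical way to wire this into CI is to gate on report.failed. A minimal sketch; the import path is assumed from the source location above, and my_agent stands in for whatever agent you construct:

import sys

from locus.evaluation.framework import EvalCase, EvalRunner  # path assumed from "src/locus/..."

runner = EvalRunner(agent=my_agent)  # my_agent: your agent instance (placeholder)
report = runner.run(
    [
        EvalCase(name="smoke", prompt="Hello", expected_output_contains=["hello"]),
    ]
)
print(report.summary())
sys.exit(1 if report.failed else 0)  # non-zero exit fails the CI job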