Agent

Agent(model: str | Any | None = None, tools: list[Tool] | None = None, system_prompt: str | None = None, reflexion: ReflexionConfig | bool | None = None, grounding: GroundingConfig | bool | None = None, max_iterations: int = 20, conversation_manager: Any | None = None, checkpointer: Any | None = None, hooks: list[Any] | None = None, config: AgentConfig | None = None, **kwargs: Any)

Bases: AgentRuntimeMixin, BaseModel

Primary entry point for Locus agents.

Manages the ReAct loop with optional Reflexion and Grounding.

Usage

agent = Agent(
    model="openai:gpt-4o",  # or oci:cohere.command-r-plus
    tools=[search, calculate],
    system_prompt="You are a helpful assistant.",
)

Async streaming

async for event in agent.run("What is 2+2?"):
    print(event)

Sync execution

result = agent.run_sync("What is 2+2?")
print(result.message)

Initialize an Agent.

Parameters:

- model (str | Any | None, default None): Model string or ModelProtocol instance
- tools (list[Tool] | None, default None): List of tools available to the agent
- system_prompt (str | None, default None): System prompt for the agent
- reflexion (ReflexionConfig | bool | None, default None): Reflexion config (True for defaults, False/None to disable)
- grounding (GroundingConfig | bool | None, default None): Grounding config (True for defaults, False/None to disable)
- max_iterations (int, default 20): Maximum iterations before stopping
- conversation_manager (Any | None, default None): Conversation manager for message pruning
- checkpointer (Any | None, default None): Checkpointer for state persistence
- hooks (list[Any] | None, default None): Lifecycle hooks
- config (AgentConfig | None, default None): Full AgentConfig (overrides other params)
- **kwargs (Any): Additional config options
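Example

A minimal sketch of the reflexion/grounding toggles; the model string and the search tool are illustrative (search is assumed to be a @tool-wrapped function):

agent = Agent(
    model="openai:gpt-4o",
    tools=[search],
    reflexion=True,               # ReflexionConfig() defaults
    grounding=GroundingConfig(),  # or grounding=True for defaults
    max_iterations=10,
)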
Source code in src/locus/agent/agent.py
def __init__(
    self,
    model: str | Any | None = None,
    tools: list[Tool] | None = None,
    system_prompt: str | None = None,
    reflexion: ReflexionConfig | bool | None = None,
    grounding: GroundingConfig | bool | None = None,
    max_iterations: int = 20,
    conversation_manager: Any | None = None,
    checkpointer: Any | None = None,
    hooks: list[Any] | None = None,
    config: AgentConfig | None = None,
    **kwargs: Any,
):
    """
    Initialize an Agent.

    Args:
        model: Model string or ModelProtocol instance
        tools: List of tools available to the agent
        system_prompt: System prompt for the agent
        reflexion: Reflexion config (True for defaults, False/None to disable)
        grounding: Grounding config (True for defaults, False/None to disable)
        max_iterations: Maximum iterations before stopping
        conversation_manager: Conversation manager for message pruning
        checkpointer: Checkpointer for state persistence
        hooks: Lifecycle hooks
        config: Full AgentConfig (overrides other params)
        **kwargs: Additional config options
    """
    # Build config from params or use provided
    if config is not None:
        agent_config = config
    else:
        # Handle reflexion
        reflexion_config = None
        if reflexion is True:
            reflexion_config = ReflexionConfig()
        elif isinstance(reflexion, ReflexionConfig):
            reflexion_config = reflexion

        # Handle grounding
        grounding_config = None
        if grounding is True:
            grounding_config = GroundingConfig()
        elif isinstance(grounding, GroundingConfig):
            grounding_config = grounding

        agent_config = AgentConfig(
            model=model or "openai:gpt-4o",
            tools=tools or [],
            system_prompt=system_prompt or "You are a helpful AI assistant.",
            reflexion=reflexion_config,
            grounding=grounding_config,
            max_iterations=max_iterations,
            conversation_manager=conversation_manager,
            checkpointer=checkpointer,
            hooks=hooks or [],
            **kwargs,
        )

    super().__init__(config=agent_config)
    self._initialize()

is_cancelled property

is_cancelled: bool

Check if cancellation has been requested.

model property

model: Any

Get the model instance.

tools property

tools: ToolRegistry

Get the tool registry.

system_prompt property

system_prompt: str

Get the configured system prompt as a string.

If the config value is a callable (dynamic prompt), it is coerced to its repr so this property never returns a non-string value. Use self.config.system_prompt directly to access the raw value (string or callable) when you need to invoke the dynamic form.
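Example

A minimal sketch of both access paths. The zero-argument call is an assumption; a dynamic prompt in your version may take arguments:

print(agent.system_prompt)  # always a str; callables are coerced to their repr

raw = agent.config.system_prompt
prompt_text = raw() if callable(raw) else raw  # invoke the dynamic form yourself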

run_sync

run_sync(prompt: str, *, thread_id: str | None = None, metadata: dict[str, Any] | None = None) -> AgentResult

Run the agent synchronously.

Parameters:

- prompt (str, required): User prompt to process
- thread_id (str | None, default None): Optional thread ID for checkpointing
- metadata (dict[str, Any] | None, default None): Additional metadata for tools

Returns:

- AgentResult: AgentResult with final message and state
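Example

A minimal sketch; the thread id and metadata values are illustrative:

result = agent.run_sync(
    "Summarise the latest report",
    thread_id="thread-42",        # enables checkpointing for this thread
    metadata={"user_id": "u-1"},  # passed through to tools
)
print(result.message)
print(result.stop_reason, result.iterations)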

Source code in src/locus/agent/agent.py
def run_sync(
    self,
    prompt: str,
    *,
    thread_id: str | None = None,
    metadata: dict[str, Any] | None = None,
) -> AgentResult:
    """
    Run the agent synchronously.

    Args:
        prompt: User prompt to process
        thread_id: Optional thread ID for checkpointing
        metadata: Additional metadata for tools

    Returns:
        AgentResult with final message and state
    """

    async def _run() -> AgentResult:
        started_at = datetime.now(UTC)
        stop_reason: StopReason = "complete"
        final_message: str = ""
        tool_errors = 0

        callback = self.config.callback_handler

        async for event in self.run(prompt, thread_id=thread_id, metadata=metadata):
            # Fire callback if set
            if callback is not None:
                callback(event)

            if isinstance(event, TerminateEvent):
                stop_reason = _normalize_stop_reason(event.reason)
                final_message = event.final_message or ""
            elif isinstance(event, ToolCompleteEvent):
                if event.error:
                    tool_errors += 1

        # Use actual final state from run() instead of reconstructing
        state = self._last_run_state
        if state is None:
            state = await self._create_initial_state(prompt, thread_id, metadata)
            if final_message:
                state = state.with_message(Message.assistant(final_message))

        # Structured-output coercion (no-op when output_schema is unset).
        parsed_obj = None
        parse_error_msg = None
        structured_message = final_message
        if self.config.output_schema is not None:
            parsed_obj, parse_error_msg, state = await self._structure_output(
                state, final_message or ""
            )
            if parsed_obj is not None:
                # Replace ``message`` with the canonical JSON form so callers
                # using ``result.message`` still see a schema-valid string.
                structured_message = parsed_obj.model_dump_json()

        # Run GSAR judgment when configured. Single-pass v1: judge
        # the final answer, surface the result on AgentResult.
        # Full Algorithm-1 outer loop (regenerate / replan) lives in
        # locus.reasoning.gsar_evaluator and can be wired
        # explicitly when the caller wants the loop dynamics.
        gsar_judgment, gsar_score_value, gsar_decision = await self._run_gsar_judgment(
            state, structured_message or final_message
        )

        elapsed_ms = (datetime.now(UTC) - started_at).total_seconds() * 1000
        metrics = ExecutionMetrics(
            iterations=state.iteration,
            tool_calls=len(state.tool_executions),
            tool_errors=tool_errors,
            total_tokens=state.total_tokens_used,
            prompt_tokens=state.prompt_tokens_used,
            completion_tokens=state.completion_tokens_used,
            cache_creation_input_tokens=state.cache_creation_tokens_used,
            cache_read_input_tokens=state.cache_read_tokens_used,
            duration_ms=elapsed_ms,
        )

        return AgentResult.from_state(
            state=state,
            stop_reason=stop_reason,
            metrics=metrics,
            started_at=started_at,
            parsed=parsed_obj,
            parse_error=parse_error_msg,
            message=structured_message,
            gsar_judgment=gsar_judgment,
            gsar_score=gsar_score_value,
            gsar_decision=gsar_decision,
        )

    async def _run_and_close_clients() -> AgentResult:
        # Wrap _run() so any model-level httpx client is shut down
        # *inside* this asyncio.run loop. Otherwise the client's
        # connections remain bound to the loop we're about to close;
        # when ``run_sync`` is called again, the next ``asyncio.run``
        # opens a fresh loop and the old client's ``__del__`` tries
        # to ``aclose`` against the now-closed loop, raising
        # ``RuntimeError: Event loop is closed``.
        try:
            return await _run()
        finally:
            close = getattr(self.model, "close", None)
            if close is not None:
                try:
                    await close()
                except Exception:  # noqa: BLE001 — cleanup must never mask a real error from _run()
                    pass

            # Drain any background tasks the SDK spawned (httpx's TLS
            # teardown schedules ``loop.call_soon`` callbacks via
            # anyio that fire after ``client.close()`` returns). If
            # we don't await them, the loop closes mid-flight and the
            # callbacks raise "Event loop is closed" on the asyncio
            # default exception handler — visible in stderr as
            # "Task exception was never retrieved".
            try:
                pending = [
                    t
                    for t in asyncio.all_tasks()
                    if t is not asyncio.current_task() and not t.done()
                ]
                if pending:
                    await asyncio.wait(pending, timeout=2.0)
            except Exception:  # noqa: BLE001 — best-effort drain; never block teardown
                pass

    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No running loop, create a new one
        return asyncio.run(_run_and_close_clients())
    else:
        # There's a running loop, run in a thread to avoid nesting
        import concurrent.futures

        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(asyncio.run, _run_and_close_clients())
            return future.result()

invoke

invoke(prompt: str, *, thread_id: str | None = None, metadata: dict[str, Any] | None = None) -> AgentResult

Invoke the agent (alias for run_sync).

Parameters:

- prompt (str, required): User prompt to process
- thread_id (str | None, default None): Optional thread ID for checkpointing
- metadata (dict[str, Any] | None, default None): Additional metadata for tools

Returns:

- AgentResult: AgentResult with final message and state

Source code in src/locus/agent/agent.py
def invoke(
    self,
    prompt: str,
    *,
    thread_id: str | None = None,
    metadata: dict[str, Any] | None = None,
) -> AgentResult:
    """
    Invoke the agent (alias for run_sync).

    Args:
        prompt: User prompt to process
        thread_id: Optional thread ID for checkpointing
        metadata: Additional metadata for tools

    Returns:
        AgentResult with final message and state
    """
    return self.run_sync(prompt, thread_id=thread_id, metadata=metadata)

cancel

cancel() -> None

Cancel a running agent from an external thread.

Sets a signal that the agent loop checks at each iteration. The agent will stop gracefully with stop_reason="cancelled".

Thread-safe — can be called from any thread while the agent is running.

Example

import threading
import time

def run_agent():
    result = agent.run_sync("Long task...")
    print(result.stop_reason)  # "cancelled"

t = threading.Thread(target=run_agent)
t.start()
time.sleep(5)
agent.cancel()  # Stop from main thread
t.join()

Source code in src/locus/agent/agent.py
def cancel(self) -> None:
    """Cancel a running agent from an external thread.

    Sets a signal that the agent loop checks at each iteration.
    The agent will stop gracefully with stop_reason="cancelled".

    Thread-safe — can be called from any thread while the agent is running.

    Example:
        import threading

        def run_agent():
            result = agent.run_sync("Long task...")
            print(result.stop_reason)  # "cancelled"

        t = threading.Thread(target=run_agent)
        t.start()
        time.sleep(5)
        agent.cancel()  # Stop from main thread
        t.join()
    """
    if self._cancel_signal is None:
        self._cancel_signal = threading.Event()
    self._cancel_signal.set()

as_tool

as_tool(name: str | None = None, description: str | None = None) -> Tool

Wrap this agent as a Tool for use by another agent.

The returned tool accepts a prompt string and returns the agent's final response. This enables agent delegation — a parent agent can call a sub-agent as if it were any other tool.

Parameters:

- name (str | None, default None): Tool name (defaults to agent_id or "sub_agent")
- description (str | None, default None): Tool description (defaults to system prompt excerpt)

Returns:

- Tool: A Tool that runs this agent when called

Example

researcher = Agent(
    model=model, tools=[search], system_prompt="You research topics."
)
writer = Agent(model=model, tools=[researcher.as_tool("research")])
result = writer.run_sync("Write about quantum computing")

Source code in src/locus/agent/agent.py
def as_tool(
    self,
    name: str | None = None,
    description: str | None = None,
) -> Tool:
    """
    Wrap this agent as a Tool for use by another agent.

    The returned tool accepts a prompt string and returns the agent's
    final response. This enables agent delegation — a parent agent
    can call a sub-agent as if it were any other tool.

    Args:
        name: Tool name (defaults to agent_id or "sub_agent")
        description: Tool description (defaults to system prompt excerpt)

    Returns:
        A Tool that runs this agent when called

    Example:
        >>> researcher = Agent(
        ...     model=model, tools=[search], system_prompt="You research topics."
        ... )
        >>> writer = Agent(model=model, tools=[researcher.as_tool("research")])
        >>> result = writer.run_sync("Write about quantum computing")
    """
    from locus.tools.decorator import tool as tool_decorator

    agent = self
    tool_name = name or self.config.agent_id or "sub_agent"
    tool_desc = description or (
        "Delegate a task to a sub-agent. "
        "The sub-agent has its own tools and will work independently "
        "to answer your request. Send a clear, specific prompt."
    )

    @tool_decorator(name=tool_name, description=tool_desc)
    def agent_tool(prompt: str) -> str:
        """Run the sub-agent with the given prompt and return its response.

        Args:
            prompt: The task or question to delegate to the sub-agent

        Returns:
            The sub-agent's final response
        """
        result = agent.run_sync(prompt)
        if result.success:
            return result.message
        return f"Sub-agent finished with status '{result.stop_reason}': {result.message}"

    return agent_tool

resume async

resume(response: str) -> AsyncIterator[LocusEvent]

Resume agent execution after an interrupt.

When a tool calls ask_user() and the agent yields an InterruptEvent, call this method with the user's response to continue execution.

Parameters:

- response (str, required): The user's response to the interrupt question

Yields:

- AsyncIterator[LocusEvent]: LocusEvent instances for the remaining execution

Example

async for event in agent.run("Build an app"):
    if isinstance(event, InterruptEvent):
        answer = input(event.question)
        async for event in agent.resume(answer):
            handle(event)

Source code in src/locus/agent/agent.py
async def resume(
    self,
    response: str,
) -> AsyncIterator[LocusEvent]:
    """
    Resume agent execution after an interrupt.

    When a tool calls ask_user() and the agent yields an InterruptEvent,
    call this method with the user's response to continue execution.

    Args:
        response: The user's response to the interrupt question

    Yields:
        LocusEvent instances for the remaining execution

    Example:
        >>> async for event in agent.run("Build an app"):
        ...     if isinstance(event, InterruptEvent):
        ...         answer = input(event.question)
        ...         async for event in agent.resume(answer):
        ...             handle(event)
    """
    if self._interrupt_state is None:
        raise RuntimeError("No interrupt to resume from. Call run() first.")

    # Add the user's response as a tool result for ask_user
    state = self._interrupt_state
    state = state.with_message(Message.system(f"[User Response] {response}"))

    # Store for _create_initial_state to pick up
    self._last_run_state = state
    self._interrupt_state = None

    # Re-run — _create_initial_state will load from checkpoint/state
    # We pass the original prompt; the state already has the full history
    prompt = self._interrupt_prompt or ""
    thread_id = self._interrupt_thread_id
    metadata = self._interrupt_metadata

    # Clear interrupt bookkeeping
    self._interrupt_prompt = None
    self._interrupt_thread_id = None
    self._interrupt_metadata = None

    # Continue execution from the interrupted state
    async for event in self._run_from_state(state, prompt, thread_id, metadata):
        yield event

add_tool

add_tool(tool: Tool) -> None

Register a tool on this agent after construction.

Locus compiles config.tools into the runtime ToolRegistry once, inside __init__ (via locus.agent.initializer.initialize_agent). Mutating self.config.tools directly after that point is a silent no-op: the model never sees the added tool because the registry has already been built.

Use this method (or add_tools) when you want to compose a specialist fleet at runtime: build each specialist, wrap it via Agent.as_tool(...), and attach the wrappers to the orchestrator.

The tool is also appended to self.config.tools so that a subsequent re-initialisation (e.g. after a config-driven clone) sees the same shape.

Raises:

- TypeError: if tool is not a locus.tools.Tool instance. Callable functions must be wrapped with the @tool decorator first.
- ValueError: if a tool with the same name is already registered (propagated from ToolRegistry.register).

Source code in src/locus/agent/agent.py
def add_tool(self, tool: Tool) -> None:
    """Register a tool on this agent after construction.

    Locus compiles ``config.tools`` into the runtime ``ToolRegistry``
    once, inside ``__init__`` (via :func:`locus.agent.initializer.
    initialize_agent`). Mutating ``self.config.tools`` directly after
    that point is a silent no-op — the model never sees the added
    tool because the registry has already been built.

    Use this method (or :meth:`add_tools`) when you want to compose a
    specialist fleet at runtime: build each specialist, wrap it via
    ``Agent.as_tool(...)``, and attach the wrappers to the
    orchestrator.

    The tool is also appended to ``self.config.tools`` so that a
    subsequent re-initialisation (e.g. after a config-driven
    clone) sees the same shape.

    Raises:
        TypeError: if ``tool`` is not a :class:`locus.tools.Tool`
            instance. Callable functions must be wrapped with the
            :func:`@tool` decorator first.
        ValueError: if a tool with the same ``name`` is already
            registered (propagated from
            :meth:`ToolRegistry.register`).
    """
    if not isinstance(tool, Tool):
        raise TypeError(
            f"Expected Tool instance (use @tool to wrap a function), got {type(tool)}"
        )
    self._initialize()
    self._tool_registry.register(tool)
    # Mirror into config so a re-initialisation reconstructs the
    # same surface. ``config.tools`` is a list[Any] by Pydantic
    # declaration, so we mutate in place rather than reassigning.
    self.config.tools.append(tool)

add_tools

add_tools(tools: list[Tool]) -> None

Register multiple tools at once.

Equivalent to calling add_tool for each entry. The call is not atomic: if any single registration fails (wrong type, duplicate name), the call raises partway through, and tools registered before the failing one remain in the registry. Validate inputs ahead of time when atomic behaviour is required; see the sketch below.
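Example

A sketch of validating ahead of time for atomic-ish behaviour. It assumes Tool exposes a name attribute (implied by the duplicate-name check) and does not guard against names already in the registry:

def add_tools_atomically(agent: Agent, tools: list[Tool]) -> None:
    # Fail before mutating the registry, not halfway through.
    for t in tools:
        if not isinstance(t, Tool):
            raise TypeError(f"not a Tool: {t!r}")
    names = [t.name for t in tools]  # assumes Tool.name
    if len(set(names)) != len(names):
        raise ValueError("duplicate tool names in batch")
    agent.add_tools(tools)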

Source code in src/locus/agent/agent.py
def add_tools(self, tools: list[Tool]) -> None:
    """Register multiple tools at once.

    Equivalent to calling :meth:`add_tool` for each entry. If any
    single registration fails (wrong type, duplicate name), the
    whole call fails: tools registered before the failing one
    remain in the registry. Validate inputs ahead of time when
    atomic behaviour is required.
    """
    for t in tools:
        self.add_tool(t)

run async

run(prompt: str, *, thread_id: str | None = None, metadata: dict[str, Any] | None = None) -> AsyncIterator[LocusEvent]

Run the agent with streaming events.

Parameters:

- prompt (str, required): User prompt to process
- thread_id (str | None, default None): Optional thread ID for checkpointing
- metadata (dict[str, Any] | None, default None): Additional metadata for tools

Yields:

- AsyncIterator[LocusEvent]: LocusEvent instances for each step
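Example

A sketch of consuming the stream. The import path for the event classes is an assumption (adjust to where your install exposes them), and agent is an Agent built as above:

import asyncio

from locus.events import TerminateEvent, ToolCompleteEvent  # hypothetical path

async def main() -> None:
    async for event in agent.run("What is 2+2?"):
        if isinstance(event, ToolCompleteEvent):
            print(f"tool {event.tool_name}: {event.result}")
        elif isinstance(event, TerminateEvent):
            print(f"done ({event.reason}): {event.final_message}")

asyncio.run(main())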

Source code in src/locus/agent/runtime_loop.py
@_bus_bridge
async def run(
    self,
    prompt: str,
    *,
    thread_id: str | None = None,
    metadata: dict[str, Any] | None = None,
) -> AsyncIterator[LocusEvent]:
    """
    Run the agent with streaming events.

    Args:
        prompt: User prompt to process
        thread_id: Optional thread ID for checkpointing
        metadata: Additional metadata for tools

    Yields:
        LocusEvent instances for each step
    """
    self._initialize()

    # Create initial state
    state = await self._create_initial_state(prompt, thread_id, metadata)

    # Track metrics
    started_at = datetime.now(UTC)
    _total_tokens = 0
    _tool_calls_count = 0
    _tool_errors_count = 0
    _reflexion_evals = 0
    _grounding_evals = 0
    _last_assistant_content: str | None = None
    _last_no_tool_calls = False

    # Reset any user-supplied composable termination condition so
    # time-windowed checks (TimeLimit) start their clock at run start.
    if self.config.termination is not None:
        self.config.termination.reset()

    # Run hooks: before_invocation
    state = await self._run_before_invocation_hooks(prompt, state)

    # Inject long-term memories into the system prompt.
    if self._memory_manager is not None:
        state = await self._memory_manager.on_session_start(state)

    try:
        # Main ReAct loop
        while True:
            # Check time budget
            if self.config.time_budget_seconds is not None:
                elapsed = (datetime.now(UTC) - started_at).total_seconds()
                if elapsed >= self.config.time_budget_seconds:
                    yield TerminateEvent(
                        reason="time_budget",
                        iterations_used=state.iteration,
                        final_confidence=state.confidence,
                        total_tool_calls=len(state.tool_executions),
                        final_message=_last_assistant_content,
                    )
                    break

            # Check external cancellation
            if self.is_cancelled:
                yield TerminateEvent(
                    reason="cancelled",
                    iterations_used=state.iteration,
                    final_confidence=state.confidence,
                    total_tool_calls=_tool_calls_count,
                    final_message="Agent cancelled by external signal.",
                )
                break

            # User-supplied composable termination condition runs first
            # so MaxIterations(...) | TextMention("DONE") and friends
            # actually fire before the hard-coded fallbacks.
            if self.config.termination is not None:
                user_stop, user_reason = self.config.termination.check(
                    state,
                    last_message=_last_assistant_content or "",
                    no_tool_calls=_last_no_tool_calls,
                )
                if user_stop:
                    yield TerminateEvent(
                        reason=user_reason or "complete",
                        iterations_used=state.iteration,
                        final_confidence=state.confidence,
                        total_tool_calls=len(state.tool_executions),
                        final_message=_last_assistant_content,
                    )
                    break

            # Check termination conditions
            should_stop, stop_reason = state.should_terminate
            if should_stop and stop_reason:
                if stop_reason == "max_iterations" and state.iteration > 0:
                    # Inject summary request and do one final call WITHOUT tools
                    state = state.with_message(
                        Message.system(
                            "[Iteration Limit Reached]\n"
                            "You have used all available iterations. "
                            "Provide a final summary of your findings and conclusions "
                            "based on the work done so far. Do NOT call any more tools."
                        )
                    )
                    # Call model without tool schemas to force text response.
                    # Use the auxiliary (cheap) model when configured —
                    # this is just a final summary, no need to spend
                    # primary-model budget.
                    messages = list(state.messages)
                    if self._conversation_manager:
                        if hasattr(self._conversation_manager, "async_apply"):
                            messages = await self._conversation_manager.async_apply(messages)
                        else:
                            messages = self._conversation_manager.apply(messages)
                    messages = self._validate_messages(messages)

                    summary_model = self._auxiliary_model or self._model
                    response = await summary_model.complete(
                        messages=messages,
                        tools=None,  # No tools — force text summary
                        temperature=self.config.temperature,
                        max_tokens=self.config.max_tokens,
                    )
                    prompt_toks = response.usage.get("prompt_tokens", 0)
                    completion_toks = response.usage.get("completion_tokens", 0)
                    cache_creation_toks = response.usage.get("cache_creation_input_tokens", 0)
                    cache_read_toks = response.usage.get("cache_read_input_tokens", 0)
                    _total_tokens += prompt_toks + completion_toks
                    state = state.with_token_usage(
                        prompt_toks,
                        completion_toks,
                        cache_creation_tokens=cache_creation_toks,
                        cache_read_tokens=cache_read_toks,
                    )

                    summary = (
                        response.message.content
                        or _last_assistant_content
                        or self._build_fallback_summary(state)
                    )
                    yield TerminateEvent(
                        reason="max_iterations",
                        iterations_used=state.iteration,
                        final_confidence=state.confidence,
                        total_tool_calls=len(state.tool_executions),
                        final_message=summary,
                    )
                    break

                # All other stop reasons: hard stop
                yield TerminateEvent(
                    reason=stop_reason,
                    iterations_used=state.iteration,
                    final_confidence=state.confidence,
                    total_tool_calls=len(state.tool_executions),
                    final_message=_last_assistant_content,
                )
                break

            # Increment iteration
            state = state.next_iteration()

            # Planning: inject plan prompt on first iteration
            if self.config.planning and state.iteration == 1:
                state = state.with_message(
                    Message.system(
                        "[Planning Phase]\n"
                        "Before taking any action, create a step-by-step plan.\n"
                        "Format your plan as a numbered list:\n"
                        "1. First step\n"
                        "2. Second step\n"
                        "...\n\n"
                        "After stating your plan, begin executing step 1.\n"
                        "Do NOT call tools without a plan."
                    )
                )

            # Budget warning in explicit mode — nudge model to complete
            if self.config.completion_mode == "explicit":
                remaining = self.config.max_iterations - state.iteration
                if remaining == 2:
                    state = state.with_message(
                        Message.system(
                            f"[Budget Warning] You have {remaining} iterations left. "
                            "Start wrapping up. Call task_complete(summary='your findings') "
                            "to finish, or you'll hit the iteration limit."
                        )
                    )
                elif remaining == 0:
                    state = state.with_message(
                        Message.system(
                            "[Final Iteration] This is your LAST iteration. "
                            "You MUST call task_complete now with a summary of everything "
                            "you've found. Do NOT call any other tools."
                        )
                    )

            # Get model response
            response, state = await self._get_model_response(state)
            prompt_toks = response.usage.get("prompt_tokens", 0)
            completion_toks = response.usage.get("completion_tokens", 0)
            cache_creation_toks = response.usage.get("cache_creation_input_tokens", 0)
            cache_read_toks = response.usage.get("cache_read_input_tokens", 0)
            _total_tokens += prompt_toks + completion_toks
            state = state.with_token_usage(
                prompt_toks,
                completion_toks,
                cache_creation_tokens=cache_creation_toks,
                cache_read_tokens=cache_read_toks,
            )
            _last_assistant_content = response.message.content
            # Track for the user-supplied termination condition. Updated again
            # below if a Cohere-style text tool call is parsed out of the body.
            _last_no_tool_calls = not response.message.tool_calls

            # Store plan from first iteration if planning enabled
            if self.config.planning and state.iteration == 1 and response.message.content:
                state = state.with_metadata("plan", response.message.content)

            # Emit think event
            yield ThinkEvent(
                iteration=state.iteration,
                reasoning=response.message.content,
                tool_calls=list(response.message.tool_calls),
            )

            # If no structured tool calls, try parsing from text (Cohere fallback)
            if not response.message.tool_calls and response.message.content:
                parsed_calls = self._parse_text_tool_calls(response.message.content)
                if parsed_calls:
                    response = ModelResponse(
                        message=Message(
                            role=response.message.role,
                            content=response.message.content,
                            tool_calls=parsed_calls,
                            tool_call_id=response.message.tool_call_id,
                            name=response.message.name,
                        ),
                        usage=response.usage,
                        stop_reason=response.stop_reason,
                    )
                    # Update the assistant message in state with parsed tool calls
                    messages = list(state.messages)
                    messages[-1] = response.message
                    state = state.model_copy(update={"messages": tuple(messages)})
                    _last_no_tool_calls = False

            # If still no tool calls — in auto mode we're done, in explicit mode we continue
            if not response.message.tool_calls and self.config.completion_mode != "explicit":
                # Apply grounding before final response if enabled
                if (
                    self.config.grounding
                    and self.config.grounding.enabled
                    and self.config.grounding.check_before_final
                    and self._grounding_evaluator
                    and response.message.content
                    and len(state.tool_executions) > 0
                ):
                    grounding_event, state = await self._apply_grounding(
                        state, response.message.content
                    )
                    _grounding_evals += 1
                    yield grounding_event

                    # If grounding fails, inject guidance and continue loop
                    if grounding_event.requires_replan and _grounding_evals <= (
                        self.config.grounding.max_replans
                    ):
                        from locus.reasoning.grounding import GroundingResult

                        replan_guidance = self._grounding_evaluator.get_replan_guidance(
                            GroundingResult(
                                score=grounding_event.score,
                                ungrounded_claims=grounding_event.ungrounded_claims,
                                requires_replan=True,
                            )
                        )
                        state = state.with_message(
                            Message.system(f"[Grounding Check Failed]\n{replan_guidance}")
                        )
                        continue  # Re-enter loop for replanning

                yield TerminateEvent(
                    reason="complete",
                    iterations_used=state.iteration,
                    final_confidence=state.confidence,
                    total_tool_calls=len(state.tool_executions),
                    final_message=response.message.content,
                )
                break

            # Execute tool calls
            tool_results: list[ToolResult] = []
            reasoning_step_tools: list[ToolExecution] = []

            for tool_call in response.message.tool_calls:
                _tool_calls_count += 1

                # Emit tool start event
                yield ToolStartEvent(
                    tool_name=tool_call.name,
                    tool_call_id=tool_call.id,
                    arguments=tool_call.arguments,
                )

                # Run hooks: before_tool_call (event.cancel to skip)
                tool_event = await self._run_before_tool_hooks(
                    tool_call.name, tool_call.id, tool_call.arguments
                )

                # Check for cancel via event
                if tool_event.cancel:
                    cancel_msg = (
                        tool_event.cancel
                        if isinstance(tool_event.cancel, str)
                        else "Cancelled by hook"
                    )
                    result = ToolResult(
                        tool_call_id=tool_call.id,
                        name=tool_call.name,
                        content=cancel_msg,
                        error=None,
                        duration_ms=0.0,
                    )
                    tool_results.append(result)
                    execution = ToolExecution(
                        tool_name=result.name,
                        tool_call_id=result.tool_call_id,
                        arguments=tool_call.arguments,
                        result=result.content,
                    )
                    state = state.with_tool_execution(execution)
                    reasoning_step_tools.append(execution)
                    yield ToolCompleteEvent(
                        tool_name=result.name,
                        tool_call_id=result.tool_call_id,
                        result=result.content,
                        duration_ms=0.0,
                    )
                    continue

                modified_args = tool_event.arguments

                # Idempotent dedup: if the tool declared idempotent=True
                # and an earlier call in this run used the same arguments,
                # reuse the prior result instead of invoking the body.
                # Without this, ``@tool(idempotent=True)`` is silently a no-op
                # for the main Agent.run() path (despite being advertised on
                # the README hero example).
                cached = self._maybe_cached_idempotent_result(
                    state, tool_call.name, modified_args, tool_call.id
                )
                if cached is not None:
                    result = cached
                    # Track + emit immediately, skip executor entirely.
                    tool_results.append(result)
                    execution = ToolExecution(
                        tool_name=result.name,
                        tool_call_id=result.tool_call_id,
                        arguments=modified_args,
                        result=result.content if result.success else None,
                        error=result.error,
                        duration_ms=result.duration_ms,
                        idempotent_cache_hit=True,
                    )
                    state = state.with_tool_execution(execution)
                    reasoning_step_tools.append(execution)
                    yield ToolCompleteEvent(
                        tool_name=result.name,
                        tool_call_id=result.tool_call_id,
                        result=result.content,
                        error=result.error,
                        duration_ms=result.duration_ms,
                    )
                    continue

                # Execute the tool
                start_time = time.perf_counter()
                try:
                    ctx_factory = ToolContextFactory(
                        run_id=state.run_id,
                        agent_id=state.agent_id,
                        iteration=state.iteration,
                        state=state,
                        invocation_metadata=metadata or {},
                    )
                    [result] = await self._executor.execute(
                        [tool_call.model_copy(update={"arguments": modified_args})],
                        self._tool_registry,
                        ctx_factory,
                    )
                except Exception as e:  # noqa: BLE001 — user tool bodies can raise anything; surface as ToolResult.error
                    result = ToolResult(
                        tool_call_id=tool_call.id,
                        name=tool_call.name,
                        content="",
                        error=str(e),
                        duration_ms=(time.perf_counter() - start_time) * 1000,
                    )

                # Check for interrupt marker from ask_user tool
                if result.content and '"__interrupt__": true' in result.content:
                    import json as _json

                    try:
                        interrupt_data = _json.loads(result.content)
                        if interrupt_data.get("__interrupt__"):
                            self._last_run_state = state
                            self._interrupt_state = state
                            self._interrupt_prompt = prompt
                            self._interrupt_thread_id = thread_id
                            self._interrupt_metadata = metadata
                            yield InterruptEvent(
                                question=interrupt_data.get("question", ""),
                                options=interrupt_data.get("options"),
                                interrupt_id=result.tool_call_id,
                            )
                            return  # Pause the generator
                    except (ValueError, KeyError):
                        pass  # Not a valid interrupt marker, continue normally

                # Cap oversized tool results so they don't blow the
                # model's context window. When ``tool_result_store``
                # is configured we offload the full payload through
                # it and inline a recoverable reference key;
                # otherwise we fall back to lossy head-truncation.
                if (
                    self.config.max_tool_result_length > 0
                    and result.content
                    and len(result.content) > self.config.max_tool_result_length
                ):
                    if self.config.tool_result_store is not None:
                        result = self.config.tool_result_store.maybe_offload(
                            result,
                            run_id=state.run_id,
                            iteration=state.iteration,
                        )
                    else:
                        original_len = len(result.content)
                        result = ToolResult(
                            tool_call_id=result.tool_call_id,
                            name=result.name,
                            content=(
                                result.content[: self.config.max_tool_result_length]
                                + f"\n[OUTPUT TRUNCATED — original: {original_len} chars]"
                            ),
                            error=result.error,
                            duration_ms=result.duration_ms,
                        )

                tool_results.append(result)

                # Track execution
                execution = ToolExecution(
                    tool_name=result.name,
                    tool_call_id=result.tool_call_id,
                    arguments=modified_args,
                    result=result.content if result.success else None,
                    error=result.error,
                    duration_ms=result.duration_ms,
                )
                state = state.with_tool_execution(execution)
                reasoning_step_tools.append(execution)

                if result.error:
                    _tool_errors_count += 1

                # Emit tool complete event
                yield ToolCompleteEvent(
                    tool_name=result.name,
                    tool_call_id=result.tool_call_id,
                    result=result.content if result.success else None,
                    error=result.error,
                    duration_ms=result.duration_ms,
                )

                # Run hooks: after_tool_call (may return HookAction to retry)
                after_tool_event = await self._run_after_tool_hooks(
                    result.name,
                    result.content if result.success else None,
                    result.error,
                    tool_call_id=result.tool_call_id,
                    arguments=modified_args,
                )

                # Retry tool if hook set event.retry = True
                if after_tool_event.retry:
                    try:
                        ctx_factory = ToolContextFactory(
                            run_id=state.run_id,
                            agent_id=state.agent_id,
                            iteration=state.iteration,
                            state=state,
                            invocation_metadata=metadata or {},
                        )
                        [result] = await self._executor.execute(
                            [tool_call.model_copy(update={"arguments": modified_args})],
                            self._tool_registry,
                            ctx_factory,
                        )
                    except Exception as e:  # noqa: BLE001 — user tool bodies can raise anything; surface as ToolResult.error
                        result = ToolResult(
                            tool_call_id=tool_call.id,
                            name=tool_call.name,
                            content="",
                            error=str(e),
                            duration_ms=0.0,
                        )

                # Track write/verification for completion gate
                if result.name in self.config.verify_tools:
                    self._has_unverified_writes = True
                if result.name in self.config.verification_tools:
                    self._has_unverified_writes = False

            # Add tool results to messages
            for result in tool_results:
                state = state.with_message(Message.tool(result))

            # Inject verification reminder if write-like tools were used
            if self.config.verify_tools:
                tools_used = {e.tool_name for e in reasoning_step_tools}
                wrote = tools_used & self.config.verify_tools
                if wrote:
                    state = state.with_message(
                        Message.system(
                            "[Verification Reminder] You modified files/data. "
                            "Before completing, verify your changes:\n"
                            "- Run tests or checks if available\n"
                            "- Read back modified files to confirm correctness\n"
                            "- Fix any issues found\n"
                            "Do NOT call task_complete until verified."
                        )
                    )

            # Apply Reflexion if enabled
            if (
                self.config.reflexion
                and self.config.reflexion.enabled
                and self._reflector
                and state.iteration % self.config.reflexion.evaluate_every_n_iterations == 0
            ):
                reflect_event, state = await self._apply_reflexion(state, reasoning_step_tools)
                _reflexion_evals += 1
                yield reflect_event

                # Inject guidance when agent is stuck or looping
                if self.config.reflexion.include_guidance and reflect_event.guidance:
                    guidance = f"[Agent Self-Reflection]\n{reflect_event.guidance}"
                    # Add replan suggestion if planning is enabled and agent is stuck
                    if self.config.planning and reflect_event.assessment in (
                        "stuck",
                        "loop_detected",
                    ):
                        guidance += (
                            "\n\n[Replan] Your current approach isn't working. "
                            "Create a NEW plan with a different strategy, then execute it."
                        )
                    state = state.with_message(Message.system(guidance))

            # Record reasoning step
            reasoning_step = ReasoningStep(
                iteration=state.iteration,
                thought=response.message.content,
                tool_calls=list(response.message.tool_calls),
                tool_results=reasoning_step_tools,
                reflection=None,  # Will be updated if reflexion was applied
                confidence_delta=0.0,
            )
            state = state.with_reasoning_step(reasoning_step)

            # Checkpoint if enabled
            if (
                self.config.checkpointer
                and self.config.checkpoint_every_n_iterations > 0
                and state.iteration % self.config.checkpoint_every_n_iterations == 0
            ):
                _cp_thread = thread_id or state.run_id
                await self.config.checkpointer.save(
                    state,
                    _cp_thread,
                )
                from locus.observability.emit import (  # noqa: PLC0415
                    EV_CHECKPOINT_SAVED,
                    emit,
                )

                await emit(
                    EV_CHECKPOINT_SAVED,
                    thread_id=_cp_thread,
                    iteration=state.iteration,
                    backend=type(self.config.checkpointer).__name__,
                    trigger="every_n_iterations",
                )

    except Exception as e:
        # Emit error termination
        state = state.with_error(str(e))
        yield TerminateEvent(
            reason="error",
            iterations_used=state.iteration,
            final_confidence=state.confidence,
            total_tool_calls=len(state.tool_executions),
        )
        raise

    finally:
        # Clear cancel signal
        if self._cancel_signal is not None:
            self._cancel_signal.clear()

        # Save output to state if output_key configured
        if self.config.output_key:
            final_msg = ""
            for msg in reversed(state.messages):
                if msg.role.value == "assistant" and msg.content:
                    final_msg = msg.content
                    break
            if final_msg:
                state = state.with_metadata(self.config.output_key, final_msg)

        # Store final state for run_sync access
        self._last_run_state = state

        # Run hooks: after_invocation
        _duration_ms = (datetime.now(UTC) - started_at).total_seconds() * 1000  # noqa: F841
        await self._run_after_invocation_hooks(state, len(state.errors) == 0)

        # Extract and persist long-term memories from this session.
        if self._memory_manager is not None:
            await self._memory_manager.on_session_end(state)

        # Final checkpoint
        if self.config.checkpointer and thread_id:
            await self.config.checkpointer.save(state, thread_id)
            from locus.observability.emit import (  # noqa: PLC0415
                EV_CHECKPOINT_SAVED,
                emit,
            )

            await emit(
                EV_CHECKPOINT_SAVED,
                thread_id=thread_id,
                iteration=state.iteration,
                backend=type(self.config.checkpointer).__name__,
                trigger="final",
            )

AgentConfig

Bases: BaseModel

Configuration for an Agent instance.

All parameters can be validated before agent creation.
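Example

Because AgentConfig is a Pydantic model, bad values fail at construction; a minimal sketch of the model-string check shown below:

from pydantic import ValidationError

try:
    AgentConfig(model="gpt-4o")  # missing the "provider:" prefix
except ValidationError as e:
    print(e)  # "Model string must be 'provider:model' ..."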

validate_model classmethod

validate_model(v: Any) -> Any

Validate model is a string or ModelProtocol.

Source code in src/locus/agent/config.py
@field_validator("model", mode="before")
@classmethod
def validate_model(cls, v: Any) -> Any:
    """Validate model is a string or ModelProtocol."""
    if isinstance(v, str):
        if ":" not in v:
            raise ValueError(
                f"Model string must be 'provider:model', got: {v}. Example: 'openai:gpt-4o'"
            )
        return v
    # Assume it's a ModelProtocol instance
    return v

validate_tools classmethod

validate_tools(v: Any) -> list[Any]

Ensure tools is a list.

Source code in src/locus/agent/config.py
@field_validator("tools", mode="before")
@classmethod
def validate_tools(cls, v: Any) -> list[Any]:
    """Ensure tools is a list."""
    if v is None:
        return []
    if not isinstance(v, list):
        return [v]
    return v

with_reflexion

with_reflexion(enabled: bool = True, confidence_threshold: float = 0.85, **kwargs: Any) -> AgentConfig

Return a copy with Reflexion configured.

Source code in src/locus/agent/config.py
def with_reflexion(
    self,
    enabled: bool = True,
    confidence_threshold: float = 0.85,
    **kwargs: Any,
) -> AgentConfig:
    """Return a copy with Reflexion configured."""
    return self.model_copy(
        update={
            "reflexion": ReflexionConfig(
                enabled=enabled,
                confidence_threshold=confidence_threshold,
                **kwargs,
            )
        }
    )

with_grounding

with_grounding(enabled: bool = True, threshold: float = 0.65, **kwargs: Any) -> AgentConfig

Return a copy with Grounding configured.

Source code in src/locus/agent/config.py
def with_grounding(
    self,
    enabled: bool = True,
    threshold: float = 0.65,
    **kwargs: Any,
) -> AgentConfig:
    """Return a copy with Grounding configured."""
    return self.model_copy(
        update={
            "grounding": GroundingConfig(
                enabled=enabled,
                threshold=threshold,
                **kwargs,
            )
        }
    )

with_hooks

with_hooks(*hooks: Any) -> AgentConfig

Return a copy with additional hooks.

Source code in src/locus/agent/config.py
def with_hooks(self, *hooks: Any) -> AgentConfig:
    """Return a copy with additional hooks."""
    return self.model_copy(update={"hooks": [*self.hooks, *hooks]})
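Example

The with_* builders return copies, so they chain; the threshold values here are illustrative:

config = (
    AgentConfig(model="openai:gpt-4o")
    .with_reflexion(confidence_threshold=0.9)
    .with_grounding(threshold=0.7)
)
agent = Agent(config=config)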

AgentResult

Bases: BaseModel

Result from an agent execution.

Contains the final message, state, and execution metrics.

success property

success: bool

Whether execution completed successfully.

confidence property

confidence: float

Final confidence score.

iterations property

iterations: int

Number of iterations used.

text property

text: str

Alias for message.

Many AI SDKs surface the final assistant text as .text; Locus's primary field is .message. Both names now work.

messages property

messages: tuple[Message, ...]

All messages from the conversation.

tool_executions property

tool_executions: tuple[ToolExecution, ...]

All tool executions.

reasoning_steps property

reasoning_steps: tuple[ReasoningStep, ...]

All reasoning steps.

last_assistant_message property

last_assistant_message: str | None

Get the last assistant message content.

parsed_as

parsed_as(schema: type[T]) -> T

Return parsed cast to schema, with a runtime check.

Use this when you want a typed handle on the structured output without casting yourself:

picks = result.parsed_as(VendorList)
for v in picks.vendors:
    ...

Raises ValueError if parsed is None (parse failed or no schema configured) and TypeError if parsed is the wrong concrete type.

Source code in src/locus/agent/result.py
def parsed_as(self, schema: type[T]) -> T:
    """Return ``parsed`` cast to ``schema``, with a runtime check.

    Use this when you want a typed handle on the structured output without
    casting yourself::

        picks = result.parsed_as(VendorList)
        for v in picks.vendors:
            ...

    Raises ``ValueError`` if ``parsed`` is None (parse failed or no schema
    configured) and ``TypeError`` if ``parsed`` is the wrong concrete type.
    """
    if self.parsed is None:
        if self.parse_error:
            raise ValueError(f"AgentResult has no parsed output: {self.parse_error}")
        raise ValueError("AgentResult has no parsed output (no output_schema was configured)")
    if not isinstance(self.parsed, schema):
        raise TypeError(f"Expected {schema.__name__}, got {type(self.parsed).__name__}")
    return self.parsed
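
When a parse failure is possible, wrap the call; VendorList stands in for whatever output schema was configured:

try:
    picks = result.parsed_as(VendorList)
except ValueError as exc:
    # Parse failed or no output_schema was configured
    print(f"no structured output: {exc}")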

to_dict

to_dict() -> dict[str, Any]

Export result to dictionary.

Source code in src/locus/agent/result.py
def to_dict(self) -> dict[str, Any]:
    """Export result to dictionary."""
    return self.model_dump(mode="json")

from_state classmethod

from_state(state: AgentState, stop_reason: StopReason, metrics: ExecutionMetrics | None = None, started_at: datetime | None = None, error: str | None = None, grounding_score: float | None = None, ungrounded_claims: list[str] | None = None, parsed: BaseModel | None = None, parse_error: str | None = None, message: str | None = None, gsar_judgment: Any = None, gsar_score: float | None = None, gsar_decision: str | None = None) -> AgentResult

Create a result from final state.

Extracts the final message from the last assistant response unless an explicit message is supplied (used after a structuring re-prompt).

Source code in src/locus/agent/result.py
@classmethod
def from_state(
    cls,
    state: AgentState,
    stop_reason: StopReason,
    metrics: ExecutionMetrics | None = None,
    started_at: datetime | None = None,
    error: str | None = None,
    grounding_score: float | None = None,
    ungrounded_claims: list[str] | None = None,
    parsed: BaseModel | None = None,
    parse_error: str | None = None,
    message: str | None = None,
    gsar_judgment: Any = None,
    gsar_score: float | None = None,
    gsar_decision: str | None = None,
) -> AgentResult:
    """
    Create a result from final state.

    Extracts the final message from the last assistant response unless an
    explicit ``message`` is supplied (used after a structuring re-prompt).
    """
    # Find the last assistant message if not provided
    final_message = message
    if final_message is None:
        final_message = ""
        for msg in reversed(state.messages):
            if msg.role.value == "assistant":
                final_message = msg.content or ""
                break

    return cls(
        message=final_message,
        state=state,
        stop_reason=stop_reason,
        metrics=metrics or ExecutionMetrics(),
        started_at=started_at or state.started_at,
        completed_at=datetime.now(UTC),
        error=error,
        grounding_score=grounding_score,
        ungrounded_claims=ungrounded_claims or [],
        parsed=parsed,
        parse_error=parse_error,
        gsar_judgment=gsar_judgment,
        gsar_score=gsar_score,
        gsar_decision=gsar_decision,
    )

AgentState

AgentState

Bases: BaseModel

Immutable state for an agent execution.

All updates return a new state instance (functional updates).
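
Because updates are functional, the original state is never mutated. A sketch (user_msg is any Message):

new_state = state.with_message(user_msg)
assert new_state is not state  # a new instance, original untouched
assert len(new_state.messages) == len(state.messages) + 1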

has_tool_loop property

has_tool_loop: bool

Check if agent is stuck in a tool loop across iterations.

Multiple calls to the same tool in one turn (parallel execution) are normal. A loop is the same call signature (name and arguments) repeating across consecutive iterations. The same name with different arguments (paged discovery, sweeping inputs, retrying with a corrected parameter) counts as forward progress and is not a loop.
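
Illustratively, using plain (name, args) tuples rather than the Locus API:

# A loop: identical signature across consecutive iterations
[("search", {"q": "acme"}), ("search", {"q": "acme"})]

# Forward progress, not a loop: same tool, new arguments
[("search", {"q": "acme", "page": 1}), ("search", {"q": "acme", "page": 2})]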

last_tool_calls property

last_tool_calls: list[ToolCall]

Get tool calls from the last assistant message.

called_terminal_tool property

called_terminal_tool: bool

Check if a terminal tool was called.

should_terminate property

should_terminate: tuple[bool, str | None]

Check if the agent should terminate.

In "auto" mode: stops on confidence, no_tools, tool_loop, or terminal_tool. In "explicit" mode: only stops on terminal_tool, max_iterations, or budgets. Use "explicit" for multi-step tasks that require verification before completion.

Returns:

Type Description
tuple[bool, str | None]

Tuple of (should_stop, reason)
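
Callers unpack the tuple directly (the reason string shown is illustrative):

should_stop, reason = state.should_terminate
if should_stop:
    print(f"stopping: {reason}")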

total_tokens property

total_tokens: int

Total tokens used. Returns the real count when usage was tracked; otherwise falls back to a characters/4 estimate.
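
The fallback is the common ~4-characters-per-token heuristic; a sketch of the estimate, not the actual implementation:

estimate = sum(len(m.content or "") for m in state.messages) // 4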

with_message

with_message(message: Message) -> AgentState

Add a message to the conversation.

Source code in src/locus/core/state.py
def with_message(self, message: Message) -> AgentState:
    """Add a message to the conversation."""
    return self.model_copy(
        update={
            "messages": (*self.messages, message),
            "updated_at": datetime.now(UTC),
        }
    )

with_messages

with_messages(messages: list[Message]) -> AgentState

Add multiple messages to the conversation.

Source code in src/locus/core/state.py
def with_messages(self, messages: list[Message]) -> AgentState:
    """Add multiple messages to the conversation."""
    return self.model_copy(
        update={
            "messages": (*self.messages, *messages),
            "updated_at": datetime.now(UTC),
        }
    )

with_iteration

with_iteration(iteration: int) -> AgentState

Update the current iteration.

Source code in src/locus/core/state.py
def with_iteration(self, iteration: int) -> AgentState:
    """Update the current iteration."""
    return self.model_copy(
        update={
            "iteration": iteration,
            "updated_at": datetime.now(UTC),
        }
    )

next_iteration

next_iteration() -> AgentState

Increment iteration counter.

Source code in src/locus/core/state.py
def next_iteration(self) -> AgentState:
    """Increment iteration counter."""
    return self.with_iteration(self.iteration + 1)

with_provider_state

with_provider_state(provider_state: dict[str, Any] | None) -> AgentState

Replace the provider continuation state.

Server-stateful transports (e.g. OCIResponsesModel) return a continuation token in ModelResponse.provider_state; the agent calls this to thread the token into the next turn.

Source code in src/locus/core/state.py
def with_provider_state(self, provider_state: dict[str, Any] | None) -> AgentState:
    """Replace the provider continuation state.

    Server-stateful transports (e.g. ``OCIResponsesModel``) return
    a continuation token in ``ModelResponse.provider_state``; the
    agent calls this to thread the token into the next turn.
    """
    return self.model_copy(
        update={
            "provider_state": provider_state,
            "updated_at": datetime.now(UTC),
        }
    )

with_tool_execution

with_tool_execution(execution: ToolExecution) -> AgentState

Record a tool execution.

Source code in src/locus/core/state.py
def with_tool_execution(self, execution: ToolExecution) -> AgentState:
    """Record a tool execution."""
    return self.model_copy(
        update={
            "tool_executions": (*self.tool_executions, execution),
            "tool_history": (*self.tool_history, execution.tool_name),
            "updated_at": datetime.now(UTC),
        }
    )

with_reasoning_step

with_reasoning_step(step: ReasoningStep) -> AgentState

Add a reasoning step to the trace.

Source code in src/locus/core/state.py
def with_reasoning_step(self, step: ReasoningStep) -> AgentState:
    """Add a reasoning step to the trace."""
    return self.model_copy(
        update={
            "reasoning_steps": (*self.reasoning_steps, step),
            "updated_at": datetime.now(UTC),
        }
    )

with_confidence

with_confidence(confidence: float) -> AgentState

Update confidence score.

Source code in src/locus/core/state.py
def with_confidence(self, confidence: float) -> AgentState:
    """Update confidence score."""
    clamped = max(0.0, min(1.0, confidence))
    return self.model_copy(
        update={
            "confidence": clamped,
            "confidence_history": (*self.confidence_history, clamped),
            "updated_at": datetime.now(UTC),
        }
    )

adjust_confidence

adjust_confidence(delta: float, diminishing: bool = True) -> AgentState

Adjust confidence with optional diminishing returns.

Parameters:

Name Type Description Default
delta float

Raw confidence adjustment (-1.0 to 1.0)

required
diminishing bool

If True, positive deltas are scaled by (1 - current_confidence)

True
Source code in src/locus/core/state.py
def adjust_confidence(self, delta: float, diminishing: bool = True) -> AgentState:
    """
    Adjust confidence with optional diminishing returns.

    Args:
        delta: Raw confidence adjustment (-1.0 to 1.0)
        diminishing: If True, positive deltas are scaled by (1 - current_confidence)
    """
    if diminishing and delta > 0:
        # Diminishing returns: harder to increase confidence as it gets higher
        effective_delta = delta * (1.0 - self.confidence)
    else:
        effective_delta = delta

    return self.with_confidence(self.confidence + effective_delta)
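
A worked example: at confidence 0.8, a positive delta of 0.5 is scaled by (1 - 0.8), giving an effective delta of 0.1, so confidence lands at 0.9 instead of saturating:

state = state.with_confidence(0.8)
state = state.adjust_confidence(0.5)  # effective delta: 0.5 * (1 - 0.8) = 0.1
assert abs(state.confidence - 0.9) < 1e-9

Negative deltas are applied in full regardless of the diminishing flag.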

with_error

with_error(error: str) -> AgentState

Record an error.

Source code in src/locus/core/state.py
def with_error(self, error: str) -> AgentState:
    """Record an error."""
    return self.model_copy(
        update={
            "errors": (*self.errors, error),
            "updated_at": datetime.now(UTC),
        }
    )

with_metadata

with_metadata(key: str, value: Any) -> AgentState

Set a metadata value.

Source code in src/locus/core/state.py
def with_metadata(self, key: str, value: Any) -> AgentState:
    """Set a metadata value."""
    return self.model_copy(
        update={
            "metadata": {**self.metadata, key: value},
            "updated_at": datetime.now(UTC),
        }
    )

with_token_usage

with_token_usage(prompt_tokens: int, completion_tokens: int, cache_creation_tokens: int = 0, cache_read_tokens: int = 0) -> AgentState

Record token usage from a model response.

cache_creation_tokens and cache_read_tokens are populated only when Anthropic returns prompt-cache stats on the response usage (i.e., the AnthropicModel was configured with prompt_cache=True). Both default to 0 for other providers.

Source code in src/locus/core/state.py
def with_token_usage(
    self,
    prompt_tokens: int,
    completion_tokens: int,
    cache_creation_tokens: int = 0,
    cache_read_tokens: int = 0,
) -> AgentState:
    """Record token usage from a model response.

    ``cache_creation_tokens`` and ``cache_read_tokens`` are populated
    only when Anthropic returns prompt-cache stats on the response
    usage (i.e., the AnthropicModel was configured with
    ``prompt_cache=True``). Default 0 for other providers.
    """
    return self.model_copy(
        update={
            "total_tokens_used": self.total_tokens_used + prompt_tokens + completion_tokens,
            "prompt_tokens_used": self.prompt_tokens_used + prompt_tokens,
            "completion_tokens_used": self.completion_tokens_used + completion_tokens,
            "cache_creation_tokens_used": (
                self.cache_creation_tokens_used + cache_creation_tokens
            ),
            "cache_read_tokens_used": self.cache_read_tokens_used + cache_read_tokens,
            "updated_at": datetime.now(UTC),
        }
    )
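
A sketch recording usage from one response; the cache arguments stay at 0 unless Anthropic prompt caching is active:

state = state.with_token_usage(
    prompt_tokens=1200,
    completion_tokens=350,
    cache_read_tokens=1000,  # non-zero only with prompt_cache=True on AnthropicModel
)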

to_checkpoint

to_checkpoint() -> dict[str, Any]

Serialize state for checkpointing.

Source code in src/locus/core/state.py
def to_checkpoint(self) -> dict[str, Any]:
    """Serialize state for checkpointing."""
    return self.model_dump(mode="json")

from_checkpoint classmethod

from_checkpoint(data: dict[str, Any]) -> AgentState

Restore state from checkpoint.

Source code in src/locus/core/state.py
@classmethod
def from_checkpoint(cls, data: dict[str, Any]) -> AgentState:
    """Restore state from checkpoint."""
    return cls.model_validate(data)
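
The two methods round-trip through a plain JSON-safe dict, so any backend that can persist a dict can serve as a checkpoint store:

data = state.to_checkpoint()                 # dict[str, Any], JSON-serializable
restored = AgentState.from_checkpoint(data)
assert restored.iteration == state.iteration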