Skip to content

ant_ai.hooks.protocol

PostModelPass dataclass

Hook accepted the result; continue with the original result.

Source code in src/ant_ai/hooks/protocol.py
15
16
17
18
19
@dataclass(frozen=True)
class PostModelPass:
    """Hook accepted the result; continue with the original result."""

    # The step result the hook approved, carried forward unchanged.
    result: StepResult

PostModelBlock dataclass

Hook hard-blocked. Raise HookBlockedError immediately.

Source code in src/ant_ai/hooks/protocol.py
22
23
24
25
26
@dataclass(frozen=True)
class PostModelBlock:
    """Hook hard-blocked. Raise HookBlockedError immediately."""

    # Optional human-readable explanation for the block; None when unspecified.
    reason: str | None

PostModelRetry dataclass

Hook requests a retry with the given critique injected into state.

Source code in src/ant_ai/hooks/protocol.py
29
30
31
32
33
@dataclass(frozen=True)
class PostModelRetry:
    """Hook requests a retry with the given critique injected into state."""

    # Critique text to inject into state for the retry attempt; may be None.
    reason: str | None

PostModelFallback dataclass

Hook provides a pre-built safe result, bypassing further validation.

Source code in src/ant_ai/hooks/protocol.py
36
37
38
39
40
@dataclass(frozen=True)
class PostModelFallback:
    """Hook provides a pre-built safe result, bypassing further validation."""

    # Replacement result to use in place of the model's output.
    result: StepResult

AgentHook

Base class for agent lifecycle hooks.

Override only the methods you need — all defaults are no-ops so partial implementations work without any boilerplate.

Lifecycle order per agent invocation:

before_agent
  (loop)
    before_model
    wrap_model_call → LLM
    after_model → PostModelDecision
    [if tool calls] wrap_tool_call → tools
    [on retry: back to wrap_model_call, skipping before_model]
after_agent

Example:

class ContentGuardrailHook(AgentHook):
    async def after_model(self, result, ctx):
        if "banned" in result.output.raw:
            return PostModelRetry(reason="Contains banned content")
        return PostModelPass(result=result)
Source code in src/ant_ai/hooks/protocol.py
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
class AgentHook:
    """Base class for agent lifecycle hooks.

    Every callback here is a no-op (or a transparent pass-through), so a
    subclass overrides only the hooks it actually needs and inherits safe
    defaults for everything else.

    Order of callbacks within a single agent invocation:

        before_agent
          (loop)
            before_model
            wrap_model_call → LLM
            after_model → PostModelDecision
            [if tool calls] wrap_tool_call → tools
            [on retry: back to wrap_model_call, skipping before_model]
        after_agent

    Example:

        class ContentGuardrailHook(AgentHook):
            async def after_model(self, result, ctx):
                if "banned" in result.output.raw:
                    return PostModelRetry(reason="Contains banned content")
                return PostModelPass(result=result)
    """

    async def before_agent(self, state: State, ctx: InvocationContext | None) -> None:
        """Run once, before the agent begins processing.

        Args:
            state: The agent state at the start of the invocation.
            ctx: The invocation context, or None when unavailable.
        """

    async def after_agent(self, state: State, ctx: InvocationContext | None) -> None:
        """Run once, after the agent finishes (or is closed).

        Args:
            state: The agent state at the end of the invocation.
            ctx: The invocation context, or None when unavailable.
        """

    async def before_model(self, state: State, ctx: InvocationContext | None) -> None:
        """Run at the top of each outer loop step; skipped on retry attempts.

        Args:
            state: The agent state for this step.
            ctx: The invocation context, or None when unavailable.
        """

    async def after_model(
        self,
        result: StepResult,
        ctx: InvocationContext | None,
    ) -> PostModelDecision:
        """Inspect the result of an LLM step and decide how to proceed.

        Args:
            result: The step result the LLM just produced.
            ctx: The invocation context, or None when unavailable.

        Returns:
            One of the four decisions: `PostModelPass` (accept),
            `PostModelBlock` (raise an error), `PostModelRetry` (re-run the
            step with a critique), or `PostModelFallback` (substitute a safe
            pre-built result).
        """
        return PostModelPass(result=result)

    async def wrap_model_call(
        self,
        call_next: WrapCall,
        state: State,
        ctx: InvocationContext | None,
    ) -> AsyncGenerator:
        """Interpose on each individual LLM call.

        Implementations must re-yield everything `call_next` produces, or
        substitute their own items; yielding a `StepResult` directly skips
        the LLM call altogether.

        Args:
            call_next: The next callable in the wrapper chain.
            state: The agent state for this call.
            ctx: The invocation context, or None when unavailable.
        """
        async for event in call_next(state, ctx):
            yield event

    async def wrap_tool_call(
        self,
        call_next: WrapCall,
        state: State,
        ctx: InvocationContext | None,
    ) -> AsyncGenerator:
        """Interpose on the tool execution step.

        Args:
            call_next: The next callable in the wrapper chain.
            state: The agent state for this call.
            ctx: The invocation context, or None when unavailable.
        """
        async for event in call_next(state, ctx):
            yield event

before_agent async

before_agent(
    state: State, ctx: InvocationContext | None
) -> None

Called once before the agent starts processing.

Parameters:

Name Type Description Default
state State

Current agent state.

required
ctx InvocationContext | None

Invocation context, or None if not available.

required
Source code in src/ant_ai/hooks/protocol.py
74
75
76
77
78
79
80
async def before_agent(self, state: State, ctx: InvocationContext | None) -> None:
    """Run exactly once before the agent starts its processing loop.

    Args:
        state: Agent state at invocation start.
        ctx: Invocation context when available, else None.
    """

after_agent async

after_agent(
    state: State, ctx: InvocationContext | None
) -> None

Called once after the agent finishes (or is closed).

Parameters:

Name Type Description Default
state State

Current agent state.

required
ctx InvocationContext | None

Invocation context, or None if not available.

required
Source code in src/ant_ai/hooks/protocol.py
82
83
84
85
86
87
88
async def after_agent(self, state: State, ctx: InvocationContext | None) -> None:
    """Run exactly once after the agent finishes (or is closed).

    Args:
        state: Agent state at invocation end.
        ctx: Invocation context when available, else None.
    """

before_model async

before_model(
    state: State, ctx: InvocationContext | None
) -> None

Called once per outer loop step. Not called during retry attempts.

Parameters:

Name Type Description Default
state State

Current agent state.

required
ctx InvocationContext | None

Invocation context, or None if not available.

required
Source code in src/ant_ai/hooks/protocol.py
90
91
92
93
94
95
96
async def before_model(self, state: State, ctx: InvocationContext | None) -> None:
    """Run at the start of each outer loop step; not invoked on retries.

    Args:
        state: Agent state for this step.
        ctx: Invocation context when available, else None.
    """

after_model async

after_model(
    result: StepResult, ctx: InvocationContext | None
) -> PostModelDecision

Called after each LLM step. Return a decision to control flow.

Parameters:

Name Type Description Default
result StepResult

The step result produced by the LLM step.

required
ctx InvocationContext | None

Invocation context, or None if not available.

required

Returns:

Type Description
PostModelDecision

A PostModelPass to accept, PostModelBlock to raise an error,

PostModelDecision

PostModelRetry to re-run the step with a critique, or

PostModelDecision

PostModelFallback to substitute a safe pre-built result.

Source code in src/ant_ai/hooks/protocol.py
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
async def after_model(
    self,
    result: StepResult,
    ctx: InvocationContext | None,
) -> PostModelDecision:
    """Decide what to do with the result of an LLM step.

    Args:
        result: The freshly produced step result.
        ctx: Invocation context when available, else None.

    Returns:
        `PostModelPass` to accept the result as-is, `PostModelBlock` to
        abort with an error, `PostModelRetry` to re-run the step with a
        critique, or `PostModelFallback` to swap in a safe replacement.
    """
    return PostModelPass(result=result)

wrap_model_call async

wrap_model_call(
    call_next: WrapCall,
    state: State,
    ctx: InvocationContext | None,
) -> AsyncGenerator

Wrapper around each individual LLM call.

Must yield every item produced by call_next, or substitute its own. Yield a StepResult directly to short-circuit the LLM call entirely.

Parameters:

Name Type Description Default
call_next WrapCall

The next callable in the chain to delegate to.

required
state State

Current agent state.

required
ctx InvocationContext | None

Invocation context, or None if not available.

required
Source code in src/ant_ai/hooks/protocol.py
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
async def wrap_model_call(
    self,
    call_next: WrapCall,
    state: State,
    ctx: InvocationContext | None,
) -> AsyncGenerator:
    """Interpose on a single LLM call.

    Everything produced by `call_next` must be re-yielded, or replaced with
    substitute items; yielding a `StepResult` directly bypasses the LLM
    call entirely.

    Args:
        call_next: The next callable in the wrapper chain.
        state: Agent state for this call.
        ctx: Invocation context when available, else None.
    """
    async for event in call_next(state, ctx):
        yield event

wrap_tool_call async

wrap_tool_call(
    call_next: WrapCall,
    state: State,
    ctx: InvocationContext | None,
) -> AsyncGenerator

Wrapper around the tool execution step.

Parameters:

Name Type Description Default
call_next WrapCall

The next callable in the chain to delegate to.

required
state State

Current agent state.

required
ctx InvocationContext | None

Invocation context, or None if not available.

required
Source code in src/ant_ai/hooks/protocol.py
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
async def wrap_tool_call(
    self,
    call_next: WrapCall,
    state: State,
    ctx: InvocationContext | None,
) -> AsyncGenerator:
    """Interpose on the tool execution step.

    Args:
        call_next: The next callable in the wrapper chain.
        state: Agent state for this call.
        ctx: Invocation context when available, else None.
    """
    async for event in call_next(state, ctx):
        yield event