ant_ai.llm.protocol

ChatLLM

Bases: Protocol

Interface for a language model that generates chat responses.

Source code in src/ant_ai/llm/protocol.py
```python
class ChatLLM(Protocol):
    """Interface for a language model that generates chat responses."""

    def invoke(
        self,
        messages: list[Message],
        *,
        ctx: InvocationContext | None = None,
        tools: list | None = None,
        response_format: dict | type[BaseModel] | None = None,
    ) -> ChatLLMResponse:
        """Send messages and return a complete response synchronously.

        Args:
            messages: Conversation history to send to the model.
            ctx: Invocation context, or None if not available.
            tools: Tool schemas to expose to the model, or None for no tools.
            response_format: Constrain the output to a JSON schema or Pydantic model.

        Returns:
            The complete model response.
        """
        raise NotImplementedError

    async def ainvoke(
        self,
        messages: list[Message],
        *,
        ctx: InvocationContext | None = None,
        tools: list | None = None,
        response_format: dict | type[BaseModel] | None = None,
    ) -> ChatLLMResponse:
        """Send messages and return a complete response asynchronously.

        Args:
            messages: Conversation history to send to the model.
            ctx: Invocation context, or None if not available.
            tools: Tool schemas to expose to the model, or None for no tools.
            response_format: Constrain the output to a JSON schema or Pydantic model.

        Returns:
            The complete model response.
        """
        raise NotImplementedError

    def stream(
        self,
        messages: list[Message],
        *,
        ctx: InvocationContext | None = None,
        tools: list | None = None,
        response_format: dict | type[BaseModel] | None = None,
    ) -> AsyncIterator[ChatLLMStreamChunk]:
        """Send messages and stream the response as chunks.

        Args:
            messages: Conversation history to send to the model.
            ctx: Invocation context, or None if not available.
            tools: Tool schemas to expose to the model, or None for no tools.
            response_format: Constrain the output to a JSON schema or Pydantic model.

        Returns:
            An async iterator of response chunks.
        """
        raise NotImplementedError
```
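
Because `ChatLLM` is a `typing.Protocol`, any class whose methods match these signatures satisfies it structurally; no subclassing is needed. Below is a minimal sketch of a conforming implementation. It assumes `Message`, `ChatLLMResponse`, and `ChatLLMStreamChunk` are importable alongside `ChatLLM` and expose a `content` field; those import paths and fields are illustrative guesses, not confirmed by this page.

```python
from collections.abc import AsyncIterator

# Assumed import path and constructor shapes: this page only shows that
# protocol.py references these names, not where they are defined or
# which fields they accept.
from ant_ai.llm.protocol import (
    ChatLLM,
    ChatLLMResponse,
    ChatLLMStreamChunk,
    Message,
)


class EchoLLM:
    """Toy implementation that echoes the last message back."""

    def invoke(
        self,
        messages: list[Message],
        *,
        ctx=None,
        tools: list | None = None,
        response_format=None,
    ) -> ChatLLMResponse:
        # Pretend the "model" simply repeats the last message.
        return ChatLLMResponse(content=messages[-1].content)

    async def ainvoke(
        self,
        messages: list[Message],
        *,
        ctx=None,
        tools: list | None = None,
        response_format=None,
    ) -> ChatLLMResponse:
        # No real I/O here, so delegating to the sync path is enough.
        return self.invoke(
            messages, ctx=ctx, tools=tools, response_format=response_format
        )

    def stream(
        self,
        messages: list[Message],
        *,
        ctx=None,
        tools: list | None = None,
        response_format=None,
    ) -> AsyncIterator[ChatLLMStreamChunk]:
        # Per the protocol, `stream` is a plain method that *returns* an
        # async iterator, so build one with an inner async generator.
        async def _chunks() -> AsyncIterator[ChatLLMStreamChunk]:
            for word in messages[-1].content.split():
                yield ChatLLMStreamChunk(content=word + " ")

        return _chunks()


# Structural typing in action: EchoLLM never inherits from ChatLLM, yet
# a type checker accepts the assignment because the signatures match.
llm: ChatLLM = EchoLLM()
```

Implementations are free to add retries, logging, or provider-specific behavior; the protocol only pins down the call shapes.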

invoke

```python
invoke(
    messages: list[Message],
    *,
    ctx: InvocationContext | None = None,
    tools: list | None = None,
    response_format: dict | type[BaseModel] | None = None,
) -> ChatLLMResponse
```

Send messages and return a complete response synchronously.

Parameters:

- `messages` (`list[Message]`, required): Conversation history to send to the model.
- `ctx` (`InvocationContext | None`, default `None`): Invocation context, or `None` if not available.
- `tools` (`list | None`, default `None`): Tool schemas to expose to the model, or `None` for no tools.
- `response_format` (`dict | type[BaseModel] | None`, default `None`): Constrain the output to a JSON schema or Pydantic model.

Returns:

- `ChatLLMResponse`: The complete model response.

Source code in src/ant_ai/llm/protocol.py
```python
def invoke(
    self,
    messages: list[Message],
    *,
    ctx: InvocationContext | None = None,
    tools: list | None = None,
    response_format: dict | type[BaseModel] | None = None,
) -> ChatLLMResponse:
    """Send messages and return a complete response synchronously.

    Args:
        messages: Conversation history to send to the model.
        ctx: Invocation context, or None if not available.
        tools: Tool schemas to expose to the model, or None for no tools.
        response_format: Constrain the output to a JSON schema or Pydantic model.

    Returns:
        The complete model response.
    """
    raise NotImplementedError
```
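
A hedged usage sketch, reusing the hypothetical `EchoLLM` from the class-level example above. The `Message(role=..., content=...)` constructor shape is an assumption; `response_format` accepting a Pydantic model class comes directly from the signature.

```python
from pydantic import BaseModel


class Answer(BaseModel):
    text: str


llm: ChatLLM = EchoLLM()  # hypothetical implementation from the sketch above

# Plain call: full conversation in, complete response out.
response = llm.invoke([Message(role="user", content="Hello!")])

# Constrained call: pass a Pydantic model class (or a JSON-schema dict)
# so a real implementation can force the output into that shape. Whether
# the result comes back parsed or as JSON text is implementation-defined.
structured = llm.invoke(
    [Message(role="user", content="Answer as JSON.")],
    response_format=Answer,
)
```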

ainvoke async

```python
ainvoke(
    messages: list[Message],
    *,
    ctx: InvocationContext | None = None,
    tools: list | None = None,
    response_format: dict | type[BaseModel] | None = None,
) -> ChatLLMResponse
```

Send messages and return a complete response asynchronously.

Parameters:

- `messages` (`list[Message]`, required): Conversation history to send to the model.
- `ctx` (`InvocationContext | None`, default `None`): Invocation context, or `None` if not available.
- `tools` (`list | None`, default `None`): Tool schemas to expose to the model, or `None` for no tools.
- `response_format` (`dict | type[BaseModel] | None`, default `None`): Constrain the output to a JSON schema or Pydantic model.

Returns:

- `ChatLLMResponse`: The complete model response.

Source code in src/ant_ai/llm/protocol.py
```python
async def ainvoke(
    self,
    messages: list[Message],
    *,
    ctx: InvocationContext | None = None,
    tools: list | None = None,
    response_format: dict | type[BaseModel] | None = None,
) -> ChatLLMResponse:
    """Send messages and return a complete response asynchronously.

    Args:
        messages: Conversation history to send to the model.
        ctx: Invocation context, or None if not available.
        tools: Tool schemas to expose to the model, or None for no tools.
        response_format: Constrain the output to a JSON schema or Pydantic model.

    Returns:
        The complete model response.
    """
    raise NotImplementedError
```
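
`ainvoke` takes the same arguments as `invoke` but must be awaited, which lets callers overlap several model calls on one event loop. A sketch, again reusing the hypothetical `EchoLLM` from above:

```python
import asyncio


async def main() -> None:
    llm: ChatLLM = EchoLLM()  # hypothetical implementation from above
    # Fire two calls concurrently; each awaits its own complete response.
    first, second = await asyncio.gather(
        llm.ainvoke([Message(role="user", content="One")]),
        llm.ainvoke([Message(role="user", content="Two")]),
    )
    print(first, second)


asyncio.run(main())
```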

stream

```python
stream(
    messages: list[Message],
    *,
    ctx: InvocationContext | None = None,
    tools: list | None = None,
    response_format: dict | type[BaseModel] | None = None,
) -> AsyncIterator[ChatLLMStreamChunk]
```

Send messages and stream the response as chunks.

Parameters:

- `messages` (`list[Message]`, required): Conversation history to send to the model.
- `ctx` (`InvocationContext | None`, default `None`): Invocation context, or `None` if not available.
- `tools` (`list | None`, default `None`): Tool schemas to expose to the model, or `None` for no tools.
- `response_format` (`dict | type[BaseModel] | None`, default `None`): Constrain the output to a JSON schema or Pydantic model.

Returns:

- `AsyncIterator[ChatLLMStreamChunk]`: An async iterator of response chunks.

Source code in src/ant_ai/llm/protocol.py
```python
def stream(
    self,
    messages: list[Message],
    *,
    ctx: InvocationContext | None = None,
    tools: list | None = None,
    response_format: dict | type[BaseModel] | None = None,
) -> AsyncIterator[ChatLLMStreamChunk]:
    """Send messages and stream the response as chunks.

    Args:
        messages: Conversation history to send to the model.
        ctx: Invocation context, or None if not available.
        tools: Tool schemas to expose to the model, or None for no tools.
        response_format: Constrain the output to a JSON schema or Pydantic model.

    Returns:
        An async iterator of response chunks.
    """
    raise NotImplementedError
```
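
Note that `stream` is declared as a regular method that returns an `AsyncIterator`, so callers do not await the call itself; they iterate the returned object with `async for`. A consumption sketch using the hypothetical `EchoLLM` from above:

```python
import asyncio


async def main() -> None:
    llm: ChatLLM = EchoLLM()  # hypothetical implementation from above
    # No `await` on the call itself: iterate the returned async iterator.
    async for chunk in llm.stream(
        [Message(role="user", content="Stream these words back")]
    ):
        print(chunk)  # each chunk arrives as a ChatLLMStreamChunk


asyncio.run(main())
```

Accumulating chunk text into a final response is left to the caller or to the concrete implementation.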