Skip to content

ant_ai.llm.integrations.openai_llm

OpenAIChat

Bases: ChatLLM

Interface for a language model that generates chat responses using OpenAI's API.

Source code in src/ant_ai/llm/integrations/openai_llm.py
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
class OpenAIChat(ChatLLM):
    """
    Interface for a language model that generates chat responses using OpenAI's API.

    Holds both a synchronous and an asynchronous OpenAI client so a single
    instance can serve blocking calls (``invoke``), awaitable calls
    (``ainvoke``), and token streaming (``stream``).
    """

    def __init__(self, model: str = "gpt-5-nano", api_key: str | None = None):
        """
        Args:
            model: OpenAI model identifier used for every request made by
                this instance.
            api_key: Explicit API key. When ``None``, the OpenAI SDK falls
                back to its own resolution (e.g. the ``OPENAI_API_KEY``
                environment variable).
        """
        self.model: str = model
        # Sync and async clients are kept side by side; both are built from
        # the same credentials so invoke/ainvoke/stream behave consistently.
        self.client = OpenAI(api_key=api_key)
        self.async_client = AsyncOpenAI(api_key=api_key)

    @staticmethod
    def _to_openai_messages(
        messages: list[Message], tools: list | None = None
    ) -> list[ChatCompletionMessageParam]:
        """Convert internal ``Message`` objects into the dict shape the OpenAI
        Python SDK expects.

        The internal ``kind`` field is stripped before sending. The ``cast``
        exists only to satisfy static type checkers.

        NOTE(review): ``tools`` is accepted but never used here — retained
        for signature compatibility; confirm whether tool metadata was ever
        meant to influence the conversion.
        """
        return cast(
            list[ChatCompletionMessageParam],
            [m.model_dump(exclude={"kind"}) for m in messages],
        )

    def invoke(
        self,
        messages: list[Message],
        *,
        ctx: InvocationContext | None = None,
        tools: list | None = None,
        response_format: dict | type[BaseModel] | None = None,
    ) -> ChatLLMResponse:
        """Synchronously request a single chat completion.

        Args:
            messages: Conversation history to send.
            ctx: Invocation context; currently unused by this implementation.
            tools: Optional tool definitions forwarded verbatim to the API.
            response_format: Optional response-format constraint forwarded to
                the API. NOTE(review): ``chat.completions.create`` expects a
                dict here; passing a ``BaseModel`` subclass may require the
                SDK's structured-output support — confirm against the
                installed ``openai`` version.

        Returns:
            A ``ChatLLMResponse`` wrapping the assistant's message; an empty
            string when the API returns no content.
        """
        openai_messages = self._to_openai_messages(messages)

        # _drop_none keeps the request payload free of explicit nulls so the
        # API's own defaults apply.
        response = self.client.chat.completions.create(
            model=self.model,
            messages=openai_messages,
            **_drop_none(
                tools=tools,
                response_format=response_format,
            ),
        )
        content = response.choices[0].message.content or ""
        return ChatLLMResponse(message=Message(role="assistant", content=content))

    async def ainvoke(
        self,
        messages: list[Message],
        *,
        ctx: InvocationContext | None = None,
        tools: list | None = None,
        response_format: dict | type[BaseModel] | None = None,
    ) -> ChatLLMResponse:
        """Async counterpart of :meth:`invoke` — same parameters, same
        return shape, but awaitable and backed by the async client.
        """
        openai_messages = self._to_openai_messages(messages)

        response = await self.async_client.chat.completions.create(
            model=self.model,
            messages=openai_messages,
            **_drop_none(
                tools=tools,
                response_format=response_format,
            ),
        )
        content = response.choices[0].message.content or ""
        return ChatLLMResponse(message=Message(role="assistant", content=content))

    def stream(
        self,
        messages: list[Message],
        *,
        ctx: InvocationContext | None = None,
        tools: list | None = None,
        response_format: dict | type[BaseModel] | None = None,
    ) -> AsyncIterator[ChatLLMStreamChunk]:
        """Stream a chat completion as incremental content chunks.

        Returns an async iterator immediately (this method itself is
        synchronous); the request is issued lazily when iteration begins.
        Chunks without textual content are skipped, so consumers only see
        non-empty deltas.
        """
        openai_messages = self._to_openai_messages(messages)

        async def gen() -> AsyncIterator[ChatLLMStreamChunk]:
            stream = await self.async_client.chat.completions.create(
                model=self.model,
                messages=openai_messages,
                stream=True,
                **_drop_none(
                    tools=tools,
                    response_format=response_format,
                ),
            )

            async for chunk in stream:
                # Some stream chunks carry no choices at all (e.g. the final
                # usage chunk when usage reporting is enabled) — indexing
                # choices[0] unguarded would raise IndexError.
                if not chunk.choices:
                    continue

                delta = chunk.choices[0].delta.content
                if not delta:
                    continue

                yield ChatLLMStreamChunk(
                    delta=MessageChunk(role="assistant", delta=delta)
                )

        return gen()