Adapters API Reference

PydanticAIAdapter

Wrap a pydantic-ai Agent to conform to the agent callable contract.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `agent` | `Any` | A pydantic-ai `Agent` instance. | *required* |
Example

```python
import pytest
from pydantic_ai import Agent
from pytest_agent_eval.adapters.pydantic_ai import PydanticAIAdapter

my_agent = Agent("openai:gpt-4o", system_prompt="You are helpful.")

@pytest.fixture
def llm_eval_agent():
    return PydanticAIAdapter(my_agent)
```
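
Once constructed, the adapter itself satisfies the agent callable contract: it takes an OpenAI-style message history and returns a `(reply, tool_calls)` tuple. A minimal sketch of calling it directly, with a purely illustrative question (running it requires valid model credentials):

```python
import asyncio

adapter = PydanticAIAdapter(my_agent)

# The history is a list of OpenAI-style message dicts; the adapter
# treats the last entry as the current user message.
history = [{"role": "user", "content": "What is 2 + 2?"}]

reply, tool_calls = asyncio.run(adapter(history))
print(reply)       # the agent's text reply
print(tool_calls)  # names of any tools the agent invoked
```
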
Source code in src/pytest_agent_eval/adapters/pydantic_ai.py
class PydanticAIAdapter:
    """Wrap a pydantic-ai Agent to conform to the agent callable contract.

    Args:
        agent: A pydantic-ai ``Agent`` instance.

    Example:
        ```python
        import pytest
        from pydantic_ai import Agent
        from pytest_agent_eval.adapters.pydantic_ai import PydanticAIAdapter

        my_agent = Agent("openai:gpt-4o", system_prompt="You are helpful.")

        @pytest.fixture
        def llm_eval_agent():
            return PydanticAIAdapter(my_agent)
        ```
    """

    def __init__(self, agent: Any) -> None:
        """Store the pydantic-ai agent to delegate calls to."""
        self._agent = agent

    async def __call__(self, history: list[dict[str, Any]]) -> tuple[str, list[str]]:
        """Run the agent and normalise output to (reply, tool_calls)."""
        user_msg = history[-1]["content"] if history else ""
        message_history = history[:-1]
        result = await self._agent.run(user_msg, message_history=message_history)

        tool_calls = [msg.tool_name for msg in result.all_messages() if hasattr(msg, "tool_name") and msg.tool_name]

        reply = result.output if isinstance(result.output, str) else str(result.output)
        return reply, tool_calls

async __call__(history: list[dict[str, Any]]) -> tuple[str, list[str]]

Run the agent and normalise output to (reply, tool_calls).

Source code in src/pytest_agent_eval/adapters/pydantic_ai.py
async def __call__(self, history: list[dict[str, Any]]) -> tuple[str, list[str]]:
    """Run the agent and normalise output to (reply, tool_calls)."""
    user_msg = history[-1]["content"] if history else ""
    message_history = history[:-1]
    result = await self._agent.run(user_msg, message_history=message_history)

    tool_calls = [msg.tool_name for msg in result.all_messages() if hasattr(msg, "tool_name") and msg.tool_name]

    reply = result.output if isinstance(result.output, str) else str(result.output)
    return reply, tool_calls

__init__(agent: Any) -> None

Store the pydantic-ai agent to delegate calls to.

Source code in src/pytest_agent_eval/adapters/pydantic_ai.py
def __init__(self, agent: Any) -> None:
    """Store the pydantic-ai agent to delegate calls to."""
    self._agent = agent

LangChainAdapter

Wrap a LangChain Runnable to conform to the agent callable contract.

Expects the runnable to accept {"messages": [...]} and return an AIMessage or an object with a content attribute.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `runnable` | `Any` | A LangChain Runnable (e.g. a compiled graph or chain). | *required* |
Example

```python
import pytest

from pytest_agent_eval.adapters.langchain import LangChainAdapter

@pytest.fixture
def llm_eval_agent():
    return LangChainAdapter(my_langchain_graph)
```
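
To make the expected contract concrete, here is a minimal sketch of a compatible runnable built from `langchain_core` primitives; the echo behaviour and names are purely illustrative:

```python
from langchain_core.messages import AIMessage
from langchain_core.runnables import RunnableLambda

def _echo(state: dict) -> AIMessage:
    # The adapter invokes the runnable with {"messages": [...]} and reads
    # .content (and .tool_calls, if any) off the returned message.
    last = state["messages"][-1]
    return AIMessage(content=f"You said: {last['content']}")

adapter = LangChainAdapter(RunnableLambda(_echo))
```
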
Source code in src/pytest_agent_eval/adapters/langchain.py
class LangChainAdapter:
    """Wrap a LangChain Runnable to conform to the agent callable contract.

    Expects the runnable to accept ``{"messages": [...]}`` and return an
    ``AIMessage`` or object with a ``content`` attribute.

    Args:
        runnable: A LangChain Runnable (e.g. a compiled graph or chain).

    Example:
        ```python
        import pytest

        from pytest_agent_eval.adapters.langchain import LangChainAdapter

        @pytest.fixture
        def llm_eval_agent():
            return LangChainAdapter(my_langchain_graph)
        ```
    """

    def __init__(self, runnable: Any) -> None:
        """Store the LangChain runnable to delegate calls to."""
        self._runnable = runnable

    async def __call__(self, history: list[dict[str, Any]]) -> tuple[str, list[str]]:
        """Run the runnable and normalise output to (reply, tool_calls)."""
        result = await self._runnable.ainvoke({"messages": history})

        if hasattr(result, "content"):
            reply = str(result.content)
            tool_calls = [tc["name"] for tc in getattr(result, "tool_calls", []) or []]
        elif isinstance(result, dict) and "messages" in result:
            last = result["messages"][-1]
            reply = str(last.content)
            tool_calls = [tc["name"] for tc in getattr(last, "tool_calls", []) or []]
        else:
            reply = str(result)
            tool_calls = []

        return reply, tool_calls

async __call__(history: list[dict[str, Any]]) -> tuple[str, list[str]]

Run the runnable and normalise output to (reply, tool_calls).

Source code in src/pytest_agent_eval/adapters/langchain.py
async def __call__(self, history: list[dict[str, Any]]) -> tuple[str, list[str]]:
    """Run the runnable and normalise output to (reply, tool_calls)."""
    result = await self._runnable.ainvoke({"messages": history})

    if hasattr(result, "content"):
        reply = str(result.content)
        tool_calls = [tc["name"] for tc in getattr(result, "tool_calls", []) or []]
    elif isinstance(result, dict) and "messages" in result:
        last = result["messages"][-1]
        reply = str(last.content)
        tool_calls = [tc["name"] for tc in getattr(last, "tool_calls", []) or []]
    else:
        reply = str(result)
        tool_calls = []

    return reply, tool_calls
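
The elif branch handles LangGraph-style results, where the runnable returns a state dict whose messages list ends with the model's reply. A sketch of that shape with an illustrative tool-call payload:

```python
from langchain_core.messages import AIMessage

# A typical compiled-graph result: the adapter reads the last message's
# content and the names of any tool calls attached to it.
graph_result = {
    "messages": [
        AIMessage(
            content="Checking the weather now.",
            tool_calls=[{"name": "get_weather", "args": {"city": "Paris"}, "id": "call_1"}],
        )
    ]
}
# Normalised by the adapter to:
#   reply == "Checking the weather now."
#   tool_calls == ["get_weather"]
```
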

__init__(runnable: Any) -> None

Store the LangChain runnable to delegate calls to.

Source code in src/pytest_agent_eval/adapters/langchain.py
def __init__(self, runnable: Any) -> None:
    """Store the LangChain runnable to delegate calls to."""
    self._runnable = runnable

OpenAIAdapter

Wrap an AsyncOpenAI client to conform to the agent callable contract.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `client` | `Any` | An `openai.AsyncOpenAI` or `openai.AsyncAzureOpenAI` instance. | *required* |
| `model` | `str` | Model name to use for completions (e.g. `"gpt-4o"`). | *required* |
| `system_prompt` | `str \| None` | Optional system prompt prepended to every call. | `None` |
Example

```python
import pytest
from openai import AsyncOpenAI
from pytest_agent_eval.adapters.openai import OpenAIAdapter

@pytest.fixture
def llm_eval_agent():
    client = AsyncOpenAI()
    return OpenAIAdapter(client, model="gpt-4o")
```
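
In a test, the fixture can then be awaited directly; a sketch assuming pytest-asyncio is installed, with an illustrative question and assertion:

```python
import pytest

@pytest.mark.asyncio
async def test_agent_replies(llm_eval_agent):
    reply, _tool_calls = await llm_eval_agent(
        [{"role": "user", "content": "What is the capital of France?"}]
    )
    assert "Paris" in reply
```
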
Source code in src/pytest_agent_eval/adapters/openai.py
class OpenAIAdapter:
    """Wrap an AsyncOpenAI client to conform to the agent callable contract.

    Args:
        client: An ``openai.AsyncOpenAI`` or ``openai.AsyncAzureOpenAI`` instance.
        model: Model name to use for completions (e.g. ``"gpt-4o"``).
        system_prompt: Optional system prompt prepended to every call.

    Example:
        ```python
        import pytest
        from openai import AsyncOpenAI
        from pytest_agent_eval.adapters.openai import OpenAIAdapter

        @pytest.fixture
        def llm_eval_agent():
            client = AsyncOpenAI()
            return OpenAIAdapter(client, model="gpt-4o")
        ```
    """

    def __init__(
        self,
        client: Any,
        model: str,
        system_prompt: str | None = None,
    ) -> None:
        """Store the OpenAI client, model name, and optional system prompt."""
        self._client = client
        self._model = model
        self._system_prompt = system_prompt

    async def __call__(self, history: list[dict[str, Any]]) -> tuple[str, list[str]]:
        """Run a chat completion and normalise to (reply, tool_calls)."""
        messages: list[dict[str, Any]] = []
        if self._system_prompt:
            messages.append({"role": "system", "content": self._system_prompt})
        messages.extend(history)

        response = await self._client.chat.completions.create(
            model=self._model,
            messages=messages,
        )
        message = response.choices[0].message
        reply = message.content or ""
        tool_calls = [tc.function.name for tc in (message.tool_calls or [])]
        return reply, tool_calls

async __call__(history: list[dict[str, Any]]) -> tuple[str, list[str]]

Run a chat completion and normalise to (reply, tool_calls).

Source code in src/pytest_agent_eval/adapters/openai.py
async def __call__(self, history: list[dict[str, Any]]) -> tuple[str, list[str]]:
    """Run a chat completion and normalise to (reply, tool_calls)."""
    messages: list[dict[str, Any]] = []
    if self._system_prompt:
        messages.append({"role": "system", "content": self._system_prompt})
    messages.extend(history)

    response = await self._client.chat.completions.create(
        model=self._model,
        messages=messages,
    )
    message = response.choices[0].message
    reply = message.content or ""
    tool_calls = [tc.function.name for tc in (message.tool_calls or [])]
    return reply, tool_calls

__init__(client: Any, model: str, system_prompt: str | None = None) -> None

Store the OpenAI client, model name, and optional system prompt.

Source code in src/pytest_agent_eval/adapters/openai.py
def __init__(
    self,
    client: Any,
    model: str,
    system_prompt: str | None = None,
) -> None:
    """Store the OpenAI client, model name, and optional system prompt."""
    self._client = client
    self._model = model
    self._system_prompt = system_prompt