Docs / Mistral AI

Mistral AI Integration Guide

Add trace-level observability to Mistral AI agents. Wrap chat.complete() and chat.stream() calls with Nexus spans to capture token counts, latency, tool calls, and finish reasons for every request.

Installation

Install the Mistral Python SDK and the Nexus agent package:

pip install mistralai nexus-agent

Initialize both clients with your API keys:

import nexus_agent
from mistralai import Mistral

# Nexus client scoped to one agent; agent_id must match what is configured
# in the Nexus dashboard (see Troubleshooting below).
nexus = nexus_agent.Nexus(api_key="YOUR_NEXUS_API_KEY", agent_id="my-mistral-agent")
# Standard Mistral SDK client; all chat calls in this guide go through it.
client = Mistral(api_key="YOUR_MISTRAL_API_KEY")

Basic tracing — chat.complete()

Start a trace and span before calling client.chat.complete(), then end both after using the usage object from the response:

def ask_mistral(prompt: str, model: str = "mistral-large-latest") -> str:
    """Send one user prompt to Mistral and return the reply text.

    Wraps the ``chat.complete()`` call in a Nexus trace/span pair so token
    counts and the finish reason are recorded for every request. On failure
    the span and trace are closed with error status before re-raising.
    """
    trace = nexus.start_trace(name="mistral_chat", metadata={"model": model})
    span = nexus.start_span(
        trace_id=trace["trace_id"],
        name="chat_complete",
        # Only a preview of the prompt goes into metadata, not the full text.
        metadata={"model": model, "prompt_preview": prompt[:120]},
    )
    try:
        reply = client.chat.complete(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        )
        top_choice, token_usage = reply.choices[0], reply.usage
        success_meta = {
            "tokens_input": token_usage.prompt_tokens,
            "tokens_output": token_usage.completion_tokens,
            "finish_reason": top_choice.finish_reason,
        }
        nexus.end_span(span_id=span["id"], status="success", metadata=success_meta)
        nexus.end_trace(trace_id=trace["trace_id"], status="success")
        return top_choice.message.content
    except Exception as exc:
        nexus.end_span(span_id=span["id"], status="error", metadata={"error": str(exc)})
        nexus.end_trace(trace_id=trace["trace_id"], status="error")
        raise

The response.usage object provides prompt_tokens and completion_tokens. finish_reason will be "stop" for a normal completion, or "tool_calls" when the model triggered a function.

Tool use

When using Mistral function calling, record the tool names in the span metadata so you can filter traces by which tools were invoked:

def ask_with_tools(prompt: str, tools: list):
    """Call Mistral with function-calling tools enabled, under a Nexus trace.

    The offered tool names are recorded on the span at start time, and the
    tools the model actually invoked at end time, so traces can be filtered
    by tool.

    Args:
        prompt: User message to send.
        tools: Tool specs in Mistral's format; each entry must have a
            ``{"function": {"name": ...}}`` shape.

    Returns:
        The assistant message object from the first choice — the full
        message, which may carry ``tool_calls`` — not just its text.
        (The return annotation ``-> str`` was incorrect and has been
        removed; callers handling tool calls need the whole message.)

    Raises:
        Whatever the Mistral client raises; the span and trace are closed
        with error status before re-raising.
    """
    trace = nexus.start_trace(name="mistral_tool_call")
    span = nexus.start_span(
        trace_id=trace["trace_id"],
        name="chat_complete_with_tools",
        metadata={"model": "mistral-large-latest", "tools": [t["function"]["name"] for t in tools]},
    )
    try:
        response = client.chat.complete(
            model="mistral-large-latest",
            messages=[{"role": "user", "content": prompt}],
            tools=tools,
        )
        choice = response.choices[0]
        # tool_calls is None for a plain text completion; normalize to [].
        tool_calls = choice.message.tool_calls or []
        nexus.end_span(
            span_id=span["id"],
            status="success",
            metadata={
                "tokens_input": response.usage.prompt_tokens,
                "tokens_output": response.usage.completion_tokens,
                "tool_calls": [tc.function.name for tc in tool_calls],
                "finish_reason": choice.finish_reason,
            },
        )
        nexus.end_trace(trace_id=trace["trace_id"], status="success")
        return choice.message
    except Exception as e:
        nexus.end_span(span_id=span["id"], status="error", metadata={"error": str(e)})
        nexus.end_trace(trace_id=trace["trace_id"], status="error")
        raise

Streaming — chat.stream()

For streaming responses, end the Nexus span only after the stream is exhausted; at that point token usage becomes available via stream.get_final_response().usage:

import time

def ask_mistral_stream(prompt: str, model: str = "mistral-small-latest") -> str:
    """Stream a completion from Mistral, printing chunks as they arrive.

    Returns the full concatenated reply text. The Nexus span is closed only
    after the stream is exhausted, because token usage is not available from
    the SDK until then.
    """
    trace = nexus.start_trace(name="mistral_stream", metadata={"model": model})
    span = nexus.start_span(
        trace_id=trace["trace_id"],
        name="chat_stream",
        metadata={"model": model},
    )
    t0 = time.monotonic()
    pieces = []
    try:
        with client.chat.stream(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        ) as stream:
            for event in stream:
                # delta.content may be None on non-text events; coerce to "".
                text = event.data.choices[0].delta.content or ""
                pieces.append(text)
                print(text, end="", flush=True)
            # get_final_response() is only valid once the stream is drained.
            usage = stream.get_final_response().usage
        elapsed_ms = (time.monotonic() - t0) * 1000
        nexus.end_span(
            span_id=span["id"],
            status="success",
            metadata={
                "tokens_input": usage.prompt_tokens,
                "tokens_output": usage.completion_tokens,
                "latency_ms": round(elapsed_ms, 1),
            },
        )
        nexus.end_trace(trace_id=trace["trace_id"], status="success")
        return "".join(pieces)
    except Exception as exc:
        nexus.end_span(span_id=span["id"], status="error", metadata={"error": str(exc)})
        nexus.end_trace(trace_id=trace["trace_id"], status="error")
        raise

Decorator helper

For simpler instrumentation, use a decorator that wraps any function calling Mistral with a Nexus trace automatically:

import functools
import time

def traced_mistral(model: str = "mistral-large-latest"):
    """Decorator factory: wrap a function in a Nexus trace + span.

    Both the trace and the span are named after the wrapped function.
    Latency is recorded on success; the exception text on failure, after
    which the original exception is re-raised.
    """
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            handle = nexus.start_trace(name=fn.__name__, metadata={"model": model})
            span = nexus.start_span(
                trace_id=handle["trace_id"],
                name=fn.__name__,
                metadata={"model": model},
            )
            t0 = time.monotonic()
            try:
                outcome = fn(*args, **kwargs)
                elapsed = round((time.monotonic() - t0) * 1000, 1)
                nexus.end_span(
                    span_id=span["id"],
                    status="success",
                    metadata={"latency_ms": elapsed},
                )
                nexus.end_trace(trace_id=handle["trace_id"], status="success")
                return outcome
            except Exception as exc:
                nexus.end_span(span_id=span["id"], status="error", metadata={"error": str(exc)})
                nexus.end_trace(trace_id=handle["trace_id"], status="error")
                raise
        return wrapper
    return decorator


@traced_mistral(model="mistral-small-latest")
def classify_text(text: str) -> str:
    """Classify *text* with mistral-small; tracing is added by the decorator."""
    reply = client.chat.complete(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": f"Classify: {text}"}],
    )
    return reply.choices[0].message.content

Troubleshooting

Traces not appearing in the dashboard
Check that agent_id matches what you configured in Nexus. Both start_trace and end_trace must be called — an unclosed trace won't appear until it's ended.
Token counts are zero
Make sure you're reading from response.usage (not the choice object). For streaming, usage is only available after calling get_final_response() once the stream is exhausted.
finish_reason is always "tool_calls"
This is expected when the model triggers a function call instead of returning a text completion. Handle the tool call, call the function, and start a new trace turn for the follow-up completion.