Docs Semantic Kernel

Microsoft Semantic Kernel Integration

Add distributed tracing to Semantic Kernel agents. Instrument kernel function invocations, plugin calls, and planner steps to debug slow tool calls, stalled planners, and unexpected function selection.

Installation

pip install keylightdigital-nexus semantic-kernel

Get your API key from Dashboard → API Keys.

Basic kernel trace

Wrap a kernel.invoke call in a Nexus trace to capture the full invocation as a single observable unit:

import asyncio
import os
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
from nexus_sdk import NexusClient

# Shared Nexus client; reads the API key from the environment (raises
# KeyError if NEXUS_API_KEY is unset).
nexus = NexusClient(api_key=os.environ["NEXUS_API_KEY"])

# Kernel with one OpenAI chat-completion service registered under the
# service_id "gpt-4o"; the planner example below selects it by this id.
kernel = sk.Kernel()
kernel.add_service(OpenAIChatCompletion(
    service_id="gpt-4o",
    api_key=os.environ["OPENAI_API_KEY"],
    ai_model_id="gpt-4o",
))

async def invoke_with_trace(function_name: str, plugin_name: str, **kwargs):
    """Run one kernel.invoke call inside a Nexus trace.

    Starts a trace before invoking ``plugin_name.function_name``, ends it
    with status "success" on a normal return, or "error" (carrying the
    exception text) before re-raising when kernel.invoke fails.
    """
    active = nexus.start_trace({
        "agent_id": "semantic-kernel-agent",
        "name": f"invoke: {plugin_name}.{function_name}",
        "status": "running",
        "started_at": nexus.now(),
        "metadata": {
            "plugin": plugin_name,
            "function": function_name,
            "environment": os.environ.get("APP_ENV", "dev"),
        },
    })
    trace_id = active["trace_id"]

    try:
        outcome = await kernel.invoke(
            plugin_name=plugin_name,
            function_name=function_name,
            **kwargs,
        )
    except Exception as exc:
        # Record the failure on the trace, then propagate to the caller.
        nexus.end_trace(trace_id, {"status": "error", "metadata": {"error": str(exc)}})
        raise
    nexus.end_trace(trace_id, {"status": "success"})
    return str(outcome)

Plugin call spans

Add a span for each plugin function call to track individual tool execution times and outputs:

async def invoke_plugin_with_spans(trace_id: str, plugin_name: str, function_name: str, input_text: str):
    """Invoke one plugin function and record the call as a Nexus span.

    The span carries the plugin/function identity and the input length;
    on success the output (truncated to 500 chars) is attached, on failure
    the exception text is attached and the exception re-raised.
    """
    span = nexus.start_span(trace_id, {
        "name": f"plugin:{plugin_name}.{function_name}",
        "type": "tool",
        "metadata": {
            "plugin": plugin_name,
            "function": function_name,
            "input_length": len(input_text),
        },
    })
    try:
        invocation = await kernel.invoke(
            plugin_name=plugin_name,
            function_name=function_name,
            input=input_text,
        )
        rendered = str(invocation)
        nexus.end_span(span["id"], {"output": rendered[:500]})
        return rendered
    except Exception as exc:
        nexus.end_span(span["id"], {"error": str(exc)})
        raise

# Usage:
async def run_pipeline(query: str):
    """Summarize *query*, then translate the summary, under one trace.

    Each stage is recorded as its own span via invoke_plugin_with_spans;
    the trace is closed with status "success" when both stages complete.
    """
    pipeline = nexus.start_trace({
        "agent_id": "sk-pipeline",
        "name": f"pipeline: {query[:60]}",
        "status": "running",
        "started_at": nexus.now(),
        "metadata": {"query": query[:200]},
    })
    tid = pipeline["trace_id"]

    # Sequential stages: the translation consumes the summary's output.
    summarized = await invoke_plugin_with_spans(tid, "SummaryPlugin", "Summarize", query)
    translated = await invoke_plugin_with_spans(tid, "TranslationPlugin", "Translate", summarized)

    nexus.end_trace(tid, {"status": "success"})
    return translated

Planner step tracing

Trace each step the planner executes — here, the SequentialPlanner — so you can see exactly which functions were selected and why:

from semantic_kernel.planners import SequentialPlanner

async def run_plan_with_trace(goal: str):
    """Create a SequentialPlanner plan for *goal* and execute it step by step.

    Plan creation and every step get their own Nexus span under a single
    trace; any failure ends the trace with status "error" (including the
    failing step index during execution) and re-raises.
    """
    planner = SequentialPlanner(kernel, service_id="gpt-4o")

    plan_trace = nexus.start_trace({
        "agent_id": "sk-planner",
        "name": f"plan: {goal[:60]}",
        "status": "running",
        "started_at": nexus.now(),
        "metadata": {"goal": goal[:200], "planner": "SequentialPlanner"},
    })
    tid = plan_trace["trace_id"]

    # Span for plan creation so slow or failed planning is visible on its own.
    creation = nexus.start_span(tid, {
        "name": "planner:create_plan",
        "type": "tool",
        "metadata": {"goal": goal[:200]},
    })
    try:
        plan = await planner.create_plan(goal)
        # NOTE(review): _steps is a private Semantic Kernel attribute —
        # confirm against the installed SK version before relying on it.
        nexus.end_span(creation["id"], {
            "output": f"Plan with {len(plan._steps)} steps",
            "metadata": {"step_count": len(plan._steps)},
        })
    except Exception as exc:
        nexus.end_span(creation["id"], {"error": str(exc)})
        nexus.end_trace(tid, {"status": "error", "metadata": {"error": str(exc)}})
        raise

    # One span per step; a failing step closes the whole trace as "error".
    for index, plan_step in enumerate(plan._steps):
        step_span = nexus.start_span(tid, {
            "name": f"plan:step_{index + 1}:{plan_step.name}",
            "type": "tool",
            "metadata": {
                "step_index": index,
                "plugin": plan_step.plugin_name,
                "function": plan_step.name,
            },
        })
        try:
            outcome = await kernel.invoke(plan_step)
            nexus.end_span(step_span["id"], {"output": str(outcome)[:500]})
        except Exception as exc:
            nexus.end_span(step_span["id"], {"error": str(exc)})
            nexus.end_trace(tid, {"status": "error", "metadata": {"error": str(exc), "failed_step": index}})
            raise

    nexus.end_trace(tid, {"status": "success", "metadata": {"steps_executed": len(plan._steps)}})
    return plan

Async kernels

Semantic Kernel is async-first. Use asyncio.gather to fan out plugin calls in parallel while keeping spans correctly nested under the same trace:

import asyncio

async def run_parallel_plugins(query: str):
    """Fan three independent plugin calls out in parallel under one trace.

    Each call is wrapped in its own span via the local traced_invoke
    helper; asyncio.gather runs them concurrently, and the first failure
    ends the trace with status "error" and re-raises.
    """
    parallel = nexus.start_trace({
        "agent_id": "sk-parallel",
        "name": f"parallel-invoke: {query[:60]}",
        "status": "running",
        "started_at": nexus.now(),
        "metadata": {"query": query[:200], "mode": "parallel"},
    })
    tid = parallel["trace_id"]

    async def traced_invoke(plugin: str, function: str, **kwargs):
        # Per-call span tied to the shared trace id captured from the closure.
        call_span = nexus.start_span(tid, {
            "name": f"plugin:{plugin}.{function}",
            "type": "tool",
            "metadata": {"plugin": plugin, "function": function},
        })
        try:
            outcome = await kernel.invoke(plugin_name=plugin, function_name=function, **kwargs)
            nexus.end_span(call_span["id"], {"output": str(outcome)[:300]})
            return str(outcome)
        except Exception as exc:
            nexus.end_span(call_span["id"], {"error": str(exc)})
            raise

    try:
        gathered = await asyncio.gather(
            traced_invoke("SentimentPlugin", "Analyze", input=query),
            traced_invoke("EntityPlugin", "Extract", input=query),
            traced_invoke("LanguagePlugin", "Detect", input=query),
        )
        nexus.end_trace(tid, {"status": "success"})
        return gathered
    except Exception as exc:
        nexus.end_trace(tid, {"status": "error", "metadata": {"error": str(exc)}})
        raise

Debugging patterns

Wrong plugin selected by planner

If the planner picks an unexpected function, check the function metadata on each plan:step_* span. Mismatched function descriptions in plugin annotations are the most common cause.

Slow plugin calls

Sort spans by duration in the Nexus trace view to identify which plugin is the bottleneck. Parallel invocation via asyncio.gather is the fastest fix when plugins are independent.

Planner creates too many steps

A high step_count on the planner:create_plan span often means the goal is underspecified. Add constraints to the goal string or reduce the number of registered plugins so the planner has a smaller search space.

Ready to instrument your Semantic Kernel agents?

Start for free — no credit card required. See traces in under 5 minutes.

Start free →