{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "from dataclasses import dataclass\n",
    "from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Union\n",
    "\n",
    "import pydantic\n",
    "from anthropic import Anthropic\n",
    "from anthropic.types import Message as AnthropicMessage\n",
    "from anthropic.types import MessageParam as AnthropicMessageParam\n",
    "from openai import NOT_GIVEN, OpenAI\n",
    "from openai.types.chat import (\n",
    "    ChatCompletion,\n",
    "    ChatCompletionChunk,\n",
    "    ChatCompletionMessage,\n",
    "    ChatCompletionMessageParam,\n",
    "    ChatCompletionStreamOptionsParam,\n",
    "    ChatCompletionSystemMessageParam,\n",
    "    ChatCompletionToolMessageParam,\n",
    "    ChatCompletionToolParam,\n",
    "    ChatCompletionUserMessageParam,\n",
    ")\n",
    "from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter\n",
    "from opentelemetry.sdk.resources import Resource\n",
    "from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor\n",
    "from opentelemetry.trace import Status, StatusCode, get_current_span\n",
    "from opentelemetry.util.types import Attributes\n",
    "from typing_extensions import assert_never\n",
    "\n",
    "import openinference.instrumentation as oi\n",
    "from openinference.instrumentation import (\n",
    "    TracerProvider,\n",
    "    get_input_attributes,\n",
    "    get_llm_attributes,\n",
    "    get_output_attributes,\n",
    "    get_span_kind_attributes,\n",
    "    get_tool_attributes,\n",
    "    suppress_tracing,\n",
    "    using_attributes,\n",
    ")\n",
    "from openinference.semconv.resource import ResourceAttributes"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Either instrument with `TracerProvider` from `openinference.instrumentation`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "endpoint = \"http://127.0.0.1:6006/v1/traces\"\n",
    "resource = Resource(attributes={ResourceAttributes.PROJECT_NAME: \"openinference-tracer\"})\n",
    "tracer_provider = TracerProvider(resource=resource)\n",
    "tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint)))\n",
    "tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))\n",
    "tracer = tracer_provider.get_tracer(__name__)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Or using `phoenix.otel.register` (in which case, comment out cell above and uncomment this cell)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from phoenix.otel import register\n",
    "\n",
    "# tracer_provider = register(protocol=\"http/protobuf\")\n",
    "# tracer = tracer_provider.get_tracer(__name__)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## LLMs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "anthropic_client = Anthropic()\n",
    "openai_client = OpenAI()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Context Manager"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with tracer.start_as_current_span(\"llm-span\", openinference_span_kind=\"llm\") as span:\n",
    "    # Manually trace a single chat completion request as an OpenInference LLM span.\n",
    "    input_messages: List[ChatCompletionMessageParam] = [\n",
    "        ChatCompletionSystemMessageParam(\n",
    "            role=\"system\",\n",
    "            content=\"You are a helpful assistant.\",\n",
    "        ),\n",
    "        ChatCompletionUserMessageParam(\n",
    "            role=\"user\",\n",
    "            content=\"Hello, world!\",\n",
    "        ),\n",
    "    ]\n",
    "    invocation_parameters = {\"temperature\": 0.5}\n",
    "    # Record the request attributes on the span before making the call.\n",
    "    span.set_llm(\n",
    "        provider=\"openai\",\n",
    "        system=\"openai\",\n",
    "        input_messages=input_messages,\n",
    "        model_name=\"gpt-4o\",\n",
    "        invocation_parameters=invocation_parameters,\n",
    "    )\n",
    "    span.set_input(input_messages[-1][\"content\"])\n",
    "    response = openai_client.chat.completions.create(  # type: ignore[call-overload]\n",
    "        messages=input_messages,\n",
    "        model=\"gpt-4o\",\n",
    "        **invocation_parameters,\n",
    "    )\n",
    "    reply = response.choices[0].message\n",
    "    token_usage = response.usage\n",
    "    assert token_usage is not None\n",
    "    completion_tokens = token_usage.completion_tokens\n",
    "    prompt_tokens = token_usage.prompt_tokens\n",
    "    assert isinstance(completion_tokens, int)\n",
    "    assert isinstance(prompt_tokens, int)\n",
    "    span.set_status(Status(StatusCode.OK))\n",
    "    # Record the response message and token counts once the call has succeeded.\n",
    "    span.set_llm(\n",
    "        output_messages=[\n",
    "            {\n",
    "                \"role\": reply.role,\n",
    "                \"content\": reply.content,\n",
    "            }\n",
    "        ],\n",
    "        token_count={\n",
    "            \"completion\": completion_tokens,\n",
    "            \"prompt\": prompt_tokens,\n",
    "        },\n",
    "    )\n",
    "    span.set_output(reply.content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with tracer.start_as_current_span(\"llm-span-tool-calls\", openinference_span_kind=\"llm\") as span:\n",
    "    # Manually trace a chat completion request that offers a weather tool and\n",
    "    # is expected to respond with a single tool call.\n",
    "    input_messages = [\n",
    "        ChatCompletionUserMessageParam(\n",
    "            role=\"user\",\n",
    "            content=\"What's the weather like in San Francisco?\",\n",
    "        )\n",
    "    ]\n",
    "    invocation_parameters = {\n",
    "        \"temperature\": 0.5,\n",
    "    }\n",
    "    tools: List[ChatCompletionToolParam] = [\n",
    "        {\n",
    "            \"type\": \"function\",\n",
    "            \"function\": {\n",
    "                \"name\": \"get_weather\",\n",
    "                \"description\": \"finds the weather for a given city\",\n",
    "                \"parameters\": {\n",
    "                    \"type\": \"object\",\n",
    "                    \"properties\": {\n",
    "                        \"city\": {\n",
    "                            \"type\": \"string\",\n",
    "                            \"description\": \"The city to find the weather for, e.g. 'London'\",\n",
    "                        }\n",
    "                    },\n",
    "                    \"required\": [\"city\"],\n",
    "                },\n",
    "            },\n",
    "        },\n",
    "    ]\n",
    "    # Record the request attributes on the span before making the call.\n",
    "    span.set_llm(\n",
    "        provider=\"openai\",\n",
    "        system=\"openai\",\n",
    "        input_messages=input_messages,\n",
    "        model_name=\"gpt-4o\",\n",
    "        invocation_parameters=invocation_parameters,\n",
    "    )\n",
    "    span.set_input(input_messages[-1][\"content\"])\n",
    "    message = openai_client.chat.completions.create(  # type: ignore[call-overload]\n",
    "        model=\"gpt-4o\",  # keep in sync with the model_name recorded on the span\n",
    "        tools=tools,\n",
    "        messages=input_messages,\n",
    "        **invocation_parameters,\n",
    "    )\n",
    "    span.set_status(Status(StatusCode.OK))\n",
    "    output_message = message.choices[0].message\n",
    "    assert (token_usage := message.usage) is not None\n",
    "    assert isinstance(completion_tokens := token_usage.completion_tokens, int)\n",
    "    assert isinstance(prompt_tokens := token_usage.prompt_tokens, int)\n",
    "    assert (tool_calls := output_message.tool_calls)\n",
    "    assert len(tool_calls) == 1\n",
    "    tool_call = tool_calls[0]\n",
    "    # Record the tool call returned by the model as an output message.\n",
    "    span.set_llm(\n",
    "        output_messages=[\n",
    "            {\n",
    "                \"role\": output_message.role,\n",
    "                \"content\": output_message.content,\n",
    "                \"tool_calls\": [\n",
    "                    {\n",
    "                        \"id\": tool_call.id,\n",
    "                        \"function\": {\n",
    "                            \"name\": tool_call.function.name,\n",
    "                            \"arguments\": tool_call.function.arguments,\n",
    "                        },\n",
    "                    }\n",
    "                ],\n",
    "            }\n",
    "        ],\n",
    "        token_count={\n",
    "            \"completion\": completion_tokens,\n",
    "            \"prompt\": prompt_tokens,\n",
    "        },\n",
    "    )\n",
    "    span.set_output(f\"{tool_call.function.name}({tool_call.function.arguments})\")\n",
    "\n",
    "with tracer.start_as_current_span(\"llm-span-tool-results\", openinference_span_kind=\"llm\") as span:\n",
    "    # Continue the conversation from the previous cell by appending the\n",
    "    # assistant tool call and the corresponding tool result message.\n",
    "    input_messages.append(output_message.model_dump())\n",
    "    input_messages.append(\n",
    "        ChatCompletionToolMessageParam(content=\"sunny\", role=\"tool\", tool_call_id=tool_call.id)\n",
    "    )\n",
    "    # Record the request attributes on the span before making the call.\n",
    "    span.set_llm(\n",
    "        provider=\"openai\",\n",
    "        system=\"openai\",\n",
    "        input_messages=input_messages,\n",
    "        model_name=\"gpt-4o\",\n",
    "        invocation_parameters=invocation_parameters,\n",
    "    )\n",
    "    span.set_input(input_messages[-1][\"content\"])\n",
    "    message = openai_client.chat.completions.create(  # type: ignore[call-overload]\n",
    "        model=\"gpt-4o\",  # keep in sync with the model_name recorded on the span\n",
    "        tools=tools,\n",
    "        messages=input_messages,\n",
    "        **invocation_parameters,\n",
    "    )\n",
    "    span.set_status(Status(StatusCode.OK))\n",
    "    output_message = message.choices[0].message\n",
    "    assert (token_usage := message.usage) is not None\n",
    "    assert isinstance(completion_tokens := token_usage.completion_tokens, int)\n",
    "    assert isinstance(prompt_tokens := token_usage.prompt_tokens, int)\n",
    "    # Record the final assistant response and token counts.\n",
    "    span.set_llm(\n",
    "        output_messages=[\n",
    "            {\n",
    "                \"role\": output_message.role,\n",
    "                \"content\": output_message.content,\n",
    "            }\n",
    "        ],\n",
    "        token_count={\n",
    "            \"completion\": completion_tokens,\n",
    "            \"prompt\": prompt_tokens,\n",
    "        },\n",
    "    )\n",
    "    span.set_output(output_message.content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with tracer.start_as_current_span(\"llm-span-content-blocks\", openinference_span_kind=\"llm\") as span:\n",
    "    # Manually trace an Anthropic multimodal (image + text) message request.\n",
    "    invocation_parameters = {\n",
    "        \"max_tokens\": 100,\n",
    "    }\n",
    "    # The input messages below use the OpenInference message schema, which\n",
    "    # intentionally differs from the Anthropic request payload further down.\n",
    "    span.set_llm(\n",
    "        provider=\"anthropic\",\n",
    "        system=\"anthropic\",\n",
    "        input_messages=[\n",
    "            {\n",
    "                \"role\": \"user\",\n",
    "                \"contents\": [\n",
    "                    {\n",
    "                        \"type\": \"image\",\n",
    "                        \"image\": {\n",
    "                            \"url\": \"https://upload.wikimedia.org/wikipedia/commons/a/a7/Camponotus_flavomarginatus_ant.jpg\",\n",
    "                        },\n",
    "                    },\n",
    "                    {\n",
    "                        \"type\": \"text\",\n",
    "                        \"text\": \"What is in the above image?\",\n",
    "                    },\n",
    "                ],\n",
    "            }\n",
    "        ],\n",
    "        model_name=\"claude-3-5-sonnet-20240620\",  # matches the model invoked below\n",
    "        invocation_parameters=invocation_parameters,\n",
    "    )\n",
    "    span.set_input(\"What is in the above image?\")\n",
    "    message = anthropic_client.messages.create(  # type: ignore[call-overload]\n",
    "        model=\"claude-3-5-sonnet-20240620\",\n",
    "        messages=[\n",
    "            {\n",
    "                \"role\": \"user\",\n",
    "                \"content\": [\n",
    "                    {\n",
    "                        \"type\": \"image\",\n",
    "                        \"source\": {\n",
    "                            \"type\": \"url\",\n",
    "                            \"url\": \"https://upload.wikimedia.org/wikipedia/commons/a/a7/Camponotus_flavomarginatus_ant.jpg\",\n",
    "                        },\n",
    "                    },\n",
    "                    {\"type\": \"text\", \"text\": \"What is in the above image?\"},\n",
    "                ],\n",
    "            }\n",
    "        ],\n",
    "        **invocation_parameters,\n",
    "    )\n",
    "    span.set_status(Status(StatusCode.OK))\n",
    "    token_usage = message.usage\n",
    "    prompt_tokens = token_usage.input_tokens\n",
    "    completion_tokens = token_usage.output_tokens\n",
    "    # Anthropic returns a list of content blocks; this request yields one text block.\n",
    "    output_message = message.content[0]\n",
    "    assert output_message.type == \"text\"\n",
    "    output_message_text = output_message.text\n",
    "    span.set_llm(\n",
    "        output_messages=[\n",
    "            {\n",
    "                \"role\": message.role,\n",
    "                \"content\": output_message_text,\n",
    "            }\n",
    "        ],\n",
    "        token_count={\n",
    "            \"completion\": completion_tokens,\n",
    "            \"prompt\": prompt_tokens,\n",
    "        },\n",
    "    )\n",
    "    span.set_output(output_message_text)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Decorator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\n",
      "    \"name\": \"invoke_llm\",\n",
      "    \"context\": {\n",
      "        \"trace_id\": \"0xc7bb8d54d387366acd95bed9cc786384\",\n",
      "        \"span_id\": \"0x342a2d77030a7360\",\n",
      "        \"trace_state\": \"[]\"\n",
      "    },\n",
      "    \"kind\": \"SpanKind.INTERNAL\",\n",
      "    \"parent_id\": null,\n",
      "    \"start_time\": \"2025-04-14T19:46:55.646785Z\",\n",
      "    \"end_time\": \"2025-04-14T19:46:57.732399Z\",\n",
      "    \"status\": {\n",
      "        \"status_code\": \"OK\"\n",
      "    },\n",
      "    \"attributes\": {\n",
      "        \"input.value\": \"{\\\"input_messages\\\": [{\\\"role\\\": \\\"user\\\", \\\"content\\\": \\\"What's the weather like in San Francisco?\\\"}], \\\"invocation_parameters\\\": {\\\"temperature\\\": 0.5}}\",\n",
      "        \"input.mime_type\": \"application/json\",\n",
      "        \"llm.provider\": \"openai\",\n",
      "        \"llm.system\": \"openai\",\n",
      "        \"llm.model_name\": \"gpt-4o\",\n",
      "        \"llm.invocation_parameters\": \"{\\\"temperature\\\": 0.5}\",\n",
      "        \"llm.input_messages.0.message.role\": \"user\",\n",
      "        \"llm.input_messages.0.message.content\": \"What's the weather like in San Francisco?\",\n",
      "        \"llm.tools.0.tool.json_schema\": \"{\\\"type\\\": \\\"function\\\", \\\"function\\\": {\\\"name\\\": \\\"get_weather\\\", \\\"description\\\": \\\"finds the weather for a given city\\\", \\\"parameters\\\": {\\\"type\\\": \\\"object\\\", \\\"properties\\\": {\\\"city\\\": {\\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"The city to find the weather for, e.g. 'London'\\\"}}, \\\"required\\\": [\\\"city\\\"]}}}\",\n",
      "        \"llm.output_messages.0.message.role\": \"assistant\",\n",
      "        \"llm.output_messages.0.message.tool_calls.0.tool_call.id\": \"call_YzwVpyvSHwjWY6nHKx2MhfJs\",\n",
      "        \"llm.output_messages.0.message.tool_calls.0.tool_call.function.name\": \"get_weather\",\n",
      "        \"llm.output_messages.0.message.tool_calls.0.tool_call.function.arguments\": \"{\\n  \\\"city\\\": \\\"San Francisco\\\"\\n}\",\n",
      "        \"llm.token_count.prompt\": 71,\n",
      "        \"llm.token_count.completion\": 17,\n",
      "        \"output.value\": \"{\\\"id\\\": \\\"chatcmpl-BMK3cf5oaBBM1KtCQTx97Ae8Zc5RK\\\", \\\"choices\\\": [{\\\"finish_reason\\\": \\\"tool_calls\\\", \\\"index\\\": 0, \\\"logprobs\\\": null, \\\"message\\\": {\\\"content\\\": null, \\\"refusal\\\": null, \\\"role\\\": \\\"assistant\\\", \\\"audio\\\": null, \\\"function_call\\\": null, \\\"tool_calls\\\": [{\\\"id\\\": \\\"call_YzwVpyvSHwjWY6nHKx2MhfJs\\\", \\\"function\\\": {\\\"arguments\\\": \\\"{\\\\n  \\\\\\\"city\\\\\\\": \\\\\\\"San Francisco\\\\\\\"\\\\n}\\\", \\\"name\\\": \\\"get_weather\\\"}, \\\"type\\\": \\\"function\\\"}], \\\"annotations\\\": []}}], \\\"created\\\": 1744660016, \\\"model\\\": \\\"gpt-4-0613\\\", \\\"object\\\": \\\"chat.completion\\\", \\\"service_tier\\\": \\\"default\\\", \\\"system_fingerprint\\\": null, \\\"usage\\\": {\\\"completion_tokens\\\": 17, \\\"prompt_tokens\\\": 71, \\\"total_tokens\\\": 88, \\\"completion_tokens_details\\\": {\\\"accepted_prediction_tokens\\\": 0, \\\"audio_tokens\\\": 0, \\\"reasoning_tokens\\\": 0, \\\"rejected_prediction_tokens\\\": 0}, \\\"prompt_tokens_details\\\": {\\\"audio_tokens\\\": 0, \\\"cached_tokens\\\": 0}}}\",\n",
      "        \"output.mime_type\": \"application/json\",\n",
      "        \"openinference.span.kind\": \"LLM\"\n",
      "    },\n",
      "    \"events\": [],\n",
      "    \"links\": [],\n",
      "    \"resource\": {\n",
      "        \"attributes\": {\n",
      "            \"openinference.project.name\": \"openinference-tracer\"\n",
      "        },\n",
      "        \"schema_url\": \"\"\n",
      "    }\n",
      "}\n",
      "{\n",
      "    \"name\": \"get_weather\",\n",
      "    \"context\": {\n",
      "        \"trace_id\": \"0xda08b2bb5d7272660cc9869263eb8141\",\n",
      "        \"span_id\": \"0x4ae1117f077a7ed6\",\n",
      "        \"trace_state\": \"[]\"\n",
      "    },\n",
      "    \"kind\": \"SpanKind.INTERNAL\",\n",
      "    \"parent_id\": null,\n",
      "    \"start_time\": \"2025-04-14T19:46:57.801146Z\",\n",
      "    \"end_time\": \"2025-04-14T19:46:57.801222Z\",\n",
      "    \"status\": {\n",
      "        \"status_code\": \"OK\"\n",
      "    },\n",
      "    \"attributes\": {\n",
      "        \"input.value\": \"{\\\"city\\\": \\\"San Francisco\\\"}\",\n",
      "        \"input.mime_type\": \"application/json\",\n",
      "        \"tool.name\": \"get_weather\",\n",
      "        \"tool.parameters\": \"{\\\"type\\\": \\\"object\\\", \\\"title\\\": \\\"get_weather\\\", \\\"description\\\": \\\"Imagine you were making an API call here.\\\", \\\"properties\\\": {\\\"city\\\": {\\\"type\\\": \\\"string\\\"}}, \\\"required\\\": [\\\"city\\\"]}\",\n",
      "        \"tool.description\": \"Imagine you were making an API call here.\",\n",
      "        \"output.value\": \"sunny\",\n",
      "        \"output.mime_type\": \"text/plain\",\n",
      "        \"openinference.span.kind\": \"TOOL\"\n",
      "    },\n",
      "    \"events\": [],\n",
      "    \"links\": [],\n",
      "    \"resource\": {\n",
      "        \"attributes\": {\n",
      "            \"openinference.project.name\": \"openinference-tracer\"\n",
      "        },\n",
      "        \"schema_url\": \"\"\n",
      "    }\n",
      "}\n",
      "San Francisco is sunny\n",
      "{\n",
      "    \"name\": \"invoke_llm\",\n",
      "    \"context\": {\n",
      "        \"trace_id\": \"0xc3cf745baf361ddf503f41d4aa230d51\",\n",
      "        \"span_id\": \"0x57a36a465249cfdb\",\n",
      "        \"trace_state\": \"[]\"\n",
      "    },\n",
      "    \"kind\": \"SpanKind.INTERNAL\",\n",
      "    \"parent_id\": null,\n",
      "    \"start_time\": \"2025-04-14T19:46:57.814482Z\",\n",
      "    \"end_time\": \"2025-04-14T19:46:58.906962Z\",\n",
      "    \"status\": {\n",
      "        \"status_code\": \"OK\"\n",
      "    },\n",
      "    \"attributes\": {\n",
      "        \"input.value\": \"{\\\"input_messages\\\": [{\\\"role\\\": \\\"user\\\", \\\"content\\\": \\\"What's the weather like in San Francisco?\\\"}, {\\\"content\\\": null, \\\"refusal\\\": null, \\\"role\\\": \\\"assistant\\\", \\\"audio\\\": null, \\\"function_call\\\": null, \\\"tool_calls\\\": [{\\\"id\\\": \\\"call_YzwVpyvSHwjWY6nHKx2MhfJs\\\", \\\"function\\\": {\\\"arguments\\\": \\\"{\\\\n  \\\\\\\"city\\\\\\\": \\\\\\\"San Francisco\\\\\\\"\\\\n}\\\", \\\"name\\\": \\\"get_weather\\\"}, \\\"type\\\": \\\"function\\\"}], \\\"annotations\\\": []}, {\\\"content\\\": \\\"sunny\\\", \\\"role\\\": \\\"tool\\\", \\\"tool_call_id\\\": \\\"call_YzwVpyvSHwjWY6nHKx2MhfJs\\\"}], \\\"invocation_parameters\\\": {\\\"temperature\\\": 0.5}}\",\n",
      "        \"input.mime_type\": \"application/json\",\n",
      "        \"llm.provider\": \"openai\",\n",
      "        \"llm.system\": \"openai\",\n",
      "        \"llm.model_name\": \"gpt-4o\",\n",
      "        \"llm.invocation_parameters\": \"{\\\"temperature\\\": 0.5}\",\n",
      "        \"llm.input_messages.0.message.role\": \"user\",\n",
      "        \"llm.input_messages.0.message.content\": \"What's the weather like in San Francisco?\",\n",
      "        \"llm.input_messages.1.message.role\": \"assistant\",\n",
      "        \"llm.input_messages.1.message.tool_calls.0.tool_call.id\": \"call_YzwVpyvSHwjWY6nHKx2MhfJs\",\n",
      "        \"llm.input_messages.1.message.tool_calls.0.tool_call.function.name\": \"get_weather\",\n",
      "        \"llm.input_messages.1.message.tool_calls.0.tool_call.function.arguments\": \"{\\n  \\\"city\\\": \\\"San Francisco\\\"\\n}\",\n",
      "        \"llm.input_messages.2.message.role\": \"tool\",\n",
      "        \"llm.input_messages.2.message.content\": \"sunny\",\n",
      "        \"llm.output_messages.0.message.role\": \"assistant\",\n",
      "        \"llm.output_messages.0.message.content\": \"The weather in San Francisco is sunny.\",\n",
      "        \"llm.token_count.prompt\": 42,\n",
      "        \"llm.token_count.completion\": 9,\n",
      "        \"output.value\": \"{\\\"id\\\": \\\"chatcmpl-BMK3e21WHYR9QbKDLr6JgKbi3qUIh\\\", \\\"choices\\\": [{\\\"finish_reason\\\": \\\"stop\\\", \\\"index\\\": 0, \\\"logprobs\\\": null, \\\"message\\\": {\\\"content\\\": \\\"The weather in San Francisco is sunny.\\\", \\\"refusal\\\": null, \\\"role\\\": \\\"assistant\\\", \\\"audio\\\": null, \\\"function_call\\\": null, \\\"tool_calls\\\": null, \\\"annotations\\\": []}}], \\\"created\\\": 1744660018, \\\"model\\\": \\\"gpt-4-0613\\\", \\\"object\\\": \\\"chat.completion\\\", \\\"service_tier\\\": \\\"default\\\", \\\"system_fingerprint\\\": null, \\\"usage\\\": {\\\"completion_tokens\\\": 9, \\\"prompt_tokens\\\": 42, \\\"total_tokens\\\": 51, \\\"completion_tokens_details\\\": {\\\"accepted_prediction_tokens\\\": 0, \\\"audio_tokens\\\": 0, \\\"reasoning_tokens\\\": 0, \\\"rejected_prediction_tokens\\\": 0}, \\\"prompt_tokens_details\\\": {\\\"audio_tokens\\\": 0, \\\"cached_tokens\\\": 0}}}\",\n",
      "        \"output.mime_type\": \"application/json\",\n",
      "        \"openinference.span.kind\": \"LLM\"\n",
      "    },\n",
      "    \"events\": [],\n",
      "    \"links\": [],\n",
      "    \"resource\": {\n",
      "        \"attributes\": {\n",
      "            \"openinference.project.name\": \"openinference-tracer\"\n",
      "        },\n",
      "        \"schema_url\": \"\"\n",
      "    }\n",
      "}\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "ChatCompletion(id='chatcmpl-BMK3e21WHYR9QbKDLr6JgKbi3qUIh', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The weather in San Francisco is sunny.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None, annotations=[]))], created=1744660018, model='gpt-4-0613', object='chat.completion', service_tier='default', system_fingerprint=None, usage=CompletionUsage(completion_tokens=9, prompt_tokens=42, total_tokens=51, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def get_oi_attributes_from_openai_inputs(\n",
    "    input_messages: List[ChatCompletionMessageParam],\n",
    "    invocation_parameters: Dict[str, Any],\n",
    "    tools: Optional[List[ChatCompletionToolParam]] = None,\n",
    ") -> Dict[str, Any]:\n",
    "    oi_input_messages = [\n",
    "        convert_openai_message_param_to_oi_message(message) for message in input_messages\n",
    "    ]\n",
    "    oi_tools = [convert_openai_tool_param_to_oi_tool(tool) for tool in tools or []]\n",
    "    return {\n",
    "        **get_input_attributes(\n",
    "            {\n",
    "                \"input_messages\": input_messages,\n",
    "                \"invocation_parameters\": invocation_parameters,\n",
    "            }\n",
    "        ),\n",
    "        **get_llm_attributes(\n",
    "            provider=\"openai\",\n",
    "            system=\"openai\",\n",
    "            model_name=\"gpt-4o\",\n",
    "            input_messages=oi_input_messages,\n",
    "            invocation_parameters=invocation_parameters,\n",
    "            tools=oi_tools,\n",
    "        ),\n",
    "    }\n",
    "\n",
    "\n",
    "def convert_openai_message_param_to_oi_message(\n",
    "    message_param: Union[ChatCompletionMessageParam, ChatCompletionMessage],\n",
    ") -> oi.Message:\n",
    "    if isinstance(message_param, ChatCompletionMessage):\n",
    "        role: str = message_param.role\n",
    "        oi_message = oi.Message(\n",
    "            role=role,\n",
    "        )\n",
    "        if isinstance(content := message_param.content, str):\n",
    "            oi_message[\"content\"] = content\n",
    "        if message_param.tool_calls is not None:\n",
    "            oi_tool_calls: List[oi.ToolCall] = []\n",
    "            for tool_call in message_param.tool_calls:\n",
    "                function = tool_call.function\n",
    "                oi_tool_calls.append(\n",
    "                    oi.ToolCall(\n",
    "                        id=tool_call.id,\n",
    "                        function=oi.ToolCallFunction(\n",
    "                            name=function.name,\n",
    "                            arguments=function.arguments,\n",
    "                        ),\n",
    "                    )\n",
    "                )\n",
    "            oi_message[\"tool_calls\"] = oi_tool_calls\n",
    "        return oi_message\n",
    "    if isinstance(message_param, dict):\n",
    "        role = message_param[\"role\"]\n",
    "        assert isinstance(message_param[\"content\"], str)\n",
    "        content = message_param[\"content\"]\n",
    "    else:\n",
    "        assert_never(message_param)\n",
    "    return oi.Message(role=role, content=content)\n",
    "\n",
    "\n",
    "def convert_openai_tool_param_to_oi_tool(\n",
    "    tool_param: ChatCompletionToolParam,\n",
    ") -> oi.Tool:\n",
    "    assert tool_param[\"type\"] == \"function\"\n",
    "    return oi.Tool(\n",
    "        json_schema=dict(tool_param),\n",
    "    )\n",
    "\n",
    "\n",
    "def get_oi_attributes_from_openai_outputs(\n",
    "    response: ChatCompletion,\n",
    ") -> Dict[str, Any]:\n",
    "    choices = response.choices\n",
    "    assert len(choices) == 1\n",
    "    message = response.choices[0].message\n",
    "    role = message.role\n",
    "    oi_message = oi.Message(role=role)\n",
    "    if isinstance(message.content, str):\n",
    "        oi_message[\"content\"] = message.content\n",
    "    if isinstance(message.tool_calls, list):\n",
    "        oi_tool_calls: List[oi.ToolCall] = []\n",
    "        for tool_call in message.tool_calls:\n",
    "            tool_call_id = tool_call.id\n",
    "            function_name = tool_call.function.name\n",
    "            function_arguments = tool_call.function.arguments\n",
    "            oi_tool_calls.append(\n",
    "                oi.ToolCall(\n",
    "                    id=tool_call_id,\n",
    "                    function=oi.ToolCallFunction(\n",
    "                        name=function_name,\n",
    "                        arguments=function_arguments,\n",
    "                    ),\n",
    "                )\n",
    "            )\n",
    "        oi_message[\"tool_calls\"] = oi_tool_calls\n",
    "    output_messages = [oi_message]\n",
    "    token_usage = response.usage\n",
    "    assert token_usage is not None\n",
    "    prompt_tokens = token_usage.prompt_tokens\n",
    "    completion_tokens = token_usage.completion_tokens\n",
    "    return {\n",
    "        **get_llm_attributes(\n",
    "            output_messages=output_messages,\n",
    "            token_count={\n",
    "                \"completion\": completion_tokens,\n",
    "                \"prompt\": prompt_tokens,\n",
    "            },\n",
    "        ),\n",
    "        **get_output_attributes(response),\n",
    "    }\n",
    "\n",
    "\n",
    "@tracer.llm(\n",
    "    process_input=get_oi_attributes_from_openai_inputs,\n",
    "    process_output=get_oi_attributes_from_openai_outputs,\n",
    ")\n",
    "def invoke_llm(\n",
    "    input_messages: List[ChatCompletionMessageParam],\n",
    "    invocation_parameters: Dict[str, Any],\n",
    "    tools: Optional[List[ChatCompletionToolParam]] = None,\n",
    ") -> ChatCompletion:\n",
    "    \"\"\"Call the OpenAI chat completions API, traced as an OpenInference LLM span.\"\"\"\n",
    "    response = openai_client.chat.completions.create(\n",
    "        model=\"gpt-4o\",  # keep in sync with the model_name reported by process_input\n",
    "        tools=tools or NOT_GIVEN,\n",
    "        messages=input_messages,\n",
    "        **invocation_parameters,\n",
    "    )\n",
    "    assert isinstance(response, ChatCompletion)\n",
    "    return response\n",
    "\n",
    "\n",
    "@tracer.tool\n",
    "def get_weather(city: str) -> str:\n",
    "    \"\"\"\n",
    "    Imagine you were making an API call here.\n",
    "    \"\"\"\n",
    "    return \"sunny\"\n",
    "\n",
    "\n",
    "input_messages = [\n",
    "    ChatCompletionUserMessageParam(\n",
    "        role=\"user\",\n",
    "        content=\"What's the weather like in San Francisco?\",\n",
    "    )\n",
    "]\n",
    "invocation_parameters = {\n",
    "    \"temperature\": 0.5,\n",
    "}\n",
    "tools = [\n",
    "    {\n",
    "        \"type\": \"function\",\n",
    "        \"function\": {\n",
    "            \"name\": \"get_weather\",\n",
    "            \"description\": \"finds the weather for a given city\",\n",
    "            \"parameters\": {\n",
    "                \"type\": \"object\",\n",
    "                \"properties\": {\n",
    "                    \"city\": {\n",
    "                        \"type\": \"string\",\n",
    "                        \"description\": \"The city to find the weather for, e.g. 'London'\",\n",
    "                    }\n",
    "                },\n",
    "                \"required\": [\"city\"],\n",
    "            },\n",
    "        },\n",
    "    },\n",
    "]\n",
    "response = invoke_llm(input_messages, invocation_parameters, tools=tools)\n",
    "output_message = response.choices[0].message\n",
    "tool_call = output_message.tool_calls[0]\n",
    "city = json.loads(tool_call.function.arguments)[\"city\"]\n",
    "weather = get_weather(city)\n",
    "print(f\"{city} is {weather}\")\n",
    "input_messages.append(output_message)\n",
    "input_messages.append(\n",
    "    ChatCompletionToolMessageParam(\n",
    "        content=weather,\n",
    "        role=\"tool\",\n",
    "        tool_call_id=tool_call.id,\n",
    "    )\n",
    ")\n",
    "invoke_llm(input_messages, invocation_parameters)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_input(\n",
    "    input_messages: List[AnthropicMessageParam],\n",
    "    invocation_parameters: Dict[str, Any],\n",
    ") -> Dict[str, Any]:\n",
    "    oi_input_messages = [to_oi_message(message) for message in input_messages]\n",
    "    return {\n",
    "        **get_input_attributes(\n",
    "            {\n",
    "                \"input_messages\": input_messages,\n",
    "                \"invocation_parameters\": invocation_parameters,\n",
    "            }\n",
    "        ),\n",
    "        **get_llm_attributes(\n",
    "            input_messages=oi_input_messages,\n",
    "            invocation_parameters=invocation_parameters,\n",
    "        ),\n",
    "    }\n",
    "\n",
    "\n",
    "def to_oi_message(message: AnthropicMessageParam) -> oi.Message:\n",
    "    role = message[\"role\"]\n",
    "    content = message[\"content\"]\n",
    "    if isinstance(content, str):\n",
    "        return oi.Message(role=role, content=content)\n",
    "\n",
    "    contents: List[oi.MessageContent] = []\n",
    "    for content_block in content:\n",
    "        if not isinstance(content_block, dict):\n",
    "            raise NotImplementedError(\"Only typed dict message params are supported\")\n",
    "        if (content_type := content_block[\"type\"]) == \"text\":\n",
    "            assert isinstance(text := content_block.get(\"text\"), str)\n",
    "            contents.append(oi.TextMessageContent(type=\"text\", text=text))\n",
    "        elif content_type == \"image\":\n",
    "            assert isinstance(source := content_block.get(\"source\"), dict)\n",
    "            assert isinstance(url := source.get(\"url\"), str)\n",
    "            contents.append(\n",
    "                oi.ImageMessageContent(\n",
    "                    type=\"image\",\n",
    "                    image=oi.Image(url=url),\n",
    "                )\n",
    "            )\n",
    "        else:\n",
    "            raise NotImplementedError(\"Only text and image message content blocks are supported\")\n",
    "    return oi.Message(role=role, contents=contents)\n",
    "\n",
    "\n",
    "def process_output(message: AnthropicMessage) -> Dict[str, Any]:\n",
    "    assert len(message.content) == 1\n",
    "    content_block = message.content[0]\n",
    "    assert content_block.type == \"text\"\n",
    "    content_block_text = content_block.text\n",
    "    return {\n",
    "        **get_llm_attributes(\n",
    "            output_messages=[\n",
    "                {\n",
    "                    \"role\": message.role,\n",
    "                    \"content\": content_block_text,\n",
    "                }\n",
    "            ],\n",
    "            token_count={\n",
    "                \"completion\": message.usage.output_tokens,\n",
    "                \"prompt\": message.usage.input_tokens,\n",
    "            },\n",
    "        ),\n",
    "        **get_output_attributes(content_block_text),\n",
    "    }\n",
    "\n",
    "\n",
    "@tracer.llm(\n",
    "    process_input=process_input,\n",
    "    process_output=process_output,\n",
    ")\n",
    "def get_image_description(\n",
    "    input_messages: List[AnthropicMessageParam], invocation_parameters: Dict[str, Any]\n",
    ") -> AnthropicMessage:\n",
    "    output_message = anthropic_client.messages.create(\n",
    "        model=\"claude-3-5-sonnet-20240620\",\n",
    "        messages=input_messages,\n",
    "        **invocation_parameters,\n",
    "    )\n",
    "    return output_message\n",
    "\n",
    "\n",
    "response = get_image_description(\n",
    "    input_messages=[\n",
    "        {\n",
    "            \"role\": \"user\",\n",
    "            \"content\": [\n",
    "                {\n",
    "                    \"type\": \"image\",\n",
    "                    \"source\": {\n",
    "                        \"type\": \"url\",\n",
    "                        \"url\": \"https://upload.wikimedia.org/wikipedia/commons/a/a7/Camponotus_flavomarginatus_ant.jpg\",\n",
    "                    },\n",
    "                },\n",
    "                {\"type\": \"text\", \"text\": \"What is in the above image?\"},\n",
    "            ],\n",
    "        }\n",
    "    ],\n",
    "    invocation_parameters={\"temperature\": 0.5, \"max_tokens\": 100},\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_attributes_from_generator_outputs(outputs: List[ChatCompletionChunk]) -> Attributes:\n",
    "    role: Optional[str] = None\n",
    "    content = \"\"\n",
    "    oi_token_count = oi.TokenCount()\n",
    "    for chunk in outputs:\n",
    "        if choices := chunk.choices:\n",
    "            assert len(choices) == 1\n",
    "            delta = choices[0].delta\n",
    "            if isinstance(delta.content, str):\n",
    "                content += delta.content\n",
    "            if isinstance(delta.role, str):\n",
    "                role = delta.role\n",
    "        if (usage := chunk.usage) is not None:\n",
    "            if (prompt_tokens := usage.prompt_tokens) is not None:\n",
    "                oi_token_count[\"prompt\"] = prompt_tokens\n",
    "            if (completion_tokens := usage.completion_tokens) is not None:\n",
    "                oi_token_count[\"completion\"] = completion_tokens\n",
    "    oi_messages = []\n",
    "    if role and content:\n",
    "        oi_messages.append(oi.Message(role=role, content=content))\n",
    "    return {\n",
    "        **get_llm_attributes(\n",
    "            output_messages=oi_messages,\n",
    "            token_count=oi_token_count,\n",
    "        ),\n",
    "        **get_output_attributes(content),\n",
    "    }\n",
    "\n",
    "\n",
    "@tracer.llm(\n",
    "    process_input=get_oi_attributes_from_openai_inputs,\n",
    "    process_output=get_attributes_from_generator_outputs,\n",
    ")\n",
    "def sync_chat_stream(\n",
    "    input_messages: List[ChatCompletionMessageParam],\n",
    "    invocation_parameters: Dict[str, Any],\n",
    ") -> Generator[str, None, None]:\n",
    "    stream = openai_client.chat.completions.create(\n",
    "        model=\"gpt-4o\",\n",
    "        messages=input_messages,\n",
    "        stream=True,\n",
    "        stream_options=ChatCompletionStreamOptionsParam(include_usage=True),\n",
    "        **invocation_parameters,\n",
    "    )\n",
    "    for chunk in stream:\n",
    "        yield chunk\n",
    "\n",
    "\n",
    "input_messages = [\n",
    "    ChatCompletionUserMessageParam(\n",
    "        role=\"user\",\n",
    "        content=\"Who won the World Cup in 2022?\",\n",
    "    )\n",
    "]\n",
    "invocation_parameters = {\n",
    "    \"temperature\": 0.5,\n",
    "}\n",
    "for chunk in sync_chat_stream(input_messages, invocation_parameters):\n",
    "    print(chunk)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.llm(\n",
    "    process_input=get_oi_attributes_from_openai_inputs,\n",
    "    process_output=get_attributes_from_generator_outputs,\n",
    ")\n",
    "async def async_chat_stream(\n",
    "    input_messages: List[ChatCompletionMessageParam],\n",
    "    invocation_parameters: Dict[str, Any],\n",
    ") -> AsyncGenerator[str, None]:\n",
    "    stream = openai_client.chat.completions.create(\n",
    "        model=\"gpt-4o\",\n",
    "        messages=input_messages,\n",
    "        stream=True,\n",
    "        stream_options=ChatCompletionStreamOptionsParam(include_usage=True),\n",
    "        **invocation_parameters,\n",
    "    )\n",
    "    for chunk in stream:\n",
    "        yield chunk\n",
    "\n",
    "\n",
    "input_messages = [\n",
    "    ChatCompletionUserMessageParam(\n",
    "        role=\"user\",\n",
    "        content=\"Who won the World Cup in 2022?\",\n",
    "    )\n",
    "]\n",
    "invocation_parameters = {\n",
    "    \"temperature\": 0.5,\n",
    "}\n",
    "async for chunk in async_chat_stream(input_messages, invocation_parameters):\n",
    "    print(chunk)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Chains"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "with tracer.start_as_current_span(\n",
    "    \"chain-span-with-plain-text-io\",\n",
    "    openinference_span_kind=\"chain\",\n",
    ") as span:\n",
    "    span.set_input(\"input\")\n",
    "    span.set_output(\"output\")\n",
    "    span.set_status(Status(StatusCode.OK))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Context Manager"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "with tracer.start_as_current_span(\n",
    "    \"chain-span-with-json-io\",\n",
    "    openinference_span_kind=\"chain\",\n",
    ") as span:\n",
    "    span.set_input(\n",
    "        {\"input-key\": \"input-value\"},\n",
    "    )\n",
    "    span.set_output(\n",
    "        json.dumps({\"output-key\": \"output-value\"}),\n",
    "        mime_type=\"application/json\",\n",
    "    )\n",
    "    span.set_status(Status(StatusCode.OK))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "with tracer.start_as_current_span(\n",
    "    \"chain-span-with-attribute-getters\",\n",
    "    attributes={\n",
    "        **get_span_kind_attributes(\"chain\"),\n",
    "        **get_input_attributes(\"input\"),\n",
    "    },\n",
    ") as span:\n",
    "    span.set_attributes(get_output_attributes(\"output\"))\n",
    "    span.set_status(Status(StatusCode.OK))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class InputModel(pydantic.BaseModel):\n",
    "    input: str\n",
    "\n",
    "\n",
    "@dataclass\n",
    "class OutputModel:\n",
    "    output: str\n",
    "\n",
    "\n",
    "with tracer.start_as_current_span(\n",
    "    \"chain-span-with-pydantic-input-and-dataclass-output\",\n",
    "    openinference_span_kind=\"chain\",\n",
    ") as span:\n",
    "    span.set_input(InputModel(input=\"input\"))\n",
    "    span.set_output(OutputModel(output=\"output\"))\n",
    "    span.set_status(Status(StatusCode.OK))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Decorator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain\n",
    "def decorated_chain_with_plain_text_output(input: str) -> str:\n",
    "    return \"output\"\n",
    "\n",
    "\n",
    "decorated_chain_with_plain_text_output(\"input\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain\n",
    "def decorated_chain_with_json_output(input: str) -> Dict[str, Any]:\n",
    "    return {\"output\": \"output\"}\n",
    "\n",
    "\n",
    "decorated_chain_with_json_output(\"input\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain()\n",
    "def decorated_chain_with_no_parameters(input: str) -> Dict[str, Any]:\n",
    "    return {\"output\": \"output\"}\n",
    "\n",
    "\n",
    "decorated_chain_with_no_parameters(\"input\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain(name=\"decorated-chain-with-overriden-name\")\n",
    "def this_name_should_be_overriden(input: str) -> Dict[str, Any]:\n",
    "    return {\"output\": \"output\"}\n",
    "\n",
    "\n",
    "this_name_should_be_overriden(\"input\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def chain_with_decorator_applied_as_function(input: str) -> Dict[str, Any]:\n",
    "    return {\"output\": \"output\"}\n",
    "\n",
    "\n",
    "decorated = tracer.chain(chain_with_decorator_applied_as_function)\n",
    "decorated(\"input\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def this_name_should_be_overriden_with_decorator_applied_as_function_with_parameters(\n",
    "    input: str,\n",
    ") -> Dict[str, Any]:\n",
    "    return {\"output\": \"output\"}\n",
    "\n",
    "\n",
    "decorated = tracer.chain(\n",
    "    name=\"decorated-chain-with-decorator-applied-as-function-with-overriden-name\"\n",
    ")(this_name_should_be_overriden_with_decorator_applied_as_function_with_parameters)\n",
    "decorated(\"input\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain\n",
    "async def decorated_async_chain(input: str) -> str:\n",
    "    return \"output\"\n",
    "\n",
    "\n",
    "await decorated_async_chain(\"input\")  # type: ignore[top-level-await]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain\n",
    "def decorated_chain_with_error(input: str) -> str:\n",
    "    raise ValueError(\"error\")\n",
    "\n",
    "\n",
    "try:\n",
    "    decorated_chain_with_error(\"input\")\n",
    "except ValueError as e:\n",
    "    print(e)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain\n",
    "def decorated_chain_with_child_span(input: str) -> str:\n",
    "    with tracer.start_as_current_span(\n",
    "        \"child-span\",\n",
    "        openinference_span_kind=\"chain\",\n",
    "        attributes=get_input_attributes(\"child-span-input\"),\n",
    "    ) as child_span:\n",
    "        output = \"output\"\n",
    "        child_span.set_output(output)\n",
    "        child_span.set_status(Status(StatusCode.OK))\n",
    "        return output\n",
    "\n",
    "\n",
    "decorated_chain_with_child_span(\"input\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain\n",
    "def decorated_chain_with_child_span_error(input: str) -> str:\n",
    "    with tracer.start_as_current_span(\n",
    "        \"child-span\",\n",
    "        openinference_span_kind=\"chain\",\n",
    "        attributes=get_input_attributes(\"child-span-input\"),\n",
    "    ):\n",
    "        raise ValueError(\"error\")\n",
    "\n",
    "\n",
    "try:\n",
    "    decorated_chain_with_child_span_error(\"input\")\n",
    "except ValueError as e:\n",
    "    print(e)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ChainRunner:\n",
    "    @tracer.chain\n",
    "    def decorated_chain_method(self, input1: str, input2: str) -> str:\n",
    "        return \"output\"\n",
    "\n",
    "\n",
    "chain_runner = ChainRunner()\n",
    "chain_runner.decorated_chain_method(\"input1\", \"input2\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain\n",
    "def decorated_chain_with_input_and_output_set_inside_the_wrapped_function(input: str) -> str:\n",
    "    span = get_current_span()\n",
    "    span.set_input(\"overridden-input\")  # type: ignore[attr-defined]\n",
    "    span.set_output(\"overridden-output\")  # type: ignore[attr-defined]\n",
    "    return \"output\"\n",
    "\n",
    "\n",
    "decorated_chain_with_input_and_output_set_inside_the_wrapped_function(\"input\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Suppress Tracing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "with suppress_tracing():\n",
    "    with tracer.start_as_current_span(\n",
    "        \"THIS-SPAN-SHOULD-NOT-BE-TRACED\",\n",
    "        openinference_span_kind=\"chain\",\n",
    "    ) as span:\n",
    "        span.set_input(\"input\")\n",
    "        span.set_output(\"output\")\n",
    "        span.set_status(Status(StatusCode.OK))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain\n",
    "def decorated_chain_with_suppress_tracing(input: str) -> str:\n",
    "    return \"output\"\n",
    "\n",
    "\n",
    "with suppress_tracing():\n",
    "    decorated_chain_with_suppress_tracing(\"input\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Context Attributes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "with using_attributes(session_id=\"123\"):\n",
    "    with tracer.start_as_current_span(\n",
    "        \"chain-span-with-context-attributes\",\n",
    "        openinference_span_kind=\"chain\",\n",
    "    ) as span:\n",
    "        span.set_input(\"input\")\n",
    "        span.set_output(\"output\")\n",
    "        span.set_status(Status(StatusCode.OK))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.chain\n",
    "def decorated_chain_with_context_attributes(input: str) -> str:\n",
    "    return \"output\"\n",
    "\n",
    "\n",
    "with using_attributes(session_id=\"123\"):\n",
    "    decorated_chain_with_context_attributes(\"input\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Agents"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Context Managers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "with tracer.start_as_current_span(\n",
    "    \"agent-span-with-plain-text-io\",\n",
    "    openinference_span_kind=\"agent\",\n",
    ") as span:\n",
    "    span.set_input(\"input\")\n",
    "    span.set_output(\"output\")\n",
    "    span.set_status(Status(StatusCode.OK))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Decorators"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.agent\n",
    "def decorated_agent(input: str) -> str:\n",
    "    return \"output\"\n",
    "\n",
    "\n",
    "decorated_agent(\"input\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Tools"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Context Managers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "with tracer.start_as_current_span(\n",
    "    \"tool-span\",\n",
    "    openinference_span_kind=\"tool\",\n",
    ") as span:\n",
    "    span.set_input(\"input\")\n",
    "    span.set_output(\"output\")\n",
    "    span.set_tool(\n",
    "        name=\"tool-name\",\n",
    "        description=\"tool-description\",\n",
    "        parameters={\"input\": \"input\"},\n",
    "    )\n",
    "    span.set_status(Status(StatusCode.OK))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "with tracer.start_as_current_span(\n",
    "    \"tool-span-with-getter\",\n",
    "    openinference_span_kind=\"tool\",\n",
    ") as span:\n",
    "    span.set_attributes(\n",
    "        get_tool_attributes(\n",
    "            name=\"tool-name\",\n",
    "            description=\"tool-description\",\n",
    "            parameters={\"input\": \"input\"},\n",
    "        )\n",
    "    )\n",
    "    span.set_status(Status(StatusCode.OK))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.tool\n",
    "def decorated_tool(input1: str, input2: int) -> None:\n",
    "    \"\"\"\n",
    "    tool-description\n",
    "    \"\"\"\n",
    "\n",
    "\n",
    "decorated_tool(\"input1\", 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.tool\n",
    "async def decorated_tool_async(input1: str, input2: int) -> None:\n",
    "    \"\"\"\n",
    "    tool-description\n",
    "    \"\"\"\n",
    "\n",
    "\n",
    "await decorated_tool_async(\"input1\", 1)  # type: ignore[top-level-await]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.tool(\n",
    "    name=\"decorated-tool-with-overriden-name\",\n",
    "    description=\"overriden-tool-description\",\n",
    ")\n",
    "def this_tool_name_should_be_overriden(input1: str, input2: int) -> None:\n",
    "    \"\"\"\n",
    "    this tool description should be overriden\n",
    "    \"\"\"\n",
    "\n",
    "\n",
    "this_tool_name_should_be_overriden(\"input1\", 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tracer.tool\n",
    "def tool_with_changes_inside_the_wrapped_function(input1: str, input2: int) -> str:\n",
    "    span = get_current_span()\n",
    "    print(type(span))\n",
    "    span.set_input(\"inside-input\")  # type: ignore[attr-defined]\n",
    "    span.set_output(\"inside-output\")  # type: ignore[attr-defined]\n",
    "    span.set_tool(  # type: ignore[attr-defined]\n",
    "        name=\"inside-tool-name\",\n",
    "        description=\"inside-tool-description\",\n",
    "        parameters={\"inside-input\": \"inside-input\"},\n",
    "    )\n",
    "    return \"output\"\n",
    "\n",
    "\n",
    "tool_with_changes_inside_the_wrapped_function(\"input1\", 1)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
