{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "3xW4O0zuKfWD"
      },
      "outputs": [],
      "source": [
        "# Standard library\n",
        "import os\n",
        "\n",
        "# Third-party / Colab helpers\n",
        "from google.colab import userdata\n",
        "from openai import OpenAI"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Read the key from Colab's secret store and fail fast with a clear message\n",
        "# if it is missing (otherwise the error surfaces later as a confusing 401).\n",
        "api_key = userdata.get('OPENAI_API_KEY')\n",
        "assert api_key, \"OPENAI_API_KEY not found - add it under Colab's Secrets (key icon in the sidebar)\"\n",
        "\n",
        "MODEL = \"gpt-4o-mini\"\n",
        "\n",
        "openai = OpenAI(api_key=api_key)"
      ],
      "metadata": {
        "id": "vUydnLeLKs03"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# Chat Completion API\n",
        "\n",
        "https://platform.openai.com/docs/guides/text?api-mode=chat"
      ],
      "metadata": {
        "id": "5wlqKfVBKyjD"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "The `developer` role is replacing the `system` role.\n",
        "\n",
        "https://cdn.openai.com/spec/model-spec-2024-05-08.html#definitions\n",
        "\n",
        "https://platform.openai.com/docs/guides/prompt-engineering"
      ],
      "metadata": {
        "id": "jksPaX9wNgAr"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "![developer-role.png](developer-role.png) <!-- TODO(review): original image URL was empty; verify the correct path/filename -->"
      ],
      "metadata": {
        "id": "1B2KLdxlNBw9"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "prompts = [\n",
        "    # Both roles work for now and may keep working in the future, but\n",
        "    # OpenAI's intention is clear: use the developer role going forward.\n",
        "    # { \"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n",
        "    { \"role\": \"developer\", \"content\": \"You are a helpful assistant\"},\n",
        "    { \"role\": \"user\", \"content\": \"Tell me a joke about the internet\"}\n",
        "]\n",
        "\n",
        "# Chat Completions API: `messages` takes a list of role/content dicts.\n",
        "response = openai.chat.completions.create(\n",
        "    model=MODEL,\n",
        "    messages=prompts\n",
        ")"
      ],
      "metadata": {
        "id": "dBrUkMCUKz0s"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Why generation ended: \"stop\" means the model finished its answer naturally.\n",
        "first_choice = response.choices[0]\n",
        "print(first_choice.finish_reason)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "WItchxDLK2x6",
        "outputId": "14521242-d748-4fbb-a185-821538f13b3c"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "stop\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# The generated text lives on the first choice's message.\n",
        "reply = response.choices[0].message\n",
        "print(reply.content)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "V2OvZRgFK38l",
        "outputId": "4b98ca90-afb1-4d3a-c9a2-c796142e07ff"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Why did the computer go to therapy?\n",
            "\n",
            "Because it had too many unresolved issues!\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "# Responses API\n",
        "\n",
        "https://platform.openai.com/docs/guides/text?api-mode=responses"
      ],
      "metadata": {
        "id": "tCzSosP5K-JR"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "prompts = [\n",
        "    # { \"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n",
        "    { \"role\": \"developer\", \"content\": \"You are a helpful assistant\"},\n",
        "    { \"role\": \"user\", \"content\": \"Tell me a joke about the internet\"}\n",
        "]\n",
        "\n",
        "response = openai.responses.create(\n",
        "    model=MODEL,\n",
        "    input=prompts  # `input` accepts either a plain string or a list of messages\n",
        ")"
      ],
      "metadata": {
        "id": "2IPLSF30K--S"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# `output_text` is the Responses API convenience shortcut for the reply text.\n",
        "print(response.output_text)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "zIxOV55hLJoz",
        "outputId": "7aeaf58a-52ca-4a90-abdc-40f088a23ac2"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Why did the computer go to therapy?\n",
            "\n",
            "Because it had too many bytes from the internet!\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Dump the raw Response object to inspect ids, model, usage, and defaults.\n",
        "# NOTE(review): the saved output below came from an earlier run, so its joke\n",
        "# differs from the `output_text` printed above.\n",
        "print(response)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "he7ebZZoLC08",
        "outputId": "211e8ed8-3cb6-4b3d-d582-6d09fa5b5cc9"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Response(id='resp_67e04d40c65c8192ad80e4b3c1b92c0e0a3f0ef2d3ca6501', created_at=1742753088.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-4o-mini-2024-07-18', object='response', output=[ResponseOutputMessage(id='msg_67e04d4136508192b84bb22a956d85330a3f0ef2d3ca6501', content=[ResponseOutputText(annotations=[], text='Why did the computer go to therapy?\\n\\nBecause it had too many unresolved issues with its cache!', type='output_text')], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, max_output_tokens=None, previous_response_id=None, reasoning=Reasoning(effort=None, generate_summary=None), status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text')), truncation='disabled', usage=ResponseUsage(input_tokens=42, output_tokens=20, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=62, input_tokens_details={'cached_tokens': 0}), user=None, store=True)\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Spot-check a few individual fields of the Response object.\n",
        "print(response.instructions)  # developer/system instructions (None here)\n",
        "print(response.tools)         # tools registered for this call (empty list)\n",
        "print(response.status)        # e.g. 'completed'\n",
        "print(response.store)         # True in the saved output above"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "2jbA57efLF8z",
        "outputId": "3eb282fb-cf60-4a98-a31b-e795fa60eb44"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "None\n",
            "[]\n",
            "completed\n",
            "True\n"
          ]
        }
      ]
    }
  ]
}