{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "authorship_tag": "ABX9TyNoeYSfwY4STodRdSsGkens",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/madaan/memprompt/blob/main/CompletionAndChat.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "\n",
        "%pip install openai==0.27.2"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "nGCAmm0Ko-v4",
        "outputId": "34ea7fcd-dca7-4427-f594-6642414a4e3b"
      },
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting openai\n",
            "  Downloading openai-0.27.2-py3-none-any.whl (70 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m70.1/70.1 KB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: requests>=2.20 in /usr/local/lib/python3.9/dist-packages (from openai) (2.27.1)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.9/dist-packages (from openai) (4.65.0)\n",
            "Collecting aiohttp\n",
            "  Downloading aiohttp-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.0 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.0/1.0 MB\u001b[0m \u001b[31m19.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests>=2.20->openai) (2.0.12)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests>=2.20->openai) (2022.12.7)\n",
            "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests>=2.20->openai) (1.26.15)\n",
            "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests>=2.20->openai) (3.4)\n",
            "Collecting yarl<2.0,>=1.0\n",
            "  Downloading yarl-1.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (264 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m264.6/264.6 KB\u001b[0m \u001b[31m21.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.9/dist-packages (from aiohttp->openai) (22.2.0)\n",
            "Collecting multidict<7.0,>=4.5\n",
            "  Downloading multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (114 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m114.2/114.2 KB\u001b[0m \u001b[31m14.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hCollecting frozenlist>=1.1.1\n",
            "  Downloading frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (158 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m158.8/158.8 KB\u001b[0m \u001b[31m16.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hCollecting aiosignal>=1.1.2\n",
            "  Downloading aiosignal-1.3.1-py3-none-any.whl (7.6 kB)\n",
            "Collecting async-timeout<5.0,>=4.0.0a3\n",
            "  Downloading async_timeout-4.0.2-py3-none-any.whl (5.8 kB)\n",
            "Installing collected packages: multidict, frozenlist, async-timeout, yarl, aiosignal, aiohttp, openai\n",
            "Successfully installed aiohttp-3.8.4 aiosignal-1.3.1 async-timeout-4.0.2 frozenlist-1.3.3 multidict-6.0.4 openai-0.27.2 yarl-1.8.2\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 5,
      "metadata": {
        "id": "_IT3UtNhozOZ"
      },
      "outputs": [],
      "source": [
        "from collections import Counter\n",
        "import os\n",
        "from typing import Dict, Any\n",
        "import openai\n",
        "import random\n",
        "import time\n",
        "\n",
        "\n",
        "# from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_handle_rate_limits.ipynb\n",
        "def retry_with_exponential_backoff(\n",
        "    func,\n",
        "    initial_delay: float = 1,\n",
        "    exponential_base: float = 2,\n",
        "    jitter: bool = True,\n",
        "    max_retries: int = 10,\n",
        "    errors: tuple = (openai.error.RateLimitError,),\n",
        "):\n",
        "    \"\"\"Retry a function with exponential backoff.\"\"\"\n",
        "\n",
        "    def wrapper(*args, **kwargs):\n",
        "        # Initialize variables\n",
        "        num_retries = 0\n",
        "        delay = initial_delay\n",
        "\n",
        "        # Loop until a successful response or max_retries is hit or an exception is raised\n",
        "        while True:\n",
        "            try:\n",
        "\n",
        "                return func(*args, **kwargs)\n",
        "\n",
        "            # Retry on specified errors\n",
        "            except errors as e:\n",
        "                # Increment retries\n",
        "                num_retries += 1\n",
        "\n",
        "                # Check if max retries has been reached\n",
        "                if num_retries > max_retries:\n",
        "                    raise Exception(f\"Maximum number of retries ({max_retries}) exceeded.\")\n",
        "\n",
        "                # Increment the delay\n",
        "                delay *= exponential_base * (1 + jitter * random.random())\n",
        "\n",
        "                # Sleep for the delay\n",
        "                time.sleep(delay)\n",
        "\n",
        "            # Raise exceptions for any errors not specified\n",
        "            except Exception as e:\n",
        "                raise e\n",
        "\n",
        "    return wrapper\n",
        "\n",
        "class BaseAPIWrapper:\n",
        "    @staticmethod\n",
        "    def call(\n",
        "        prompt: str,\n",
        "        max_tokens: int,\n",
        "        engine: str,\n",
        "        stop_token: str,\n",
        "        temperature: float,\n",
        "        num_completions: int = 1,\n",
        "    ) -> dict:\n",
        "        raise NotImplementedError()\n",
        "\n",
        "    @staticmethod\n",
        "    def get_first_response(response) -> Dict[str, Any]:\n",
        "        raise NotImplementedError()\n",
        "\n",
        "    @staticmethod\n",
        "    def get_majority_answer(response) -> Dict[str, Any]:\n",
        "        raise NotImplementedError()\n",
        "\n",
        "    @staticmethod\n",
        "    def get_all_responses(response) -> Dict[str, Any]:\n",
        "        raise NotImplementedError()\n",
        "\n",
        "\n",
        "class CompletionAPIWrapper(BaseAPIWrapper):\n",
        "    @staticmethod\n",
        "    @retry_with_exponential_backoff\n",
        "    def call(\n",
        "        prompt: str,\n",
        "        max_tokens: int,\n",
        "        engine: str,\n",
        "        stop_token: str,\n",
        "        temperature: float,\n",
        "        num_completions: int = 1,\n",
        "    ) -> dict:\n",
        "        \"\"\"Calls the completion API.\n",
        "\n",
        "        if the num_completions is > 2, we call the API multiple times. This is to prevent\n",
        "        overflow issues that can occur when the number of completions is too large.\n",
        "        \"\"\"\n",
        "        if num_completions > 2:\n",
        "            response_combined = dict()\n",
        "            num_completions_remaining = num_completions\n",
        "            for i in range(0, num_completions, 2):\n",
        "                # note that we are calling the same function --- this prevents backoff from being reset for the entire function\n",
        "                response = CompletionAPIWrapper.call(\n",
        "                    prompt=prompt,\n",
        "                    max_tokens=max_tokens,\n",
        "                    engine=engine,\n",
        "                    stop_token=stop_token,\n",
        "                    temperature=temperature,\n",
        "                    num_completions=min(num_completions_remaining, 2),\n",
        "                )\n",
        "                num_completions_remaining -= 2\n",
        "                if i == 0:\n",
        "                    response_combined = response\n",
        "                else:\n",
        "                    response_combined[\"choices\"] += response[\"choices\"]\n",
        "            return response_combined\n",
        "        response = openai.Completion.create(\n",
        "            engine=engine,\n",
        "            prompt=prompt,\n",
        "            temperature=temperature,\n",
        "            max_tokens=max_tokens,\n",
        "            top_p=1,\n",
        "            stop=[stop_token],\n",
        "            # logprobs=3,\n",
        "            n=num_completions,\n",
        "        )\n",
        "        return response\n",
        "\n",
        "    @staticmethod\n",
        "    def get_first_response(response) -> Dict[str, Any]:\n",
        "        \"\"\"Returns the first response from the list of responses.\"\"\"\n",
        "        text = response[\"choices\"][0][\"text\"]\n",
        "        return text\n",
        "\n",
        "    @staticmethod\n",
        "    def get_majority_answer(response) -> str:\n",
        "        \"\"\"Returns the most common completion text; on a tie, the first response.\"\"\"\n",
        "        answers = Counter(choice[\"text\"] for choice in response[\"choices\"])\n",
        "        # Guard with len(answers) > 1: most_common(2)[1] raises IndexError when\n",
        "        # every completion is identical (only one distinct answer exists).\n",
        "        if len(answers) > 1 and answers.most_common(1)[0][1] == answers.most_common(2)[1][1]:\n",
        "            return CompletionAPIWrapper.get_first_response(response)\n",
        "\n",
        "        return answers.most_common(1)[0][0]\n",
        "\n",
        "    @staticmethod\n",
        "    def get_all_responses(response) -> Dict[str, Any]:\n",
        "        \"\"\"Returns the list of responses.\"\"\"\n",
        "        return [choice[\"text\"] for choice in response[\"choices\"]]  # type: ignore\n",
        "\n",
        "\n",
        "class ChatGPTAPIWrapper(BaseAPIWrapper):\n",
        "    @staticmethod\n",
        "    @retry_with_exponential_backoff\n",
        "    def call(\n",
        "        prompt: str,\n",
        "        max_tokens: int,\n",
        "        engine: str,\n",
        "        stop_token: str,\n",
        "        temperature: float,\n",
        "        num_completions: int = 1,\n",
        "    ) -> dict:\n",
        "        \"\"\"Calls the Chat API.\n",
        "\n",
        "        if the num_completions is > 2, we call the API multiple times. This is to prevent\n",
        "        overflow issues that can occur when the number of completions is too large.\n",
        "        \"\"\"\n",
        "        messages = [\n",
        "            {\n",
        "                \"role\": \"system\",\n",
        "                \"content\": \"You are ChatGPT, a large language model trained by OpenAI.\",\n",
        "            },\n",
        "            {\"role\": \"user\", \"content\": prompt},\n",
        "        ]\n",
        "        if num_completions > 2:\n",
        "            response_combined = dict()\n",
        "            num_completions_remaining = num_completions\n",
        "            for i in range(0, num_completions, 2):\n",
        "                # note that we are calling the same function --- this prevents backoff from being reset for the entire function\n",
        "                response = ChatGPTAPIWrapper.call(\n",
        "                    prompt=prompt,\n",
        "                    max_tokens=max_tokens,\n",
        "                    engine=engine,\n",
        "                    stop_token=stop_token,\n",
        "                    temperature=temperature,\n",
        "                    num_completions=min(num_completions_remaining, 2),\n",
        "                )\n",
        "                num_completions_remaining -= 2\n",
        "                if i == 0:\n",
        "                    response_combined = response\n",
        "                else:\n",
        "                    response_combined[\"choices\"] += response[\"choices\"]\n",
        "            return response_combined\n",
        "        response = openai.ChatCompletion.create(\n",
        "            model=engine,\n",
        "            messages=messages,\n",
        "            temperature=temperature,\n",
        "            max_tokens=max_tokens,\n",
        "            top_p=1,\n",
        "            stop=[stop_token],\n",
        "            # logprobs=3,\n",
        "            n=num_completions,\n",
        "        )\n",
        "        return response\n",
        "\n",
        "    @staticmethod\n",
        "    def get_first_response(response) -> Dict[str, Any]:\n",
        "        \"\"\"Returns the first response from the list of responses.\"\"\"\n",
        "        text = response[\"choices\"][0][\"message\"][\"content\"]\n",
        "        return text\n",
        "\n",
        "    @staticmethod\n",
        "    def get_majority_answer(response) -> str:\n",
        "        \"\"\"Returns the most common reply content; on a tie, the first response.\"\"\"\n",
        "        answers = Counter(choice[\"message\"][\"content\"] for choice in response[\"choices\"])\n",
        "        # Guard with len(answers) > 1: most_common(2)[1] raises IndexError when\n",
        "        # every completion is identical (only one distinct answer exists).\n",
        "        if len(answers) > 1 and answers.most_common(1)[0][1] == answers.most_common(2)[1][1]:\n",
        "            return ChatGPTAPIWrapper.get_first_response(response)\n",
        "\n",
        "        return answers.most_common(1)[0][0]\n",
        "\n",
        "    @staticmethod\n",
        "    def get_all_responses(response) -> Dict[str, Any]:\n",
        "        \"\"\"Returns the list of responses.\"\"\"\n",
        "        return [choice[\"message\"][\"content\"] for choice in response[\"choices\"]]  # type: ignore\n",
        "\n",
        "\n",
        "class OpenaiAPIWrapper:\n",
        "    chat_engines = [\"gpt-3.5-turbo\", \"gpt-4\", \"gpt-3.5-turbo-0301\", \"gpt-4-0314\"]\n",
        "\n",
        "    @staticmethod\n",
        "    def get_api_wrapper(engine: str) -> \"type[BaseAPIWrapper]\":\n",
        "        # Returns the wrapper *class* (not an instance): chat engines go through\n",
        "        # the ChatCompletion endpoint, everything else through Completion.\n",
        "        if engine in OpenaiAPIWrapper.chat_engines:\n",
        "            return ChatGPTAPIWrapper\n",
        "        else:\n",
        "            return CompletionAPIWrapper\n",
        "\n",
        "\n",
        "    @staticmethod\n",
        "    def call(\n",
        "        prompt: str,\n",
        "        max_tokens: int,\n",
        "        engine: str,\n",
        "        stop_token: str,\n",
        "        temperature: float,\n",
        "        num_completions: int = 1,\n",
        "    ) -> dict:\n",
        "        api_wrapper = OpenaiAPIWrapper.get_api_wrapper(engine)\n",
        "        return api_wrapper.call(prompt, max_tokens, engine, stop_token, temperature, num_completions)\n",
        "\n",
        "    @staticmethod\n",
        "    def get_first_response(response) -> Dict[str, Any]:\n",
        "        api_wrapper = OpenaiAPIWrapper.get_api_wrapper(response[\"model\"])\n",
        "        return api_wrapper.get_first_response(response)\n",
        "\n",
        "    @staticmethod\n",
        "    def get_majority_answer(response) -> str:\n",
        "        api_wrapper = OpenaiAPIWrapper.get_api_wrapper(response[\"model\"])\n",
        "        return api_wrapper.get_majority_answer(response)\n",
        "\n",
        "    @staticmethod\n",
        "    def get_all_responses(response):\n",
        "        # Complete the BaseAPIWrapper dispatch surface (get_first_response and\n",
        "        # get_majority_answer are forwarded above; this one was missing).\n",
        "        api_wrapper = OpenaiAPIWrapper.get_api_wrapper(response[\"model\"])\n",
        "        return api_wrapper.get_all_responses(response)\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "\n",
        "def test_completion():\n",
        "    prompt = \"\"\"Optimize the following Python code:\n",
        "  \n",
        "# Start of code\n",
        "n = int(input())\n",
        "result = 0\n",
        "for i in range(1, n + 1):\n",
        "  result += i\n",
        "return result\n",
        "\"\"\"\n",
        "    engine = \"text-davinci-003\"\n",
        "    num_completions = 3\n",
        "    max_tokens = 300\n",
        "    response = OpenaiAPIWrapper.call(\n",
        "        prompt=prompt,\n",
        "        max_tokens=max_tokens,\n",
        "        engine=engine,\n",
        "        stop_token=\"Optimize the following Python code:\\n\\n\",\n",
        "        temperature=0.7,\n",
        "        num_completions=num_completions,\n",
        "    )\n",
        "    print(response)\n",
        "    print(OpenaiAPIWrapper.get_first_response(response))\n",
        "    print(OpenaiAPIWrapper.get_majority_answer(response))\n",
        "\n",
        "\n",
        "def test_chat():\n",
        "    prompt = \"\"\"Optimize the following Python code:\n",
        "  \n",
        "# Start of code\n",
        "n = int(input())\n",
        "result = 0\n",
        "for i in range(1, n + 1):\n",
        "  result += i\n",
        "return result\n",
        "\"\"\" \n",
        "    engine = \"gpt-3.5-turbo\"\n",
        "    num_completions = 3\n",
        "    max_tokens = 300\n",
        "    response = OpenaiAPIWrapper.call(\n",
        "        prompt=prompt,\n",
        "        max_tokens=max_tokens,\n",
        "        engine=engine,\n",
        "        stop_token=\"End of code\",\n",
        "        temperature=0.7,\n",
        "        num_completions=num_completions,\n",
        "    )\n",
        "    print(response)\n",
        "    print(OpenaiAPIWrapper.get_first_response(response))\n",
        "    print(OpenaiAPIWrapper.get_majority_answer(response))\n",
        "\n"
      ],
      "metadata": {
        "id": "Iwxc6pwPo2QE"
      },
      "execution_count": 7,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import os\n",
        "from getpass import getpass\n",
        "\n",
        "# Never hardcode API keys in a notebook: read from the environment,\n",
        "# falling back to a hidden interactive prompt.\n",
        "openai.api_key = os.environ.get(\"OPENAI_API_KEY\") or getpass(\"OpenAI API key: \")"
      ],
      "metadata": {
        "id": "9cddTQ34pG5R"
      },
      "execution_count": 8,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "\n",
        "if __name__ == \"__main__\":\n",
        "    # test the API\n",
        "    print(\"Testing completion API\")\n",
        "    test_completion()\n",
        "    print(\"Testing chat API\")\n",
        "    test_chat()"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "BAc0JQ1Xo38f",
        "outputId": "164ea5df-57fa-4335-a513-a1eab90ff708"
      },
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Testing completion API\n",
            "{\n",
            "  \"choices\": [\n",
            "    {\n",
            "      \"finish_reason\": null,\n",
            "      \"index\": 0,\n",
            "      \"logprobs\": null,\n",
            "      \"text\": \"# End of code\\n\\ndef calculate_sum(n): \\n  return (n * (n + 1)) // 2\\n\\nn = int(input())\\nresult = calculate_sum(n) \\nprint(result)\"\n",
            "    },\n",
            "    {\n",
            "      \"finish_reason\": \"stop\",\n",
            "      \"index\": 1,\n",
            "      \"logprobs\": null,\n",
            "      \"text\": \"# End of code\\n\\ndef sum(n):\\n  return (n*(n+1))//2\\n\\nn = int(input())\\nresult = sum(n)\\nprint(result)\"\n",
            "    },\n",
            "    {\n",
            "      \"finish_reason\": \"stop\",\n",
            "      \"index\": 0,\n",
            "      \"logprobs\": null,\n",
            "      \"text\": \"# End of code\\n\\n# Optimized code\\nn = int(input())\\nreturn n * (n+1) // 2\"\n",
            "    }\n",
            "  ],\n",
            "  \"created\": 1680366579,\n",
            "  \"id\": \"cmpl-70YOhXoWRwEBhVBiMWGozGebWDLkL\",\n",
            "  \"model\": \"text-davinci-003\",\n",
            "  \"object\": \"text_completion\",\n",
            "  \"usage\": {\n",
            "    \"completion_tokens\": 94,\n",
            "    \"prompt_tokens\": 46,\n",
            "    \"total_tokens\": 140\n",
            "  }\n",
            "}\n",
            "# End of code\n",
            "\n",
            "def calculate_sum(n): \n",
            "  return (n * (n + 1)) // 2\n",
            "\n",
            "n = int(input())\n",
            "result = calculate_sum(n) \n",
            "print(result)\n",
            "# End of code\n",
            "\n",
            "def calculate_sum(n): \n",
            "  return (n * (n + 1)) // 2\n",
            "\n",
            "n = int(input())\n",
            "result = calculate_sum(n) \n",
            "print(result)\n",
            "Testing chat API\n",
            "{\n",
            "  \"choices\": [\n",
            "    {\n",
            "      \"finish_reason\": \"stop\",\n",
            "      \"index\": 0,\n",
            "      \"message\": {\n",
            "        \"content\": \"Here's an optimized version of the code:\\n\\n# Start of code\\nn = int(input())\\nresult = (n * (n + 1)) // 2\\nreturn result\\n\\nExplanation:\\n\\nThe original code uses a loop to calculate the sum of integers from 1 to n, whereas the optimized code uses a mathematical formula to calculate the same result.\\n\\nThe formula used is:\\n\\nsum = n * (n + 1) / 2\\n\\nThis formula calculates the sum of integers from 1 to n in constant time, regardless of the value of n.\\n\\nThe optimized code replaces the loop with this formula, resulting in a significant improvement in performance.\",\n",
            "        \"role\": \"assistant\"\n",
            "      }\n",
            "    },\n",
            "    {\n",
            "      \"finish_reason\": \"stop\",\n",
            "      \"index\": 1,\n",
            "      \"message\": {\n",
            "        \"content\": \"Here's an optimized version of the code:\\n\\n# Start of optimized code\\nn = int(input())\\nresult = (n * (n + 1)) // 2\\nreturn result\\n\\nThe optimized version of the code uses the formula for the sum of the first n natural numbers, which is n(n+1)/2. This formula is mathematically equivalent to the original code, but it does not require a loop to compute the sum. Instead, it uses a single arithmetic operation to calculate the result directly, which is more efficient.\",\n",
            "        \"role\": \"assistant\"\n",
            "      }\n",
            "    },\n",
            "    {\n",
            "      \"finish_reason\": \"stop\",\n",
            "      \"index\": 0,\n",
            "      \"message\": {\n",
            "        \"content\": \"Here's an optimized version of the code:\\n\\n# Start of code\\nn = int(input())\\nresult = (n*(n+1)) // 2\\nreturn result\\n\\nExplanation:\\n\\nInstead of iterating over each number from 1 to n and adding it to the result variable, we can use a mathematical formula to calculate the sum of all numbers from 1 to n. The formula is:\\n\\nsum = n*(n+1) / 2\\n\\nThis formula is derived from the fact that the sum of the first n natural numbers is equal to n*(n+1)/2. \\n\\nUsing this formula, we can calculate the sum of all numbers from 1 to n in constant time, which is much faster than iterating over each number.\",\n",
            "        \"role\": \"assistant\"\n",
            "      }\n",
            "    }\n",
            "  ],\n",
            "  \"created\": 1680366581,\n",
            "  \"id\": \"chatcmpl-70YOjC8rOe4NNvNYEINAQrh9lZhz7\",\n",
            "  \"model\": \"gpt-3.5-turbo-0301\",\n",
            "  \"object\": \"chat.completion\",\n",
            "  \"usage\": {\n",
            "    \"completion_tokens\": 240,\n",
            "    \"prompt_tokens\": 71,\n",
            "    \"total_tokens\": 311\n",
            "  }\n",
            "}\n",
            "Here's an optimized version of the code:\n",
            "\n",
            "# Start of code\n",
            "n = int(input())\n",
            "result = (n * (n + 1)) // 2\n",
            "return result\n",
            "\n",
            "Explanation:\n",
            "\n",
            "The original code uses a loop to calculate the sum of integers from 1 to n, whereas the optimized code uses a mathematical formula to calculate the same result.\n",
            "\n",
            "The formula used is:\n",
            "\n",
            "sum = n * (n + 1) / 2\n",
            "\n",
            "This formula calculates the sum of integers from 1 to n in constant time, regardless of the value of n.\n",
            "\n",
            "The optimized code replaces the loop with this formula, resulting in a significant improvement in performance.\n",
            "Here's an optimized version of the code:\n",
            "\n",
            "# Start of code\n",
            "n = int(input())\n",
            "result = (n * (n + 1)) // 2\n",
            "return result\n",
            "\n",
            "Explanation:\n",
            "\n",
            "The original code uses a loop to calculate the sum of integers from 1 to n, whereas the optimized code uses a mathematical formula to calculate the same result.\n",
            "\n",
            "The formula used is:\n",
            "\n",
            "sum = n * (n + 1) / 2\n",
            "\n",
            "This formula calculates the sum of integers from 1 to n in constant time, regardless of the value of n.\n",
            "\n",
            "The optimized code replaces the loop with this formula, resulting in a significant improvement in performance.\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [],
      "metadata": {
        "id": "4AhnoBPupypp"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}