{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd",
   "metadata": {},
   "source": [
    "# Additional End-of-Week Exercise - Week 2\n",
    "\n",
    "Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n",
    "\n",
    "This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n",
    "\n",
    "If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n",
    "\n",
    "I will publish a full solution here soon - unless someone beats me to it...\n",
    "\n",
    "There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI for a course (like this one!). I can't wait to see your results."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "05fc552b",
   "metadata": {},
   "outputs": [],
   "source": [
    "import openai\n",
    "import anthropic\n",
    "import gradio as gr\n",
    "import dotenv\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a07e7793-b8f5-44f4-aded-5562f633271a",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Chatbot:\n",
    "  def __init__(self, apiKey, publisher='openai'):\n",
    "    if publisher not in ['openai', 'claude']:\n",
    "      raise ValueError(f\"publisher must be openai or claude, but got {publisher}\")\n",
    "    self.publisher = publisher\n",
    "    self.systemPrompt = None\n",
    "    self.historyPrompt = []\n",
    "    self.llm = openai.OpenAI(api_key=apiKey) if publisher == 'openai' else anthropic.Anthropic(api_key=apiKey)\n",
    "    \n",
    "  def setSystemPrompt(self, systemPrompt:str):\n",
    "    self.systemPrompt = systemPrompt.strip()\n",
    "    if len(self.historyPrompt) == 0:\n",
    "      self.historyPrompt.append({\"role\": \"system\", \"content\": f\"{systemPrompt}\"})\n",
    "    else:\n",
    "      self.historyPrompt[0] = {\"role\": \"system\", \"content\": f\"{systemPrompt}\"}\n",
    "      \n",
    "  def _prompt2obj(self, role:str, prompt:str):\n",
    "    return {\n",
    "      \"role\": role,\n",
    "      \"content\": prompt.strip()\n",
    "    }\n",
    "    \n",
    "  def unpackText(self, chunk):\n",
    "    text = ''\n",
    "    if self.publisher == 'openai':\n",
    "      text = chunk.choices[0].delta.content or ''\n",
    "    elif self.publisher == 'claude':\n",
    "      if chunk.type == \"content_block_delta\":\n",
    "        text = chunk.delta.text or ''\n",
    "        \n",
    "    return text\n",
    "      \n",
    "  def chat(self, message):\n",
    "    self.historyPrompt.append(self._prompt2obj(\"user\", message))\n",
    "    completeReply = \"\"\n",
    "\n",
    "    if self.publisher == 'openai':\n",
    "      stream = self.llm.chat.completions.create(model='gpt-4o-mini',\n",
    "                                            messages=self.historyPrompt,\n",
    "                                            stream=True)\n",
    "    elif self.publisher == 'claude':\n",
    "      stream = self.llm.messages.create(system=self.historyPrompt[0][\"content\"],\n",
    "                                          model=\"claude-sonnet-4-20250514\",\n",
    "                                          max_tokens=200,\n",
    "                                          messages=self.historyPrompt[1:],\n",
    "                                          stream=True)\n",
    "    \n",
    "    for chunk in stream:\n",
    "      completeReply += self.unpackText(chunk)\n",
    "      yield completeReply\n",
    "      \n",
    "  \n",
    "    self.historyPrompt.append(self._prompt2obj(\"assistant\", completeReply))\n",
    "    \n",
    "  def _gradioChatWrapper(self):\n",
    "    def gradioChatFn(message, history):\n",
    "      for partial_reply in self.chat(message):\n",
    "        yield partial_reply\n",
    "    return gradioChatFn\n",
    "  \n",
    "  def getAllPrompt(self):\n",
    "    return self.historyPrompt\n",
    "    \n",
    "  def run(self):\n",
    "    gradioFn = self._gradioChatWrapper()\n",
    "    gr.ChatInterface(fn=gradioFn, type=\"messages\").launch()\n",
    "    \n",
    "  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1fca53e8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load API keys from .env; fail fast if the key this cell needs is missing.\n",
    "dotenv.load_dotenv(\".env\", override=True)\n",
    "openaiKey = os.getenv(\"OPENAI_API_KEY\")\n",
    "claudeKey = os.getenv(\"ANTHROPIC_API_KEY\")\n",
    "if not openaiKey:\n",
    "  raise RuntimeError(\"OPENAI_API_KEY not found in the environment or .env file\")\n",
    "\n",
    "openaiInfo = {\n",
    "  'apiKey': openaiKey,\n",
    "  'publisher': 'openai'\n",
    "}\n",
    "claudeInfo = {\n",
    "  'apiKey': claudeKey,\n",
    "  'publisher': 'claude'\n",
    "}\n",
    "\n",
    "SYSTEM_PROMPT = \"\"\"\n",
    "You are a technical expert who answers every question with a clear, accurate explanation.\n",
    "\"\"\"\n",
    "\n",
    "openaiChatbot = Chatbot(**openaiInfo)\n",
    "openaiChatbot.setSystemPrompt(SYSTEM_PROMPT)\n",
    "openaiChatbot.run()\n",
    "\n",
    "# To use Claude instead, comment out the three lines above and run:\n",
    "# claudeChatbot = Chatbot(**claudeInfo)\n",
    "# claudeChatbot.setSystemPrompt(SYSTEM_PROMPT)\n",
    "# claudeChatbot.run()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "59a2ac0f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "3.10.15",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
