{
 "cells": [
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "---\n",
    "description: Gradio app.py\n",
    "output-file: app.html\n",
    "title: app\n",
    "\n",
    "---\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    ""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "language": "python"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#| eval: false\n",
    "load_dotenv()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "[GPT4 streaming output example on hugging face 🤗](https://huggingface.co/spaces/ysharma/ChatGPT4/blob/main/app.py) \n",
    "[Gradio lite lets you insert Gradio app in browser JS](https://www.gradio.app/guides/gradio-lite) \n",
    "[Streaming output](https://www.gradio.app/main/guides/streaming-outputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "language": "python"
   },
   "outputs": [],
   "source": [
    "#| eval: false\n",
    "client = OpenAI()\n",
    "assistant = client.beta.assistants.create(\n",
    "    name=\"Vegan Recipe Finder\",\n",
    "    instructions=SYSTEM_PROMPT,\n",
    "    # + \"\\nChoose the best single matching recipe to the user's query out of the vegan recipe search returned recipes\",\n",
    "    model=\"gpt-4o\",\n",
    "    tools=[VEGAN_RECIPE_SEARCH_TOOL_SCHEMA],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "language": "python"
   },
   "outputs": [],
   "source": [
    "class EventHandler(AssistantEventHandler):\n",
    "    \"\"\"Streams assistant run events and executes the tool calls a run requires.\"\"\"\n",
    "\n",
    "    @override\n",
    "    def on_event(self, event):\n",
    "        # Retrieve events that are denoted with 'requires_action'\n",
    "        # since these will have our tool_calls\n",
    "        if event.event == \"thread.run.requires_action\":\n",
    "            run_id = event.data.id  # Retrieve the run ID from the event data\n",
    "            self.handle_requires_action(event.data, run_id)\n",
    "\n",
    "    def handle_requires_action(self, data, run_id):\n",
    "        # `data` is the run object from the requires_action event.\n",
    "        tool_outputs = []\n",
    "        for tool_call in data.required_action.submit_tool_outputs.tool_calls:\n",
    "            if tool_call.function.name == \"vegan_recipe_edamam_search\":\n",
    "                fn_args = json.loads(tool_call.function.arguments)\n",
    "                # Distinct name so the `data` parameter is not shadowed mid-loop.\n",
    "                result = vegan_recipe_edamam_search(\n",
    "                    query=fn_args.get(\"query\"),\n",
    "                )\n",
    "                tool_outputs.append({\"tool_call_id\": tool_call.id, \"output\": result})\n",
    "\n",
    "        self.submit_tool_outputs(tool_outputs, run_id)\n",
    "\n",
    "    def submit_tool_outputs(self, tool_outputs, run_id):\n",
    "        # Stream the continuation of the run with a fresh handler instance.\n",
    "        client.beta.threads.runs.submit_tool_outputs_stream(\n",
    "            thread_id=self.current_run.thread_id,\n",
    "            run_id=self.current_run.id,\n",
    "            tool_outputs=tool_outputs,\n",
    "            event_handler=EventHandler(),\n",
    "        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "---\n",
       "\n",
       "[source](https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/app.py#L22){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n",
       "\n",
       "### handle_requires_action\n",
       "\n",
       "> handle_requires_action (data)"
      ],
      "text/plain": [
       "---\n",
       "\n",
       "[source](https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/app.py#L22){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n",
       "\n",
       "### handle_requires_action\n",
       "\n",
       "> handle_requires_action (data)"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#| echo: false\n",
    "#| output: asis\n",
    "show_doc(handle_requires_action)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "language": "python"
   },
   "outputs": [],
   "source": [
    "def run_conversation() -> str:\n",
    "    \"\"\"Create a run on the current thread, execute any requested tool calls,\n",
    "    and return the assistant's final text reply.\"\"\"\n",
    "    run = client.beta.threads.runs.create_and_poll(\n",
    "        thread_id=thread.id,\n",
    "        assistant_id=assistant.id,\n",
    "    )\n",
    "    while True:\n",
    "        tool_outputs = []\n",
    "        tool_calls = (\n",
    "            []\n",
    "            if not run.required_action\n",
    "            else run.required_action.submit_tool_outputs.tool_calls\n",
    "        )\n",
    "\n",
    "        for tool_call in tool_calls:\n",
    "            if tool_call.function.name == \"vegan_recipe_edamam_search\":\n",
    "                fn_args = json.loads(tool_call.function.arguments)\n",
    "                data = vegan_recipe_edamam_search(\n",
    "                    query=fn_args.get(\"query\"),\n",
    "                )\n",
    "                tool_outputs.append({\"tool_call_id\": tool_call.id, \"output\": data})\n",
    "\n",
    "        if tool_outputs:\n",
    "            try:\n",
    "                run = client.beta.threads.runs.submit_tool_outputs_and_poll(\n",
    "                    thread_id=thread.id,\n",
    "                    run_id=run.id,\n",
    "                    tool_outputs=tool_outputs,\n",
    "                )\n",
    "                print(\"Tool outputs submitted successfully.\")\n",
    "\n",
    "            except Exception as e:\n",
    "                print(\"Failed to submit tool outputs:\", e)\n",
    "                return \"Sorry failed to run tools. Try again with a different query.\"\n",
    "\n",
    "        if run.status == \"completed\":\n",
    "            messages = client.beta.threads.messages.list(thread_id=thread.id)\n",
    "            data = messages.data\n",
    "            content = data[0].content\n",
    "            return content[0].text.value\n",
    "        # Guard: a run that ends 'failed'/'cancelled'/'expired' is never\n",
    "        # re-polled here, so without this check the loop would spin forever.\n",
    "        if run.status in (\"failed\", \"cancelled\", \"expired\"):\n",
    "            return f\"Sorry, the run ended with status '{run.status}'. Try again with a different query.\"\n",
    "        time.sleep(0.05)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "---\n",
       "\n",
       "[source](https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/app.py#L34){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n",
       "\n",
       "### run_convo_stream\n",
       "\n",
       "> run_convo_stream (thread, content:str, client:openai.OpenAI, assistant)"
      ],
      "text/plain": [
       "---\n",
       "\n",
       "[source](https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/app.py#L34){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n",
       "\n",
       "### run_convo_stream\n",
       "\n",
       "> run_convo_stream (thread, content:str, client:openai.OpenAI, assistant)"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#| echo: false\n",
    "#| output: asis\n",
    "show_doc(run_convo_stream)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "language": "python"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "skip\n"
     ]
    }
   ],
   "source": [
    "thread = client.beta.threads.create()\n",
    "\n",
    "test_msgs = [\n",
    "    \"Hello\",\n",
    "    \"What can I make with tempeh, whole wheat bread, and lettuce?\",\n",
    "]\n",
    "for m in test_msgs:\n",
    "    for txt in run_convo_stream(thread, m, client, assistant):\n",
    "        print(txt, end=\"\")\n",
    "    print()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "---\n",
       "\n",
       "[source](https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/app.py#L62){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n",
       "\n",
       "### predict\n",
       "\n",
       "> predict (message, history, client:openai.OpenAI, assistant, thread)"
      ],
      "text/plain": [
       "---\n",
       "\n",
       "[source](https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/app.py#L62){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n",
       "\n",
       "### predict\n",
       "\n",
       "> predict (message, history, client:openai.OpenAI, assistant, thread)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#| echo: false\n",
    "#| output: asis\n",
    "show_doc(predict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "---\n",
       "\n",
       "[source](https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/app.py#L102){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n",
       "\n",
       "### create_demo\n",
       "\n",
       "> create_demo (client:openai.OpenAI, assistant)"
      ],
      "text/plain": [
       "---\n",
       "\n",
       "[source](https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/app.py#L102){target=\"_blank\" style=\"float:right; font-size:smaller\"}\n",
       "\n",
       "### create_demo\n",
       "\n",
       "> create_demo (client:openai.OpenAI, assistant)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#| echo: false\n",
    "#| output: asis\n",
    "show_doc(create_demo)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "language": "python"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "skip\n"
     ]
    }
   ],
   "source": [
    "if \"demo\" in globals():\n",
    "    demo.close()\n",
    "\n",
    "demo = create_demo(client, assistant)\n",
    "demo.launch()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "local-lv-chatbot",
   "language": "python",
   "name": "local-lv-chatbot"
  },
  "widgets": {
   "application/vnd.jupyter.widget-state+json": {
    "state": {},
    "version_major": 2,
    "version_minor": 0
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}