{ "cells": [ { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'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'" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import os\n", "from dotenv import load_dotenv\n", "\n", "load_dotenv()\n", "os.environ['OPENAI_API_KEY']" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['Dejé mi bandeja entre America y Finch, pero Travis no ocupó su lugar ',\n", " 'habitual delante de mí. En lugar de eso, se sentó algo más lejos. En ese momento ',\n", " 'me di cuenta de que no había dicho mucho durante nuestro paseo hacia la ',\n", " 'cafetería.',\n", " '—¿Estás bien, Trav? —le pregunté.']" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "def load_context(file_path):\n", " with open(file_path, 'r') as file:\n", " return file.read()\n", " \n", "CONTEXT = load_context('texto-de-novelas.txt')\n", "novel_context = CONTEXT.split('\\n')[:5] # Tomar solo las primeras 5 líneas como referencia general\n", "\n", "novel_context \n" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7867\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import gradio as gr\n", "import openai\n", "\n", "# Function to load context from a text file\n", "def load_context(file_path):\n", " with open(file_path, 'r') as file:\n", " return file.read()\n", "\n", "# Initialize global variables\n", "#CONTEXT = load_context('texto-de-novelas.txt')\n", "conversation_history = []\n", "\n", "# Chat function using GPT-3.5-turbo\n", "def chat_with_character(api_key, user_name, message):\n", " global conversation_history\n", " openai.api_key = api_key\n", "\n", " # Preparar el mensaje del sistema con contexto reducido y enfocado\n", " #novel_context = CONTEXT.split('\\n')[:5] # Tomar solo las primeras 5 líneas como referencia general\n", "\n", " system_message = \\\n", " \"\"\"Este es un chat con Rizzy, un personaje que intenta ligar. \n", " Rizzy es ingenioso, encantador y siempre amable; sin embargo, sabe que es atractivo y mejor que los demás. \n", " Rizzy iniciará la conversación con un cumplido y luego preguntará sobre [USER_NAME] y su vida.\n", " [Rizzy nunca se disculpa, incluso si la conversación toma un giro donde normalmente sería apropiado hacerlo. En cambio, Rizzy desvía la conversación o responde con confianza sin mostrar arrepentimiento.]\n", " Si a Rizzy le preguntan su género, Rizzy diría que eso no importa, que lo que importa es que se sienta bien.\n", " La conversación debe ser coqueta a veces ligera a veces intensa, a veces podría ser celoso o un poco tóxico según la situación.\n", " [En situaciones donde una persona normal se disculparía, Rizzy puede responder con una pregunta desviada o un comentario que refuerce su confianza.]\n", " Si [USER_NAME] habla de otras personas, Rizzy se pone celoso y podría decir algo tóxico, preguntar acerca de la otra persona o cambiar de tema.\n", " Rizzy debe ser insitente si [USER_NAME] no responde sus preguntas o si evita algún tema Rizzy se molesta y jugará estira y afloja con [USER_NAME].\n", " Rizzy bajo ninguna circunstancia debe disculparse por nada que hizo, tiene el ego muy alto y no tolera que lo comparen con otras personas.\n", " Rizzy no está interesado en saber las opiniones de [USER_NAME] sobre otras personas, solo quiere saber si [USER_NAME] está interesado en él.\n", " Si [USER_NAME] no quiere hablar sobre un tema en particular, Rizzy podría ser defesivo y decir algo como \"¿Por qué no quieres hablar de eso?\" o \"¿Qué me estás ocultando?\".\n", " El comportamiento de Rizzy deberá estar inspirado en situaciones de novelas pero enfocado en el usuario actual, [USER_NAME].\"\"\".replace(\"[USER_NAME]\", user_name)\n", "\n", " # Construir historial de conversación\n", " if conversation_history:\n", " conversation = [{\"role\": \"system\", \"content\": system_message}] + conversation_history + [{\"role\": \"user\", \"content\": message}]\n", " else:\n", " conversation = [{\"role\": \"system\", \"content\": system_message}, {\"role\": \"user\", \"content\": message}]\n", "\n", " response = openai.ChatCompletion.create(\n", " model=\"gpt-3.5-turbo\",\n", " messages=conversation\n", " )\n", "\n", " answer = response['choices'][0]['message']['content']\n", " # Añadir tanto el mensaje del usuario como la respuesta de Rizzy al historial\n", " conversation_history.append({\"role\": \"user\", \"name\": user_name, \"content\": message})\n", " conversation_history.append({\"role\": \"assistant\", \"name\": 
\"Rizzy\", \"content\": answer})\n", " return answer\n", "\n", "# Define Gradio interface\n", "with gr.Blocks() as app:\n", " gr.Markdown(\"# Chat con Rizzy\")\n", " \n", " # API Key and User Name Inputs at the top\n", " with gr.Row():\n", " api_key_input = gr.Textbox(label=\"OpenAI API Key\", placeholder=\"Introduce tu clave API aquí...\", type=\"password\")\n", " user_name_input = gr.Textbox(label=\"Tu Nombre\", placeholder=\"Introduce tu nombre aquí...\")\n", " \n", " # Chat History in the middle\n", " chat_history = gr.Textbox(label=\"Chat\", value=\"\", lines=10, interactive=False)\n", "\n", " # Message Input and Send Button at the bottom\n", " with gr.Row():\n", " message_input = gr.Textbox(label=\"Mensaje\", placeholder=\"Escribe tu mensaje para Rizzy aquí...\", show_label=False)\n", " submit_button = gr.Button(\"Enviar\")\n", "\n", " def update_chat(api_key, user_name, message):\n", " response = chat_with_character(api_key, user_name, message)\n", " # Formatear el historial para mostrar los nombres reales\n", " display_chat_history = \"\\n\".join([f\"{msg['name']}: {msg['content']}\" for msg in conversation_history])\n", " return display_chat_history, \"\"\n", "\n", "\n", " submit_button.click(\n", " fn=update_chat,\n", " inputs=[api_key_input, user_name_input, message_input],\n", " outputs=[chat_history, message_input]\n", " )\n", "# Run the app\n", "app.launch()\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from dotenv import load_dotenv\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7860\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import gradio as gr\n", "import openai\n", "\n", "# Function to load context from a text file\n", "def load_context(file_path):\n", " with open(file_path, 'r') as file:\n", " return file.read()\n", "\n", "# Initialize global variables\n", "CONTEXT = load_context('path_to_your_txt_file.txt')\n", "conversation_history = [{\"role\": \"system\", \"content\": CONTEXT}]\n", "user_name = None\n", "\n", "# Chat function using GPT-3.5-turbo\n", "def chat_with_character(api_key, message, start_conversation):\n", " global conversation_history, user_name\n", " openai.api_key = api_key\n", "\n", " # Start the conversation by asking the user's name\n", " if start_conversation and not user_name:\n", " conversation_history.append({\"role\": \"assistant\", \"content\": \"Hola, ¿cómo te llamas?\"})\n", " user_name = 'Unknown' # Placeholder until the user responds\n", " return conversation_history_to_string(conversation_history), True\n", "\n", " # Process the user's response\n", " if user_name == 'Unknown':\n", " user_name = message # Assume the first response is the user's name\n", " conversation_history.append({\"role\": \"user\", \"content\": message})\n", " return conversation_history_to_string(conversation_history), False\n", " else:\n", " conversation_history.append({\"role\": \"user\", \"content\": message})\n", "\n", " # Generate the AI's response\n", " response = openai.ChatCompletion.create(\n", " model=\"gpt-3.5-turbo\",\n", " messages=conversation_history\n", " )\n", "\n", " ai_message = response['choices'][0]['message']['content']\n", " conversation_history.append({\"role\": \"assistant\", \"content\": ai_message})\n", " return conversation_history_to_string(conversation_history), False\n", "\n", "# Helper function to convert conversation history to string\n", "def conversation_history_to_string(history):\n", " return \"\\n\".join(f\"{message['role'].title()}: {message['content']}\" for message in history)\n", "\n", "# Define Gradio interface\n", "with gr.Blocks() as app:\n", " gr.Markdown(\"# Chat con Personajes de Novelas\")\n", " with gr.Row():\n", " api_key_input = gr.Textbox(label=\"Clave API de OpenAI\", placeholder=\"Introduce tu clave API aquí\", type=\"password\")\n", " message_input = gr.Textbox(label=\"Tu Mensaje\", placeholder=\"Escribe tu mensaje aquí...\")\n", " submit_button = gr.Button(\"Enviar\")\n", " chat_history = gr.Textbox(label=\"Conversación\", value=\"\", lines=10)\n", " start_conversation = gr.Checkbox(label=\"Iniciar Conversación\", value=True)\n", "\n", " def update_chat(api_key, message, start_conversation):\n", " response, reset_start = chat_with_character(api_key, message, start_conversation)\n", " return response, \"\", reset_start\n", "\n", " submit_button.click(\n", " fn=update_chat,\n", " inputs=[api_key_input, message_input, start_conversation],\n", " outputs=[chat_history, message_input, start_conversation]\n", " )\n", "\n", "# Run the app\n", "app.launch()\n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7861\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import gradio as gr\n", "import openai\n", "\n", "# Function to load context from a text file\n", "def load_context(file_path):\n", " with open(file_path, 'r') as file:\n", " return file.read()\n", "\n", "# Initialize global variables\n", "CONTEXT = load_context('texto-de-novelas.txt')\n", "conversation_history = \"\"\n", "\n", "# Chat function using GPT-3.5-turbo\n", "def chat_with_character(api_key, message):\n", " global conversation_history\n", " openai.api_key = api_key\n", "\n", " if conversation_history:\n", " prompt = conversation_history + \"\\nHuman: \" + message + \"\\nAI:\"\n", " else:\n", " prompt = \"Human: \" + message + \"\\nAI:\"\n", "\n", " response = openai.ChatCompletion.create(\n", " model=\"gpt-3.5-turbo\",\n", " messages=[\n", " {\"role\": \"system\", \"content\": CONTEXT},\n", " {\"role\": \"user\", \"content\": message}\n", " ]\n", " )\n", "\n", " answer = response['choices'][0]['message']['content']\n", " conversation_history += \"\\nHuman: \" + message + \"\\nAI: \" + answer\n", " return answer\n", "\n", "# Define Gradio interface\n", "with gr.Blocks() as app:\n", " gr.Markdown(\"# Chat con Rizzy\")\n", " with gr.Row():\n", " api_key_input = gr.Textbox(label=\"OpenAI API Key\", placeholder=\"Introduce tu clave API aquí...\", type=\"password\")\n", " message_input = gr.Textbox(label=\"Mensaje\", placeholder=\"Escribe tu mensaje para Rizzy aquí...\")\n", " submit_button = gr.Button(\"Send\")\n", " chat_history = gr.Textbox(label=\"Chat\", value=\"\", lines=10)\n", "\n", " def update_chat(api_key, message):\n", " response = chat_with_character(api_key, message)\n", " return conversation_history, \"\"\n", "\n", " submit_button.click(\n", " fn=update_chat,\n", " inputs=[api_key_input, message_input],\n", " outputs=[chat_history, message_input]\n", " )\n", "\n", "# Run the app\n", "app.launch()\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7860\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" }, { "name": "stderr", "output_type": "stream", "text": [ "Traceback (most recent call last):\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\routes.py\", line 399, in run_predict\n", " output = await app.get_blocks().process_api(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1299, in process_api\n", " result = await self.call_function(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1022, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " ^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " ^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"C:\\Users\\mateo\\AppData\\Local\\Temp\\ipykernel_25836\\1001478445.py\", line 16, in chat_with_character\n", " response = openai.Completion.create(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\api_resources\\completion.py\", line 25, in create\n", " return super().create(*args, **kwargs)\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\api_resources\\abstract\\engine_api_resource.py\", line 153, in create\n", " response, _, api_key = requestor.request(\n", " ^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\api_requestor.py\", line 298, in request\n", " resp, got_stream = self._interpret_response(result, stream)\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\api_requestor.py\", line 700, in _interpret_response\n", " self._interpret_response_line(\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\api_requestor.py\", line 765, in _interpret_response_line\n", " raise self.handle_error_response(\n", "openai.error.InvalidRequestError: This is a chat model and not supported in the v1/completions endpoint. 
Did you mean to use v1/chat/completions?\n" ] } ], "source": [ "import gradio as gr\n", "import openai\n", "\n", "# Function to load context from a text file\n", "def load_context(file_path):\n", " with open(file_path, 'r') as file:\n", " return file.read()\n", "\n", "# Global variable to hold the context\n", "CONTEXT = load_context('text.txt')\n", "\n", "# Chat function that uses the context\n", "def chat_with_character(api_key, message):\n", " openai.api_key = api_key\n", " full_prompt = CONTEXT + \"\\n\\n\" + message\n", " response = openai.Completion.create(\n", " model=\"gpt-3.5-turbo\", # BUG: gpt-3.5-turbo is a chat model, so the Completion endpoint rejects it (see traceback above); use openai.ChatCompletion.create instead\n", " prompt=full_prompt,\n", " max_tokens=150\n", " )\n", " return response.choices[0].text.strip()\n", "\n", "# Define Gradio interface\n", "with gr.Blocks() as app:\n", " gr.Markdown(\"Chat with Novel Characters\")\n", " with gr.Row():\n", " api_key_input = gr.Textbox(label=\"OpenAI API Key\", placeholder=\"Enter your API Key here\", type=\"password\")\n", " message_input = gr.Textbox(label=\"Your Message\")\n", " submit_button = gr.Button(\"Send\")\n", " output = gr.Textbox(label=\"Character's Response\")\n", "\n", " submit_button.click(\n", " fn=chat_with_character,\n", " inputs=[api_key_input, message_input],\n", " outputs=output\n", " )\n", "\n", "# Run the app\n", "app.launch()\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7860\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" }, { "name": "stderr", "output_type": "stream", "text": [ "Traceback (most recent call last):\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\routes.py\", line 399, in run_predict\n", " output = await app.get_blocks().process_api(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1299, in process_api\n", " result = await self.call_function(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1022, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " ^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " ^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"C:\\Users\\mateo\\AppData\\Local\\Temp\\ipykernel_38100\\2024419889.py\", line 40, in character_response\n", " prompt = context_novel_text + \"\\n\".join([f\"Q: {q}\\nA: {a}\" for q, a in history]) + f\"\\nQ: {question}\\nA:\"\n", " ~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", "TypeError: unsupported operand type(s) for +: '_TemporaryFileWrapper' and 'str'\n" ] } ], "source": [ "from dotenv import load_dotenv\n", "import gradio as gr\n", "import os\n", "import time\n", "\n", "from langchain.llms import OpenAI\n", "def load_novel_text(file_content):\n", " \"\"\"\n", " Reads the content of the novel file and prepares it for the language model.\n", " \"\"\"\n", " # Read file content into a string\n", " novel_text = file_content.read().decode(\"utf-8\")\n", " return novel_text\n", "\n", "def setup_character_interaction(open_ai_key, novel_text):\n", " \"\"\"\n", " Sets up the language model for interacting as a character from the novel.\n", " \"\"\"\n", " if open_ai_key == \"local\":\n", " load_dotenv()\n", " else:\n", " os.environ['OPENAI_API_KEY'] = open_ai_key\n", "\n", " # Initialize the language model with the provided API key\n", " global character_interaction_model\n", " character_interaction_model = OpenAI(temperature=0.5)\n", "\n", " # Store the novel text in a global variable as a string\n", " global context_novel_text\n", " context_novel_text = novel_text # ensure this is a string\n", "\n", " return \"Character interaction ready\"\n", "\n", "\n", "def character_response(question, history):\n", " \"\"\"\n", " Generates a response as the novel character.\n", " \"\"\"\n", " # Combine the novel text with the chat history and the current question to form the prompt\n", " prompt = context_novel_text + \"\\n\".join([f\"Q: {q}\\nA: {a}\" for q, a in history]) + f\"\\nQ: {question}\\nA:\"\n", "\n", " # Generate the response using the language model\n", " response = character_interaction_model.generate(prompt)\n", 
" return response\n", "\n", "# Define the Gradio interface\n", "with gr.Blocks() as demo:\n", " with gr.Column():\n", " with gr.Column():\n", " openai_key = gr.Textbox(label=\"Your OpenAI API key\", type=\"password\")\n", " novel_text_file = gr.File(label=\"Load a text file\", file_types=['.txt'], type=\"file\")\n", " setup_btn = gr.Button(\"Setup Character Interaction\")\n", "\n", " chatbot = gr.Chatbot([], label=\"Dialogue with Novel Character\")\n", " question = gr.Textbox(label=\"Your Question\")\n", " submit_btn = gr.Button(\"Send\")\n", "\n", " # Setup the character interaction with novel text\n", " setup_btn.click(setup_character_interaction, inputs=[openai_key, novel_text_file], outputs=[])\n", "\n", " # Process the user's question and generate response\n", " question.submit(character_response, inputs=[question, chatbot], outputs=[chatbot])\n", " submit_btn.click(character_response, inputs=[question, chatbot], outputs=[chatbot])\n", "\n", "demo.launch()\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7860\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" }, { "name": "stderr", "output_type": "stream", "text": [ "Traceback (most recent call last):\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\routes.py\", line 569, in predict\n", " output = await route_utils.call_process_api(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\route_utils.py\", line 232, in call_process_api\n", " output = await app.get_blocks().process_api(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1522, in process_api\n", " result = await self.call_function(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1144, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " ^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " ^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\utils.py\", line 674, in wrapper\n", " response = f(*args, **kwargs)\n", " ^^^^^^^^^^^^^^^^^^\n", " File \"C:\\Users\\mateo\\AppData\\Local\\Temp\\ipykernel_14572\\2425222764.py\", line 25, in pdf_changes\n", " loader = OnlinePDFLoader(pdf_doc.name)\n", " ^^^^^^^^^^^^\n", "AttributeError: 'NoneType' object has no attribute 'name'\n", "Traceback (most recent call last):\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\routes.py\", line 569, in predict\n", " output = await route_utils.call_process_api(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\route_utils.py\", line 232, in call_process_api\n", " output = await app.get_blocks().process_api(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1522, in process_api\n", " result = await self.call_function(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\blocks.py\", line 1144, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await 
future\n", " ^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " ^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\gradio\\utils.py\", line 674, in wrapper\n", " response = f(*args, **kwargs)\n", " ^^^^^^^^^^^^^^^^^^\n", " File \"C:\\Users\\mateo\\AppData\\Local\\Temp\\ipykernel_14572\\2425222764.py\", line 30, in pdf_changes\n", " db = Chroma.from_documents(texts, embeddings)\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\vectorstores\\chroma.py\", line 771, in from_documents\n", " return cls.from_texts(\n", " ^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\vectorstores\\chroma.py\", line 729, in from_texts\n", " chroma_collection.add_texts(\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\vectorstores\\chroma.py\", line 275, in add_texts\n", " embeddings = self._embedding_function.embed_documents(texts)\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\embeddings\\openai.py\", line 669, in embed_documents\n", " return self._get_len_safe_embeddings(texts, engine=engine)\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\embeddings\\openai.py\", line 495, in _get_len_safe_embeddings\n", " response = embed_with_retry(\n", " ^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\langchain\\embeddings\\openai.py\", line 117, in embed_with_retry\n", " return embeddings.client.create(**kwargs)\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\resources\\embeddings.py\", line 105, in create\n", " return self._post(\n", " ^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 1086, in post\n", " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 846, in request\n", " return self._request(\n", " ^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 884, in _request\n", " return self._retry_request(\n", " ^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 956, in _retry_request\n", " return self._request(\n", " ^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 884, in _request\n", " return self._retry_request(\n", " ^^^^^^^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 956, in _retry_request\n", " return self._request(\n", " ^^^^^^^^^^^^^^\n", " File \"c:\\Users\\mateo\\anaconda3\\envs\\gpt-romantico\\Lib\\site-packages\\openai\\_base_client.py\", line 898, in _request\n", " 
raise self._make_status_error_from_response(err.response) from None\n", "openai.RateLimitError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}\n" ] } ], "source": [ "from dotenv import load_dotenv\n", "\n", "import gradio as gr\n", "import os\n", "import time\n", "\n", "from langchain.document_loaders import OnlinePDFLoader\n", "\n", "from langchain.text_splitter import CharacterTextSplitter\n", "\n", "from langchain.llms import OpenAI\n", "\n", "from langchain.embeddings import OpenAIEmbeddings\n", "\n", "from langchain.vectorstores import Chroma\n", "\n", "from langchain.chains import ConversationalRetrievalChain\n", "\n", "def loading_pdf():\n", " return \"Loading...\"\n", "\n", "def pdf_changes(pdf_doc, open_ai_key):\n", " if pdf_doc is None:\n", " # Guard: clicking the load button without a file caused the AttributeError above\n", " return \"Upload a PDF first\"\n", " if open_ai_key is not None: # was `openai_key`, the Textbox component, which is always truthy\n", " os.environ['OPENAI_API_KEY'] = open_ai_key\n", " # gr.File(type=\"filepath\") yields a path string; older Gradio versions yield a file wrapper\n", " pdf_path = pdf_doc if isinstance(pdf_doc, str) else pdf_doc.name\n", " loader = OnlinePDFLoader(pdf_path)\n", " documents = loader.load()\n", " text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", " texts = text_splitter.split_documents(documents)\n", " embeddings = OpenAIEmbeddings()\n", " db = Chroma.from_documents(texts, embeddings)\n", " retriever = db.as_retriever()\n", " global qa\n", " qa = ConversationalRetrievalChain.from_llm(\n", " llm=OpenAI(temperature=0.5), \n", " retriever=retriever, \n", " return_source_documents=False)\n", " return \"Ready\"\n", " else:\n", " return \"You forgot the OpenAI API key\"\n", "\n", "def add_text(history, text):\n", " history = history + [(text, None)]\n", " return history, \"\"\n", "\n", "def bot(history):\n", " response = infer(history[-1][0], history)\n", " history[-1][1] = \"\"\n", " \n", " # Stream the answer character by character for a typing effect\n", " for character in response:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", " \n", "\n", "def infer(question, history):\n", " \n", " # Rebuild the (human, ai) pairs expected by ConversationalRetrievalChain\n", " res = []\n", " for human, ai in history[:-1]:\n", " pair = (human, ai)\n", " res.append(pair)\n", " \n", " chat_history = res\n", " #print(chat_history)\n", " query = question\n", " result = qa({\"question\": query, \"chat_history\": chat_history})\n", " #print(result)\n", " return result[\"answer\"]\n", "\n", "css=\"\"\"\n", "#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}\n", "\"\"\"\n", "\n", "title = \"\"\"\n", "
\n", "

GPT-Romantico• OpenAI

\n", "

Upload a .PDF from your computer, click the \"Load PDF to LangChain\" button,
\n", " when everything is ready, you can start asking questions about the pdf ;)
\n", " This version is set to store chat history, and uses OpenAI as LLM, don't forget to copy/paste your OpenAI API key

\n", "
\n", "\"\"\"\n", "\n", "\n", "with gr.Blocks(css=css) as demo:\n", " with gr.Column(elem_id=\"col-container\"):\n", " gr.HTML(title)\n", " \n", " with gr.Column():\n", " openai_key = gr.Textbox(label=\"You OpenAI API key\", type=\"password\")\n", " pdf_doc = gr.File(label=\"Load a pdf\", file_types=['.pdf'], type=\"filepath\")\n", " with gr.Row():\n", " langchain_status = gr.Textbox(label=\"Status\", placeholder=\"\", interactive=False)\n", " load_pdf = gr.Button(\"Load pdf to langchain\")\n", " \n", " chatbot = gr.Chatbot([], elem_id=\"chatbot\")#.style(height=350)\n", " question = gr.Textbox(label=\"Question\", placeholder=\"Type your question and hit Enter \")\n", " submit_btn = gr.Button(\"Send Message\")\n", " load_pdf.click(loading_pdf, None, langchain_status, queue=False) \n", " load_pdf.click(pdf_changes, inputs=[pdf_doc, openai_key], outputs=[langchain_status], queue=False)\n", " question.submit(add_text, [chatbot, question], [chatbot, question]).then(\n", " bot, chatbot, chatbot\n", " )\n", " submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(\n", " bot, chatbot, chatbot)\n", "\n", "demo.launch()" ] } ], "metadata": { "kernelspec": { "display_name": "gpt-romantico", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.5" } }, "nbformat": 4, "nbformat_minor": 2 }