# Source: Hugging Face Space snapshot (commit 8408bd8, original size 5,610 bytes).
# The page header ("Spaces: Sleeping", file-size line, and line-number gutter)
# was scrape residue, not code; it is preserved here as a comment so that the
# file parses as valid Python.
import json
import os
import sys
import time

import gradio as gr
import plotly.express as px
from openai import OpenAI
print("Script iniciado")

# SECURITY: the API key was previously hard-coded in this file. A key committed
# to source control must be considered leaked and revoked; read it from the
# environment instead.
try:
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("La variable de entorno OPENAI_API_KEY no está definida")
    client = OpenAI(api_key=api_key)
    print("Cliente OpenAI inicializado")
except Exception as e:
    print(f"Error al inicializar el cliente OpenAI: {e}")
    sys.exit(1)

# Assistant to run against; can be overridden without editing the source.
assistant_id = os.environ.get("ASSISTANT_ID", "asst_0hq3iRy6LX0YLZP0QVzg17fT")
print(f"ID del asistente: {assistant_id}")
def random_plot():
    """Return a Plotly scatter figure of the bundled iris dataset.

    Used as a demo figure for the Gradio UI; the data is static, so the
    plot is deterministic despite the function's name.
    """
    # Fixed mojibake in the log message ("gr谩fico" -> "gráfico").
    print("Generando gráfico aleatorio")
    df = px.data.iris()
    fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species",
                     size='petal_length', hover_data=['petal_width'])
    return fig
def print_like_dislike(x: gr.LikeData):
    """Log the index, value and like/dislike flag of a chatbot reaction."""
    index, value, liked = x.index, x.value, x.liked
    print(index, value, liked)
def add_message(history, message):
    """Append the user's multimodal input to the chat history.

    Args:
        history: list of (user, bot) tuples maintained by the Chatbot.
        message: dict from MultimodalTextbox with "text" and "files" keys.

    Returns:
        The updated history and a cleared, re-enabled MultimodalTextbox.
    """
    # FIX: the textbox is configured with file_count="multiple", but uploaded
    # files were silently dropped. Add each file as its own history entry
    # (Gradio's tuple format renders a (path,) tuple as a file bubble).
    for file_path in message.get("files") or []:
        history.append(((file_path,), None))
    if message["text"] is not None and message["text"].strip() != "":
        history.append((message["text"], None))
    return history, gr.MultimodalTextbox(value=None, interactive=True)
def _attach_error(history, message):
    """Attach *message* as the assistant's reply to the last chat turn.

    FIX: the original code returned ``history + [(message, None)]``, which
    placed error text in the *user* slot of a new tuple, rendering it on the
    wrong side of the chat. Filling the bot slot of the last turn shows it
    as an assistant reply.
    """
    if history:
        history[-1] = (history[-1][0], message)
        return history
    return [(None, message)]


def bot(history):
    """Send the last user message to the OpenAI assistant and fill in its reply.

    Args:
        history: list of (user, bot) tuples; the last entry holds the pending
            user message with a ``None`` bot slot.

    Returns:
        The history with the assistant's answer (or an error message) filled in.

    NOTE(review): a fresh thread is created on every call, so the assistant
    never sees earlier turns — confirm whether multi-turn context is intended.
    """
    print("Iniciando función bot")
    try:
        last_message = history[-1][0] if history else "Hola"
        print(f"Último mensaje: {last_message}")
        thread = client.beta.threads.create()
        print(f"Hilo creado: {thread.id}")
        client.beta.threads.messages.create(
            thread_id=thread.id,
            role="user",
            content=last_message
        )
        print("Mensaje del usuario añadido al hilo")
        run = client.beta.threads.runs.create(
            thread_id=thread.id,
            assistant_id=assistant_id
        )
        print(f"Ejecución iniciada: {run.id}")
        timeout = 120  # seconds to wait before cancelling the run
        start_time = time.time()
        while run.status not in ["completed", "failed", "cancelled"]:
            if time.time() - start_time > timeout:
                print("Tiempo de espera agotado")
                client.beta.threads.runs.cancel(thread_id=thread.id, run_id=run.id)
                return _attach_error(history, "Lo siento, la respuesta está tardando demasiado. Por favor, intenta reformular tu pregunta.")
            time.sleep(2)  # poll interval; the Assistants run API is asynchronous
            run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
            print(f"Estado de la ejecución: {run.status}")
            if run.status == "requires_action":
                print("La ejecución requiere una acción")
                required_actions = run.required_action.submit_tool_outputs.tool_calls
                tool_outputs = []
                for action in required_actions:
                    print(f"Acción requerida: {action.type}")
                    print(f"Función: {action.function.name}")
                    print(f"Argumentos: {action.function.arguments}")
                    # NOTE(review): no tool is actually executed here — a dummy
                    # success payload is returned so the run can proceed.
                    tool_outputs.append({
                        "tool_call_id": action.id,
                        "output": json.dumps({"status": "success", "message": "Función ejecutada correctamente"})
                    })
                if tool_outputs:
                    run = client.beta.threads.runs.submit_tool_outputs(
                        thread_id=thread.id,
                        run_id=run.id,
                        tool_outputs=tool_outputs
                    )
                else:
                    client.beta.threads.runs.cancel(thread_id=thread.id, run_id=run.id)
                    return _attach_error(history, "Lo siento, el asistente requiere acciones adicionales que no puedo manejar en este momento. Por favor, intenta reformular tu pregunta.")
        if run.status != "completed":
            print(f"La ejecución terminó con estado: {run.status}")
            return _attach_error(history, "Lo siento, hubo un problema al procesar tu mensaje. Por favor, intenta de nuevo o reformula tu pregunta.")
        messages = client.beta.threads.messages.list(thread_id=thread.id)
        print("Mensajes recuperados del hilo")
        # Concatenate every text part of every assistant message in the thread.
        bot_response = ""
        for message in messages.data:
            if message.role == "assistant":
                for content in message.content:
                    if content.type == 'text':
                        bot_response += content.text.value + "\n"
        if not bot_response:
            print("No se encontró respuesta del asistente")
            bot_response = "Lo siento, no pude generar una respuesta. Por favor, intenta reformular tu pregunta."
        print(f"Respuesta del bot: {bot_response}")
        history[-1] = (history[-1][0], bot_response.strip())
        return history
    except Exception as e:
        print(f"Error en la función bot: {e}")
        return _attach_error(history, "Lo siento, ocurrió un error inesperado. Por favor, intenta de nuevo.")
print("Definiendo la interfaz Gradio")

fig = random_plot()  # demo figure; currently unused by the layout below

with gr.Blocks(fill_height=True) as demo:
    chatbot = gr.Chatbot(
        elem_id="chatbot",
        bubble_full_width=False,
        scale=1,
    )
    chat_input = gr.MultimodalTextbox(interactive=True,
                                      file_count="multiple",
                                      placeholder="Enter message or upload file...", show_label=False)
    # Submit -> store the user's message -> query the assistant -> re-enable input.
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
    chatbot.like(print_like_dislike, None, None)

# Fixed mojibake ("aplicaci贸n" -> "aplicación") and removed a stray trailing
# "|" (scrape residue) that made the final line a syntax error.
print("Iniciando la aplicación Gradio")
demo.queue()
demo.launch(share=True)
print("Script finalizado")