# devolucion_AAP / app.py
import base64
import os

import gradio as gr
import openai
from dotenv import load_dotenv

# Load environment variables from a local .env file, if present.
load_dotenv()

# Read and base64-encode the logo so it can be embedded inline in the header HTML.
with open("Iso_Logotipo_Ceibal.png", "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode()

openai.api_key = os.environ["OPENAI_API_KEY"]
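# When running locally, the .env file is expected to provide this key, e.g.:
#   OPENAI_API_KEY=sk-...
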
def clear_chat(message, chat_history):
    # Reset the input textbox and the chat history.
    return "", []

def add_new_message(message, rubrica, chat_history):
    # Rebuild the full message list expected by the Chat Completions API:
    # a system prompt carrying the rubric, then the previous turns, then the new message.
    new_chat = [{
        "role": "system",
        "content": (
            "Sos un docente que genera devoluciones sobre textos de estudiantes "
            "en base a la siguiente rúbrica: {}. Las devoluciones tienen que estar "
            "orientadas a niños entre 9 y 10 años. No generes una devolución hasta "
            "que el usuario envíe un texto para evaluar."
        ).format(rubrica),
    }]
    for user, bot in chat_history:
        new_chat.append({"role": "user", "content": user})
        new_chat.append({"role": "assistant", "content": bot})
    new_chat.append({"role": "user", "content": message})
    return new_chat
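
# For example, with rubrica="Ortografía y claridad" and one previous turn,
# add_new_message returns a list shaped like (contents illustrative):
#   [{"role": "system", "content": "Sos un docente ... Ortografía y claridad ..."},
#    {"role": "user", "content": "<texto anterior>"},
#    {"role": "assistant", "content": "<devolución anterior>"},
#    {"role": "user", "content": "<texto nuevo a evaluar>"}]
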
def respond(message, rubrica, chat_history):
    prompt = add_new_message(message, rubrica, chat_history)
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=prompt,
        temperature=0.5,
        max_tokens=1000,
        stream=True,  # receive the answer in incremental chunks
    )
    # Open a new turn in the history and append streamed tokens to it.
    chat_history.append([message, ""])
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        # Skip chunks without text (the first chunk carries only the role,
        # the last one only the finish reason).
        if "content" in delta:
            chat_history[-1][1] += delta["content"]
            yield "", chat_history
with gr.Blocks() as demo:
    gr.Markdown("""
<center>
<h1>
Uso de AI para un chatbot.
</h1>
<img src='data:image/png;base64,{}' width=200px>
<h3>
Este espacio permite probar la generación mediante IA de devoluciones en base a una rúbrica.
</h3>
</center>
""".format(encoded_image))
    with gr.Row():
        rubrica = gr.Textbox(lines=5, label="Escribe la rúbrica que quieres usar para generar la devolución.")
    with gr.Row():
        with gr.Column(scale=4):
            msg = gr.Textbox(lines=5, label="Texto de entrada para ser evaluado y generar devolución.")
        with gr.Column(scale=1):
            btn = gr.Button("Enviar")
            # The chatbot is defined below, so it is registered with the
            # clear button afterwards via clear.add().
            clear = gr.ClearButton(components=[msg], value="Borrar chat")
    with gr.Row():
        chatbot = gr.Chatbot(height=400)
    clear.add([chatbot])

    btn.click(respond, inputs=[msg, rubrica, chatbot], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg, rubrica, chatbot], outputs=[msg, chatbot])  # submit with Enter
    clear.click(clear_chat, inputs=[msg, chatbot], outputs=[msg, chatbot])

demo.queue()
demo.launch()