# devolucion_AAP / app.py
# Hugging Face Space by gcapde — commit de44411 ("Update UI").
# (The lines above this comment in the scraped page were Space UI chrome,
# not source code.)
# --- Module setup: imports, environment and static assets ---
import os
import openai
from transformers import pipeline, Conversation
import gradio as gr
import json
from dotenv import load_dotenv

# Load environment variables from a local .env file (local development).
load_dotenv()

import base64

# Read the Ceibal logo once at startup and keep it as a base64 string so it
# can be inlined as a data URI in the HTML header of the UI below.
with open("Iso_Logotipo_Ceibal.png", "rb") as logo_file:
    encoded_image = base64.b64encode(logo_file.read()).decode()

# Fail fast with a KeyError if the OpenAI key is not configured.
openai.api_key = os.environ["OPENAI_API_KEY"]
def clear_chat(message, chat_history):
    """Reset the UI state: empty the input textbox and the chat history.

    Returns a pair (new_textbox_value, new_history) consumed by Gradio.
    """
    empty_box, empty_history = "", []
    return empty_box, empty_history
def add_new_message(message, rubrica, chat_history):
    """Build the OpenAI chat-completion message list for one evaluation turn.

    The list starts with a system prompt that embeds the rubric, followed by
    every prior (user, assistant) turn from `chat_history`, and ends with the
    new user `message`.
    """
    system_prompt = 'Sos un evaluador que debe generar una devoluci贸n en base a cierta r煤brica del trabajo que recibes como entrada. El contexto es en educaci贸n y los trabajos corresponden a actividades desarrolladas por los docentes en el aula. La r煤brica que debes seguir es la siguiente:{}. En base al trabajo te pido que generes una devoluci贸n de m谩ximo 300 palabras, indicando si se ajusta a lo solicitado en la r煤brica y marcando las fortalezas y debilidades en cada punto.'.format(rubrica)
    messages = [{"role": "system", "content": system_prompt}]
    for user_msg, bot_msg in chat_history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    return messages
def respond(message, rubrica, chat_history):
    """Stream a rubric-based evaluation of `message` into the chatbot.

    Builds the prompt from the rubric and prior history, requests a streamed
    gpt-3.5-turbo completion, and yields ("", chat_history) after each token
    so Gradio clears the textbox and updates the chat incrementally.
    """
    prompt = add_new_message(message, rubrica, chat_history)
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=prompt,
        temperature=0.5,
        max_tokens=1000,
        stream=True,  # iterate token deltas instead of waiting for the full reply
    )

    partial_words = ""
    started = False  # True once this turn has been appended to the history
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        # The first delta usually carries only {"role": "assistant"} and the
        # final delta is empty; .get avoids crashing on content-less deltas
        # (the original accessed .content unconditionally on a fresh chat).
        content = delta.get("content")
        if not content:
            continue
        partial_words += content
        if started:
            # Update the in-progress turn in place with the accumulated text.
            chat_history[-1] = [message, partial_words]
        else:
            chat_history.append([message, partial_words])
            started = True
        yield "", chat_history
# --- Gradio UI: rubric input, message input, streaming chatbot output ---
with gr.Blocks() as demo:
    # Header with the Ceibal logo inlined as a base64 data URI.
    gr.Markdown("""
<center>
<h1>
Devoluci贸n de APP usando IA.
</h1>
<img src='data:image/jpg;base64,{}' width=200px>
<h3>
Este espacio permite probar la generaci贸n mediante IA de devoluciones en base a cierta r煤brica que se debe indicar.
</h3>
</center>
""".format(encoded_image))

    with gr.Row():
        rubrica = gr.Textbox(lines=5, label="Escribe la r煤brica que quieres usar para generar la devoluci贸n.")

    with gr.Row():
        with gr.Column(scale=4):
            msg = gr.Textbox(lines=5, label="Texto de entrada para ser evaluado y generar devoluci贸n.")
        with gr.Column(scale=1):
            btn = gr.Button("Enviar")

    with gr.Row():
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(lines=10)  # just to fit the notebook
        with gr.Column(scale=1):
            clear = gr.ClearButton(components=[msg, chatbot], value="Borrar chat")

    # Both the button and pressing Enter stream a response into the chatbot;
    # the clear button resets the textbox and the history.
    btn.click(respond, inputs=[msg, rubrica, chatbot], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg, rubrica, chatbot], outputs=[msg, chatbot])
    clear.click(clear_chat, inputs=[msg, chatbot], outputs=[msg, chatbot])

# queue() is required so the generator-based `respond` can stream updates.
demo.queue()
demo.launch()