# devolucion_AAP / app.py
# Author: mrolando — "added option to select person" (commit 9f9f985)
import os
import openai
from transformers import pipeline, Conversation
import gradio as gr
import json
from dotenv import load_dotenv
# Load environment variables from a local .env file (no-op when absent, e.g. on the Space).
load_dotenv()
import base64

# Read the Ceibal logo and embed it as base64 so the Markdown header
# can inline it without needing a static file route.
with open("Iso_Logotipo_Ceibal.png", "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode()

# OpenAI credential comes from the environment (.env locally, Space secrets in prod);
# raises KeyError at import time if the variable is missing.
openai.api_key = os.environ['OPENAI_API_KEY']
def clear_chat(message, chat_history):
    """Reset the UI: blank out the input textbox and drop the whole conversation.

    Both arguments are ignored; Gradio passes them because the button is
    wired with ``inputs=[msg, chatbot]``.
    """
    cleared_text = ""
    cleared_history = []
    return cleared_text, cleared_history
def add_new_message(message, person, chat_history):
    """Rebuild the full OpenAI chat payload for one request.

    Produces the list-of-dicts format ChatCompletion expects: a system
    prompt that sets the famous-person persona (answers aimed at 9-10
    year olds), then every prior (user, assistant) turn, then the new
    user message.
    """
    system_prompt = 'Sos {} y tendrás que responder preguntas que te harán niños de escuela, las respuestas tienen que ser cómo si hablaras con {} y con la información de su vida. Las respuestas tienen que estar orientadas a niños entre 9 y 10 años.'.format(person, person)
    messages = [{"role": "system", "content": system_prompt}]
    # Replay the stored conversation so the model keeps context.
    for user_text, bot_text in chat_history:
        messages.append({"role": "user", "content": user_text})
        messages.append({"role": "assistant", "content": bot_text})
    messages.append({"role": "user", "content": message})
    return messages
def respond(message, person, chat_history):
    """Stream a persona-flavoured answer from gpt-3.5-turbo into the chatbot.

    Generator used as a Gradio event handler: yields ``("", chat_history)``
    pairs so the textbox is cleared and the last chat turn grows as tokens
    arrive.

    Args:
        message: the user's new question.
        person: famous-person name typed in the persona textbox.
        chat_history: Gradio chatbot state, list of [user, bot] pairs;
            mutated in place.
    """
    prompt = add_new_message(message, person, chat_history)
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=prompt,
        temperature=0.5,
        max_tokens=1000,
        stream=True,
    )

    # Open the new turn with an empty assistant reply, then fill it in as
    # streamed deltas arrive. Using delta.get('content') is essential:
    # the first streamed delta carries only {"role": ...} and the final
    # one is empty, so indexing .content unconditionally (as the original
    # first-message branch did) raises on those chunks.
    chat_history.append([message, ""])
    partial_words = ""
    for chunk in response:
        delta = chunk['choices'][0]['delta']
        content = delta.get('content')
        if content:
            partial_words += content
            chat_history[-1][1] = partial_words
            yield "", chat_history
    # Final yield so the UI settles even if the stream produced no content.
    yield "", chat_history
# UI layout and event wiring. Typo fixed in the persona label:
# "perosnaje" -> "personaje".
with gr.Blocks() as demo:
    # Header: title, inlined base64 logo, short instructions (Spanish).
    gr.Markdown("""
<center>
<h1>
Uso de AI para un chatbot.
</h1>
<img src='data:image/jpg;base64,{}' width=200px>
<h3>
Con este espacio podrás hablar en formato conversación con el personaje famoso que quieras, puede ser Albert Einstein, Marie Curie o el/la que quieras!
</h3>
</center>
""".format(encoded_image))
    with gr.Row():
        person = gr.Textbox(label="Escribí el nombre del personaje famoso:")
    with gr.Row():
        chatbot = gr.Chatbot(height=550)  # tall enough to fit the page
    with gr.Row():
        with gr.Row():
            with gr.Column(scale=4):
                msg = gr.Textbox(label="Texto de entrada")
            with gr.Column(scale=1):
                btn = gr.Button("Enviar")
                clear = gr.ClearButton(components=[msg, chatbot], value="Borrar chat")

    # Button click and Enter-in-textbox both stream through respond().
    btn.click(respond, inputs=[msg, person, chatbot], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg, person, chatbot], outputs=[msg, chatbot])
    clear.click(clear_chat, inputs=[msg, chatbot], outputs=[msg, chatbot])

# queue() is required for generator (streaming) handlers.
demo.queue()
demo.launch()