File size: 3,366 Bytes
3ba1016
 
2459c1f
 
3ba1016
2459c1f
 
 
 
 
 
 
 
 
 
 
b25290f
2459c1f
 
 
 
3ba1016
2459c1f
3ba1016
2459c1f
 
 
 
 
 
3ba1016
2459c1f
 
 
 
 
 
 
 
 
a75a1b2
2459c1f
 
527456d
3ba1016
527456d
3ba1016
527456d
 
 
 
3ba1016
527456d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2459c1f
 
 
 
 
 
ae27e6c
2459c1f
 
 
 
35052c3
2459c1f
 
 
 
 
 
 
3ba1016
2459c1f
3ba1016
2459c1f
3ba1016
 
35052c3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import os
import openai
# NOTE(review): `pipeline` / `Conversation` and `json` are not used in this
# chunk — possibly needed elsewhere in the project, so left in place.
from transformers import pipeline, Conversation
import gradio as gr
import json
from dotenv import load_dotenv

# Load environment variables from the .env file (for local development)
load_dotenv()
import base64

# Inline the Ceibal logo as base64 so the Gradio Markdown header can embed it
# without serving a static file.
with open("Iso_Logotipo_Ceibal.png", "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode()


# Required environment variables: the OpenAI key and the shared password
# used by demo.launch(auth=...) below. A missing variable raises KeyError.
openai.api_key = os.environ['OPENAI_API_KEY']
access_pwd = os.environ['INTERFACE_PWD']

def clear_chat(message, chat_history):
    """Reset the interface: blank the input textbox and empty the chat history.

    The incoming values are ignored; only the cleared outputs matter.
    """
    return "", []

def add_new_message(message, chat_history):
    """Convert Gradio's (user, assistant) history pairs into the OpenAI
    chat-message format and append the new user message at the end.

    Returns a list of {"role": ..., "content": ...} dicts.
    """
    messages = [
        {"role": role, "content": text}
        for user_text, bot_text in chat_history
        for role, text in (("user", user_text), ("assistant", bot_text))
    ]
    messages.append({"role": "user", "content": message})
    return messages

def respond(message, chat_history):
    """Stream a GPT answer into the Gradio chatbot.

    Builds the full message list from the history, requests a streaming
    completion, and yields ("", chat_history) after each received token so
    the textbox is cleared and the chatbot updates incrementally.
    """
    prompt = add_new_message(message, chat_history)

    response = openai.ChatCompletion.create(
        model="gpt-4o",  # gpt-4o, gpt-4-turbo, gpt-4, gpt-3.5-turbo (a pinned version also works, e.g. gpt-4o-2024-05-13)
        messages=prompt,
        temperature=0.5,
        max_tokens=1000,
        stream=True,
    )

    # Start the new (user, assistant) row once; it is filled in as tokens
    # arrive. The original appended/re-assigned rows per chunk and could
    # duplicate rows when a delta carried both "role" and "content".
    chat_history.append([message, ""])
    partial_words = ""
    for chunk in response:
        delta = chunk['choices'][0]['delta']
        # The first delta usually carries only {"role": ...} and the final
        # one is empty, so "content" may be missing or None — skip those
        # safely (the original crashed on `.content` in these cases).
        token = delta.get("content")
        if token:
            partial_words += token
            chat_history[-1][1] = partial_words
            yield "", chat_history
    # Guarantee at least one yield so the textbox is cleared even if the
    # stream produced no content.
    yield "", chat_history


# ---- Gradio UI -----------------------------------------------------------
# Layout: logo + title banner, a chatbot panel, then an input textbox with
# "send" and "clear" buttons, wired to the handlers defined above.
with gr.Blocks() as demo:
    # Header with the base64-embedded Ceibal logo (Spanish UI text is
    # user-facing and intentionally left untranslated).
    gr.Markdown("""
    <center>
    <img src='data:image/jpg;base64,{}' width=200px>
    <h3>
    En este espacio podrás interactuar con ChatGPT y su modelo GPT4!
    </h3>
    </center>
    """.format(encoded_image))
    with gr.Row():
        chatbot = gr.Chatbot(height=250) # fixed height just to fit the notebook
    with gr.Row():
        with gr.Row():
            with gr.Column(scale=4):
                msg = gr.Textbox(label="Texto de entrada")
            with gr.Column(scale=1):
                btn = gr.Button("Enviar")
                clear = gr.ClearButton(components=[msg, chatbot], value="Borrar chat")
   
    # Both the button click and pressing Enter in the textbox stream a reply;
    # the clear button also resets via clear_chat.
    btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg ,chatbot], outputs=[msg, chatbot]) # press Enter to submit
    clear.click(clear_chat,inputs=[msg, chatbot], outputs=[msg, chatbot])
    
# queue() is required for generator (streaming) handlers; launch is gated by
# a single shared username/password pair from the environment.
demo.queue()
demo.launch(auth = ('Ceibal_IA', access_pwd))