mrolando committed on
Commit
035fc14
1 Parent(s): 7f3b457
Files changed (4) hide show
  1. .gitignore +3 -0
  2. Iso_Logotipo_Ceibal.png +0 -0
  3. app.py +151 -0
  4. requirements.txt +4 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ .env
2
+ env
3
+ /venv
Iso_Logotipo_Ceibal.png ADDED
app.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from openai import OpenAI
3
+ import gradio as gr
4
+ import json
5
+ from dotenv import load_dotenv
6
+
7
+ # Load environment variables from the local .env file
8
+ load_dotenv()
9
+ import base64
10
+
11
# Read the logo and embed it as base64 so it can be inlined into the
# Markdown <img> tag of the UI below.
with open("Iso_Logotipo_Ceibal.png", "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode()


# Two OpenAI clients, one per simulated persona.
# NOTE(review): both use the same API key, so a single shared client would
# behave identically — confirm there is no plan to use separate keys.
client1= OpenAI(api_key=os.environ['OPENAI_API_KEY'])
client2= OpenAI(api_key=os.environ['OPENAI_API_KEY'])
17
+
18
+
19
def clear_chat(message, chat_history):
    """Reset the conversation: blank the input box and empty the history.

    Both parameters are ignored; they exist only to satisfy the Gradio
    event-handler signature (inputs=[msg, chatbot]).
    """
    cleared_input = ""
    cleared_history = []
    return cleared_input, cleared_history
21
+
22
def add_new_message_client1(message, person, chat_history):
    """Build the chat-completion message list for the client1 persona.

    Args:
        message: New user message, appended last.
        person: Famous-person name interpolated into the system prompt.
        chat_history: Gradio-style history — iterable of (user, bot) pairs.

    Returns:
        A list of ``{"role", "content"}`` dicts: the system prompt, the
        history replayed as alternating user/assistant turns, then the
        new user message.
    """
    # The prompt is intentionally in Spanish: it is runtime model input.
    new_chat = [
        {
            "role": "system",
            "content": f'Sos {person} y tendrás que responder preguntas, las respuestas tienen que ser cómo si las hubiera dicho {person} ',
        }
    ]
    for user, bot in chat_history:
        new_chat.append({"role": "user", "content": user})
        new_chat.append({"role": "assistant", "content": bot})
    new_chat.append({"role": "user", "content": message})
    return new_chat
33
def add_new_message_client2(message, person, chat_history):
    """Assemble the message list for the client2 persona: system prompt,
    the replayed (user, bot) history, then the incoming user message."""
    system_turn = {
        "role": "system",
        "content": 'Sos {} y tendrás que responder preguntas, las respuestas tienen que ser cómo si las hubiera dicho {} '.format(person, person),
    }
    replayed = []
    for user_turn, bot_turn in chat_history:
        replayed.extend(
            [
                {"role": "user", "content": user_turn},
                {"role": "assistant", "content": bot_turn},
            ]
        )
    return [system_turn, *replayed, {"role": "user", "content": message}]
44
+
45
# Global turn counter: its parity decides which persona speaks next.
# NOTE(review): module-level state is shared by every Gradio session, so
# concurrent users would interleave turns — confirm single-user deployment.
counter2 = 1


def _completion(client, prompt):
    # One-shot (non-streaming) GPT-3.5 call shared by both personas;
    # deduplicates the three identical create() sites of the original.
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=prompt,
        temperature=0.5,
        max_tokens=1000,
        stream=False,
    )
    return response.choices[0].message.content


def respond(person1, person2, chat_history):
    """Advance the two-persona conversation by one turn.

    Args:
        person1: Name of the persona answered by ``client1``.
        person2: Name of the persona answered by ``client2``.
        chat_history: Gradio chatbot history (list of [user, bot] rows);
            mutated in place.

    Returns:
        ("", chat_history) — clears the input textbox and refreshes the
        chatbot component.
    """
    global counter2
    if len(chat_history) < 1:
        # First turn: persona 1 answers a fixed greeting.
        message = "Hola"
        prompt = add_new_message_client1(message, person1, chat_history)
        chat_history.append((message, _completion(client1, prompt)))
    else:
        counter2 += 1
        if counter2 % 2 == 0:
            # Persona 1 replies to the last bot utterance; the reply is stored
            # on the "user" side of a new row, with the bot side left blank
            # for persona 2 to fill on the next click.
            prompt = add_new_message_client1(chat_history[-1][1], person1, chat_history)
            chat_history.append((_completion(client1, prompt), ""))
        else:
            # Persona 2 fills the blank bot slot of the last row.
            # (Gradio hands the history back as lists, so item assignment
            # works here even though rows are appended as tuples above.)
            prompt = add_new_message_client2(chat_history[-1][1], person2, chat_history)
            chat_history[-1][1] = _completion(client2, prompt)
    return "", chat_history
117
+
118
# ---- Gradio UI -----------------------------------------------------------
with gr.Blocks() as demo:
    # Header: title, logo (inlined as base64 — the data is PNG, so the MIME
    # type is image/png; the original said image/jpg), and instructions.
    # User-facing text is intentionally in Spanish.
    gr.Markdown("""
<center>
<h1>
Uso de AI para un chatbot.
</h1>
<img src='data:image/png;base64,{}' width=200px>
<h3>
Con este espacio podrás hablar en formato conversación con el personaje famoso que quieras, puede ser Albert Einstein, Marie Curie o el/la que quieras!
</h3>
</center>
""".format(encoded_image))
    with gr.Row():
        # One textbox per persona (label typo "perosnaje" fixed).
        person1 = gr.Textbox(label="Escribí el nombre del personaje famoso:")
        person2 = gr.Textbox(label="Escribí el nombre del personaje famoso:")

    with gr.Row():
        chatbot = gr.Chatbot(height=550)  # just to fit the notebook
    with gr.Row():
        with gr.Row():
            with gr.Column(scale=4):
                msg = gr.Textbox(label="Texto de entrada")
            with gr.Column(scale=1):
                btn = gr.Button("Enviar")
                clear = gr.ClearButton(components=[msg, chatbot], value="Borrar chat")

    # Each click of "Enviar" advances the two-persona conversation one turn;
    # turns are driven by the button only (no submit-on-Enter binding).
    btn.click(respond, inputs=[person1, person2, chatbot], outputs=[msg, chatbot])
    clear.click(clear_chat, inputs=[msg, chatbot], outputs=[msg, chatbot])
demo.queue()
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ einops
2
+ openai
3
+ gradio
4
+ python-dotenv