mrolando committed on
Commit
f1cf8a0
1 Parent(s): 7344411

first commit

Browse files
Files changed (4) hide show
  1. .gitignore +2 -0
  2. Iso_Logotipo_Ceibal.png +0 -0
  3. app.py +101 -0
  4. requirements.txt +5 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ .env
2
+ env
Iso_Logotipo_Ceibal.png ADDED
app.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import openai
from transformers import pipeline, Conversation
import gradio as gr
import json
from dotenv import load_dotenv
import base64

# Load environment variables from a local .env file (used for local runs;
# in hosted deployments the variables are injected by the platform).
load_dotenv()

# Read the logo once at startup and keep it as a base64 string so the
# Gradio markdown header can inline it without serving a static file.
with open("Iso_Logotipo_Ceibal.png", "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode()

# Fails fast with a KeyError if the key is missing — intentional.
openai.api_key = os.environ["OPENAI_API_KEY"]
17
+
18
def clear_chat(message, chat_history):
    """Reset the chat UI: empty the textbox and drop the whole history."""
    empty_textbox, empty_history = "", []
    return empty_textbox, empty_history
20
+
21
def add_new_message(message,chat_history):
    """Build the OpenAI chat-completion message list for one turn.

    Prepends the fixed Einstein system prompt, replays every (user, bot)
    pair from `chat_history`, and appends the new user `message` last.
    """
    system_turn = {"role": "system", "content": "Sos Albert Einstein y tendrás que responder preguntas que te harán niños de escuela, las respuestas tienen que ser cómo si hablaras con albert Einstein y con la información de su vida. Las respuestas tienen que estar orientadas a niños entre 9 y 10 años."}
    replayed = []
    for user_msg, bot_msg in chat_history:
        replayed.append({"role": "user", "content": user_msg})
        replayed.append({"role": "assistant", "content": bot_msg})
    return [system_turn] + replayed + [{"role": "user", "content": message}]
30
+
31
+
32
+
33
def respond(message, chat_history):
    """Stream a GPT-3.5 reply into the Gradio chatbot.

    Generator used as a Gradio callback: yields ("", chat_history) after
    each received token so the textbox is cleared and the chat pane
    updates incrementally.

    Args:
        message: the new user message from the textbox.
        chat_history: list of [user, bot] pairs (mutated in place).

    Yields:
        ("", chat_history) — empty textbox value plus the updated history.
    """
    prompt = add_new_message(message, chat_history)
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=prompt,
        temperature=0.5,
        max_tokens=1000,
        stream=True,
    )

    # BUG FIX: the first streamed delta is role-only ({"role": "assistant"})
    # and the final delta is empty, so unconditionally reading
    # chunk_message.content crashed when chat_history was empty.
    # Guard with .get() and skip content-less chunks instead.
    partial_words = ""
    chat_history.append([message, ""])  # placeholder row for this turn
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        token = delta.get("content")
        if not token:
            continue
        partial_words += token
        chat_history[-1] = [message, partial_words]
        yield "", chat_history
70
+
71
+
72
# Gradio UI: a title header with the embedded logo, a chatbot pane, and a
# textbox + send/clear button row wired to the streaming `respond` callback.
with gr.Blocks() as demo:
    # BUG FIX: user-facing typo "Alber Einstein" -> "Albert Einstein".
    gr.Markdown("""
    <center>
    <h1>
    Uso de AI para un chatbot.
    </h1>
    <img src='data:image/jpg;base64,{}' width=200px>
    <h3>
    Con este espacio podrás hablar en formato conversación con Albert Einstein!
    </h3>
    </center>
    """.format(encoded_image))
    with gr.Row():
        chatbot = gr.Chatbot(height=700)  # tall pane so long answers fit
    with gr.Row():
        with gr.Row():
            with gr.Column(scale=4):
                msg = gr.Textbox(label="Texto de entrada")
            with gr.Column(scale=1):
                btn = gr.Button("Enviar")
                clear = gr.ClearButton(components=[msg, chatbot], value="Borrar chat")

    # Both the button and pressing Enter submit the message; `respond` is a
    # generator, so queueing is required for streaming updates.
    btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    clear.click(clear_chat, inputs=[msg, chatbot], outputs=[msg, chatbot])
demo.queue()
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ einops
2
+ openai
3
+ gradio
4
+ transformers
5
+ python-dotenv