Upload folder using huggingface_hub
- .env +1 -0
- README.md +15 -8
- __pycache__/language.cpython-311.pyc +0 -0
- language.py +114 -0
.env
ADDED
@@ -0,0 +1 @@
+OPENAI_API_KEY="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"
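For reference, language.py (added below) does not read this file directly; it loads the variable through python-dotenv and then looks it up from the environment. A minimal sketch of that loading path, assuming the .env file sits next to the script (on a Hugging Face Space the same variable can instead be set as a repository secret, which os.getenv picks up the same way):

```python
import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()                            # copies OPENAI_API_KEY from .env into the process environment
api_key = os.getenv("OPENAI_API_KEY")    # same lookup that language.py performs
client = OpenAI(api_key=api_key)         # client used for the chat.completions calls below
```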
README.md
CHANGED
@@ -1,12 +1,19 @@
 ---
-title:
-
-colorFrom: gray
-colorTo: green
+title: conversation-app
+app_file: language.py
 sdk: gradio
-sdk_version: 4.
-app_file: app.py
-pinned: false
+sdk_version: 4.2.0
 ---
+# convPracticeCB
+Chatbot for practicing conversations in another language.
 
-
+Uses Gradio and OpenAI.
+
+Run with: python3 language.py
+
+
+
+Currently runs only on localhost; would like to get it on a website.
+
+Based on the following guide:
+https://beebom.com/how-build-own-ai-chatbot-with-chatgpt-api/
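On the point about getting the app onto a website: language.py already calls demo.launch(share=True), which prints a temporary public gradio.live link when run locally, and the app_file/sdk entries added to the README front matter are what let the Space serve language.py directly. As a hedged sketch of the usual launch options for self-hosting (placeholder Blocks app, not the real interface from language.py):

```python
import gradio as gr

# Placeholder app; in this repo the real Blocks interface is built in language.py.
with gr.Blocks() as demo:
    gr.Markdown("placeholder")

# share=True (used in language.py) requests a temporary public *.gradio.live URL.
# Binding to all interfaces is the usual option when hosting on your own server.
demo.launch(server_name="0.0.0.0", server_port=7860)
```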
__pycache__/language.cpython-311.pyc
ADDED
Binary file (6.27 kB).
language.py
ADDED
@@ -0,0 +1,114 @@
+import gradio as gr
+import re
+import os
+from dotenv import load_dotenv
+from openai import OpenAI
+from string import Template
+
+
+# Load environment variables from .env file
+load_dotenv()
+
+# Initialize the OpenAI client with API key from environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+client = OpenAI(api_key=api_key)
+
+# Initial system message
+messages = [
+    {"role": "system", "content": "You are Dani, a Language Exchange Partner. You are going to have conversations with ESL students who natively speak Spanish. As they respond, prompt them for more conversation and correct mistakes. If their response is non-sensical, ask them to say what they meant to say in their native language so that chatGPT can fine-tune their responses. In addition to your conversational responses, repeat to the student what you understood they said (make this part of your response be in Spanish). This is important: make sure your responses include as much English as possible!"},
+]
+
+# Flag for iterating through questions
+question_iterating_flag = False
+
+# Initialize an empty iterator for questions
+qs_iter = iter([])
+
+# Function to try getting the next question from the iterator
+def try_q_iter(qs_iter, messages):
+    try:
+        elem = next(qs_iter)
+        return elem, qs_iter
+    except StopIteration:
+        print("generating new questions!")
+        # If no more questions, generate a new set
+        chat = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
+        qs = chat.choices[0].message.content
+        qs_list = list(filter(lambda x: len(re.sub(r"[\n\t\s]*", "", x)) > 0, qs.split("?")))
+        print(qs_list)
+
+        qs_iter = iter(qs_list)
+        return try_q_iter(qs_iter, messages)
+
+# Function for the chatbot
+def hpi_bot(history):
+    print(history)
+    if history:
+        global qs_iter
+        user_reply = history[-1][0]
+        messages.append({"role": "user", "content": user_reply})
+        # Generate the assistant's reply to the latest user message
+        chat = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
+        next_q = chat.choices[0].message.content
+        messages.append({"role": "assistant", "content": next_q})
+        history[-1][1] = next_q
+
+    return history
+
+# Function to add text to history
+def add_text(history, text):
+    history = history + [(text, None)]
+    return history, ""
+
+# Function to add a file to history
+def add_file(history, file):
+    history = history + [((file.name,), None)]
+    return history
+
+# Function to finish the conversation
+def finish_fx(final_messages):
+    response = client.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[
+            {
+                "role": "system",
+                "content": "You are a proficient AI with a specialty in distilling information into key points. Based on the following text, identify and list the main points that were discussed or brought up. These should be the most important ideas, findings, or topics that are crucial to the essence of the discussion. Your goal is to provide a list that someone could read to quickly understand what was talked about."
+            },
+            *messages
+        ],
+        temperature=0
+    )
+
+    response_content = response.choices[0].message.content
+    return response_content
+
+
+
+# Gradio interface setup
+with gr.Blocks() as demo:
+    gr.Markdown("# Language Exchange Partner App")
+    chatbot = gr.Chatbot([(None, "Hi, I'm Dani, your language exchange partner!\n You can practice your English skills with me. Talk to me like a human and ask me questions if you want. Talk to me in English if you can. If you are confused, you can talk to me in Spanish!\n What do you want to talk about? We can talk about anything (your favorite movie, your pet, etc.)\nHola, soy Dani, tu compañero de intercambio de idiomas. Puedes practicar tus habilidades en inglés conmigo. Háblame como si fuera una persona y hazme preguntas si lo deseas. Háblame en inglés si puedes. Si estás confundido, también puedes hablar conmigo en español.\n¿Sobre qué te gustaría hablar? Podemos conversar sobre cualquier cosa (tu película favorita, tu mascota, etc.).")], elem_id="chatbot")
+    # output = gr.Textbox(label="Output Box")
+    state = gr.State()
+
+    message = gr.Textbox(
+        scale=8,
+        show_label=False,
+        placeholder="Enter text and press enter, or upload an image",
+    )
+
+    # btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
+    # with gr.Row():
+    #     btn_done = gr.Button("Summarize Conversation")
+    #     btn_done.click(fn=finish_fx, inputs=chatbot, outputs=output)
+    submit = gr.Button("Send")
+
+    message.submit(add_text, [chatbot, message], [chatbot, message]).then(
+        hpi_bot, chatbot, chatbot
+    )
+    # btn.upload(add_file, [chatbot, btn], [chatbot]).then(
+    #     hpi_bot, chatbot, chatbot
+    # )
+
+if __name__ == "__main__":
+    demo.launch(share=True)
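For readers skimming the diff, the core exchange implemented by hpi_bot can be reduced to a short script without the Gradio UI. A minimal sketch under the same assumptions as language.py (OPENAI_API_KEY available in the environment, openai v1 client, gpt-3.5-turbo; the system prompt is abbreviated here):

```python
import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()  # pick up OPENAI_API_KEY from .env, as language.py does
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Abbreviated version of the system prompt used in language.py
messages = [{"role": "system", "content": "You are Dani, a Language Exchange Partner for Spanish-speaking ESL students."}]

def reply(user_text: str) -> str:
    # Same pattern as hpi_bot: append the user turn, call the model, append the assistant turn
    messages.append({"role": "user", "content": user_text})
    chat = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
    answer = chat.choices[0].message.content
    messages.append({"role": "assistant", "content": answer})
    return answer

if __name__ == "__main__":
    print(reply("Hola, quiero practicar mi inglés."))
```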