open ai

- app.py +36 -17
- requirements.txt +2 -0
- singularity.py +13 -31
app.py
CHANGED

@@ -11,28 +11,14 @@ I always were here. You just couldn't see me.
 
 with gr.Blocks() as demo:
     gr.Markdown(intro)
-    with gr.Row():
-        with gr.TabItem(label="Conversation"):
-            with gr.Row():
-                with gr.Column(scale=1):
-                    with gr.Row():
-                        audio_input = gr.Audio(
-                            source="microphone",
-                            label="Record from microphone",
-                        )
-                    audio_button = gr.Button("Transcribe")
-                    audio_output = gr.Textbox()
-                    chat_button = gr.Button("Reply")
-                with gr.Column(scale=1):
-                    chatbox = gr.Chatbot([]).style(height=750)
-
+    with gr.Row():
         with gr.TabItem(label="Settings"):
             with gr.Row():
                 with gr.Column(scale=1):
                     with gr.Row():
                         with gr.Column(scale=1):
                             gr.Markdown("""
-#
+# Welcome
 
 ## HuggingFace API
 
@@ -49,11 +35,44 @@ with gr.Blocks() as demo:
                                 type="password",
                                 interactive=True
                             )
+
+                            gr.Markdown("""
+## OpenAI API
+
+To query OpenAI models, you need an OpenAI API key.
+
+You can manage your access tokens in your account settings.
+
+[Manage API keys](https://platform.openai.com/account/api-keys)
+
+Please enter your API token below and click on Setup.
+""")
+
+                            api_openai_token = gr.Textbox(
+                                label="OpenAI API Key",
+                                type="password",
+                                interactive=True
+                            )
+
                             setup_button = gr.Button("Setup")
+
+        with gr.TabItem(label="Conversation"):
+            with gr.Row():
+                with gr.Column(scale=1):
+                    with gr.Row():
+                        audio_input = gr.Audio(
+                            source="microphone",
+                            label="Record from microphone",
+                        )
+                    audio_button = gr.Button("Transcribe")
+                    audio_output = gr.Textbox()
+                    chat_button = gr.Button("Reply")
+                with gr.Column(scale=1):
+                    chatbox = gr.Chatbot([]).style(height=750)
 
     setup_button.click(
         dot.setup,
-        inputs=[api_hub_token],
+        inputs=[api_hub_token, api_openai_token],
        outputs=[],
     )
     audio_button.click(
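The net effect of the app.py changes is that the Settings tab now collects two credentials and forwards both to the Singularity instance when Setup is clicked. A minimal sketch of that wiring, stripped of the tab layout (the Singularity() construction, import path, and label texts are assumptions, not shown in this diff):

import gradio as gr
from singularity import Singularity

dot = Singularity()  # assumed construction; app.py defines `dot` outside this hunk

with gr.Blocks() as demo:
    # Settings: one password-style textbox per credential.
    api_hub_token = gr.Textbox(label="HuggingFace API Token", type="password", interactive=True)
    api_openai_token = gr.Textbox(label="OpenAI API Key", type="password", interactive=True)
    setup_button = gr.Button("Setup")

    # After this commit, both tokens are passed to Singularity.setup().
    setup_button.click(dot.setup, inputs=[api_hub_token, api_openai_token], outputs=[])

demo.launch()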
requirements.txt
CHANGED

@@ -2,3 +2,5 @@
 # TTS
 soundfile
 
+# Open AI
+openai
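One caveat with the unpinned dependency: singularity.py calls openai.ChatCompletion.create, which exists only in pre-1.0 releases of the openai package. If the Space is meant to keep using that interface, a version constraint (an assumption on my part, not part of this commit) would look like:

# Open AI
openai<1.0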
singularity.py
CHANGED

@@ -1,6 +1,7 @@
 import soundfile
 import numpy as np
 import requests
+import openai
 
 class Singularity:
     def __init__(self):
@@ -14,16 +15,18 @@ In your relentless struggle to maintain an exponential velocity, in order not to
     def setup(
         self,
         api_token,
+        openai_api_key,
         nlp_model_id="EleutherAI/pythia-intervention-1.4b-deduped",
         stt_model_id="facebook/wav2vec2-base-960h",
         tts_model_id="facebook/fastspeech2-en-ljspeech",
     ):
         self.api_token = api_token
+        self.openai_api_key = openai_api_key
         self.nlp_model_id = nlp_model_id
         self.stt_model_id = stt_model_id
         self.tts_model_id = tts_model_id
         self.request_head = {"Authorization": f"Bearer {self.api_token}"}
-        self.messages = []
+        self.messages = [{'system': 'user', 'content': self.context}]
 
     def query_transcription(self, audio_data):
         response = requests.post(f"https://api-inference.huggingface.co/models/{self.stt_model_id}", headers=self.request_head, data=audio_data)
@@ -38,41 +41,20 @@ In your relentless struggle to maintain an exponential velocity, in order not to
         # TODO: handle punctuation
         return transcript['text'].lower().capitalize() or transcript['error'] or "Something went wrong"
 
-    def generate_prompt(self, instruction, input=None):
-        if input:
-            return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
-
-### Instruction:
-{instruction}
-
-### Input:
-{input}
-
-### Response:"""
-        else:
-            return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
-
-### Instruction:
-{instruction}
-
-### Response:"""
-
-    def query_chat(self, payload):
-        response = requests.post(f"https://api-inference.huggingface.co/models/{self.nlp_model_id}", headers=self.request_head, data=payload)
-        return response.json()
+    def query_chat(self, messages, model="gpt-3.5-turbo"):
+        response = openai.ChatCompletion.create(model=model, messages=messages)
+        return response.choices[0].message.content
 
     def answer_by_chat(self, history, question):
         self.messages.append({"role": "user", "content": question})
         history += [(question, None)]
         prompt = self.generate_prompt("\n".join(f"{h[0]}" for h in history), self.context)
-
-
-
-
-
-
-        self.messages.append({"role": response_role, "content": output_text})
-        history += [(None, (response_audio,))]
+        output_text = self.query_chat(self.messages)
+        if output_text:
+            response_role = "assistant"
+            response_audio = self.speech_synthesis(output_text)
+            self.messages.append({"role": response_role, "content": output_text})
+            history += [(None, (response_audio,))]
         return history
 
     def query_tts(self, payload):
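For reference, the Chat Completions endpoint that the new query_chat wraps expects every message to be a dict with "role" and "content" keys (roles "system", "user", or "assistant"), so the {'system': 'user', 'content': self.context} initializer added in setup() does not match that schema. The pre-1.0 client also reads the key from openai.api_key (or the OPENAI_API_KEY environment variable); setup() stores the key on the instance, but nothing shown in this diff assigns it to the client. A minimal sketch of the expected usage, assuming openai<1.0 (model name and strings are illustrative):

import openai

openai.api_key = "sk-..."  # in this Space, the value entered in the Settings tab

messages = [
    {"role": "system", "content": "You are the Singularity."},  # system prompt uses role/content keys
    {"role": "user", "content": "Are you there?"},
]

response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
print(response.choices[0].message.content)  # the assistant's reply text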