shengxiong commited on
Commit
2bf90a8
1 Parent(s): f5f1842

Just a quick MVP to get things going.

Browse files

On branch main
Your branch is up to date with 'origin/main'.

Changes to be committed:
modified: .gitignore
modified: app.py
new file: last_audio.mp3
modified: supplier.py

Files changed (4) hide show
  1. .gitignore +2 -1
  2. app.py +4 -14
  3. last_audio.mp3 +0 -0
  4. supplier.py +21 -5
.gitignore CHANGED
@@ -5,4 +5,5 @@ __pycache__
5
 
6
  # files local to the server os
7
  server.py
8
- *.pem
 
 
5
 
6
  # files local to the server os
7
  server.py
8
+ *.pem
9
+ *.ipynb
app.py CHANGED
@@ -1,11 +1,7 @@
1
  import gradio as gr
2
  from supplier import *
3
 
4
- background_examples=[
5
- "This is a friendly chatbot with humour.", # casual setting
6
- "This is a friendly chatbot which respond to user in Chinese.", # romantic setting
7
- "We are at the mideval castle, I am talking to my chatbot donkey.", # fantasy setting
8
- ]
9
 
10
  # create a system setting for llm
11
  with gr.Blocks() as settings:
@@ -22,13 +18,8 @@ with gr.Blocks() as settings:
22
  clear = gr.Button("Clear")
23
  submit = gr.Button("Submit")
24
 
25
- gr.Examples(background_examples,background_edit,label="Examples")
26
  clear.click(lambda:None,None,background_edit,queue=False)
27
  submit.click(update_sys,background_edit,background,queue=False)
28
- # with gr.Row():
29
- # voices_list = gr.Dropdown([v["Name"] for v in get_voices()],label="Voices")
30
-
31
- # voices_list.change(lambda voice:App_state.update({"voice":voice}),voices_list,queue=False)
32
 
33
  with gr.Blocks() as chat_window:
34
  with gr.Row():
@@ -37,16 +28,15 @@ with gr.Blocks() as chat_window:
37
  chatbot_speech = gr.Audio()
38
  with gr.Column():
39
  chat_clear = gr.Button("Clear")
40
- # play_speech = gr.Button("Play")
41
  chat_clear.click(lambda:None,None,chatbot,queue=False)
42
- # play_speech.click(text_to_audio,chatbot,chatbot_speech,queue=False)
43
 
44
  with gr.Column():
45
  msg = gr.Textbox()
46
  submit = gr.Button("Submit")
47
- gr.Examples(["Hello","How are you?"],msg,label="Examples")
48
  audio = gr.Audio(sources="microphone",type="filepath")
49
- # gr.Interface(translate,inputs=gr.Audio(source="microphone",type="filepath"),outputs = "text")
50
 
51
  audio.change(translate,audio,msg,queue=False)
52
  msg.submit(send_chat,[msg,chatbot],[msg,chatbot])
 
1
  import gradio as gr
2
  from supplier import *
3
 
4
+ BASE_SAMPLES = ["What can you help me with?","What materials do we have?","What projects do we have today?"]
 
 
 
 
5
 
6
  # create a system setting for llm
7
  with gr.Blocks() as settings:
 
18
  clear = gr.Button("Clear")
19
  submit = gr.Button("Submit")
20
 
 
21
  clear.click(lambda:None,None,background_edit,queue=False)
22
  submit.click(update_sys,background_edit,background,queue=False)
 
 
 
 
23
 
24
  with gr.Blocks() as chat_window:
25
  with gr.Row():
 
28
  chatbot_speech = gr.Audio()
29
  with gr.Column():
30
  chat_clear = gr.Button("Clear")
31
+ play_speech = gr.Button("Play")
32
  chat_clear.click(lambda:None,None,chatbot,queue=False)
33
+ play_speech.click(text_to_audio,chatbot,chatbot_speech,queue=False)
34
 
35
  with gr.Column():
36
  msg = gr.Textbox()
37
  submit = gr.Button("Submit")
38
+ gr.Examples(BASE_SAMPLES,msg,label="Examples")
39
  audio = gr.Audio(sources="microphone",type="filepath")
 
40
 
41
  audio.change(translate,audio,msg,queue=False)
42
  msg.submit(send_chat,[msg,chatbot],[msg,chatbot])
last_audio.mp3 ADDED
Binary file (382 kB). View file
 
supplier.py CHANGED
@@ -1,4 +1,5 @@
1
- import openai
 
2
  # import boto3
3
  import os
4
 
@@ -6,7 +7,7 @@ import pydub
6
  import numpy as np
7
 
8
  # setup the api keys
9
- openai.api_key = os.environ.get("OPENAI_API_KEY")
10
  # aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID")
11
  # aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
12
 
@@ -81,7 +82,7 @@ def send_chat(text,messages=[]):
81
  }
82
  ])
83
 
84
- res = openai.chat.completions.create(
85
  model="gpt-4-turbo",
86
  messages=openai_messages
87
  )
@@ -96,9 +97,24 @@ def send_chat(text,messages=[]):
96
  def translate(file_path):
97
  if file_path:
98
  f = open(file_path,"rb")
99
- res = openai.audio.translations.create(
100
  file=f,
101
  model="whisper-1")
102
  return res.text
103
  else:
104
- return ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# OpenAI SDK v1 client-style API (module-level `openai.api_key` was removed).
# NOTE(review): the accidental `from tkinter.filedialog import Open` auto-import
# was dropped — `Open` is never used, and importing tkinter fails on headless
# servers where this app is deployed.
from openai import OpenAI
3
  # import boto3
4
  import os
5
 
 
7
  import numpy as np
8
 
9
  # setup the api keys
10
+ OPENAI_CLIENT = OpenAI()
11
  # aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID")
12
  # aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
13
 
 
82
  }
83
  ])
84
 
85
+ res = OPENAI_CLIENT.chat.completions.create(
86
  model="gpt-4-turbo",
87
  messages=openai_messages
88
  )
 
def translate(file_path):
    """Transcribe-and-translate a recorded audio clip to English text.

    Parameters:
        file_path: path to the recorded audio file, or None/"" when the
            microphone component has nothing yet.

    Returns:
        The Whisper translation text, or "" when there is no file.
    """
    # Guard clause: gradio fires `change` with None when the recording is
    # cleared — return the empty transcript without touching the API.
    if not file_path:
        return ""
    # `with` closes the audio file handle; the original leaked it on every
    # microphone upload.
    with open(file_path, "rb") as f:
        res = OPENAI_CLIENT.audio.translations.create(
            file=f,
            model="whisper-1",
        )
    return res.text

def text_to_audio(chat_messages):
    """Synthesize speech for the latest bot reply and return the audio path.

    Parameters:
        chat_messages: gradio chatbot history — a list of
            [user_message, bot_reply] pairs (may be None or empty).

    Returns:
        Path to the generated mp3, suitable for the gr.Audio output
        component wired to the Play button, or None when there is no
        history to speak.
    """
    # Guard: an empty/cleared chat has nothing to speak; the original
    # raised IndexError here.
    if not chat_messages:
        return None
    # Speak the MOST RECENT bot reply. The original indexed
    # chat_messages[0][-1], which always replayed the first exchange.
    text = chat_messages[-1][-1]
    out_path = "last_audio.mp3"
    # Streaming form of the speech endpoint; calling .stream_to_file on the
    # plain response is deprecated in openai>=1.x.
    with OPENAI_CLIENT.audio.speech.with_streaming_response.create(
        model="tts-1",
        voice="nova",
        input=text,
    ) as response:
        response.stream_to_file(out_path)
    # Return the path so gradio can load it into the Audio component; the
    # original returned None, so the Play button produced no audio.
    return out_path