WWMachine committed on
Commit
fa063fb
·
verified ·
1 Parent(s): 8f9154b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -12
app.py CHANGED
@@ -40,9 +40,9 @@ def respond(
40
  yield response
41
 
42
 
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
  chatbot = gr.ChatInterface(
47
  respond,
48
  type="messages",
@@ -76,9 +76,9 @@ from huggingface_hub import InferenceClient
76
  DEEPGRAM_API_KEY = "0c72698eb40f85fc25b56a76039e795be653afed"
77
 
78
  def deepgram_stt(audio_file_path):
79
- """
80
- Send user microphone audio to Deepgram STT
81
- """
82
  url = "https://api.deepgram.com/v1/listen"
83
  headers = {
84
  "Authorization": f"Token {DEEPGRAM_API_KEY}",
@@ -93,9 +93,9 @@ def deepgram_stt(audio_file_path):
93
 
94
 
95
  def deepgram_tts(text):
96
- """
97
- Convert model output → speech using Deepgram TTS
98
- """
99
  url = "https://api.deepgram.com/v1/speak?model=aura-asteria-en" # any model
100
  headers = {
101
  "Authorization": f"Token {DEEPGRAM_API_KEY}",
@@ -122,9 +122,9 @@ def respond_audio(
122
  top_p,
123
  hf_token: gr.OAuthToken,
124
  ):
125
- """
126
- STT → send to model → TTS
127
- """
128
  client = InferenceClient(
129
  token=hf_token.token,
130
  model="openai/gpt-oss-20b"
 
40
  yield response
41
 
42
 
43
+
44
+ #For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
+
46
  chatbot = gr.ChatInterface(
47
  respond,
48
  type="messages",
 
76
  DEEPGRAM_API_KEY = "0c72698eb40f85fc25b56a76039e795be653afed"
77
 
78
  def deepgram_stt(audio_file_path):
79
+
80
+ #Send user microphone audio to Deepgram STT
81
+
82
  url = "https://api.deepgram.com/v1/listen"
83
  headers = {
84
  "Authorization": f"Token {DEEPGRAM_API_KEY}",
 
93
 
94
 
95
  def deepgram_tts(text):
96
+
97
+ #Convert model output → speech using Deepgram TTS
98
+
99
  url = "https://api.deepgram.com/v1/speak?model=aura-asteria-en" # any model
100
  headers = {
101
  "Authorization": f"Token {DEEPGRAM_API_KEY}",
 
122
  top_p,
123
  hf_token: gr.OAuthToken,
124
  ):
125
+
126
+ #STT → send to model → TTS
127
+
128
  client = InferenceClient(
129
  token=hf_token.token,
130
  model="openai/gpt-oss-20b"