RamAnanth1 committed
Commit e8653e3
1 parent: ceb8b6b

Use audio input instead of url

Files changed (1):
  app.py  +6 -8
app.py CHANGED
@@ -48,11 +48,10 @@ def get_audio_from_upload(audio):
         upload_endpoint,
         headers=headers,
         data=_read_array(audio))
-    print(upload_response.json())
-    return
+    return upload_response.json()['upload_url']
 
-def get_transcript_url(url):
-
+def get_transcript_url(audio):
+    url = get_audio_from_upload(audio)
     # JSON that tells the API which file to trancsribe
     json={
         # URL of the audio file to process
@@ -215,11 +214,10 @@ with gr.Blocks() as demo:
 
     with gr.Column(elem_id = "col_container"):
 
-        inputs = gr.Textbox(label = "Enter the url for the audio file")
         #audio_intelligence_options = gr.CheckboxGroup(audio_intelligence_list, label="Audio Intelligence Options")
-        audio_input = gr.Audio(source = "upload",label = "Input Audio")
+        inputs = gr.Audio(source = "upload",label = "Upload the input Audio file")
         b1 = gr.Button('Process Audio')
-        b2 = gr.Button("Upload Audio")
+
 
     with gr.Tabs():
         with gr.TabItem('Transcript') as transcript_tab:
@@ -236,7 +234,7 @@
     b1.click(get_transcript_url, [inputs], [transcript, summary, sentiment_analysis,topic_detection])
     b2.click(get_audio_from_upload, audio_input)
 
-    #examples = gr.Examples(examples = [["https://github.com/AssemblyAI-Examples/assemblyai-and-python-in-5-minutes/blob/main/audio.mp3?raw=true"]], inputs = inputs, outputs=[transcript, summary, sentiment_analysis, topic_detection], cache_examples = True, fn = get_transcript_url)
+    examples = gr.Examples(examples = [["audio.mp3"]], inputs = inputs, outputs=[transcript, summary, sentiment_analysis, topic_detection], cache_examples = True, fn = get_transcript_url)
 
 
 demo.queue().launch(debug=True)
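
For context, the change makes get_transcript_url accept an uploaded audio file, push it to the upload endpoint via get_audio_from_upload, and use the returned upload_url for transcription, instead of taking a URL typed into a textbox. The sketch below is a minimal, standalone illustration of that flow rather than the app's exact code: the endpoint URLs and the polling loop are assumptions based on AssemblyAI's public REST API, API_KEY stands in for the key app.py reads elsewhere, and upload_local_audio / transcribe are placeholder names, not the helpers defined in the file.

    # Minimal sketch of the upload-then-transcribe flow this commit switches to.
    # Assumptions: AssemblyAI's public /v2/upload and /v2/transcript endpoints,
    # an API key in API_KEY; function names here are illustrative placeholders.
    import time
    import requests

    API_KEY = "your-assemblyai-api-key"
    upload_endpoint = "https://api.assemblyai.com/v2/upload"
    transcript_endpoint = "https://api.assemblyai.com/v2/transcript"
    headers = {"authorization": API_KEY}

    def upload_local_audio(path):
        # POST the raw audio bytes; the API replies with a temporary upload_url
        with open(path, "rb") as f:
            response = requests.post(upload_endpoint, headers=headers, data=f)
        return response.json()["upload_url"]

    def transcribe(path):
        # Upload the local file first, then request a transcript for its URL
        audio_url = upload_local_audio(path)
        job = requests.post(transcript_endpoint, headers=headers,
                            json={"audio_url": audio_url}).json()
        # Poll until the transcription job finishes
        while True:
            result = requests.get(f"{transcript_endpoint}/{job['id']}",
                                  headers=headers).json()
            if result["status"] in ("completed", "error"):
                return result
            time.sleep(3)

    # Example: transcribe("audio.mp3")["text"]

On the Gradio side, the gr.Textbox URL field is replaced by a single gr.Audio(source = "upload") input wired to the Process Audio button, so one click covers both the upload and the transcription request.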