peb-peb committed on
Commit
6086f8c
1 Parent(s): 3422eeb

add more example test cases

Browse files
Files changed (3) hide show
  1. app.py +10 -14
  2. examples/mononeg.wav +3 -0
  3. examples/monopos.wav +3 -0
app.py CHANGED
@@ -1,25 +1,15 @@
1
  import gradio as gr
2
  import requests
3
- # from transcribe import transcribe
 
4
  from sentiment_analysis import sentiment_analyser
5
  from summary import summarizer
6
  from topic import topic_gen
7
- from data import data
8
-
9
- def transcribe2():
10
- response = requests.post("https://dwarkesh-whisper-speaker-recognition.hf.space/run/predict", json={
11
- "data": [
12
- {"name":"audio.wav","data":"data:audio/wav;base64,UklGRiQAAABXQVZFZm10IBAAAAABAAEARKwAAIhYAQACABAAZGF0YQAAAAA="},
13
- 2,
14
- ]}).json()
15
 
16
- data = response["data"]
17
 
18
  def main(audio_file, number_of_speakers):
19
  # Audio to Text Converter
20
- # text_data = transcribe(audio_file, number_of_speakers)
21
- # print(text_data)
22
- text_data = data
23
  topic = topic_gen(text_data)[0]["generated_text"]
24
  summary = summarizer(text_data)[0]["summary_text"]
25
  sent_analy = sentiment_analyser(text_data)
@@ -28,7 +18,11 @@ def main(audio_file, number_of_speakers):
28
 
29
  # UI Interface on the Hugging Face Page
30
  with gr.Blocks() as demo:
31
- gr.Markdown("# Shravan - Unlocking Value from Call Data")
 
 
 
 
32
  with gr.Box():
33
  with gr.Row():
34
  with gr.Column():
@@ -46,6 +40,8 @@ with gr.Blocks() as demo:
46
  gr.Examples(
47
  examples=[
48
  ["./examples/sample4.wav", 2],
 
 
49
  ],
50
  inputs=[audio_file, number_of_speakers],
51
  outputs=[topic, summary, sentiment_analysis],
 
1
  import gradio as gr
2
  import requests
3
+
4
+ from transcribe import transcribe
5
  from sentiment_analysis import sentiment_analyser
6
  from summary import summarizer
7
  from topic import topic_gen
 
 
 
 
 
 
 
 
8
 
 
9
 
10
  def main(audio_file, number_of_speakers):
11
  # Audio to Text Converter
12
+ text_data = transcribe(audio_file, number_of_speakers)
 
 
13
  topic = topic_gen(text_data)[0]["generated_text"]
14
  summary = summarizer(text_data)[0]["summary_text"]
15
  sent_analy = sentiment_analyser(text_data)
 
18
 
19
  # UI Interface on the Hugging Face Page
20
  with gr.Blocks() as demo:
21
+ gr.Markdown("""
22
+ # Shravan - Unlocking Value from Call Data
23
+
24
+ > **NOTE:** You need to give a `.wav` audio file and the audio file should be `monochannel`.
25
+ """)
26
  with gr.Box():
27
  with gr.Row():
28
  with gr.Column():
 
40
  gr.Examples(
41
  examples=[
42
  ["./examples/sample4.wav", 2],
43
+ ["./examples/monopos.wav", 2],
44
+ ["./examples/mononeg.wav", 2],
45
  ],
46
  inputs=[audio_file, number_of_speakers],
47
  outputs=[topic, summary, sentiment_analysis],
examples/mononeg.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48ff84445d6358625065758ff2911259481b66151c054b712e752a6aa02f2f21
3
+ size 2939982
examples/monopos.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebd054310c0b00ea193bc2dba688a95abccb236fd477df51fd0fd277c15748cf
3
+ size 3787854