jpdiazpardo committed on
Commit
0d49d91
1 Parent(s): f15fab7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -19
app.py CHANGED
@@ -46,14 +46,14 @@ title = "Scream: Fine-Tuned Whisper model for automatic guttural speech recogniti
46
  classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", top_k=None)
47
 
48
  #Functions-----------------------------------------------------------------------------------------------------------------------
49
- def transcribe(link,download,thumbnail,file,use_timestamps,sentiment_analysis):#file, return_timestamps, *kwargs):
50
  '''inputs: file, return_timestamps'''
51
- outputs = pipe(file, batch_size=BATCH_SIZE, generate_kwargs={"task": 'transcribe'}, return_timestamps=True)
52
  text = outputs["text"]
53
  timestamps = outputs["chunks"]
54
 
55
  #If return timestamps is True, return html text with timestamps format
56
- if use_timestamps==True:
57
  spider_text = [f"{chunk['text']}" for chunk in timestamps] #Text for spider chart without timestamps
58
  timestamps = [f"[{format_timestamp(chunk['timestamp'][0])} -> {format_timestamp(chunk['timestamp'][1])}] {chunk['text']}" for chunk in timestamps]
59
 
@@ -69,7 +69,7 @@ def transcribe(link,download,thumbnail,file,use_timestamps,sentiment_analysis):#
69
  av_dict = calculate_average(trans_dict)
70
  fig = spider_chart(av_dict)
71
 
72
- return file, text, fig, av_dict
73
 
74
  embed_html = '<iframe src="https://www.youtube.com/embed/YOUTUBE_ID'\
75
  'title="YouTube video player" frameborder="0" allow="accelerometer;'\
@@ -77,39 +77,42 @@ embed_html = '<iframe src="https://www.youtube.com/embed/YOUTUBE_ID'\
77
  'picture-in-picture" allowfullscreen></iframe>'
78
 
79
  def download(link):
 
 
80
  subprocess.run(['python3', 'youtubetowav.py', link])
81
  return thumbnail.update(value=embed_html.replace("YOUTUBE_ID",get_youtube_video_id(link)))
82
 
83
- def hide_sa(value):
84
  if value == True:
85
- return sa_plot.update(visible=True), sa_frequency.update(visible=True)
86
  else:
87
- return sa_plot.update(visible=False), sa_frequency.update(visible=False)
88
 
89
  #----------------------------------------------------------------------------------------------------------------------------------------------
90
 
91
  #Components------------------------------------------------------------------------------------------------------------------------------------
92
 
93
  #Input components
94
- yt_link = gr.Textbox(value=None,label="YouTube link", info = "Optional: Copy and paste YouTube URL")
95
- audio_input = gr.Audio(source="upload", type="filepath", label="Upload audio file for transcription")
96
- download_button = gr.Button(value="Download")
97
- thumbnail = gr.HTML(value="", label = "Thumbnail")
98
- sa_checkbox = gr.Checkbox(value=True, label="Sentiment analysis")
 
99
 
100
  inputs = [yt_link, #0
101
  download_button, #1
102
  thumbnail, #2
103
  audio_input, #3
104
- gr.Checkbox(value=True, label="Return timestamps"), #4
105
- sa_checkbox] #5
106
 
107
  #Output components
108
  audio_out = gr.Audio(label="Processed Audio", type="filepath", info = "Vocals only")
109
- sa_plot = gr.Plot(label="Sentiment Analysis")
110
- sa_frequency = gr.Label(label="Frequency")
111
 
112
- outputs = [audio_out, gr.outputs.HTML("text"), sa_plot, sa_frequency]
113
 
114
  #----------------------------------------------------------------------------------------------------------------------------------------------------
115
 
@@ -117,12 +120,12 @@ outputs = [audio_out, gr.outputs.HTML("text"), sa_plot, sa_frequency]
117
 
118
  with gr.Blocks() as demo:
119
  download_button.click(download, inputs=[yt_link], outputs=[thumbnail])
120
- sa_checkbox.change(hide_sa, inputs=[sa_checkbox], outputs=[sa_plot, sa_frequency])
121
 
122
 
123
  with gr.Column():
124
  gr.Interface(title = title, fn=transcribe, inputs = inputs, outputs = outputs,
125
- description=description, allow_flagging="never", article = article,
126
  examples="examples")
127
 
128
 
 
46
  classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", top_k=None)
47
 
48
  #Functions-----------------------------------------------------------------------------------------------------------------------
49
+ def transcribe(*args):
50
  '''inputs: file, return_timestamps'''
51
+ outputs = pipe(args[3], batch_size=BATCH_SIZE, generate_kwargs={"task": 'transcribe'}, return_timestamps=True)
52
  text = outputs["text"]
53
  timestamps = outputs["chunks"]
54
 
55
  #If return timestamps is True, return html text with timestamps format
56
+ if args[4]==True:
57
  spider_text = [f"{chunk['text']}" for chunk in timestamps] #Text for spider chart without timestamps
58
  timestamps = [f"[{format_timestamp(chunk['timestamp'][0])} -> {format_timestamp(chunk['timestamp'][1])}] {chunk['text']}" for chunk in timestamps]
59
 
 
69
  av_dict = calculate_average(trans_dict)
70
  fig = spider_chart(av_dict)
71
 
72
+ return args[3], text, fig, av_dict
73
 
74
  embed_html = '<iframe src="https://www.youtube.com/embed/YOUTUBE_ID'\
75
  'title="YouTube video player" frameborder="0" allow="accelerometer;'\
 
77
  'picture-in-picture" allowfullscreen></iframe>'
78
 
79
  def download(link):
80
+ '''Runs youtubetowav.py
81
+ inputs: link from textbox'''
82
  subprocess.run(['python3', 'youtubetowav.py', link])
83
  return thumbnail.update(value=embed_html.replace("YOUTUBE_ID",get_youtube_video_id(link)))
84
 
85
+ def hide_sentiment(value):
86
  if value == True:
87
+ return sentiment_plot.update(visible=True), sentiment_frequency.update(visible=True)
88
  else:
89
+ return sentiment_plot.update(visible=False), sentiment_frequency.update(visible=False)
90
 
91
  #----------------------------------------------------------------------------------------------------------------------------------------------
92
 
93
  #Components------------------------------------------------------------------------------------------------------------------------------------
94
 
95
  #Input components
96
+ yt_link = gr.Textbox(value=None,label="YouTube link", info = "Optional: Copy and paste YouTube URL") #0
97
+ download_button = gr.Button(value="Download") #1
98
+ thumbnail = gr.HTML(value="", label = "Thumbnail") #2
99
+ audio_input = gr.Audio(source="upload", type="filepath", label="Upload audio file for transcription") #3
100
+ timestamp_checkbox = gr.Checkbox(value=True, label="Return timestamps") #4
101
+ sentiment_checkbox = gr.Checkbox(value=True, label="Sentiment analysis") #5
102
 
103
  inputs = [yt_link, #0
104
  download_button, #1
105
  thumbnail, #2
106
  audio_input, #3
107
+ timestamp_checkbox, #4
108
+ sentiment_checkbox] #5
109
 
110
  #Output components
111
  audio_out = gr.Audio(label="Processed Audio", type="filepath", info = "Vocals only")
112
+ sentiment_plot = gr.Plot(label="Sentiment Analysis")
113
+ sentiment_frequency = gr.Label(label="Frequency")
114
 
115
+ outputs = [audio_out, gr.outputs.HTML("text"), sentiment_plot, sentiment_frequency]
116
 
117
  #----------------------------------------------------------------------------------------------------------------------------------------------------
118
 
 
120
 
121
  with gr.Blocks() as demo:
122
  download_button.click(download, inputs=[yt_link], outputs=[thumbnail])
123
+ sentiment_checkbox.change(hide_sentiment, inputs=[sentiment_checkbox], outputs=[sentiment_plot, sentiment_frequency])
124
 
125
 
126
  with gr.Column():
127
  gr.Interface(title = title, fn=transcribe, inputs = inputs, outputs = outputs,
128
+ description=description, cache_examples=True, allow_flagging="never", article = article,
129
  examples="examples")
130
 
131