#Imports---------------------------------------------------------------
import gradio as gr
import subprocess
import torch
from transformers import pipeline
import os
#User defined functions (UDF)
from functions.charts import spider_chart
from functions.dictionaries import calculate_average, transform_dict
from functions.icon import generate_icon
from functions.timestamp import format_timestamp
from functions.youtube import get_youtube_video_id
#----------------------------------------------------------------------
MODEL_NAME = "openai/whisper-medium"
#MODEL_NAME = "jpdiazpardo/whisper-tiny-metal"
BATCH_SIZE = 8
device = 0 if torch.cuda.is_available() else "cpu"
#Transformers pipeline
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)
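#Example (sketch, assuming a local file "sample.wav"; the filename is illustrative):
#   result = pipe("sample.wav", batch_size=BATCH_SIZE, return_timestamps=True)
#   result["text"]   -> full transcription as a single string
#   result["chunks"] -> [{"timestamp": (0.0, 5.2), "text": "..."}, ...]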
#Formatting--------------------------------------------------------------------------------------------
description = ("Transcribe long-form audio inputs with the click of a button! Demo uses the"
               f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
               " of arbitrary length. Check out some of the 'cool' examples below.")
linkedin = generate_icon("linkedin")
github = generate_icon("github")
article = ("<div style='text-align: center; max-width:800px; margin:10px auto;'>"
           f"<p>{linkedin} <a href='https://www.linkedin.com/in/juanpablodiazp/' target='_blank'>Juan Pablo Díaz Pardo</a><br>"
           f"{github} <a href='https://github.com/jpdiazpardo' target='_blank'>jpdiazpardo</a></p>"
           "</div>")
title = "Scream: Fine-Tuned Whisper model for automatic gutural speech recognition 🤟🤟🤟"
examples = [["Whitechapel - Prostatic Fluid Asphyxiation.wav", True, True],
            ["Suicide Silence - Genocide.wav", True, True]]
#-------------------------------------------------------------------------------------------------------------------------------
#Define classifier for sentiment analysis
classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", top_k=None)
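#Example (sketch): with top_k=None the classifier returns a score for every emotion label, e.g.
#   classifier.predict("I love this song!")[0]
#   -> [{"label": "joy", "score": 0.97}, {"label": "anger", "score": 0.01}, ...]
#transform_dict and calculate_average (from functions.dictionaries) are used below to aggregate these per-line scores.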
#Functions-----------------------------------------------------------------------------------------------------------------------
def transcribe(file, use_timestamps=True, sentiment_analysis=True):
    '''Transcribes an audio file and runs per-line emotion classification.
    inputs: file (path to audio), use_timestamps (bool), sentiment_analysis (bool).
    Note: sentiment_analysis is received from the checkbox for input arity;
    output visibility is handled separately by hide_sentiment.'''
    outputs = pipe(file, batch_size=BATCH_SIZE, generate_kwargs={"task": "transcribe"}, return_timestamps=True)
    text = outputs["text"]
    chunks = outputs["chunks"]

    #If use_timestamps is True, prefix each line with its [start -> end] timestamp
    if use_timestamps:
        spider_text = [chunk["text"] for chunk in chunks]  #Text for spider chart without timestamps
        lines = [f"[{format_timestamp(chunk['timestamp'][0])} -> {format_timestamp(chunk['timestamp'][1])}] {chunk['text']}" for chunk in chunks]
    else:
        lines = [chunk["text"] for chunk in chunks]
        spider_text = lines

    text = "<br>".join(str(feature) for feature in lines)
    text = f"<h4>Transcription</h4><div style='overflow-y: scroll; height: 150px;'>{text}</div>"

    #Classify each line, then average the emotion scores and draw the spider chart
    spider_text = "\n".join(str(feature) for feature in spider_text)
    trans_dict = [transform_dict(classifier.predict(t)[0]) for t in spider_text.split("\n")]
    av_dict = calculate_average(trans_dict)
    fig = spider_chart(av_dict)

    return text, fig, av_dict
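#Example (sketch, assuming "sample.wav" exists):
#   html, fig, averages = transcribe("sample.wav", use_timestamps=True, sentiment_analysis=True)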
embed_html = ('<iframe src="https://www.youtube.com/embed/YOUTUBE_ID" '
              'title="YouTube video player" frameborder="0" allow="accelerometer; '
              'autoplay; clipboard-write; encrypted-media; gyroscope; '
              'picture-in-picture" allowfullscreen></iframe>')
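#Example (sketch): embed_html.replace("YOUTUBE_ID", "dQw4w9WgXcQ") yields an embeddable iframe for that video.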
def download(link):
    '''Downloads a YouTube video as WAV by running youtubetowav.py
    inputs: link from textbox.
    Note: only usable when the YouTube components below are uncommented,
    since it references the thumbnail component.'''
    subprocess.run(['python3', 'youtubetowav.py', link])
    return thumbnail.update(value=embed_html.replace("YOUTUBE_ID", get_youtube_video_id(link)))
def hide_sentiment(value):
    '''Shows or hides the sentiment outputs depending on the checkbox value'''
    return sentiment_plot.update(visible=value), sentiment_frequency.update(visible=value)
#----------------------------------------------------------------------------------------------------------------------------------------------
#Components------------------------------------------------------------------------------------------------------------------------------------
#Input components
#yt_link = gr.Textbox(value=None,label="YouTube link", info = "Optional: Copy and paste YouTube URL") #0
#download_button = gr.Button(value="Download") #1
#thumbnail = gr.HTML(value="", label = "Thumbnail") #2
audio_input = gr.Audio(source="upload", type="filepath", label="Upload audio file for transcription") #3
timestamp_checkbox = gr.Checkbox(value=True, label="Return timestamps") #4
sentiment_checkbox = gr.Checkbox(value=True, label="Sentiment analysis") #5
inputs = [audio_input, #0
timestamp_checkbox, #1
sentiment_checkbox] #2
#Output components
#audio_out = gr.Audio(label="Processed Audio", type="filepath", info = "Vocals only")
sentiment_plot = gr.Plot(label="Sentiment Analysis")
sentiment_frequency = gr.Label(label="Frequency")
outputs = [gr.HTML(label="Transcription"), sentiment_plot, sentiment_frequency]
#----------------------------------------------------------------------------------------------------------------------------------------------------
#Launch demo-----------------------------------------------------------------------------------------------------------------------------------------
with gr.Blocks() as demo:
    #download_button.click(download, inputs=[yt_link], outputs=[thumbnail])
    sentiment_checkbox.change(hide_sentiment, inputs=[sentiment_checkbox], outputs=[sentiment_plot, sentiment_frequency])

    with gr.Column():
        gr.Interface(title=title, fn=transcribe, inputs=inputs, outputs=outputs,
                     description=description, cache_examples=True, allow_flagging="never",
                     article=article, examples=examples)

demo.queue(concurrency_count=3)
demo.launch(debug=True)
#----------------------------------------------------------------------------------------------------------------------------------------------------