import gradio as gr
import torch
from charts import spider_chart
from icon import generate_icon
from transformers import pipeline
from timestamp import format_timestamp

MODEL_NAME = "openai/whisper-medium"
BATCH_SIZE = 8

# Use the first GPU when available, otherwise fall back to CPU.
device = 0 if torch.cuda.is_available() else "cpu"

# Whisper ASR pipeline; 30 s chunking lets it handle arbitrarily long audio.
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

# Emotion classifier that feeds the spider chart.
# top_k=None returns a score for every emotion label, not just the best one.
classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    top_k=None,
)


def transcribe(file, task, return_timestamps):
    """Transcribe an audio file and score the emotions of the transcript.

    Parameters
    ----------
    file : str
        Filesystem path of the uploaded audio file.
    task : str
        Generation task forwarded to Whisper (the UI only offers
        "transcribe").
    return_timestamps : bool
        When True, each transcript chunk in the HTML output is prefixed
        with its "[start -> end]" timestamp.

    Returns
    -------
    tuple
        (input file path, HTML transcription, spider-chart figure).
    """
    outputs = pipe(
        file,
        batch_size=BATCH_SIZE,
        generate_kwargs={"task": task},
        return_timestamps=True,
    )
    chunks = outputs["chunks"]

    if return_timestamps:
        # The spider chart always scores the plain text, without timestamps.
        spider_text = [chunk["text"] for chunk in chunks]
        display_lines = [
            f"[{format_timestamp(chunk['timestamp'][0])} -> "
            f"{format_timestamp(chunk['timestamp'][1])}] {chunk['text']}"
            for chunk in chunks
        ]
    else:
        display_lines = [chunk["text"] for chunk in chunks]
        spider_text = display_lines

    # NOTE(review): the HTML markup below was garbled in the source this was
    # recovered from and has been reconstructed — confirm it matches the
    # deployed app's rendering.
    body = "<br>".join(str(line) for line in display_lines)
    text = f"<h1>Transcription</h1><p>{body}</p>"

    fig = spider_chart(classifier, "\n".join(str(line) for line in spider_text))
    return file, text, fig


inputs = [
    gr.Audio(source="upload", label="Audio file", type="filepath"),
    gr.Radio(["transcribe"], label="Task", value="transcribe"),
    gr.Checkbox(value=True, label="Return timestamps"),
]

outputs = [
    gr.Audio(label="Processed Audio", type="filepath"),
    # gr.HTML replaces the deprecated gr.outputs.HTML wrapper; it is the
    # component-style API already used for Audio/Plot above.
    gr.HTML(label="text"),
    gr.Plot(label="fig"),
]

MODEL_NAME1 = "jpdiazpardo/whisper-tiny-metal"

description = (
    "Transcribe long-form audio inputs with the click of a button! Demo uses the"
    f" checkpoint [{MODEL_NAME1}](https://huggingface.co/{MODEL_NAME1}) and 🤗 Transformers to transcribe audio files"
    " of arbitrary length. Check some of the 'cool' examples below"
)

# BUG FIX: the original was missing the comma between the two example rows,
# which made `examples` a subscript expression and raised a TypeError at
# import time.
examples = [
    ["When a Demon Defiles a Witch.wav", "transcribe", True],
    ["Immaculate Misconception.wav", "transcribe", True],
]

linkedin = generate_icon("linkedin")
github = generate_icon("github")

# NOTE(review): the article HTML was garbled in the recovered source and has
# been reconstructed — verify the exact markup and any author links against
# the original app.
article = (
    "<div style='text-align: center'>"
    f"{linkedin} Juan Pablo Díaz Pardo<br>"
    f"{github} jpdiazpardo"
    "</div>"
)

title = "Scream: Fine-Tuned Whisper model for automatic gutural speech recognition 🤟🤟🤟"

demo = gr.Interface(
    title=title,
    fn=transcribe,
    inputs=inputs,
    outputs=outputs,
    description=description,
    cache_examples=True,
    allow_flagging="never",
    article=article,
    examples=examples,
)

demo.queue(concurrency_count=3)
demo.launch(debug=True)