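# Gradio demo: estimate the condition of a vinyl record (Goldmine grading
# standard) from an audio file, using the jvalero/wav2vec2-base-vinyl_condition
# checkpoint with the 🤗 Transformers audio-classification pipeline.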
import torch
import gradio as gr
from transformers import pipeline

# Use the first CUDA GPU when available, otherwise fall back to CPU.
device = 0 if torch.cuda.is_available() else "cpu"
MODEL_ID = "jvalero/wav2vec2-base-vinyl_condition"
# Audio-classification pipeline for the vinyl-condition checkpoint.
pipe = pipeline(
    task="audio-classification",
    model=MODEL_ID,
    chunk_length_s=30,
    device=device,
)
def get_vinyl_condition(filepath):
    """Classify an audio file and return the predicted condition label."""
    output = pipe(
        filepath,
        batch_size=8,
    )
    # The pipeline returns labels sorted by score; keep the top prediction.
    return output[0]["label"]
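# Example usage (hypothetical file name): get_vinyl_condition("my_record.mp3")
# returns a single Goldmine-style grade label predicted by the model.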
demo = gr.Blocks()
file_classify = gr.Interface(
    fn=get_vinyl_condition,
    inputs=[
        gr.Audio(sources="upload", label="Audio file", type="filepath"),
    ],
    outputs="label",
    title="Vinyl Condition Classifier",
    description=(
        "Get your vinyl's condition based on the Goldmine grading standard! The demo uses the"
        f" checkpoint [{MODEL_ID}](https://huggingface.co/{MODEL_ID}) and 🤗 Transformers to classify"
        " audio files of arbitrary length."
    ),
    examples=[
        ["./example.mp3"],
        ["./example1.mp3"],
    ],
    cache_examples=True,
    allow_flagging="never",
)
with demo:
    gr.TabbedInterface([file_classify], ["Get Vinyl Condition"])

demo.launch()