import gradio as gr
import torch
from transformers import pipeline

# Load the audio-classification pipeline once at import time so every
# request reuses the same model instead of reloading it per call.
# NOTE(review): "your-username/model-name" is a placeholder Hub id —
# replace it with a real Hugging Face model repo before deploying.
model = pipeline(task="audio-classification", model="your-username/model-name")
def classify_audio(audio_file):
    """Classify one audio clip and return the top predicted label.

    Parameters
    ----------
    audio_file : str | file-like | None
        Gradio delivers a filepath string when the Audio input uses
        type="filepath", a temp-file object under the legacy type="file",
        and None when the user submits without recording/uploading.

    Returns
    -------
    str
        The highest-scoring class label, or a short message when no
        audio was provided.
    """
    if audio_file is None:
        # Gradio sends None on an empty submission; the original code
        # crashed here with AttributeError on .read().
        return "No audio provided."
    if isinstance(audio_file, str):
        # Filepath string — the pipeline can read the file itself.
        audio_input = audio_file
    elif hasattr(audio_file, "read"):
        # Legacy file-like wrapper — fall back to raw bytes, matching the
        # original behavior for this input shape.
        audio_input = audio_file.read()
    else:
        # Anything else (e.g. raw bytes / array) is passed through as-is.
        audio_input = audio_file
    # The pipeline returns a list of {"label", "score"} dicts sorted by
    # descending score; surface only the best label.
    result = model(audio_input)
    return result[0]["label"]
# Gradio interface: type="filepath" hands the callback a path string.
# The original type="file" was deprecated in Gradio 3.x and removed in
# 4.x — valid Audio input types are "filepath" and "numpy".
iface = gr.Interface(
    fn=classify_audio,
    inputs=gr.Audio(type="filepath", label="Upload or Record Audio"),
    outputs=gr.Textbox(),
    live=True,  # re-run classification automatically when the audio changes
)

# Launch the Gradio app (starts the local web server; blocks the script).
iface.launch()