import gradio as gr
import torch
from model import ECAPA_gender
# Load the model
model = ECAPA_gender.from_pretrained("JaesungHuh/ecapa-gender")
model.eval()  # switch to inference mode (disables dropout / batch-norm updates)
def predict_gender(filepath):
    # Load the audio file and run it through the model
    audio = model.load_audio(filepath)
    with torch.no_grad():
        output = model.forward(audio)
        probs = torch.softmax(output, dim=1)
    # Map each class index to its gender label and predicted probability
    prob_dict = {model.pred2gender[i]: float(prob) for i, prob in enumerate(probs[0])}
    return prob_dict
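# Optional sanity check outside the Gradio UI: calling the function directly on one of
# the bundled example files (assumed to sit next to app.py) returns a dict mapping
# gender labels to probabilities.
# print(predict_gender('00001.wav'))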
audio_component = gr.Audio(type='filepath', label='Upload your audio file here')
label_component = gr.Label(label='Gender classification result')
demo = gr.Interface(
    fn=predict_gender,
    inputs=audio_component,
    outputs=label_component,
    examples=['00001.wav', '00002.wav'],
)
demo.launch()