|
import gradio as gr |
|
from transformers import BertTokenizer, BertForSequenceClassification |
|
import torch |
|
|
|
|
|
# Directory holding the fine-tuned BERT checkpoint (tokenizer files + weights).
# NOTE(review): path is relative to the working directory — confirm deployment layout.
model_folder = "FYP_Model"

tokenizer = BertTokenizer.from_pretrained(model_folder)
model = BertForSequenceClassification.from_pretrained(model_folder)

# This script only performs inference: switch to eval mode so dropout /
# batch-norm layers are disabled and predictions are deterministic.
model.eval()
|
|
|
def classify_audio(audio_file):
    """Classify the uploaded/recorded audio with the fine-tuned BERT model.

    Parameters
    ----------
    audio_file : str | file-like | None
        A filesystem path (Gradio ``type="filepath"``) or an open file-like
        object; ``None`` when the user clears the audio widget.

    Returns
    -------
    str
        Human-readable message with the predicted class index.

    NOTE(review): BERT is a *text* model — tokenizing raw audio bytes is
    almost certainly not meaningful. Presumably the audio should first be
    transcribed (e.g. with an ASR model) and the transcript classified;
    confirm the intended pipeline.
    """
    if audio_file is None:
        return "No audio provided."

    # Accept both a path string and an already-open file object.
    if isinstance(audio_file, str):
        with open(audio_file, "rb") as fh:
            audio_content = fh.read()
    else:
        audio_content = audio_file.read()

    # The tokenizer requires `str`, not `bytes` — the original passed raw
    # bytes, which raises a TypeError/ValueError inside the tokenizer.
    if isinstance(audio_content, bytes):
        audio_content = audio_content.decode("utf-8", errors="replace")

    inputs = tokenizer(audio_content, return_tensors="pt", truncation=True)

    # Inference only: skip autograd bookkeeping to save time and memory.
    with torch.no_grad():
        outputs = model(**inputs)

    # argmax over the class dimension (the original flattened the whole
    # tensor, which is correct only because the batch size is 1).
    predicted_class = torch.argmax(outputs.logits, dim=-1).item()

    return f"Predicted class: {predicted_class}"
|
|
|
|
|
# Build the web UI. Gradio 4.x removed Audio(type="file") — the accepted
# values are "filepath" (handler receives a path string) and "numpy";
# "filepath" pairs with classify_audio reading the file from disk.
iface = gr.Interface(
    fn=classify_audio,
    inputs=gr.Audio(type="filepath", label="Upload or Record Audio"),
    outputs=gr.Textbox(label="Prediction"),
    live=True,
)

# Guard the server launch so importing this module (e.g. for tests or
# deployment tooling) does not start the app as a side effect.
if __name__ == "__main__":
    iface.launch()
|
|