## libraries for data preprocessing
import numpy as np

## libraries for loading the trained deep learning model
from tensorflow import keras

## libraries for reading audio files
import librosa as lib

## library for building the web app
import gradio as gr

## load the trained heartbeat sound classification model
## (expects the .h5 file to sit alongside app.py)
model = keras.models.load_model('heartbeatsound_classification.h5')


def loading_sound_file(sound_file, sr=22050, duration=10):
    """Load an audio file and return its mean MFCC features as a column vector."""
    input_length = sr * duration
    X, sr = lib.load(sound_file, sr=sr, duration=duration)
    dur = lib.get_duration(y=X, sr=sr)
    # # pad the audio file to the target duration
    # if round(dur) < duration:
    #     print("fixing audio length:", sound_file)
    #     X = lib.util.fix_length(X, size=input_length)
    # extract the mean MFCC features from the signal
    mfccs = np.mean(lib.feature.mfcc(y=X, sr=sr, n_mfcc=25).T, axis=0)
    data = np.array(mfccs).reshape([-1, 1])  # shape (25, 1): 25 mean MFCC coefficients
    return data
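
# A minimal usage sketch of the feature extractor (the file name below is
# hypothetical and not part of the app):
#   features = loading_sound_file("sample_heartbeat.wav")
#   features.shape  # -> (25, 1)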


def heart_signal_classification(data):
    """Classify a heart sound recording as Artifact, Murmur, or Normal."""
    X = loading_sound_file(data)
    pred = model.predict(X)
    result = pred[0].argmax()
    ## class labels used during training
    labels = {
        0: 'Artifact',
        1: 'Murmur',
        2: 'Normal'
    }
    label = labels[result]
    return label
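
# Quick local test outside the web UI (hypothetical file path; assumes the
# model file above has been loaded successfully):
#   print(heart_signal_classification("sample_heartbeat.wav"))  # -> 'Artifact', 'Murmur', or 'Normal'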


################### Gradio Web App ################################
title = "Heart Signal Classification App"
description = "Classifies an uploaded heart sound recording as Artifact, Murmur, or Normal."
audio_input = gr.Audio(sources=["upload"], type="filepath", label="Heart Sound Recording")
label_output = gr.Textbox(label="Type Of Heart Signal")

iface = gr.Interface(fn=heart_signal_classification, inputs=audio_input, outputs=label_output,
                     title=title, description=description)
iface.launch(inline=False)