# Speech emotion recognition Streamlit app (commit 0c2d8c4, "update app")
import streamlit as st
from keras.models import load_model
import os
import streamlit as st
import numpy as np
import pandas as pd
from sklearn.preprocessing import normalize
import matplotlib.pyplot as plt
import librosa
import librosa.display
import IPython.display as ipd
import streamlit.components.v1 as components
from io import BytesIO
from scipy.io.wavfile import read, write
from googlesearch import search
import requests
from bs4 import BeautifulSoup
# Load the trained Keras emotion classifier once at import time so every
# Streamlit rerun reuses the same model instance.
classifier = load_model('./model/bestmodel.h5')
def prepare_test(file):
    """Build the model's input tensor from an audio file.

    Loads the audio, trims leading/trailing silence, extracts six
    spectral feature maps, pads each to 20 x 350, stacks them as
    channels and adds a batch axis.

    Parameters
    ----------
    file : path or file-like object accepted by ``librosa.load``.

    Returns
    -------
    np.ndarray of shape (1, 20, 350, 6), dtype float32.
    """
    max_size = 350
    data_ori, sample_rate = librosa.load(file)
    # Trim silence so padding reflects actual speech content.
    data, _ = librosa.effects.trim(data_ori)
    spec_bw = padding(librosa.feature.spectral_bandwidth(y=data, sr=sample_rate), 20, max_size).astype('float32')
    cent = padding(librosa.feature.spectral_centroid(y=data, sr=sample_rate), 20, max_size).astype('float32')
    mfcc = librosa.feature.mfcc(y=data, sr=sample_rate, n_mfcc=20)
    mfccs = padding(normalize(mfcc, axis=1), 20, max_size).astype('float32')
    rms = padding(librosa.feature.rms(y=data), 20, max_size).astype('float32')
    # Tonnetz is computed on the harmonic component only.
    harmonic = librosa.effects.harmonic(data)
    tonnetz = padding(librosa.feature.tonnetz(y=harmonic, sr=sample_rate, fmin=75), 20, max_size).astype('float32')
    chroma = padding(librosa.feature.chroma_cens(y=data, sr=sample_rate, fmin=75), 20, max_size).astype('float32')
    # Stack the six 20x350 maps into channels in one call (the original
    # chained pairwise np.dstack calls and funnelled the single result
    # through a one-element list + np.concatenate — same output).
    image = np.dstack((chroma, spec_bw, cent, mfccs, rms, tonnetz))
    return image[np.newaxis, ...]
def padding(array, xx, yy):
    """Zero-pad a 2-D array up to shape (xx, yy), centring the content.

    Pad widths are clamped at zero, so an array already larger than the
    target along an axis is returned unpadded on that axis.
    """
    height, width = array.shape[0], array.shape[1]
    top = max((xx - height) // 2, 0)
    bottom = max(xx - top - height, 0)
    left = max((yy - width) // 2, 0)
    right = max(yy - left - width, 0)
    return np.pad(array, pad_width=((top, bottom), (left, right)), mode='constant')
def model():
    """Render the 'about the model' page.

    A selectbox chooses between training-history plots, the confusion
    matrix image, and tables of train/test metrics.
    """
    topic = st.selectbox('Please select one to find out more about the model',
                         ('Choose','Model Training History', 'Confusion Matrix', 'Train-Test Scores'))
    if topic == 'Model Training History':
        st.header('Train-Val Accuracy History')
        st.write('Train and Validation accuracy scores are comparable this indicates that the Model is moderately trained.')
        st.image('./img/bestmodelacc.png')
        st.header('Train-Val Loss History')
        st.write('The training was stopped with EarlyStopping() when the Validation loss score starts to saturate.')
        st.image('./img/bestmodelloss.png')
    elif topic == 'Confusion Matrix':
        st.header('Confusion Matrix')
        st.write('Below is the Confusion Matrix for this Model normalized by rows indicating Recall scores which are over 80%.')
        st.image('./img/bestmodelcm.png')
    elif topic == 'Train-Test Scores':
        st.header('Train-Test Accuracy scores')
        st.write('Train accuracy score is 94% and Test accuracy score is 90%. There is a 4% difference indicating that the model fitted moderately')
        # Accuracy and loss for the train and test splits.
        overview = pd.DataFrame({'Accuracy': [0.943, 0.908], 'loss': [0.177, 0.304]},
                                index=['Train', 'Test'])
        st.dataframe(overview.style.format("{:.3}"))
        st.header('Model Precision, Recall, F1-score & MCC')
        st.write('Macro avg F1 score is 91%.')
        st.write('Matthew’s correlation coefficient: 0.885')
        # Per-class metrics plus the macro average row.
        per_class = pd.DataFrame(
            {'Precision': [0.92, 0.86, 0.91, 0.97, 0.88, 0.91],
             'Recall': [0.92, 0.83, 0.96, 0.92, 0.90, 0.91],
             'F1-score': [0.92, 0.85, 0.94, 0.94, 0.89, 0.91]},
            index=['angry', 'happy', 'neutral', 'sad', 'surprise', 'macro avg'])
        st.dataframe(per_class.style.format("{:.3}"))
def plot_features(data,sample_rate):
    """Render six diagnostic plots of the audio signal in the Streamlit page:
    waveform, Chroma-CENS, log-power spectrogram with centroid/bandwidth,
    MFCCs, RMS energy and tonal centroids (tonnetz).

    ``data`` is the (trimmed) audio time series and ``sample_rate`` its
    sampling rate, as produced by ``librosa.load``.
    """
    # 1) Raw waveform. waveshow is not given ax=, so it draws on the
    #    current axes — ax1, since plt.subplots just made it current.
    fig1, ax1 = plt.subplots(figsize=(6, 2))
    img = librosa.display.waveshow(y = data, sr=sample_rate, x_axis="time")
    ax1.set(title = 'Sample Waveform')
    st.pyplot(plt.gcf())
    # 2) Chroma energy normalised (CENS) features.
    fig2, ax2 = plt.subplots(figsize=(6, 2))
    cens = librosa.feature.chroma_cens(y = data, sr=sample_rate,fmin=75)
    img_cens = librosa.display.specshow(cens, y_axis = 'chroma', x_axis='time', ax=ax2)
    ax2.set(title = 'Chroma_CENS')
    st.pyplot(plt.gcf())
    # 3) Log-power spectrogram overlaid with the spectral centroid and a
    #    shaded band of +/- one spectral bandwidth around it.
    fig3, ax3 = plt.subplots(figsize=(6, 2))
    spec_bw = librosa.feature.spectral_bandwidth(y = data, sr= sample_rate)
    cent = librosa.feature.spectral_centroid(y = data, sr=sample_rate)
    times = librosa.times_like(spec_bw)
    S, phase = librosa.magphase(librosa.stft(y=data))
    librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max), y_axis='log', x_axis='time', ax=ax3)
    # Band is clamped to [0, Nyquist] so it stays inside the plot.
    ax3.fill_between(times, np.maximum(0, cent[0] - spec_bw[0]),
                     np.minimum(cent[0] + spec_bw[0], sample_rate/2),
                     alpha=0.5, label='Centroid +- bandwidth')
    ax3.plot(times, cent[0], label='Spectral centroid', color='w')
    ax3.legend(loc='lower right')
    ax3.set(title='log Power spectrogram')
    st.pyplot(plt.gcf())
    # 4) Row-normalised MFCCs (40 coefficients here for display; the
    #    classifier's prepare_test uses 20).
    fig4, ax4 = plt.subplots(figsize=(6, 2))
    mfcc = librosa.feature.mfcc(y=data, sr=sample_rate,n_mfcc=40)
    mfccs = normalize(mfcc, axis=1)
    img_mfcc = librosa.display.specshow(mfccs, y_axis = 'mel', x_axis='time', ax=ax4)
    ax4.set(title = 'Sample Mel-Frequency Cepstral Coefficients')
    st.pyplot(plt.gcf())
    # 5) RMS energy on a log scale.
    fig5, ax5 = plt.subplots(figsize=(6, 2))
    rms = librosa.feature.rms(y=data)
    times = librosa.times_like(rms)
    ax5.semilogy(times, rms[0], label='RMS Energy')
    ax5.set_title(f'RMS Energy')
    ax5.set(xticks=[])
    ax5.legend()
    ax5.label_outer()
    st.pyplot(plt.gcf())
    # 6) Tonnetz features of the harmonic component only.
    fig6, ax6= plt.subplots(figsize=(6, 2))
    y = librosa.effects.harmonic(data)
    tonnetz = librosa.feature.tonnetz(y=y, sr=sample_rate,fmin=75)
    img_tonnetz = librosa.display.specshow(tonnetz,
                                           y_axis='tonnetz', x_axis='time', ax=ax6)
    ax6.set(title=f'Tonal Centroids(Tonnetz)')
    ax6.label_outer()
    fig6.colorbar(img_tonnetz, ax=ax6)
    st.pyplot(plt.gcf())
def find_definition(emo):
    """Scrape the Oxford Learners Dictionary entry for *emo*.

    Parameters
    ----------
    emo : str — the word to look up.

    Returns
    -------
    (definition, example) where ``definition`` is the first sense's
    definition text and ``example`` is a one-element list holding the
    first usage example. Placeholder text is returned when the page
    cannot be fetched or parsed, so callers never hit a NameError (the
    original left both names unbound on every failure path, and its
    loops kept only the *last* sense/example instead of the first).
    """
    scrape_url = 'https://www.oxfordlearnersdictionaries.com/definition/english/' + emo
    headers = {"User-Agent": "mekmek"}
    definition = 'definition not available'
    example_1 = ['example not available']
    web_response = requests.get(scrape_url, headers=headers)
    if web_response.status_code == 200:
        soup = BeautifulSoup(web_response.text, 'html.parser')
        try:
            # Take the first sense and the first example block.
            sense = soup.find('li', class_='sense')
            if sense is not None:
                definition = sense.find('span', class_='def').text
            example = soup.find('ul', class_='examples')
            if example is not None:
                # Keep just the first sentence of the example block.
                example_1 = example.text.split('.')[0:1]
        except AttributeError:
            print('Word not found!!')
    else:
        print('Failed to get response...')
    return definition, example_1
def get_search_results(emo):
    """Return the top three web links for the query "Understanding <emo>".

    Pads with empty strings when the search yields fewer than three
    results, so the 3-tuple unpacking never raises (the original hit an
    IndexError in that case).
    """
    results_lis = list(search(f"Understanding {emo}", num_results=3))
    while len(results_lis) < 3:
        results_lis.append('')
    return results_lis[0], results_lis[1], results_lis[2]
def get_content(emo):
    """Show educational content for the predicted emotion.

    Renders expanders with the dictionary definition, a usage example,
    three web links, and (for most emotions) a related video.
    """
    definition, example_1 = find_definition(emo)
    result_1, result_2, result_3 = get_search_results(emo)
    with st.expander(f"Word Definition of {emo.capitalize()}"):
        st.write(definition.capitalize()+'.')
    with st.expander(f'Example of {emo.capitalize()}'):
        with st.container():
            st.write(f'1) {example_1[0].capitalize()}'+'.')
    with st.expander(f'The following links will help you understand more on {emo.capitalize()}'):
        with st.container():
            st.write(f"Check out this link ➡ {result_1}")
            st.write(f"Check out this link ➡ {result_2}")
            st.write(f"Check out this link ➡ {result_3}")
    # One curated video per emotion ('neutral' deliberately has none).
    # The original mixed `== 'anger'` with `emo in 'happiness'` — the
    # latter is a substring-membership test, not equality; a dict lookup
    # makes the intent explicit and consistent.
    videos = {
        'anger': 'https://www.youtube.com/watch?v=weMeIh10cLs',
        'happiness': 'https://www.youtube.com/watch?v=FDF2DidUAyY',
        'sadness': 'https://www.youtube.com/watch?v=34rqQEkuhK4',
        'surprised': 'https://www.youtube.com/watch?v=UYoBi0EssLE',
    }
    if emo in videos:
        with st.expander(f'Video on {emo.capitalize()}'):
            with st.container():
                st.video(videos[emo])
def upload():
    """Sidebar flow for uploaded audio: preview the clip, then optionally
    plot its features or classify its emotion and show related content."""
    upload_file = st.sidebar.file_uploader('Upload an audio .wav file. Currently max 8 seconds', type=".wav", accept_multiple_files = False)
    if not upload_file:
        return
    st.write('Sample Audio')
    st.audio(upload_file, format='audio/wav')
    if st.sidebar.button('Show Features'):
        with st.spinner('Showing....'):
            # Load and silence-trim before plotting.
            waveform, sr = librosa.load(upload_file)
            trimmed, _ = librosa.effects.trim(waveform)
            plot_features(trimmed, sr)
            st.sidebar.success("Completed")
    if st.sidebar.button('Classify'):
        with st.spinner('Classifying....'):
            batch = prepare_test(upload_file)
            scores = classifier.predict(batch)
            score_df = pd.DataFrame(scores.T,
                                    index=['anger', 'happiness','neutral','sadness','surprised'],
                                    columns =['Scores'])
            # Predicted emotion = label of the (first) highest score.
            emo = score_df['Scores'].idxmax()
            st.info(f'The predicted Emotion: {emo.upper()}')
            st.sidebar.success("Classification completed")
            if emo:
                get_content(emo)
def record():
    """Record audio in the browser via the custom st_audiorec component,
    then offer feature plots and emotion classification on the clip."""
    with st.spinner(f'Recording....'):
        st.sidebar.write('To start press Start Recording and stop to finish recording')
        # Locate the component's bundled frontend build relative to this file.
        parent_dir = os.path.dirname(os.path.abspath(__file__))
        build_dir = os.path.join(parent_dir, "st_audiorec/frontend/build")
        st_audiorec = components.declare_component("st_audiorec", path=build_dir)
        val = st_audiorec()
        # The component returns a dict once a recording has finished.
        if isinstance(val, dict):
            st.sidebar.success("Audio Recorded")
            # val['arr'] maps indices to byte values; reassemble the bytes
            # in index order into a WAV byte stream.
            ind, val = zip(*val['arr'].items())
            ind = np.array(ind, dtype=int)
            val = np.array(val)
            sorted_ints = val[ind]
            stream = BytesIO(b"".join([int(v).to_bytes(1, "big") for v in sorted_ints]))
            wav_bytes = stream.read()
            rate, data = read(BytesIO(wav_bytes))
            # NOTE(review): the samples are reversed before the WAV is
            # re-written — presumably the component delivers them
            # back-to-front; confirm against the st_audiorec example code.
            reversed_data = data[::-1]
            bytes_wav = bytes()
            byte_io = BytesIO(bytes_wav)
            write(byte_io, rate, reversed_data)
            if st.sidebar.button('Show Features'):
                with st.spinner(f'Showing....'):
                    data_ori, sample_rate = librosa.load(byte_io)
                    data, _ = librosa.effects.trim(data_ori)
                    plot_features(data,sample_rate)
                    st.sidebar.success("Completed")
            if st.sidebar.button('Classify'):
                with st.spinner(f'Classifying....'):
                    test = prepare_test(byte_io)
                    pred = classifier.predict(test)
                    pred_df = pd.DataFrame(pred.T,index=['anger', 'happiness','neutral','sadness','surprised'],columns =['Scores'])
                    # Predicted emotion = row label of the highest score.
                    emo = pred_df[pred_df['Scores'] == pred_df.max().values[0]].index[0]
                    st.info(f'The predicted Emotion: {emo.upper()}')
                    st.sidebar.success("Classification completed")
                    if emo:
                        get_content(emo)