# -*- coding: utf-8 -*-
import crepe
import spacy
import librosa
import gradio as gr
import pandas as pd
from transformers import pipeline

# Load the speech-recognition, emotion-classification, and spaCy language models once at startup.
asr = pipeline('automatic-speech-recognition', model='facebook/wav2vec2-large-960h-lv60-self')
emo = pipeline('sentiment-analysis', model='arpanghoshal/EmoRoBERTa')
lang_model = spacy.load("en_core_web_sm")

def transcribe_and_describe(audio):
    # Load the recording as 16 kHz mono audio (the sample rate wav2vec2 expects).
    audio, sr = librosa.load(audio, sr=16000)

    # Transcribe the speech and parse the transcript with spaCy.
    text = asr(audio)['text']
    doc = lang_model(text)

    # Treat interjections ("um", "uh", etc.) as filler words and report their share of tokens.
    filler_words = [token.text for token in doc if token.pos_ == 'INTJ']
    filler_word_pr = len(filler_words) / len(doc)

    # Summarize spectral flatness, loudness (RMS), and pitch (CREPE) across the recording.
    flatness = pd.DataFrame(librosa.feature.spectral_flatness(y=audio).T).describe().T
    loudness = pd.DataFrame(librosa.feature.rms(y=audio).T).describe().T
    time, frequency, confidence, activation = crepe.predict(audio, sr)
    frequency = pd.DataFrame(frequency.T).describe().T

    mean_spectral_flatness = flatness.loc[0, 'mean']
    spectral_flatness_std = flatness.loc[0, 'std']
    mean_pitch = frequency.loc[0, 'mean']
    pitch_std = frequency.loc[0, 'std']
    mean_volume = loudness.loc[0, 'mean']
    volume_std = loudness.loc[0, 'std']

    # Speaking rate in words per minute.
    words_per_minute = len(text.split(" ")) / (librosa.get_duration(y=audio, sr=sr) / 60)

    emotion = emo(text)[0]['label']

    # Return order matches the Gradio output components defined below.
    return (text, words_per_minute, filler_word_pr, mean_pitch, pitch_std, mean_volume,
            volume_std, mean_spectral_flatness, spectral_flatness_std, emotion)

gr.Interface(
    fn=transcribe_and_describe,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=[
        gr.Text(label="Transcription"),
        gr.Text(label="Rate of Speech (WPM)"),
        gr.Text(label="Filler Word Percent"),
        gr.Text(label="Mean Pitch (Hz)"),
        gr.Text(label="Pitch Variation (Hz)"),
        gr.Text(label="Mean Volume (W)"),
        gr.Text(label="Volume Variation (W)"),
        gr.Text(label="Mean Spectral Flatness (dB)"),
        gr.Text(label="Spectral Flatness Variation (dB)"),
        gr.Text(label="Emotion")
    ]
).launch()
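
# Usage sketch (assumption: "sample.wav" is an illustrative local recording, not a file
# shipped with this Space): transcribe_and_describe can also be called directly for a
# quick check before launching the UI, e.g.
#
#   print(transcribe_and_describe("sample.wav"))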