# -*- coding: utf-8 -*-
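"""Gradio demo: transcribe microphone speech and describe its delivery
(filler-word rate, speaking rate, pitch, volume, spectral flatness) along
with the emotion detected in the transcript."""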

import crepe
import librosa
import gradio as gr
import pandas as pd
from transformers import pipeline, RobertaTokenizerFast, TFRobertaForSequenceClassification

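# wav2vec2 speech-to-text; expects 16 kHz mono input.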
asr = pipeline('automatic-speech-recognition', model='facebook/wav2vec2-large-960h-lv60-self')
# EmoRoBERTa ships as a TensorFlow checkpoint; load the tokenizer and model
# once and hand them to the pipeline instead of loading the checkpoint twice.
tokenizer = RobertaTokenizerFast.from_pretrained("arpanghoshal/EmoRoBERTa")
model = TFRobertaForSequenceClassification.from_pretrained("arpanghoshal/EmoRoBERTa")
emo = pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
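# Part-of-speech tagger used below to spot interjections.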
pos = pipeline("token-classification", model="vblagoje/bert-english-uncased-finetuned-pos")

def transcribe_and_describe(audio):

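  # Load and resample to 16 kHz to match wav2vec2's expected input rate.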
  audio, sr = librosa.load(audio, sr=16000)

  text = asr(audio)['text']

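  # Count interjections (POS tag INTJ) as a proxy for filler words.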
  tagged_text = pos(text)
  filler_words = [entry['word'] for entry in tagged_text if entry['entity'] == 'INTJ']
  filler_word_pr = len(filler_words) / len(tagged_text)

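  # Frame-level acoustic features, summarized by their mean and standard deviation.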
  flatness = pd.DataFrame(librosa.feature.spectral_flatness(y=audio).T).describe().T
  loudness = pd.DataFrame(librosa.feature.rms(y=audio).T).describe().T
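  # CREPE estimates the fundamental frequency (Hz) of each frame.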
  time, frequency, confidence, activation = crepe.predict(audio, sr)
  frequency = pd.DataFrame(frequency.T).describe().T

  mean_spectral_flatness = flatness.loc[0, 'mean'] 
  spectral_flatness_std = flatness.loc[0, 'std'] 
  mean_pitch = frequency.loc[0, 'mean'] 
  pitch_std = frequency.loc[0, 'std'] 
  mean_volume = loudness.loc[0, 'mean'] 
  volume_std = loudness.loc[0, 'std'] 

  words_per_minute = len(text.split(" ")) / (librosa.get_duration(y=audio, sr=sr) / 60)

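  # Classify the transcript's overall emotion with EmoRoBERTa.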
  emotion = emo(text)[0]['label']

  return (text, f"{filler_word_pr:.2%}", f"{words_per_minute:.2f}", f"{mean_pitch:.2f}", f"{pitch_std:.2f}", f"{mean_volume:.2f}", f"{volume_std:.2f}", f"{mean_spectral_flatness:.2f}", f"{spectral_flatness_std:.2f}", emotion)

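# Note: gr.Audio(source=...) is the Gradio 3.x API; Gradio 4 renamed it to
# sources=["microphone"].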
gr.Interface(
    fn=transcribe_and_describe, 
    inputs=gr.Audio(source="microphone", type="filepath"), 
    outputs=[
        gr.Text(label="Transcription"),
        gr.Text(label="Filler Word Percent"),
        gr.Text(label="Rate of Speech (WPM)"), 
        gr.Text(label="Mean Pitch (Hz)"), 
        gr.Text(label="Pitch Variation (Hz)"), 
        gr.Text(label="Mean Volume (W)"),
        gr.Text(label="Volume Variation (W)"),
        gr.Text(label="Mean Spectral Flatness (dB)"),
        gr.Text(label="Spectral Flatness Variation (dB)"),
        gr.Text(label="Emotion")
    ]
).launch()