import os
import pickle

import gradio as gr
import librosa
import noisereduce as nr
import numpy as np
import pandas as pd
import requests
import soundfile as sf
from scipy.stats import skew

sr = 8000  # target sample rate (Hz) used for resampling and feature extraction

url = "https://huggingface.co/spaces/aslanovaf/Sentiment_Analysis_Azerbaijani/resolve/main/sentiment_model_8000.pickle"
hf_token = os.environ.get("HF_TOKEN")
headers = {"Authorization": f"Bearer {hf_token}"}

response = requests.get(url, headers=headers) 
if response.status_code == 200:
    model = pickle.loads(response.content)
else:
    st.markdown(f"Failed to download TTS from {url} (Status code: {response.status_code})")


def split_full_audio_15_sec(audio_file):
    """Read an audio file, resample it to 8 kHz, and split it into 15-second chunks."""
    audio, orig_sr = sf.read(audio_file)
    if audio.ndim > 1:
        audio = librosa.to_mono(audio.T)  # collapse stereo uploads to mono
    audio = librosa.resample(y=audio, orig_sr=orig_sr, target_sr=sr)

    chunk_length = 15 * sr
    total_length = len(audio)
    start_index = 0
    end_index = min(chunk_length, total_length)
    chunks = []

    while start_index < total_length:
        chunk = audio[start_index:end_index]
        # Drop trailing chunks shorter than 3 seconds.
        if len(chunk) / sr < 3:
            break
        chunks.append(chunk)
        start_index = end_index
        end_index = min(end_index + chunk_length, total_length)
    return chunks

def get_mfcc(audio_chunk):
    """Extract a 95-dimensional feature vector (MFCC and spectral statistics) from one chunk."""
    try:
        reduced_noise = nr.reduce_noise(audio_chunk, sr=sr)
        ft1 = librosa.feature.mfcc(y=reduced_noise, sr=sr, n_mfcc=16)
        ft2 = librosa.feature.zero_crossing_rate(reduced_noise)[0]
        ft3 = librosa.feature.spectral_rolloff(y=reduced_noise)[0]
        ft4 = librosa.feature.spectral_centroid(y=reduced_noise)[0]
        # Summary statistics per feature set: mean, std, skew, max, min.
        ft1_trunc = np.hstack((np.mean(ft1, axis=1), np.std(ft1, axis=1), skew(ft1, axis=1), np.max(ft1, axis=1), np.min(ft1, axis=1)))
        ft2_trunc = np.hstack((np.mean(ft2), np.std(ft2), skew(ft2), np.max(ft2), np.min(ft2)))
        ft3_trunc = np.hstack((np.mean(ft3), np.std(ft3), skew(ft3), np.max(ft3), np.min(ft3)))
        ft4_trunc = np.hstack((np.mean(ft4), np.std(ft4), skew(ft4), np.max(ft4), np.min(ft4)))
        return pd.Series(np.hstack((ft1_trunc, ft2_trunc, ft3_trunc, ft4_trunc)))
    except Exception:
        print('bad file')
        # 16 MFCCs x 5 stats + 3 spectral features x 5 stats = 95 features.
        return pd.Series([0] * 95)

    
def analyze_sentiment(audio):
    """Predict a sentiment label for each 15-second chunk and an overall label for the audio."""
    chunks = split_full_audio_15_sec(audio)
    chunked_df = pd.DataFrame(data={'Chunk_order': [f'Chunk_{i+1}' for i in range(len(chunks))], 'Data': chunks})
    df_features = chunked_df['Data'].apply(get_mfcc)
    df = pd.concat([chunked_df, df_features], axis=1)
    df = df.drop(columns=['Data'])
    df.columns = ['Chunk_order'] + [f'Feature_{i+1}' for i in range(95)]
    df['Prediction'] = model.predict(df.drop(columns=['Chunk_order']))

    # Map the model's raw labels to display labels.
    df['Prediction'] = df['Prediction'].map({
        'pozitive_normal': 'Normal',
        'scope': 'Silence',
        'neqativ': 'Negative'
    })

    predictions = df['Prediction'].tolist()
    # Overall label: Negative if any chunk is Negative, else Normal if any chunk is Normal, else Silence.
    final_prediction = 'Negative' if 'Negative' in predictions else 'Normal' if 'Normal' in predictions else 'Silence'

    return (', '.join(predictions), final_prediction)
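
# Quick local sanity check (hypothetical file path, not shipped with the Space):
#   chunk_labels, overall = analyze_sentiment("sample_call.wav")
#   print(chunk_labels, overall)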


title = """<h1 align="center">🎀 Azerbaijani Audio Speech Sentiment Analysis πŸ’¬</h1>"""
image_path = "thmbnail.jpg"
description = """
πŸ’» This demo showcases a general-purpose sentiment analysis process. It is trained on a collection of audio calls from banking/fintech industries based on audio features. The main analysis predicts one of the categories (Normal/Negative/Silence) for each 15-second bucket in the audio. The final category for the whole audio is also estimated. 
<br>
βš™οΈ Components of the tool:<br>
<br>
&nbsp;&nbsp;&nbsp;&nbsp; - Sentiment analysis directly of the audios.<br>
<br>
🎙️ Use the microphone for real-time audio recording.<br>
↑ Or upload an audio file.<br>
<br>

⚑️ The model will extract audio features and perform sentiment analysis on the audio.<br>

"""

custom_css = """
#banner-image {
    display: block;
    margin-left: auto;
    margin-right: auto;
}
#chat-message {
    font-size: 14px;
    min-height: 300px;
}
"""

block = gr.Blocks(css=custom_css)


with block:
    gr.HTML(title)

    with gr.Row():
        with gr.Column():
            gr.HTML(description)
        with gr.Column():
            gr.Image(image_path, elem_id="banner-image", show_label=False)

    gr.Interface(
        fn=analyze_sentiment,
        inputs=[
            gr.Audio(sources=["upload", "microphone"], type="filepath", label="Input Audio"),
        ],
        outputs=[gr.Textbox(label="Sentiment Analysis Results of 15-second buckets"),gr.Textbox(label="Final Prediction")],
        # layout="vertical",
        # theme="huggingface",
        # One value per example, matching the single audio input component.
        examples=[
            ["./Recording_1.wav"],
            ["./Recording_2.wav"],
        ],
        cache_examples=False,
        allow_flagging="never",
    )
    # gr.TabbedInterface([mic, file], ["Audio from Microphone", "Audio from File"])

block.launch()
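
# To run locally (assuming the dependencies imported above are installed, and
# HF_TOKEN is set in the environment if the model file requires authentication):
#   HF_TOKEN=<your token> python app.py
# then open the local URL Gradio prints (http://127.0.0.1:7860 by default).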