import os
from functools import partial

import torch
import gradio as gr
from transformers import pipeline
from pyannote.audio import Pipeline
import whisperx

from utils import split_into_sentences, create_fig, color_map
from utils import speech_to_text as stt

# silence the huggingface/tokenizers fork-parallelism warning
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# transformers pipelines take a device index: 0 = first GPU, -1 = CPU
device = 0 if torch.cuda.is_available() else -1


# Audio components
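# whisperx pairs a Whisper checkpoint with a forced-alignment model to get
# word-level timestamps; "tiny.en" is the smallest English-only checkpoint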
whisper_device = "cuda" if torch.cuda.is_available() else "cpu"
whisper = whisperx.load_model("tiny.en", whisper_device)
alignment_model, metadata = whisperx.load_align_model(language_code="en", device=whisper_device)
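# pyannote's speaker-diarization pipeline is gated on the Hugging Face Hub
# and needs an access token (read here from the ENO_TOKEN env var)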
speaker_segmentation = Pipeline.from_pretrained(
    "pyannote/speaker-diarization@2.1",
    use_auth_token=os.environ["ENO_TOKEN"],
)


# Text components
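# DistilBERT fine-tuned on six emotion labels:
# sadness, joy, love, anger, fear, surprise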
emotion_pipeline = pipeline(
    "text-classification",
    model="bhadresh-savani/distilbert-base-uncased-emotion",
    device=device,
)

# minimum pipeline confidence per emotion label before a sentence is
# flagged as non-neutral (assumed, illustrative values; tune as needed)
thresholds = {
    "sadness": 0.95,
    "joy": 0.95,
    "love": 0.95,
    "anger": 0.95,
    "fear": 0.95,
    "surprise": 0.95,
}

EXAMPLES = [["Customer_Support_Call.wav"]]


# pre-bind the loaded models so the Gradio callback only receives the
# audio file path at click time
speech_to_text = partial(
    stt,
    speaker_segmentation=speaker_segmentation,
    whisper=whisper,
    alignment_model=alignment_model,
    metadata=metadata,
    whisper_device=whisper_device,
)

def sentiment(diarized, emotion_pipeline):
    """
    diarized: a list of tuples. Each tuple has a string to be displayed and a label for highlighting.
        The start/end times are not highlighted [(speaker text, speaker id), (start time/end time, None)]

    This function gets the customer's sentiment and returns a list for highlighted text as well
    as a plot of sentiment over time.
    """

    customer_sentiments = []
    plot_sentences = []
    to_plot = []

    # used to set the x-axis range of the plot
    x_min = float("inf")
    x_max = 0.0

    # diarized alternates (speech, speaker_id) and (times, None) entries,
    # so consume it two items at a time
    for i in range(0, len(diarized), 2):
        speaker_speech, speaker_id = diarized[i]
        times, _ = diarized[i + 1]

        sentences = split_into_sentences(speaker_speech)
        # times reads like "from <start>-<end>" (format assumed); strip the
        # 5-character prefix before parsing the floats
        start_time, end_time = times[5:].split("-")
        start_time, end_time = float(start_time), float(end_time)
        # spread sentences evenly across the segment to approximate
        # per-sentence timestamps; guard against an empty sentence list
        interval_size = (end_time - start_time) / max(len(sentences), 1)

        if "Customer" in speaker_id:

            # the pipeline accepts a list and returns one
            # {"label": ..., "score": ...} dict per sentence
            outputs = emotion_pipeline(sentences)

            for idx, (o, t) in enumerate(zip(outputs, sentences)):
                sent = "neutral"
                if o["score"] > thresholds[o["label"]]:
                    customer_sentiments.append(
                        (t + f"({round(idx*interval_size+start_time,1)} s)", o["label"])
                    )
                    if o["label"] in {"joy", "love", "surprise"}:
                        sent = "positive"
                    elif o["label"] in {"sadness", "anger", "fear"}:
                        sent = "negative"
                if sent != "neutral":
                    to_plot.append((start_time + idx * interval_size, sent))
                    plot_sentences.append(t)

            if start_time < x_min:
                x_min = start_time
            if end_time > x_max:
                x_max = end_time

    fig = create_fig(x_min, x_max, to_plot, plot_sentences)

    return customer_sentiments, fig

with gr.Blocks() as demo:

    with gr.Row():
        with gr.Column():
            audio = gr.Audio(label="Audio file", type="filepath")
            btn = gr.Button("Transcribe and Diarize")

            gr.Markdown("**Call Transcript:**")
            diarized = gr.HighlightedText(label="Call Transcript")
            sentiment_btn = gr.Button("Get Customer Sentiment")
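            # sentences re-highlighted by emotion; color_map (from utils)
            # maps each emotion label to a display color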
            analyzed = gr.HighlightedText(color_map=color_map)
            plot = gr.Plot(label="Sentiment over time")

        with gr.Column():
            gr.Markdown("## Example Files")
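            # cache_examples=True runs speech_to_text on the sample file at
            # startup so clicking the example returns a cached transcript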
            gr.Examples(
                examples=EXAMPLES,
                inputs=[audio],
                outputs=[diarized],
                fn=speech_to_text,
                cache_examples=True
            )
    # when the Transcribe button is clicked, convert the audio file to text and diarize
    btn.click(
        fn=speech_to_text,
        inputs=audio,
        outputs=diarized,
    )

    # when sentiment button clicked, display highlighted text and plot
    sentiment_btn.click(
        fn=partial(sentiment, emotion_pipeline=emotion_pipeline),
        inputs=diarized,
        outputs=[analyzed, plot],
    )

demo.launch(debug=True)