mukaddamzaid commited on
Commit
13ee7c9
β€’
1 Parent(s): 8fa4ea7

Add Application Files

Browse files
Files changed (2) hide show
  1. app.py +173 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import whisper
4
+ from transformers import pipeline
5
+
6
+ # β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
7
+
8
+ title = "Whisper to Emotion"
9
+
10
+ # β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
11
+
12
+ whisper_model = whisper.load_model("medium")
13
+
14
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
15
+
16
+ emotion_classifier = pipeline(
17
+ "text-classification", model='bhadresh-savani/distilbert-base-uncased-emotion')
18
+
19
+
20
def translate_and_classify(audio):
    """Transcribe an audio file, translate it to English, and detect its emotion.

    Args:
        audio: filesystem path to the recorded/uploaded audio clip
            (as delivered by ``gr.Audio(type="filepath")``).

    Returns:
        tuple[str, str]: the transcript in the originally spoken language,
        and the emotion label predicted from the English translation.
    """
    print("""
    —
    Sending audio to Whisper ...
    —
    """)
    # Load the waveform and pad/trim it to Whisper's fixed 30-second window.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)

    # NOTE: the original code also called whisper_model.detect_language(mel)
    # here, but the probabilities were never used and whisper.decode already
    # reports the detected language — the redundant forward pass is removed.

    # fp16=False keeps decoding numerically safe on CPU-only hosts.
    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    translate_options = whisper.DecodingOptions(task="translate", fp16=False)

    transcription = whisper.decode(whisper_model, mel, transcript_options)
    translation = whisper.decode(whisper_model, mel, translate_options)

    print("Language Spoken: " + transcription.language)
    print("Transcript: " + transcription.text)
    print("Translated: " + translation.text)

    # Classify the English translation so one English-only emotion model
    # covers every input language Whisper can translate.
    emotion = emotion_classifier(translation.text)
    detected_emotion = emotion[0]["label"]
    print("Detected Emotion: ", detected_emotion)
    return transcription.text, detected_emotion
48
+
49
+
50
# Custom CSS for the Gradio Blocks app below: IBM Plex Sans font, black
# buttons, a centered 730px container, and light/dark-mode footer colors.
# (The --tw-* variables mirror the Tailwind tokens Gradio's theme uses.)
css = """
.gradio-container {
    font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
    color: white;
    border-color: black;
    background: black;
}
input[type='range'] {
    accent-color: black;
}
.dark input[type='range'] {
    accent-color: #dfdfdf;
}
.container {
    max-width: 730px;
    margin: auto;
    padding-top: 1.5rem;
}
#gallery {
    min-height: 22rem;
    margin-bottom: 15px;
    margin-left: auto;
    margin-right: auto;
    border-bottom-right-radius: .5rem !important;
    border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
    min-height: 20rem;
}
.details:hover {
    text-decoration: underline;
}
.gr-button {
    white-space: nowrap;
}
.gr-button:focus {
    border-color: rgb(147 197 253 / var(--tw-border-opacity));
    outline: none;
    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
    --tw-border-opacity: 1;
    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
    --tw-ring-opacity: .5;
}
#advanced-btn {
    font-size: .7rem !important;
    line-height: 19px;
    margin-top: 12px;
    margin-bottom: 12px;
    padding: 2px 8px;
    border-radius: 14px !important;
}
#advanced-options {
    display: none;
    margin-bottom: 20px;
}
.footer {
    margin-bottom: 45px;
    margin-top: 35px;
    text-align: center;
    border-bottom: 1px solid #e5e5e5;
}
.footer>p {
    font-size: .8rem;
    display: inline-block;
    padding: 0 10px;
    transform: translateY(10px);
    background: white;
}
.dark .footer {
    border-color: #303030;
}
.dark .footer>p {
    background: #0b0f19;
}


"""
131
# ——— Gradio UI: two input tabs (record / upload) feeding the same
# transcribe-and-classify pipeline, with shared output textboxes. ———
# NOTE(review): gr.Audio(source=...) is the Gradio 3.x API; Gradio 4
# renamed it to `sources=[...]` — confirm the pinned gradio version.
with gr.Blocks(css=css) as demo:
    gr.Markdown("""
    ## Emotion Detection From Speech with Whisper
    """)
    # Fix: "model,this" was missing a space in the user-facing description.
    gr.HTML('''
    <p style="margin-bottom: 10px; font-size: 94%">
    Whisper is a general-purpose speech recognition model released by OpenAI that can perform multilingual speech recognition as well as speech translation and language identification. Combined with an emotion detection model, this allows for detecting emotion directly from speech in multiple languages and can potentially be used to analyze sentiment from customer calls. It could also be used to transcribe and detect different emotions to enable a data-driven analysis for psychotherapy.
    </p>
    ''')

    with gr.Column():
        # gr.Markdown(""" ### Record audio """)
        with gr.Tab("Record Audio"):
            audio_input_r = gr.Audio(
                label='Record Audio Input', source="microphone", type="filepath")
            transcribe_audio_r = gr.Button('Transcribe')

        with gr.Tab("Upload Audio as File"):
            audio_input_u = gr.Audio(
                label='Upload Audio', source="upload", type="filepath")
            transcribe_audio_u = gr.Button('Transcribe')

        with gr.Row():
            transcript_output = gr.Textbox(
                label="Transcription in the language you spoke", lines=3)
            emotion_output = gr.Textbox(label="Detected Emotion")

    # Both buttons reuse the same handler and write to the same outputs.
    transcribe_audio_r.click(translate_and_classify, inputs=audio_input_r, outputs=[
                             transcript_output, emotion_output])
    transcribe_audio_u.click(translate_and_classify, inputs=audio_input_u, outputs=[
                             transcript_output, emotion_output])
    gr.HTML('''
    <div class="footer">
        <p>Whisper Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a> -
        <a href="https://huggingface.co/bhadresh-savani/distilbert-base-uncased-emotion" style="text-decoration: underline;" target="_blank">Emotion Detection Model</a>
        </p>
    </div>
    ''')
    gr.Markdown(
        "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=zaidmukaddam.whisper_to_emotion)")


demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ torch
2
+ transformers
3
+ git+https://github.com/openai/whisper.git