yappeizhen committed
Commit 803a411 · 1 Parent(s): abe60f1
feat: customisations

Files changed:
- __pycache__/app.cpython-311.pyc +0 -0
- app.py +14 -23
__pycache__/app.cpython-311.pyc ADDED
Binary file (7.65 kB)
app.py
CHANGED
@@ -74,15 +74,14 @@ def inference(audio, sentiment_option):
 title = """<h1 align="center">☕ Lim Kopi 💬</h1>"""
 image_path = "coffee_logo.jpg"
 description = """
-💻 This
-
-⚙️ Components of the tool:<br>
+💻 This MVP shows how we can use Whisper to conduct audio sentiment analysis on voice recordings of customer service agents. Whisper is a general speech recognition model built by OpenAI. It is trained on a large dataset of diverse audio and supports multilingual speech recognition, speech translation, and language identification tasks.<br><br>
+⚙️ MVP Components:<br>
 <br>
 - Real-time multilingual speech recognition<br>
 - Language identification<br>
 - Sentiment analysis of the transcriptions<br>
 <br>
-🎯 The sentiment analysis results are provided as a dictionary with different emotions and their corresponding scores.<br>
+🎯 The sentiment analysis results are provided as a dictionary with different emotions and their corresponding scores, so customer service agents can receive feedback on the overall call quality and customer receptiveness.<br>
 <br>
 
 😊 The sentiment analysis results are displayed with emojis representing the corresponding sentiment.<br>
@@ -94,7 +93,7 @@ description = """
 ✅ Use the microphone for real-time speech recognition.<br>
 <br>
 
-➡️ The model will transcribe the audio and perform sentiment analysis on the transcribed text.<br>
+➡️ The model will transcribe the audio for record-keeping, and perform sentiment analysis on the transcribed text.<br>
 
 """
 
@@ -108,9 +107,12 @@ custom_css = """
     font-size: 14px;
     min-height: 300px;
 }
+.svelte-1mwvhlq {
+    display: none !important;
+}
 """
 
-block = gr.Blocks(css=custom_css)
+block = gr.Blocks(title="Lim Kopi Call Center Service", css=custom_css)
 
 with block:
     gr.HTML(title)
@@ -123,34 +125,23 @@ with block:
 
     with gr.Group():
         with gr.Box():
-            audio = gr.Audio(
-                label="Input Audio",
-                show_label=False,
-                source="microphone",
-                type="filepath"
-            )
-
             sentiment_option = gr.Radio(
                 choices=["Sentiment Only", "Sentiment + Score"],
                 label="Select an option",
-                default="Sentiment Only"
             )
-
+            audio = gr.Audio(
+                source="microphone",
+                type="filepath"
+            )
+        with gr.Box():
             btn = gr.Button("Transcribe")
 
             lang_str = gr.Textbox(label="Language")
 
             text = gr.Textbox(label="Transcription")
 
-            sentiment_output = gr.Textbox(label="Sentiment Analysis Results"
+            sentiment_output = gr.Textbox(label="Sentiment Analysis Results")
 
         btn.click(inference, inputs=[audio, sentiment_option], outputs=[lang_str, text, sentiment_output])
 
-    gr.HTML('''
-        <div class="footer">
-            <p>Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a>
-            </p>
-        </div>
-    ''')
-
 block.launch()
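For context, the hunks above only re-wire the UI; the inference(audio, sentiment_option) callback that btn.click binds is defined elsewhere in app.py and is not part of this diff. Below is a minimal sketch of what such a callback could look like, assuming openai-whisper plus a Hugging Face text-classification pipeline; the model names and helper logic are illustrative, not taken from the commit.

import whisper
from transformers import pipeline

model = whisper.load_model("base")  # assumed Whisper checkpoint
sentiment_pipe = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",  # assumed emotion model
)

def inference(audio, sentiment_option):
    # gr.Audio(type="filepath") passes the recording as a path on disk.
    result = model.transcribe(audio)
    lang, text = result["language"], result["text"]

    # Score the transcription; return every emotion with its score, as the description text promises.
    raw = sentiment_pipe(text, top_k=None)
    scores = raw[0] if raw and isinstance(raw[0], list) else raw  # output nesting varies by transformers version
    if sentiment_option == "Sentiment + Score":
        sentiment = {s["label"]: round(s["score"], 3) for s in scores}
    else:
        sentiment = max(scores, key=lambda s: s["score"])["label"]

    # Must line up with outputs=[lang_str, text, sentiment_output] in btn.click.
    return lang, text, str(sentiment)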
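Note on the removed default="Sentiment Only": default= is the Gradio 2.x kwarg; under the Gradio 3.x API that this Blocks/Box layout implies, the pre-selected choice is set with value=, and default= is, depending on the version, either rejected or ignored with a deprecation warning. A hedged sketch of a pre-selected radio on the 3.x API (the commit itself simply drops the pre-selection):

            sentiment_option = gr.Radio(
                choices=["Sentiment Only", "Sentiment + Score"],
                label="Select an option",
                value="Sentiment Only",  # Gradio 3.x replacement for the 2.x default= kwarg
            )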
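Note on the added CSS rule: .svelte-1mwvhlq is a class generated by Gradio's Svelte build, so the selector hides the intended widget only for the installed Gradio version. A more stable hook, assuming Gradio 3.x, is to give the component an explicit elem_id and target that id in custom_css; the id below is hypothetical.

audio = gr.Audio(
    source="microphone",
    type="filepath",
    elem_id="audio-input",  # lets custom_css target #audio-input instead of a generated Svelte class
)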