khanguyen committed on
Commit 27d0fae (1 parent: d4239af)

Upload Final_project.py

Files changed (1)
  1. Final_project.py +434 -0
Final_project.py ADDED
@@ -0,0 +1,434 @@
### AUDIO RECORDER

import os
import streamlit as st
import streamlit.components.v1 as components

import io
import librosa
import numpy as np

import torch
from speechbrain.pretrained import EncoderDecoderASR
from speechbrain.pretrained import SpeakerRecognition

import soundfile
import hnswlib
import time
from datetime import datetime

# st.set_page_config(layout="wide")
# padding_top = 0
# st.markdown(f"""
#     <style>
#         .reportview-container .main .block-container{{
#             padding-top: {padding_top}rem;
#         }}
#     </style>""",
#     unsafe_allow_html=True,)

## DESIGN implement changes to the standard streamlit UI/UX
st.set_page_config(page_title="VOICE PASSWORD")
## Design move app further up and remove top padding
st.markdown('''<style>.css-1egvi7u {margin-top: -3rem;}</style>''',
            unsafe_allow_html=True)
## Design change st.Audio to fixed height of 45 pixels
st.markdown('''<style>.stAudio {height: 45px;}</style>''',
            unsafe_allow_html=True)
## Design change hyperlink href link color
st.markdown('''<style>.css-v37k9u a {color: #ff4c4b;}</style>''',
            unsafe_allow_html=True)  # darkmode
st.markdown('''<style>.css-nlntq9 a {color: #ff4c4b;}</style>''',
            unsafe_allow_html=True)  # lightmode


primaryColor = "#919E8B"              # green
backgroundColor = "#FBF6F1"           # sepia yellow
secondaryBackgroundColor = "#EBD2B9"  # wheat
textColor = "#5D6169"                 # grey

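# save_audio clears the "audio" folder, logs the upload, and writes the uploaded file into it;
# it returns 1 (and saves nothing) when the file is larger than 4 MB, otherwise 0.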
def save_audio(file):
    if file.size > 4000000:
        return 1
    # if not os.path.exists("audio"):
    #     os.makedirs("audio")
    folder = "audio"
    datetoday = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    # clear the folder to avoid storage overload
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))
    try:
        with open("log0.txt", "a") as f:
            f.write(f"{file.name} - {file.size} - {datetoday};\n")
    except:
        pass

    with open(os.path.join(folder, file.name), "wb") as f:
        f.write(file.getbuffer())
    return 0


### CREATING SIDEBAR
# Using object notation
st.sidebar.subheader("Menu")
add_selectbox = st.sidebar.selectbox(
    "Please select",
    ("Home", "Tutorial", "About"), key='sidebar')


with st.sidebar:
    st.write('##')
    st.write('##')
    st.write('##')
    st.write('##')


# rate = st.select_slider(
#     'Wanna rate this app? 😎 ',
#     options=['awful', 'bad', 'okay', 'good', 'great'])

# if rate == 'awful' or rate == 'bad' or rate == 'okay':
#     title = st.text_input('Feedback', '')
#     if title != '':
#         time.sleep(3)
#         st.write('Thank you for your feedback!')

# if rate == 'good' or rate == 'great':
#     txt = st.text_input('Feedback', '')
#     if txt != '':
#         time.sleep(3)
#         st.write('Thank you for your support!')

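# The selectbox above was created with key='sidebar', so the chosen page
# is available as st.session_state.sidebar in the blocks below.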
if st.session_state.sidebar == 'Home':

    def audiorec_demo_app():

        parent_dir = os.path.dirname(os.path.abspath(__file__))
        # Custom REACT-based component for recording client audio in browser
        build_dir = os.path.join(parent_dir, "st_audiorec/frontend/build")
        # specify directory and initialize st_audiorec object functionality
        st_audiorec = components.declare_component("st_audiorec", path=build_dir)

        # TITLE and Creator information
        st.title('Voice password')
        st.markdown('Audio recorder implemented by '
                    '[Stefan Rummer](https://www.linkedin.com/in/stefanrmmr/) - '
                    'view project source code on '
                    '[GitHub](https://github.com/stefanrmmr/streamlit_audio_recorder)')
        st.write('\n\n')

        # STREAMLIT AUDIO RECORDER Instance
        st_audiorec()

    if __name__ == '__main__':

        # call main function
        audiorec_demo_app()


    # Print the current working directory
    # st.write("Current working directory: {0}".format(os.getcwd()))

    ## Change the current working directory
    # E:/Finalproject

    # Print the current working directory
    # st.write("New Current working directory: {0}".format(os.getcwd()))

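    # Load the pretrained SpeechBrain ASR model (Transformer encoder-decoder with a
    # Transformer LM, trained on LibriSpeech); the checkpoints are downloaded on the
    # first run and cached under savedir.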
    asr_model = EncoderDecoderASR.from_hparams(
        source="speechbrain/asr-transformer-transformerlm-librispeech",
        savedir="pretrained_models/asr-transformer-transformerlm-librispeech",
        run_opts={"device": "cpu"})

    ### UPLOAD RECORDED AUDIO

    uploaded_file = st.file_uploader("Choose a file")

    if uploaded_file is not None:

        ### SPEECH_TO_TEXT
        # st.write(uploaded_file)
        st.write("#")

        if not os.path.exists("audio"):
            os.makedirs("audio")
        path = os.path.join("audio", uploaded_file.name)
        if_save_audio = save_audio(uploaded_file)
        spoken = asr_model.transcribe_file(path)

        with st.spinner('Processing...'):
            time.sleep(3)

        st.write('You said:')
        st.info(spoken)


        ### SPEAKER RECOGNITION
        ## Load pretrained model

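        # The ECAPA-TDNN verifier maps an utterance to a 192-dimensional speaker embedding;
        # recordings of the same speaker should lie close together in cosine distance.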
        verifier = SpeakerRecognition.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb",
                                                   run_opts={"device": "cpu"})


        ### Base_audio processing
        ## Load sample voices
        # Change the current working directory
        os.chdir('E:/Finalproject')
        cur = os.getcwd()

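        # audio_to_numpy loads a file at 30 kHz, zero-pads it to at least 30,000 samples
        # (one second) and adds a leading batch axis so it can be passed to encode_batch.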
        def audio_to_numpy(filenames):
            x, sr = librosa.load(filenames, sr=30000)
            if x.shape[0] <= 30000:
                x = np.pad(x, (0, 30000 - x.shape[0]), 'constant', constant_values=(0, 0))
            if len(x.shape) == 1:
                # shape becomes [batch, time], which is what encode_batch expects
                x = x[None, ...]
            return x

        voice_1 = os.path.join(cur, 'An.wav')
        g = audio_to_numpy(voice_1)
        my_embeddings1 = np.squeeze(
            verifier.encode_batch(torch.tensor(g)).detach().cpu().numpy())
        # st.write(my_embeddings1.shape)
        # st.write(g.shape)

        voice_2 = os.path.join(cur, 'SampleVoice_kha.wav')
        k = audio_to_numpy(voice_2)
        my_embeddings2 = np.squeeze(
            verifier.encode_batch(torch.tensor(k)).detach().cpu().numpy())
        # st.write(my_embeddings2.shape)
        # st.write(k.shape)

        voice_3 = os.path.join(cur, 'Tan.wav')
        m = audio_to_numpy(voice_3)
        my_embeddings3 = np.squeeze(
            verifier.encode_batch(torch.tensor(m)).detach().cpu().numpy())

        voice_4 = os.path.join(cur, 'Phu.wav')
        n = audio_to_numpy(voice_4)
        my_embeddings4 = np.squeeze(
            verifier.encode_batch(torch.tensor(n)).detach().cpu().numpy())

        os.chdir('C:/Users/Administrator/Downloads')

        q = audio_to_numpy(uploaded_file.name)
        my_embeddings = np.squeeze(
            verifier.encode_batch(torch.tensor(q)).detach().cpu().numpy())

        # st.write(my_embeddings.shape)
        # st.write(q.shape)

        my_id_1 = 1
        my_id_2 = 2
        my_id_3 = 3
        my_id_4 = 4

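        # Build a small hnswlib nearest-neighbour index over the enrolled speaker
        # embeddings (cosine space, 192-dimensional vectors, one item per speaker id).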
        p = hnswlib.Index(space='cosine', dim=192)
        p.init_index(max_elements=1000, ef_construction=200, M=16)
        # my_embeddingsN is each speaker's voice embedding
        # and my_id_N is that speaker's id in the database (e.g. my_id = 0)
        p.add_items(my_embeddings1, my_id_1)
        p.add_items(my_embeddings2, my_id_2)
        p.add_items(my_embeddings3, my_id_3)
        p.add_items(my_embeddings4, my_id_4)

        # the search is performed with the following line,
        # where labels is the array of the k ids closest to target_embed
        target_embed = my_embeddings
        labels, distances = p.knn_query(target_embed, k=4)
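        # distances holds the corresponding cosine distances, nearest first;
        # smaller values mean the query voice is closer to that enrolled speaker.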

        st.write("#")

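        # Two-factor check: the transcript must equal the spoken password, and the
        # nearest enrolled speaker must be id 2 (Kha) within a cosine distance of 0.3.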
        if spoken == 'TWO SIX ZERO SIX':  # labels[0][0] == 2:
            st.success('Password Correct')
            if labels[0][0] == 2 and distances[0][0] < 0.3:
                st.balloons()
                st.snow()
                st.write('Welcome to my Youtube channel. Please click the following link: https://www.youtube.com/channel/UCViAzz3Qtz8IQdUI9DiJ3WA/featured')
            else:
                st.error('Invalid speaker. Please try again!')

        else:
            st.error('Incorrect password. Please try again!')


        with st.sidebar:

            st.sidebar.subheader("Voice label names")
            col1, col2, col3, col4 = st.columns(4)
            with col1:
                st.markdown("Ân - 1")
            with col2:
                st.markdown("Kha - 2")
            with col3:
                st.markdown("Tân - 3")
            with col4:
                st.markdown("Phú - 4")
            st.write(labels)

            st.write('#')

            st.sidebar.subheader("Distance to each label")
            st.write(distances)

            st.write('#')

            st.sidebar.subheader("Recorded audio file")
            file_details = {"Filename": uploaded_file.name, "FileSize": uploaded_file.size}
            st.sidebar.write(file_details)


if st.session_state.sidebar == 'Tutorial':

    st.title('Tutorial')

    st.write('This is the `tutorial page` of this application')
    st.write('#')
    # Step 1
    st.markdown('##### Step 1: Voice recording')
    st.markdown('- Press `Start Recording` to record your voice password')
    st.markdown('- Click `Stop` to end the recording')
    st.markdown('- If you want to record again, click `Reset` to reset the audio')

    # Step 2
    st.markdown('##### Step 2: Audio download')
    st.markdown('- Press `Download` to save the audio')
    st.markdown('- The recorded audio will be downloaded to the Downloads folder on your computer')

    # Step 3
    st.markdown('##### Step 3: Audio upload')
    st.markdown('- Click `Browse files` to upload the audio')
    st.markdown('- Choose your recorded audio in the Downloads folder')

    # Step 4
    st.markdown('##### Step 4: Finish')
    st.markdown('- It will take about 15 seconds to process the data')
    st.markdown('- In case of `incorrect password` or `invalid speaker`, click `Χ` next to the uploaded file to delete the audio and record again from Step 1')


if st.session_state.sidebar == 'About':

    st.title('About my project')

    st.markdown('### Project Title: **Application of voice password and speaker verification**')
    st.markdown('#### Project Description')

    st.markdown('''
    - As digital technology advances in today's world, the potential for privacy violations has become a threat to users' information.
    - Thus, this AI application is designed to verify a user's identity based on voice characteristics such as tone, and at the same time integrate voice password authentication.
    ''')

    st.markdown('''- ###### [GitHub repository of the web-application](https://github.com/Kha1135123/VoiceAuthentication_Finalproject)''')

    st.markdown("##### Theory")
    with st.expander("See Wikipedia definition_Speech Recognition"):
        components.iframe("https://en.wikipedia.org/wiki/Speech_recognition",
                          height=320, scrolling=True)
    with st.expander("See Wikipedia definition_Speaker Recognition"):
        components.iframe("https://en.wikipedia.org/wiki/Speaker_recognition",
                          height=320, scrolling=True)

    st.markdown('#### *Project goals*')
    st.markdown('''
    - Build a security system using voice password authentication combined with speaker recognition, as follows:
        - First, given the audio input, the system verifies the voice password before running the speaker recognition model to identify the user.
        - If both the password and the target user's voice match the input, the system navigates the user, or gives the user a link, to a private website.
    - The main tasks of this AI model are to extract features of the speaker's voice for verification and to transcribe the audio to text.
    ''')


    st.markdown('#### **Scope of work**')
    st.markdown('''
    - Find appropriate pretrained models for speech recognition and speaker recognition.
    - Process recorded audio on the Streamlit platform.
    - A complete Streamlit application will be built after accomplishing the basic objectives.
    - After this project, I will be more experienced in audio data processing and in deploying an application on Streamlit.
    ''')

    st.markdown('''
    #### *A brief introduction about the project*

    ##### *Model*
    - Speech-to-text pretrained model: [speechbrain/Transformer ASR + TransformerLM -- LibriSpeech](https://huggingface.co/speechbrain/asr-transformer-transformerlm-librispeech)
    - Speaker verification: [speechbrain/ECAPA-TDNN model -- Voxceleb](https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb)
    ##### *Methods*
    - Applying the ASR pretrained model to transcribe speech to text.
    - Converting the audio file into a numpy array with the librosa module.
    - Using cosine similarity on the speaker embeddings extracted from the audio by the ECAPA-TDNN model to identify voices.
    ##### *Note*
    - **Reference**:
        - Streamlit audio recorder: https://github.com/stefanrmmr/streamlit_audio_recorder
        - Streamlit API reference: https://docs.streamlit.io/library/api-reference
    - To set up the audio recorder component, read and follow the instructions [here](https://github.com/stefanrmmr/streamlit_audio_recorder#readme) ''')
    st.write("#")
    st.markdown(''' - If you want to try it yourself, we recommend cloning our GitHub repo''')
    st.code("git clone https://github.com/Kha1135123/VoiceAuthentication_Finalproject.git", language='bash')

    st.markdown('''
    After that, just change the following relevant sections in the Final_project.py file to use this model:
    - Change the current working directory to the Downloads folder of your computer so that the application can find the recorded audio file, for example: ''')
    st.code("os.chdir('C:/Users/Administrator/Downloads')", language='python')

    st.markdown('''
    - Afterwards, change the working directory back to the directory of your Streamlit project:
    ''')
    st.code("os.chdir('/home/ _Your_project_folder_')", language='python')

    st.markdown('''
    - To verify a speaker, you will need at least 2 audio recordings from different people, including the target voice that you want the application to recognize. Put those audio files in your project folder, and then use the code below to get the path of each audio file on your computer. ''')
    sp = '''
    cur = os.getcwd()
    voice_1 = os.path.join(cur, '_SampleVoice_audio.wav')
    '''
    st.code(sp, language='python')


    st.write('#')
    st.markdown('''
    #### *Author*
    - Nguyễn Mạnh Kha _ Class of 2022 _ Le Hong Phong High School for the Gifted, Hochiminh City, Vietnam ''')


    st.write('#')

    st.caption('Made by @khanguyen')