Jmmianda committed
Commit
1163ca9
1 Parent(s): 3e6d37e

Create app.py

Files changed (1)
  1. app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
+ import torch
+ import gradio as gr
+ from torchaudio.sox_effects import apply_effects_file
+ from transformers import AutoFeatureExtractor, AutoModelForAudioXVector
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ EFFECTS = [
+     ["remix", "-"],  # merge all channels into one
+     ["channels", "1"],  # convert to mono
+     ["rate", "16000"],  # resample to 16000 Hz
+     ["gain", "-1.0"],  # attenuate by 1 dB
+     ["silence", "1", "0.1", "0.1%", "-1", "0.1", "0.1%"],  # trim silence
+     # ["pad", "0", "1.5"],  # add 1.5 seconds of silence at the end
+     ["trim", "0", "10"],  # keep the first 10 seconds
+ ]
+
+ model_name = "microsoft/unispeech-sat-base-plus-sv"
+ feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
+ model = AutoModelForAudioXVector.from_pretrained(model_name).to(device)
+
+ # Decision threshold for accepting two clips as the same speaker
+ THRESHOLD = 0.85
+
+ cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
+
+ def similarity_fn(path1, path2):
+     if not (path1 and path2):
+         return "ERROR: Please record audio for *both* speakers!"
+     # Apply the sox effects to both input audio files
+     wav1, _ = apply_effects_file(path1, EFFECTS)
+     wav2, _ = apply_effects_file(path2, EFFECTS)
+     # Extract features
+     input1 = feature_extractor(wav1.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)
+     input2 = feature_extractor(wav2.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)
+     with torch.no_grad():
+         emb1 = model(input1).embeddings
+         emb2 = model(input2).embeddings
+     emb1 = torch.nn.functional.normalize(emb1, dim=-1)
+     emb2 = torch.nn.functional.normalize(emb2, dim=-1)
+     similarity = cosine_similarity(emb1, emb2).cpu().numpy()[0]
+     if similarity >= THRESHOLD:
+         return f"Similarity score is {similarity:.0%}. The audio clips belong to the same person."
+     else:
+         return f"Similarity score is {similarity:.0%}. The audio clips don't belong to the same person. Authentication failed!"
+
+ inputs = [
+     gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker #1"),
+     gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker #2"),
+ ]
+
+ outputs = gr.outputs.Textbox(label="Output Text")
+ description = (
+     "This app evaluates whether two speech recordings belong to the same individual, based on the cosine similarity of their speaker embeddings."
+ )
+
+ interface = gr.Interface(
+     fn=similarity_fn,
+     inputs=inputs,
+     outputs=outputs,
+     title="Voice Authentication with UniSpeech-SAT + X-Vectors",
+     description=description,
+     layout="horizontal",
+     theme="grass",
+     allow_flagging=False,
+     live=False,
+     examples=[
+         ["cate_blanch.mp3", "cate_blanch_2.mp3"],
+         ["cate_blanch.mp3", "denzel_washington.mp3"],
+     ],
+ )
+
+ interface.launch(enable_queue=True)
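
For a quick sanity check outside the Gradio UI, similarity_fn can be called directly once the definitions above have been executed (for instance in a Python REPL, before launching the interface). A minimal sketch, assuming two local recordings sample_a.wav and sample_b.wav, which are hypothetical names and not files shipped in this commit:

# Hypothetical local files; replace with real recordings on disk.
result = similarity_fn("sample_a.wav", "sample_b.wav")
print(result)  # e.g. "Similarity score is 91%. The audio clips belong to the same person."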