RobotJelly committed
Commit d550e96
1 Parent(s): 4f0b2ef
Files changed (1)
  1. app.py +6 -8
app.py CHANGED
@@ -16,8 +16,6 @@ import cv2
from IPython.display import Audio

classes_names = ['Benjamin_Netanyau', 'Jens_Stoltenberg', 'Julia_Gillard', 'Magaret_Tarcher', 'Nelson_Mandela']
- labels = [0, 1, 2, 3, 4]
- class_labels = {0: 'Benjamin_Netanyau', 1: 'Jens_Stoltenberg', 2: 'Julia_Gillard', 3: 'Magaret_Tarcher', 4: 'Nelson_Mandela'}

# Percentage of samples to use for validation
# VALID_SPLIT = 0.1
@@ -91,13 +89,13 @@ input = [gr.inputs.Audio(source="upload", type="filepath", label="Take audio sam
# the app outputs two segmented images
output = [gr.outputs.Textbox(label="Predicted Speaker"), gr.outputs.Audio(label="Corresponding Audio")]
# it's good practice to pass examples, description and a title to guide users
- examples = [['/content/drive/MyDrive/Downloads/16000_pcm_speeches/audio/Benjamin_Netanyau/260.wav', 'Benjamin_Netanyau'],
- ['/content/drive/MyDrive/Downloads/16000_pcm_speeches/audio/Jens_Stoltenberg/611.wav', 'Jens_Stoltenberg'],
- ['/content/drive/MyDrive/Downloads/16000_pcm_speeches/audio/Julia_Gillard/65.wav', 'Julia_Gillard'],
- ['/content/drive/MyDrive/Downloads/16000_pcm_speeches/audio/Magaret_Tarcher/1083.wav', 'Magaret_Tarcher'],
- ['/content/drive/MyDrive/Downloads/16000_pcm_speeches/audio/Nelson_Mandela/605.wav', 'Nelson_Mandela']]
+ examples = [['audios/260.wav', 'Benjamin_Netanyau'],
+ ['audios/611.wav', 'Jens_Stoltenberg'],
+ ['audios/65.wav', 'Julia_Gillard'],
+ ['audios/1083.wav', 'Magaret_Tarcher'],
+ ['audios/605.wav', 'Nelson_Mandela']]
title = "Speaker Recognition"
description = "Select the noisy audio samples from examples to check whether the speaker recognised by the model is correct or not even in presence of noise !!!"

gr.Interface(fn=predict, inputs = input, outputs = output, examples=examples, allow_flagging=False, analytics_enabled=False,
- title=title, description=description, article="Space By: <u><a href="https://github.com/robotjellyzone"><b>Kavya Bisht</b></a></u> \n Based on <a href="https://keras.io/examples/audio/speaker_recognition_using_cnn/"><b>this notebook</b></a>").launch(enable_queue=True, debug=True)
+ title=title, description=description, article="Space By: <u><a href='https://github.com/robotjellyzone'><b>Kavya Bisht</b></a></u> \n Based on <a href='https://keras.io/examples/audio/speaker_recognition_using_cnn/'><b>this notebook</b></a>").launch(enable_queue=True, debug=True)
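In short: the example clips are now read from an audios/ folder bundled with the Space instead of a mounted Google Drive path, and the removed labels list and class_labels dict are redundant because the surviving classes_names list already maps a predicted class index to a speaker name. The second hunk also switches the HTML attribute quotes inside the article string to single quotes, so they no longer terminate the surrounding double-quoted Python string. A minimal sketch of the index-to-name mapping this relies on (the index_to_speaker helper and the score vector are illustrative, not part of the commit):

import numpy as np

classes_names = ['Benjamin_Netanyau', 'Jens_Stoltenberg', 'Julia_Gillard', 'Magaret_Tarcher', 'Nelson_Mandela']

def index_to_speaker(scores):
    # Hypothetical helper: argmax over the model's output scores gives the class
    # index, which doubles as the speaker's position in classes_names.
    return classes_names[int(np.argmax(scores))]

# Dummy score vector where class 2 wins -> 'Julia_Gillard'
print(index_to_speaker([0.05, 0.10, 0.70, 0.10, 0.05]))

The same indexing is presumably what the Space's predict function does internally, which is why the separate label structures could be dropped without changing behaviour.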