Alidr79 committed on
Commit
2b8a3f4
1 Parent(s): 8f83fe0

Create app.py

Files changed (1)
app.py +181 -0
app.py ADDED
@@ -0,0 +1,181 @@
import torch
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
from transformers import SpeechT5HifiGan
from datasets import load_dataset
import soundfile as sf
import librosa

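# Farsi YouTube ASR chunks (~10 s each); the test split supplies the reference
# utterances used below to extract speaker embeddings.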
dataset = load_dataset('pourmand1376/asr-farsi-youtube-chunked-10-seconds', split="test")

def resample_audio(example):
    # Resample to 16 kHz
    y_resampled = librosa.resample(example["audio"]["array"], orig_sr=example["audio"]["sampling_rate"], target_sr=16000)

    # Update the example with the resampled audio and new sample rate
    example["audio"]["array"] = y_resampled
    example["audio"]["sampling_rate"] = 16000

    return example

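# Keep the first 1000 clips and resample them all to 16 kHz up front, so any
# clip can serve directly as a speaker reference.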
dataset = dataset.select(range(1000))
dataset = dataset.map(resample_audio)

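# Fix the torch RNG so repeated runs produce the same generated speech.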
def set_seed(seed):
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)


set_seed(1)

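# For SpeechT5 checkpoints, AutoModelForTextToSpectrogram resolves to
# SpeechT5ForTextToSpeech, whose generate_speech() is called in tts_fn below.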
# Load model directly
from transformers import AutoProcessor, AutoModelForTextToSpectrogram

processor = AutoProcessor.from_pretrained("Alidr79/speecht5_v3_youtube")
model = AutoModelForTextToSpectrogram.from_pretrained("Alidr79/speecht5_v3_youtube")

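# Speaker encoder: speechbrain's x-vector model produces the 512-dim speaker
# embedding that SpeechT5 expects as its conditioning vector.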
from speechbrain.inference.classifiers import EncoderClassifier
import os

spk_model_name = "speechbrain/spkrec-xvect-voxceleb"

device = "cuda" if torch.cuda.is_available() else "cpu"
speaker_model = EncoderClassifier.from_hparams(
    source=spk_model_name,
    run_opts={"device": device},
    savedir=os.path.join("/tmp", spk_model_name),
)

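# Encode a reference waveform into an L2-normalized x-vector (numpy array of shape (512,)).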
def create_speaker_embedding(waveform):
    with torch.no_grad():
        speaker_embeddings = speaker_model.encode_batch(torch.tensor(waveform))
        speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings, dim=2)
        speaker_embeddings = speaker_embeddings.squeeze().cpu().numpy()
    return speaker_embeddings

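# HiFi-GAN vocoder that converts SpeechT5 log-mel spectrograms into 16 kHz waveforms.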
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

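# Persian grapheme-to-phoneme conversion: input text is transliterated to
# phonemes before it is passed to the SpeechT5 processor.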
from PersianG2p import Persian_g2p_converter
from scipy.io import wavfile

PersianG2Pconverter = Persian_g2p_converter(use_large=True)

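# noisereduce applies spectral-gating noise reduction to the generated waveform.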
import noisereduce as nr

def denoise_audio(audio, sr):
    # Perform noise reduction
    denoised_audio = nr.reduce_noise(y=audio, sr=sr)
    return denoised_audio

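# pydub handles loudness normalization: shift the clip's mean dBFS to a fixed target level.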
from pydub import AudioSegment

def match_target_amplitude(sound, target_dBFS):
    change_in_dBFS = target_dBFS - sound.dBFS
    return sound.apply_gain(change_in_dBFS)

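# Synthesis pipeline for a single sentence:
#   reference clip -> speaker x-vector -> G2P phonemes -> SpeechT5 spectrogram
#   -> HiFi-GAN waveform -> noise reduction -> loudness normalization -> (16000, int16 audio)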
def tts_fn(slider_value, input_text):
    # Gradio sliders can deliver floats; dataset indexing needs an int.
    slider_value = int(slider_value)

    audio_embedding = dataset[slider_value]['audio']['array']
    sample_rate_embedding = dataset[slider_value]['audio']['sampling_rate']
    if sample_rate_embedding != 16000:
        audio_embedding = librosa.resample(audio_embedding, orig_sr=sample_rate_embedding, target_sr=16_000)

    with torch.no_grad():
        speaker_embedding = create_speaker_embedding(audio_embedding)
    speaker_embedding = torch.tensor(speaker_embedding).unsqueeze(0)

    # Convert the Persian input text to a phoneme string for the model.
    phonemes = PersianG2Pconverter.transliterate(input_text, tidy=False, secret=True)
    # text = "</s>"
    # for i in phonemes.replace(' .', '').split(" "):
    #     text += i + " <pad> "

    text = phonemes

    print("sentence:", input_text)
    print("sentence phonemes:", text)

    with torch.no_grad():
        inputs = processor(text=text, return_tensors="pt")

    with torch.no_grad():
        spectrogram = model.generate_speech(inputs["input_ids"], speaker_embedding, minlenratio=2, maxlenratio=4, threshold=0.35)

    with torch.no_grad():
        speech = vocoder(spectrogram)

    speech = speech.numpy().reshape(-1)
    speech_denoised = denoise_audio(speech, 16000)
    sf.write("in_speech.wav", speech_denoised, 16000)

    sound = AudioSegment.from_wav("in_speech.wav")
    normalized_sound = match_target_amplitude(sound, -20.0)
    normalized_sound.export("out_sound.wav", format="wav")

    sample_rate_out, audio_out = wavfile.read("out_sound.wav")

    assert sample_rate_out == 16_000

    return 16000, (audio_out.reshape(-1)).astype(np.int16)

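# Split the input on '.' and synthesize each sentence separately, then
# concatenate the audio chunks into one response.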
def master_fn(slider_value, input_text):
    if "." not in input_text:
        input_text += '.'

    all_speech = []
    for sentence in input_text.split("."):
        # Splitting on '.' leaves empty trailing/blank chunks; skip them.
        if not sentence.strip():
            continue
        sampling_rate_response, audio_chunk_response = tts_fn(slider_value, sentence)
        all_speech.append(audio_chunk_response)

    audio_response = np.concatenate(all_speech)
    return sampling_rate_response, audio_response

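# Gradio front end: a slider picks the reference speaker clip by dataset index,
# a textbox takes the input text, and the interface returns a single audio output.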
import gradio as gr

slider = gr.Slider(
    minimum=0,
    maximum=(len(dataset) - 1),
    value=600,
    step=1,
    label="Select a speaker (good examples: 600, 604, 910, 7, 13)"
)

# Create the text input component
text_input = gr.Textbox(
    label="Enter some text",
    placeholder="Type something here..."
)


demo = gr.Interface(
    fn=master_fn,
    inputs=[slider, text_input],  # List of inputs
    outputs="audio"
)

demo.launch()