Somalitts committed on
Commit 61cbc18 · verified · 1 Parent(s): 25352e2

Update app.py

Files changed (1)
  1. app.py +159 -64
app.py CHANGED
@@ -3,95 +3,190 @@ import torch
  import torchaudio
  import re
  import os
- from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
- from speechbrain.pretrained import EncoderClassifier
+ import numpy as np
+ import soundfile as sf
+ from pydub import AudioSegment, effects

+ # --- Model Loading ---
+ print("Loading models, this may take a moment...")
+
+ # Check for CUDA (GPU) availability
  device = "cuda" if torch.cuda.is_available() else "cpu"
+ print(f"Using device: {device}")

- # Load models
+ # Load the core TTS models from Hugging Face
  processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
  model = SpeechT5ForTextToSpeech.from_pretrained("Somalitts/8aad").to(device)
  vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

+ # Load the speaker encoder model from SpeechBrain
+ # This model creates the voice profile (embedding) from an audio sample.
  speaker_model = EncoderClassifier.from_hparams(
      source="speechbrain/spkrec-xvect-voxceleb",
      run_opts={"device": device},
-     savedir="./spk_model"
+     savedir=os.path.join("models", "spk_model")  # Organized model saving
  )
+ print("Models loaded successfully.")
+
+
+ # --- Speaker Embedding Generation ---
+ # This section creates the unique voice identity for the TTS.
+
+ def create_speaker_embedding(audio_path):
+     """
+     Normalizes the input audio and creates a high-quality speaker embedding.
+     """
+     print("Creating speaker embedding...")
+     # 1. Pre-process the audio for better quality
+     print(f"Normalizing audio file: {audio_path}")
+     raw_audio = AudioSegment.from_wav(audio_path)
+     normalized_audio = effects.normalize(raw_audio)
+
+     # pydub works with milliseconds
+     normalized_audio_path = "normalized_speaker.wav"
+     normalized_audio.export(normalized_audio_path, format="wav")
+
+     # 2. Generate the embedding
+     waveform, sr = torchaudio.load(normalized_audio_path)
+     # Resample if necessary and move to the correct device
+     if sr != 16000:
+         waveform = torchaudio.functional.resample(waveform, sr, 16000)
+
+     with torch.no_grad():
+         embedding = speaker_model.encode_batch(waveform.to(device))
+         # Normalize the embedding itself for model compatibility
+         embedding = torch.nn.functional.normalize(embedding, dim=2).squeeze()
+
+     print("Speaker embedding created and cached.")
+     return embedding

- # Speaker embedding
+ # Path to the speaker audio and the cached embedding
+ SPEAKER_WAV = "1.wav"
  EMB_PATH = "speaker_embedding.pt"
+
+ if not os.path.exists(SPEAKER_WAV):
+     raise FileNotFoundError(f"Error: Speaker audio file not found at '{SPEAKER_WAV}'. Please create this file.")
+
+ # Create and cache the embedding if it doesn't exist
  if os.path.exists(EMB_PATH):
+     print("Loading cached speaker embedding.")
      speaker_embedding = torch.load(EMB_PATH).to(device)
  else:
-     audio, sr = torchaudio.load("1.wav")
-     audio = torchaudio.functional.resample(audio, sr, 16000).mean(dim=0).unsqueeze(0).to(device)
-     with torch.no_grad():
-         emb = speaker_model.encode_batch(audio)
-         emb = torch.nn.functional.normalize(emb, dim=2).squeeze()
-     torch.save(emb.cpu(), EMB_PATH)
-     speaker_embedding = emb
-
- # Number conversion (Somali)
- number_words = {
-     0: "eber", 1: "koow", 2: "labo", 3: "seddex", 4: "afar", 5: "shan",
-     6: "lix", 7: "todobo", 8: "sideed", 9: "sagaal", 10: "toban",
-     11: "toban iyo koow", 12: "toban iyo labo", 13: "toban iyo seddex",
-     14: "toban iyo afar", 15: "toban iyo shan", 16: "toban iyo lix",
-     17: "toban iyo todobo", 18: "toban iyo sideed", 19: "toban iyo sagaal",
-     20: "labaatan", 30: "sodon", 40: "afartan", 50: "konton",
-     60: "lixdan", 70: "todobaatan", 80: "sideetan", 90: "sagaashan",
-     100: "boqol", 1000: "kun",
- }
-
- def number_to_words(number):
-     if number < 20:
-         return number_words[number]
-     elif number < 100:
-         tens, unit = divmod(number, 10)
-         return number_words[tens * 10] + (" " + number_words[unit] if unit else "")
-     elif number < 1000:
-         hundreds, remainder = divmod(number, 100)
-         return (number_words[hundreds] + " boqol" if hundreds > 1 else "BOQOL") + (" " + number_to_words(remainder) if remainder else "")
-     elif number < 1000000:
-         thousands, remainder = divmod(number, 1000)
-         return (number_to_words(thousands) + " kun" if thousands > 1 else "KUN") + (" " + number_to_words(remainder) if remainder else "")
-     elif number < 1000000000:
-         millions, remainder = divmod(number, 1000000)
-         return number_to_words(millions) + " malyan" + (" " + number_to_words(remainder) if remainder else "")
-     elif number < 1000000000000:
-         billions, remainder = divmod(number, 1000000000)
-         return number_to_words(billions) + " milyaar" + (" " + number_to_words(remainder) if remainder else "")
-     else:
-         return str(number)
-
- def replace_numbers_with_words(text):
-     def replace(match):
-         number = int(match.group())
-         return number_to_words(number)
-     return re.sub(r'\b\d+\b', replace, text)
+     speaker_embedding = create_speaker_embedding(SPEAKER_WAV)
+     # Cache the embedding for faster startups next time
+     torch.save(speaker_embedding.cpu(), EMB_PATH)
+
+
+ # --- Text Normalization (Somali) ---
+ # This function cleans the text and converts numbers to words.
+
+ def number_to_somali_words(num_str):
+     """Converts a string of digits into Somali words."""
+     num = int(num_str)
+     if num < 0: return "eber ka yar"  # Handle negative case
+
+     units = ["", "koow", "labo", "saddex", "afar", "shan", "lix", "toddobo", "siddeed", "sagaal"]
+     teens = ["toban", "kow iyo toban", "laba iyo toban", "saddex iyo toban", "afar iyo toban", "shan iyo toban", "lix iyo toban", "toddobo iyo toban", "siddeed iyo toban", "sagaal iyo toban"]
+     tens = ["", "toban", "labaatan", "soddon", "afartan", "konton", "lixdan", "toddobaatan", "siddeetan", "sagaashan"]
+
+     if num == 0: return "eber"
+     if num < 10: return units[num]
+     if num < 20: return teens[num - 10]
+     if num < 100:
+         ten, unit = divmod(num, 10)
+         return tens[ten] + ((" iyo " + units[unit]) if unit != 0 else "")
+     if num < 1000:
+         hundred, rest = divmod(num, 100)
+         return (units[hundred] if hundred > 1 else "") + " boqol" + ((" iyo " + number_to_somali_words(str(rest))) if rest != 0 else "")
+     if num < 1000000:
+         thousand, rest = divmod(num, 1000)
+         return number_to_somali_words(str(thousand)) + " kun" + ((" iyo " + number_to_somali_words(str(rest))) if rest != 0 else "")
+     return num_str  # Fallback for very large numbers

  def normalize_text(text):
+     """Cleans and normalizes Somali text for TTS."""
      text = text.lower()
-     text = replace_numbers_with_words(text)
-     text = re.sub(r'[^\w\s]', '', text)
+     # Convert numbers to words using a regex substitution
+     text = re.sub(r"\d+", lambda m: number_to_somali_words(m.group(0)), text)
+     # Remove special characters except for basic punctuation that might indicate pauses
+     text = re.sub(r'[^\w\s,\.]', '', text)
+     text = text.strip()
      return text

- # TTS function
+
+ # --- Core TTS Function ---
+
  def text_to_speech(text):
-     text = normalize_text(text)
-     inputs = processor(text=text, return_tensors="pt").to(device)
+     """
+     Generates speech from text, including pre- and post-processing steps.
+     """
+     print(f"Received text: {text}")
+     # 1. Normalize the input text
+     normalized_text = normalize_text(text)
+     if not normalized_text:
+         print("Warning: Text is empty after normalization.")
+         # Return silence if there's no text to process
+         return (16000, np.zeros(16000).astype(np.int16))
+
+     print(f"Normalized text: {normalized_text}")
+
+     # 2. Process text and generate speech
+     inputs = processor(text=normalized_text, return_tensors="pt").to(device)
      with torch.no_grad():
-         speech = model.generate_speech(inputs["input_ids"], speaker_embedding.unsqueeze(0), vocoder=vocoder)
-     return (16000, speech.cpu().numpy())
+         speech_tensor = model.generate_speech(
+             inputs["input_ids"],
+             speaker_embedding.unsqueeze(0),
+             vocoder=vocoder
+         )
+
+     speech_numpy = speech_tensor.cpu().numpy()
+
+     # 3. Post-process the audio to make it sound more human
+     print("Post-processing generated audio...")
+     # Convert numpy array to a pydub AudioSegment
+     # Ensure numpy array is in the correct format (16-bit PCM)
+     audio_segment = AudioSegment(
+         speech_numpy.tobytes(),
+         frame_rate=16000,
+         sample_width=speech_numpy.dtype.itemsize,
+         channels=1
+     )
+
+     # Apply normalization - this is a key step for better quality
+     processed_audio = effects.normalize(audio_segment)
+
+     # Convert back to numpy array for Gradio output
+     processed_numpy = np.array(processed_audio.get_array_of_samples())
+
+     print("Speech generation complete.")
+     return (16000, processed_numpy)
+
+
+ # --- Gradio Web Interface ---

- # Gradio Interface
  iface = gr.Interface(
      fn=text_to_speech,
-     inputs=gr.Textbox(label="Geli qoraalka af-soomaali"),
-     outputs=gr.Audio(label="Codka la abuuray", type="numpy"),
-     title="Somali TTS",
-     description="TTS Soomaaliyeed oo la adeegsaday cod gaar ah (11.wav)"
+     inputs=gr.Textbox(
+         label="Qoraalka Geli (Enter Somali Text)",
+         placeholder="Ku soo dhawaada barnaamijka codka ee Soomaaliyeed..."
+     ),
+     outputs=gr.Audio(
+         label="Codka La Soo Saaray (Generated Audio)",
+         type="numpy"
+     ),
+     title="🤖 Somali Text-to-Speech (Tayada Sare)",
+     description=(
+         "Ku qor qoraal af-Soomaali ah si aad ugu beddesho cod dabiici ah oo aad moodo mid dad."
+         "Codkan waxaa lagu sameeyay iyadoo la isticmaalayo faylka codka ee `1.wav`."
+         "\n\n(Enter Somali text to convert it to natural, human-like speech. "
+         "This voice was created using the `1.wav` audio file.)"
+     ),
+     examples=[
+         ["Sidee tahay saaxiib? Maanta waa maalin qurux badan."],
+         ["Qiimaha badeecadani waa 2500 oo shilin."],
+         ["Barnaamijkan waxaa sameeyay sirdoon macmal ah."],
+     ]
  )

- iface.launch()
+ if __name__ == "__main__":
+     iface.launch(share=True)  # Set share=True to get a public link
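
Note on the post-processing step: model.generate_speech returns a float32 waveform, while pydub's AudioSegment constructor expects raw integer PCM bytes, so the "16-bit PCM" comment in the new text_to_speech implies an explicit conversion at that point. A minimal sketch of such a conversion is shown below; the to_int16_segment helper is hypothetical and not part of this commit, and it assumes the SpeechT5 output is float32 roughly in the range [-1, 1].

import numpy as np
from pydub import AudioSegment

def to_int16_segment(waveform: np.ndarray, sample_rate: int = 16000) -> AudioSegment:
    """Convert a float32 waveform in [-1, 1] into a 16-bit mono pydub AudioSegment (hypothetical helper)."""
    # Clip to the valid range, then scale to the int16 range.
    pcm = np.clip(waveform, -1.0, 1.0)
    pcm = (pcm * 32767.0).astype(np.int16)
    return AudioSegment(
        pcm.tobytes(),
        frame_rate=sample_rate,
        sample_width=2,  # 2 bytes per sample = 16-bit PCM
        channels=1,
    )

With a conversion like this, sample_width is fixed at 2, and get_array_of_samples() on the normalized segment yields int16 samples that Gradio's numpy audio output accepts directly.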