import torch
import soundfile as sf
import gradio as gr
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

# Load the fine-tuned Urdu TTS model and its processor from the Hugging Face Hub
checkpoint = "Abdullah-Habib/urdu_speech_tt"  # replace with your own model name if needed
processor = SpeechT5Processor.from_pretrained(checkpoint)
model = SpeechT5ForTextToSpeech.from_pretrained(checkpoint)
tokenizer = processor.tokenizer
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# Urdu (Unicode) to Roman Urdu mapping
buck2uni = {
    u"\u0627": "a",
    u"\u0675": "a",
    u"\u0673": "a",
    u"\u0622": "aa",
    u"\u0628": "b",
    u"\u067E": "p",
    u"\u062A": "t",
    u"\u0637": "t",
    u"\u0679": "t",
    u"\u062C": "j",
    u"\u0633": "s",
    u"\u062B": "s",
    u"\u0635": "s",
    u"\u0686": "ch",
    u"\u062D": "h",
    u"\u0647": "h",
    u"\u0629": "h",
    u"\u06DF": "h",
    u"\u062E": "kh",
    u"\u062F": "d",
    u"\u0688": "d",
    u"\u0630": "z",
    u"\u0632": "z",
    u"\u0636": "z",
    u"\u0638": "z",
    u"\u068E": "z",
    u"\u0631": "r",
    u"\u0691": "r",
    u"\u0634": "sh",
    u"\u063A": "gh",
    u"\u0641": "f",
    u"\u06A9": "k",
    u"\u0642": "k",
    u"\u06AF": "g",
    u"\u0644": "l",
    u"\u0645": "m",
    u"\u0646": "n",
    u"\u06BA": "n",
    u"\u0648": "o",
    u"\u0649": "y",
    u"\u0626": "y",
    u"\u06CC": "y",
    u"\u06D2": "e",
    u"\u06C1": "h",
    u"\u064A": "e",
    u"\u06C2": "ah",
    u"\u06BE": "h",
    u"\u0639": "a",
    u"\u0643": "k",
    u"\u0621": "a",
    u"\u0624": "o",
    u"\u060C": "",  # separator (Urdu comma)
}

def transString(string, reverse=0):
    """Transliterate an Urdu (Unicode) string into Roman Urdu using buck2uni.
    Set reverse=1 to map Roman Urdu back to Urdu script instead."""
    for k, v in buck2uni.items():
        if not reverse:
            string = string.replace(k, v)
        else:
            string = string.replace(v, k)
    return string

def generate_audio(text):
    # Convert the Urdu input text to Roman Urdu
    roman_urdu = transString(text)

    # Tokenize the Roman Urdu text
    inputs = processor(text=roman_urdu, return_tensors="pt")

    # Load the speaker embedding that conditions the voice (SpeechT5 expects a
    # (1, 512) x-vector); it could also be loaded from a .npy file or inlined
    # as a tensor literal.
    # speaker_embeddings = torch.tensor(np.load("speaker_embeddings.npy"))
    speaker_embeddings = torch.load("speaker_embeddings_29.pt")
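    # Alternative (a sketch, not used here): public SpeechT5 examples often take a
    # speaker x-vector from the "Matthijs/cmu-arctic-xvectors" dataset; index 7306
    # is the speaker used in the Hugging Face docs. This assumes the `datasets`
    # library is installed and the dataset can be downloaded.
    # from datasets import load_dataset
    # embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    # speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)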
    # Generate the waveform with SpeechT5, decoding to audio with the HiFi-GAN vocoder
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
    return speech

def text_to_speech(text):
    # Synthesize audio and write it out as a 16 kHz, 16-bit PCM WAV file
    audio_output = generate_audio(text)
    output_path = "output.wav"
    sf.write(output_path, audio_output.numpy(), 16000, "PCM_16")
    return output_path

examples = [
    ["اگر رشتے داری ہے تو پیسے کی"],
    ["میری تعلیم جیکی کی ہے۔"],
]
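# Quick sanity check of the transliteration before wiring up the UI (a minimal
# sketch; the sample word is an assumption, any short Urdu string works):
# seen + laam + alif + meem should map to "s" + "l" + "a" + "m".
assert transString("سلام") == "slam"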
interface = gr.Interface(
    fn=text_to_speech,
    inputs="text",
    outputs="audio",
    title="Urdu TTS",
    description=(
        "A simple Urdu text-to-speech application. It is by no means perfect and will "
        "not work for all text; on some inputs it may generate random noise. For now it "
        "works reliably only on very basic Urdu text, such as the examples below."
    ),
    examples=examples,
)
interface.launch()
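# Headless usage (a sketch, not part of the app): to synthesize a file without the
# Gradio UI, comment out interface.launch() above (or move it under an
# `if __name__ == "__main__":` guard) and call text_to_speech directly:
#     path = text_to_speech("اگر رشتے داری ہے تو پیسے کی")
#     print(path)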