import torch
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer
import soundfile as sf
import gradio as gr
import os

# Set device (GPU if available, else CPU)
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Load model and tokenizer from Hugging Face Hub
# These will be downloaded automatically by the Space when it builds
# The model will be loaded to the GPU if available in the Space's runtime
model = ParlerTTSForConditionalGeneration.from_pretrained("parler-tts/parler-tts-tiny-v1").to(device)
tokenizer = AutoTokenizer.from_pretrained("parler-tts/parler-tts-tiny-v1")


def predict_tts(text, voice_description):
    if not text:
        return None, "Please enter some text."
    if not voice_description:
        return None, "Please provide a voice description."
    try:
        input_ids = tokenizer(voice_description, return_tensors="pt").input_ids.to(device)
        prompt_input_ids = tokenizer(text, return_tensors="pt").input_ids.to(device)

        # Disable gradient calculation for inference to save memory and speed
        with torch.no_grad():
            generation = model.generate(input_ids=input_ids, prompt_input_ids=prompt_input_ids)

        audio_arr = generation.cpu().numpy().squeeze()
        sampling_rate = model.config.sampling_rate

        # Gradio's Audio output component expects a filepath to an audio file
        output_path = "output_audio.wav"
        sf.write(output_path, audio_arr, sampling_rate)

        return output_path, "Speech generated successfully!"
    except Exception as e:
        return None, f"An error occurred: {str(e)}"


# Gradio Interface definition for the Space
iface = gr.Interface(
    fn=predict_tts,
    inputs=[
        gr.Textbox(lines=5, label="Text to Convert", placeholder="Enter your text here..."),
        gr.Textbox(lines=3, label="Voice Description", placeholder="e.g., A female speaker with a calm and clear speech, very high quality audio."),
    ],
    outputs=[
        gr.Audio(label="Generated Speech", type="filepath"),
        gr.Textbox(label="Status"),
    ],
    title="Parler-TTS Tiny: Natural Language Guided Text-to-Speech",
    description="Enter text and describe the voice you want (gender, tone, speed, quality) to generate speech using the tiny Parler-TTS model.",
    examples=[
        ["Hello, my name is Parler TTS. How can I help you today?", "A friendly female voice speaking clearly."],
        ["The quick brown fox jumps over the lazy dog.", "A deep male voice, speaking slowly and thoughtfully."],
        ["We're excited to announce our new product!", "An enthusiastic female voice with high pitch."],
    ],
    allow_flagging="never",  # This prevents users from flagging your outputs for feedback
)

# This standard Gradio line tells the Space to launch the interface
if __name__ == "__main__":
    iface.launch()