import os
import wave
from io import BytesIO

import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_download
from piper import PiperVoice


def text_to_speech(text):
    # Download the voice model and its config from the private dataset repo
    model_path = hf_hub_download(
        repo_id="sekhan/luxembourgish-voice",
        repo_type="dataset",
        filename="high/lu_rtl_high3239.onnx",
        token=os.environ["HF_TOKEN"],
    )
    config_path = hf_hub_download(
        repo_id="sekhan/luxembourgish-voice",
        repo_type="dataset",
        filename="high/lu_rtl_high3239.onnx.json",
        token=os.environ["HF_TOKEN"],
    )

    # Load the Luxembourgish voice
    voice = PiperVoice.load(model_path, config_path)

    # Synthesize into an in-memory WAV file
    buffer = BytesIO()
    with wave.open(buffer, "wb") as wav_file:
        wav_file.setframerate(voice.config.sample_rate)
        wav_file.setsampwidth(2)  # 16-bit samples
        wav_file.setnchannels(1)  # mono
        voice.synthesize(
            text,
            wav_file,
            sentence_silence=0.5,
            length_scale=1.1,
            noise_scale=0.75,
        )

    # Read the samples back via the wave module so the WAV header is skipped
    buffer.seek(0)
    with wave.open(buffer, "rb") as wav_file:
        frames = wav_file.readframes(wav_file.getnframes())
    audio_data = np.frombuffer(frames, dtype=np.int16)

    # Gradio's numpy audio output expects a (sample_rate, samples) tuple
    return (voice.config.sample_rate, audio_data), None


# Gradio interface
with gr.Blocks(theme=gr.themes.Base(), css="footer {visibility: hidden}") as blocks:
    gr.Markdown("# Luxembourgish Text-to-Speech Synthesizer")
    gr.Markdown(
        "Enter Luxembourgish text to synthesize it into speech. This is a very early demo. "
        "Your text input is not saved and is only used for the speech synthesis."
    )
    input_text = gr.Textbox(label="Input Text", max_lines=3, placeholder="Enter text here...")
    submit_button = gr.Button("Synthesize")
    output_audio = gr.Audio(label="Synthesized Speech", type="numpy", show_download_button=False)
    output_text = gr.Textbox(label="Output Text", visible=False)

    def process_and_output(text):
        audio, message = text_to_speech(text)
        return audio, message

    submit_button.click(process_and_output, inputs=input_text, outputs=[output_audio, output_text])

blocks.launch()