File size: 3,814 Bytes
4d15203
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59d9bcb
 
 
 
4d15203
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e8ac110
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
import tempfile
import edge_tts
import gradio as gr
import asyncio

# --- Final, VERIFIED Language & Voice Configuration ---
# --- Final, VERIFIED Language & Voice Configuration ---
# Maps a display language name -> {speaker display name -> Azure neural voice ID}.
# The voice IDs are consumed by edge_tts.Communicate; the outer keys populate
# the "Language" dropdown and the inner keys populate the "Speaker" dropdown.
language_dict = {
    "English": {
        "Jenny (Female, US)": "en-US-JennyNeural",
        "Andrew (Male, US)": "en-US-AndrewNeural",
        "Sonia (Female, UK)": "en-GB-SoniaNeural",
        "Ryan (Male, UK)": "en-GB-RyanNeural"
    },
    "Amharic": {
        "Mekdes (Female)": "am-ET-MekdesNeural",
        "Ameha (Male)": "am-ET-AmehaNeural"
    },
    "Tigrinya": {
        # WORKAROUND: Using Amharic voices as a fallback for Tigrinya.
        "Lulia (Female)": "am-ET-MekdesNeural",
        "Birhane (Male)": "am-ET-AmehaNeural"
    },
    "Oromo": {
        # This is a mock-up. It uses Swahili voices as a fallback.
        "Zuri (Female)": "sw-KE-ZuriNeural",
        "Rafiki (Male)": "sw-KE-RafikiNeural"
    },
    "Somali": {
        "Ubax (Female)": "so-SO-UbaxNeural",
        "Muuse (Male)": "so-SO-MuuseNeural"
    },
    "Arabic": {
        "Zariyah (Female, KSA)": "ar-SA-ZariyahNeural",
        "Hamed (Male, KSA)": "ar-SA-HamedNeural"
    },
    "French": {
        "Denise (Female)": "fr-FR-DeniseNeural",
        "Henri (Male)": "fr-FR-HenriNeural"
    },
    "German": {
        "Katja (Female)": "de-DE-KatjaNeural",
        "Conrad (Male)": "de-DE-ConradNeural"
    },
    "Italian": {
        "Elsa (Female)": "it-IT-ElsaNeural",
        "Diego (Male)": "it-IT-DiegoNeural"
    },
    "Japanese": {
        "Nanami (Female)": "ja-JP-NanamiNeural",
        "Keita (Male)": "ja-JP-KeitaNeural"
    },
    "Korean": {
        "Sun-Hi (Female)": "ko-KR-SunHiNeural",
        "InJoon (Male)": "ko-KR-InJoonNeural"
    },
    "Chinese (Simplified)": {
        "Xiaoxiao (Female)": "zh-CN-XiaoxiaoNeural",
        "Yunxi (Male)": "zh-CN-YunxiNeural"
    },
    "Chinese (Traditional)": {
        "HsiaoChen (Female)": "zh-TW-HsiaoChenNeural",
        "YunJhe (Male)": "zh-TW-YunJheNeural"
    }
}

async def text_to_speech_edge(text, language, speaker):
    """Synthesize *text* to an MP3 file using Microsoft Edge TTS.

    Args:
        text: The text to speak; must be non-empty.
        language: Key into ``language_dict`` (e.g. "Amharic").
        speaker: Display name of a voice within that language.

    Returns:
        Filesystem path of a temporary ``.mp3`` file with the audio.

    Raises:
        gr.Error: On blank input, unknown language/speaker combination,
            a 60-second timeout, or any other synthesis failure.
    """
    # Fail fast on blank input instead of handing edge-tts an empty request.
    if not text or not text.strip():
        raise gr.Error("Error: Please enter some text to synthesize.")

    try:
        voice = language_dict[language][speaker]
    except KeyError as exc:
        raise gr.Error(f"Error: Voice '{speaker}' not found for {language}.") from exc

    try:
        communicate = edge_tts.Communicate(text, voice)
        # Reserve a temp path, then CLOSE the handle before saving: on
        # Windows an open NamedTemporaryFile cannot be reopened for writing
        # by edge-tts, so saving inside the `with` block would fail there.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            tmp_path = tmp_file.name
        await asyncio.wait_for(communicate.save(tmp_path), timeout=60)
        return tmp_path

    except asyncio.TimeoutError as exc:
        raise gr.Error("Error: Request timed out. Please try again.") from exc
    except Exception as exc:
        raise gr.Error(f"An unexpected error occurred: {str(exc)}") from exc

def update_speakers(language):
    """Return a refreshed Speaker dropdown listing the voices for *language*.

    Unknown languages yield an empty dropdown with no selection.
    """
    voice_names = list(language_dict.get(language, []))
    default_voice = voice_names[0] if voice_names else None
    return gr.Dropdown(choices=voice_names, value=default_voice, interactive=True)

# --- Gradio Interface ---
with gr.Blocks(title="SelamGPT TTS", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# SelamGPT Text-to-Speech")
    
    # Language/speaker pickers side by side; initial values must agree
    # (Amharic -> its first listed voice, "Mekdes (Female)").
    with gr.Row():
        language = gr.Dropdown(
            choices=list(language_dict.keys()),
            value="Amharic",
            label="Language"
        )
        speaker = gr.Dropdown(
            label="Speaker",
            choices=list(language_dict["Amharic"].keys()),
            value="Mekdes (Female)"
        )
    
    with gr.Column():
        input_text = gr.Textbox(label="Input Text", placeholder="Enter text here...")
        generate_btn = gr.Button("Generate Audio", variant="primary")
        
    output_audio = gr.Audio(label="Output Audio", autoplay=True)

    # Changing the language repopulates the speaker list; the button runs
    # the async TTS coroutine (Gradio awaits coroutine handlers natively).
    language.change(fn=update_speakers, inputs=language, outputs=speaker)
    generate_btn.click(fn=text_to_speech_edge, inputs=[input_text, language, speaker], outputs=output_audio)

# Launch the local Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()