Karthik64001 committed on
Commit
0196eee
1 Parent(s): 40f51c9

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -0
app.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import VitsModel, AutoTokenizer
3
+ import torch
4
+ import scipy.io.wavfile
5
# BUG FIX: `TTS` was used below without being imported anywhere in the file,
# which raises NameError at startup. It comes from the Coqui TTS package,
# which this app already depends on for the tts_models/* checkpoints below.
from TTS.api import TTS

# Multilingual voice-cloning model (YourTTS) plus dedicated single-language
# Coqui models for languages handled without cloning.
tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False, gpu=False)
zh_tts = TTS(model_name="tts_models/zh-CN/baker/tacotron2-DDC-GST", progress_bar=False, gpu=False)
de_tts = TTS(model_name="tts_models/de/thorsten/vits", gpu=False)
es_tts = TTS(model_name="tts_models/es/mai/tacotron2-DDC", progress_bar=False, gpu=False)
# Tamil is served by a Hugging Face MMS VITS checkpoint instead of Coqui.
tam_tts_model = VitsModel.from_pretrained("facebook/mms-tts-tam")
tam_tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-tam")
def text_to_speech(text: str, speaker_wav, speaker_wav_file, language: str):
    """Synthesize `text` to speech in `language` and return the output WAV path.

    Args:
        text: Text to synthesize.
        speaker_wav: Filepath of a reference voice recorded via microphone, or None.
        speaker_wav_file: Filepath of an uploaded reference voice file, or None.
        language: One of "en", "fr-fr", "pt-br", "zh-CN", "de", "es", "tam".

    Returns:
        Path of the generated WAV file ("output.wav").
    """
    # Fall back to the uploaded file when no mic recording was provided.
    if speaker_wav_file and not speaker_wav:
        speaker_wav = speaker_wav_file
    file_path = "output.wav"
    if language == "zh-CN":
        zh_tts.tts_to_file(text, file_path=file_path)
    elif language == "de":
        de_tts.tts_to_file(text, file_path=file_path)
    elif language == "es":
        es_tts.tts_to_file(text, file_path=file_path)
    elif language == "tam":
        inputs = tam_tokenizer(text, return_tensors="pt")
        with torch.no_grad():
            output = tam_tts_model(**inputs).waveform
        # BUG FIX: `waveform` is a (batch, samples) tensor. scipy.io.wavfile.write
        # interprets a 2-D array as (Nsamples, Nchannels), so writing (1, N)
        # would produce one N-channel sample instead of N mono samples.
        # Squeeze to 1-D mono before writing.
        scipy.io.wavfile.write(
            file_path,
            rate=tam_tts_model.config.sampling_rate,
            data=output.squeeze().cpu().numpy(),
        )
    else:
        # Voice cloning (YourTTS): use the reference voice when given, otherwise
        # fall back to the model's first built-in speaker.
        if speaker_wav is not None:
            tts.tts_to_file(text, speaker_wav=speaker_wav, language=language, file_path=file_path)
        else:
            tts.tts_to_file(text, speaker=tts.speakers[0], language=language, file_path=file_path)
    return file_path
+
# Demo title. NOTE(review): appears unused in this file — gr.Blocks()/launch()
# is never given a title; confirm before removing.
title = "Voice-Cloning-Demo"
+
def toggle(choice):
    """Switch between the microphone and file audio widgets.

    Returns a pair of gr.update objects for (mic widget, file widget): the
    selected source becomes visible, the other is hidden, and both values
    are cleared.
    """
    use_mic = choice == "mic"
    mic_update = gr.update(visible=use_mic, value=None)
    file_update = gr.update(visible=not use_mic, value=None)
    return mic_update, file_update
+
def handle_language_change(choice):
    """Hide the voice-cloning inputs for languages that don't support cloning.

    Returns gr.update objects for (radio, audio_input_mic, audio_input_file):
    all hidden for zh-CN/de/es/tam, all visible otherwise.
    """
    # BUG FIX: the original wrote `gr.update(visible(False))` /
    # `gr.update(visible(True))`, calling a nonexistent `visible` function
    # (NameError at runtime) instead of passing the keyword `visible=...`.
    show = choice not in ("zh-CN", "de", "es", "tam")
    return gr.update(visible=show), gr.update(visible=show), gr.update(visible=show)
+
# Warning shown under the language selector: these languages use dedicated
# TTS models, so the reference-voice inputs do not apply to them.
warming_text = """Please note that Chinese, German, Spanish, and Tamil are currently not supported for voice cloning."""

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Text to synthesize and target language.
            text_input = gr.Textbox(label="Input the text", value="", max_lines=3)
            lan_input = gr.Radio(label="Language", choices=["en", "fr-fr", "pt-br", "zh-CN", "de", "es", "tam"], value="en")
            gr.Markdown(warming_text)
            # Reference-voice source selector; only one of the two audio
            # widgets below is visible at a time (toggled by `toggle`).
            radio = gr.Radio(["mic", "file"], value="mic",
                             label="How would you like to upload your audio?")
            audio_input_mic = gr.Audio(label="Voice to clone", source="microphone", type="filepath", visible=True)
            audio_input_file = gr.Audio(label="Voice to clone", type="filepath", visible=False)

    with gr.Row():
        with gr.Column():
            # NOTE(review): no click handler is wired to this button in the
            # visible code, so "Clear" currently does nothing — confirm.
            btn_clear = gr.Button("Clear")
        with gr.Column():
            btn = gr.Button("Submit", variant="primary")
        with gr.Column():
            audio_output = gr.Audio(label="Output")

    # Wire events: synthesis on submit, mic/file widget toggle, and
    # language-dependent visibility of the cloning inputs.
    btn.click(text_to_speech, inputs=[text_input, audio_input_mic,
                                      audio_input_file, lan_input], outputs=audio_output)
    radio.change(toggle, radio, [audio_input_mic, audio_input_file])
    lan_input.change(handle_language_change, lan_input, [radio, audio_input_mic, audio_input_file])

demo.launch(enable_queue=True)