debugged
Browse files
app.py
CHANGED
@@ -13,7 +13,7 @@ vocoder = HiFiGANXvectorLightningModule.load_from_checkpoint("vocoder_finetuned.
|
|
13 |
xvector_model = hydra.utils.instantiate(vocoder.cfg.data.xvector.model)
|
14 |
xvector_model = xvector_model.to('cpu')
|
15 |
preprocessor = PreprocessForInfer(miipher.cfg)
|
16 |
-
|
17 |
@torch.inference_mode()
|
18 |
def main(wav_path,transcript,lang_code):
|
19 |
wav,sr =torchaudio.load(wav_path)
|
@@ -44,6 +44,7 @@ description = """
|
|
44 |
This repository provides pretrained weights and a demo of the Miipher implementation by [Wataru-Nakata](https://github.com/Wataru-Nakata/miipher)
|
45 |
Miipher was originally proposed by Koizumi et al. [arxiv](https://arxiv.org/abs/2303.01664)
|
46 |
Please note that the model differs in many ways from the paper.
|
|
|
47 |
**Non commercial use only** as the weights are provided in CC-BY-NC 2.0.
|
48 |
"""
|
49 |
inputs = [gr.Audio(label="noisy audio",type='filepath'),gr.Textbox(label="Transcript", value="Your transcript here", max_lines=1),
|
@@ -52,4 +53,4 @@ outputs = gr.Audio(label="Output")
|
|
52 |
|
53 |
demo = gr.Interface(fn=main, inputs=inputs, outputs=outputs,description=description)
|
54 |
|
55 |
-
demo.launch()
|
|
|
13 |
xvector_model = hydra.utils.instantiate(vocoder.cfg.data.xvector.model)
|
14 |
xvector_model = xvector_model.to('cpu')
|
15 |
preprocessor = PreprocessForInfer(miipher.cfg)
|
16 |
+
preprocessor.cfg.preprocess.text2phone_model.is_cuda=False
|
17 |
@torch.inference_mode()
|
18 |
def main(wav_path,transcript,lang_code):
|
19 |
wav,sr =torchaudio.load(wav_path)
|
|
|
44 |
This repository provides pretrained weights and a demo of the Miipher implementation by [Wataru-Nakata](https://github.com/Wataru-Nakata/miipher)
|
45 |
Miipher was originally proposed by Koizumi et al. [arxiv](https://arxiv.org/abs/2303.01664)
|
46 |
Please note that the model differs in many ways from the paper.
|
47 |
+
|
48 |
**Non commercial use only** as the weights are provided in CC-BY-NC 2.0.
|
49 |
"""
|
50 |
inputs = [gr.Audio(label="noisy audio",type='filepath'),gr.Textbox(label="Transcript", value="Your transcript here", max_lines=1),
|
|
|
53 |
|
54 |
demo = gr.Interface(fn=main, inputs=inputs, outputs=outputs,description=description)
|
55 |
|
56 |
+
demo.launch(share=True)
|