Ahsen Khaliq committed on
Commit
5cf4d17
1 Parent(s): 121bd30

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -8
app.py CHANGED
@@ -26,13 +26,10 @@ inputs = [gr.inputs.Audio(label="Source Audio", type='file'),gr.inputs.Audio(lab
26
  outputs = gr.outputs.Audio(label="Output Audio", type='file')
27
 
28
 
29
- title = "VITS"
30
- description = "demo for VITS: Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
31
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2106.06103'>Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech</a> | <a href='https://github.com/jaywalnut310/vits'>Github Repo</a></p>"
32
-
33
- examples = [
34
- ["We propose VITS, Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech."],
35
- ["Our method adopts variational inference augmented with normalizing flows and an adversarial training process, which improves the expressive power of generative modeling."]
36
- ]
37
 
38
  gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, enable_queue=True).launch()
 
26
  outputs = gr.outputs.Audio(label="Output Audio", type='file')
27
 
28
 
29
+ title = "VQMIVC"
30
+ description = "Gradio demo for VQMIVC: Vector Quantization and Mutual Information-Based Unsupervised Speech Representation Disentanglement for One-shot Voice Conversion. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below."
31
+ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2106.10132' target='_blank'>VQMIVC: Vector Quantization and Mutual Information-Based Unsupervised Speech Representation Disentanglement for One-shot Voice Conversion</a> | <a href='https://github.com/Wendison/VQMIVC' target='_blank'>Github Repo</a></p>"
32
+
33
+ examples=[['source.wav','ref.wav']]
 
 
 
34
 
35
  gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, enable_queue=True).launch()