Update app.py
app.py CHANGED
```diff
@@ -119,8 +119,25 @@ def plot_sample(mode):
 
     return make_subplot_latent(test_images, quant)
 
-
-
-
-
-gr.
+demo = gr.Blocks()
+
+with demo:
+    gr.Markdown("# Vector-Quantized Variational Autoencoders (VQ-VAE)")
+    gr.Markdown("""This space demonstrates the use of VQ-VAEs. Similar to traditional VAEs, VQ-VAEs try to create a useful latent representation.
+    However, a VQ-VAE's latent space is **discrete** rather than continuous. Below, we can view how well this model compresses and reconstructs MNIST digits, but more importantly, we can see a
+    discretized latent representation. These discrete representations can then be paired with a network like PixelCNN to generate novel images.
+
+    VQ-VAEs are one of the tools used by DALL-E and are among the few models that perform on par with VAEs while using a discrete latent space.""")
+
+    with gr.Row():
+        with gr.Column():
+            with gr.Row():
+                radio = gr.Radio(choices=['Reconstruction', 'Latent Representation'])
+            with gr.Row():
+                button = gr.Button('Run')
+        with gr.Column():
+            out = gr.Plot()
+
+    button.click(plot_sample, radio, out)
+
+demo.launch()
```
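The Space's description above centers on the **discretized latent representation**: a VQ-VAE maps each continuous encoder output vector to its nearest entry in a learned codebook, and the resulting grid of integer indices is what the "Latent Representation" view visualizes. As an illustration only (none of these names or shapes come from this Space; the codebook size, embedding dimension, and 7x7 latent grid are assumptions), a minimal NumPy sketch of that nearest-codebook lookup:

```python
import numpy as np

# Illustrative sketch of the VQ-VAE quantization step, not this Space's model code.
# Assumed sizes: 64 codebook entries, 16-dim embeddings, a 7x7 latent grid per digit.
rng = np.random.default_rng(0)
codebook = rng.normal(size=(64, 16))   # learned embedding table (stand-in values)
z_e = rng.normal(size=(7, 7, 16))      # continuous encoder output for one image (stand-in)

flat = z_e.reshape(-1, 16)                                        # (49, 16)
dists = ((flat[:, None, :] - codebook[None, :, :]) ** 2).sum(-1)  # (49, 64) squared distances
indices = dists.argmin(axis=1).reshape(7, 7)                      # discrete codes: the "latent image"
z_q = codebook[indices]                                           # (7, 7, 16) quantized vectors for the decoder

print(indices)  # an integer grid like the one the 'Latent Representation' view plots
```

It is this integer grid, rather than a continuous latent vector, that a prior such as PixelCNN can model to generate novel images.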
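In the Blocks layout above, `button.click(plot_sample, radio, out)` passes the Radio's selected string to `plot_sample(mode)` and renders the returned figure in the `gr.Plot` component; only the `return make_subplot_latent(test_images, quant)` line of that callback is visible in this hunk. Below is a self-contained stand-in with the same wiring, where the plotting bodies are placeholders using random data rather than the Space's `make_subplot_*` helpers:

```python
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np

def plot_sample(mode):
    # Placeholder figures; the real Space returns MNIST reconstructions or quantized code grids.
    rng = np.random.default_rng(0)
    fig, ax = plt.subplots()
    if mode == 'Latent Representation':
        ax.imshow(rng.integers(0, 64, size=(7, 7)), cmap='viridis')  # stand-in code indices
    else:
        ax.imshow(rng.random((28, 28)), cmap='gray')                 # stand-in digit
    ax.set_title(mode)
    return fig

with gr.Blocks() as demo:
    radio = gr.Radio(choices=['Reconstruction', 'Latent Representation'], value='Reconstruction')
    button = gr.Button('Run')
    out = gr.Plot()
    button.click(plot_sample, radio, out)

demo.launch()
```

Note that the diff leaves the Radio without a default value, so clicking Run before selecting an option passes `None` to the callback; the `value='Reconstruction'` default in this sketch is an added assumption, not part of the commit.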