Update app.py
Browse files
app.py
CHANGED
@@ -354,6 +354,17 @@ def run(source, action_source, hair_source, top_source, bottom_source, target, a
|
|
354 |
return 'demo.gif'
|
355 |
|
356 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
357 |
gr.Interface(
|
358 |
fn=run,
|
359 |
inputs=[
|
@@ -382,5 +393,5 @@ gr.Interface(
|
|
382 |
live=False,
|
383 |
cache_examples=True,
|
384 |
title="TranSVAE for Unsupervised Video Domain Adaptation",
|
385 |
-
description=
|
386 |
).launch()
|
|
|
354 |
return 'demo.gif'
|
355 |
|
356 |
|
357 |
+
desc = """
|
358 |
+
Welcome to the demo page of TranSVAE, a disentanglement framework designed for unsupervised video domain adaptation. In this live demo, you are able to:
|
359 |
+
|
360 |
+
- Explore domain disentanglement and transfer in TranSVAE with Sprites avatars;
|
361 |
+
- Customize the Sprites avatars yourself by changing their actions, hair colors, top wears, and bottom wears.
|
362 |
+
|
363 |
+
For more details, read the [TranSVAE paper](https://arxiv.org/abs/2208.07365) and visit our [project page](https://ldkong.com/TranSVAE). The training and testing code is available at our [GitHub Repo](https://github.com/ldkong1205/TranSVAE).
|
364 |
+
|
365 |
+
Have fun!
|
366 |
+
"""
|
367 |
+
|
368 |
gr.Interface(
|
369 |
fn=run,
|
370 |
inputs=[
|
|
|
393 |
live=False,
|
394 |
cache_examples=True,
|
395 |
title="TranSVAE for Unsupervised Video Domain Adaptation",
|
396 |
+
description=desc
|
397 |
).launch()
|