Update app.py
app.py (CHANGED)
@@ -694,18 +694,7 @@ with gr.Blocks(css="""
     with gr.Tab("LWM Model and Framework"):
         gr.Image("images/lwm_model_v2.png")
         gr.Markdown("This figure depicts the offline pre-training and online embedding generation process for LWM. The channel is divided into fixed-size patches, which are linearly embedded and combined with positional encodings before being passed through a Transformer encoder. During self-supervised pre-training, some embeddings are masked, and LWM leverages self-attention to extract deep features, allowing the decoder to reconstruct the masked values. For downstream tasks, the generated LWM embeddings enhance performance. The right block shows the LWM architecture, inspired by the original Transformer introduced in the [**Attention Is All You Need**](https://arxiv.org/abs/1706.03762) paper.")
-
-        gr.Markdown("""
-        <!-- Google tag (gtag.js) -->
-        <script async src="https://www.googletagmanager.com/gtag/js?id=G-6C4E5YS7FX"></script>
-        <script>
-          window.dataLayer = window.dataLayer || [];
-          function gtag(){dataLayer.push(arguments);}
-          gtag('js', new Date());
-
-          gtag('config', 'G-6C4E5YS7FX');
-        </script>
-        """)
+
 # Launch the app
 if __name__ == "__main__":
     demo.launch()
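The gr.Markdown description kept in the context lines above summarizes LWM's masked-patch pre-training: the channel is split into fixed-size patches, linearly embedded, combined with positional encodings, partially masked, passed through a Transformer encoder, and a decoder reconstructs the masked values. A minimal PyTorch sketch of that flow follows; the class name, dimensions, and mask ratio are illustrative assumptions, not values taken from the LWM repository.

```python
import torch
import torch.nn as nn

class MaskedChannelModel(nn.Module):
    """Sketch of the flow described above: patchify -> embed -> mask -> encode -> reconstruct."""
    def __init__(self, patch_size=16, d_model=64, n_layers=4, n_heads=4, max_patches=128):
        super().__init__()
        self.patch_embed = nn.Linear(patch_size, d_model)                    # linear patch embedding
        self.pos_embed = nn.Parameter(torch.zeros(1, max_patches, d_model))  # learned positional encodings
        self.mask_token = nn.Parameter(torch.zeros(1, 1, d_model))           # placeholder for masked patches
        enc_layer = nn.TransformerEncoderLayer(d_model, n_heads, batch_first=True)
        self.encoder = nn.TransformerEncoder(enc_layer, n_layers)
        self.decoder = nn.Linear(d_model, patch_size)                        # reconstructs masked patch values

    def forward(self, patches, mask_ratio=0.15):
        # patches: (batch, num_patches, patch_size) real-valued channel patches
        x = self.patch_embed(patches) + self.pos_embed[:, : patches.size(1)]
        # randomly mask a fraction of the patch embeddings
        mask = torch.rand(x.shape[:2], device=x.device) < mask_ratio
        x = torch.where(mask.unsqueeze(-1), self.mask_token.expand_as(x), x)
        features = self.encoder(x)                                           # self-attention extracts deep features
        recon = self.decoder(features)
        # reconstruction loss computed only on the masked positions
        loss = ((recon - patches) ** 2)[mask].mean()
        return loss, features                                                # features act as the LWM-style embeddings
```

As a usage example, `loss, emb = MaskedChannelModel()(torch.randn(8, 32, 16))` would run one pre-training step on a batch of 8 channels split into 32 patches of 16 values each; the encoder output `emb` plays the role of the LWM embeddings used for downstream tasks.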