osanseviero committed
Commit 4d36fab
1 Parent(s): 168baa1

Add example video

Files changed (3)
  1. .gitattributes +1 -0
  2. app.py +1 -3
  3. video.mp4 +3 -0
.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*mp4* filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -8,8 +8,6 @@ import torch.nn as nn
 import torchvision
 import matplotlib.pyplot as plt
 
-torch.hub.download_url_to_file('https://viratdata.org/video/VIRAT_S_010204_05_000856_000890.mp4', 'video.mp4')
-
 def get_attention_maps(pixel_values, attentions, nh):
     threshold = 0.6
     w_featmap = pixel_values.shape[-2] // model.config.patch_size
@@ -72,7 +70,7 @@ model = ViTModel.from_pretrained("facebook/dino-vits8", add_pooling_layer=False)
 title = "Interactive demo: DINO"
 description = "Demo for Facebook AI's DINO, a new method for self-supervised training of Vision Transformers. Using this method, they are capable of segmenting objects within an image without having ever been trained to do so. This can be observed by displaying the self-attention of the heads from the last layer for the [CLS] token query. This demo uses a ViT-S/8 trained with DINO. To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.14294'>Emerging Properties in Self-Supervised Vision Transformers</a> | <a href='https://github.com/facebookresearch/dino'>Github Repo</a></p>"
-examples =['video.mp4']
+examples =[['video.mp4']]
 iface = gr.Interface(fn=visualize_attention,
                      inputs=gr.inputs.Video(gr.inputs.Video()),
                      outputs=[gr.outputs.Video(label=f'result_video')],
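Two things change in app.py: the clip is no longer downloaded at runtime via torch.hub.download_url_to_file (it now ships with the repo through LFS), and examples is nested one level deeper. The extra nesting follows Gradio's convention that examples is a list of example rows, each row being a list with one entry per input component. Below is a minimal sketch of that shape, using the legacy gr.inputs/gr.outputs API that app.py already uses and a placeholder function in place of the Space's visualize_attention; the second example path is hypothetical.

import gradio as gr

# Each inner list is one example row; each entry maps to one input component.
# With a single Video input, every example is a one-element list.
examples = [
    ['video.mp4'],            # the LFS-tracked clip added in this commit
    # ['another_clip.mp4'],   # hypothetical second example, for illustration only
]

def passthrough(video):
    # Placeholder for the Space's real visualize_attention function.
    return video

iface = gr.Interface(fn=passthrough,
                     inputs=gr.inputs.Video(),
                     outputs=gr.outputs.Video(label='result_video'),
                     examples=examples)

if __name__ == '__main__':
    iface.launch()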
video.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddf775baddf4c40f54ba19555c54504c6c5e6adfe0c2cb1c71ad791fcaf122a1
+size 9935859
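Because of the new *mp4* rule in .gitattributes, the committed video.mp4 is a Git LFS pointer rather than the raw bytes: three text lines giving the pointer spec version, the SHA-256 of the actual file, and its size in bytes (about 9.9 MB here). A rough sketch of reading those fields, assuming the pointer text is on disk rather than the resolved media file:

from pathlib import Path

def read_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict of its space-separated key/value fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(' ')
            fields[key] = value
    return fields

pointer = read_lfs_pointer('video.mp4')    # only meaningful if LFS has not replaced the pointer
print(pointer['oid'])                      # 'sha256:ddf775ba...'
print(int(pointer['size']) / 1e6, 'MB')    # roughly 9.9 MB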