import os

# Work around duplicate-OpenMP-runtime crashes; this must be set before
# video_generation (which presumably pulls in PyTorch and libiomp) is
# imported, since the OpenMP runtime initializes at import time.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

import shutil
import subprocess
from argparse import Namespace

import gradio as gr

from video_generation import VideoGenerator


def func(resize, video):
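    """Trim the uploaded video to five seconds, run DINO attention
    visualization on it, and return a side-by-side comparison video."""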
    output_dir = '/tmp/outputs'
    input_video = '/tmp/input.mp4'
    if os.path.exists(input_video):
        os.remove(input_video)
    subprocess.call(f"ffmpeg -ss 00:00:00 -i {video} -to 00:00:05 -c copy {input_video}".split())
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)

    os.makedirs(output_dir)

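    # These fields mirror the CLI flags of the DINO repo's video_generation
    # script; VideoGenerator reads its configuration off this Namespace.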
    args = Namespace(
        arch="vit_small",
        patch_size=8,
        pretrained_weights="dino_deitsmall8_pretrain.pth",
        checkpoint_key="teacher",
        input_path=input_video,
        output_path=output_dir,
        threshold=0.6,
        resize=resize,
        video_only=False,
        fps=30.0,
        video_format="mp4"
    )

    vid_generator = VideoGenerator(args)
    vid_generator.run()
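    # run() is expected to write original-reshaped.mp4 (the resized input)
    # and video.mp4 (the attention overlay) into output_dir; those names
    # are relied on below.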

    # Stack the resized input video and the attention output video side by
    # side into a single output clip.
    subprocess.call([
        "ffmpeg",
        "-i", f"{output_dir}/original-reshaped.mp4",
        "-i", f"{output_dir}/video.mp4",
        "-filter_complex", "hstack",
        f"{output_dir}/stacked.mp4",
    ])
    return f"{output_dir}/stacked.mp4"

title = "Interactive demo: DINO"
description = "Demo for Facebook AI's DINO, a new method for self-supervised training of Vision Transformers. Using this method, they are capable of segmenting objects within an image without having ever been trained to do so. This can be observed by displaying the self-attention of the heads from the last layer for the [CLS] token query. This demo uses a ViT-S/8 trained with DINO. To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.14294'>Emerging Properties in Self-Supervised Vision Transformers</a> | <a href='https://github.com/facebookresearch/dino'>Github Repo</a></p>"
iface = gr.Interface(fn=func, 
                     inputs=[gr.inputs.Slider(120, 420, 20, label="resize"), gr.inputs.Video(type=None)],
                     outputs='video',
                     title=title,
                     description=description,
                     examples=[[420, 'skate-jump.mp4']],
                     article=article)

iface.launch(debug=True)