import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as T
from PIL import Image
from decord import VideoReader
from decord import cpu
from slowfast.models.uniformerv2_model import VisionTransformer
from kinetics_class_index import kinetics_classnames
from transforms import (
    GroupNormalize, GroupScale, GroupCenterCrop, 
    Stack, ToTorchFormatTensor
)
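
# Note: slowfast.models.uniformerv2_model, kinetics_class_index, and
# transforms are local modules from the UniFormerV2 repo (not pip packages);
# this script assumes they live next to this file or on PYTHONPATH.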

import gradio as gr



# Device on which to run the model; set to "cuda" to run on a GPU
device = "cpu"

# Load the pretrained UniFormerV2-B/16 weights
model = VisionTransformer.from_pretrained("not-lain/uniformerv2_b16")

# Set to eval mode and move to desired device
model = model.to(device)
model = model.eval()

# Create an id-to-label mapping (string class ids -> Kinetics-400 names)
kinetics_id_to_classname = {}
for k, v in kinetics_classnames.items():
    kinetics_id_to_classname[k] = v
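
# The keys are string ids ("0" ... "399"); inference() below relies on these
# string keys when it looks up kinetics_id_to_classname[str(i)].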


def get_index(num_frames, num_segments=8):
    seg_size = float(num_frames - 1) / num_segments
    start = int(seg_size / 2)
    offsets = np.array([
        start + int(np.round(seg_size * idx)) for idx in range(num_segments)
    ])
    return offsets
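
# Illustrative example (given the rounding above): for a 100-frame clip,
# get_index(100, 8) yields frame indices [6, 18, 31, 43, 56, 68, 80, 93],
# i.e. one frame near the middle of each of the 8 equal temporal segments.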


def load_video(video_path):
    vr = VideoReader(video_path, ctx=cpu(0))
    num_frames = len(vr)
    frame_indices = get_index(num_frames, 8)

    # ImageNet-style preprocessing: scale the short side to 256, center-crop
    # to 224, stack the frames, convert to a tensor, and normalize
    crop_size = 224
    scale_size = 256
    input_mean = [0.485, 0.456, 0.406]
    input_std = [0.229, 0.224, 0.225]

    transform = T.Compose([
        GroupScale(int(scale_size)),
        GroupCenterCrop(crop_size),
        Stack(),
        ToTorchFormatTensor(),
        GroupNormalize(input_mean, input_std)
    ])

    # Decode only the sampled frames and run them through the transform
    images_group = []
    for frame_index in frame_indices:
        img = Image.fromarray(vr[frame_index].asnumpy())
        images_group.append(img)
    torch_imgs = transform(images_group)
    return torch_imgs
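
# Note: assuming the repo's Stack and ToTorchFormatTensor transforms,
# torch_imgs flattens the 8 sampled frames along the channel axis, giving a
# tensor of shape (8 * 3, 224, 224); inference() below reshapes it into a clip.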
    

def inference(video):
    vid = load_video(video)

    # The model expects clips of shape B x C x T x H x W, so unflatten the
    # stacked frames and move the channel axis in front of time
    TC, H, W = vid.shape
    inputs = vid.reshape(1, TC // 3, 3, H, W).permute(0, 2, 1, 3, 4)

    with torch.no_grad():
        prediction = model(inputs.to(device))
    prediction = F.softmax(prediction, dim=1).flatten()

    # One probability per Kinetics-400 class
    return {kinetics_id_to_classname[str(i)]: float(prediction[i]) for i in range(400)}
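
# Quick offline sanity check (sketch; 'hitting_baseball.mp4' is one of the
# example clips bundled with the demo below):
#
#   probs = inference("hitting_baseball.mp4")
#   top5 = sorted(probs.items(), key=lambda kv: kv[1], reverse=True)[:5]
#   print(top5)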
    

def set_example_video(example: list) -> dict:
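    # Copy the clicked example into the input video component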
    return gr.Video.update(value=example[0])


demo = gr.Blocks()
with demo:
    gr.Markdown(
        """
        # UniFormerV2-B
        Gradio demo for <a href='https://github.com/OpenGVLab/UniFormerV2' target='_blank'>UniFormerV2</a>. To use it, upload a video or click one of the examples to load it. Read more at the links below.
        """
    )

    with gr.Box():
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    input_video = gr.Video(label='Input Video')
                with gr.Row():
                    submit_button = gr.Button('Submit')
            with gr.Column():
                label = gr.Label(num_top_classes=5)
        with gr.Row():
            example_videos = gr.Dataset(
                components=[input_video],
                samples=[['hitting_baseball.mp4'], ['hoverboarding.mp4'], ['yoga.mp4']]
            )

    gr.Markdown(
        """
        <p style='text-align: center'><a href='https://arxiv.org/abs/2211.09552' target='_blank'>[Arxiv] UniFormerV2: Spatiotemporal Learning by Arming Image ViTs with Video UniFormer</a> | <a href='https://github.com/OpenGVLab/UniFormerV2' target='_blank'>Github Repo</a></p>
        """
    )

    submit_button.click(fn=inference, inputs=input_video, outputs=label)
    example_videos.click(fn=set_example_video, inputs=example_videos, outputs=example_videos.components)

demo.launch(enable_queue=True)