Update app.py
app.py CHANGED
@@ -1,4 +1,3 @@
-from huggingface_hub import hf_hub_download
 import gradio as gr
 import numpy as np
 import imageio
@@ -12,21 +11,6 @@ from utils import IMAGENET_MEAN, IMAGENET_STD, num_frames, patch_size, input_siz
 from labels import K400_label_map, SSv2_label_map, UCF_label_map
 
 
-# MODELS = {
-#     'K400': [
-#         './TFVideoMAE_S_K400_16x224_FT',
-#         './TFVideoMAE_S_K400_16x224_PT'
-#     ],
-#     'SSv2': [
-#         './TFVideoMAE_S_K400_16x224_FT',
-#         './TFVideoMAE_S_K400_16x224_PT'
-#     ],
-#     'UCF' : [
-#         './TFVideoMAE_S_K400_16x224_FT',
-#         './TFVideoMAE_S_K400_16x224_PT'
-#     ]
-# }
-
 LABEL_MAPS = {
     'K400': K400_label_map,
     'SSv2': SSv2_label_map,
@@ -57,7 +41,6 @@ def get_model(model_type):
     # pt_path = hf_hub_download(
     #     repo_id='innat/videomae', filename=model_type + '_PT', repo_type="model"
     # )
-
     ft_model = keras.models.load_model(model_type + '_FT')
     pt_model = keras.models.load_model(model_type + '_PT')
 
@@ -75,6 +58,10 @@ def get_model(model_type):
 
 
 def inference(video_file, model_type, mask_ratio):
+    print(video_file)
+    print(model_type)
+    print(mask_ratio)
+    print('---------------')
     # get sample data
     container = read_video(video_file)
     frames = frame_sampling(container, num_frames=num_frames)
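For context on the two model-loading routes visible in this diff: the commented-out hf_hub_download call would fetch a pretrained checkpoint from the innat/videomae repo on the Hugging Face Hub, while the code that actually runs loads the fine-tuned (_FT) and pretrained (_PT) models from local paths with keras.models.load_model. The sketch below is a minimal standalone restatement of that pattern, not code from app.py; the model_type value and the USE_HUB flag are illustrative assumptions.

from huggingface_hub import hf_hub_download  # only needed for the Hub route
import keras

# Illustrative value only; in app.py, model_type is passed in by the caller.
model_type = 'TFVideoMAE_S_K400_16x224'

USE_HUB = False  # hypothetical switch; app.py keeps the Hub route commented out

if USE_HUB:
    # Download the pretrained checkpoint from the 'innat/videomae' model repo;
    # hf_hub_download returns the local cache path of the fetched file.
    pt_path = hf_hub_download(
        repo_id='innat/videomae', filename=model_type + '_PT', repo_type="model"
    )

# Route used in app.py: load the fine-tuned and pretrained models from
# local paths named '<model_type>_FT' and '<model_type>_PT'.
ft_model = keras.models.load_model(model_type + '_FT')
pt_model = keras.models.load_model(model_type + '_PT')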