mskrishna committed
Commit 5657e8d
1 Parent(s): 4efa0da

Create app.py

Files changed (1): app.py (+148, -0)

app.py ADDED
import torch
import os
from ChatUniVi.constants import *
from ChatUniVi.conversation import conv_templates, SeparatorStyle
from ChatUniVi.model.builder import load_pretrained_model
from ChatUniVi.utils import disable_torch_init
from ChatUniVi.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
from PIL import Image
from decord import VideoReader, cpu
import numpy as np


def _get_rawvideo_dec(video_path, image_processor, max_frames=MAX_IMAGE_LENGTH, image_resolution=224, video_framerate=1, s=None, e=None):
    # Speed up video decoding via decord; s/e are optional start/end times in seconds.
    if s is None:
        start_time, end_time = None, None
    else:
        # Clamp the window to non-negative values and ensure start < end.
        start_time = int(s)
        end_time = int(e)
        start_time = start_time if start_time >= 0. else 0.
        end_time = end_time if end_time >= 0. else 0.
        if start_time > end_time:
            start_time, end_time = end_time, start_time
        elif start_time == end_time:
            end_time = start_time + 1

    if os.path.exists(video_path):
        vreader = VideoReader(video_path, ctx=cpu(0))
    else:
        raise FileNotFoundError(video_path)

    # Convert the requested time window into frame indices at the native fps.
    fps = vreader.get_avg_fps()
    f_start = 0 if start_time is None else int(start_time * fps)
    f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1))
    num_frames = f_end - f_start + 1
    if num_frames > 0:
        # Sample frames at video_framerate fps -> tensor of shape T x 3 x H x W.
        sample_fps = int(video_framerate)
        t_stride = int(round(float(fps) / sample_fps))

        all_pos = list(range(f_start, f_end + 1, t_stride))
        if len(all_pos) > max_frames:
            # Too many candidates: uniformly downsample to exactly max_frames indices.
            sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=max_frames, dtype=int)]
        else:
            sample_pos = all_pos

        patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()]

        patch_images = torch.stack([image_processor.preprocess(img, return_tensors='pt')['pixel_values'][0] for img in patch_images])
        slice_len = patch_images.shape[0]

        return patch_images, slice_len
    else:
        # Raise rather than silently returning None, which callers cannot unpack.
        raise ValueError("video path: {} error.".format(video_path))
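

# Illustration (not part of the original script): the uniform downsampling above
# uses np.linspace to pick evenly spaced indices. For example, with 10 candidate
# positions and max_frames=4:
#     np.linspace(0, 9, num=4, dtype=int) -> array([0, 3, 6, 9])
# so long clips keep full temporal coverage while the frame count stays bounded.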

if __name__ == '__main__':
    # Model Parameter
    model_path = "Chat-UniVi/Chat-UniVi"  # or "Chat-UniVi/Chat-UniVi-13B"
    video_path = "path/to/video.mp4"  # placeholder: point this at the video to describe

    # The number of visual tokens varies with the length of the video; "max_frames"
    # is the maximum number of frames. Longer videos are uniformly downsampled so
    # that exactly "max_frames" frames are kept.
    max_frames = 100

    # The number of frames retained per second of video.
    video_framerate = 1
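
    # Illustrative note (not in the original comments): with video_framerate=1
    # and max_frames=100, a 40 s clip yields ~40 sampled frames, while a 10 min
    # clip has ~600 candidates and is uniformly downsampled to 100.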

    # Input Text
    qs = "Describe the video."

    # Sampling Parameter
    conv_mode = "simple"
    temperature = 0.2
    top_p = None
    num_beams = 1

    disable_torch_init()
    model_path = os.path.expanduser(model_path)
    model_name = "ChatUniVi"
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, None, model_name)

    # Register the multimodal special tokens the checkpoint expects and resize
    # the embedding table to match the enlarged vocabulary.
    mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
    mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
    if mm_use_im_patch_token:
        tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
    if mm_use_im_start_end:
        tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
    model.resize_token_embeddings(len(tokenizer))

    vision_tower = model.get_vision_tower()
    if not vision_tower.is_loaded:
        vision_tower.load_model()
    image_processor = vision_tower.image_processor

    if model.config.config["use_cluster"]:
        # Cast all modules to bfloat16 (Module.to converts parameters in place).
        for n, m in model.named_modules():
            m = m.to(dtype=torch.bfloat16)

    # Check if the video exists
    if video_path is not None:
        video_frames, slice_len = _get_rawvideo_dec(video_path, image_processor, max_frames=max_frames, video_framerate=video_framerate)

        # Prepend one image token per sampled frame to the question.
        cur_prompt = qs
        if model.config.mm_use_im_start_end:
            qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN * slice_len + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN * slice_len + '\n' + qs

        conv = conv_templates[conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
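
        # Illustration (assumption: DEFAULT_IMAGE_TOKEN is "<image>", as in
        # LLaVA-style codebases). With slice_len=3 the question would become
        #     "<image><image><image>\nDescribe the video."
        # before being wrapped in the conversation template.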

        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

        # Stop generation once the conversation separator string is emitted.
        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=video_frames.half().cuda(),
                do_sample=True,
                temperature=temperature,
                top_p=top_p,
                num_beams=num_beams,
                output_scores=True,
                return_dict_in_generate=True,
                max_new_tokens=1024,
                use_cache=True,
                stopping_criteria=[stopping_criteria])
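
        # Note (illustrative): do_sample=True with a low temperature (0.2) gives
        # mildly stochastic decoding; do_sample=False or num_beams > 1 would make
        # the output deterministic.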

        # Decode only the newly generated tokens and trim the trailing stop string.
        output_ids = output_ids.sequences
        input_token_len = input_ids.shape[1]
        n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
        if n_diff_input_output > 0:
            print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
        outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()
        print(outputs)
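
# Usage sketch (assumptions: the ChatUniVi package and its dependencies, i.e.
# torch, decord, Pillow, and numpy, are installed, and a CUDA GPU is available,
# since the script calls .cuda()):
#     python app.py
# With video_path set above, the script prints the model's description of the
# video to stdout.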