DongfuJiang committed
Commit 4ad4a2a
1 Parent(s): 2d2f625
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: green
  colorTo: yellow
  sdk: gradio
  sdk_version: 4.24.0
- app_file: app.py
+ app_file: app_high_res.py
  pinned: false
  license: apache-2.0
  short_description: Multimodal Language Model
app.py CHANGED
@@ -10,8 +10,8 @@ import functools
  from transformers import AutoProcessor, Idefics2ForConditionalGeneration
  from models.conversation import conv_templates
  from typing import List
- processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096")
- model = Idefics2ForConditionalGeneration.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096", device_map="auto")
+ processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-95k-mantis-2epoch_4096")
+ model = Idefics2ForConditionalGeneration.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-95k-mantis-2epoch_4096", device_map="auto")
  MAX_NUM_FRAMES = 24
  conv_template = conv_templates["idefics_2"]
@@ -23,6 +23,14 @@ for item in examples:
      item['images'] = [os.path.join("./examples", video_id, x) for x in item['images']]
      item['video'] = os.path.join("./examples", item['video'])

+ with open("./examples/hd.json", 'r') as f:
+     hd_examples = json.load(f)
+
+ for item in hd_examples:
+     item['video'] = os.path.join("./examples", item['video'])
+
+ examples = hd_examples + examples
+
  VIDEO_EVAL_PROMPT = """
  Suppose you are an expert in judging and evaluating the quality of AI-generated videos,
  please watch the following frames of a given video and see the text prompt for generating the video,
@@ -155,6 +163,8 @@ def eval_video(prompt, video:str):
      video_frames = read_video_pyav(container, indices)

      frames = [Image.fromarray(x) for x in video_frames]
+     # resize to 256 x 256
+     frames = [x.resize((256, 256)) for x in frames]

      eval_prompt = VIDEO_EVAL_PROMPT.format(text_prompt=prompt)
      eval_prompt += "<video>"
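Note on the resize change above: the frames produced by read_video_pyav are downscaled to 256 x 256 before being handed to the Idefics2 processor. A minimal standalone sketch of that sampling-and-resizing path, mirroring read_video_pyav and eval_video (the sample_and_resize name and the example file path are illustrative, not part of the commit):

import av
import numpy as np
from PIL import Image

MAX_NUM_FRAMES = 24  # same cap as in app.py

def sample_and_resize(video_path, size=(256, 256)):
    # Uniformly sample up to MAX_NUM_FRAMES frames, then downscale each one,
    # matching the behaviour this diff adds to eval_video.
    container = av.open(video_path)
    total_frames = container.streams.video[0].frames
    if total_frames > MAX_NUM_FRAMES:
        indices = np.arange(0, total_frames, total_frames / MAX_NUM_FRAMES).astype(int)
    else:
        indices = np.arange(total_frames)
    container.seek(0)
    frames = []
    for i, frame in enumerate(container.decode(video=0)):
        if i > indices[-1]:
            break
        if i in indices:
            frames.append(Image.fromarray(frame.to_ndarray(format="rgb24")))
    return [f.resize(size) for f in frames]

# frames = sample_and_resize("./examples/hd1.mp4")  # hypothetical usage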
app_high_res.py ADDED
@@ -0,0 +1,237 @@
+ import gradio as gr
+ import spaces
+ import os
+ import time
+ import json
+ import numpy as np
+ import av
+ import torch
+ from PIL import Image
+ import functools
+ from transformers import AutoProcessor, Idefics2ForConditionalGeneration
+ from models.conversation import conv_templates
+ from typing import List
+ processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096")
+ model = Idefics2ForConditionalGeneration.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096", device_map="auto", torch_dtype=torch.float16)
+ MAX_NUM_FRAMES = 24
+ conv_template = conv_templates["idefics_2"]
+
+ with open("./examples/all_subsets.json", 'r') as f:
+     examples = json.load(f)
+
+ for item in examples:
+     video_id = item['images'][0].split("_")[0]
+     item['images'] = [os.path.join("./examples", video_id, x) for x in item['images']]
+     item['video'] = os.path.join("./examples", item['video'])
+
+ with open("./examples/hd.json", 'r') as f:
+     hd_examples = json.load(f)
+
+ for item in hd_examples:
+     item['video'] = os.path.join("./examples", item['video'])
+
+ examples = hd_examples + examples
+
+ VIDEO_EVAL_PROMPT = """
+ Suppose you are an expert in judging and evaluating the quality of AI-generated videos,
+ please watch the following frames of a given video and see the text prompt for generating the video,
+ then give scores from 7 different dimensions:
+ (1) visual quality: the quality of the video in terms of clearness, resolution, brightness, and color
+ (2) object consistency, the consistency of objects or humans in video
+ (3) dynamic degree, the degree of dynamic changes
+ (4) motion smoothness, the smoothness of motion or movements
+ (5) text-to-video alignment, the alignment between the text prompt and the video content
+ (6) factual consistency, the consistency of the video content with the common-sense and factual knowledge
+ (7) overall score, the overall quality of the video
+ for each dimension, output a number from [1,2,3,4],
+ in which '1' is 'Bad', '2' is 'Average', '3' is 'Good', '4' is 'Perfect'
+ Here is an output example:
+ visual quality: 3
+ object consistency: 4
+ dynamic degree: 4
+ motion smoothness: 1
+ text-to-video alignment: 1
+ factual consistency: 2
+ overall score: 1
+
+ For this video, the text prompt is "{text_prompt}",
+ all the frames of video are as follows:
+
+ """
+ @spaces.GPU
+ def generate(text:str, images:List[Image.Image], history: List[dict], **kwargs):
+     global processor, model
+     model = model.to("cuda") if model.device.type != "cuda" else model
+     if not images:
+         images = None
+
+     user_role = conv_template.roles[0]
+     assistant_role = conv_template.roles[1]
+
+     idefics_2_message = []
+     cur_img_idx = 0
+     cur_vid_idx = 0
+     all_videos = [x for x in images if isinstance(x, list)]
+     flatten_images = []
+     for x in images:
+         if isinstance(x, list):
+             flatten_images.extend(x)
+         else:
+             flatten_images.append(x)
+
+     print(history)
+     for i, message in enumerate(history):
+         if message["role"] == user_role:
+             idefics_2_message.append({
+                 "role": user_role,
+                 "content": []
+             })
+             message_text = message["text"]
+             num_video_tokens_in_text = message_text.count("<video>")
+             if num_video_tokens_in_text > 0:
+                 for _ in range(num_video_tokens_in_text):
+                     message_text = message_text.replace("<video>", "<image> " * len(all_videos[cur_vid_idx]), 1)
+                     cur_vid_idx += 1
+             num_image_tokens_in_text = message_text.count("<image>")
+             if num_image_tokens_in_text > 0:
+                 sub_texts = [x.strip() for x in message_text.split("<image>")]
+                 if sub_texts[0]:
+                     idefics_2_message[-1]["content"].append({"type": "text", "text": sub_texts[0]})
+                 for sub_text in sub_texts[1:]:
+                     idefics_2_message[-1]["content"].append({"type": "image"})
+                     if sub_text:
+                         idefics_2_message.append({
+                             "role": user_role,
+                             "content": [{"type": "text", "text": sub_text}]
+                         })
+             else:
+                 idefics_2_message[-1]["content"].append({"type": "text", "text": message_text})
+         elif message["role"] == assistant_role:
+             if i == len(history) - 1 and not message["text"]:
+                 break
+             idefics_2_message.append({
+                 "role": assistant_role,
+                 "content": [{"type": "text", "text": message["text"]}]
+             })
+     if text:
+         assert idefics_2_message[-1]["role"] == assistant_role and not idefics_2_message[-1]["content"], "Internal error"
+         idefics_2_message.append({
+             "role": user_role,
+             "content": [{"type": "text", "text": text}]
+         })
+
+     print(idefics_2_message)
+     prompt = processor.apply_chat_template(idefics_2_message, add_generation_prompt=True)
+
+     images = [Image.open(x) if isinstance(x, str) else x for x in flatten_images]
+     inputs = processor(text=prompt, images=images, return_tensors="pt")
+     inputs = {k: v.to(model.device) for k, v in inputs.items()}
+     outputs = model.generate(**inputs, max_new_tokens=1024)
+     generated_text = processor.decode(outputs[0, inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
+     return generated_text
+
+
+ def read_video_pyav(container, indices):
+     '''
+     Decode the video with PyAV decoder.
+
+     Args:
+         container (av.container.input.InputContainer): PyAV container.
+         indices (List[int]): List of frame indices to decode.
+
+     Returns:
+         np.ndarray: np array of decoded frames of shape (num_frames, height, width, 3).
+     '''
+     frames = []
+     container.seek(0)
+     start_index = indices[0]
+     end_index = indices[-1]
+     for i, frame in enumerate(container.decode(video=0)):
+         if i > end_index:
+             break
+         if i >= start_index and i in indices:
+             frames.append(frame)
+     return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+
+ def eval_video(prompt, video:str):
+     container = av.open(video)
+
+     # sample uniformly 8 frames from the video
+     total_frames = container.streams.video[0].frames
+     if total_frames > MAX_NUM_FRAMES:
+         indices = np.arange(0, total_frames, total_frames / MAX_NUM_FRAMES).astype(int)
+     else:
+         indices = np.arange(total_frames)
+     video_frames = read_video_pyav(container, indices)
+
+     frames = [Image.fromarray(x) for x in video_frames]
+
+     eval_prompt = VIDEO_EVAL_PROMPT.format(text_prompt=prompt)
+     eval_prompt += "<video>"
+     user_role = conv_template.roles[0]
+     assistant_role = conv_template.roles[1]
+     chat_messages = [
+         {
+             "role": user_role,
+             "text": eval_prompt
+         },
+         {
+             "role": assistant_role,
+             "text": ""
+         }
+     ]
+     response = generate(None, [frames], chat_messages)
+     return response
+
+ def build_demo():
+     with gr.Blocks() as demo:
+         gr.Markdown("""
+ ## Video Evaluation
+ upload a video along with a text prompt when generating the video, this model will evaluate the video's quality from 7 different dimensions.
+ """)
+         with gr.Row():
+             video = gr.Video(width=500, label="Video")
+             with gr.Column():
+                 eval_prompt_template = gr.Textbox(VIDEO_EVAL_PROMPT.strip(' \n'), label="Evaluation Prompt Template", interactive=False, max_lines=26)
+                 video_prompt = gr.Textbox(label="Text Prompt", lines=1)
+                 with gr.Row():
+                     eval_button = gr.Button("Evaluate Video")
+                     clear_button = gr.ClearButton([video, video_prompt])
+                 eval_result = gr.Textbox(label="Evaluation result", interactive=False, lines=7)
+
+         eval_button.click(
+             eval_video, [video_prompt, video], [eval_result]
+         )
+
+         dummy_id = gr.Textbox("id", label="id", visible=False, min_width=50)
+         dummy_output = gr.Textbox("reference score", label="reference scores", visible=False, lines=7)
+
+         gr.Examples(
+             examples=
+             [
+                 [
+                     item['id'],
+                     item['prompt'],
+                     item['video'],
+                     item['conversations'][1]['value']
+                 ] for item in examples
+             ],
+             inputs=[dummy_id, video_prompt, video, dummy_output],
+         )
+
+         # gr.Markdown("""
+         # ## Citation
+         # ```
+         # @article{jiang2024mantis,
+         #   title={MANTIS: Interleaved Multi-Image Instruction Tuning},
+         #   author={Jiang, Dongfu and He, Xuan and Zeng, Huaye and Wei, Con and Ku, Max and Liu, Qian and Chen, Wenhu},
+         #   journal={arXiv preprint arXiv:2405.01483},
+         #   year={2024}
+         # }
+         # ```""")
+     return demo
+
+
+ if __name__ == "__main__":
+     demo = build_demo()
+     demo.launch(share=True)
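Since VIDEO_EVAL_PROMPT instructs the model to answer with one "dimension: score" line per dimension, the string returned by eval_video can be turned into numeric scores with a small parser. A minimal sketch under that assumption (the parse_scores helper is illustrative and not part of this commit):

import re
from typing import Dict

DIMENSIONS = [
    "visual quality", "object consistency", "dynamic degree",
    "motion smoothness", "text-to-video alignment",
    "factual consistency", "overall score",
]

def parse_scores(response: str) -> Dict[str, int]:
    # Pull "<dimension>: <1-4>" pairs out of the model's free-form reply.
    scores = {}
    for dim in DIMENSIONS:
        match = re.search(rf"{re.escape(dim)}\s*:\s*([1-4])", response, flags=re.IGNORECASE)
        if match:
            scores[dim] = int(match.group(1))
    return scores

# With the sample output shown in VIDEO_EVAL_PROMPT:
# parse_scores("visual quality: 3\nobject consistency: 4\n...") -> {"visual quality": 3, "object consistency": 4, ...}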
examples/1006309.mp4 CHANGED
Binary files a/examples/1006309.mp4 and b/examples/1006309.mp4 differ
 
examples/3005033.mp4 CHANGED
Binary files a/examples/3005033.mp4 and b/examples/3005033.mp4 differ
 
examples/7004180.mp4 CHANGED
Binary files a/examples/7004180.mp4 and b/examples/7004180.mp4 differ
 
examples/a400480.mp4 CHANGED
Binary files a/examples/a400480.mp4 and b/examples/a400480.mp4 differ
 
examples/a500010.mp4 CHANGED
Binary files a/examples/a500010.mp4 and b/examples/a500010.mp4 differ
 
examples/a500251.mp4 CHANGED
Binary files a/examples/a500251.mp4 and b/examples/a500251.mp4 differ
 
examples/b304986.mp4 CHANGED
Binary files a/examples/b304986.mp4 and b/examples/b304986.mp4 differ
 
examples/b402727.mp4 CHANGED
Binary files a/examples/b402727.mp4 and b/examples/b402727.mp4 differ
 
examples/b404675.mp4 CHANGED
Binary files a/examples/b404675.mp4 and b/examples/b404675.mp4 differ
 
examples/d401950.mp4 CHANGED
Binary files a/examples/d401950.mp4 and b/examples/d401950.mp4 differ
 
examples/d500506.mp4 CHANGED
Binary files a/examples/d500506.mp4 and b/examples/d500506.mp4 differ
 
examples/d500937.mp4 CHANGED
Binary files a/examples/d500937.mp4 and b/examples/d500937.mp4 differ
 
examples/hd.json ADDED
@@ -0,0 +1,62 @@
+ [
+     {
+         "id": "hd1",
+         "conversations": [
+             {
+                 "from": "human",
+                 "value": ""
+             },
+             {
+                 "from": "gpt",
+                 "value": ""
+             }
+         ],
+         "video": "hd1.mp4",
+         "prompt": "An indoor gym"
+     },
+     {
+         "id": "hd2",
+         "conversations": [
+             {
+                 "from": "human",
+                 "value": ""
+             },
+             {
+                 "from": "gpt",
+                 "value": ""
+             }
+         ],
+         "video": "hd2.mp4",
+         "prompt": "None"
+     },
+     {
+         "id": "hd3",
+         "conversations": [
+             {
+                 "from": "human",
+                 "value": ""
+             },
+             {
+                 "from": "gpt",
+                 "value": ""
+             }
+         ],
+         "video": "hd3.mp4",
+         "prompt": "A person barbecuing"
+     },
+     {
+         "id": "hd4",
+         "conversations": [
+             {
+                 "from": "human",
+                 "value": ""
+             },
+             {
+                 "from": "gpt",
+                 "value": ""
+             }
+         ],
+         "video": "hd4.mp4",
+         "prompt": "A child eating"
+     }
+ ]
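Entries in hd.json follow a fixed schema (id, a two-turn conversations stub, video filename, prompt) and their video paths are joined against ./examples when app_high_res.py loads them. A minimal sketch of appending a further HD example in that schema (the hd5 values below are placeholders, not files in this commit):

import json

new_example = {
    "id": "hd5",  # placeholder id
    "conversations": [
        {"from": "human", "value": ""},
        {"from": "gpt", "value": ""},
    ],
    "video": "hd5.mp4",  # expected to live under ./examples/
    "prompt": "A placeholder text prompt",
}

with open("./examples/hd.json", "r") as f:
    hd_examples = json.load(f)

hd_examples.append(new_example)

with open("./examples/hd.json", "w") as f:
    json.dump(hd_examples, f, indent=4)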
examples/hd1.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:057409b842da7f266df0f46639a729a69eff2819e1cd0b567815bdfd93b59343
+ size 97124
examples/hd2.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:970d1797c5e357672a3f2cfcfabcacb6eeced4032811551166cfcb7eb63a4814
+ size 311262
examples/hd3.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91ebff6f8e885d97bfddac3cd39c56957de269c552dc012fa711624860a7f1d6
+ size 223946
examples/hd4.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbd672b2647c473997ef2edca63607367e2768d8de8b0683d1b504977c305c49
+ size 3860683
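The four hd*.mp4 entries above are Git LFS pointer stubs rather than the videos themselves (hence the new *.mp4 rule in .gitattributes). A minimal check, under the assumption that a checkout may not have run git lfs pull yet, to tell a pointer stub from a real video (the helper name is illustrative):

def is_lfs_pointer(path: str) -> bool:
    # An LFS pointer is a tiny text file starting with the spec line shown above;
    # a real .mp4 starts with binary container data instead.
    prefix = b"version https://git-lfs.github.com/spec/v1"
    with open(path, "rb") as f:
        head = f.read(len(prefix))
    return head.startswith(prefix)

# print(is_lfs_pointer("./examples/hd1.mp4"))  # True means the real video still needs `git lfs pull`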
examples/r003679.mp4 CHANGED
Binary files a/examples/r003679.mp4 and b/examples/r003679.mp4 differ
 
examples/r004061.mp4 CHANGED
Binary files a/examples/r004061.mp4 and b/examples/r004061.mp4 differ
 
examples/r100916.mp4 CHANGED
Binary files a/examples/r100916.mp4 and b/examples/r100916.mp4 differ