DongfuJiang committed on
Commit 96112f7
1 Parent(s): 2827015
Files changed (3)
  1. README.md +1 -1
  2. app.py +0 -237
  3. app_regression.py +2 -2
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: green
 colorTo: yellow
 sdk: gradio
 sdk_version: 4.24.0
-app_file: app_generation.py
+app_file: app_regression.py
 pinned: false
 license: apache-2.0
 short_description: Multimodal Language Model
app.py DELETED
@@ -1,237 +0,0 @@
-import gradio as gr
-import spaces
-import os
-import time
-import json
-import numpy as np
-import av
-from PIL import Image
-import functools
-from transformers import AutoProcessor, Idefics2ForConditionalGeneration
-from models.conversation import conv_templates
-from typing import List
-processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-95k-mantis-2epoch_4096")
-model = Idefics2ForConditionalGeneration.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-95k-mantis-2epoch_4096", device_map="auto")
-MAX_NUM_FRAMES = 24
-conv_template = conv_templates["idefics_2"]
-
-with open("./examples/all_subsets.json", 'r') as f:
-    examples = json.load(f)
-
-for item in examples:
-    video_id = item['images'][0].split("_")[0]
-    item['images'] = [os.path.join("./examples", video_id, x) for x in item['images']]
-    item['video'] = os.path.join("./examples", item['video'])
-
-with open("./examples/hd.json", 'r') as f:
-    hd_examples = json.load(f)
-
-for item in hd_examples:
-    item['video'] = os.path.join("./examples", item['video'])
-
-examples = hd_examples + examples
-
-VIDEO_EVAL_PROMPT = """
-Suppose you are an expert in judging and evaluating the quality of AI-generated videos,
-please watch the following frames of a given video and see the text prompt for generating the video,
-then give scores from 7 different dimensions:
-(1) visual quality: the quality of the video in terms of clearness, resolution, brightness, and color
-(2) object consistency, the consistency of objects or humans in video
-(3) dynamic degree, the degree of dynamic changes
-(4) motion smoothness, the smoothness of motion or movements
-(5) text-to-video alignment, the alignment between the text prompt and the video content
-(6) factual consistency, the consistency of the video content with the common-sense and factual knowledge
-(7) overall score, the overall quality of the video
-for each dimension, output a number from [1,2,3], in which '1' is 'Bad', '2' is 'Average', '3' is 'Good'.
-Here is an output example:
-visual quality: 3
-object consistency: 2
-dynamic degree: 2
-motion smoothness: 1
-text-to-video alignment: 1
-factual consistency: 2
-overall score: 1
-
-For this video, the text prompt is "{text_prompt}",
-all the frames of video are as follows:
-
-"""
-@spaces.GPU
-def generate(text: str, images: List[Image.Image], history: List[dict], **kwargs):
-    global processor, model
-    model = model.to("cuda") if model.device.type != "cuda" else model
-    if not images:
-        images = None
-
-    user_role = conv_template.roles[0]
-    assistant_role = conv_template.roles[1]
-
-    idefics_2_message = []
-    cur_img_idx = 0
-    cur_vid_idx = 0
-    all_videos = [x for x in images if isinstance(x, list)]
-    flatten_images = []
-    for x in images:
-        if isinstance(x, list):
-            flatten_images.extend(x)
-        else:
-            flatten_images.append(x)
-
-    print(history)
-    for i, message in enumerate(history):
-        if message["role"] == user_role:
-            idefics_2_message.append({
-                "role": user_role,
-                "content": []
-            })
-            message_text = message["text"]
-            num_video_tokens_in_text = message_text.count("<video>")
-            if num_video_tokens_in_text > 0:
-                for _ in range(num_video_tokens_in_text):
-                    message_text = message_text.replace("<video>", "<image> " * len(all_videos[cur_vid_idx]), 1)
-                    cur_vid_idx += 1
-            num_image_tokens_in_text = message_text.count("<image>")
-            if num_image_tokens_in_text > 0:
-                sub_texts = [x.strip() for x in message_text.split("<image>")]
-                if sub_texts[0]:
-                    idefics_2_message[-1]["content"].append({"type": "text", "text": sub_texts[0]})
-                for sub_text in sub_texts[1:]:
-                    idefics_2_message[-1]["content"].append({"type": "image"})
-                    if sub_text:
-                        idefics_2_message.append({
-                            "role": user_role,
-                            "content": [{"type": "text", "text": sub_text}]
-                        })
-            else:
-                idefics_2_message[-1]["content"].append({"type": "text", "text": message_text})
-        elif message["role"] == assistant_role:
-            if i == len(history) - 1 and not message["text"]:
-                break
-            idefics_2_message.append({
-                "role": assistant_role,
-                "content": [{"type": "text", "text": message["text"]}]
-            })
-    if text:
-        assert idefics_2_message[-1]["role"] == assistant_role and not idefics_2_message[-1]["content"], "Internal error"
-        idefics_2_message.append({
-            "role": user_role,
-            "content": [{"type": "text", "text": text}]
-        })
-
-    print(idefics_2_message)
-    prompt = processor.apply_chat_template(idefics_2_message, add_generation_prompt=True)
-
-    images = [Image.open(x) if isinstance(x, str) else x for x in flatten_images]
-    inputs = processor(text=prompt, images=images, return_tensors="pt")
-    inputs = {k: v.to(model.device) for k, v in inputs.items()}
-    outputs = model.generate(**inputs, max_new_tokens=1024)
-    generated_text = processor.decode(outputs[0, inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
-    return generated_text
-
-
-def read_video_pyav(container, indices):
-    '''
-    Decode the video with PyAV decoder.
-
-    Args:
-        container (av.container.input.InputContainer): PyAV container.
-        indices (List[int]): List of frame indices to decode.
-
-    Returns:
-        np.ndarray: np array of decoded frames of shape (num_frames, height, width, 3).
-    '''
-    frames = []
-    container.seek(0)
-    start_index = indices[0]
-    end_index = indices[-1]
-    for i, frame in enumerate(container.decode(video=0)):
-        if i > end_index:
-            break
-        if i >= start_index and i in indices:
-            frames.append(frame)
-    return np.stack([x.to_ndarray(format="rgb24") for x in frames])
-
-def eval_video(prompt, video: str):
-    container = av.open(video)
-
-    # sample uniformly MAX_NUM_FRAMES frames from the video
-    total_frames = container.streams.video[0].frames
-    if total_frames > MAX_NUM_FRAMES:
-        indices = np.arange(0, total_frames, total_frames / MAX_NUM_FRAMES).astype(int)
-    else:
-        indices = np.arange(total_frames)
-    video_frames = read_video_pyav(container, indices)
-
-    frames = [Image.fromarray(x) for x in video_frames]
-    # resize to 256 x 256
-    frames = [x.resize((256, 256)) for x in frames]
-
-    eval_prompt = VIDEO_EVAL_PROMPT.format(text_prompt=prompt)
-    eval_prompt += "<video>"
-    user_role = conv_template.roles[0]
-    assistant_role = conv_template.roles[1]
-    chat_messages = [
-        {
-            "role": user_role,
-            "text": eval_prompt
-        },
-        {
-            "role": assistant_role,
-            "text": ""
-        }
-    ]
-    response = generate(None, [frames], chat_messages)
-    return response
-
-def build_demo():
-    with gr.Blocks() as demo:
-        gr.Markdown("""
-## Video Evaluation
-Upload a video along with the text prompt used to generate it; this model will evaluate the video's quality across 7 different dimensions.
-""")
-        with gr.Row():
-            video = gr.Video(width=500, label="Video")
-            with gr.Column():
-                eval_prompt_template = gr.Textbox(VIDEO_EVAL_PROMPT.strip(' \n'), label="Evaluation Prompt Template", interactive=False, max_lines=26)
-                video_prompt = gr.Textbox(label="Text Prompt", lines=1)
-                with gr.Row():
-                    eval_button = gr.Button("Evaluate Video")
-                    clear_button = gr.ClearButton([video, video_prompt])
-        eval_result = gr.Textbox(label="Evaluation result", interactive=False, lines=7)
-
-        eval_button.click(
-            eval_video, [video_prompt, video], [eval_result]
-        )
-
-        dummy_id = gr.Textbox("id", label="id", visible=False, min_width=50)
-        dummy_output = gr.Textbox("reference score", label="reference scores", visible=False, lines=7)
-
-        gr.Examples(
-            examples=[
-                [
-                    item['id'],
-                    item['prompt'],
-                    item['video'],
-                    item['conversations'][1]['value']
-                ] for item in examples
-            ],
-            inputs=[dummy_id, video_prompt, video, dummy_output],
-        )
-
-        # gr.Markdown("""
-        # ## Citation
-        # ```
-        # @article{jiang2024mantis,
-        #   title={MANTIS: Interleaved Multi-Image Instruction Tuning},
-        #   author={Jiang, Dongfu and He, Xuan and Zeng, Huaye and Wei, Con and Ku, Max and Liu, Qian and Chen, Wenhu},
-        #   journal={arXiv preprint arXiv:2405.01483},
-        #   year={2024}
-        # }
-        # ```""")
-    return demo
-
-
-if __name__ == "__main__":
-    demo = build_demo()
-    demo.launch(share=True)
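
The deleted eval_video() above decodes at most MAX_NUM_FRAMES evenly spaced frames with PyAV, resizes them to 256 x 256, and hands them to the chat model. A self-contained sketch of that sampling step, mirroring the deleted code (the sample_frames helper name is ours, not from the repo):

import av
import numpy as np
from PIL import Image

MAX_NUM_FRAMES = 24  # same cap as in the deleted app.py

def sample_frames(video_path: str, max_frames: int = MAX_NUM_FRAMES) -> list:
    container = av.open(video_path)
    total = container.streams.video[0].frames
    if total > max_frames:
        # evenly spaced indices over the whole clip, as in eval_video()
        indices = np.arange(0, total, total / max_frames).astype(int)
    else:
        indices = np.arange(total)
    wanted = set(indices.tolist())
    frames = []
    container.seek(0)
    for i, frame in enumerate(container.decode(video=0)):
        if i > indices[-1]:
            break
        if i in wanted:
            frames.append(frame.to_ndarray(format="rgb24"))
    container.close()
    # resize to 256 x 256, matching the deleted code
    return [Image.fromarray(f).resize((256, 256)) for f in frames]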
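Reading the scores back out of the generated text was the other half of the deleted pipeline: VIDEO_EVAL_PROMPT constrains the reply to lines like "visual quality: 3". A hypothetical parser for that output format (this helper is illustrative, not part of the repo):

import re

DIMENSIONS = [
    "visual quality", "object consistency", "dynamic degree",
    "motion smoothness", "text-to-video alignment",
    "factual consistency", "overall score",
]

def parse_scores(generated_text: str) -> dict:
    # Pull "dimension: N" pairs (N in 1..3) out of the model's free-form reply.
    scores = {}
    for dim in DIMENSIONS:
        m = re.search(rf"{re.escape(dim)}\s*:\s*([123])", generated_text, re.IGNORECASE)
        if m:
            scores[dim] = int(m.group(1))
    return scores

# e.g. parse_scores("visual quality: 3\noverall score: 1")
# -> {"visual quality": 3, "overall score": 1}
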
app_regression.py CHANGED
@@ -14,8 +14,8 @@ from models.conversation import conv_templates
 from typing import List
 
 
-processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-40k-mantis-2epoch_4096_regression")
-model = Idefics2ForSequenceClassification.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-40k-mantis-2epoch_4096_regression", torch_dtype=torch.bfloat16).eval()
+processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-refined-40k_4096_regression")
+model = Idefics2ForSequenceClassification.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-refined-40k_4096_regression", torch_dtype=torch.bfloat16).eval()
 
 MAX_NUM_FRAMES = 24
 conv_template = conv_templates["idefics_2"]
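
For context, the two changed lines load the refined regression checkpoint roughly as follows. This is a sketch: the import path for Idefics2ForSequenceClassification is not visible in this hunk and is assumed here, and the commented forward pass only illustrates how a sequence-classification head is typically called.

import torch
from transformers import AutoProcessor
# Assumed import path; the hunk does not show where this class is defined.
from models.idefics2 import Idefics2ForSequenceClassification

MODEL_ID = "Mantis-VL/mantis-8b-idefics2-video-eval-refined-40k_4096_regression"

processor = AutoProcessor.from_pretrained(MODEL_ID)
model = Idefics2ForSequenceClassification.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16
).eval()

# Sketch of scoring one clip (input keys assumed, not shown in this diff):
# inputs = processor(text=prompt, images=frames, return_tensors="pt")
# with torch.no_grad():
#     scores = model(**inputs).logits  # regression head returns numbers, not text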