import copy
import json
import logging
import os

import mmengine
import numpy as np
import torch
import torchvision.transforms as T
from mmengine import print_log
from mmengine.dist import master_only
from PIL import Image
from torchvision.transforms.functional import InterpolationMode

from xtuner.registry import BUILDER

from vlm.datasets.evaluation.base_eval_dataset import BaseEvalDataset
from .encode_fn import video_lisa_encode_multi_conv_fn

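# Question template used to turn each referring expression into a prompt.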
SEG_QUESTIONS = [
    "Please segment the object according to the description: {class_name}",
]

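# Answer templates containing the [SEG] token; kept for reference but not
# used here (the gpt turn is left empty at evaluation time).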
ANSWER_LIST = [
    "It is [SEG].",
    "Sure, [SEG].",
    "Sure, it is [SEG].",
    "Sure, the segmentation result is [SEG].",
    "[SEG].",
]


def multi_template_fn(conversations, template_map):
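    """Apply the chat template in place to every turn of each conversation.

    Every single-turn dict gets its formatted ``input`` (with the optional
    system prompt and ``SUFFIX``), a ``need_eos_token`` flag, and the
    separator string ``sep``.
    """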
    for conv in conversations:
        for i, single_turn_conversation in enumerate(conv):
            input_text = single_turn_conversation.get('input', '') or ''
            input_text = template_map.INSTRUCTION.format(input=input_text, round=i + 1)
            system = single_turn_conversation.get('system', '')
            if system:
                system = template_map.SYSTEM.format(system=system)
                input_text = system + input_text
            single_turn_conversation['input'] = input_text

            if template_map.get('SUFFIX', None):
                output_text = single_turn_conversation.get('output', '')
                output_text += template_map.SUFFIX
                single_turn_conversation['output'] = output_text

            # SUFFIX_AS_EOS is False ==> need_eos_token is True
            single_turn_conversation['need_eos_token'] = \
                not template_map.get('SUFFIX_AS_EOS', False)
            single_turn_conversation['sep'] = template_map.get('SEP', '')


class VideoRefSAM2EvalDataset(BaseEvalDataset):
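    """Evaluation dataset for referring video object segmentation (ReVOS-style).

    One sample bundles every referring expression of a video: each expression
    becomes its own single-turn conversation, and the frames are preprocessed
    for the VLM (and, optionally, for an extra grounding image processor).
    """
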
    IMAGENET_MEAN = (0.485, 0.456, 0.406)
    IMAGENET_STD = (0.229, 0.224, 0.225)
    IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>'
    IMG_START_TOKEN = '<img>'
    IMG_END_TOKEN = '</img>'

    FAST_IMG_CONTEXT_TOKEN = '<FAST_IMG_CONTEXT>'
    FAST_IMG_START_TOKEN = '<fast_img>'
    FAST_IMG_END_TOKEN = '</fast_img>'

    METAINFO: dict = dict(name='revos')

    def __init__(self,
                 image_folder,
                 expression_file,
                 mask_file,
                 extra_image_processor=None,
                 tokenizer=None,
                 offline_processed_text_folder=None,
                 template_map_fn=None,
                 max_length=2048,
                 lazy=True,
                 special_tokens=None,
                 # eval settings
                 num_frames=5,
                 # eval name
                 eval_name=None,
                 use_fast=False,
                 fast_pool_size=2,
    ):
        super().__init__()
        assert lazy is True
        self.tokenizer = BUILDER.build(tokenizer)
        assert offline_processed_text_folder or (expression_file and tokenizer)
        self.lazy = lazy

        self.max_length = max_length

        self.template_map = template_map_fn['template']

        if offline_processed_text_folder and expression_file:
            print_log(
                'Both `offline_processed_text_folder` and '
                '`expression_file` are set; the dataset will be loaded '
                'from `offline_processed_text_folder` '
                f'({offline_processed_text_folder})',
                logger='current',
                level=logging.WARNING)

        if offline_processed_text_folder is not None:
            raise NotImplementedError
        else:
            vid2metaid, metas, mask_dict = self.json_file_preprocess(expression_file, mask_file)
            self.vid2metaid = vid2metaid
            self.videos = list(self.vid2metaid.keys())
            self.mask_dict = mask_dict
            self.json_datas = metas
            self.text_data = metas

        self.image_folder = image_folder
        if extra_image_processor is not None:
            self.extra_image_processor = BUILDER.build(extra_image_processor)
        else:
            # keep the attribute defined so `__getitem__` can test it safely
            self.extra_image_processor = None
        self.down_ratio = 1
        self.repeats = 1

        self._system = ''

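        # 448 px frames with 14 px patches give 32x32 patches; the 0.5
        # downsample ratio leaves (32 * 0.5)^2 = 256 visual tokens per frame.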
        self.downsample_ratio = 0.5
        self.image_size = 448
        patch_size = 14
        self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2))

        self.transformer = T.Compose([
            T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
            T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC),
            T.ToTensor(),
            T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD)
        ])

        if special_tokens is not None:
            self.tokenizer.add_tokens(special_tokens, special_tokens=True)
        self.num_frames = num_frames

        self.use_fast = use_fast
        self.fast_pool_size = fast_pool_size

        # save to json
        if eval_name is None:
            eval_name = 'results'
        self.eval_name = eval_name

    def __len__(self):
        return len(self.vid2metaid) * self.repeats

    @property
    def modality_length(self):
        # every sample is assigned the same fixed nominal length
        return [10000] * len(self.vid2metaid)

    def real_len(self):
        return len(self.vid2metaid)

    def json_file_preprocess(self, expression_file, mask_file):
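        """Parse the expression JSON and the mask-annotation JSON.

        Returns ``(vid2metaid, metas, mask_dict)``: ``vid2metaid`` maps each
        video name to indexes into ``metas``, ``metas`` holds one dict per
        (video, expression) pair, and ``mask_dict`` is the raw mask
        annotation dict.
        """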
        # prepare expression annotation files
        with open(expression_file, 'r') as f:
            expression_datas = json.load(f)['videos']

        metas = []
        anno_count = 0  # serve as anno_id
        vid2metaid = {}
        for vid_name in expression_datas:
            vid_express_data = expression_datas[vid_name]

            vid_frames = sorted(vid_express_data['frames'])
            vid_len = len(vid_frames)

            exp_id_list = sorted(list(vid_express_data['expressions'].keys()))
            for exp_id in exp_id_list:
                exp_dict = vid_express_data['expressions'][exp_id]
                meta = {}
                meta['video'] = vid_name
                meta['exp'] = exp_dict['exp']  # str
                meta['mask_anno_id'] = exp_dict['anno_id']

                if 'obj_id' in exp_dict.keys():
                    meta['obj_id'] = exp_dict['obj_id']
                else:
                    meta['obj_id'] = [0, ]  # Ref-Youtube-VOS only has one object per expression
                meta['anno_id'] = [str(anno_count), ]
                anno_count += 1
                meta['frames'] = vid_frames
                meta['exp_id'] = exp_id

                meta['length'] = vid_len
                metas.append(meta)
                if vid_name not in vid2metaid:
                    vid2metaid[vid_name] = []
                vid2metaid[vid_name].append(len(metas) - 1)

        # process mask annotation files
        with open(mask_file, 'r') as f:
            mask_dict = json.load(f)

        return vid2metaid, metas, mask_dict

    def create_img_to_refs_mapping(self, refs_train):
        # group COCO-style refs by image id; not referenced in this file
        img2refs = {}
        for ref in refs_train:
            img2refs.setdefault(ref['image_id'], []).append(ref)
        return img2refs

    def dataset_map_fn(self, data_dict):
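        """Collect the frame paths of one video and build one conversation
        per referring expression."""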
        images = []

        len_frames = len(data_dict[0]['frames'])
        for object_info in data_dict:
            assert len_frames == len(object_info['frames'])
        selected_frame_indexes = range(len_frames)

        for selected_frame_index in selected_frame_indexes:
            frame_id = data_dict[0]['frames'][selected_frame_index]
            images.append(os.path.join(data_dict[0]['video'], frame_id + '.jpg'))
        num_frames = min(len(images), self.num_frames)
        num_fast_frames = len(images)

        # prepare text
        expressions = [object_info['exp'] for object_info in data_dict]
        # build one single-turn dialogue per expression
        text_dict = self.prepare_text(num_frames, expressions, num_image_tokens=self.patch_token,
                                      num_fast_frames=num_fast_frames)
        ret = {'images': images, 'video_masks': None, 'conversation': text_dict['conversation']}

        return ret

    def prepare_text(self, n_frames, expressions, num_image_tokens=256, num_fast_frames=0):
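        """Build the image-token prefix for ``n_frames`` frames (plus the fast
        branch, if enabled) and one single-turn conversation per expression."""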

        if self.use_fast:
            fast_frame_token_str = f'{self.FAST_IMG_START_TOKEN}' \
                                   f'{self.FAST_IMG_CONTEXT_TOKEN * num_fast_frames * self.fast_pool_size * self.fast_pool_size}' \
                                   f'{self.FAST_IMG_END_TOKEN}' + '\n'
        else:
            fast_frame_token_str = ''

        frame_token_str = f'{self.IMG_START_TOKEN}' \
                          f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \
                          f'{self.IMG_END_TOKEN}'

        questions = [SEG_QUESTIONS[0].format(class_name=exp) for exp in expressions]

        eval_conversation_list = []
        for question in questions:
            frame_tokens = (frame_token_str + '\n') * n_frames
            frame_tokens = frame_tokens.strip()
            qa_list = [
                {'from': 'human', 'value': fast_frame_token_str + frame_tokens + question},
                # empty gpt turn: the answer is generated at evaluation time
                {'from': 'gpt', 'value': ''},
            ]

            input_text = ''
            conversation = []
            for msg in qa_list:
                if msg['from'] == 'human':
                    input_text += msg['value']
                elif msg['from'] == 'gpt':
                    if msg['value'] == '':
                        conversation.append({'input': input_text})
                    else:
                        conversation.append({'input': input_text, 'output': msg['value']})
                    input_text = ''
                else:
                    raise NotImplementedError

            # add system information
            conversation[0].update({'system': self._system})
            eval_conversation_list.append(conversation)
        return {'conversation': eval_conversation_list}

    def __getitem__(self, index):
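        """Pack all referring expressions and frames of one video into a
        single evaluation sample."""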
        index = index % self.real_len()
        selected_video_objects = self.vid2metaid[self.videos[index]]
        video_objects_infos = [copy.deepcopy(self.text_data[idx]) for idx in selected_video_objects]

        selected_objects = video_objects_infos
        text_prompts = [copy.deepcopy(item['exp']) for item in selected_objects]

        data_dict = self.dataset_map_fn(selected_objects)
        multi_template_fn(data_dict['conversation'], self.template_map)
        result = video_lisa_encode_multi_conv_fn(
            data_dict, input_ids_with_output=False,
            tokenizer=self.tokenizer, max_length=self.max_length)
        data_dict.update(result)

        assert 'images' in data_dict.keys()
        pixel_values = []
        if self.use_fast:
            fast_pixel_values = []
        extra_pixel_values = []
        if data_dict.get('images', None) is not None:
            frames_files = data_dict['images']
            frames_files = [os.path.join(self.image_folder, frame_file) for frame_file in frames_files]

            ori_width, ori_height = None, None
            for frame_idx, frame_path in enumerate(frames_files):
                frame_image = Image.open(frame_path).convert('RGB')
                if ori_height is None:
                    ori_width, ori_height = frame_image.size
                else:
                    assert ori_width == frame_image.size[0]
                    assert ori_height == frame_image.size[1]

                if self.extra_image_processor is not None:
                    g_image = np.array(frame_image)  # for grounding
                    g_image = self.extra_image_processor.apply_image(g_image)
                    g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous()
                    extra_pixel_values.append(g_pixel_values)
                if self.use_fast:
                    frame_image = self.transformer(frame_image)
                    fast_pixel_values.append(frame_image)
                    if frame_idx < self.num_frames:
                        pixel_values.append(frame_image)
                else:
                    if frame_idx < self.num_frames:
                        frame_image = self.transformer(frame_image)
                        pixel_values.append(frame_image)

            pixel_values = torch.stack(pixel_values, dim=0) # (n_f, 3, h, w)
            data_dict['pixel_values'] = pixel_values
            if self.use_fast:
                fast_pixel_values = torch.stack(fast_pixel_values, dim=0)  # (n_f, 3, h, w)
                data_dict['fast_pixel_values'] = fast_pixel_values
            if self.extra_image_processor is not None:
                data_dict['g_pixel_values'] = extra_pixel_values
        else:
            data_dict['pixel_values'] = torch.zeros(0, 3, self.image_size, self.image_size)
            ori_width, ori_height = None, None

        data_dict['type'] = 'video'
        data_dict['video_id'] = index
        data_dict['text_prompts'] = text_prompts
        data_dict['image_folder'] = self.image_folder
        data_dict['ori_height'] = ori_height
        data_dict['ori_width'] = ori_width
        data_dict['id'] = index

        return data_dict

    @master_only
    def evaluate(self, results, work_dir):
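        """Collect the predicted masks per video and expression and dump them
        to ``{work_dir}/{eval_name}.json``; metrics are computed offline, so
        only a placeholder score is returned."""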
        final_results = {}
        for item in results:
            _id = item['id']
            vid_id = self.videos[_id]
            selected_video_objects = self.vid2metaid[vid_id]
            video_objects_infos = [copy.deepcopy(self.text_data[meta_id])
                                   for meta_id in selected_video_objects]
            text_prompts = [meta['exp'] for meta in video_objects_infos]
            exp_ids = [meta['exp_id'] for meta in video_objects_infos]
            final_results[vid_id] = {}
            assert len(text_prompts) == len(item['prediction_masks']), f"{len(text_prompts)}-----{len(item['prediction_masks'])}"
            for idt, text in enumerate(text_prompts):
                exp_id = exp_ids[idt]
                final_results[vid_id][exp_id] = {
                    'exp': text,
                    'prediction_masks': item['prediction_masks'][idt],
                }

        mmengine.dump(final_results, os.path.join(work_dir, f'{self.eval_name}.json'))
        return {"Dummy": 0}
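
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not a shipped config). The dataset is
# normally built from an xtuner/mmengine config via BUILDER; every path, the
# tokenizer spec, and the chat template below are hypothetical placeholders.
#
# from transformers import AutoTokenizer
# from xtuner.utils import PROMPT_TEMPLATE
#
# val_dataset = VideoRefSAM2EvalDataset(
#     image_folder='data/revos/JPEGImages',                # hypothetical path
#     expression_file='data/revos/meta_expressions.json',  # hypothetical path
#     mask_file='data/revos/mask_dict.json',               # hypothetical path
#     tokenizer=dict(type=AutoTokenizer.from_pretrained,
#                    pretrained_model_name_or_path='<hf-model-name>'),
#     template_map_fn=dict(template=PROMPT_TEMPLATE.internlm2_chat),
#     special_tokens=['[SEG]'],
#     num_frames=5,
# )
# ---------------------------------------------------------------------------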