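"""Build conversation-style training samples from VidSTG questions and VidOR annotations.

For each VidSTG record, the script locates the matching per-video VidOR
annotation JSON, extracts the question target's bounding boxes over the
temporal ground-truth span, normalizes them to integer percentages of the
frame size, and appends them to the caption to form a human/gpt
conversation pair. Progress is checkpointed to results.json and
current_index.txt so interrupted runs can resume.
"""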
import json
import os
import cv2
from tqdm import tqdm
import re

from generate_features import write_npy  # only used by the commented-out call in generate_prompt


def read_json_file(file_path):
    with open(file_path, 'r') as json_file:
        data = json.load(json_file)
    return data


def search_a_file_in_directory(directory, file_name):
    for root, dirs, files in os.walk(directory):
        if file_name in files:
            return os.path.join(root, file_name)
    raise FileNotFoundError(f"{file_name} not found in {directory}.")
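
# Note: this walks the whole tree on every call; with tens of thousands of
# records, pre-building a {file_name: path} index once would be much faster.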

def normalize_bbox(bbox, width, height):
    """
    Normalize the bbox
    """
    xmin, ymin, xmax, ymax = bbox
    xmin = int(round(xmin / width, 2) * 100)
    ymin = int(round(ymin / height, 2) * 100)
    xmax = int(round(xmax / width, 2) * 100)
    ymax = int(round(ymax / height, 2) * 100)
    
    return [xmin, ymin, xmax, ymax]
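
# A quick sanity check of the normalization (hypothetical numbers):
# normalize_bbox([320, 180, 640, 360], 1280, 720) == [25, 25, 50, 50]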


def write_frames(video_path, frame_dir, start_vid, end_vid, asked_frames):
    """
    Sample frames from [start_vid, end_vid] and write them as JPEGs.

    asked_frames == 0 writes every frame in the segment; otherwise
    asked_frames evenly spaced frames are written.
    """
    if asked_frames > (end_vid - start_vid):
        raise ValueError("asked_frames is greater than the number of frames in the segment")

    dir_name = os.path.splitext(os.path.basename(video_path))[0]

    base_path = os.path.join(frame_dir, dir_name)
    os.makedirs(base_path, exist_ok=True)

    if asked_frames == 0:
        step = 1
    else:
        step = (end_vid - start_vid) // asked_frames

    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_vid)
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imwrite(os.path.join(base_path, f'{count}.jpg'), frame)

        count += 1
        if count == asked_frames:
            break
        next_frame = start_vid + step * count
        if next_frame > end_vid:  # stop at the segment end when writing every frame
            break
        cap.set(cv2.CAP_PROP_POS_FRAMES, next_frame)

    cap.release()
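
# Example (hypothetical paths): write 8 evenly spaced frames from frames 0-120:
#   write_frames('vidor/train/video/0000/clip.mp4', 'frames/', 0, 120, 8)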



def generate_prompt(vid, vid_path, question, answer,
                    begin_fid, end_fid,
                    temporal_gt_begin_fid, temporal_gt_end_fid,
                    frame_count,
                    question_tid, vido_data, asked_frames=0):

    # the unique id = vid + begin_fid + end_fid
    vid = f"{vid}-{begin_fid}-{end_fid}"

    target = vido_data['subject/objects'][question_tid]['category']

    # collect the target's bbox for every frame in the temporal ground truth
    frame_to_bbox = {}
    for fid in range(temporal_gt_begin_fid, temporal_gt_end_fid + 1):
        start_bbox = None
        for a_dict in vido_data['trajectories'][fid]:
            if question_tid == a_dict['tid']:  # '==', not 'is': tids are plain ints
                start_bbox = a_dict['bbox']
                break
        if start_bbox is None:
            raise ValueError(f"bbox for tid {question_tid} not found at frame {fid}")
        # use explicit keys rather than relying on dict insertion order
        start_bbox = [start_bbox['xmin'], start_bbox['ymin'], start_bbox['xmax'], start_bbox['ymax']]
        frame_to_bbox[fid] = start_bbox
    
    # # verify the start_bbox
    # cap = cv2.VideoCapture(vid_path)
    # if not cap.isOpened():
    #     raise ValueError("Error opening video file")
    # cap.set(cv2.CAP_PROP_POS_FRAMES, int(temporal_gt_begin_fid))
    # ret, frame = cap.read()
    # if not ret:
    #     raise ValueError("Error reading video file")
    # cv2.rectangle(frame, (start_bbox[0], start_bbox[1]), 
    #                         (start_bbox[2], start_bbox[3]), (0, 0, 255), 1)
    # cv2.imshow('Frame', frame)
    # cv2.waitKey(0)
    
    
    width = vido_data['width']
    height = vido_data['height']
    
    # normalize the frame count
    original_frame_count = end_fid - begin_fid + 1
    
    if asked_frames > original_frame_count:
        if begin_fid + asked_frames < frame_count:
            new_end_fid = begin_fid + asked_frames - 1
            # print(f"adjusting end_fid from {end_fid} to {new_end_fid}")
            end_fid = new_end_fid
        else:
            # print(f"asked end_fid {begin_fid + asked_frames} is greater than frame_count {frame_count}")
            return None
    
    if asked_frames != 0:
        frame_count = asked_frames
    else:
        frame_count = original_frame_count
        
    # map each ground-truth frame id to its index in the sampled frame sequence
    normalize_frame_to_bbox = {}
    for fid in range(temporal_gt_begin_fid, temporal_gt_end_fid + 1):

        relative_start_fid = fid - begin_fid
        if asked_frames != 0:
            normalized_frame = int(asked_frames * (relative_start_fid / original_frame_count))
        else:
            # no resampling: keep the frame's position within the segment
            normalized_frame = relative_start_fid

        normalize_frame_to_bbox[normalized_frame] = normalize_bbox(frame_to_bbox[fid], width, height)
    
    # write_npy(vid_path, vid, begin_fid, end_fid, asked_frames)

    new_prompt = f' The {target} is at '
    bboxes = re.sub(r'\s+', '', str(normalize_frame_to_bbox))
    new_prompt += bboxes
    json_obj = {}
    json_obj["id"] = vid
    
    question_dict = {"from": "human", "value": "<video>\n"+question['description']}
    answer_dict = {"from": "gpt", "value": answer['description'] + new_prompt}
    json_obj["conversations"] = [question_dict, answer_dict]
    
    # token = {"<s0>": start_frame, "<e0>" : end_frame}
    json_obj["meta"] = {"asked_frames": asked_frames, "vid_path": vid_path, 
                        "begin_fid": begin_fid, "end_fid": end_fid, 
                        "temporal_gt_begin_fid": temporal_gt_begin_fid, "temporal_gt_end_fid": temporal_gt_end_fid}
    
    return json_obj
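
# The returned object looks roughly like (values illustrative):
# {
#   "id": "<vid>-<begin_fid>-<end_fid>",
#   "conversations": [
#     {"from": "human", "value": "<video>\n<question text>"},
#     {"from": "gpt",   "value": "<caption> The <target> is at {0:[25,25,50,50],...}"}
#   ],
#   "meta": {"asked_frames": ..., "vid_path": ..., ...}
# }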


def visualize_video(vid_path, begin_fid, end_fid, temporal_gt_begin_fid, temporal_gt_end_fid,
                    question_tid, answer_tid, vido_data):
    """
    visualize the video from begin_fid to end_fid
    """
    
    def on_trackbar(val):
        cap.set(cv2.CAP_PROP_POS_FRAMES, val)
        ret, frame = cap.read()
        if ret:
            cv2.imshow('Video', frame)
    
    # Load the video
    cap = cv2.VideoCapture(vid_path)
    
    # Check if video opened successfully
    if not cap.isOpened():
        print("Error opening video file")
        return

    cv2.namedWindow('Video')
    cv2.createTrackbar('Frame', 'Video', begin_fid, end_fid, on_trackbar)

    cap.set(cv2.CAP_PROP_POS_FRAMES, begin_fid)

    # Read until video is completed
    while cap.isOpened():

        current_fid = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
        ret, frame = cap.read()
        if ret:

            # clamp back if a seek landed before the segment start
            if current_fid < begin_fid:
                current_fid = begin_fid
                cap.set(cv2.CAP_PROP_POS_FRAMES, current_fid)

            cv2.setTrackbarPos('Frame', 'Video', current_fid)
            # show current frame number and total frame number
            cv2.putText(frame, f"frame: {current_fid}/{end_fid}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
            if temporal_gt_begin_fid <= current_fid <= temporal_gt_end_fid:
                cv2.putText(frame, f"temporal_gt: {temporal_gt_begin_fid}/{temporal_gt_end_fid}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
            elif current_fid < temporal_gt_begin_fid:
                cv2.putText(frame, f"start in {temporal_gt_begin_fid - current_fid}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
            elif current_fid > temporal_gt_end_fid:
                cv2.putText(frame, f"end {current_fid - temporal_gt_end_fid} frames before", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)

            # add bbox; note this indexes the frame's object list by tid, which
            # assumes tid matches the list position (missing entries raise
            # IndexError and are skipped below)
            try:
                question_bbox = vido_data['trajectories'][current_fid][question_tid]['bbox']
                #  BGR
                cv2.rectangle(frame, (question_bbox['xmin'], question_bbox['ymin']), 
                            (question_bbox['xmax'], question_bbox['ymax']), (0, 0, 255), 1)
                category = vido_data['subject/objects'][question_tid]['category']
                cv2.putText(frame, f"question_{category}", (question_bbox['xmin'], question_bbox['ymax']), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
            except IndexError:
                # print(f"question bbox not found: current_fid: {current_fid}")
                pass
                
            
            try:    
                answer_bbox = vido_data['trajectories'][current_fid][answer_tid]['bbox']
                cv2.rectangle(frame, (answer_bbox['xmin'], answer_bbox['ymin']),
                            (answer_bbox['xmax'], answer_bbox['ymax']), (0, 255, 0), 1)
                category = vido_data['subject/objects'][answer_tid]['category']
                cv2.putText(frame, f"answer_{category}", (answer_bbox['xmin'] + (answer_bbox['xmax'] - answer_bbox['xmin']) // 2, answer_bbox['ymin'] + (answer_bbox['ymax'] - answer_bbox['ymin']) // 2), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
            except IndexError:
                # print(f"answer bbox not found: current_fid: {current_fid}")
                pass
            
            
            # Display the resulting frame
            cv2.imshow('Video', frame)
            
            if current_fid >= end_fid:
                break
        
            # Wait for a key press
            key = cv2.waitKey(25) & 0xFF
            
            # If the 'p' key is pressed, pause the video
            if key == ord('p'):
                cv2.waitKey(-1)  # wait until any key is pressed
                
            # If the 'q' key is pressed, break from the loop
            elif key == ord('q'):
                break
        
        # Break the loop
        else: 
            break
    # When everything done, release the video capture object
    cap.release()
    cv2.destroyAllWindows()
    
def process_record(record, vidor_anno_path_base, vidor_path_base):
    all_results = []
    
    vid = record['vid']
    begin_fid = record['used_segment']['begin_fid']
    end_fid = record['used_segment']['end_fid']
    
    # temporal_gt_begin_fid can be 0, count 8456 out of 36202
    temporal_gt_begin_fid = record['temporal_gt']['begin_fid']
    # temporal_gt_end_fid can be frame_count, count 6622 out of 36202
    temporal_gt_end_fid = record['temporal_gt']['end_fid'] - 1
    
    frame_count = record['frame_count']
    # path to vidor annotation file
    vidor_anno_path = search_a_file_in_directory(vidor_anno_path_base, vid + '.json')
    
    # all other related data in this file
    vido_data = read_json_file(vidor_anno_path)
    
    # each record is expected to have exactly one caption
    if len(record['captions']) != 1:
        raise ValueError("expected exactly one caption")
    answer_tid = record['captions'][0]['target_id']


    # each record might have multiple questions
    for q_index, question in enumerate(record['questions']):
        
        question_tid = question['target_id']
        # question_bbox_at_begin_fid = vido_data['trajectories'][temporal_gt_begin_fid][question_tid]
        
        # path to video file
        vid_path = vido_data['video_path']
        vid_path = os.path.join(vidor_path_base, vid_path)
        
        # skip records without a valid temporal ground truth
        if temporal_gt_begin_fid == -1:
            continue
        
        result = generate_prompt(vid, vid_path, question, record['captions'][0],
                        begin_fid, end_fid,
                        temporal_gt_begin_fid, temporal_gt_end_fid,
                        frame_count,
                        question_tid, vido_data, asked_frames=100)

        if result is not None:
            all_results.append(result)
        # print(json.dumps(result, indent=4))
        # visualize_video(vid_path, begin_fid, end_fid, temporal_gt_begin_fid, temporal_gt_end_fid,
        #                 question_tid, answer_tid, vido_data)
            
    return all_results
            
def main():
    vidor_anno_path_base = 'vidor/train_annotation/training/'
    vidor_path_base = 'vidor/train/video'
    vidstg_data = read_json_file('VidSTG-Dataset/annotations/train_annotations.json')
    # vidor_data = read_json_file('vidor/train_annotation/training/0000/2401075277.json')

    all_results = []
    
    # remember the current index so an interrupted run can resume
    current_index = 0
    if os.path.exists('current_index.txt'):
        with open('current_index.txt', 'r') as f:
            current_index = int(f.read())
        with open('results.json', 'r') as json_file:
            all_results = json.load(json_file)
        
    
    for index, record in tqdm(enumerate(vidstg_data), total=len(vidstg_data)):
        
        if index < current_index:
            continue
        
        results = process_record(record, vidor_anno_path_base, vidor_path_base)
            
        if not results:
            continue
        all_results.extend(results)
        
        if index % 100 == 0:
            with open('results.json', 'w') as json_file:
                json.dump(all_results, json_file)
            with open('current_index.txt', 'w') as f:
                f.write(str(index))

    # final save: the in-loop checkpoint only fires every 100 records
    with open('results.json', 'w') as json_file:
        json.dump(all_results, json_file)

if __name__ == "__main__":
    main()