import json
import os
import re

import cv2
from tqdm import tqdm

from generate_features import write_npy
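
# Builds conversation-style training records from VidSTG question/caption
# annotations and the matching per-video VidOR trajectory annotations: each
# question becomes a human turn, and the caption answer is extended with the
# normalized bboxes of the question target over the temporal ground-truth
# segment. Results are written to results.json with a resumable checkpoint.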


def read_json_file(file_path):
    with open(file_path, 'r') as json_file:
        data = json.load(json_file)
    return data


def search_a_file_in_directory(directory, file_name):
    for root, dirs, files in os.walk(directory):
        if file_name in files:
            return os.path.join(root, file_name)
    raise FileNotFoundError(f"{file_name} not found in {directory}.")


def normalize_bbox(bbox, width, height):
    """
    Normalize a [xmin, ymin, xmax, ymax] bbox to integer percentages (0-100)
    of the frame width and height.
    """
    xmin, ymin, xmax, ymax = bbox
    xmin = int(round(xmin / width, 2) * 100)
    ymin = int(round(ymin / height, 2) * 100)
    xmax = int(round(xmax / width, 2) * 100)
    ymax = int(round(ymax / height, 2) * 100)

    return [xmin, ymin, xmax, ymax]
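
# Worked example (illustrative values): on a 640x480 frame,
# normalize_bbox([64, 48, 320, 240], 640, 480) -> [10, 10, 50, 50].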


def write_frames(video_path, frame_dir, start_vid, end_vid, asked_frames):
    """
    Write up to asked_frames evenly spaced frames of the segment
    [start_vid, end_vid] of video_path as JPEGs under frame_dir.
    """
    if asked_frames > (end_vid - start_vid):
        raise ValueError("asked_frames is greater than the number of frames in the segment")

    dir_name = os.path.splitext(os.path.basename(video_path))[0]

    base_path = os.path.join(frame_dir, dir_name)
    if not os.path.exists(base_path):
        os.makedirs(base_path)

    # asked_frames == 0 keeps every frame; otherwise sample evenly across the segment.
    if asked_frames == 0:
        step = 1
    else:
        step = (end_vid - start_vid) // asked_frames

    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_vid)
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imwrite(os.path.join(base_path, f'{count}.jpg'), frame)

        count += 1
        if count == asked_frames:
            break
        next_frame = start_vid + step * count
        cap.set(cv2.CAP_PROP_POS_FRAMES, next_frame)

    cap.release()
    cv2.destroyAllWindows()
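
# Illustrative call (paths and numbers are placeholders):
#   write_frames('clip.mp4', 'frames/', 0, 300, 30)
# would save 30 evenly spaced JPEGs to frames/clip/0.jpg ... frames/clip/29.jpg.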


def generate_prompt(vid, vid_path, question, answer,
                    begin_fid, end_fid,
                    temporal_gt_begin_fid, temporal_gt_end_fid,
                    frame_count,
                    question_tid, vido_data, asked_frames=0):
    """
    Build one conversation-style record: the question text as the human turn,
    and the caption answer extended with the normalized bboxes of the question
    target over the temporal ground truth as the gpt turn.
    """
    vid = vid + '-' + str(begin_fid) + '-' + str(end_fid)

    target = vido_data['subject/objects'][question_tid]['category']

    # Collect the bbox of the question target for every ground-truth frame.
    frame_to_bbox = {}
    for fid in range(temporal_gt_begin_fid, temporal_gt_end_fid + 1):
        start_bbox = vido_data['trajectories'][fid]
        found = False
        for a_dict in start_bbox:
            if question_tid == a_dict['tid']:
                start_bbox = a_dict['bbox']
                found = True
                break
        if not found:
            raise ValueError("start_bbox not found")
        start_bbox = list(start_bbox.values())
        frame_to_bbox[fid] = start_bbox

    width = vido_data['width']
    height = vido_data['height']

    original_frame_count = end_fid - begin_fid + 1

    # If more frames are requested than the segment contains, extend the
    # segment when the video is long enough; otherwise skip this record.
    if asked_frames > original_frame_count:
        if begin_fid + asked_frames < frame_count:
            new_end_fid = begin_fid + asked_frames - 1
            end_fid = new_end_fid
        else:
            return None

    if asked_frames != 0:
        frame_count = asked_frames
    else:
        frame_count = original_frame_count

    # Map each ground-truth frame onto its index among the sampled frames and
    # normalize its bbox.
    normalize_frame_to_bbox = {}
    for fid in range(temporal_gt_begin_fid, temporal_gt_end_fid + 1):
        relative_start_fid = fid - begin_fid
        if asked_frames != 0:
            normalized_frame = int(asked_frames * (relative_start_fid / original_frame_count))
        else:
            normalized_frame = relative_start_fid
        normalize_frame_to_bbox[normalized_frame] = normalize_bbox(frame_to_bbox[fid], width, height)

    new_prompt = ' The {} is at '.format(target)
    bboxes = re.sub(r'\s+', '', str(normalize_frame_to_bbox))
    new_prompt += bboxes

    json_obj = {}
    json_obj["id"] = vid

    question_dict = {"from": "human", "value": "<video>\n" + question['description']}
    answer_dict = {"from": "gpt", "value": answer['description'] + new_prompt}
    json_obj["conversations"] = [question_dict, answer_dict]

    json_obj["meta"] = {"asked_frames": asked_frames, "vid_path": vid_path,
                        "begin_fid": begin_fid, "end_fid": end_fid,
                        "temporal_gt_begin_fid": temporal_gt_begin_fid,
                        "temporal_gt_end_fid": temporal_gt_end_fid}

    return json_obj
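
# Shape of one emitted record (values shown are illustrative, not from a real run):
# {"id": "<vid>-<begin_fid>-<end_fid>",
#  "conversations": [
#      {"from": "human", "value": "<video>\n<question text>"},
#      {"from": "gpt", "value": "<caption text> The <category> is at {0:[12,8,45,60],1:[13,8,46,61],...}"}],
#  "meta": {"asked_frames": ..., "vid_path": ..., "begin_fid": ..., "end_fid": ...,
#           "temporal_gt_begin_fid": ..., "temporal_gt_end_fid": ...}}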


def visualize_video(vid_path, begin_fid, end_fid, temporal_gt_begin_fid, temporal_gt_end_fid,
                    question_tid, answer_tid, vido_data):
    """
    Visualize the video from begin_fid to end_fid, overlaying the temporal
    ground truth and the question/answer target bboxes.
    """

    def on_trackbar(val):
        cap.set(cv2.CAP_PROP_POS_FRAMES, val)
        ret, frame = cap.read()
        if ret:
            cv2.imshow('Video', frame)

    cap = cv2.VideoCapture(vid_path)

    if not cap.isOpened():
        print("Error opening video file")
        return

    total_frames = end_fid - begin_fid + 1
    cv2.namedWindow('Video')
    cv2.createTrackbar('Frame', 'Video', begin_fid, end_fid, on_trackbar)

    cap.set(cv2.CAP_PROP_POS_FRAMES, begin_fid)

    while cap.isOpened():
        current_fid = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
        ret, frame = cap.read()
        if ret:
            if current_fid < begin_fid:
                current_fid = begin_fid
                cap.set(cv2.CAP_PROP_POS_FRAMES, current_fid)

            cv2.setTrackbarPos('Frame', 'Video', current_fid)

            # Overlay the frame counter and the temporal ground-truth status.
            cv2.putText(frame, f"frame: {current_fid}/{end_fid}", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
            if temporal_gt_begin_fid <= current_fid <= temporal_gt_end_fid:
                cv2.putText(frame, f"temporal_gt: {temporal_gt_begin_fid}/{temporal_gt_end_fid}", (10, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
            elif current_fid < temporal_gt_begin_fid:
                cv2.putText(frame, f"start in {temporal_gt_begin_fid - current_fid}", (10, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
            elif current_fid > temporal_gt_end_fid:
                cv2.putText(frame, f"ended {current_fid - temporal_gt_end_fid} frames ago", (10, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)

            # Draw the question target bbox, if it is annotated for this frame.
            try:
                question_bbox = vido_data['trajectories'][current_fid][question_tid]['bbox']
                cv2.rectangle(frame, (question_bbox['xmin'], question_bbox['ymin']),
                              (question_bbox['xmax'], question_bbox['ymax']), (0, 0, 255), 1)
                category = vido_data['subject/objects'][question_tid]['category']
                cv2.putText(frame, f"question_{category}", (question_bbox['xmin'], question_bbox['ymax']),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
            except IndexError:
                pass

            # Draw the answer target bbox, if it is annotated for this frame.
            try:
                answer_bbox = vido_data['trajectories'][current_fid][answer_tid]['bbox']
                cv2.rectangle(frame, (answer_bbox['xmin'], answer_bbox['ymin']),
                              (answer_bbox['xmax'], answer_bbox['ymax']), (0, 255, 0), 1)
                category = vido_data['subject/objects'][answer_tid]['category']
                cv2.putText(frame, f"answer_{category}",
                            (answer_bbox['xmin'] + (answer_bbox['xmax'] - answer_bbox['xmin']) // 2,
                             answer_bbox['ymin'] + (answer_bbox['ymax'] - answer_bbox['ymin']) // 2),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
            except IndexError:
                pass

            cv2.imshow('Video', frame)

            if current_fid >= end_fid:
                break

            key = cv2.waitKey(25) & 0xFF
            if key == ord('p'):      # 'p' pauses until any key is pressed
                cv2.waitKey(-1)
            elif key == ord('q'):    # 'q' quits the viewer
                break
        else:
            break

    cap.release()
    cv2.destroyAllWindows()
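

# Debug helper usage (arguments below are placeholders, not real annotation values):
#   visualize_video('path/to/video.mp4', 0, 300, 50, 120, question_tid=0, answer_tid=1, vido_data=vido_data)
# Press 'p' to pause (any key resumes) and 'q' to quit.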
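

# Each VidSTG record is expected to provide: 'vid', 'used_segment' (begin/end_fid),
# 'temporal_gt' (a begin_fid of -1 marks a missing ground truth and the question is skipped),
# 'frame_count', exactly one caption in 'captions', and one or more 'questions'.
# The matching VidOR annotation file is looked up by video id under vidor_anno_path_base.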
def process_record(record, vidor_anno_path_base, vidor_path_base):
    all_results = []

    vid = record['vid']
    begin_fid = record['used_segment']['begin_fid']
    end_fid = record['used_segment']['end_fid']

    temporal_gt_begin_fid = record['temporal_gt']['begin_fid']
    # The annotated end_fid appears to be exclusive, hence the -1.
    temporal_gt_end_fid = record['temporal_gt']['end_fid'] - 1

    frame_count = record['frame_count']

    vidor_anno_path = search_a_file_in_directory(vidor_anno_path_base, vid + '.json')
    vido_data = read_json_file(vidor_anno_path)

    if len(record['captions']) >= 2:
        raise ValueError("more than one caption")
    answer_tid = record['captions'][0]['target_id']

    for q_index, question in enumerate(record['questions']):
        question_tid = question['target_id']

        vid_path = vido_data['video_path']
        vid_path = os.path.join(vidor_path_base, vid_path)

        # Skip questions without a temporal ground truth.
        if temporal_gt_begin_fid == -1:
            continue

        result = generate_prompt(vid, vid_path, question, record['captions'][0],
                                 begin_fid, end_fid,
                                 temporal_gt_begin_fid, temporal_gt_end_fid,
                                 frame_count,
                                 question_tid, vido_data, asked_frames=100)

        if result is not None:
            all_results.append(result)

    return all_results


def main():
    vidor_anno_path_base = 'vidor/train_annotation/training/'
    vidor_path_base = 'vidor/train/video'
    vidstg_data = read_json_file('VidSTG-Dataset/annotations/train_annotations.json')

    all_results = []

    # Resume from a previous run if a checkpoint exists.
    current_index = 0
    if os.path.exists('current_index.txt'):
        with open('current_index.txt', 'r') as f:
            current_index = int(f.read())
        with open('results.json', 'r') as json_file:
            all_results = json.load(json_file)

    for index, record in tqdm(enumerate(vidstg_data), total=len(vidstg_data)):
        if index < current_index:
            continue

        results = process_record(record, vidor_anno_path_base, vidor_path_base)
        if not results:
            continue
        all_results.extend(results)

        # Checkpoint the results and the current index every 100 records.
        if index % 100 == 0:
            with open('results.json', 'w') as json_file:
                json.dump(all_results, json_file)
            with open('current_index.txt', 'w') as f:
                f.write(str(index))

    # Final write so records produced after the last checkpoint are not lost.
    with open('results.json', 'w') as json_file:
        json.dump(all_results, json_file)


if __name__ == "__main__":
    main()