# Shikra_V/VidSTG/read_annotation.py
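"""
Convert VidSTG annotations into Shikra-style grounding conversations.

For every VidSTG record, this script looks up the matching VidOR annotation
file, resamples the annotated segment to a fixed number of frames, normalizes
the target's bounding boxes to a 0-100 coordinate grid, and emits one
human/gpt conversation per question. Progress is checkpointed to results.json
and current_index.txt so an interrupted run can resume.
"""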
import json
import os
import cv2
from tqdm import tqdm
import re
from generate_features import write_npy
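# NOTE: write_npy is only referenced by a commented-out call in
# generate_prompt below; image features are intentionally no longer
# written to disk.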


def read_json_file(file_path):
with open(file_path, 'r') as json_file:
data = json.load(json_file)
return data


def search_a_file_in_directory(directory, file_name):
for root, dirs, files in os.walk(directory):
if file_name in files:
return os.path.join(root, file_name)
raise FileNotFoundError(f"{file_name} not found in {directory}.")
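# e.g. search_a_file_in_directory('vidor/train_annotation/training/', '2401075277.json')
# walks the directory tree recursively and raises FileNotFoundError on a miss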


def normalize_bbox(bbox, width, height):
    """
    Normalize pixel bbox coordinates to integers on a 0-100 grid.
    """
xmin, ymin, xmax, ymax = bbox
    # round to the nearest integer percent; the previous
    # int(round(x, 2) * 100) could under-round due to float error
    # (e.g. 0.29 * 100 -> 28.999... -> 28)
    xmin = int(round(xmin / width * 100))
    ymin = int(round(ymin / height * 100))
    xmax = int(round(xmax / width * 100))
    ymax = int(round(ymax / height * 100))
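    # e.g. a bbox [160, 120, 480, 360] in a 640x480 frame maps to [25, 25, 75, 75]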
return [xmin, ymin, xmax, ymax]


def write_frames(video_path, frame_dir, start_vid, end_vid, asked_frames):
    """
    Sample asked_frames evenly spaced frames from a video segment and write
    them to <frame_dir>/<video name>/<index>.jpg; with asked_frames == 0,
    every frame from start_vid to the end of the video is written.
    """
    if asked_frames > (end_vid - start_vid):
        raise ValueError("asked_frames is greater than the number of frames in the segment")
dir_name = os.path.splitext(os.path.basename(video_path))[0]
base_path = os.path.join(frame_dir, dir_name)
    os.makedirs(base_path, exist_ok=True)
    # stride for evenly spaced sampling; asked_frames == 0 means keep every frame
    if asked_frames == 0:
        step = 1
    else:
        step = (end_vid - start_vid) // asked_frames
cap = cv2.VideoCapture(video_path)
cap.set(cv2.CAP_PROP_POS_FRAMES, start_vid)
count = 0
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
        cv2.imwrite(os.path.join(base_path, f'{count}.jpg'), frame)
count += 1
if count == asked_frames:
break
        # seek directly to the next evenly spaced frame instead of decoding
        # every intermediate frame
        next_frame = start_vid + step * count
        cap.set(cv2.CAP_PROP_POS_FRAMES, next_frame)
    cap.release()
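# Usage sketch (hypothetical paths): write 8 evenly spaced frames from the
# first 120 frames of a clip into frames/<clip name>/0.jpg .. 7.jpg:
#   write_frames('vidor/train/video/0000/2401075277.mp4', 'frames', 0, 120, 8)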


def generate_prompt(vid, vid_path, question, answer,
                    begin_fid, end_fid,
                    temporal_gt_begin_fid, temporal_gt_end_fid,
                    frame_count,
                    question_tid, vido_data, asked_frames=0):
    # the unique id is vid + begin_fid + end_fid
    vid = f"{vid}-{begin_fid}-{end_fid}"
target = vido_data['subject/objects'][question_tid]['category']
frame_to_bbox = {}
    for fid in range(temporal_gt_begin_fid, temporal_gt_end_fid + 1):
        trajectory = vido_data['trajectories'][fid]
        found = False
        for a_dict in trajectory:
            # compare with ==; `is` checks identity and only happens to
            # work for small interned ints
            if question_tid == a_dict['tid']:
                start_bbox = a_dict['bbox']
                found = True
                break
        if not found:
            raise ValueError("start_bbox not found")
        # use an explicit key order instead of relying on dict insertion order
        start_bbox = [start_bbox['xmin'], start_bbox['ymin'],
                      start_bbox['xmax'], start_bbox['ymax']]
        frame_to_bbox[fid] = start_bbox
# # verify the start_bbox
# cap = cv2.VideoCapture(vid_path)
# if not cap.isOpened():
# raise ValueError("Error opening video file")
# cap.set(cv2.CAP_PROP_POS_FRAMES, int(temporal_gt_begin_fid))
# ret, frame = cap.read()
# if not ret:
# raise ValueError("Error reading video file")
# cv2.rectangle(frame, (start_bbox[0], start_bbox[1]),
# (start_bbox[2], start_bbox[3]), (0, 0, 255), 1)
# cv2.imshow('Frame', frame)
# cv2.waitKey(0)
width = vido_data['width']
height = vido_data['height']
    # adjust the segment so it can be resampled to exactly asked_frames frames
original_frame_count = end_fid - begin_fid + 1
if asked_frames > original_frame_count:
if begin_fid + asked_frames < frame_count:
new_end_fid = begin_fid + asked_frames - 1
# print(f"adjusting end_fid from {end_fid} to {new_end_fid}")
end_fid = new_end_fid
else:
# print(f"asked end_fid {begin_fid + asked_frames} is greater than frame_count {frame_count}")
return None
    frame_count = asked_frames if asked_frames != 0 else original_frame_count
    # map each ground-truth fid to its index in the resampled segment
    normalize_frame_to_bbox = {}
    for fid in range(temporal_gt_begin_fid, temporal_gt_end_fid + 1):
        relative_start_fid = fid - begin_fid
        if asked_frames != 0:
            normalized_frame = int(asked_frames * (relative_start_fid / original_frame_count))
        else:
            # asked_frames == 0: assume no resampling and keep the frame's
            # position within the segment (the original code only handled
            # the resampled case)
            normalized_frame = relative_start_fid
        normalize_frame_to_bbox[normalized_frame] = normalize_bbox(frame_to_bbox[fid], width, height)
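    # NOTE: several ground-truth fids can map to the same resampled index;
    # the later fid's bbox simply overwrites the earlier one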
# write_npy(vid_path, vid, begin_fid, end_fid, asked_frames)
    new_prompt = f' The {target} is at '
bboxes = re.sub(r'\s+', '', str(normalize_frame_to_bbox))
new_prompt += bboxes
json_obj = {}
json_obj["id"] = vid
question_dict = {"from": "human", "value": "<video>\n"+question['description']}
answer_dict = {"from": "gpt", "value": answer['description'] + new_prompt}
json_obj["conversations"] = [question_dict, answer_dict]
# token = {"<s0>": start_frame, "<e0>" : end_frame}
json_obj["meta"] = {"asked_frames": asked_frames, "vid_path": vid_path,
"begin_fid": begin_fid, "end_fid": end_fid,
"temporal_gt_begin_fid": temporal_gt_begin_fid, "temporal_gt_end_fid": temporal_gt_end_fid}
return json_obj
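# A generated record looks roughly like this (values illustrative):
# {
#   "id": "2401075277-0-120",
#   "conversations": [
#     {"from": "human", "value": "<video>\n<question text>"},
#     {"from": "gpt", "value": "<answer text> The <target> is at {0:[25,25,75,75],...}"}
#   ],
#   "meta": {"asked_frames": 100, "vid_path": "...", ...}
# }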


def visualize_video(vid_path, begin_fid, end_fid, temporal_gt_begin_fid, temporal_gt_end_fid,
                    question_tid, answer_tid, vido_data):
    """
    Visualize the video from begin_fid to end_fid with ground-truth overlays.
    """
def on_trackbar(val):
cap.set(cv2.CAP_PROP_POS_FRAMES, val)
ret, frame = cap.read()
if ret:
cv2.imshow('Video', frame)
# Load the video
cap = cv2.VideoCapture(vid_path)
    # Check if the video opened successfully
    if not cap.isOpened():
        print("Error opening video file")
        return
cv2.namedWindow('Video')
cv2.createTrackbar('Frame', 'Video', begin_fid, end_fid, on_trackbar)
cap.set(cv2.CAP_PROP_POS_FRAMES, begin_fid)
# Read until video is completed
    while cap.isOpened():
current_fid = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
ret, frame = cap.read()
        if ret:
if current_fid < begin_fid:
current_fid = begin_fid
cap.set(cv2.CAP_PROP_POS_FRAMES, current_fid)
cv2.setTrackbarPos('Frame', 'Video', current_fid)
# show current frame number and total frame number
cv2.putText(frame, f"frame: {current_fid}/{end_fid}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
if current_fid >= temporal_gt_begin_fid and current_fid <= temporal_gt_end_fid:
cv2.putText(frame, f"temporal_gt: {temporal_gt_begin_fid}/{temporal_gt_end_fid}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
elif current_fid < temporal_gt_begin_fid:
cv2.putText(frame, f"start in {temporal_gt_begin_fid - current_fid}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
elif current_fid > temporal_gt_end_fid:
cv2.putText(frame, f"end {current_fid - temporal_gt_end_fid} frames before", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
            # draw bboxes; note that trajectories[fid] is indexed by list
            # position here, which assumes entries are ordered by tid
            # (generate_prompt matches on the 'tid' field instead)
try:
question_bbox = vido_data['trajectories'][current_fid][question_tid]['bbox']
# BGR
cv2.rectangle(frame, (question_bbox['xmin'], question_bbox['ymin']),
(question_bbox['xmax'], question_bbox['ymax']), (0, 0, 255), 1)
category = vido_data['subject/objects'][question_tid]['category']
cv2.putText(frame, f"question_{category}", (question_bbox['xmin'], question_bbox['ymax']), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
except IndexError:
# print(f"question bbox not found: current_fid: {current_fid}")
pass
try:
answer_bbox = vido_data['trajectories'][current_fid][answer_tid]['bbox']
cv2.rectangle(frame, (answer_bbox['xmin'], answer_bbox['ymin']),
(answer_bbox['xmax'], answer_bbox['ymax']), (0, 255, 0), 1)
category = vido_data['subject/objects'][answer_tid]['category']
cv2.putText(frame, f"answer_{category}", (answer_bbox['xmin'] + (answer_bbox['xmax'] - answer_bbox['xmin']) // 2, answer_bbox['ymin'] + (answer_bbox['ymax'] - answer_bbox['ymin']) // 2), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
except IndexError:
# print(f"answer bbox not found: current_fid: {current_fid}")
pass
# Display the resulting frame
cv2.imshow('Video', frame)
if current_fid >= end_fid:
break
# Wait for a key press
key = cv2.waitKey(25) & 0xFF
# If the 'p' key is pressed, pause the video
if key == ord('p'):
cv2.waitKey(-1) # wait until any key is pressed
# If the 'q' key is pressed, break from the loop
elif key == ord('q'):
break
# Break the loop
else:
break
# When everything done, release the video capture object
cap.release()
cv2.destroyAllWindows()
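# Viewer controls: 'p' pauses until any key is pressed, 'q' quits; the red
# box marks the question target and the green box the answer target.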


def process_record(record, vidor_anno_path_base, vidor_path_base):
all_results = []
vid = record['vid']
begin_fid = record['used_segment']['begin_fid']
end_fid = record['used_segment']['end_fid']
# temporal_gt_begin_fid can be 0, count 8456 out of 36202
temporal_gt_begin_fid = record['temporal_gt']['begin_fid']
# temporal_gt_end_fid can be frame_count, count 6622 out of 36202
temporal_gt_end_fid = record['temporal_gt']['end_fid'] - 1
frame_count = record['frame_count']
# path to vidor annotation file
vidor_anno_path = search_a_file_in_directory(vidor_anno_path_base, vid + '.json')
# all other related data in this file
vido_data = read_json_file(vidor_anno_path)
# each record has only one caption
if len(record['captions']) >= 2:
raise ValueError("more than one captions")
answer_tid = record['captions'][0]['target_id']
    # each record might have multiple questions
for q_index, question in enumerate(record['questions']):
question_tid = question['target_id']
# question_bbox_at_begin_fid = vido_data['trajectories'][temporal_gt_begin_fid][question_tid]
# path to video file
vid_path = vido_data['video_path']
vid_path = os.path.join(vidor_path_base, vid_path)
if temporal_gt_begin_fid == -1:
continue
result = generate_prompt(vid, vid_path, question, record['captions'][0],
begin_fid, end_fid,
temporal_gt_begin_fid, temporal_gt_end_fid,
frame_count,
                                 question_tid, vido_data, asked_frames=100)
if result is not None:
all_results.append(result)
# print(json.dumps(result, indent=4))
# visualize_video(vid_path, begin_fid, end_fid, temporal_gt_begin_fid, temporal_gt_end_fid,
# question_tid, answer_tid, vido_data)
return all_results
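# process_record yields one conversation dict per question in the record;
# records whose temporal ground truth is missing (begin_fid == -1) are
# skipped, and generate_prompt returns None when the segment cannot be
# stretched to asked_frames frames.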


def main():
vidor_anno_path_base = 'vidor/train_annotation/training/'
vidor_path_base = 'vidor/train/video'
vidstg_data = read_json_file('VidSTG-Dataset/annotations/train_annotations.json')
# vidor_data = read_json_file('vidor/train_annotation/training/0000/2401075277.json')
all_results = []
    # remember the current index so an interrupted run can resume
    current_index = 0
if os.path.exists('current_index.txt'):
with open('current_index.txt', 'r') as f:
current_index = int(f.read())
with open('results.json', 'r') as json_file:
all_results = json.load(json_file)
for index, record in tqdm(enumerate(vidstg_data), total=len(vidstg_data)):
if index < current_index:
continue
results = process_record(record, vidor_anno_path_base, vidor_path_base)
all_results.extend(results)
        # checkpoint progress every 100 records
        if index % 100 == 0:
            with open('results.json', 'w') as json_file:
                json.dump(all_results, json_file)
            with open('current_index.txt', 'w') as f:
                # this index has already been processed, so resume at index + 1
                f.write(str(index + 1))
    # final flush so records processed after the last checkpoint are kept
    with open('results.json', 'w') as json_file:
        json.dump(all_results, json_file)
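# Resume semantics: rerunning the script reloads results.json and continues
# from the index stored in current_index.txt; delete both files to start fresh.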
if __name__ == "__main__":
main()