|
import os |
|
import cv2 |
|
import json |
|
|
|
import imageio |
|
|
|
import matplotlib.pyplot as plt |
|
|
|
|
|
# Dataset layout: the video tree and the annotation tree sit side by side
# under a single dataset root.
base_path = "/home/datasets/vidor"
base_vid_path = "train/video"
base_ann_path = "train_annotation/training"

# Root directory for the rendered (annotated) output videos.
output_dir = 'visual_output'

# Annotation JSON for the single video this script visualizes
# (group "0000", video id 2401075277).
ann_path = os.path.join(base_path, base_ann_path, "0000/2401075277.json")
|
|
|
# Bail out early with a readable message instead of a traceback when the
# annotation file is absent.
if not os.path.exists(ann_path):
    print(f"Error: Annotation file not found at path: {ann_path}")
    exit()

# Parse the VidOR annotation JSON once; everything below reads from this dict.
with open(ann_path) as handle:
    annotation_data = json.load(handle)
|
|
|
# Resolve the source video file relative to the dataset root; the annotation
# stores only the path suffix (e.g. "0000/2401075277.mp4").
video_path_ = annotation_data["video_path"]
video_path = os.path.join(base_path, base_vid_path, video_path_)
print(video_path)

# Abort with a readable message rather than a traceback if the video is missing.
if not os.path.exists(video_path):
    print(f"Error: Video file not found at path: {video_path}")
    exit()

video_path = os.path.abspath(video_path)
print(f"Video path: {video_path}")

# Basic video properties recorded alongside the annotation.
fps, frame_count = annotation_data["fps"], annotation_data["frame_count"]
width, height = annotation_data["width"], annotation_data["height"]

# tid -> category label for every annotated subject/object in this video.
subject_objects = {
    entry["tid"]: entry["category"]
    for entry in annotation_data["subject/objects"]
}
# Per-frame lists of {tid, bbox} records, indexed by frame number.
trajectories = annotation_data["trajectories"]
# Relations may be absent from an annotation file; default to "none".
relation_instances = annotation_data.get("relation_instances", [])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Mirror the annotation's group/video layout under the output root.
# NOTE(review): the [:4] / [5:] slicing assumes video_path_ always looks like
# "NNNN/<file>" with a 4-character group id — confirm against the dataset.
output_dir = os.path.join(output_dir, video_path_[:4])
print(output_dir)
output_path = os.path.join(output_dir, video_path_[5:])
print(output_path)

# Create the per-group output directory on first use.
if os.path.exists(output_dir):
    print(f"Directory '{output_dir}' already exists.")
else:
    os.makedirs(output_dir)
    print(f"Directory '{output_dir}' created.")
|
|
|
# Decode the source video frame-by-frame and re-encode an annotated copy.
reader = imageio.get_reader(video_path, 'ffmpeg')
writer = imageio.get_writer(output_path, fps=fps)

frame_idx = 0

for frame in reader:
    # The annotation only covers frame_count frames; stop there.
    if frame_idx >= frame_count:
        break

    # imageio yields RGB; OpenCV drawing primitives expect BGR.
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

    # Anchor point for relation captions: the last object box drawn on THIS
    # frame. Reset each frame — the original code reused leftovers from the
    # previous frame and raised NameError when the first frame had no objects.
    tid = xmin = ymin = None

    # Draw every annotated object box present on this frame.
    if frame_idx < len(trajectories):
        for obj in trajectories[frame_idx]:
            tid = obj["tid"]
            bbox = obj["bbox"]
            category = subject_objects.get(tid, "unknown")

            xmin, ymin, xmax, ymax = bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]
            cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
            cv2.putText(frame, category, (xmin, ymin - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    # Caption relations that are active on this frame and involve the anchor
    # object. Skip entirely when no box was drawn (no anchor to attach to).
    if tid is not None:
        for relation in relation_instances:
            subject_tid = relation["subject_tid"]
            object_tid = relation["object_tid"]
            predicate = relation["predicate"]
            begin_fid = relation["begin_fid"]
            end_fid = relation["end_fid"]

            # end_fid is exclusive, matching the original half-open test.
            if begin_fid <= frame_idx < end_fid and tid in (subject_tid, object_tid):
                # Dead code removed here: the original tuple-unpacked the
                # subject/object bbox DICTS (yielding key strings, not
                # coordinates) into variables that were never used.
                text = f"{subject_objects.get(subject_tid, 'unknown')} {predicate} {subject_objects.get(object_tid, 'unknown')}"
                # Black backing rectangle sized to the rendered text, then the
                # caption itself, both offset from the anchor box corner.
                text_size, _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 5)
                cv2.rectangle(frame, (xmin + 10, ymin + 20 - text_size[1]), (xmin + 10 + text_size[0], ymin + 30), (0, 0, 0), -1)
                cv2.putText(frame, text, (xmin + 10, ymin + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)

    # Back to RGB for the imageio encoder.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    writer.append_data(frame)
    frame_idx += 1

reader.close()
writer.close()
# Bug fix: the original print lacked the f-prefix and emitted the literal
# text "{output_path}".
print(f'Annotated video saved to {output_path}')
|
|