first commit
- VidOR/bbox_visual.py +123 -0
- VidSTG/read_annotation.py +99 -0
- readme.md +47 -0
VidOR/bbox_visual.py
ADDED
@@ -0,0 +1,123 @@
# Render VidOR ground-truth bounding boxes and relation labels onto a video
# and save the annotated result.
import os
import cv2
import json

import imageio

import matplotlib.pyplot as plt


base_path = "/home/datasets/vidor"
base_vid_path = "train/video"
base_ann_path = "train_annotation/training"

output_dir = 'visual_output'

ann_path = os.path.join(base_path, base_ann_path, "0000/2401075277.json")

if not os.path.exists(ann_path):
    print(f"Error: Annotation file not found at path: {ann_path}")
    exit()

# Load annotation data
with open(ann_path) as f:
    annotation_data = json.load(f)

video_path_ = annotation_data["video_path"]
video_path = os.path.join(base_path, base_vid_path, video_path_)
print(video_path)

if not os.path.exists(video_path):
    print(f"Error: Video file not found at path: {video_path}")
    exit()

# Convert relative video path to absolute path
video_path = os.path.abspath(video_path)
print(f"Video path: {video_path}")

fps = annotation_data["fps"]
frame_count = annotation_data["frame_count"]

width = annotation_data["width"]
height = annotation_data["height"]

subject_objects = {obj["tid"]: obj["category"] for obj in annotation_data["subject/objects"]}
trajectories = annotation_data["trajectories"]
relation_instances = annotation_data.get("relation_instances", [])


# Open video
# cap = cv2.VideoCapture(video_path, cv2.CAP_FFMPEG)
# if not cap.isOpened():
#     print(f"Error: Could not open video at path: {video_path}")
#     exit()


# Build the output path, mirroring the <folder>/<video>.mp4 layout of the dataset
output_dir = os.path.join(output_dir, video_path_[:4])
print(output_dir)
output_path = os.path.join(output_dir, video_path_[5:])
print(output_path)

if not os.path.exists(output_dir):
    # Create the directory if it does not exist
    os.makedirs(output_dir)
    print(f"Directory '{output_dir}' created.")
else:
    print(f"Directory '{output_dir}' already exists.")

reader = imageio.get_reader(video_path, 'ffmpeg')
writer = imageio.get_writer(output_path, fps=fps)

frame_idx = 0

for frame in reader:
    if frame_idx >= frame_count:
        break

    # Convert frame to OpenCV format
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

    # Draw bounding boxes for the current frame
    if frame_idx < len(trajectories):
        for obj in trajectories[frame_idx]:
            tid = obj["tid"]
            bbox = obj["bbox"]
            category = subject_objects.get(tid, "unknown")

            xmin, ymin, xmax, ymax = bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]
            cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
            cv2.putText(frame, category, (xmin, ymin - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # Draw relation text for relations that involve this object in this frame
            for relation in relation_instances:
                subject_tid = relation["subject_tid"]
                object_tid = relation["object_tid"]
                predicate = relation["predicate"]
                begin_fid = relation["begin_fid"]
                end_fid = relation["end_fid"]

                if frame_idx >= begin_fid and frame_idx < end_fid and tid in [subject_tid, object_tid]:
                    subject_bbox = [box["bbox"] for box in trajectories[begin_fid] if box["tid"] == subject_tid][0]
                    object_bbox = [box["bbox"] for box in trajectories[begin_fid] if box["tid"] == object_tid][0]

                    # Top-left corners of the subject/object boxes (the label itself is anchored to the current box)
                    subject_x, subject_y = subject_bbox["xmin"], subject_bbox["ymin"]
                    object_x, object_y = object_bbox["xmin"], object_bbox["ymin"]

                    text = f"{subject_objects.get(subject_tid, 'unknown')} {predicate} {subject_objects.get(object_tid, 'unknown')}"
                    text_size, _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 5)
                    cv2.rectangle(frame, (xmin + 10, ymin + 20 - text_size[1]), (xmin + 10 + text_size[0], ymin + 30), (0, 0, 0), -1)
                    cv2.putText(frame, text, (xmin + 10, ymin + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)

    # Convert frame back to imageio format
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Write the frame to the output video
    writer.append_data(frame)
    frame_idx += 1

reader.close()
writer.close()
print(f'Annotated video saved to {output_path}')
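For orientation, the annotation file consumed above looks roughly like the sketch below. This is a minimal illustration restricted to the keys the script actually reads ("video_path", "fps", "frame_count", "width", "height", "subject/objects", "trajectories", "relation_instances"); the values are placeholders, not taken from a real VidOR file.

```python
# Illustrative shape of a VidOR annotation JSON, restricted to the keys
# bbox_visual.py accesses above; all values are placeholders.
example_annotation = {
    "video_path": "0000/2401075277.mp4",
    "fps": 29.97,
    "frame_count": 2,
    "width": 640,
    "height": 360,
    # one entry per annotated object, keyed later by its track id (tid)
    "subject/objects": [
        {"tid": 0, "category": "adult"},
        {"tid": 1, "category": "dog"},
    ],
    # one inner list per frame; each element is one object's box in that frame
    "trajectories": [
        [
            {"tid": 0, "bbox": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220}},
            {"tid": 1, "bbox": {"xmin": 200, "ymin": 150, "xmax": 260, "ymax": 210}},
        ],
        [
            {"tid": 0, "bbox": {"xmin": 12, "ymin": 21, "xmax": 112, "ymax": 221}},
            {"tid": 1, "bbox": {"xmin": 198, "ymin": 151, "xmax": 258, "ymax": 211}},
        ],
    ],
    # relations between track ids over a frame range [begin_fid, end_fid)
    "relation_instances": [
        {"subject_tid": 0, "object_tid": 1, "predicate": "next_to",
         "begin_fid": 0, "end_fid": 2},
    ],
}
```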
VidSTG/read_annotation.py
ADDED
@@ -0,0 +1,99 @@
# Read a VidSTG annotation entry, locate the matching VidOR annotation and
# video, and play back the temporally grounded segment.
import json
import os
import cv2
import pprint
import imageio
import matplotlib.pyplot as plt


def read_json_file(file_path):
    with open(file_path, 'r') as json_file:
        data = json.load(json_file)
    return data


def search_a_file_in_directory(directory, file_name):
    for root, dirs, files in os.walk(directory):
        if file_name in files:
            return os.path.join(root, file_name)
    return None


vidor_anno_path_base = 'vidor/train_annotation/training/'
vidor_path_base = 'vidor/train/video'

# vidor_data = read_json_file('vidor/train_annotation/training/0000/2401075277.json')
vidstg_data = read_json_file('VidSTG-Dataset/annotations/train_annotations.json')

# print(json.dumps(vidstg_data, indent=4))


sequence_id = 4

# Find the video path via the VidOR annotations
vid = vidstg_data[sequence_id]['vid']
begin_fid = vidstg_data[sequence_id]['used_segment']['begin_fid']
end_fid = vidstg_data[sequence_id]['used_segment']['end_fid']
captions = vidstg_data[sequence_id]['captions']
questions = vidstg_data[sequence_id]['questions']
print(f"vid: {vid}, caption: {captions}, question: {questions}")

temporal_gt_begin_fid = vidstg_data[sequence_id]['temporal_gt']['begin_fid']
temporal_gt_end_fid = vidstg_data[sequence_id]['temporal_gt']['end_fid']

print(f"begin_fid: {begin_fid}, end_fid: {end_fid}")
print(f"temporal_gt_begin_fid: {temporal_gt_begin_fid}, temporal_gt_end_fid: {temporal_gt_end_fid}")

vidor_anno_path = search_a_file_in_directory(vidor_anno_path_base, vid + '.json')

if vidor_anno_path:
    vido_data = read_json_file(vidor_anno_path)

    vid_path = vido_data['video_path']
    vid_path = os.path.join(vidor_path_base, vid_path)
    print(vid_path)

    # Load the video
    cap = cv2.VideoCapture(vid_path)

    # Check if the video opened successfully
    if not cap.isOpened():
        print("Error opening video file")

    # Jump to the first frame of the temporal ground truth
    cap.set(cv2.CAP_PROP_POS_FRAMES, temporal_gt_begin_fid)

    # Read until the end of the grounded segment is reached
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            current_fid = cap.get(cv2.CAP_PROP_POS_FRAMES)
            print(f"Current frame id: {current_fid}")

            # Display the resulting frame
            cv2.imshow('Frame', frame)

            if current_fid >= temporal_gt_end_fid:
                break

            # Press Q on the keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        else:
            # Break the loop when no frame could be read
            break

    # When everything is done, release the video capture object
    cap.release()

    # Close all the frames
    cv2.destroyAllWindows()
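Similarly, one entry of train_annotations.json carries at least the fields read above. The sketch below is illustrative only, restricted to the keys the script touches ("vid", "used_segment", "temporal_gt", "captions", "questions"); the values and the caption wording are placeholders.

```python
# Illustrative shape of a single VidSTG annotation entry, restricted to the
# keys read_annotation.py accesses above; all values are placeholders.
example_entry = {
    "vid": "2401075277",  # VidOR video id; "<vid>.json" is searched for under vidor/train_annotation/
    "used_segment": {"begin_fid": 0, "end_fid": 180},  # frame range of the clip used by VidSTG
    "temporal_gt": {"begin_fid": 30, "end_fid": 120},  # frame range the sentence is grounded to
    "captions": [{"description": "there is an adult patting a dog"}],
    "questions": [],
}
```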
readme.md
ADDED
@@ -0,0 +1,47 @@
### Scripts for processing the datasets

- `VidOR/bbox_visual.py`: renders the ground-truth bounding boxes and relation labels from a VidOR annotation file onto the corresponding video and saves the result under `visual_output/`.
- `VidSTG/read_annotation.py`: reads an entry from the VidSTG train annotations, locates the matching VidOR annotation and video, and plays back the temporally grounded segment.

Directory structure for the VidOR dataset:

```
vidor
├── train
│   └── video
│       ├── 0000
│       ├── 0001
│       ├── 0002
├── train_annotation
│   └── training
│       ├── 0000
│       ├── 0001
│       ├── 0002
│       ├── 0003
├── validation
│   └── video
│       ├── 0001
│       ├── 0004
│       ├── 0005
├── validation_annotation
│   └── validation
│       ├── 0001
│       ├── 0004
```

Directory structure for the VidSTG dataset:

```
VidSTG-Dataset
├── annotations
│   ├── read_annotation.py
│   ├── test_annotations.json
│   ├── test_files.json
│   ├── train_annotations.json
│   ├── train_files.json
│   ├── val_annotations.json
│   └── val_files.json
├── example.jpg
└── README.md
```
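Both scripts hard-code their dataset locations: `VidOR/bbox_visual.py` uses the absolute base path `/home/datasets/vidor`, while `VidSTG/read_annotation.py` expects the relative paths listed below. A quick check like this sketch (paths copied from `read_annotation.py`; adjust them to your setup) can confirm the layout before running either script:

```python
import os

# Paths as hard-coded in VidSTG/read_annotation.py; adjust for your machine.
expected = [
    "vidor/train/video",
    "vidor/train_annotation/training",
    "VidSTG-Dataset/annotations/train_annotations.json",
]

for path in expected:
    print(path, "->", "found" if os.path.exists(path) else "missing")
```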