Output all bbox

Files changed:
- VidSTG/read_annotation.py (+37 -56)
- run_vtimellm.md (+10 -2)

VidSTG/read_annotation.py
CHANGED
@@ -2,6 +2,7 @@ import json
 import os
 import cv2
 from tqdm import tqdm
+import re
 
 from generate_features import write_npy
 
@@ -23,10 +24,10 @@ def normalize_bbox(bbox, width, height):
     Normalize the bbox
     """
     xmin, ymin, xmax, ymax = bbox
-    xmin = round(xmin / width,
-    ymin = round(ymin / height,
-    xmax = round(xmax / width,
-    ymax = round(ymax / height,
+    xmin = int(round(xmin / width, 2) * 100)
+    ymin = int(round(ymin / height, 2) * 100)
+    xmax = int(round(xmax / width, 2) * 100)
+    ymax = int(round(ymax / height, 2) * 100)
 
     return [xmin, ymin, xmax, ymax]
 
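For reference, a minimal sketch of what the updated `normalize_bbox` now returns: pixel coordinates scaled to integer percentages of the frame size (0-100). The frame size and box below are invented for illustration and are not taken from the dataset.

```
def normalize_bbox(bbox, width, height):
    """Scale pixel coordinates to integer percentages of the frame size (0-100)."""
    xmin, ymin, xmax, ymax = bbox
    xmin = int(round(xmin / width, 2) * 100)
    ymin = int(round(ymin / height, 2) * 100)
    xmax = int(round(xmax / width, 2) * 100)
    ymax = int(round(ymax / height, 2) * 100)
    return [xmin, ymin, xmax, ymax]

# Hypothetical 1280x720 frame with a box at (320, 180)-(640, 360):
print(normalize_bbox([320, 180, 640, 360], 1280, 720))  # -> [25, 25, 50, 50]
```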
@@ -81,16 +82,19 @@ def generate_prompt(vid, vid_path, question, answer,
 
     target = vido_data['subject/objects'][question_tid]['category']
 
-    # [removed lines not recoverable from the diff view]
+    frame_to_bbox = {}
+    for fid in range(temporal_gt_begin_fid, temporal_gt_end_fid + 1):
+        start_bbox = vido_data['trajectories'][fid]
+        found = False
+        for a_dict in start_bbox:
+            if question_tid is a_dict['tid']:
+                start_bbox = a_dict['bbox']
+                found = True
+                break
+        if not found:
+            raise ValueError("start_bbox not found")
+        start_bbox = list(start_bbox.values())
+        frame_to_bbox[fid] = start_bbox
 
     # # verify the start_bbox
     # cap = cv2.VideoCapture(vid_path)
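One detail worth flagging in the new lookup: `question_tid is a_dict['tid']` compares by identity rather than equality. This happens to work for small interned integers in CPython but is not guaranteed in general. A safer equivalent would compare with `==`; the helper below is a sketch for illustration only and is not part of the commit.

```
def find_bbox(trajectory_frame, tid):
    """Hypothetical helper: return the bbox dict for the given track id, or None."""
    for a_dict in trajectory_frame:
        if a_dict['tid'] == tid:  # equality, not identity
            return a_dict['bbox']
    return None
```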
@@ -105,26 +109,6 @@ def generate_prompt(vid, vid_path, question, answer,
     # cv2.imshow('Frame', frame)
     # cv2.waitKey(0)
 
-    found = False
-    end_bbox = vido_data['trajectories'][temporal_gt_end_fid]
-    for a_dict in end_bbox:
-        if question_tid is a_dict['tid']:
-            end_bbox = a_dict['bbox']
-            found = True
-            break
-    if not found:
-        raise ValueError("end_bbox not found")
-    end_bbox = list(end_bbox.values())
-
-    # # verify the end_bbox
-    # cap.set(cv2.CAP_PROP_POS_FRAMES, int(temporal_gt_end_fid))
-    # ret, frame2 = cap.read()
-    # cv2.rectangle(frame2, (end_bbox[0], end_bbox[1]),
-    #               (end_bbox[2], end_bbox[3]), (0, 0, 255), 1)
-    # cv2.imshow('Frame', frame2)
-    # cv2.waitKey(0)
-    # cv2.destroyAllWindows()
-
 
     width = vido_data['width']
     height = vido_data['height']
@@ -147,33 +131,30 @@ def generate_prompt(vid, vid_path, question, answer,
         frame_count = original_frame_count
 
     # normalized start and end frame
-    # [removed lines not recoverable from the diff view]
-        start_frame = asked_frames * (start_frame / original_frame_count)
-        start_frame = int(start_frame)
-
-    end_frame = temporal_gt_end_fid - begin_fid
-    if asked_frames != 0:
-        end_frame = asked_frames * (end_frame / original_frame_count)
-        end_frame = int(end_frame)
-
-    write_npy(vid_path, vid, begin_fid, end_fid, asked_frames)
-
-    # [removed lines not recoverable from the diff view]
-    new_prompt
+    normalize_frame_to_bbox = {}
+    for fid in range(temporal_gt_begin_fid, temporal_gt_end_fid + 1):
+        relative_start_fid = fid - begin_fid
+        if asked_frames != 0:
+            normalized_frame = asked_frames * (relative_start_fid / original_frame_count)
+            normalized_frame = int(normalized_frame)
+
+        normalize_frame_to_bbox[normalized_frame] = normalize_bbox(frame_to_bbox[fid], width, height)
+
+    write_npy(vid_path, vid, begin_fid, end_fid, asked_frames)
+
+    new_prompt = r' The {} is at '.format(target)
+    bboxes = re.sub(r'\s+', '', str(normalize_frame_to_bbox))
+    new_prompt += bboxes
     json_obj = {}
     json_obj["id"] = vid
 
     question_dict = {"from": "human", "value": "<video>\n"+question['description']}
-    answer_dict = {"from": "gpt", "value": answer['description'] + new_prompt
+    answer_dict = {"from": "gpt", "value": answer['description'] + new_prompt}
     json_obj["conversations"] = [question_dict, answer_dict]
 
-    token = {"<s0>": start_frame, "<e0>" : end_frame
-    json_obj["meta"] = {"
+    # token = {"<s0>": start_frame, "<e0>" : end_frame}
+    json_obj["meta"] = {"duration": frame_count}
 
     return json_obj
 
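To make the new output concrete, here is a minimal sketch of the answer string this hunk builds. The target name, frame indices, and boxes are invented for illustration; only the string construction mirrors the added code.

```
import re

# Hypothetical values standing in for what generate_prompt computes:
target = 'adult'
normalize_frame_to_bbox = {3: [25, 25, 50, 50], 4: [26, 25, 51, 50]}

new_prompt = r' The {} is at '.format(target)
bboxes = re.sub(r'\s+', '', str(normalize_frame_to_bbox))
new_prompt += bboxes
print(new_prompt)
# " The adult is at {3:[25,25,50,50],4:[26,25,51,50]}"
```

This string is appended to the GPT-side answer, so each normalized frame index maps to one normalized [xmin, ymin, xmax, ymax] box rather than only the first and last frame as before.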
@@ -319,6 +300,9 @@ def process_record(record, vidor_anno_path_base, vidor_path_base):
 
         if result is not None:
             all_results.append(result)
+            # print(json.dumps(result, indent=4))
+            # visualize_video(vid_path, begin_fid, end_fid, temporal_gt_begin_fid, temporal_gt_end_fid,
+            #                 question_tid, answer_tid, vido_data)
 
     return all_results
 
@@ -349,9 +333,6 @@ def main():
         if results is None:
             continue
         all_results.extend(results)
-        # print(json.dumps(result, indent=4))
-        # visualize_video(vid_path, begin_fid, end_fid, temporal_gt_begin_fid, temporal_gt_end_fid,
-        #                 question_tid, answer_tid, vido_data)
 
         if index % 100 == 0:
             with open('results.json', 'w') as json_file:
run_vtimellm.md
CHANGED
@@ -1,3 +1,5 @@
+
+### run example inference
 python -m vtimellm.inference --model_base lmsys/vicuna-7b-v1.5 \
     --pretrain_mm_mlp_adapter checkpoints/vicuna-7b-v1.5/vtimellm-vicuna-v1-5-7b/vtimellm-vicuna-v1-5-7b-stage1/mm_projector.bin \
     --stage2 checkpoints/vicuna-7b-v1.5/vtimellm-vicuna-v1-5-7b/vtimellm-vicuna-v1-5-7b-stage2 \
@@ -9,7 +11,7 @@ python demo_gradio.py --model_base lmsys/vicuna-7b-v1.5 \
     --stage2 ../checkpoints/vicuna-7b-v1.5/vtimellm-vicuna-v1-5-7b/vtimellm-vicuna-v1-5-7b-stage2 \
     --stage3 ../checkpoints/vicuna-7b-v1.5/vtimellm-vicuna-v1-5-7b/vtimellm-vicuna-v1-5-7b-stage3
 
-
+### port forwarding
 ssh -t -t -i /home/datasets/xitong_id_rsa xitong@newton.ist.ucf.edu -L 7860:localhost:7860 ssh evc23 -L 7860:localhost:7860
 
 ### generate validation datasets
@@ -24,6 +26,12 @@ python Shikra_V/VidSTG/read_annotation_multithread.py --vidstg VidSTG-Dataset/an
 python Shikra_V/VidSTG/read_annotation_multithread.py --vidstg VidSTG-Dataset/annotations/test_annotations.json --vidor_anno_path_base vidor/validation_annotation/validation --vidor_path_base vidor/validation/video
 ```
 
+### Calculate the IoU by using the test dataset
+```
+python vtimellm/eval/eval.py --stage3 checkpoints/vtimellm-vicuna-v1-5-7b-stage3_xl_300_epoch/checkpoint-1700 --data_path data/xl/test/results_test.json --feat_folder data/xl/test/stage4_features_test --log_path vtimellm/eval/log/iou.txt --task iou
+```
+
 ### Verify by using my trained stage2
 
 ```
@@ -44,4 +52,4 @@ python demo_gradio.py --model_base lmsys/vicuna-7b-v1.5 \
 
 ### status
 
-we have generated 44087 training samples
+We have generated 44087 training samples, 4892 validation samples, and 5655 test samples.
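The eval command above reports an IoU score; the exact metric lives in vtimellm/eval/eval.py and is not shown in this commit, and it may be temporal rather than spatial. For reference only, a minimal sketch of box IoU over the [xmin, ymin, xmax, ymax] format produced by normalize_bbox; the boxes below are made up.

```
def box_iou(a, b):
    """Intersection-over-union of two [xmin, ymin, xmax, ymax] boxes."""
    ix = max(0, min(a[2], b[2]) - max(a[0], b[0]))  # overlap width
    iy = max(0, min(a[3], b[3]) - max(a[1], b[1]))  # overlap height
    inter = ix * iy
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return inter / union if union else 0.0

print(box_iou([25, 25, 50, 50], [30, 25, 55, 50]))  # ~0.667 for these example boxes
```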