ShuhuaiRen
committed on
Commit
•
cfa73c2
1
Parent(s):
9ca3dce
Upload 3 files
Browse files
data/video_highlight_detection/qvhighlights/get_coco_format.py
ADDED
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import argparse
|
3 |
+
import os
|
4 |
+
from copy import deepcopy
|
5 |
+
import pdb
|
6 |
+
import numpy as np
|
7 |
+
import random
|
8 |
+
from pathlib import Path
|
9 |
+
from collections import Counter
|
10 |
+
|
11 |
+
def read_json(path):
    """Load a COCO-format JSON file and return its "annotations" list."""
    with open(path, "r") as fin:
        return json.load(fin)["annotations"]
17 |
+
|
18 |
+
|
19 |
+
def read_jsonl(path):
    """Read a JSON-Lines file: one JSON object per line, returned as a list."""
    with open(path, "r") as fin:
        return [json.loads(row.strip()) for row in fin]
26 |
+
|
27 |
+
|
28 |
+
|
29 |
+
def write_json(data, path):
    """Serialize *data* to *path* as JSON."""
    with open(path, "w") as fout:
        json.dump(data, fout)
33 |
+
|
34 |
+
|
35 |
+
def read_txt(path):
    """Parse a Charades-STA annotation file into COCO-style caption records.

    Each input line looks like:
        AO8RW 0.0 6.9##a person is putting a book on a shelf.
    i.e. "<video_id> <start> <end>##<caption>".

    Returns a list of dicts with keys "image_id" (video filename),
    "caption", "timestamp" ([start, end] floats) and "id" (the 0-based
    line index of the record in the file).
    """
    data = []
    with open(path, "r") as fin:
        for i, line in enumerate(fin):
            line = line.strip("\n")
            # Split on the "##" separator once; the original code split twice.
            parts = line.split("##")
            cap = parts[-1]
            # Skip records with empty / degenerate captions.
            if len(cap) < 2:
                continue
            terms = parts[0].split(" ")
            # Guard against malformed lines missing the time fields
            # (previously this raised IndexError).
            if len(terms) < 3:
                continue
            vid = terms[0] + ".mp4"
            start_time = float(terms[1])
            end_time = float(terms[2])
            data.append({"image_id": vid, "caption": cap, "timestamp": [start_time, end_time], "id": i})
    return data
51 |
+
|
52 |
+
|
53 |
+
def filter_sent(sent):
    """Clean a caption sentence.

    Strips surrounding spaces and removes '#' characters. Returns the
    cleaned string, or False when the trimmed sentence is shorter than
    two characters (i.e. effectively empty).
    """
    trimmed = sent.strip(" ")
    if len(trimmed) < 2:
        return False
    return trimmed.replace("#", "")
59 |
+
|
60 |
+
|
61 |
+
if __name__ == "__main__":
    # Convert raw Charades-STA / QVHighlights annotations into a
    # COCO-caption-style JSON file per split.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='qvhighlights')  # anet
    parser.add_argument('--anno_path', default='annotations_raw/')
    parser.add_argument('--video_path', default='videos/')  # ActivityNet_asr_denseCap/anet_6fps_224
    parser.add_argument('--outpath', default='./')
    args = parser.parse_args()
    '''output data example:
    {
    "annotations": [
        {
        "image_id": "3MSZA.mp4",
        "caption": "person turn a light on.",
        "timestamp": [24.3, 30.4],
        }],
    }
    '''
    miss_videos = []  # image_ids referenced by annotations but absent on disk
    num_clips = []    # per-example clip counts (qvhighlights only: duration / 2s clips)
    for split in ["train", "val"]:  # "val", "test"
        if args.dataset == "charades":
            filename = f"charades_sta_{split}.txt"
            annos = read_txt(os.path.join(args.anno_path, filename))
            data = {}
            data["annotations"] = annos
        elif args.dataset == "qvhighlights":
            filename = f"highlight_{split}_release.jsonl"
            annos = read_jsonl(os.path.join(args.anno_path, filename))
            new_data = []
            for jterm in annos:
                new_term = {}
                new_term["image_id"] = "v_" + jterm["vid"] + ".mp4"
                # check the existence of the video; skip entries whose video is missing
                if not os.path.exists(os.path.join(args.video_path, split, new_term["image_id"])):
                    miss_videos.append(new_term["image_id"])
                    continue
                new_term["id"] = jterm["qid"]
                new_term["caption"] = jterm["query"]
                new_term["timestamp"] = jterm["relevant_windows"]
                new_term["duration"] = jterm["duration"]
                new_term["relevant_clip_ids"] = jterm["relevant_clip_ids"]
                new_term["saliency_scores"] = jterm["saliency_scores"]
                new_data.append(new_term)
                num_clips.append(int(jterm["duration"] / 2))
            data = {}
            data["annotations"] = new_data
        else:
            print("Do not support this dataset!")
            # BUG FIX: was exit(0) — an unsupported dataset is an error, so
            # signal failure to the shell.
            exit(1)

        # BUG FIX: was len(new_data), which is undefined on the charades branch
        # (NameError); data["annotations"] holds the kept examples on both paths.
        print(f"==> {args.dataset} dataset \t# examples num: {len(data['annotations'])} \t# miss videos num: {len(miss_videos)}\t# raw data num: {len(annos)}")
        out_name = "{}.caption_coco_format.json".format(split)
        Path(args.outpath).mkdir(parents=True, exist_ok=True)
        write_json(data, os.path.join(args.outpath, out_name))

    if len(num_clips) >= 1:
        count = Counter(num_clips)
        # sort count dict with the clip num
        print(count)
        print(max(list(count.keys())))
|
121 |
+
|
data/video_highlight_detection/qvhighlights/train.caption_coco_format.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/video_highlight_detection/qvhighlights/val.caption_coco_format.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|