ShuhuaiRen committed
Commit dda7ec8
Parent(s): 093f527
upload data

Files changed:
- .gitattributes +1 -0
- TimeIT.py +241 -0
- data/dense_video_captioning/anet/instruct_dvc_10.0k_anet.json +0 -0
- data/dense_video_captioning/vitt/instruct_dvc_5.1k_vitt.json +0 -0
- data/dense_video_captioning/youcook2/instruct_dvc_1.2k_youcook2.json +0 -0
- data/step_localization/coin/instruct_action_9.0k_coin.json +0 -0
- data/step_localization/hirest_step/instruct_action_0.5k_hirest.json +0 -0
- data/temporal_video_grounding/charades/instruct_tvg_12.4k_charades.json +0 -0
- data/temporal_video_grounding/didemo/instruct_tvg_33.0k_didemo.json +0 -0
- data/temporal_video_grounding/hirest/instruct_tvg_0.5k_hirest.json +0 -0
- data/temporal_video_grounding/queryd/instruct_tvg_14.6k_queryd.json +0 -0
- data/transcribed_speech_generation/yttemporal/instruct_tsp_31.6k_yttemporal.json +3 -0
- data/video_highlight_detection/qvhighlights/instruct_vhd_6.9k_qvhighlights.json +0 -0
- data/video_summarization/summe/instruct_vhd_25_summe.json +1 -0
- data/video_summarization/tvsum/instruct_vhd_50_tvsum.json +1 -0
.gitattributes
CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+data/transcribed_speech_generation/yttemporal/instruct_tsp_31.6k_yttemporal.json filter=lfs diff=lfs merge=lfs -text
TimeIT.py
ADDED
@@ -0,0 +1,241 @@
# dataset loading script
# import os
# import csv
import json
import random
import datasets

# from typing import List

_DESCRIPTION = """\
A video-centric instruction-tuning dataset involving timestamps for Video Large Language Models
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://github.com/RenShuhuai-Andy/TimeChat"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
_SEED = 1234  # for deterministic random sampling


# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
    "charades": {
        "train": "./data/temporal_video_grounding/charades/instruct_tvg_12.4k_charades.json",
        "instruction": "",
    },
    "didemo": {
        "train": "./data/temporal_video_grounding/didemo/instruct_tvg_33.0k_didemo.json",
        "instruction": "",
    },
    "queryd": {
        "train": "./data/temporal_video_grounding/queryd/instruct_tvg_14.6k_queryd.json",
        "instruction": "",
    },
    "hirest_grounding": {
        "train": "./data/temporal_video_grounding/hirest/instruct_tvg_0.5k_hirest.json",
        "instruction": "",
    },
    "qvhighlights": {
        "train": "./data/video_highlight_detection/qvhighlights/instruct_vhd_6.9k_qvhighlights.json",
        "instruction": "",
    },
    "youcook2": {
        "train": "./data/dense_video_captioning/youcook2/instruct_dvc_1.2k_youcook2.json",
        "instruction": "",
    },
    "anet": {
        "train": "./data/dense_video_captioning/anet/instruct_dvc_10.0k_anet.json",
        "instruction": "",
    },
    "vitt": {
        "train": "./data/dense_video_captioning/vitt/instruct_dvc_5.1k_vitt.json",
        "instruction": "",
    },
    "tvsum": {
        "train": "./data/video_summarization/tvsum/instruct_vhd_50_tvsum.json",
        "instruction": "",
    },
    "summe": {
        "train": "./data/video_summarization/summe/instruct_vhd_25_summe.json",
        "instruction": "",
    },
    "coin": {
        "train": "./data/step_localization/coin/instruct_action_9.0k_coin.json",
        "instruction": "",
    },
    "hirest_step": {
        "train": "./data/step_localization/hirest_step/instruct_action_0.5k_hirest.json",
        "instruction": "",
    },
    "yttemporal": {
        "train": "./data/transcribed_speech_generation/yttemporal/instruct_tsp_31.6k_yttemporal.json",
        "instruction": "",
    },
}

_CITATION = ""


class TimeITDataset(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("1.0.1")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options
    # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="charades", version=VERSION, description="Charades-STA dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="didemo", version=VERSION, description="DiDeMo dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="queryd", version=VERSION, description="QuerYD dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="hirest_grounding", version=VERSION, description="HiREST_grounding dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="qvhighlights", version=VERSION, description="QVHighlights dataset for Video Highlight Detection"
        ),
        datasets.BuilderConfig(
            name="youcook2", version=VERSION, description="YouCook2 dataset for Dense Video Captioning"
        ),
        datasets.BuilderConfig(
            name="anet", version=VERSION, description="ActivityNet Captions dataset for Dense Video Captioning"
        ),
        datasets.BuilderConfig(
            name="vitt", version=VERSION, description="ViTT dataset for Dense Video Captioning"
        ),
        datasets.BuilderConfig(
            name="tvsum", version=VERSION, description="TVSum dataset for Video Summarization"
        ),
        datasets.BuilderConfig(
            name="summe", version=VERSION, description="SumMe dataset for Video Summarization"
        ),
        datasets.BuilderConfig(
            name="coin", version=VERSION, description="COIN dataset for Step Localization"
        ),
        datasets.BuilderConfig(
            name="hirest_step", version=VERSION, description="HiREST_step dataset for Step Localization"
        ),
        datasets.BuilderConfig(
            name="yttemporal", version=VERSION, description="YT-Temporal dataset for Transcribed Speech Generation"
        ),
    ]

    DEFAULT_CONFIG_NAME = "youcook2"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

    def _info(self):
        # unified schema shared by all configurations
        features = datasets.Features(
            {
                "video": datasets.Value("string"),
                "QA": [
                    {
                        "q": datasets.Value("string"),
                        "a": datasets.Value("string"),
                    },
                ],
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with the path to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
        urls = _URLS[self.config.name]
        # Drop empty URL entries (the "instruction" field is currently unset) so download_and_extract does not fail on them.
        urls = {key: url for key, url in urls.items() if url}
        data_dir = dl_manager.download_and_extract(urls)  # a dict of local file paths
        ret = []
        # control the choice of the instruction
        random.seed(_SEED)

        if "train" in data_dir:
            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": data_dir["train"],
                        "split": "train",
                        "instruction_path": data_dir.get("instruction", ""),
                        "data_dir": data_dir,
                    },
                )
            )
        if "val" in data_dir:
            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": data_dir["val"],
                        "split": "dev",
                        "instruction_path": data_dir.get("instruction", ""),
                        "data_dir": data_dir,
                    },
                )
            )
        if "test" in data_dir:
            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": data_dir["test"],
                        "split": "test",
                        "instruction_path": data_dir.get("instruction", ""),
                        "data_dir": data_dir,
                    },
                )
            )
        return ret

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`

    def _generate_examples(self, filepath, split, instruction_path, data_dir=None):
        # print("instruction path: ", instruction_path)
        # The instruction file is optional; only load it when a path is given.
        instructions = None
        if instruction_path:
            with open(instruction_path, encoding="utf-8") as f:
                instructions = json.load(f)
        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        with open(filepath, encoding="utf-8") as f:  # json.load expects a file object, not a path string
            timeitdata = json.load(f)
        for i, d in enumerate(timeitdata):
            # yield i, {
            #     "question": d["QA"][0]['q'],
            #     "answer": d["QA"][0]['a'],
            #     "video_path": d["video"],
            # }
            # # print(d)
            yield i, d
data/dense_video_captioning/anet/instruct_dvc_10.0k_anet.json
ADDED
The diff for this file is too large to render.
See raw diff
data/dense_video_captioning/vitt/instruct_dvc_5.1k_vitt.json
ADDED
The diff for this file is too large to render.
See raw diff
data/dense_video_captioning/youcook2/instruct_dvc_1.2k_youcook2.json
ADDED
The diff for this file is too large to render.
See raw diff
data/step_localization/coin/instruct_action_9.0k_coin.json
ADDED
The diff for this file is too large to render.
See raw diff
data/step_localization/hirest_step/instruct_action_0.5k_hirest.json
ADDED
The diff for this file is too large to render.
See raw diff
data/temporal_video_grounding/charades/instruct_tvg_12.4k_charades.json
ADDED
The diff for this file is too large to render.
See raw diff
data/temporal_video_grounding/didemo/instruct_tvg_33.0k_didemo.json
ADDED
The diff for this file is too large to render.
See raw diff
data/temporal_video_grounding/hirest/instruct_tvg_0.5k_hirest.json
ADDED
The diff for this file is too large to render.
See raw diff
data/temporal_video_grounding/queryd/instruct_tvg_14.6k_queryd.json
ADDED
The diff for this file is too large to render.
See raw diff
data/transcribed_speech_generation/yttemporal/instruct_tsp_31.6k_yttemporal.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9156641dad2e4c413db3568ae1b409f65ad2c22392de17396955922536a2184a
+size 64370082
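
The three lines above are a Git LFS pointer: the 64 MB JSON itself is stored as an LFS object identified by its SHA-256 and byte size. A minimal local consistency check, assuming the real file has been pulled (e.g. with git lfs pull) into the repository tree:

import hashlib
from pathlib import Path

path = Path("data/transcribed_speech_generation/yttemporal/instruct_tsp_31.6k_yttemporal.json")
blob = path.read_bytes()

# Compare against the size and oid recorded in the pointer file above.
assert len(blob) == 64370082
assert hashlib.sha256(blob).hexdigest() == (
    "9156641dad2e4c413db3568ae1b409f65ad2c22392de17396955922536a2184a"
)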
data/video_highlight_detection/qvhighlights/instruct_vhd_6.9k_qvhighlights.json
ADDED
The diff for this file is too large to render.
See raw diff
data/video_summarization/summe/instruct_vhd_25_summe.json
ADDED
@@ -0,0 +1 @@
[{"video": "SumMe/videos/Air_Force_One.mp4", "QA": [{"q": "From the summe dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 57.0, 62.4, 68.4, 73.2, 78.0, 79.2, 80.4, 84.6, 155.4, 156.6, 157.8, 159.6, 160.8, 161.4, 162.6, 164.4, 165.6, 167.4, 169.2, 171.0, 172.2 seconds. Their saliency scores are 2.1, 1.8, 2.1, 2.9, 4.2, 3.9, 3.7, 3.1, 1.8, 2.3, 2.9, 3.1, 3.4, 3.4, 3.7, 3.1, 3.1, 2.6, 2.1, 2.1, 1.8."}]}, {"video": "SumMe/videos/Excavators river crossing.mp4", "QA": [{"q": "From the summe dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 4.8, 7.2, 12.0, 28.8, 30.0, 38.4, 39.0, 42.0, 45.0, 50.4, 63.0, 69.6, 70.8, 78.6, 81.0, 89.4, 98.4, 99.0, 101.4, 104.4, 109.8, 112.2, 120.0, 327.0, 347.4, 378.0, 385.8 seconds. Their saliency scores are 3.4, 2.9, 2.3, 2.9, 1.8, 3.7, 3.4, 3.4, 3.4, 2.3, 2.3, 2.6, 3.9, 2.9, 2.6, 2.1, 3.1, 2.9, 2.9, 3.4, 3.1, 2.9, 2.6, 1.8, 1.5, 2.3, 2.1."}]}, {"video": "SumMe/videos/Fire Domino.mp4", "QA": [{"q": "You are given a video from the summe dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 8.5, 10.0, 11.0, 12.0, 23.5, 44.5, 45.0 seconds. Their saliency scores are 1.8, 3.7, 4.5, 4.2, 2.1, 4.7, 4.2."}]}, {"video": "SumMe/videos/Jumps.mp4", "QA": [{"q": "From the summe dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 9.6, 13.8, 31.2 seconds. Their saliency scores are 2.9, 4.7, 1.3."}]}, {"video": "SumMe/videos/Kids_playing_in_leaves.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by summe dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 3.5, 5.0, 6.5, 12.0, 13.0, 17.0, 31.5, 73.1, 91.6 seconds. Their saliency scores are 2.3, 4.2, 4.2, 3.1, 3.4, 2.9, 2.1, 2.6, 1.8."}]}, {"video": "SumMe/videos/Notre_Dame.mp4", "QA": [{"q": "You are given a video from the summe dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 6.9, 7.5, 15.6, 41.9, 45.6, 91.2, 92.5, 97.5, 98.1, 123.8, 128.8, 138.8, 142.5, 178.1, 180.0 seconds. Their saliency scores are 3.7, 3.7, 1.8, 1.8, 1.8, 3.7, 3.4, 2.3, 1.8, 2.6, 3.1, 2.3, 2.1, 2.3, 2.6."}]}, {"video": "SumMe/videos/Paintball.mp4", "QA": [{"q": "Go through the video content from summe dataset, and upon identifying highlight moments, list their timestamps. 
Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 11.3, 83.8, 100.1, 101.4, 102.6, 103.2, 103.9, 105.7, 110.7, 195.2, 230.2, 230.9, 232.1, 232.7, 234.0, 234.6, 235.9, 237.1, 237.7, 238.4, 239.0, 239.6, 240.2, 240.9, 241.5, 242.1, 242.7, 244.6, 245.9, 246.5, 247.1, 247.7, 248.4, 249.0, 249.6, 250.2, 250.9, 251.5, 252.1, 252.8, 253.4, 254.0 seconds. Their saliency scores are 4.5, 1.5, 4.3, 4.3, 4.5, 4.5, 4.5, 1.9, 1.2, 1.2, 2.4, 2.6, 2.9, 2.9, 2.4, 2.4, 1.7, 2.2, 2.4, 2.6, 2.6, 2.4, 2.4, 2.6, 2.6, 2.6, 2.2, 1.7, 1.5, 1.5, 1.5, 1.7, 1.7, 1.7, 1.7, 1.7, 1.9, 1.9, 1.9, 1.5, 1.5, 1.5."}]}, {"video": "SumMe/videos/Playing_on_water_slide.mp4", "QA": [{"q": "In the video presented from summe dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 7.5, 8.0, 30.0, 91.6, 95.1, 96.6, 97.1, 98.1 seconds. Their saliency scores are 2.3, 2.6, 2.6, 2.3, 2.6, 2.6, 2.9, 2.3."}]}, {"video": "SumMe/videos/Saving dolphines.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by summe dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 3.5, 23.5, 30.5, 31.5, 41.5, 51.1, 59.1, 61.1, 66.1, 66.6, 78.6, 79.1, 103.6, 110.1, 111.6, 134.6, 135.6, 217.2 seconds. Their saliency scores are 2.1, 2.3, 3.4, 3.1, 3.4, 2.6, 2.3, 2.3, 2.1, 2.1, 2.3, 2.6, 1.3, 2.1, 2.1, 2.3, 2.6, 1.8."}]}, {"video": "SumMe/videos/Scuba.mp4", "QA": [{"q": "You are given a video from the summe dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 17.0, 20.5, 37.5, 39.5, 56.5, 64.0, 73.5 seconds. Their saliency scores are 2.2, 2.2, 2.2, 3.4, 3.1, 1.9, 2.6."}]}, {"video": "SumMe/videos/St Maarten Landing.mp4", "QA": [{"q": "Watch the provided video from the summe dataset and mark out the timestamps with stand-out visual content. Document the timestamps of these highlights and evaluate their saliency scores.", "a": "The highlight timestamps are in the 0.6, 27.0, 31.2, 36.0, 49.8, 51.0, 52.2, 52.8, 54.6, 57.6 seconds. Their saliency scores are 1.5, 2.9, 1.2, 1.2, 4.1, 4.5, 5.0, 4.5, 3.1, 2.2."}]}, {"video": "SumMe/videos/Base jumping.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by summe dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 5.0, 5.5, 28.0, 28.5, 30.0, 44.5, 45.5, 51.1, 82.6, 83.6, 84.1, 96.6, 98.1, 124.1, 126.1 seconds. Their saliency scores are 2.8, 2.3, 3.0, 3.0, 2.6, 3.0, 3.0, 2.1, 3.0, 3.2, 3.2, 2.3, 2.1, 3.0, 3.7."}]}, {"video": "SumMe/videos/Statue of Liberty.mp4", "QA": [{"q": "Watch the provided video from the summe dataset and mark out the timestamps with stand-out visual content. Document the timestamps of these highlights and evaluate their saliency scores.", "a": "The highlight timestamps are in the 26.4, 29.4, 31.8, 41.4, 48.6, 97.8, 114.6, 117.6, 121.2, 132.0 seconds. 
Their saliency scores are 2.2, 2.9, 3.1, 1.9, 2.2, 2.2, 2.4, 2.2, 1.7, 1.9."}]}, {"video": "SumMe/videos/Uncut_Evening_Flight.mp4", "QA": [{"q": "In the video presented from summe dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 2.0, 3.0, 6.5, 11.0, 13.0, 17.5, 64.6, 66.1, 70.1, 70.6, 74.6, 91.1, 92.1, 111.6, 114.6, 117.1, 119.6, 145.1, 164.2, 166.7, 167.7, 173.7, 286.8, 289.8, 306.3, 308.8, 312.8, 314.3, 318.3 seconds. Their saliency scores are 3.4, 4.2, 4.2, 2.6, 2.6, 2.1, 1.8, 2.1, 2.1, 2.1, 1.8, 2.6, 2.9, 2.3, 2.9, 2.1, 1.8, 1.5, 2.1, 2.6, 2.3, 1.8, 2.1, 2.3, 1.8, 2.6, 3.9, 5.0, 3.4."}]}, {"video": "SumMe/videos/Valparaiso_Downhill.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by summe dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 10.5, 13.0, 14.0, 18.0, 29.0, 30.5, 34.0, 34.5, 37.5, 40.0, 67.6, 91.1, 126.1, 129.1, 136.1, 161.7, 163.7, 166.2 seconds. Their saliency scores are 2.1, 3.1, 3.1, 1.8, 2.1, 1.8, 1.8, 2.1, 2.1, 3.1, 2.9, 3.7, 1.5, 2.6, 2.3, 4.2, 3.4, 2.1."}]}, {"video": "SumMe/videos/car_over_camera.mp4", "QA": [{"q": "In the video presented from summe dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 36.5, 37.0, 56.1, 57.1, 61.1, 62.1, 62.6, 63.1, 63.6, 64.6, 86.1, 86.6, 87.1, 87.6, 88.1, 89.6, 93.1, 94.6, 103.6, 106.1, 110.1, 110.6, 111.1, 111.6, 112.1, 112.6, 140.1, 140.6, 141.1, 141.6, 142.1, 143.1, 144.6 seconds. Their saliency scores are 2.1, 2.9, 2.1, 2.3, 2.3, 2.3, 2.9, 2.9, 2.9, 2.3, 2.9, 3.4, 3.9, 4.2, 4.2, 3.4, 2.3, 2.1, 2.6, 2.3, 3.4, 4.2, 4.2, 3.9, 2.1, 1.5, 1.0, 2.1, 2.6, 2.6, 2.3, 2.3, 1.3."}]}, {"video": "SumMe/videos/paluma_jump.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by summe dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 1.5, 42.5, 61.6, 62.1, 67.1, 79.1, 80.6, 82.1 seconds. Their saliency scores are 1.5, 4.2, 4.7, 4.7, 1.8, 2.1, 1.8, 2.3."}]}, {"video": "SumMe/videos/playing_ball.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by summe dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 2.0, 2.5, 5.0, 14.0, 14.5, 18.0, 21.0, 21.5, 55.0 seconds. Their saliency scores are 2.8, 3.2, 2.0, 2.5, 3.5, 2.8, 3.0, 2.8, 3.5."}]}, {"video": "SumMe/videos/Bearpark_climbing.mp4", "QA": [{"q": "From the summe dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 3.6, 10.2, 49.2, 52.2, 77.4, 78.6, 84.0, 85.8, 101.4, 104.4, 111.0 seconds. Their saliency scores are 2.3, 2.6, 2.3, 1.8, 2.3, 2.3, 2.3, 2.6, 2.6, 3.1, 2.3."}]}, {"video": "SumMe/videos/Bike Polo.mp4", "QA": [{"q": "Go through the video content from summe dataset, and upon identifying highlight moments, list their timestamps. 
Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 23.5, 29.0, 31.5, 48.0, 49.0, 60.0, 71.5, 72.5, 74.0, 86.0, 86.5 seconds. Their saliency scores are 3.7, 2.9, 2.1, 1.5, 1.5, 1.8, 3.1, 4.2, 3.1, 3.7, 3.7."}]}, {"video": "SumMe/videos/Bus_in_Rock_Tunnel.mp4", "QA": [{"q": "Watch the provided video from the summe dataset and mark out the timestamps with stand-out visual content. Document the timestamps of these highlights and evaluate their saliency scores.", "a": "The highlight timestamps are in the 48.5, 50.5, 51.0, 68.0, 80.5, 86.0, 89.5, 91.0, 92.5, 96.0, 109.0, 126.0, 139.6, 146.1, 167.1, 167.6 seconds. Their saliency scores are 2.9, 3.1, 3.1, 2.1, 1.8, 2.1, 2.6, 3.4, 3.1, 2.6, 1.8, 2.1, 2.1, 2.1, 2.3, 2.1."}]}, {"video": "SumMe/videos/Car_railcrossing.mp4", "QA": [{"q": "Watch the provided video from the summe dataset and mark out the timestamps with stand-out visual content. Document the timestamps of these highlights and evaluate their saliency scores.", "a": "The highlight timestamps are in the 11.0, 18.0, 21.0, 21.5, 79.1, 80.6, 82.1, 84.6, 86.6, 88.6, 112.6, 114.1, 143.1, 157.2, 159.7, 160.7 seconds. Their saliency scores are 2.5, 3.0, 4.8, 5.0, 3.2, 3.5, 3.2, 2.5, 2.2, 2.0, 2.5, 2.0, 1.5, 2.2, 3.0, 2.5."}]}, {"video": "SumMe/videos/Cockpit_Landing.mp4", "QA": [{"q": "You are given a video from the summe dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 6.0, 29.5, 32.0, 38.5, 39.5, 43.5, 68.1, 73.1, 74.1, 76.6, 78.6, 81.1, 110.6, 113.6, 114.6, 142.6, 239.7, 242.7, 243.7, 246.2, 257.8, 260.8, 276.3, 277.3, 281.8, 285.8, 288.3, 289.8, 292.3, 295.3 seconds. Their saliency scores are 2.6, 1.5, 1.5, 2.1, 2.1, 1.8, 1.8, 2.6, 3.4, 3.1, 2.3, 2.3, 1.8, 2.9, 2.9, 1.8, 2.1, 2.6, 2.3, 2.1, 1.8, 1.8, 2.1, 1.8, 2.3, 2.6, 3.4, 4.2, 4.5, 3.7."}]}, {"video": "SumMe/videos/Cooking.mp4", "QA": [{"q": "From the summe dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 34.0, 53.0, 69.1, 70.1 seconds. Their saliency scores are 3.4, 3.4, 2.9, 4.3."}]}, {"video": "SumMe/videos/Eiffel Tower.mp4", "QA": [{"q": "From the summe dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 3.6, 5.4, 30.6, 47.4, 49.2, 49.8, 52.2, 80.4, 101.4, 102.6, 113.4, 117.0, 127.2, 127.8, 135.6, 136.2, 178.2, 195.6 seconds. Their saliency scores are 2.3, 3.7, 2.1, 1.8, 3.7, 4.5, 3.7, 2.3, 2.3, 2.3, 2.1, 2.3, 3.1, 3.1, 2.6, 2.6, 1.0, 3.4."}]}]
data/video_summarization/tvsum/instruct_vhd_50_tvsum.json
ADDED
@@ -0,0 +1 @@
[{"video": "TVSum/videos/AwmHb44_ouw.mp4", "QA": [{"q": "Watch the provided video from the tvsum dataset and mark out the timestamps with stand-out visual content. Document the timestamps of these highlights and evaluate their saliency scores.", "a": "The highlight timestamps are in the 0.0, 1.5, 96.1, 96.6, 99.1, 102.1, 103.1, 106.1, 109.1, 110.1, 120.1, 123.1, 125.6, 128.1, 132.1, 138.1, 138.6, 142.1, 142.6, 143.6, 147.6, 152.2, 152.7, 153.7, 156.2, 159.7, 164.2, 165.7, 172.7, 174.2, 176.2, 188.2, 192.2, 196.2, 198.2, 198.7, 200.2, 222.2, 248.7, 250.2, 256.3, 257.8, 260.8, 274.3, 275.3, 284.3, 284.8, 300.3 seconds. Their saliency scores are 2.9, 2.9, 3.0, 3.0, 2.5, 3.1, 3.1, 2.7, 2.6, 2.6, 3.0, 2.8, 2.7, 2.5, 2.6, 2.6, 2.6, 3.1, 3.1, 3.1, 2.2, 3.0, 3.0, 3.0, 3.8, 3.0, 3.0, 3.0, 2.7, 2.4, 2.6, 3.8, 2.7, 2.5, 3.5, 3.5, 2.6, 2.6, 2.5, 2.5, 2.8, 2.8, 2.7, 3.5, 3.5, 3.0, 3.0, 2.7."}]}, {"video": "TVSum/videos/akI8YFjEmUw.mp4", "QA": [{"q": "You are given a video from the tvsum dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 26.5, 32.5, 33.0, 35.0, 38.0, 44.0, 45.5, 66.1, 67.1, 72.1, 76.1, 110.1, 110.6, 122.1, 124.6, 126.1, 128.1, 130.6 seconds. Their saliency scores are 2.4, 2.7, 2.7, 2.7, 2.7, 2.5, 2.5, 2.6, 2.6, 2.3, 2.5, 2.6, 2.6, 3.0, 2.5, 2.4, 3.1, 2.3."}]}, {"video": "TVSum/videos/i3wAGJaaktw.mp4", "QA": [{"q": "Go through the video content from tvsum dataset, and upon identifying highlight moments, list their timestamps. Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 5.0, 5.5, 10.0, 28.0, 32.5, 34.0, 43.0, 50.1, 56.1, 64.1, 88.1, 89.6, 91.1, 92.1, 92.6, 93.6, 95.1, 98.1, 111.6, 148.6 seconds. Their saliency scores are 3.2, 3.2, 2.9, 2.8, 2.6, 2.6, 2.5, 2.5, 3.2, 2.8, 3.3, 3.3, 2.9, 3.6, 3.6, 3.6, 3.3, 3.3, 1.2, 1.8."}]}, {"video": "TVSum/videos/Bhxk-O1Y7Ho.mp4", "QA": [{"q": "You are given a video from the tvsum dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 0.0, 70.1, 71.6, 74.1, 78.1, 86.6, 110.1, 110.6, 118.1, 121.1, 123.1, 148.1, 149.6, 152.2, 154.2, 162.2, 164.2, 166.2, 168.7, 170.2, 174.2, 200.2, 268.3, 272.3, 274.8, 276.3, 286.3, 294.3, 294.8, 299.3, 326.3, 332.3, 336.8, 338.3, 339.8, 344.3, 396.4, 426.4, 427.4, 434.4, 435.9, 438.4, 438.9, 442.4, 448.4, 449.4 seconds. Their saliency scores are 3.0, 2.7, 2.7, 3.0, 3.2, 2.7, 3.2, 3.2, 3.0, 2.8, 2.4, 2.5, 2.5, 2.5, 2.5, 2.8, 2.9, 2.9, 2.8, 2.5, 3.3, 2.5, 2.8, 3.4, 2.4, 2.8, 2.9, 2.9, 2.9, 2.8, 3.0, 3.0, 3.0, 3.0, 3.0, 2.5, 2.2, 2.5, 2.5, 3.0, 3.0, 3.2, 3.2, 3.2, 2.8, 2.8."}]}, {"video": "TVSum/videos/0tmA_C6XwfM.mp4", "QA": [{"q": "Watch the provided video from the tvsum dataset and mark out the timestamps with stand-out visual content. 
Document the timestamps of these highlights and evaluate their saliency scores.", "a": "The highlight timestamps are in the 28.2, 36.6, 52.2, 56.4, 60.6, 61.2, 64.2, 72.6, 73.8, 76.8, 100.2 seconds. Their saliency scores are 2.9, 2.5, 2.9, 3.0, 3.5, 3.5, 3.2, 3.0, 3.0, 2.8, 2.0."}]}, {"video": "TVSum/videos/3eYKfiOEJNs.mp4", "QA": [{"q": "From the tvsum dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 50.4, 54.6, 55.8, 60.6, 64.2, 64.8, 66.0, 76.2, 78.0, 82.2, 84.6, 88.2, 96.6, 135.6, 138.6, 170.4, 172.2, 176.4, 178.8 seconds. Their saliency scores are 3.1, 3.2, 3.2, 2.8, 2.8, 2.8, 2.8, 2.8, 2.8, 3.2, 3.5, 3.1, 2.5, 1.9, 2.5, 3.0, 3.2, 3.0, 2.5."}]}, {"video": "TVSum/videos/xxdtq8mxegs.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by tvsum dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 23.0, 34.5, 35.0, 36.5, 45.5, 59.0, 64.5, 66.5, 68.5, 72.5, 73.5, 76.5, 78.5, 82.5, 83.0, 88.0, 88.5, 90.5, 94.5, 97.5 seconds. Their saliency scores are 1.9, 2.3, 2.3, 2.5, 1.9, 2.0, 2.2, 2.4, 2.5, 3.2, 3.2, 3.4, 3.9, 3.6, 3.6, 2.9, 2.4, 2.6, 2.5, 2.5."}]}, {"video": "TVSum/videos/WG0MBPpPC6I.mp4", "QA": [{"q": "Go through the video content from tvsum dataset, and upon identifying highlight moments, list their timestamps. Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 8.1, 52.6, 82.6, 83.8, 106.4, 126.4, 168.3, 180.8, 190.2, 192.7, 200.2, 222.1, 230.3, 242.1, 258.4, 264.0, 266.5, 269.7, 276.6, 286.6, 300.3, 324.1, 326.6, 338.5, 339.1, 344.1, 352.3, 354.1, 356.6, 366.7, 367.3, 370.4, 387.3, 394.2 seconds. Their saliency scores are 2.6, 2.9, 2.4, 2.4, 2.6, 2.6, 2.9, 2.5, 2.9, 2.7, 2.4, 2.6, 2.0, 2.8, 2.6, 2.5, 2.8, 2.6, 3.2, 3.0, 3.4, 2.9, 3.3, 3.2, 3.2, 3.0, 2.9, 2.3, 2.5, 2.6, 2.6, 2.5, 1.7, 2.5."}]}, {"video": "TVSum/videos/Hl-__g2gn_A.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by tvsum dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 0.0, 8.1, 60.1, 72.6, 73.2, 78.2, 82.0, 84.5, 112.6, 114.5, 128.3, 128.9, 150.8, 166.4, 168.3, 170.2, 172.0, 172.7, 176.4, 177.1, 178.3, 187.1, 188.3, 206.5, 226.5, 227.7 seconds. Their saliency scores are 1.3, 3.0, 2.5, 3.8, 3.8, 3.2, 2.4, 3.0, 2.8, 2.2, 2.8, 2.8, 2.0, 3.4, 3.5, 3.7, 3.9, 3.9, 2.4, 2.4, 2.4, 2.7, 2.5, 3.0, 3.3, 3.3."}]}, {"video": "TVSum/videos/Yi4Ij2NM7U4.mp4", "QA": [{"q": "You are given a video from the tvsum dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 53.8, 58.8, 61.2, 120.6, 124.4, 125.6, 130.6, 132.5, 134.4, 136.2, 138.1, 142.5, 156.2, 158.1, 160.6, 164.4, 166.2, 168.1, 175.0, 183.1, 236.2, 240.6, 243.1, 245.6, 250.6, 256.2, 260.6, 268.8, 293.1, 298.1, 298.8, 334.4, 368.8, 372.5, 374.4, 375.6, 376.9, 380.0 seconds. 
Their saliency scores are 2.5, 2.9, 2.6, 2.5, 2.5, 2.5, 2.5, 2.4, 2.5, 2.8, 2.5, 2.5, 2.6, 2.9, 2.5, 2.5, 2.6, 2.8, 2.6, 1.9, 2.6, 2.6, 2.4, 2.4, 2.4, 3.2, 3.4, 3.2, 2.5, 2.9, 2.9, 2.8, 2.5, 3.3, 3.0, 3.0, 2.6, 2.4."}]}, {"video": "TVSum/videos/37rzWOQsNIw.mp4", "QA": [{"q": "In the video presented from tvsum dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 0.0, 4.0, 8.0, 8.5, 10.5, 18.0, 18.5, 21.5, 30.0, 38.0, 45.0, 66.1, 70.1, 74.1, 75.6, 152.2, 154.2, 159.2, 162.2, 181.2 seconds. Their saliency scores are 3.2, 2.9, 2.5, 2.5, 2.4, 3.1, 3.1, 3.0, 2.5, 2.4, 2.1, 3.1, 3.0, 2.8, 2.8, 2.4, 2.6, 2.5, 2.5, 2.3."}]}, {"video": "TVSum/videos/98MoyGZKHXc.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by tvsum dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 28.2, 28.8, 66.6, 69.0, 76.2, 76.8, 82.2, 82.8, 98.4, 104.4, 106.8, 108.6, 116.4, 117.0, 128.4, 130.2 seconds. Their saliency scores are 3.1, 3.1, 4.3, 3.8, 3.0, 3.0, 3.2, 3.2, 2.8, 3.5, 3.3, 2.6, 3.2, 3.2, 2.9, 2.6."}]}, {"video": "TVSum/videos/LRw_obCPUt0.mp4", "QA": [{"q": "From the tvsum dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 2.5, 4.4, 6.3, 8.1, 11.3, 56.3, 58.2, 60.1, 62.6, 65.1, 66.3, 108.2, 118.2, 128.3, 137.6, 150.1, 158.3, 198.3, 198.9, 200.8, 236.5, 240.2 seconds. Their saliency scores are 2.6, 2.9, 2.8, 2.5, 1.6, 2.5, 2.6, 2.7, 3.0, 2.6, 3.0, 2.4, 3.0, 3.6, 3.6, 2.6, 3.5, 2.6, 2.6, 2.3, 2.9, 3.5."}]}, {"video": "TVSum/videos/cjibtmSLxQ4.mp4", "QA": [{"q": "From the tvsum dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 9.0, 21.0, 30.0, 38.5, 44.0, 44.5, 60.6, 74.1, 82.1, 100.1, 102.1, 103.1, 124.1, 146.1, 147.6, 150.2, 151.7, 154.2, 172.2, 174.2, 176.2, 179.7, 190.2, 191.2, 198.2, 199.2, 200.2, 201.7, 204.7, 232.2, 258.3, 258.8, 259.8, 262.8, 272.3, 274.3, 275.8, 308.3, 314.3, 322.8, 380.4, 386.4, 388.4, 518.0, 519.5, 524.0, 530.0, 536.0, 537.0, 554.1, 555.1, 580.1, 584.1, 588.1, 590.1, 591.1, 606.1, 612.1, 620.1, 620.6, 626.6, 628.1, 631.1 seconds. Their saliency scores are 3.5, 2.4, 2.7, 2.6, 3.3, 3.3, 3.2, 3.4, 3.5, 2.5, 2.5, 2.5, 3.5, 2.8, 2.8, 2.8, 2.8, 2.2, 2.5, 3.0, 3.2, 3.1, 3.0, 3.0, 2.7, 2.7, 2.6, 2.6, 2.5, 2.5, 2.6, 2.6, 2.6, 2.6, 2.6, 2.6, 2.6, 2.4, 2.2, 2.5, 3.2, 3.7, 2.6, 3.5, 3.5, 3.8, 3.4, 3.7, 3.7, 2.4, 2.4, 2.8, 2.5, 3.0, 3.2, 3.2, 2.8, 2.3, 2.7, 2.7, 2.5, 2.6, 2.4."}]}, {"video": "TVSum/videos/b626MiF1ew4.mp4", "QA": [{"q": "Go through the video content from tvsum dataset, and upon identifying highlight moments, list their timestamps. Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 0.0, 18.8, 28.1, 30.6, 36.2, 58.1, 58.8, 61.2, 62.5, 74.4, 80.6, 112.5, 124.4, 152.5, 182.5, 192.5, 212.5, 218.1, 221.2 seconds. 
Their saliency scores are 2.5, 2.6, 2.4, 2.2, 1.9, 3.3, 3.3, 2.5, 2.6, 2.6, 2.5, 2.5, 3.0, 2.6, 2.5, 3.0, 2.6, 2.9, 2.6."}]}, {"video": "TVSum/videos/XkqCExn6_Us.mp4", "QA": [{"q": "You are given a video from the tvsum dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 3.0, 12.0, 22.0, 24.5, 26.0, 41.0, 56.1, 64.6, 76.1, 80.1, 82.1, 86.1, 92.1, 100.1, 101.1, 106.1, 114.1 seconds. Their saliency scores are 2.6, 2.5, 3.0, 2.2, 2.8, 2.4, 2.9, 2.7, 2.8, 3.6, 2.8, 2.6, 3.2, 2.6, 2.6, 2.8, 2.3."}]}, {"video": "TVSum/videos/GsAD1KT1xo8.mp4", "QA": [{"q": "Go through the video content from tvsum dataset, and upon identifying highlight moments, list their timestamps. Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 78.1, 86.1, 88.1, 92.1, 94.6, 97.6, 99.6, 114.1, 115.1, 128.1, 136.1 seconds. Their saliency scores are 3.3, 2.8, 2.9, 3.0, 3.0, 2.4, 2.4, 2.6, 2.6, 2.5, 2.9."}]}, {"video": "TVSum/videos/PJrm840pAUI.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by tvsum dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 2.5, 3.1, 10.6, 28.1, 32.5, 42.5, 46.2, 47.5, 50.6, 112.5, 114.4, 132.5, 166.3, 174.4, 192.5, 200.6, 241.2, 246.2, 254.4, 257.5, 258.1, 260.6 seconds. Their saliency scores are 2.5, 2.5, 2.0, 2.2, 2.2, 2.5, 2.6, 2.6, 2.1, 2.1, 2.4, 2.2, 2.2, 3.3, 2.2, 3.3, 1.2, 2.4, 2.5, 2.2, 2.4, 2.8."}]}, {"video": "TVSum/videos/91IHQYk1IQM.mp4", "QA": [{"q": "Watch the provided video from the tvsum dataset and mark out the timestamps with stand-out visual content. Document the timestamps of these highlights and evaluate their saliency scores.", "a": "The highlight timestamps are in the 10.0, 10.5, 18.0, 38.0, 42.0, 42.5, 44.5, 62.1, 64.1, 66.1, 83.1 seconds. Their saliency scores are 2.2, 2.2, 2.5, 2.8, 3.5, 3.5, 2.9, 2.5, 3.0, 2.2, 3.2."}]}, {"video": "TVSum/videos/RBCABdttQmI.mp4", "QA": [{"q": "From the tvsum dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 6.0, 6.5, 32.5, 34.5, 68.6, 84.6, 88.1, 94.1, 124.1, 136.1, 144.1, 156.2, 170.2, 216.2, 222.7, 226.2, 232.2, 266.3, 273.3, 274.3, 286.3, 302.3, 312.3, 312.8, 317.8, 326.3, 336.3, 340.3, 340.8, 346.3, 352.4, 354.4, 356.4 seconds. Their saliency scores are 3.5, 3.5, 3.0, 3.0, 2.3, 2.5, 2.5, 2.4, 2.5, 2.4, 2.4, 2.3, 2.2, 3.3, 3.0, 3.2, 3.1, 2.9, 2.5, 2.5, 2.2, 2.4, 3.0, 3.0, 3.0, 2.5, 2.5, 2.8, 2.8, 2.5, 2.9, 2.4, 2.5."}]}, {"video": "TVSum/videos/z_6gVvQb2d0.mp4", "QA": [{"q": "You are given a video from the tvsum dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. 
Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 132.1, 133.1, 134.6, 138.1, 139.6, 142.1, 143.6, 152.7, 154.2, 156.2, 158.2, 160.2, 169.2, 172.2, 190.2, 194.2, 196.2, 202.2, 205.2, 210.2, 212.2, 214.2, 220.7, 224.2, 224.7, 226.7, 230.2, 232.2, 233.7, 236.2, 238.2, 240.2, 246.2, 247.7 seconds. Their saliency scores are 3.3, 3.3, 3.0, 3.0, 3.0, 2.8, 2.8, 3.1, 2.5, 2.3, 2.4, 3.0, 2.6, 4.2, 2.8, 2.3, 3.4, 3.7, 2.6, 2.4, 3.2, 3.0, 2.4, 3.3, 3.3, 2.9, 2.3, 3.2, 3.2, 2.6, 2.4, 2.6, 3.2, 3.2."}]}, {"video": "TVSum/videos/fWutDQy1nnY.mp4", "QA": [{"q": "You are given a video from the tvsum dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 90.2, 108.3, 114.3, 148.4, 150.4, 151.9, 156.4, 190.0, 198.0, 200.1, 202.1, 204.6, 208.1, 211.1, 214.6, 220.1, 236.2, 242.2, 254.7, 260.2, 270.2, 276.3, 282.3, 292.3, 296.3, 311.4, 338.4, 341.9, 344.5, 350.5, 351.5, 356.5, 362.0, 364.0, 370.0, 378.0, 382.1, 383.1, 386.1, 412.6 seconds. Their saliency scores are 2.9, 2.6, 2.7, 2.6, 2.8, 2.8, 3.3, 2.9, 3.0, 3.1, 3.3, 3.0, 3.2, 3.0, 2.9, 2.6, 3.0, 3.3, 3.0, 2.6, 3.0, 3.0, 3.0, 2.6, 2.8, 2.5, 2.9, 2.6, 3.0, 3.0, 3.0, 2.9, 2.6, 2.8, 3.0, 2.6, 2.8, 2.8, 2.9, 2.8."}]}, {"video": "TVSum/videos/J0nA4VgnoCo.mp4", "QA": [{"q": "Go through the video content from tvsum dataset, and upon identifying highlight moments, list their timestamps. Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 26.3, 27.5, 30.7, 31.9, 94.5, 110.1, 172.7, 176.4, 234.6, 242.1, 242.7, 244.6, 248.4, 268.4, 272.1, 272.8, 277.2, 292.2, 300.3, 305.3, 308.4, 314.1, 315.9, 320.3, 320.9, 322.8, 325.3, 328.5, 330.3, 332.8, 338.5, 339.1, 340.3, 342.8, 346.6, 348.5, 355.4, 358.5, 362.2, 364.1, 369.1, 369.7, 374.1, 382.3, 386.0, 410.4, 411.0, 452.3, 453.0, 456.1, 464.2, 466.1, 472.3 seconds. Their saliency scores are 2.6, 2.6, 2.6, 2.6, 3.4, 2.7, 2.4, 2.1, 2.4, 3.0, 3.0, 2.8, 2.8, 2.7, 3.7, 3.7, 2.6, 3.0, 3.2, 2.9, 2.6, 2.6, 2.6, 3.8, 3.8, 3.5, 3.0, 3.0, 3.2, 2.8, 3.4, 3.4, 3.5, 2.9, 2.8, 2.9, 2.8, 2.3, 2.5, 3.0, 2.9, 2.9, 3.0, 2.4, 2.5, 3.2, 3.2, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5."}]}, {"video": "TVSum/videos/4wU_LUjG5Ic.mp4", "QA": [{"q": "In the video presented from tvsum dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 10.0, 11.3, 16.3, 19.4, 20.6, 22.5, 50.0, 51.3, 52.6, 98.2, 104.5, 126.4, 128.3, 128.9, 138.3, 138.9 seconds. Their saliency scores are 3.0, 3.0, 2.6, 2.5, 2.5, 2.1, 2.5, 2.5, 3.4, 2.4, 3.3, 2.2, 2.4, 2.4, 2.5, 2.5."}]}, {"video": "TVSum/videos/VuWGsYPqAX8.mp4", "QA": [{"q": "Go through the video content from tvsum dataset, and upon identifying highlight moments, list their timestamps. Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 0.0, 41.4, 62.4, 85.8, 86.4, 92.4, 93.0, 103.2, 105.0, 108.6, 114.6, 118.2, 124.2, 125.4, 130.2, 142.8, 172.2, 178.8, 181.2, 182.4, 195.0, 212.4, 216.0 seconds. 
Their saliency scores are 2.3, 2.4, 3.5, 2.7, 2.9, 3.0, 3.0, 2.5, 2.2, 2.1, 3.0, 2.5, 3.3, 3.3, 3.2, 2.8, 2.2, 3.0, 2.5, 3.5, 3.5, 3.2, 2.1."}]}, {"video": "TVSum/videos/JKpqYvAdIsw.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by tvsum dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 0.0, 4.2, 4.8, 16.2, 18.0, 48.0, 54.0, 58.2, 90.6, 109.2, 112.2, 116.4, 124.2, 128.4, 140.4 seconds. Their saliency scores are 2.4, 2.4, 2.4, 2.5, 2.7, 2.9, 2.9, 3.0, 2.5, 2.6, 2.2, 2.4, 2.6, 3.0, 2.5."}]}, {"video": "TVSum/videos/xmEERLqJ2kU.mp4", "QA": [{"q": "You are given a video from the tvsum dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 230.2, 234.2, 248.2, 249.2, 252.3, 257.3, 260.3, 260.8, 264.3, 266.3, 267.3, 270.3, 271.3, 272.8, 278.3, 316.3, 326.3, 329.3, 330.3, 334.3, 334.8, 336.8, 338.3, 339.3, 356.4, 368.4, 374.4, 376.4, 378.4, 382.4, 390.4, 396.4, 402.4, 408.4, 412.4, 412.9, 420.4, 440.4 seconds. Their saliency scores are 2.6, 2.7, 2.7, 2.7, 2.9, 2.8, 2.6, 2.6, 2.5, 2.6, 2.6, 2.7, 2.7, 2.4, 2.9, 2.5, 2.9, 2.8, 3.0, 3.2, 3.2, 3.0, 2.9, 2.9, 2.6, 2.5, 3.0, 3.1, 3.0, 3.4, 3.3, 3.2, 3.3, 2.4, 2.6, 2.6, 2.3, 2.5."}]}, {"video": "TVSum/videos/byxOvuiIJV0.mp4", "QA": [{"q": "In the video presented from tvsum dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 12.5, 32.5, 38.2, 40.0, 62.6, 66.3, 71.3, 78.2, 86.3, 98.2, 102.6, 103.9, 104.5, 128.3 seconds. Their saliency scores are 2.5, 2.6, 3.5, 2.4, 2.4, 3.3, 1.6, 2.5, 2.5, 3.1, 2.6, 2.6, 2.9, 3.3."}]}, {"video": "TVSum/videos/_xMr-HKMfVA.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by tvsum dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 18.0, 21.0, 48.0, 60.1, 69.1, 72.1, 80.1, 80.6, 82.6, 84.1, 85.6, 88.1, 94.1, 98.1, 118.6, 120.6 seconds. Their saliency scores are 2.6, 2.5, 2.2, 2.6, 2.5, 2.5, 3.5, 3.5, 2.6, 3.7, 3.7, 3.0, 3.0, 2.7, 2.6, 2.6."}]}, {"video": "TVSum/videos/WxtbjNsCQ8A.mp4", "QA": [{"q": "In the video presented from tvsum dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 58.6, 66.1, 66.6, 67.6, 70.1, 72.1, 74.1, 75.6, 77.6, 79.1, 82.1, 83.1, 88.1, 89.6, 92.1, 93.6, 96.1, 97.6, 98.6, 100.1, 118.1, 119.1, 122.1, 124.1, 140.1, 172.2, 173.2, 192.2, 194.2, 222.2, 232.2, 233.7, 238.2, 241.2 seconds. Their saliency scores are 2.1, 3.2, 3.2, 3.2, 2.8, 2.9, 3.0, 3.0, 2.4, 2.4, 3.0, 3.0, 2.4, 2.4, 2.6, 2.6, 3.0, 3.0, 2.6, 2.4, 2.3, 2.3, 2.5, 3.0, 2.7, 3.1, 3.1, 2.3, 2.4, 2.5, 2.9, 2.9, 3.0, 2.4."}]}, {"video": "TVSum/videos/uGu_10sucQo.mp4", "QA": [{"q": "You are given a video from the tvsum dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. 
The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 31.2, 36.2, 40.6, 64.4, 76.2, 90.6, 98.1, 100.6, 106.2, 107.5, 112.5, 113.8, 118.1, 124.4 seconds. Their saliency scores are 2.4, 2.8, 2.5, 2.5, 2.5, 2.4, 2.6, 3.0, 3.3, 3.3, 2.6, 2.6, 2.5, 2.4."}]}, {"video": "TVSum/videos/EE-bNr36nyA.mp4", "QA": [{"q": "Go through the video content from tvsum dataset, and upon identifying highlight moments, list their timestamps. Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 6.0, 10.5, 20.0, 32.0, 43.5, 46.0, 49.0, 68.1, 80.1, 86.1, 90.1, 94.1 seconds. Their saliency scores are 2.1, 1.5, 1.1, 3.1, 2.7, 2.8, 2.6, 2.5, 2.9, 2.7, 2.4, 2.5."}]}, {"video": "TVSum/videos/Se3oxnaPsz0.mp4", "QA": [{"q": "In the video presented from tvsum dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 39.5, 42.5, 43.5, 46.5, 48.5, 50.5, 52.5, 74.5, 76.5, 86.5, 87.5, 89.5, 98.5, 102.5, 103.5, 106.5, 112.5, 113.5 seconds. Their saliency scores are 1.9, 3.2, 3.2, 2.5, 2.6, 2.5, 3.3, 2.8, 3.0, 3.5, 3.5, 3.3, 2.6, 3.5, 3.5, 2.8, 3.4, 3.4."}]}, {"video": "TVSum/videos/gzDbaEs1Rlg.mp4", "QA": [{"q": "Identify and extract the most emotionally impactful moments from the video provided by tvsum dataset, rating their intensity on a scale from 1 to 5.", "a": "The highlight timestamps are in the 34.2, 40.2, 63.0, 71.4, 92.4, 96.6, 98.4, 104.4, 106.2, 106.8, 109.8, 114.6, 120.6, 123.0, 130.8, 136.2, 138.6, 144.6, 148.2, 150.0, 158.4, 159.6, 163.2, 164.4, 172.2, 175.2, 176.4, 186.6, 204.6, 205.2 seconds. Their saliency scores are 2.7, 1.9, 1.3, 2.7, 2.4, 2.4, 2.5, 2.6, 3.0, 3.0, 2.8, 2.7, 2.5, 2.5, 2.5, 2.6, 2.8, 2.9, 3.2, 3.2, 3.5, 3.5, 2.5, 2.5, 3.0, 2.6, 2.2, 2.5, 2.6, 2.6."}]}, {"video": "TVSum/videos/oDXZc0tZe04.mp4", "QA": [{"q": "From the tvsum dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 2.5, 92.1, 95.1, 112.1, 112.6, 115.6, 152.2, 152.7, 154.2, 156.2, 206.2, 250.2, 254.3, 262.3, 264.3, 266.3, 285.8, 286.3, 294.3, 296.3, 304.3, 305.8, 308.8, 311.3, 334.3, 338.3, 344.3, 345.3, 348.3, 364.9, 368.9, 370.4 seconds. Their saliency scores are 2.9, 2.5, 2.4, 3.0, 3.0, 2.5, 2.5, 2.5, 2.4, 2.4, 3.3, 2.5, 2.6, 2.2, 2.4, 2.3, 2.2, 2.4, 3.0, 2.6, 3.0, 3.0, 2.6, 2.5, 2.9, 2.6, 2.2, 2.2, 2.2, 2.5, 2.6, 2.1."}]}, {"video": "TVSum/videos/qqR6AEXwxoQ.mp4", "QA": [{"q": "In the video presented from tvsum dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 56.1, 57.1, 58.6, 59.6, 60.1, 74.6, 80.1, 82.1, 83.1, 83.6, 85.1, 92.1, 94.1, 98.1, 119.1, 122.1, 148.1, 149.6, 150.7, 152.2, 152.7, 153.2, 153.7, 154.2, 155.2, 156.2, 157.2, 157.7, 158.2, 159.2, 160.2, 161.2, 162.2, 163.7, 166.2, 166.7, 167.2, 168.2, 168.7, 169.7, 170.7, 172.7, 173.7, 174.7, 176.2, 177.2, 240.2, 240.7, 244.2, 245.2 seconds. 
Their saliency scores are 4.0, 4.0, 2.4, 2.4, 2.7, 2.2, 2.5, 4.5, 4.5, 4.5, 2.5, 2.1, 2.2, 2.1, 2.5, 2.2, 3.5, 3.5, 2.9, 3.1, 3.1, 3.1, 3.1, 3.4, 3.4, 3.9, 3.9, 3.9, 3.0, 3.0, 2.9, 2.9, 2.3, 2.3, 2.6, 2.6, 2.6, 4.2, 4.2, 4.2, 2.5, 2.5, 2.5, 2.1, 2.2, 2.2, 2.5, 2.5, 2.5, 2.5."}]}, {"video": "TVSum/videos/EYqVtI9YWJA.mp4", "QA": [{"q": "In the video presented from tvsum dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 20.0, 21.5, 24.0, 34.5, 35.5, 46.0, 98.1, 99.6, 114.1, 132.6, 134.1, 134.6, 138.1, 144.1, 144.6, 160.2, 161.7, 165.2, 170.2, 170.7, 176.2, 177.2, 179.2, 180.7, 188.2 seconds. Their saliency scores are 2.7, 2.7, 2.7, 2.7, 2.7, 2.6, 3.3, 3.3, 3.0, 2.6, 2.7, 2.7, 2.8, 3.5, 3.5, 3.0, 3.0, 2.9, 3.0, 3.0, 3.3, 3.3, 3.2, 3.0, 2.8."}]}, {"video": "TVSum/videos/eQu1rNs0an0.mp4", "QA": [{"q": "Watch the provided video from the tvsum dataset and mark out the timestamps with stand-out visual content. Document the timestamps of these highlights and evaluate their saliency scores.", "a": "The highlight timestamps are in the 4.5, 6.0, 7.5, 12.0, 14.0, 17.0, 22.0, 48.0, 50.1, 52.6, 65.6, 67.6, 68.6, 85.1, 100.1, 106.1, 109.1, 110.6, 136.1, 138.1, 139.1 seconds. Their saliency scores are 2.8, 2.1, 2.1, 3.3, 1.9, 2.5, 2.8, 2.7, 3.0, 2.4, 3.0, 2.9, 2.9, 2.0, 2.5, 2.8, 2.8, 2.7, 2.9, 3.2, 3.2."}]}, {"video": "TVSum/videos/JgHubY5Vw3Y.mp4", "QA": [{"q": "From the tvsum dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 0.0, 12.5, 54.1, 55.1, 57.1, 72.1, 80.1, 96.1, 100.1, 102.1, 107.6 seconds. Their saliency scores are 1.9, 3.0, 2.4, 2.4, 2.3, 2.4, 2.6, 3.5, 3.5, 3.8, 3.7."}]}, {"video": "TVSum/videos/iVt07TCkFM0.mp4", "QA": [{"q": "From the tvsum dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 8.8, 11.3, 14.4, 52.6, 58.8, 66.3, 68.2, 72.6, 76.3 seconds. Their saliency scores are 3.0, 2.5, 2.7, 3.2, 2.9, 2.9, 3.3, 3.2, 2.8."}]}, {"video": "TVSum/videos/E11zDS9XGzg.mp4", "QA": [{"q": "From the tvsum dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 40.5, 43.0, 90.5, 92.5, 112.5, 113.0, 119.5, 124.0, 128.5, 130.5, 133.0, 148.5, 158.5, 162.5, 163.5, 166.0, 170.5, 174.5, 178.5, 248.5, 251.0, 253.0, 256.5, 258.0, 260.5, 262.5, 264.0, 266.0, 268.5, 270.5, 274.5, 288.5, 290.5, 304.5, 306.5, 307.0, 312.5, 318.5, 320.5, 322.5, 325.5, 378.5, 388.5, 391.5, 395.5, 422.5, 428.5, 430.0, 444.5, 445.0, 446.5, 456.5 seconds. 
Their saliency scores are 3.2, 2.4, 2.5, 2.6, 3.3, 3.3, 2.1, 2.2, 2.4, 2.5, 2.4, 2.3, 2.8, 3.0, 3.0, 2.5, 2.6, 2.5, 2.4, 3.3, 3.0, 2.3, 2.4, 2.4, 2.5, 2.9, 2.9, 2.5, 2.5, 2.8, 2.2, 2.5, 2.5, 2.6, 2.7, 2.7, 2.2, 2.8, 3.1, 2.5, 2.3, 2.5, 2.8, 2.6, 2.2, 2.4, 2.3, 2.3, 2.5, 2.5, 2.7, 2.5."}]}, {"video": "TVSum/videos/NyBmCxDoHJU.mp4", "QA": [{"q": "Go through the video content from tvsum dataset, and upon identifying highlight moments, list their timestamps. Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 13.2, 20.4, 42.6, 43.2, 60.6, 66.6, 72.6, 106.2, 114.6, 115.8, 118.2, 118.8, 136.2, 138.6, 176.4 seconds. Their saliency scores are 2.3, 3.1, 3.0, 3.0, 2.6, 2.9, 2.5, 2.8, 2.9, 2.9, 3.0, 3.0, 2.6, 2.9, 2.8."}]}, {"video": "TVSum/videos/kLxoNp-UchI.mp4", "QA": [{"q": "In the video presented from tvsum dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 12.0, 22.5, 26.0, 50.1, 58.1, 84.1, 96.1, 101.1, 106.1, 118.1 seconds. Their saliency scores are 3.0, 2.5, 2.7, 2.7, 2.0, 2.7, 2.6, 2.7, 3.0, 2.7."}]}, {"video": "TVSum/videos/jcoYJXDG9sw.mp4", "QA": [{"q": "In the video presented from tvsum dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 28.0, 41.0, 51.1, 54.1, 82.1, 89.1, 90.1, 92.1, 93.6, 94.6, 96.1, 97.1, 98.6, 99.6, 102.1, 102.6, 108.1, 108.6, 122.6, 125.6, 128.1, 129.6, 159.2, 164.2, 182.2 seconds. Their saliency scores are 2.4, 2.1, 1.9, 2.4, 2.1, 2.7, 3.5, 3.0, 3.0, 2.7, 3.8, 3.8, 3.2, 3.2, 3.3, 3.3, 2.8, 2.8, 2.9, 2.9, 2.9, 2.9, 3.0, 3.0, 2.3."}]}, {"video": "TVSum/videos/XzYM3PfTM4w.mp4", "QA": [{"q": "From the tvsum dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 18.5, 20.0, 22.0, 32.5, 42.0, 43.5, 45.0, 50.1, 51.1, 52.1, 56.1, 68.6 seconds. Their saliency scores are 2.6, 2.9, 2.9, 2.9, 2.7, 2.7, 2.6, 2.8, 2.8, 3.2, 3.2, 2.8."}]}, {"video": "TVSum/videos/_esJrBWj2d8.mp4", "QA": [{"q": "You are given a video from the tvsum dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 4.5, 18.5, 27.0, 36.5, 44.5, 48.5, 51.0, 56.5, 58.5, 66.5, 68.5, 75.5, 112.5, 115.5, 116.5, 120.0, 122.5, 126.5, 127.0, 206.5, 215.0 seconds. Their saliency scores are 2.4, 2.5, 2.0, 2.2, 3.0, 3.0, 2.7, 2.6, 2.3, 2.9, 3.3, 3.1, 3.0, 2.4, 2.7, 2.4, 2.1, 2.6, 2.6, 2.2, 2.4."}]}, {"video": "TVSum/videos/HT5vyqe0Xaw.mp4", "QA": [{"q": "From the tvsum dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. 
The output should be a list of timestamps in seconds and their corresponding salient scores", "a": "The highlight timestamps are in the 39.0, 44.0, 54.6, 58.1, 64.1, 65.1, 66.1, 67.1, 82.1, 84.1, 88.6, 90.1, 110.1, 148.1, 152.2, 160.2, 161.7, 163.2, 164.2, 165.2, 168.2, 169.7, 258.3, 276.3, 280.3, 284.3, 300.3 seconds. Their saliency scores are 2.5, 2.7, 2.3, 2.6, 3.0, 3.0, 3.0, 3.0, 2.8, 2.9, 2.3, 2.0, 2.5, 3.0, 2.2, 4.0, 4.0, 2.4, 2.9, 2.9, 4.0, 4.0, 2.4, 2.5, 2.4, 2.9, 2.6."}]}, {"video": "TVSum/videos/sTEELN-vY30.mp4", "QA": [{"q": "You are given a video from the tvsum dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'.", "a": "The highlight timestamps are in the 0.0, 0.5, 10.5, 11.5, 13.5, 45.0, 66.5, 68.5, 70.5, 100.5, 120.5, 130.5, 132.0 seconds. Their saliency scores are 2.9, 2.9, 4.3, 4.3, 4.0, 2.6, 2.8, 3.0, 3.5, 2.2, 2.9, 3.2, 3.2."}]}, {"video": "TVSum/videos/vdmoEJ5YbrQ.mp4", "QA": [{"q": "Go through the video content from tvsum dataset, and upon identifying highlight moments, list their timestamps. Subsequently, provide a saliency score for each identified highlight.", "a": "The highlight timestamps are in the 0.0, 32.0, 76.1, 77.1, 80.1, 84.1, 87.6, 93.6, 104.1, 106.1, 107.1, 109.1, 110.6, 112.1, 115.1, 123.1, 126.1, 127.6, 132.1, 133.1, 136.6, 140.1, 143.6, 144.6, 148.1, 150.2, 150.7, 153.7, 156.7, 168.2, 178.2, 184.2, 186.2, 190.2, 204.7 seconds. Their saliency scores are 2.2, 2.6, 2.1, 2.1, 2.0, 2.7, 2.2, 2.1, 2.9, 2.9, 2.9, 2.7, 2.5, 2.2, 2.0, 2.0, 2.3, 2.3, 3.2, 3.2, 2.4, 3.2, 3.2, 2.5, 2.5, 3.1, 3.1, 3.0, 2.0, 2.2, 2.4, 2.4, 2.5, 2.0, 2.0."}]}, {"video": "TVSum/videos/xwqBXPGE9pQ.mp4", "QA": [{"q": "In the video presented from tvsum dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.", "a": "The highlight timestamps are in the 20.5, 30.0, 30.5, 33.5, 58.6, 59.6, 72.1, 73.1, 76.1, 76.6, 80.1, 84.1, 85.6, 87.6, 106.6, 120.1, 124.1, 124.6, 125.1, 128.1, 130.1, 161.7, 166.2, 170.2, 174.2, 209.2, 210.2, 224.2, 225.7 seconds. Their saliency scores are 3.2, 3.3, 3.3, 3.2, 2.9, 2.9, 3.0, 3.0, 2.5, 2.5, 2.8, 2.6, 2.6, 2.5, 2.8, 2.5, 3.7, 3.7, 3.7, 3.6, 2.0, 2.4, 2.6, 2.5, 2.5, 2.4, 2.2, 2.5, 2.5."}]}]