ShuhuaiRen committed
Commit: 7457806
Parent(s): 1f75968

upload data
Files changed:
- TimeIT.py (+13 -13)
- data/dense_video_captioning/dense_video_captioning_instructions.json (+8 -0)
- data/step_localization/step_localization_instructions.json (+8 -0)
- data/temporal_video_grounding/temporal_video_grounding_instructions.json (+8 -0)
- data/video_highlight_detection/video_highlight_detection_instructions.json (+8 -0)
- data/video_summarization/video_summarization_instructions.json (+8 -0)
TimeIT.py
CHANGED
@@ -25,55 +25,55 @@ _SEED = 1234  # for deterministic random sampling
 _URLS = {
     "charades": {
         "train": "./data/temporal_video_grounding/charades/instruct_tvg_12.4k_charades.json",
-        "instruction": "",
+        "instruction": "./data/temporal_video_grounding/temporal_video_grounding_instructions.json",
     },
     "didemo": {
         "train": "./data/temporal_video_grounding/didemo/instruct_tvg_33.0k_didemo.json",
-        "instruction": "",
+        "instruction": "./data/temporal_video_grounding/temporal_video_grounding_instructions.json",
     },
     "queryd": {
         "train": "./data/temporal_video_grounding/queryd/instruct_tvg_14.6k_queryd.json",
-        "instruction": "",
+        "instruction": "./data/temporal_video_grounding/temporal_video_grounding_instructions.json",
     },
     "hirest_grounding": {
         "train": "./data/temporal_video_grounding/hirest/instruct_tvg_0.5k_hirest.json",
-        "instruction": "",
+        "instruction": "./data/temporal_video_grounding/temporal_video_grounding_instructions.json",
     },
     "qvhighlights": {
         "train": "./data/video_highlight_detection/qvhighlights/instruct_vhd_6.9k_qvhighlights.json",
-        "instruction": "",
+        "instruction": "./data/video_highlight_detection/video_highlight_detection_instructions.json",
     },
     "youcook2": {
         "train": "./data/dense_video_captioning/youcook2/instruct_dvc_1.2k_youcook2.json",
-        "instruction": "",
+        "instruction": "./data/dense_video_captioning/dense_video_captioning_instructions.json",
     },
     "anet": {
         "train": "./data/dense_video_captioning/anet/instruct_dvc_10.0k_anet.json",
-        "instruction": "",
+        "instruction": "./data/dense_video_captioning/dense_video_captioning_instructions.json",
     },
     "vitt": {
         "train": "./data/dense_video_captioning/vitt/instruct_dvc_5.1k_vitt.json",
-        "instruction": "",
+        "instruction": "./data/dense_video_captioning/dense_video_captioning_instructions.json",
     },
     "tvsum": {
         "train": "./data/video_summarization/tvsum/instruct_vhd_50_tvsum.json",
-        "instruction": "",
+        "instruction": "./data/video_summarization/video_summarization_instructions.json",
     },
     "summe": {
         "train": "./data/video_summarization/summe/instruct_vhd_50_tvsum.json",
-        "instruction": "",
+        "instruction": "./data/video_summarization/video_summarization_instructions.json",
     },
     "coin": {
         "train": "./data/step_localization/coin/instruct_action_9.0k_coin.json",
-        "instruction": "",
+        "instruction": "./data/step_localization/step_localization_instructions.json",
     },
     "hirest_step": {
         "train": "./data/step_localization/hirest_step/instruct_action_0.5k_hirest.json",
-        "instruction": "",
+        "instruction": "./data/step_localization/step_localization_instructions.json",
     },
     "yttemporal": {
         "train": "./data/transcribed_speech_generation/yttemporal/instruct_tsp_31.6k_yttemporal.json",
-        "instruction": "",
+        "instruction": "./data/transcribed_speech_generation/transcribed_speech_generation.json",
     },
 }
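The effect of this hunk: every config's "instruction" entry now points at a shared, task-level instruction pool instead of an empty string. Below is a minimal sketch of how a loader could combine a config's "train" file with its "instruction" file; the helper name attach_instructions and the record layout are illustrative assumptions, not the actual code in TimeIT.py.

import json
import random

_SEED = 1234  # same deterministic-sampling seed as TimeIT.py

def attach_instructions(train_path, instruction_path):
    # Hypothetical helper: pairs each training record with a
    # deterministically sampled instruction template.
    with open(instruction_path, "r", encoding="utf-8") as f:
        # Instruction files map string indices ("0".."5") to templates.
        instruction_pool = list(json.load(f).values())
    rng = random.Random(_SEED)  # seeded, so sampling is reproducible
    with open(train_path, "r", encoding="utf-8") as f:
        records = json.load(f)  # assumed to be a list of dicts
    for record in records:
        record["instruction"] = rng.choice(instruction_pool)
    return records

Seeding random.Random with _SEED matches the script's stated intent of deterministic random sampling.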
data/dense_video_captioning/dense_video_captioning_instructions.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "0": "Localize a series of activity events in the video, output the start and end timestamp for each event, and describe each event with sentences. The output format of each predicted event should be like: 'start - end seconds, event description'. A specific example is: ' 90 - 102 seconds, spread margarine on two slices of white bread in the video'.",
+    "1": "Determine the start and end times of various activity events in the video, accompanied by descriptions.",
+    "2": "Capture and describe the activity events in the given video, specifying their respective time intervals, and outputting the time intervals in the 'start - end seconds format'.",
+    "3": "Identify, timestamp, and describe various activity events occurring in the video. The timestamp should include the start time and end time in seconds.",
+    "4": "Detect and report the start and end timestamps of activity events in the video, along with descriptions.",
+    "5": "Pinpoint the time intervals of activity events in the video, and provide detailed descriptions for each event."
+}
data/step_localization/step_localization_instructions.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "0": "Localize a series of action steps in the given video, output a start and end timestamp for each step, and briefly describe the step. ",
+    "1": "Locate and describe a series of actions or steps in the video, including their start and end timestamps.",
+    "2": "Identify and mark the video segments corresponding to a series of actions or steps, specifying the timestamps and describing the steps.",
+    "3": "Find, identify, and determine the temporal boundaries of a series of distinct actions or steps occurring throughout the video. For each action, output the corresponding start and end timestamps, accompanied by a concise description.",
+    "4": "Identify and localize a series of steps or actions occurring in the video, providing start and end timestamps and related descriptions.",
+    "5": "Locate and pinpoint a sequential series of specific actions or steps in the video, accurately specifying the start and end timestamps for each action. Additionally, provide a succinct description of each action."
+}
data/temporal_video_grounding/temporal_video_grounding_instructions.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "0": "Localize the visual content described by the given textual query <query_placeholder> in the video, and output the start and end timestamps in seconds.",
+    "1": "Detect and report the start and end timestamps of the video segment that semantically matches the given textual query <query_placeholder>.",
+    "2": "Give you a textual query: <query_placeholder> When does the described content occur in the video? Please return the timestamp in seconds.",
+    "3": "Locate and describe the visual content mentioned in the text query <query_placeholder> within the video, including timestamps.",
+    "4": "The given natural language query <query_placeholder> is semantically aligned with a video moment, please give the start time and end time of the video moment.",
+    "5": "Find the video segment that corresponds to the given textual query <query_placeholder> and determine its start and end seconds."
+}
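The grounding templates embed a literal <query_placeholder> token, which is presumably replaced with each example's text query at load time. A hedged sketch of that substitution; build_prompt and the sample query are invented for illustration:

import json
import random

def build_prompt(instruction_path, query, seed=1234):
    # <query_placeholder> is the literal token used in these templates.
    with open(instruction_path, "r", encoding="utf-8") as f:
        templates = list(json.load(f).values())
    template = random.Random(seed).choice(templates)
    return template.replace("<query_placeholder>", query)

# Hypothetical usage; the query string is made up:
prompt = build_prompt(
    "./data/temporal_video_grounding/temporal_video_grounding_instructions.json",
    "a person opens the refrigerator door",
)
print(prompt)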
data/video_highlight_detection/video_highlight_detection_instructions.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "0": "You are given a video from the QVHighlights dataset. Please find the highlight contents in the video described by a sentence query, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 seconds. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'. Now I will give you the sentence query: <query_placeholder>. Please return the query-based highlight timestamps and salient scores.",
+    "1": "Watch the provided video and mark out the scenes that stand out based on the description: <query_placeholder>. Document the timestamps of these highlights and evaluate their saliency scores.",
+    "2": "Perform a thorough review of the video content, extracting key highlight moments that align with <query_placeholder>. It is essential to record the times of these moments and assign a distinct saliency value to each.",
+    "3": "Examine the video and, in accordance with query <query_placeholder>, highlight the standout moments. You're required to provide the exact timing alongside a saliency rating for each segment.",
+    "4": "In the video presented, seek moments that are a perfect match with <query_placeholder>. It's vital to notate their timestamps and to score each based on their level of saliency.",
+    "5": "Go through the video content, and upon identifying highlight moments that resonate with <query_placeholder>, list their timestamps. Subsequently, provide a saliency score for each identified highlight."
+}
data/video_summarization/video_summarization_instructions.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "0": "From the <dataset_placeholder> dataset, generate a summarized version of the video, focusing on extracting key frames that best represent the overall narrative. The output should be a list of timestamps in seconds and their corresponding salient scores",
+    "1": "You are given a video from the <dataset_placeholder> dataset. Please find the highlight contents in the video, determining the highlight timestamps and its saliency score on a scale from 1 to 5. The output format should be like: 'The highlight timestamps are in the 82, 84, 86, 88, 90, 92, 94, 96, 98, 100 second. Their saliency scores are 1.3, 1.7, 1.7, 1.7, 1.7, 1.3, 1.7, 2.3, 2.3, 2.3'. ",
+    "2": "Identify and extract the most emotionally impactful moments from the video provided by <dataset_placeholder> dataset, rating their intensity on a scale from 1 to 5.",
+    "3": "Watch the provided video from the <dataset_placeholder> dataset and mark out the timestamps with stand-out visual content. Document the timestamps of these highlights and evaluate their saliency scores.",
+    "4": "In the video presented from <dataset_placeholder> dataset, seek moments that could serve as an executive summary for a busy stakeholder. It's vital to notate their timestamps and to score each based on their level of saliency.",
+    "5": "Go through the video content from <dataset_placeholder> dataset, and upon identifying highlight moments, list their timestamps. Subsequently, provide a saliency score for each identified highlight."
+}
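With the instruction pools uploaded, each task config should be loadable through the script by name. A tentative usage example; the repo id ShuhuaiRen/TimeIT is inferred from the committer and script name rather than stated in this commit, so verify it before use:

from datasets import load_dataset

# "charades" is one of the config names registered in _URLS above.
ds = load_dataset("ShuhuaiRen/TimeIT", "charades", trust_remote_code=True)
print(ds["train"][0])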