aps committed on
Commit
4ec2c36
0 Parent(s):

Init ego4d

Browse files
Files changed (3) hide show
  1. .gitattributes +37 -0
  2. dataset_infos.json +1 -0
  3. ego4d.py +136 -0
.gitattributes ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.model filter=lfs diff=lfs merge=lfs -text
11
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
12
+ *.onnx filter=lfs diff=lfs merge=lfs -text
13
+ *.ot filter=lfs diff=lfs merge=lfs -text
14
+ *.parquet filter=lfs diff=lfs merge=lfs -text
15
+ *.pb filter=lfs diff=lfs merge=lfs -text
16
+ *.pt filter=lfs diff=lfs merge=lfs -text
17
+ *.pth filter=lfs diff=lfs merge=lfs -text
18
+ *.rar filter=lfs diff=lfs merge=lfs -text
19
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
20
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
21
+ *.tflite filter=lfs diff=lfs merge=lfs -text
22
+ *.tgz filter=lfs diff=lfs merge=lfs -text
23
+ *.wasm filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
28
+ # Audio files - uncompressed
29
+ *.pcm filter=lfs diff=lfs merge=lfs -text
30
+ *.sam filter=lfs diff=lfs merge=lfs -text
31
+ *.raw filter=lfs diff=lfs merge=lfs -text
32
+ # Audio files - compressed
33
+ *.aac filter=lfs diff=lfs merge=lfs -text
34
+ *.flac filter=lfs diff=lfs merge=lfs -text
35
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
36
+ *.ogg filter=lfs diff=lfs merge=lfs -text
37
+ *.wav filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
1
+ {"default": {"description": "EGO4D is the world's largest egocentric (first person) video ML dataset and benchmark suite, with 3,600 hrs (and counting) of densely narrated video and a wide range of annotations across five new benchmark tasks. It covers hundreds of scenarios (household, outdoor, workplace, leisure, etc.) of daily life activity captured in-the-wild by 926 unique camera wearers from 74 worldwide locations and 9 different countries. Portions of the video are accompanied by audio, 3D meshes of the environment, eye gaze, stereo, and/or synchronized videos from multiple egocentric cameras at the same event. The approach to data collection was designed to uphold rigorous privacy and ethics standards with consenting participants and robust de-identification procedures where relevant.\n", "citation": "\n@article{Ego4D2021,\n author={Grauman, Kristen and Westbury, Andrew and Byrne, Eugene and Chavis, Zachary and Furnari, Antonino and Girdhar, Rohit and Hamburger, Jackson and Jiang, Hao and Liu, Miao and Liu, Xingyu and Martin, Miguel and Nagarajan, Tushar and Radosavovic, Ilija and Ramakrishnan, Santhosh Kumar and Ryan, Fiona and Sharma, Jayant and Wray, Michael and Xu, Mengmeng and Xu, Eric Zhongcong and Zhao, Chen and Bansal, Siddhant and Batra, Dhruv and Cartillier, Vincent and Crane, Sean and Do, Tien and Doulaty, Morrie and Erapalli, Akshay and Feichtenhofer, Christoph and Fragomeni, Adriano and Fu, Qichen and Fuegen, Christian and Gebreselasie, Abrham and Gonzalez, Cristina and Hillis, James and Huang, Xuhua and Huang, Yifei and Jia, Wenqi and Khoo, Weslie and Kolar, Jachym and Kottur, Satwik and Kumar, Anurag and Landini, Federico and Li, Chao and Li, Yanghao and Li, Zhenqiang and Mangalam, Karttikeya and Modhugu, Raghava and Munro, Jonathan and Murrell, Tullie and Nishiyasu, Takumi and Price, Will and Puentes, Paola Ruiz and Ramazanova, Merey and Sari, Leda and Somasundaram, Kiran and Southerland, Audrey and Sugano, Yusuke and Tao, Ruijie and Vo, 
Minh and Wang, Yuchen and Wu, Xindi and Yagi, Takuma and Zhu, Yunyi and Arbelaez, Pablo and Crandall, David and Damen, Dima and Farinella, Giovanni Maria and Ghanem, Bernard and Ithapu, Vamsi Krishna and Jawahar, C. V. and Joo, Hanbyul and Kitani, Kris and Li, Haizhou and Newcombe, Richard and Oliva, Aude and Park, Hyun Soo and Rehg, James M. and Sato, Yoichi and Shi, Jianbo and Shou, Mike Zheng and Torralba, Antonio and Torresani, Lorenzo and Yan, Mingfei and Malik, Jitendra},\n title = {Ego4D: Around the {W}orld in 3,000 {H}ours of {E}gocentric {V}ideo},\n journal = {CoRR},\n volume = {abs/2110.07058},\n year = {2021},\n url = {https://arxiv.org/abs/2110.07058},\n eprinttype = {arXiv},\n eprint = {2110.07058}\n}\n", "homepage": "", "license": "", "features": {"video_id": {"dtype": "string", "id": null, "_type": "Value"}, "video": {"dtype": "string", "id": null, "_type": "Value"}, "pass_1_narrations": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pass_1_narrations_timings": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pass_1_narrations_frames": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pass_2_narrations": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pass_2_narrations_timings": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pass_2_narrations_frames": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pass_1_summaries": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pass_1_summaries_timings": {"feature": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": 
"Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "pass_2_summaries": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pass_2_summaries_timings": {"feature": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ego4_d", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 260135850, "num_examples": 8584, "dataset_name": "ego4_d"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 260135850, "size_in_bytes": 260135850}}
ego4d.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
# Lint as: python3
"""A massive-scale, egocentric dataset and benchmark suite collected across 74 worldwide locations and 9 countries, with over 3,670 hours of daily-life activity video."""


import json
import os

import datasets

# NOTE(review): the original script imported `CHARADES_CLASSES` from a sibling
# `.classes` module — a copy-paste leftover from a Charades loading script.
# `CHARADES_CLASSES` is never used in this file, and no `classes.py` exists in
# this repository, so the import raised ImportError and broke dataset loading.
# It has been removed.

# BibTeX citation for the Ego4D paper; surfaced via `DatasetInfo.citation`.
_CITATION = """
@article{Ego4D2021,
author={Grauman, Kristen and Westbury, Andrew and Byrne, Eugene and Chavis, Zachary and Furnari, Antonino and Girdhar, Rohit and Hamburger, Jackson and Jiang, Hao and Liu, Miao and Liu, Xingyu and Martin, Miguel and Nagarajan, Tushar and Radosavovic, Ilija and Ramakrishnan, Santhosh Kumar and Ryan, Fiona and Sharma, Jayant and Wray, Michael and Xu, Mengmeng and Xu, Eric Zhongcong and Zhao, Chen and Bansal, Siddhant and Batra, Dhruv and Cartillier, Vincent and Crane, Sean and Do, Tien and Doulaty, Morrie and Erapalli, Akshay and Feichtenhofer, Christoph and Fragomeni, Adriano and Fu, Qichen and Fuegen, Christian and Gebreselasie, Abrham and Gonzalez, Cristina and Hillis, James and Huang, Xuhua and Huang, Yifei and Jia, Wenqi and Khoo, Weslie and Kolar, Jachym and Kottur, Satwik and Kumar, Anurag and Landini, Federico and Li, Chao and Li, Yanghao and Li, Zhenqiang and Mangalam, Karttikeya and Modhugu, Raghava and Munro, Jonathan and Murrell, Tullie and Nishiyasu, Takumi and Price, Will and Puentes, Paola Ruiz and Ramazanova, Merey and Sari, Leda and Somasundaram, Kiran and Southerland, Audrey and Sugano, Yusuke and Tao, Ruijie and Vo, Minh and Wang, Yuchen and Wu, Xindi and Yagi, Takuma and Zhu, Yunyi and Arbelaez, Pablo and Crandall, David and Damen, Dima and Farinella, Giovanni Maria and Ghanem, Bernard and Ithapu, Vamsi Krishna and Jawahar, C. V. and Joo, Hanbyul and Kitani, Kris and Li, Haizhou and Newcombe, Richard and Oliva, Aude and Park, Hyun Soo and Rehg, James M. and Sato, Yoichi and Shi, Jianbo and Shou, Mike Zheng and Torralba, Antonio and Torresani, Lorenzo and Yan, Mingfei and Malik, Jitendra},
title = {Ego4D: Around the {W}orld in 3,000 {H}ours of {E}gocentric {V}ideo},
journal = {CoRR},
volume = {abs/2110.07058},
year = {2021},
url = {https://arxiv.org/abs/2110.07058},
eprinttype = {arXiv},
eprint = {2110.07058}
}
"""

# Human-readable dataset summary; surfaced via `DatasetInfo.description`.
_DESCRIPTION = """\
EGO4D is the world's largest egocentric (first person) video ML dataset and benchmark suite, with 3,600 hrs (and counting) of densely narrated video and a wide range of annotations across five new benchmark tasks. It covers hundreds of scenarios (household, outdoor, workplace, leisure, etc.) of daily life activity captured in-the-wild by 926 unique camera wearers from 74 worldwide locations and 9 different countries. Portions of the video are accompanied by audio, 3D meshes of the environment, eye gaze, stereo, and/or synchronized videos from multiple egocentric cameras at the same event. The approach to data collection was designed to uphold rigorous privacy and ethics standards with consenting participants and robust de-identification procedures where relevant.
"""
46
class Ego4D(datasets.GeneratorBasedBuilder):
    """A massive-scale, egocentric dataset and benchmark suite collected across 74 worldwide locations and 9 countries, with over 3,670 hours of daily-life activity video."""

    BUILDER_CONFIGS = [datasets.BuilderConfig(name="default")]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return dataset metadata: feature schema, description and citation.

        The schema must stay in sync with the pre-computed `dataset_infos.json`
        shipped alongside this script.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_id": datasets.Value("string"),
                    # Absolute path to the full-scale video file on disk.
                    "video": datasets.Value("string"),
                    # Consistently use `datasets.Sequence` (the original mixed
                    # `datasets.features.Sequence` and `datasets.Sequence`,
                    # which are the same class exposed under two paths).
                    "pass_1_narrations": datasets.Sequence(datasets.Value("string")),
                    "pass_1_narrations_timings": datasets.Sequence(datasets.Value("float32")),
                    "pass_1_narrations_frames": datasets.Sequence(datasets.Value("string")),
                    "pass_2_narrations": datasets.Sequence(datasets.Value("string")),
                    "pass_2_narrations_timings": datasets.Sequence(datasets.Value("float32")),
                    "pass_2_narrations_frames": datasets.Sequence(datasets.Value("string")),
                    "pass_1_summaries": datasets.Sequence(datasets.Value("string")),
                    # Each summary timing is a [start_sec, end_sec] pair.
                    "pass_1_summaries_timings": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                    "pass_2_summaries": datasets.Sequence(datasets.Value("string")),
                    "pass_2_summaries_timings": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    @property
    def manual_download_instructions(self):
        """Instructions shown to users when the manually-downloaded data is missing."""
        return (
            "To use Ego4D, you must download the data as specified in the instructions on Ego4D website: "
            "https://ego4d-data.org/docs/start-here/. Please call the dataset loading script with output "
            "directory used to generate data from CLI like `load_dataset('ego4d', data_dir='/path/to/ego4d_data')` "
        )

    def _split_generators(self, dl_manager):
        """Build the single TRAIN split from the manually-downloaded Ego4D tree.

        Expects the layout produced by the Ego4D CLI:
            <data_dir>/v1/full_scale/*.mp4
            <data_dir>/v1/annotations/narration.json
        """
        data_dir = dl_manager.manual_dir
        # Fail early with the manual-download instructions. The original code
        # assumed `manual_dir` was always set and valid, so a missing
        # `data_dir=` argument surfaced as an opaque TypeError in os.path.join
        # and `manual_download_instructions` was never shown to the user.
        if data_dir is None or not os.path.isdir(data_dir):
            raise FileNotFoundError(
                f"{data_dir} does not exist. {self.manual_download_instructions}"
            )
        videos_dir = os.path.join(data_dir, "v1", "full_scale")
        narrations = os.path.join(data_dir, "v1", "annotations", "narration.json")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "videos_folder": videos_dir,
                    "narrations": narrations,
                },
            ),
        ]

    def _generate_examples(self, videos_folder, narrations):
        """Yield (index, example) pairs, one per fully-narrated video.

        Args:
            videos_folder: directory containing the full-scale video files.
            narrations: path to the `narration.json` annotation file.
        """
        # Avoid shadowing the `narrations` path parameter with the parsed dict.
        with open(narrations, "r", encoding="utf-8") as narrations_file:
            all_narrations = json.load(narrations_file)

        idx = 0
        # sorted() makes the example order (and hence idx assignment)
        # deterministic; os.listdir order is filesystem-dependent.
        for video_file in sorted(os.listdir(videos_folder)):
            video = os.path.join(videos_folder, video_file)
            video_id = os.path.splitext(video_file)[0]
            if video_id not in all_narrations:
                continue
            current_narrations = all_narrations[video_id]
            # Only keep videos whose narration annotation is complete.
            if current_narrations["status"] != "complete":
                continue

            pass_1 = current_narrations["narration_pass_1"]
            pass_1_narrations = pass_1["narrations"]
            pass_1_summaries = pass_1["summaries"]

            pass_2 = current_narrations["narration_pass_2"]
            pass_2_narrations = pass_2["narrations"]
            pass_2_summaries = pass_2["summaries"]
            yield idx, {
                "video_id": video_id,
                "video": video,
                "pass_1_narrations": [narration["narration_text"] for narration in pass_1_narrations],
                "pass_1_narrations_timings": [narration["timestamp_sec"] for narration in pass_1_narrations],
                "pass_1_narrations_frames": [narration["timestamp_frame"] for narration in pass_1_narrations],
                "pass_1_summaries": [summary["summary_text"] for summary in pass_1_summaries],
                "pass_1_summaries_timings": [[summary["start_sec"], summary["end_sec"]] for summary in pass_1_summaries],
                "pass_2_narrations": [narration["narration_text"] for narration in pass_2_narrations],
                "pass_2_narrations_timings": [narration["timestamp_sec"] for narration in pass_2_narrations],
                "pass_2_narrations_frames": [narration["timestamp_frame"] for narration in pass_2_narrations],
                "pass_2_summaries": [summary["summary_text"] for summary in pass_2_summaries],
                "pass_2_summaries_timings": [[summary["start_sec"], summary["end_sec"]] for summary in pass_2_summaries],
            }
            idx += 1