VictorSanh committed
Commit 5c14b2a • Parent(s): c954cad

include uda subset

epic_kitchens_100.py CHANGED (+49 -24)
@@ -63,6 +63,7 @@ _URL_BASE = "https://raw.githubusercontent.com/epic-kitchens/epic-kitchens-100-a
_VARIANTS = [
    "action_recognition",  # This split is used by four challenges: Action Recognition, Weakly supervised action recognition, Action detection, Action anticipation
    "multi_instance_retrieval",
+   "unsupervised_domain_adaptation",
]
class EpicKitchens100(datasets.GeneratorBasedBuilder):
    """Epic Kitchens"""

@@ -110,36 +111,59 @@ class EpicKitchens100(datasets.GeneratorBasedBuilder):
            "multi_instance_retrieval": {
                "train": os.path.join(_URL_BASE, "retrieval_annotations/EPIC_100_retrieval_train.csv"),
                "test": os.path.join(_URL_BASE, "retrieval_annotations/EPIC_100_retrieval_test.csv")
+           },
+           "unsupervised_domain_adaptation": {
+               "source_train": os.path.join(_URL_BASE, "UDA_annotations/EPIC_100_uda_source_train.csv"),
+               "target_train": os.path.join(_URL_BASE, "UDA_annotations/EPIC_100_uda_target_train_timestamps.csv"),
+               "source_test": os.path.join(_URL_BASE, "UDA_annotations/EPIC_100_uda_source_test_timestamps.csv"),
+               "target_test": os.path.join(_URL_BASE, "UDA_annotations/EPIC_100_uda_target_test_timestamps.csv"),
+               "source_val": os.path.join(_URL_BASE, "UDA_annotations/EPIC_100_uda_source_val.csv"),
+               "target_val": os.path.join(_URL_BASE, "UDA_annotations/EPIC_100_uda_target_val.csv"),
            }
        }
+       # Download data for all splits once for all since they are tiny csv files
        files_path = dl_manager.download_and_extract(urls)
+
+       if self.config.name == "unsupervised_domain_adaptation":
+           splits = [
+               datasets.SplitGenerator(
+                   name=datasets.Split(n_),
+                   gen_kwargs={
+                       "annotations": files_path[self.config.name][n_],
+                       "split": n_,
+                   },
+               )
+               for n_ in ["source_train", "target_train", "source_test", "target_test", "source_val", "target_val"]
+           ]
+           return splits
+       else:
+           splits = [
+               datasets.SplitGenerator(
+                   name=datasets.Split.TRAIN,
+                   gen_kwargs={
+                       "annotations": files_path[self.config.name]["train"],
+                       "split": "train",
+                   },
+               ),
                datasets.SplitGenerator(
+                   name=datasets.Split.TEST,
                    gen_kwargs={
+                       "annotations": files_path[self.config.name]["test"],
+                       "split": "test",
                    },
                ),
+           ]
+           if self.config.name == "action_recognition":
+               splits.append(
+                   datasets.SplitGenerator(
+                       name=datasets.Split.VALIDATION,
+                       gen_kwargs={
+                           "annotations": files_path[self.config.name]["validation"],
+                           "split": "validation",
+                       },
+                   ),
+               )
+           return splits

    def _generate_examples(self, annotations, split):
        """This function returns the examples."""

@@ -148,7 +172,8 @@ class EpicKitchens100(datasets.GeneratorBasedBuilder):
            next(csv_reader)  # Skip header
            for idx, row in enumerate(csv_reader):
                narration_id, participant_id, video_id, narration_timestamp, start_timestamp, stop_timestamp = row[:6]
+               if (self.config.name in ["action_recognition", "multi_instance_retrieval"] and split in ["train", "validation"]) or \
+                       (self.config.name == "unsupervised_domain_adaptation" and split in ["source_train", "source_val", "target_val"]):
                    # The reason why it's jumping from 5 to 8 is that we are skipping `start_frame` and `stop_frame`
                    # since we are not exposing the frames, but just the videos
                    narration, verb, verb_class, noun, noun_class, all_nouns, all_noun_classes = row[8:15]