jherng committed on
Commit d8b66c6
1 Parent(s): 4509a2f

Debug dataset loading script

Files changed (1)
  1. xd-violence.py +16 -22
xd-violence.py CHANGED
@@ -76,7 +76,7 @@ class XDViolence(datasets.GeneratorBasedBuilder):
         if self.config.name == "rgb":
             features = datasets.Features(
                 {
-                    "video_id": datasets.Value("string"),
+                    "id": datasets.Value("string"),
                     "rgb_feats": datasets.Array3D(
                         shape=(None, 10, 2048),
                         dtype="float32",  # (num_frames, num_crops, feature_dim) use 10 crops by default as of now
@@ -108,8 +108,8 @@ class XDViolence(datasets.GeneratorBasedBuilder):
         else:  # default = "video"
             features = datasets.Features(
                 {
-                    "video_id": datasets.Value("string"),
-                    "video_path": datasets.Value("string"),
+                    "id": datasets.Value("string"),
+                    "path": datasets.Value("string"),
                     "binary_target": datasets.ClassLabel(
                         names=["Non-violence", "Violence"]
                     ),
@@ -164,8 +164,8 @@ class XDViolence(datasets.GeneratorBasedBuilder):
                     header=None,
                     sep=" ",
                     usecols=[0],
-                    names=["video_id"],
-                )["video_id"]
+                    names=["id"],
+                )["id"]
                 .apply(
                     lambda x: urllib.parse.quote(
                         urllib.parse.urljoin(_URL, f"video/{x}.mp4"), safe=":/"
@@ -177,8 +177,8 @@ class XDViolence(datasets.GeneratorBasedBuilder):
                     header=None,
                     sep=" ",
                     usecols=[0],
-                    names=["video_id"],
-                )["video_id"]
+                    names=["id"],
+                )["id"]
                 .apply(
                     lambda x: urllib.parse.quote(
                         urllib.parse.urljoin(_URL, f"video/test_videos/{x}.mp4"),
@@ -220,19 +220,15 @@ class XDViolence(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, annotation_path, video_paths, annotation_reader):
         ann_data = annotation_reader(annotation_path)
 
-        for key, (video_path, annotation) in enumerate(zip(video_paths, ann_data)):
-            video_id = annotation["video_id"]
+        for key, (path, annotation) in enumerate(zip(video_paths, ann_data)):
+            id = annotation["id"]
             frame_annotations = annotation.get("frame_annotations", [])
 
-            try:
-                binary, multilabel = self.extract_labels(video_id)
-            except Exception as e:
-                print(f"Error processing video {video_id}.")
-                raise e
+            binary, multilabel = self.extract_labels(id)
 
             yield key, {
-                "video_id": video_id,
-                "video_path": video_path,
+                "id": id,
+                "path": path,
                 "binary_target": binary,
                 "multilabel_targets": multilabel,
                 "frame_annotations": frame_annotations,
@@ -242,10 +238,8 @@ class XDViolence(datasets.GeneratorBasedBuilder):
     def _read_train_list(path):
         """Reads the train_list.txt file and returns a list of video ids."""
 
-        train_list = pd.read_csv(
-            path, header=None, sep=" ", usecols=[0], names=["video_id"]
-        )
-        train_list["video_id"] = train_list["video_id"].apply(lambda x: x.split("/")[1])
+        train_list = pd.read_csv(path, header=None, sep=" ", usecols=[0], names=["id"])
+        train_list["id"] = train_list["id"].apply(lambda x: x.split("/")[1])
         return train_list.to_dict("records")
 
     @staticmethod
@@ -265,7 +259,7 @@ class XDViolence(datasets.GeneratorBasedBuilder):
 
             annotations.append(
                 {
-                    "video_id": parts[0],
+                    "id": parts[0],
                     "frame_annotations": [
                         {"start": parts[start_idx], "end": parts[start_idx + 1]}
                         for start_idx in range(1, len(parts), 2)
@@ -279,7 +273,7 @@ class XDViolence(datasets.GeneratorBasedBuilder):
 
             annotations.append(
                 {
-                    "video_id": parts[0],
+                    "id": parts[0],
                     "frame_annotations": [
                         {"start": parts[start_idx], "end": parts[start_idx + 1]}
                         for start_idx in range(1, len(parts), 2)
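
For reference, a minimal sketch of how the renamed columns would surface to a consumer of this loading script. The repo id "jherng/xd-violence", the config name "video", and the "train" split are assumptions for illustration only; none of them are confirmed by this diff.

    # Minimal usage sketch -- assumes the script is hosted as "jherng/xd-violence"
    # with a "video" config and a "train" split (not confirmed by this commit).
    from datasets import load_dataset

    # Newer versions of the datasets library may additionally require
    # trust_remote_code=True for script-based datasets.
    ds = load_dataset("jherng/xd-violence", name="video", split="train")

    sample = ds[0]
    # After this commit the example keys are "id" and "path"
    # (previously "video_id" and "video_path").
    print(sample["id"], sample["path"])
    print(sample["binary_target"], sample["multilabel_targets"])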