yangwang825 committed on
Commit
48836dd
1 Parent(s): 4c1ab6a

Update arca23k.py

Browse files
Files changed (1) hide show
  1. arca23k.py +26 -30
arca23k.py CHANGED
@@ -4,14 +4,11 @@
4
 
5
 
6
  import os
7
- import json
8
  import gzip
9
  import shutil
10
  import pathlib
11
  import logging
12
- import textwrap
13
  import datasets
14
- import itertools
15
  import typing as tp
16
  import pandas as pd
17
  import urllib.request
@@ -19,9 +16,8 @@ from pathlib import Path
19
  from copy import deepcopy
20
  from tqdm.auto import tqdm
21
  from rich.logging import RichHandler
22
- from huggingface_hub import hf_hub_download
23
 
24
- from ._arca23k import CLASSES, COARSE_TO_FINE
25
 
26
  logger = logging.getLogger(__name__)
27
  logger.addHandler(RichHandler())
@@ -81,9 +77,9 @@ class ARCA23K(datasets.GeneratorBasedBuilder):
81
  """Returns SplitGenerators."""
82
  for zip_type in ['zip', 'z01', 'z02', 'z03', 'z04']:
83
  _filename = f'ARCA23K.audio.{zip_type}'
84
- _zip_file_url = f'https://zenodo.org/records/5117901/files/ARCA23K.audio.{zip_type}'
85
  _save_path = os.path.join(
86
- HF_DATASETS_CACHE, 'confit___arca23k/v1.0', VERSION, _filename
87
  )
88
  download_file(_zip_file_url, os.path.join(_save_path, _filename))
89
  logger.info(f"`{_filename}` is downloaded to {_save_path}")
@@ -94,44 +90,44 @@ class ARCA23K(datasets.GeneratorBasedBuilder):
94
  _output_file = os.path.join(HF_DATASETS_CACHE, 'confit___arca23k/v1.0', VERSION, concat_zip_filename)
95
 
96
  if not os.path.exists(_output_file):
97
- logger.info(f"Reassemble {_output_file} file")
98
- os.system(f"zip -q -F {_input_file} --out {_output_file}")
 
99
  archive_path = dl_manager.extract(_output_file)
100
- logger.info(f"`{concat_zip_filename}` is downloaded to {archive_path}")
101
-
102
- metadata_path = dl_manager.download_and_extract("https://zenodo.org/records/5117901/files/ARCA23K-FSD.ground_truth.zip")
103
- train_df = pd.read_csv(os.path.join(metadata_path, 'train.csv'))
104
- validation_df = pd.read_csv(os.path.join(metadata_path, 'val.csv'))
105
- test_df = pd.read_csv(os.path.join(metadata_path, 'test.csv'))
106
 
 
 
 
107
  return [
108
  datasets.SplitGenerator(
109
- name=datasets.Split.TRAIN, gen_kwargs={"archive_path": archive_path, "split": "train", "metadata": train_df}
110
- ),
111
- datasets.SplitGenerator(
112
- name=datasets.Split.VALIDATION, gen_kwargs={"archive_path": archive_path, "split": "validation", "metadata": validation_df}
113
- ),
114
- datasets.SplitGenerator(
115
- name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test", "metadata": test_df}
116
  ),
 
 
 
 
 
 
117
  ]
118
-
119
- def _generate_examples(self, archive_path, split=None, metadata=None):
120
  extensions = ['.wav']
121
  _, _walker = fast_scandir(archive_path, extensions, recursive=True)
122
 
123
  fileid2class = {}
124
- for idx, row in metadata.iterrows():
125
- fileid2index[row['fname']] = row['label'] # this filename doesn't have suffix
126
-
127
  for guid, audio_path in enumerate(_walker):
128
- fileid = Path(audio_path).stem
 
129
  yield guid, {
130
  "id": str(guid),
131
  "file": audio_path,
132
  "audio": audio_path,
133
- "sound": fileid2index.get(fileid),
134
- "label": fileid2index.get(fileid),
135
  }
136
 
137
 
 
4
 
5
 
6
  import os
 
7
  import gzip
8
  import shutil
9
  import pathlib
10
  import logging
 
11
  import datasets
 
12
  import typing as tp
13
  import pandas as pd
14
  import urllib.request
 
16
  from copy import deepcopy
17
  from tqdm.auto import tqdm
18
  from rich.logging import RichHandler
 
19
 
20
+ from ._arca23k import CLASSES, FINE_TO_COARSE
21
 
22
  logger = logging.getLogger(__name__)
23
  logger.addHandler(RichHandler())
 
77
  """Returns SplitGenerators."""
78
  for zip_type in ['zip', 'z01', 'z02', 'z03', 'z04']:
79
  _filename = f'ARCA23K.audio.{zip_type}'
80
+ _zip_file_url = f'https://zenodo.org/records/5117901/files/ARCA23K.audio.{zip_type}?download=1'
81
  _save_path = os.path.join(
82
+ HF_DATASETS_CACHE, 'confit___arca23k/v1.0', VERSION
83
  )
84
  download_file(_zip_file_url, os.path.join(_save_path, _filename))
85
  logger.info(f"`{_filename}` is downloaded to {_save_path}")
 
90
  _output_file = os.path.join(HF_DATASETS_CACHE, 'confit___arca23k/v1.0', VERSION, concat_zip_filename)
91
 
92
  if not os.path.exists(_output_file):
93
+ logger.info(f"Reassembling {_output_file}")
94
+ os.system(f"zip -F {_input_file} --out {_output_file}")
95
+
96
  archive_path = dl_manager.extract(_output_file)
97
+ logger.info(f"`{concat_zip_filename}` is extracted to {archive_path}")
 
 
 
 
 
98
 
99
+ metadata_path = dl_manager.download_and_extract("https://zenodo.org/records/5117901/files/ARCA23K.ground_truth.zip")
100
+ metadata_df = pd.read_csv(os.path.join(metadata_path, 'ARCA23K.ground_truth', 'train.csv'))
101
+
102
  return [
103
  datasets.SplitGenerator(
104
+ name=datasets.Split.TRAIN, gen_kwargs={"archive_path": archive_path, "split": "train", "metadata_df": metadata_df}
 
 
 
 
 
 
105
  ),
106
+ # datasets.SplitGenerator(
107
+ # name=datasets.Split.VALIDATION, gen_kwargs={"archive_path": archive_path, "split": "validation", "metadata_path": metadata_path}
108
+ # ),
109
+ # datasets.SplitGenerator(
110
+ # name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test", "metadata_path": metadata_path}
111
+ # ),
112
  ]
113
+
114
def _generate_examples(self, archive_path, split=None, metadata_df=None):
    """Yield ``(key, example)`` pairs for one dataset split.

    Scans ``archive_path`` recursively for ``.wav`` files and attaches the
    ground-truth label from ``metadata_df`` (expects columns ``fname`` and
    ``label``) to each clip.  Clips with no metadata entry — or when
    ``metadata_df`` is ``None`` — get ``None`` for ``sound``/``label``.

    Args:
        archive_path: Root directory of the extracted audio archive.
        split: Split name (unused here; kept for the builder's gen_kwargs).
        metadata_df: Ground-truth DataFrame, or ``None``.
    """
    extensions = ['.wav']
    _, _walker = fast_scandir(archive_path, extensions, recursive=True)

    # Map basename (WITH the .wav suffix) -> label.  The CSV 'fname'
    # column stores the file id without a suffix, so append it here to
    # match Path(audio_path).name below.
    fileid2class = {}
    if metadata_df is not None:  # guard: the declared default is None
        for _, row in metadata_df.iterrows():
            fileid2class[f"{row['fname']}.wav"] = row['label']

    for guid, audio_path in enumerate(_walker):
        sound = fileid2class.get(Path(audio_path).name)
        yield guid, {
            "id": str(guid),
            "file": audio_path,
            "audio": audio_path,
            "sound": sound,
            "label": sound,
        }
132
 
133