dlxj committed on
Commit
d5f13b4
·
1 Parent(s): 0534d18

数据集正确转换

Browse files
.gitignore CHANGED
@@ -1,4 +1,6 @@
1
  # log and data files
 
 
2
  *.model
3
  *.pkl
4
  #*.ipynb
 
1
  # log and data files
2
+ common_voice_11_0/
3
+ Common Voice Scripted Speech 25.0 - Japanese/
4
  *.model
5
  *.pkl
6
  #*.ipynb
convert_ja.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import sys
from dataclasses import is_dataclass
from datasets import load_dataset
from datasets.utils.logging import set_verbosity_info, enable_progress_bar
from omegaconf import OmegaConf

# Enable `datasets` info-level logging and progress bars for visibility
set_verbosity_info()
enable_progress_bar()

# Add the conversion-script directory to sys.path so the helper module below imports
sys.path.append(os.path.join(os.path.dirname(__file__), 'scripts', 'speech_recognition'))

from convert_hf_dataset_to_nemo import HFDatasetConversionConfig, prepare_output_dirs, process_dataset

17
def convert_ja():
    """Convert the local Common Voice 11.0 Japanese dataset to NeMo format.

    Loads the dataset from ``./data/common_voice_11_0`` via HuggingFace
    ``datasets``, then converts every split to NeMo-style wav files plus a
    manifest under ``./data/nemo_ja``. Exits the process with status 1 if
    the dataset fails to load.
    """
    # Local dataset path
    dataset_path = os.path.join(os.path.dirname(__file__), "data", "common_voice_11_0")
    # Output path
    output_dir = os.path.join(os.path.dirname(__file__), "data", "nemo_ja")

    # Build the conversion configuration
    cfg = HFDatasetConversionConfig(
        output_dir=output_dir,
        path=dataset_path,
        name="ja",
        ensure_ascii=False,
        use_auth_token=False,
        num_proc=4,  # use multiple processes to speed up conversion
    )

    # Convert the dataclass config to OmegaConf for the downstream helpers
    if is_dataclass(cfg):
        cfg = OmegaConf.structured(cfg)

    # Prepare output directories
    prepare_output_dirs(cfg)

    print(f"开始加载数据集 {cfg.path},语言: {cfg.name}...")
    print("此过程会进行数据的准备和解压,请耐心等待(可以通过命令行查看进度条)...")

    # Load the dataset; any failure here is fatal for this script
    try:
        dataset = load_dataset(
            path=cfg.path,
            name=cfg.name,
            split=cfg.split,
            cache_dir=None,
            streaming=cfg.streaming,
            token=cfg.use_auth_token,
            trust_remote_code=True,
            download_mode="force_redownload",  # always re-prepare from the local files
        )
        print("数据集加载完成!")
    except Exception:  # broad by design: report the traceback and abort the CLI run
        import traceback
        print("Failed to load dataset. Traceback:")
        print(traceback.format_exc())
        sys.exit(1)

    print("开始进行格式转换 (HF -> NeMo)...")
    # A dict of splits means multiple splits were returned; otherwise a single split
    if isinstance(dataset, dict):
        print(f"\nMultiple splits found for dataset {cfg.path}: {list(dataset.keys())}")
        keys = list(dataset.keys())
        for key in keys:
            ds_split = dataset[key]
            print(f"Processing split {key} for dataset {cfg.path}")
            cfg.split_output_dir = os.path.join(cfg.resolved_output_dir, key)
            process_dataset(ds_split, cfg)
            # Release the processed split before moving on to limit peak memory
            del dataset[key], ds_split

        cfg.split_output_dir = None
    else:
        print(f"Single split found for dataset {cfg.path} | Split chosen = {cfg.split}")
        if cfg.split is not None:
            cfg.split_output_dir = os.path.join(cfg.resolved_output_dir, cfg.split)

        process_dataset(dataset, cfg)


if __name__ == '__main__':
    convert_ja()
readme.txt CHANGED
@@ -25,12 +25,16 @@ https://datacollective.mozillafoundation.org/datasets/cmn2hm68r01n4mm071qux43yu
25
  日语 asr 数据集,原来的 mozilla-foundation/common_voice_11_0 已失效
26
 
27
  https://ai.gitee.com/hf-datasets/mozilla-foundation/common_voice_11_0
 
28
  这里可以下载数据集
29
- git clone https://ai.gitee.com/zoesimin/common_voice_11_0
30
 
31
  https://modelscope.cn/datasets
32
  这里有方言数据集
33
 
 
 
 
 
34
 
35
  tar xzvf "/mnt/y/ai/Common Voice Scripted Speech 25.0 - Japanese.tar.gz" -C /mnt/e/huggingface_echodict/NeMo/data
36
  wsl 解压数据
 
25
  日语 asr 数据集,原来的 mozilla-foundation/common_voice_11_0 已失效
26
 
27
  https://ai.gitee.com/hf-datasets/mozilla-foundation/common_voice_11_0
28
+ https://ai.gitee.com/datasets/blbqxxol/common_voice_11_0 我的 fork
29
  这里可以下载数据集
 
30
 
31
  https://modelscope.cn/datasets
32
  这里有方言数据集
33
 
34
+ conda run -n NeMo pip install datasets hydra-core omegaconf librosa soundfile
35
+
36
+
37
+ git clone https://ai.gitee.com/hf-datasets/common_voice_11_0
38
 
39
  tar xzvf "/mnt/y/ai/Common Voice Scripted Speech 25.0 - Japanese.tar.gz" -C /mnt/e/huggingface_echodict/NeMo/data
40
  wsl 解压数据
requirements.txt CHANGED
@@ -1 +1 @@
1
- datasets
 
1
+ datasets==3.6.0
scripts/speech_recognition/convert_hf_dataset_to_nemo.py CHANGED
@@ -91,7 +91,7 @@ import hydra
91
  import librosa
92
  import soundfile
93
  import tqdm
94
- from datasets import Audio, Dataset, IterableDataset, load_dataset
95
  from hydra.conf import HydraConf, RunDir
96
  from hydra.core.config_store import ConfigStore
97
  from omegaconf import OmegaConf
@@ -139,7 +139,7 @@ def prepare_output_dirs(cfg: HFDatasetConversionConfig):
139
  cfg.split_output_dir = None
140
 
141
 
142
- def infer_dataset_segments(batch):
143
  """
144
  Helper method to run in batch mode over a mapped Dataset.
145
 
@@ -149,9 +149,9 @@ def infer_dataset_segments(batch):
149
  A cleaned list of path segments
150
  """
151
  segments = []
152
- segment, path = os.path.split(batch['audio']['path'])
153
  segments.insert(0, path)
154
- while segment not in ('', os.path.sep):
155
  segment, path = os.path.split(segment)
156
  segments.insert(0, path)
157
 
@@ -204,22 +204,55 @@ def build_map_dataset_to_nemo_func(cfg: HFDatasetConversionConfig, basedir):
204
  """
205
 
206
  def map_dataset_to_nemo(batch):
207
- # Write audio file to correct path
208
- if cfg.streaming:
209
- batch['audio_filepath'] = batch['audio']['path'].split("::")[0].replace("zip://", "")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210
  else:
211
- segments = infer_dataset_segments(batch)
212
- audio_filepath = os.path.join(*segments)
213
- batch['audio_filepath'] = audio_filepath
214
-
215
- batch['audio_filepath'] = os.path.abspath(os.path.join(basedir, batch['audio_filepath']))
216
- audio_filepath = batch['audio_filepath']
217
- audio_filepath = prepare_audio_filepath(audio_filepath)
218
- batch['audio_filepath'] = audio_filepath # update filepath with prepared path
219
-
220
- soundfile.write(audio_filepath, batch['audio']['array'], samplerate=cfg.sampling_rate, format='wav')
221
-
222
- batch['duration'] = librosa.get_duration(y=batch['audio']['array'], sr=batch['audio']['sampling_rate'])
 
 
 
 
 
 
 
223
  return batch
224
 
225
  return map_dataset_to_nemo
@@ -245,9 +278,19 @@ def convert_offline_dataset_to_nemo(
245
  if num_proc < 0:
246
  num_proc = max(1, os.cpu_count() // 2)
247
 
248
- dataset = dataset.map(build_map_dataset_to_nemo_func(cfg, basedir), num_proc=num_proc)
 
 
 
 
 
 
 
 
 
249
  ds_iter = iter(dataset)
250
 
 
251
  with open(manifest_filepath, 'w') as manifest_f:
252
  for idx, sample in enumerate(
253
  tqdm.tqdm(
@@ -322,13 +365,16 @@ def process_dataset(dataset: IterableDataset, cfg: HFDatasetConversionConfig):
322
  if 'sentence' in dataset.features:
323
  dataset = dataset.rename_column("sentence", "text")
324
 
 
 
 
325
  if cfg.split_output_dir is None:
326
  basedir = cfg.resolved_output_dir
327
- manifest_filename = f"{cfg.path.replace('/', '_')}_manifest.json"
328
  else:
329
  basedir = cfg.split_output_dir
330
  split = os.path.split(cfg.split_output_dir)[-1]
331
- manifest_filename = f"{split}_{cfg.path.replace('/', '_')}_manifest.json"
332
 
333
  if not os.path.exists(cfg.split_output_dir):
334
  os.makedirs(cfg.split_output_dir, exist_ok=True)
 
91
  import librosa
92
  import soundfile
93
  import tqdm
94
+ from datasets import Audio, Dataset, IterableDataset, load_dataset, disable_progress_bar, enable_progress_bar
95
  from hydra.conf import HydraConf, RunDir
96
  from hydra.core.config_store import ConfigStore
97
  from omegaconf import OmegaConf
 
139
  cfg.split_output_dir = None
140
 
141
 
142
+ def infer_dataset_segments(audio_path):
143
  """
144
  Helper method to run in batch mode over a mapped Dataset.
145
 
 
149
  A cleaned list of path segments
150
  """
151
  segments = []
152
+ segment, path = os.path.split(audio_path)
153
  segments.insert(0, path)
154
+ while segment and segment != os.path.sep and segment != os.path.dirname(segment):
155
  segment, path = os.path.split(segment)
156
  segments.insert(0, path)
157
 
 
204
  """
205
 
206
  def map_dataset_to_nemo(batch):
207
+ # 检查是否是 batched 模式
208
+ is_batched = isinstance(batch['audio'], list)
209
+
210
+ if is_batched:
211
+ audio_filepaths = []
212
+ durations = []
213
+
214
+ for i in range(len(batch['audio'])):
215
+ audio_path = batch['audio'][i]['path']
216
+ audio_array = batch['audio'][i]['array']
217
+ audio_sr = batch['audio'][i]['sampling_rate']
218
+
219
+ # Write audio file to correct path
220
+ if cfg.streaming:
221
+ filepath = audio_path.split("::")[0].replace("zip://", "")
222
+ else:
223
+ segments = infer_dataset_segments(audio_path)
224
+ filepath = os.path.join(*segments)
225
+
226
+ filepath = os.path.abspath(os.path.join(basedir, filepath))
227
+ filepath = prepare_audio_filepath(filepath)
228
+ audio_filepaths.append(filepath) # update filepath with prepared path
229
+
230
+ soundfile.write(filepath, audio_array, samplerate=cfg.sampling_rate, format='wav')
231
+
232
+ durations.append(librosa.get_duration(y=audio_array, sr=audio_sr))
233
+
234
+ batch['audio_filepath'] = audio_filepaths
235
+ batch['duration'] = durations
236
  else:
237
+ audio_path = batch['audio']['path']
238
+ audio_array = batch['audio']['array']
239
+ audio_sr = batch['audio']['sampling_rate']
240
+
241
+ # Write audio file to correct path
242
+ if cfg.streaming:
243
+ filepath = audio_path.split("::")[0].replace("zip://", "")
244
+ else:
245
+ segments = infer_dataset_segments(audio_path)
246
+ filepath = os.path.join(*segments)
247
+
248
+ filepath = os.path.abspath(os.path.join(basedir, filepath))
249
+ filepath = prepare_audio_filepath(filepath)
250
+ batch['audio_filepath'] = filepath # update filepath with prepared path
251
+
252
+ soundfile.write(filepath, audio_array, samplerate=cfg.sampling_rate, format='wav')
253
+
254
+ batch['duration'] = librosa.get_duration(y=audio_array, sr=audio_sr)
255
+
256
  return batch
257
 
258
  return map_dataset_to_nemo
 
278
  if num_proc < 0:
279
  num_proc = max(1, os.cpu_count() // 2)
280
 
281
+ print(f"[{cfg.split}] 开始提取音频并保存为 wav 格式 (使用 {num_proc} 个进程)...")
282
+ enable_progress_bar()
283
+ dataset = dataset.map(
284
+ build_map_dataset_to_nemo_func(cfg, basedir),
285
+ num_proc=num_proc,
286
+ batched=True, # process samples in batches (the map fn handles both batched and single-sample modes)
287
+ desc=f"[{cfg.split}] 转换音频文件"
288
+ )
289
+ print(f"[{cfg.split}] 音频转换完成!")
290
+
291
  ds_iter = iter(dataset)
292
 
293
+ print(f"[{cfg.split}] 开始生成 manifest JSON 文件...")
294
  with open(manifest_filepath, 'w') as manifest_f:
295
  for idx, sample in enumerate(
296
  tqdm.tqdm(
 
365
  if 'sentence' in dataset.features:
366
  dataset = dataset.rename_column("sentence", "text")
367
 
368
+ # 获取路径的最后一部分作为数据集名称,避免绝对路径导致文件名非法
369
+ dataset_name = os.path.basename(os.path.normpath(cfg.path))
370
+
371
  if cfg.split_output_dir is None:
372
  basedir = cfg.resolved_output_dir
373
+ manifest_filename = f"{dataset_name.replace('/', '_')}_manifest.json"
374
  else:
375
  basedir = cfg.split_output_dir
376
  split = os.path.split(cfg.split_output_dir)[-1]
377
+ manifest_filename = f"{split}_{dataset_name.replace('/', '_')}_manifest.json"
378
 
379
  if not os.path.exists(cfg.split_output_dir):
380
  os.makedirs(cfg.split_output_dir, exist_ok=True)