Sin2pi committed on
Commit
c11b94a
1 Parent(s): 769d963

Upload download_datasets_in_wav_or_mp3_and_create_csv.ipynb

Browse files
download_datasets_in_wav_or_mp3_and_create_csv.ipynb CHANGED
@@ -7,39 +7,43 @@
7
  "outputs": [],
8
  "source": [
9
  "from datasets import load_dataset\n",
10
- "import soundfile as sf, os, pandas as pd, re\n",
11
  "from tqdm import tqdm\n",
12
  "\n",
13
- "dataset = load_dataset(\"mozilla-foundation/common_voice_17_0\", \"ja\", split=\"other\", trust_remote_code=True, streaming=True, token=\"\")\n",
 
14
  "\n",
15
- "name = \"other\"\n",
16
- "ouput_dir = \"./datasets/CV17/\"\n",
17
- "output_file = 'metadata.csv'\n",
 
 
18
  "os.makedirs(ouput_dir + name, exist_ok=True)\n",
19
  "folder_path = ouput_dir + name # Create a folder to store the audio and transcription files\n",
20
  "\n",
21
- "char = '[ 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890]'\n",
22
- "special_characters = '[♬「」?!“%‘”~♪…?!゛#$%&()*+:;〈=〉@^_{|}~\"█♩♫』『.;:<>_()*&^$#@`, ]'\n",
23
- "\n",
24
- "dsa = (dataset\n",
25
- " .filter(lambda sample: bool(sample[\"sentence\"])) # Returns only samples with transcriptions\n",
26
- " .filter(lambda sample: not re.search(char, sample[\"sentence\"])) # Returns only samples without latin/roman alpha/numerics\n",
27
- " .filter(lambda sample: sample[\"down_votes\"] == 0) # Returns only samples with 0 downvotes (commonvoice)\n",
28
- " )\n",
29
  "\n",
30
- "for i, sample in tqdm(enumerate(dsa)): # Process each sample in the filtered dataset\n",
31
- " audio_sample = name + f'_{i}.wav' # or wav\n",
32
  " audio_path = os.path.join(folder_path, audio_sample)\n",
33
- " transcription_path = os.path.join(folder_path, output_file) # Path to save transcription file \n",
34
  " if not os.path.exists(audio_path):\n",
35
- " sf.write(audio_path, sample['audio']['array'], sample['audio']['sampling_rate'])\n",
 
 
 
 
36
  " sample[\"audio_length\"] = len(sample[\"audio\"][\"array\"]) / sample[\"audio\"][\"sampling_rate\"] # Get audio length, remove if not needed\n",
37
- " with open(transcription_path, 'a', encoding='utf-8') as transcription_file: # Save transcription file \n",
38
- " transcription_file.write(audio_sample+\",\") # Save transcription file name \n",
39
- " sample[\"sentence\"] = re.sub(special_characters,'', sample[\"sentence\"])\n",
40
- " transcription_file.write(sample['sentence']) # Save transcription \n",
41
- " transcription_file.write(str(\",\"+str(sample['audio_length']))) # Save audio length, remove if not needed\n",
42
- " transcription_file.write('\\n') "
 
 
 
43
  ]
44
  }
45
  ],
 
7
  "outputs": [],
8
  "source": [
9
  "from datasets import load_dataset\n",
10
+ "import soundfile as sf, os, re, neologdn\n",
11
  "from tqdm import tqdm\n",
12
  "\n",
13
+ "max = 20.0\n",
14
+ "min = 1.0\n",
15
  "\n",
16
+ "dataset = load_dataset(\"Sin2pi/JA_audio_JA_text_180k_samples\", split=\"train\", trust_remote_code=True, streaming=True)\n",
17
+ "\n",
18
+ "name = \"gvs\"\n",
19
+ "ouput_dir = \"./datasets/\"\n",
20
+ "output_file = 'metadata.csv' # create metadata file with file names and transcripts\n",
21
  "os.makedirs(ouput_dir + name, exist_ok=True)\n",
22
  "folder_path = ouput_dir + name # Create a folder to store the audio and transcription files\n",
23
  "\n",
24
+ "char = '[ 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890♬♪♩♫]'\n",
25
+ "special_characters = '[“%‘”~゛#$%&()*+:;〈=〉@^_{|}~\"█』『.;:<>_()*&^$#@`, ]' #「」\n",
 
 
 
 
 
 
26
  "\n",
27
+ "for i, sample in tqdm(enumerate(dataset)): # Process each sample in the filtered dataset\n",
28
+ " audio_sample = name + f'_{i}.mp3' # or wav\n",
29
  " audio_path = os.path.join(folder_path, audio_sample)\n",
30
+ " transcription_path = os.path.join(folder_path, out_file) # Path to save transcription file \n",
31
  " if not os.path.exists(audio_path):\n",
32
+ " patterns = [(r\"…\",''), (r\"!!\",'!'), (special_characters,\"\"), (r\"\\s+\", \"\")] # (r\"(.)\\1{2}\")\n",
33
+ " for pattern, replace in patterns:\n",
34
+ " sample[\"sentence\"] = re.sub(pattern, replace, sample[\"sentence\"])\n",
35
+ " sample[\"sentence\"] = (neologdn.normalize(sample[\"sentence\"], repeat=1)) # for Japanese only, repeat number reduces repeat characters\n",
36
+ " sample[\"sentence_length\"] = len(sample[\"sentence\"]) # Get sentence lengths \n",
37
  " sample[\"audio_length\"] = len(sample[\"audio\"][\"array\"]) / sample[\"audio\"][\"sampling_rate\"] # Get audio length, remove if not needed\n",
38
+ " if bool(sample[\"sentence\"]) and max > sample[\"audio_length\"] > min and not re.search(char, sample[\"sentence\"]) and sample[\"sentence_length\"] > min_char:\n",
39
+ " sf.write(audio_path, sample['audio']['array'], sample['audio']['sampling_rate']) # Get files \n",
40
+ " # process_directory(folder_path, (folder_path + \"/trimmed/\")) # for use with audio sample silence removal script\n",
41
+ " if os.path.isfile(audio_path):\n",
42
+ " os.remove(audio_path)\n",
43
+ " with open(transcription_path, 'a', encoding='utf-8') as transcription_file:\n",
44
+ " transcription_file.write(audio_sample+\",\") # Save transcription file name \n",
45
+ " transcription_file.write(sample['sentence']) # Save transcription \n",
46
+ " transcription_file.write('\\n')"
47
  ]
48
  }
49
  ],