RitchieP committed
Commit
9d5f17f
1 Parent(s): 366dcbd

Upload dataset scripts

Files changed (4)
  1. accents.py +4 -0
  2. generate_train_test_split.py +108 -0
  3. release_stats.py +8 -0
  4. verbalex_voice.py +134 -0
accents.py ADDED
@@ -0,0 +1,4 @@
+ ACCENTS = {
+     "ar": "arabic",
+     "zh": "chinese"
+ }
generate_train_test_split.py ADDED
@@ -0,0 +1,108 @@
+ # This file is used to generate the train and test split of files within
+ # the file system.
+
+ import os
+ import re
+ import shutil
+ import tarfile
+ from math import floor
+ from random import shuffle
+
+
+ def get_file_list_from_dir(data_dir):
+     """Get a list of files within the directory"""
+     all_files = os.listdir(os.path.abspath(data_dir))
+     data_files = list(filter(lambda file: file.endswith('.wav'), all_files))
+     return data_files
+
+
+ # Because the audio and transcript files are in different folders, we need to split
+ # them into train and test splits while maintaining their correct mapping in both splits.
+ # For example, if transcript 0001 is in the test split, audio 0001 should also be in the test split.
+ #
+ # Hence, instead of splitting the files themselves, I will split the files based on their
+ # ID numbers, because every transcript and audio file has an ID number.
+ # For example, arctic_a0001.txt is the transcript for arctic_a0001.wav.
+ #
+ # After splitting the ID numbers, we then assign the files to their respective splits
+ # based on their ID numbers.
+ def shuffle_file_id(files):
+     file_id_regex = re.compile(r'[a-z]\d\d\d\d')
+     file_id = []
+
+     for file in files:
+         file_id.append(file_id_regex.search(file).group())
+
+     shuffle(file_id)
+     return file_id
+
+
+ def train_test_splits(file_ids, training_ratio):
+     split = training_ratio
+     split_index = floor(len(file_ids) * split)
+     train = file_ids[:split_index]
+     test = file_ids[split_index:]
+     return train, test
+
+
+
+ if __name__ == "__main__":
+     print("Running")
+     audio_file_paths = {
+         "ABA": "C:\\Users\\user\\OneDrive - Universiti Sains Malaysia\\Assignment\\FYP\\l2arctic_release_v5.0\\ABA\\ABA\\wav",
+         "BWC": "C:\\Users\\user\\OneDrive - Universiti Sains Malaysia\\Assignment\\FYP\\l2arctic_release_v5.0\\BWC\\BWC\\wav"
+     }
+     speaker_accent_map = {
+         "ABA": "ar",
+         "BWC": "zh"
+     }
+     splits = ["train", "test"]
+
+     try:
+         for speaker, accent in speaker_accent_map.items():
+             audio_files = get_file_list_from_dir(audio_file_paths[speaker])
+             shuffled_file_id = shuffle_file_id(audio_files)
+             train, test = train_test_splits(shuffled_file_id, 0.7)
+
+             print(train)
+             print("Number of samples in training: ", len(train))
+             print(test)
+             print("Number of samples in testing: ", len(test))
+             tar_file = tarfile.open(
+                 f"C:\\Users\\user\\OneDrive - Universiti Sains Malaysia\\Assignment\\FYP\\data\\audio\\{accent}\\train\\{accent}_train.tar",
+                 "w"
+             )
+             for id in train:
+                 # Copy training audio into a tar file
+                 tar_file.add(
+                     f"C:\\Users\\user\\OneDrive - Universiti Sains Malaysia\\Assignment\\FYP\\l2arctic_release_v5.0\\{speaker}\\{speaker}\\wav\\arctic_{id}.wav",
+                     arcname=f"arctic_{id}.wav"
+                 )
+
+                 # Copy the training transcript into a tsv file
+                 with open(f"C:\\Users\\user\\OneDrive - Universiti Sains Malaysia\\Assignment\\FYP\\data\\transcript\\{accent}\\train.tsv", 'a') as tsv_file:
+                     with open(f"C:\\Users\\user\\OneDrive - Universiti Sains Malaysia\\Assignment\\FYP\\l2arctic_release_v5.0\\{speaker}\\{speaker}\\transcript\\arctic_{id}.txt") as txt_file:
+                         tsv_file.write(txt_file.read() + "\n")
+                     tsv_file.close()
+             tar_file.close()
+
+             # Copy testing audio into a tar file
+             tar_file = tarfile.open(
+                 f"C:\\Users\\user\\OneDrive - Universiti Sains Malaysia\\Assignment\\FYP\\data\\audio\\{accent}\\test\\{accent}_test.tar",
+                 "w"
+             )
+             for id in test:
+                 tar_file.add(
+                     f"C:\\Users\\user\\OneDrive - Universiti Sains Malaysia\\Assignment\\FYP\\l2arctic_release_v5.0\\{speaker}\\{speaker}\\wav\\arctic_{id}.wav",
+                     arcname=f"arctic_{id}.wav"
+                 )
+
+                 # Copy the testing transcript into a tsv file
+                 with open(f"C:\\Users\\user\\OneDrive - Universiti Sains Malaysia\\Assignment\\FYP\\data\\transcript\\{accent}\\test.tsv", 'a') as tsv_file:
+                     with open(f"C:\\Users\\user\\OneDrive - Universiti Sains Malaysia\\Assignment\\FYP\\l2arctic_release_v5.0\\{speaker}\\{speaker}\\transcript\\arctic_{id}.txt") as txt_file:
+                         tsv_file.write(txt_file.read() + "\n")
+                     tsv_file.close()
+             tar_file.close()
+
+     except PermissionError:
+         print("Permission denied")
release_stats.py ADDED
@@ -0,0 +1,8 @@
+ STATS = {
+     "name": "VerbaLex Voice",
+     "version": "1.0.0",
+     "accents": {
+         "ar": {"numOfSpeaker": "1", "numOfWavFiles": "1129"},
+         "zh": {"numOfSpeaker": "1", "numOfWavFiles": "1130"}
+     }
+ }
verbalex_voice.py ADDED
@@ -0,0 +1,134 @@
+ import csv
+ import os
+
+ import datasets
+ from tqdm import tqdm
+
+ from VerbaLex_Voice.accents import ACCENTS
+ from VerbaLex_Voice.release_stats import STATS
+
+ _HOMEPAGE = "https://huggingface.co/datasets/RitchieP/VerbaLex_voice"
+
+ _LICENSE = "https://choosealicense.com/licenses/apache-2.0/"
+
+ _BASE_URL = "https://huggingface.co/datasets/RitchieP/VerbaLex_voice/resolve/main/"
+
+ _AUDIO_URL = _BASE_URL + "audio/{accent}/{split}/{accent}_{split}.tar"
+
+ _TRANSCRIPT_URL = _BASE_URL + "transcript/{accent}/{split}.tsv"
+
+ _CITATION = """\
+ """
+
+
+ class VerbaLexVoiceConfig(datasets.BuilderConfig):
+     def __init__(self, name, version, **kwargs):
+         self.accent = kwargs.pop("accent", None)
+         self.num_speakers = kwargs.pop("num_speakers", None)
+         self.num_files = kwargs.pop("num_files", None)
+         description = (
+             f"VerbaLex Voice English speech-to-text dataset in the {self.accent} accent."
+         )
+
+         super(VerbaLexVoiceConfig, self).__init__(
+             name=name,
+             version=datasets.Version(version),
+             description=description,
+             **kwargs,
+         )
+
+
+ class VerbaLexVoiceDataset(datasets.GeneratorBasedBuilder):
+     """
+     VerbaLex is a dataset containing different English accents from non-native English speakers.
+     This dataset is created directly from the L2-Arctic dataset.
+     """
+     BUILDER_CONFIGS = [
+         VerbaLexVoiceConfig(
+             name=accent,
+             version=STATS["version"],
+             accent=ACCENTS[accent],
+             num_speakers=accent_stats["numOfSpeaker"],
+             num_files=accent_stats["numOfWavFiles"]
+         )
+         for accent, accent_stats in STATS["accents"].items()
+     ]
+
+     DEFAULT_CONFIG_NAME = "all"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=(
+                 "VerbaLex Voice is a speech dataset focusing on accented English speech. "
+                 "It specifically targets speech from speakers who are non-native English speakers."
+             ),
+             features=datasets.Features(
+                 {
+                     "path": datasets.Value("string"),
+                     "accent": datasets.Value("string"),
+                     "sentence": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=44_100)
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators"""
+         accent = self.config.name
+
+         splits = ("train", "test")
+         audio_urls = {}
+         for split in splits:
+             audio_urls[split] = [_AUDIO_URL.format(accent=accent, split=split)]  # one-item list of archive URLs per split
+         archive_paths = dl_manager.download(audio_urls)
+         local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+
+         meta_urls = {split: _TRANSCRIPT_URL.format(accent=accent, split=split) for split in splits}
+         meta_paths = dl_manager.download_and_extract(meta_urls)
+
+         split_names = {
+             "train": datasets.Split.TRAIN,
+             "test": datasets.Split.TEST
+         }
+         split_generators = []
+         for split in splits:
+             split_generators.append(
+                 datasets.SplitGenerator(
+                     name=split_names.get(split, split),
+                     gen_kwargs={
+                         "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
+                         "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
+                         "meta_path": meta_paths[split]
+                     }
+                 )
+             )
+
+         return split_generators
+
+     def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
+         data_fields = list(self._info().features.keys())
+         metadata = {}
+         with open(meta_path, encoding="UTF-8") as f:
+             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+             for row in tqdm(reader, desc="Reading metadata..."):
+                 if not row["path"].endswith(".wav"):
+                     row["path"] += ".wav"
+                 for field in data_fields:
+                     if field not in row:
+                         row[field] = ""
+                 metadata[row["path"]] = row
+
+         for i, audio_archive in enumerate(archives):
+             for path, file in audio_archive:
+                 _, filename = os.path.split(path)
+                 if filename in metadata:
+                     result = dict(metadata[filename])
+                     path = os.path.join(local_extracted_archive_paths[i],
+                                         path) if local_extracted_archive_paths else path
+                     result["audio"] = {"path": path, "bytes": file.read()}
+                     result["path"] = path
+                     yield path, result
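
For reference, a minimal usage sketch of the loading script above. It assumes the script and the per-accent audio/transcript files are available in the RitchieP/VerbaLex_voice repository; recent releases of the datasets library require trust_remote_code=True to run dataset loading scripts (and the very newest no longer support them at all).

from datasets import load_dataset

# "ar" is one of the accent configs built from STATS["accents"] / ACCENTS.
ds = load_dataset(
    "RitchieP/VerbaLex_voice",
    "ar",
    split="train",
    trust_remote_code=True,  # needed on datasets versions that still run loading scripts
)

example = ds[0]
print(example["sentence"])                 # transcript text
print(example["audio"]["sampling_rate"])   # 44100, per the Audio feature declared in _info()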