Lemswasabi commited on
Commit
903d3e2
1 Parent(s): 7095a78

Create luxembourgish-asr-rtl-lu

Browse files
Files changed (1) hide show
  1. luxembourgish-asr-rtl-lu +100 -0
luxembourgish-asr-rtl-lu ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ #
3
+ # Created by lemswasabi on 17/05/2022.
4
+ # Copyright © 2022 letzspeak. All rights reserved.
5
+ #
6
+ """Luxembourgish ASR RTL.lu Dataset"""
7
+
8
+
9
+ import os
10
+
11
+ import datasets
12
+
13
+ from datasets.tasks import AutomaticSpeechRecognition
14
+
15
+
16
+ _DESCRIPTION = """\
17
+ luxembourgish-asr-rtl-lu dataset is a speech corpus for the under-resourced Luxembourgish language.
18
+ """
19
+
20
+ _URLS = {
21
+ "rtl-benchmark": "https://drive.google.com/uc?id=1IiFV6TZHH1sOBL409VnmxCXSSyQkue0F&export=download&confirm=t",
22
+ }
23
+
24
class Tuudle(datasets.GeneratorBasedBuilder):
    """Dataset builder for luxembourgish-asr-rtl-lu, a Luxembourgish ASR corpus
    built from the RTL.lu domain.

    Each split directory in the extracted archive contains a ``<split>.tsv``
    metadata file whose rows (after a header line) are
    ``<audio_filename>\\t<sentence>``, plus the referenced audio clips.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="rtl-benchmark",
            version=VERSION,
            description="This part contains benchmark of samples collected from the RTL.lu domain",
        ),
    ]

    # BUG FIX: the previous default ("tuudle") did not match any BuilderConfig
    # name, so `load_dataset(...)` without an explicit config name failed and
    # the `_URLS[self.config.name]` lookup would raise KeyError. Default to
    # the only available config instead.
    DEFAULT_CONFIG_NAME = "rtl-benchmark"

    def _info(self):
        """Return dataset metadata: features, supervised keys and ASR task template."""
        features = datasets.Features(
            {
                # Audio is decoded/resampled to 16 kHz by the Audio feature.
                "audio": datasets.Audio(sampling_rate=16_000),
                "sentence": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "sentence"),
            task_templates=[
                AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentence")
            ],
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then define train/test/validation splits."""
        urls = _URLS[self.config.name]
        archive_path = dl_manager.download_and_extract(urls)
        # Metadata lives at "<archive>/<split>/<split>.tsv" for each split.
        metadata_filepaths = {
            split: os.path.join(archive_path, split, f"{split}.tsv")
            for split in ["train", "test", "dev"]
        }

        # The on-disk split directory is named "dev" but is exposed to users
        # as the VALIDATION split.
        split_specs = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, "dev"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "local_extracted_archive": archive_path,
                    "metadata_filepath": metadata_filepaths[split_dir],
                    "split": split_dir,
                },
            )
            for split_name, split_dir in split_specs
        ]

    def _generate_examples(self, local_extracted_archive, metadata_filepath, split):
        """Yield (key, example) pairs for one split.

        Args:
            local_extracted_archive: root directory of the extracted archive.
            metadata_filepath: path to the split's TSV metadata file.
            split: split directory name ("train", "test" or "dev").

        Rows that do not have exactly two tab-separated fields are skipped,
        matching the original best-effort behavior.
        """
        path_to_clips = os.path.join(local_extracted_archive, split)

        with open(metadata_filepath, encoding="utf-8") as f:
            # Stream the file instead of readlines(): avoids materializing the
            # whole metadata file in memory. Skip the TSV header row.
            next(f, None)
            for key, line in enumerate(f):
                field_values = line.strip().split("\t")
                if len(field_values) != 2:
                    # Malformed row: skip rather than failing the whole split.
                    continue
                audio_filename, sentence = field_values
                audio_path = os.path.join(path_to_clips, audio_filename)
                # BUG FIX: read the audio bytes via a context manager — the
                # original `open(...).read()` leaked the file handle.
                with open(audio_path, "rb") as audio_file:
                    audio_bytes = audio_file.read()
                yield key, {
                    "audio": {"path": audio_path, "bytes": audio_bytes},
                    "sentence": sentence,
                }