# coding=utf-8
# Copyright 2023 The current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" NENA Speech Dataset"""


import csv
import os

import datasets
from datasets.utils.py_utils import size_str
from tqdm import tqdm

from .dialects import DIALECTS
from .release_stats import STATS
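# DIALECTS maps each config name to a display name, and STATS carries release
# metadata. Illustrative shapes only, inferred from how they are used below:
#
#     DIALECTS = {"urmi_c": "Christian Urmi", ...}
#     STATS = {
#         "version": "1.0.0",
#         "date": "2023-01-01",
#         "durationLabelled": 3600,
#         "dialects": {"urmi_c": {"totalExamples": 100, "speakers": 3}, ...},
#     }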

_HOMEPAGE = "https://nena.ames.cam.ac.uk/"

_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"

_BASE_URL = "https://huggingface.co/datasets/mnazari/nena_speech_1_0_test/resolve/main/"

_AUDIO_URL = _BASE_URL + "audio/{dialect}/{split}.tar"

_TRANSCRIPT_URL = _BASE_URL + "transcript/{dialect}/{split}.tsv"
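# Both templates are filled per dialect config and per split, e.g.
# audio/<dialect>/train.tar and transcript/<dialect>/train.tsv.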


class NENASpeechConfig(datasets.BuilderConfig):
    """BuilderConfig for NENASpeech."""
    def __init__(self, name, version, **kwargs):
        self.dialect = kwargs.pop("dialect", None)
        self.release_date = kwargs.pop("release_date", None)
        self.num_examples = kwargs.pop("num_examples", None)
        self.num_speakers = kwargs.pop("num_speakers", None)
        self.validated_hr = kwargs.pop("validated_hr", None)
        self.total_hr = kwargs.pop("total_hr", None)
        self.size_bytes = kwargs.pop("size_bytes", None)
        self.size_human = size_str(self.size_bytes) if self.size_bytes is not None else None
        description = (
            f"NENA Speech dataset in the {self.dialect} dialect released on {self.release_date}. "
            f"The dataset contains {self.num_examples} examples from {self.num_speakers} speakers."
        )
        if self.validated_hr is not None and self.total_hr is not None:
            description += (
                f" It comprises {self.validated_hr} hours of validated transcribed speech data "
                f"out of {self.total_hr} hours in total."
            )
        if self.size_human:
            description += f" Its size is {self.size_human}."
        super(NENASpeechConfig, self).__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )

class NENASpeech(datasets.GeneratorBasedBuilder):
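    """Loads NENA Speech, exposing one builder config per dialect."""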
    DEFAULT_WRITER_BATCH_SIZE = 1000

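    # One config per dialect, sorted by example count (ascending).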
    BUILDER_CONFIGS = sorted([
        NENASpeechConfig(
            name=dialect,
            version=STATS["version"],
            dialect=DIALECTS[dialect],
            release_date=STATS["date"],
            num_examples=dialect_stats["totalExamples"],
            num_speakers=dialect_stats["speakers"],
            # validated_hr=float(dialect_stats["validHrs"]) if dialect_stats["validHrs"] else None,
            # total_hr=float(dialect_stats["totalHrs"]) if dialect_stats["totalHrs"] else None,
            # size_bytes=int(dialect_stats["size"]) if dialect_stats["size"] else None,
        )
        for dialect, dialect_stats in STATS["dialects"].items()
    ], key=lambda config: config.num_examples)

    def _info(self):
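        """Build the DatasetInfo with the feature schema shared by all configs."""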
        total_dialects = len(STATS["dialects"])
        # STATS["durationLabelled"] is assumed to be in seconds; convert to minutes.
        total_duration = STATS["durationLabelled"] / 60
        description = (
            "NENA Speech is a multimodal dataset to help teach machines how real people speak "
            "the Northeastern Neo-Aramaic dialects. The dataset currently consists of "
            f"{total_duration:.1f} validated minutes of speech in {total_dialects} dialects, but "
            "more examples are actively being crowdsourced."
        )
        features = datasets.Features(
            {
                "transcription": datasets.Value("string"),
                "translation": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=48_000),
                "locale": datasets.Value("string"),
                "proficiency": datasets.Value("string"),
                "age": datasets.Value("string"),
                "crowdsourced": datasets.Value("bool"),
                "unlabelled": datasets.Value("bool"),
                "interrupted": datasets.Value("bool"),
                "client_id": datasets.Value("string"),
                "path": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=description,
            # citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
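        """Download per-split audio archives and transcript TSVs for this dialect."""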
        dialect = self.config.name

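        # In streaming mode the tar archives are never extracted; examples are
        # read directly from them via dl_manager.iter_archive below.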
        audio_urls = {}
        splits = ("train", "dev", "test")
        for split in splits:
            audio_urls[split] = _AUDIO_URL.format(dialect=dialect, split=split)
        archive_paths = dl_manager.download(audio_urls)
        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}

        meta_urls = {split: _TRANSCRIPT_URL.format(dialect=dialect, split=split) for split in splits}
        meta_paths = dl_manager.download_and_extract(meta_urls)

        split_generators = []
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        for split in splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=split_names.get(split, split),
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
                        "archive": dl_manager.iter_archive(archive_paths.get(split)),
                        "meta_path": meta_paths[split],
                    },
                ),
            )

        return split_generators

    def _generate_examples(self, local_extracted_archive_paths, archive, meta_path):
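        """Yield examples by joining tar'd audio files with their TSV metadata."""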
        data_fields = list(self._info().features.keys())
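        # First pass: index metadata rows by audio filename so that archive
        # members can be matched as they stream past.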
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading metadata..."):
                for field in data_fields:
                    if field not in row:
                        row[field] = ""
                metadata[row["path"]] = row

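        # Second pass: walk the tar archive and attach raw audio bytes to the
        # matching metadata row.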
        for path, file in archive:
            _, filename = os.path.split(path)
            if filename in metadata:
                result = dict(metadata[filename])
                path = os.path.join(local_extracted_archive_paths, path) if local_extracted_archive_paths else path
                result["audio"] = {"path": path, "bytes": file.read()}
                result["path"] = path
                yield path, result