Dataset: TV3Parla (collectivat/tv3_parla)

Languages: Catalan
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: found
Source Datasets: original
License: cc-by-nc-4.0 (Creative Commons Attribution-NonCommercial 4.0 International)

Dataset loading script:
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TV3Parla."""

import re

import datasets
from datasets.tasks import AutomaticSpeechRecognition


_CITATION = """\
@inproceedings{kulebi18_iberspeech,
  author={Baybars Külebi and Alp Öktem},
  title={{Building an Open Source Automatic Speech Recognition System for Catalan}},
  year=2018,
  booktitle={Proc. IberSPEECH 2018},
  pages={25--29},
  doi={10.21437/IberSPEECH.2018-6}
}
"""

_DESCRIPTION = """\
This corpus includes 240 hours of Catalan speech from broadcast material.
The details of segmentation, data processing and model training are explained in Külebi and Öktem (2018).
The content is owned by Corporació Catalana de Mitjans Audiovisuals, SA (CCMA);
we processed their material and hereby make it available under their terms of use.

This project was supported by the Softcatalà Association.
"""

_HOMEPAGE = "https://collectivat.cat/asr#tv3parla"

_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International"

_REPO = "https://huggingface.co/datasets/collectivat/tv3_parla/resolve/main/"
_URLS = {
    "transcripts": _REPO + "tv3_0.3_{split}.transcription",
    "audio": _REPO + "tv3_0.3.tar.gz",
}
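# Note: formatting "transcripts" with split="train" resolves to
# .../tv3_parla/resolve/main/tv3_0.3_train.transcription (split names render as
# "train"/"test"); "audio" has no {split} placeholder, so both splits reuse the
# same cached archive download.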
_SPLITS = [datasets.Split.TRAIN, datasets.Split.TEST]

_PATTERN = re.compile(r"^<s> (?P<text>.+) </s> \((?P<id>\S+)\)$")
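# Each transcript line is expected in the Kaldi-style form "<s> TEXT </s> (ID)".
# For example, a hypothetical line "<s> bon dia </s> (utt_0001)" parses to
# text="bon dia" and id="utt_0001".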


class Tv3Parla(datasets.GeneratorBasedBuilder):
    """TV3Parla."""

    VERSION = datasets.Version("0.3.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "audio": datasets.features.Audio(),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[
                AutomaticSpeechRecognition(transcription_column="text")
            ],
        )

    def _split_generators(self, dl_manager):
        # Build per-split URLs: the transcript URL is parameterized by split,
        # while the audio URL points to a single archive shared by all splits.
        urls = {
            split: {key: url.format(split=split) for key, url in _URLS.items()} for split in _SPLITS
        }
        dl_dir = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "transcripts_path": dl_dir[split]["transcripts"],
                    # iter_archive yields (path, file object) pairs from the tar
                    # archive without extracting it to disk.
                    "audio_files": dl_manager.iter_archive(dl_dir[split]["audio"]),
                    "split": split,
                },
            ) for split in _SPLITS
        ]

    def _generate_examples(self, transcripts_path, audio_files, split):
        # Build a mapping from utterance ID to transcription text.
        transcripts = {}
        with open(transcripts_path, encoding="utf-8") as transcripts_file:
            for line in transcripts_file:
                match = _PATTERN.match(line)
                if match is None:
                    # Skip empty or malformed lines instead of raising on None.
                    continue
                transcripts[match["id"]] = match["text"]
        # Expected number of yielded examples - train: 159242; test: 2220
        for key, (path, file) in enumerate(audio_files):
            if path.endswith(".wav") and f"/{split}/" in path:
                # The utterance ID is the file name without its ".wav" extension.
                uid = path.split("/")[-1][:-4]
                if uid not in transcripts:
                    continue
                text = transcripts.pop(uid)
                audio = {"path": path, "bytes": file.read()}
                yield key, {"path": path, "audio": audio, "text": text}
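
A minimal usage sketch, assuming this script backs the Hub repository
collectivat/tv3_parla (per _REPO above) and that the datasets library is
installed; the accessed fields follow the features declared in _info:

from datasets import load_dataset

# Load the (smaller) test split; this downloads both the transcript file
# and the shared audio archive.
ds = load_dataset("collectivat/tv3_parla", split="test")

example = ds[0]
print(example["text"])           # transcription string
print(example["audio"]["path"])  # path of the .wav file inside the archive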