Tags: speech-modeling
First version of data loader for NPSC
Files changed:
- .gitignore  +129 -0
- NPSC.py     +180 -0
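Once merged, the loader should work through the standard datasets API. A minimal usage sketch — the NbAiLab/NPSC repo id is taken from the example URL in NPSC.py's comments (the download templates themselves point at NB/NPSC), so treat it as an assumption:

    from datasets import load_dataset

    # "48K_mp3" is the only config defined in BUILDER_CONFIGS below
    npsc = load_dataset("NbAiLab/NPSC", "48K_mp3", split="train")
    print(npsc[0]["sentence_text"])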
.gitignore
ADDED
@@ -0,0 +1,129 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
NPSC.py
ADDED
@@ -0,0 +1,180 @@
+# coding=utf-8
+# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""NPSC: Norwegian Parliament Speech Corpus"""
+
+import io
+import json
+import tarfile
+import datasets
+import fsspec
+from datasets.tasks import AutomaticSpeechRecognition
+from datasets.utils.streaming_download_manager import xopen
+
+
+_CITATION = """\
+@inproceedings{johansen2019ner,
+  title={},
+  author={},
+  booktitle={LREC 2022},
+  year={2022},
+  url={https://arxiv.org/abs/}
+}
+"""
+
+_DESCRIPTION = """\
+The Norwegian Parliament Speech Corpus (NPSC) is a corpus for training Norwegian ASR (Automatic Speech Recognition) models. The corpus was created by Språkbanken at the National Library of Norway.
+
+NPSC is based on sound recordings from meetings in the Norwegian Parliament. These talks are orthographically transcribed to either Norwegian Bokmål or Norwegian Nynorsk. In addition to the data actually included in this dataset, a significant amount of metadata is included in the original corpus. Through the speaker id there is additional information about the speaker, like gender, age, and place of birth (i.e., dialect). Through the proceedings id the corpus can be linked to the official proceedings from the meetings.
+
+In total, the corpus contains sound recordings from 40 entire days of meetings. This amounts to 140 hours of speech, 65,000 sentences, or 1.2 million words.
+"""
+
+_HOMEPAGE = "https://www.nb.no/sprakbanken/en/resource-catalogue/oai-nb-no-sbr-58/"
+
+# Example: https://huggingface.co/datasets/NbAiLab/NPSC/resolve/main/data/train/20170110_48K_mp3.tar.gz
+_DATA_URL = "https://huggingface.co/datasets/NB/NPSC/resolve/main/data/{split}/{shard}_{config}.tar.gz"
+# Example: https://huggingface.co/datasets/NB/NPSC/resolve/main/data/test/20170207.json
+_METADATA_URL = "https://huggingface.co/datasets/NB/NPSC/resolve/main/data/{split}/{shard}.json"
+
+_SHARDS = {
+    "validation": ["20170209", "20180109", "20180201", "20180307", "20180611"],
+    "test": ["20170207", "20171122", "20171219", "20180530"],
+    "train": ["20170110", "20170208", "20170215", "20170216", "20170222", "20170314", "20170322", "20170323", "20170403", "20170405", "20170419", "20170426", "20170503", "20170510", "20170516", "20170613", "20170615", "20171007", "20171012", "20171018", "20171024", "20171208", "20171211", "20171213", "20180316"],
+}
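For orientation, here is how the two URL templates expand for a single test shard of the 48K_mp3 config. Note that the example URLs in the comments use the NbAiLab/NPSC namespace while the templates themselves point at NB/NPSC:

    data_url = _DATA_URL.format(split="test", shard="20170207", config="48K_mp3")
    # -> https://huggingface.co/datasets/NB/NPSC/resolve/main/data/test/20170207_48K_mp3.tar.gz
    metadata_url = _METADATA_URL.format(split="test", shard="20170207")
    # -> https://huggingface.co/datasets/NB/NPSC/resolve/main/data/test/20170207.json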
+
+class NpscConfig(datasets.BuilderConfig):
+    """BuilderConfig for NPSC."""
+
+    def __init__(self, *args, **kwargs):
+        """BuilderConfig for NPSC.
+
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(NpscConfig, self).__init__(*args, **kwargs)
+
+
+class Npsc(datasets.GeneratorBasedBuilder):
+    """NPSC dataset."""
+
+    DEFAULT_WRITER_BATCH_SIZE = 1000
+    BUILDER_CONFIGS = [
+        NpscConfig(
+            name="48K_mp3",
+            version=datasets.Version("1.0.0"),
+            description="NPSC with samples in 48KHz mp3",
+        ),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "meeting_date": datasets.Value("string"),
+                    "sentence_order": datasets.Value("int32"),
+                    "speaker_id": datasets.Value("int32"),
+                    "speaker_name": datasets.Value("string"),
+                    "sentence_text": datasets.Value("string"),
+                    "sentence_language_code": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "start_time": datasets.Value("int32"),
+                    "end_time": datasets.Value("int32"),
+                    "normsentence_text": datasets.Value("string"),
+                    "transsentence_text": datasets.Value("string"),
+                    "translated": datasets.Value("int32"),
+                    "audio": datasets.features.Audio(sampling_rate=48000),
+
+                }
+            ),
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+            task_templates=[
+                AutomaticSpeechRecognition(
+                    audio_file_path_column="path",
+                    transcription_column="sentence_text"
+                )
+            ],
+        )
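Since the audio feature is declared at 48 kHz, consumers training 16 kHz ASR models would typically resample through the standard datasets API rather than in the loader itself. A quick sketch (repo id assumed, as above):

    from datasets import Audio, load_dataset

    ds = load_dataset("NbAiLab/NPSC", "48K_mp3", split="train")
    # decode and resample lazily on access instead of re-encoding the shards
    ds = ds.cast_column("audio", Audio(sampling_rate=16_000))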
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        data_urls = {}
+        metadata_urls = {}
+        config_name = self.config.name
+        for split in ["train", "validation", "test"]:
+            metadata_urls[split] = []
+            data_urls[split] = []
+            for shard in _SHARDS[split]:
+                metadata_urls[split] += [
+                    _METADATA_URL.format(split=split, shard=shard)
+                ]
+                data_urls[split] += [
+                    _DATA_URL.format(split=split, shard=shard, config=config_name)
+                ]
+        train_downloaded_metadata = dl_manager.download(metadata_urls["train"])
+        validation_downloaded_metadata = dl_manager.download(metadata_urls["validation"])
+        test_downloaded_metadata = dl_manager.download(metadata_urls["test"])
+        train_downloaded_archives = dl_manager.download(data_urls["train"])
+        validation_downloaded_archives = dl_manager.download(data_urls["validation"])
+        test_downloaded_archives = dl_manager.download(data_urls["test"])
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={
+                    "archives": train_downloaded_archives,
+                    "metadata_paths": train_downloaded_metadata,
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION, gen_kwargs={
+                    "archives": validation_downloaded_archives,
+                    "metadata_paths": validation_downloaded_metadata,
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST, gen_kwargs={
+                    "archives": test_downloaded_archives,
+                    "metadata_paths": test_downloaded_metadata,
+                }
+            ),
+        ]
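Note that _split_generators uses dl_manager.download rather than download_and_extract: the tar.gz shards are opened by hand in _generate_examples below, which together with xopen should keep the script usable in streaming mode. A hedged sketch:

    from datasets import load_dataset

    # streaming avoids materializing ~140 hours of audio locally (repo id assumed)
    npsc_stream = load_dataset("NbAiLab/NPSC", "48K_mp3", split="train", streaming=True)
    first_example = next(iter(npsc_stream))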
+
+    def _generate_examples(self, archives, metadata_paths):
+        """Yields examples."""
+        data_fields = list(self._info().features.keys())
+        data_fields.remove("audio")
+        for archive_path, metadata_path in zip(archives, metadata_paths):
+            metadata = {}
+            with xopen(metadata_path) as metadata_file:
+                for line in metadata_file.read().split("\n"):
+                    if line:
+                        metadata_object = json.loads(line)
+                        if "path" in metadata_object:
+                            metadata_key = metadata_object["path"].split("/", 1)[-1]
+                            metadata[metadata_key] = metadata_object
+            with xopen(archive_path, "rb") as archive_fs:
+                archive_bytes = io.BytesIO(archive_fs.read())
+                with tarfile.open(fileobj=archive_bytes, mode="r") as tar:
+                    for audio_file in tar.getmembers():
+                        if audio_file.isfile():
+                            metadata_key = audio_file.name.split(".mp3", 1)[0].split("/", 1)[-1]
+                            audio_bytes = tar.extractfile(audio_file).read()
+                            audio_dict = {"bytes": audio_bytes, "path": audio_file.name}
+                            fields = {key: metadata[metadata_key][key] for key in data_fields}
+                            yield metadata_key, {"audio": audio_dict, **fields}
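_generate_examples expects JSON-lines metadata: one object per line, carrying a "path" field whose leading directory is stripped (and, on the tar side, the ".mp3" suffix) so that audio members can be joined to their metadata. A purely illustrative line — the values here are hypothetical, only the keys follow the declared features:

    {"path": "20170207/sentence_000001", "meeting_date": "20170207", "sentence_order": 1,
     "speaker_id": 42, "speaker_name": "Hypothetical Speaker", "sentence_text": "...",
     "sentence_language_code": "nb-NO", "text": "...", "start_time": 0, "end_time": 3500,
     "normsentence_text": "...", "transsentence_text": "...", "translated": 0}

This would pair with a tar member named 20170207/sentence_000001.mp3.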