# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
import os

import datasets

_CITATION = """\
@misc{Sofwath_2023,
title = "Dhivehi Presidential Speech Dataset",
url = "https://huggingface.co/datasets/dash8x/presidential_speech",
journal = "Hugging Face",
author = "Sofwath",
year = "2018",
month = jul
}
"""
_DESCRIPTION = """\
Dhivehi Presidential Speech is a Dhivehi speech dataset created from data extracted and
processed by [Sofwath](https://github.com/Sofwath) as part of a collection of Dhivehi
datasets found [here](https://github.com/Sofwath/DhivehiDatasets).
The dataset contains around 2.5 hrs (1 GB) of speech collected from Maldives President's Office
consisting of 7 speeches given by President Yaameen Abdhul Gayyoom.
"""

_HOMEPAGE = 'https://github.com/Sofwath/DhivehiDatasets'

_LICENSE = 'CC BY-NC-SA 4.0'

# Source data: 'https://drive.google.com/file/d/1vhMXoB2L23i4HfAGX7EYa4L-sfE4ThU5/view?usp=sharing'
_DATA_URL = 'data'


class DhivehiPresidentialSpeech(datasets.GeneratorBasedBuilder):
"""Dhivehi Presidential Speech is a free Dhivehi speech corpus consisting of around 2.5 hours of
recorded speech prepared for Dhivehi Automatic Speech Recognition task."""
VERSION = datasets.Version('1.0.0')
# This is an example of a dataset with multiple configurations.
# If you don't want/need to define several sub-sets in your dataset,
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
# If you need to make complex sub-parts in the datasets with configurable options
# You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
# BUILDER_CONFIG_CLASS = MyBuilderConfig
def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
features=datasets.Features(
{
'path': datasets.Value('string'),
'audio': datasets.Audio(sampling_rate=16_000),
'sentence': datasets.Value('string'),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
dl_manager.download_config.ignore_url_params = True
audio_path = {}
local_extracted_archive = {}
metadata_path = {}
split_type = {
'train': datasets.Split.TRAIN,
'test': datasets.Split.TEST,
'validation': datasets.Split.VALIDATION,
}
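        # For each split, download the audio archive and its CSV transcript file.
        # In streaming mode the archive is not extracted up front; its members are
        # read on the fly via dl_manager.iter_archive in the split generators below.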
for split in split_type:
audio_path[split] = dl_manager.download(f'{_DATA_URL}/audio_{split}.tar.gz')
local_extracted_archive[split] = dl_manager.extract(audio_path[split]) if not dl_manager.is_streaming else None
metadata_path[split] = dl_manager.download_and_extract(f'{_DATA_URL}/metadata_{split}.csv')
path_to_clips = 'dv-presidential-speech'
return [
datasets.SplitGenerator(
name=split_type[split],
gen_kwargs={
'local_extracted_archive': local_extracted_archive[split],
'audio_files': dl_manager.iter_archive(audio_path[split]),
'metadata_path': metadata_path[split],
'path_to_clips': f'{path_to_clips}-{split}/waves',
},
) for split in split_type
]

    def _generate_examples(
self,
local_extracted_archive,
audio_files,
metadata_path,
path_to_clips,
):
"""Yields examples."""
data_fields = list(self._info().features.keys())
metadata = {}
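        # Build a transcript lookup from the metadata CSV, keyed by the value in the
        # first column (the audio file name), so archive members can be matched below.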
with open(metadata_path, 'r', encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
for row in reader:
row_dict = {}
row_dict['path'] = row[0]
row_dict['sentence'] = row[1]
# if data is incomplete, fill with empty values
for field in data_fields:
if field not in row_dict:
row_dict[field] = ''
metadata[row_dict['path']] = row_dict
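        # Walk the members of the (possibly streamed) audio archive and pair each
        # file with its transcript; files without a metadata entry are skipped.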
id_ = 0
for path, f in audio_files:
file_name = os.path.splitext(os.path.basename(path))[0]
if file_name in metadata:
result = dict(metadata[file_name])
# set the audio feature and the path to the extracted file
path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
result['audio'] = {'path': path, 'bytes': f.read()}
result['path'] = path
yield id_, result
id_ += 1
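

# A minimal usage sketch (not part of the original loading script): it loads the
# dataset through this builder with `datasets.load_dataset`. The relative script
# path below is an assumption, and recent `datasets` releases may additionally
# require `trust_remote_code=True` when loading from a Python script.
if __name__ == '__main__':
    from datasets import load_dataset

    ds = load_dataset('./dv-presidential-speech.py', split='train')
    print(ds[0]['path'])
    print(ds[0]['sentence'])
    print(ds[0]['audio']['sampling_rate'])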