# coding=utf-8
# Copyright 2023 The BizzAI and HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""BizzBuddy AI Wake Dataset."""

import csv
import os

import datasets

logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{ahmed2023wake,
  title={Wake word data for Voice assistant trigger in English from spoken data},
  author={Ahmed, Nicholas},
  year={2023}
}
"""
_DESCRIPTION = """\
Wake is a training and evaluation resource for the wake word
detection task with spoken data. It covers the wake and not-wake
intents: the wake-word recordings were collected from multiple participants who agreed to contribute
to the development of the system, while the not-wake recordings are a subset of the Common Voice and Speech Commands datasets.
"""
_ALL_CONFIGS = sorted([
"en-US"
])
_DATA_URL = 'https://huggingface.co/datasets/Ahmed-ibn-Harun/wake-w/resolve/main/data.tar.gz'
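# Minimal usage sketch (illustrative only; assumes this script lives in a Hub dataset
# repo, e.g. Ahmed-ibn-Harun/wake -- the repo id here is an assumption, not part of
# the builder's own logic):
#
#     from datasets import load_dataset
#
#     wake = load_dataset("Ahmed-ibn-Harun/wake", "en-US", split="train")
#     sample = wake[0]
#     sample["audio"]["array"]   # decoded waveform (declared sampling rate: 8 kHz)
#     sample["wake"]             # integer class label (wake vs. not wake)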
class WakeConfig(datasets.BuilderConfig):
    """BuilderConfig for Wake."""

    def __init__(self, name, description, data_url):
        super(WakeConfig, self).__init__(
            name=name,
            version=datasets.Version("1.0.0", ""),
            description=description,
        )
        self.data_url = data_url
def _build_config(name):
return WakeConfig(
name=name,
description=_DESCRIPTION,
data_url=_DATA_URL,
)
class Wake(datasets.GeneratorBasedBuilder):
DEFAULT_WRITER_BATCH_SIZE = 1000
BUILDER_CONFIGS = [_build_config(name) for name in _ALL_CONFIGS + ["all"]]
def _info(self):
task_templates = None
langs = _ALL_CONFIGS
features = datasets.Features(
{
"path": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=8_000),
"wake": datasets.ClassLabel(
names=[
0,
1,
]
),
"lang_id": datasets.ClassLabel(names=langs),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
            supervised_keys=("audio", "wake"),
citation=_CITATION,
task_templates=task_templates,
)
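    # Expected layout of the downloaded archive (inferred from the extraction steps
    # below; the exact contents of the hosted data.tar.gz are an assumption):
    #
    #   data.tar.gz
    #   ├── audio.tar.gz   -> extracted to `audio_path`, holds the audio clips
    #   └── text.tar.gz    -> extracted to `text_path`, holds one <lang>.csv per config (e.g. en-US.csv)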
def _split_generators(self, dl_manager):
langs = (
_ALL_CONFIGS
if self.config.name == "all"
else [self.config.name]
)
archive_path = dl_manager.download_and_extract(self.config.data_url)
audio_path = dl_manager.extract(
os.path.join(archive_path, "audio.tar.gz")
)
text_path = dl_manager.extract(
os.path.join(archive_path, "text.tar.gz")
)
        text_path = {lang: os.path.join(text_path, f"{lang}.csv") for lang in langs}
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"audio_path": audio_path,
"text_paths": text_path,
},
)
]
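    # Each per-language CSV is assumed to have a header row followed by
    # `relative/audio/path,label` rows; that is the only structure the parsing
    # below relies on (the header's column names are never used).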
def _generate_examples(self, audio_path, text_paths):
key = 0
for lang in text_paths.keys():
text_path = text_paths[lang]
with open(text_path, encoding="utf-8") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",", skipinitialspace=True)
                # Skip the CSV header row.
                next(csv_reader)
                for row in csv_reader:
                    # Each row: relative audio path, wake/not-wake label.
                    file_path, intent_class = row
                    # Rebuild the path OS-independently, anchored in the extracted audio archive.
                    file_path = os.path.join(audio_path, *file_path.split("/"))
yield key, {
"path": file_path,
"audio": file_path,
"wake": intent_class,
"lang_id": _ALL_CONFIGS.index(lang),
}
key += 1