|
|
|
|
|
"""AudioSet sound event classification dataset.""" |
|
|
|
|
|
import os
import json
import logging
import typing as tp
from pathlib import Path

import datasets
import pandas as pd
import requests
from rich.logging import RichHandler
|
|
|
logger = logging.getLogger(__name__) |
|
logger.addHandler(RichHandler()) |
|
logger.setLevel(logging.INFO) |
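
SAMPLE_RATE = 16_000  # assumed target sampling rate for decoded clips; adjust if the audio was prepared differently

# The 527 AudioSet class display names. Rather than hard-coding the full list,
# load it from the canonical class_labels_indices.csv published with AudioSet.
# This assumes network access at import time; vendor the list into this file if
# that is not acceptable.
CLASSES = pd.read_csv(
    'http://storage.googleapis.com/us_audioset/youtube_corpus/v1/csv/class_labels_indices.csv'
)['display_name'].tolist()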
|
|
|
|
|
DATA_DIR_STRUCTURE = """ |
|
audios/ |
|
βββ balanced_train_segments [20550 entries] |
|
βββ eval_segments [18887 entries] |
|
βββ unbalanced_train_segments |
|
βββ unbalanced_train_segments_part00 [46940 entries] |
|
... |
|
βββ unbalanced_train_segments_part40 [9844 entries] |
|
""" |
|
|
|
|
|
class AudioSetConfig(datasets.BuilderConfig): |
|
"""BuilderConfig for AudioSet.""" |
|
|
|
    def __init__(self, features, **kwargs):
        super().__init__(version=datasets.Version("0.0.1", ""), **kwargs)
        self.features = features
|
|
|
|
|
class AudioSet(datasets.GeneratorBasedBuilder): |
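    """Sound event classification dataset builder for AudioSet."""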
|
|
|
BUILDER_CONFIGS = [ |
|
AudioSetConfig( |
|
features=datasets.Features( |
|
{ |
|
"file": datasets.Value("string"), |
|
"audio": datasets.Audio(sampling_rate=SAMPLE_RATE), |
|
"sound": datasets.Sequence(datasets.Value("string")), |
|
"label": datasets.Sequence(datasets.features.ClassLabel(names=CLASSES)), |
|
} |
|
), |
|
name="balanced", |
|
description="", |
|
), |
|
AudioSetConfig( |
|
features=datasets.Features( |
|
{ |
|
"file": datasets.Value("string"), |
|
"audio": datasets.Audio(sampling_rate=SAMPLE_RATE), |
|
"sound": datasets.Sequence(datasets.Value("string")), |
|
"label": datasets.Sequence(datasets.features.ClassLabel(names=CLASSES)), |
|
} |
|
), |
|
name="unbalanced", |
|
description="", |
|
), |
|
] |
|
|
|
    def _info(self):
        return datasets.DatasetInfo(
            description="AudioSet: a large-scale collection of human-labeled 10-second sound clips for multi-label sound event classification.",
            features=self.config.features,
            supervised_keys=None,
            homepage="https://research.google.com/audioset/",
            citation="Gemmeke et al., 'Audio Set: An ontology and human-labeled dataset for audio events', ICASSP 2017.",
            task_templates=None,
        )
|
|
|
@property |
|
def manual_download_instructions(self): |
|
return ( |
|
"To use AudioSet you have to download it manually. " |
|
"Please download the dataset from https://huggingface.co/datasets/confit/audioset-full \n" |
|
"Then extract all files in one folder called `audios` and load the dataset with: " |
|
"`datasets.load_dataset('confit/audioset', 'balanced', data_dir='path/to/folder')`\n" |
|
"The tree structure of the downloaded data looks like: \n" |
|
f"{DATA_DIR_STRUCTURE}" |
|
) |
|
|
|
def _split_generators(self, dl_manager): |
|
|
|
data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir)) |
|
|
|
if not os.path.exists(data_dir): |
|
raise FileNotFoundError( |
|
f"{data_dir} does not exist. Make sure you insert a manual dir via " |
|
f"`datasets.load_dataset('confit/audioset', 'balanced', data_dir=...)` that includes files unzipped from all the zip files. " |
|
f"Manual download instructions: {self.manual_download_instructions}" |
|
) |
|
|
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train", "data_dir": data_dir}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test", "data_dir": data_dir}), |
|
] |
|
|
|
def _generate_examples(self, split, data_dir): |
|
"""Generate examples from AudioSet""" |
|
|
|
extensions = ['.wav'] |
|
|
|
if split == 'train': |
|
if self.config.name == 'balanced': |
|
archive_path = os.path.join(data_dir, 'audios', 'balanced_train_segments') |
|
metadata_url = 'https://huggingface.co/datasets/confit/audioset/resolve/main/metadata/audioset-20k.jsonl' |
|
elif self.config.name == 'unbalanced': |
|
archive_path = os.path.join(data_dir, 'audios', 'unbalanced_train_segments') |
|
metadata_url = 'https://huggingface.co/datasets/confit/audioset/resolve/main/metadata/audioset-2m.jsonl' |
|
elif split == 'test': |
|
archive_path = os.path.join(data_dir, 'audios', 'eval_segments') |
|
metadata_url = 'https://huggingface.co/datasets/confit/audioset/resolve/main/metadata/audioset-eval.jsonl' |
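        # Each metadata line is a JSON object; based on how it is consumed below,
        # the assumed schema is, e.g.
        #   {"filename": "<clip_id>.wav", "label": ["Speech", "Music"]}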
|
|
|
        response = requests.get(metadata_url)
        if response.status_code == 200:
            data_list = [json.loads(line) for line in response.text.splitlines()]
            fileid2labels = {item['filename']: item['label'] for item in data_list}
        else:
            raise ConnectionError(
                f"Failed to retrieve metadata from {metadata_url}: status code {response.status_code}"
            )
|
|
|
_, wav_paths = fast_scandir(archive_path, extensions, recursive=True) |
|
|
|
        for guid, wav_path in enumerate(wav_paths):
            fileid = Path(wav_path).name
            sound = fileid2labels.get(fileid)
            # Skip audio files that have no metadata entry instead of hiding
            # arbitrary failures behind a bare `except`.
            if sound is None:
                continue
            yield guid, {
                "file": wav_path,
                "audio": wav_path,
                "sound": sound,
                "label": sound,
            }
|
|
|
|
|
def fast_scandir(path: str, extensions: tp.List[str], recursive: bool = False):
    """Scan `path` and return `(subfolders, files)`, keeping only files whose
    extension (lowercased, including the dot) is in `extensions`."""
    subfolders, files = [], []

    try:
        for entry in os.scandir(path):
            try:
                if entry.is_dir():
                    subfolders.append(entry.path)
                elif entry.is_file():
                    if os.path.splitext(entry.name)[1].lower() in extensions:
                        files.append(entry.path)
            except Exception:
                # Ignore entries that cannot be inspected (broken symlinks, permission errors).
                pass
    except Exception:
        # Ignore directories that cannot be scanned at all.
        pass

    if recursive:
        for subfolder in list(subfolders):
            sf, f = fast_scandir(subfolder, extensions, recursive=recursive)
            subfolders.extend(sf)
            files.extend(f)

    return subfolders, files
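

# Minimal usage sketch (runs only when this script is executed directly, not when
# `datasets` imports it as a loading script). The `data_dir` below is a placeholder
# taken from the manual download instructions above.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "confit/audioset",
        "balanced",
        data_dir="path/to/folder",  # replace with the directory that contains `audios/`
    )
    print(ds)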