import os
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Union

import numpy as np
import pandas as pd

import datasets
import datasets.info
from datasets import load_dataset

_CITATION = """\
@dataset{kota_dohi_2023_7687464,
  author    = {Kota Dohi and Keisuke Imoto and Noboru Harada and Daisuke Niizumi and
               Yuma Koizumi and Tomoya Nishida and Harsh Purohit and Takashi Endo and
               Yohei Kawaguchi},
  title     = {DCASE 2023 Challenge Task 2 Development Dataset},
  month     = mar,
  year      = 2023,
  publisher = {Zenodo},
  version   = {1.0},
  doi       = {10.5281/zenodo.7687464},
  url       = {https://doi.org/10.5281/zenodo.7687464}
}
"""

_LICENSE = "Creative Commons Attribution 4.0 International Public License"

_METADATA_REG = r"attributes_\d+\.csv"

_NUM_TARGETS = 2
_NUM_CLASSES = 7

_TARGET_NAMES = ["normal", "anomaly"]
_CLASS_NAMES = ["gearbox", "fan", "bearing", "slider", "ToyCar", "ToyTrain", "valve"]

_HOMEPAGE = {
    "dev": "https://zenodo.org/record/7687464#.Y_96q9LMLmH",
    "add": "",
    "eval": "",
}

DATA_URLS = {
    "dev": {
        "train": "data/dev_train.tar.gz",
        "test": "data/dev_test.tar.gz",
        "metadata": "data/dev_metadata.csv",
    },
    "add": {
        "train": "data/add_train.tar.gz",
        "test": "data/add_test.tar.gz",
        "metadata": "data/add_metadata.csv",
    },
    "eval": {
        "test": "data/eval_test.tar.gz",
        "metadata": "data/eval_metadata.csv",
    },
}

EMBEDDING_URLS = {
    "dev": {
        "ast-finetuned-audioset-10-10-0.4593-embeddings": {
            "train": "data/MIT_ast-finetuned-audioset-10-10-0.4593-embeddings_dev_train.npz",
            "test": "data/MIT_ast-finetuned-audioset-10-10-0.4593-embeddings_dev_test.npz",
            "size": (1, 768),
            "dtype": "float32",
        },
    },
    "add": {
        "ast-finetuned-audioset-10-10-0.4593-embeddings": {
            "train": "",
            "test": "",
        },
    },
    "eval": {
        "ast-finetuned-audioset-10-10-0.4593-embeddings": {
            "train": "",
            "test": "",
        },
    },
}
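

# A minimal sketch of the embedding-archive layout this loader expects, kept here
# for documentation only (it is never called by the builder). Each .npz archive
# holds a single pickled dict stored under "arr_0", mapping lookup keys of the
# form Path(path).parent.name + "/" + Path(path).name to an array with the
# advertised "size" and "dtype"; _generate_examples recovers the dict via
# np.load(..., allow_pickle=True).item(). The file name and the concrete key
# below are made-up examples, not shipped files.
def _embedding_archive_example(path: str = "embeddings_example.npz") -> None:
    emb = {"example_dir/example_clip.wav": np.zeros((1, 768), dtype="float32")}
    np.savez(path, emb)  # the dict is stored as a 0-d object array named "arr_0"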


STATS = {
    "name": "Enriched Dataset of 'DCASE 2023 Challenge Task 2'",
    "configs": {
        "dev": {
            "date": "Mar 1, 2023",
            "version": "1.0.0",
            "homepage": "https://zenodo.org/record/7687464#.ZABmANLMLmH",
            "splits": ["train", "test"],
        },
        # "add": {
        #     "date": None,
        #     "version": "0.0.0",
        #     "homepage": None,
        #     "splits": ["train", "test"],
        # },
        # "eval": {
        #     "date": None,
        #     "version": "0.0.0",
        #     "homepage": None,
        #     "splits": ["test"],
        # },
    },
}

DATASET = {
    "dev": "DCASE 2023 Challenge Task 2 Development Dataset",
    "add": "DCASE 2023 Challenge Task 2 Additional Train Dataset",
    "eval": "DCASE 2023 Challenge Task 2 Evaluation Dataset",
}

_SPOTLIGHT_LAYOUT = "data/config-spotlight-layout.json"

# Column renames applied for Spotlight: the file-path column is exposed as
# "audio", while the original audio column is kept as "original_audio".
_SPOTLIGHT_RENAME = {
    "audio": "original_audio",
    "path": "audio",
}


class DCASE2023Task2DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for DCASE2023Task2Dataset."""

    def __init__(self, name, version, **kwargs):
        self.release_date = kwargs.pop("release_date", None)
        self.homepage = kwargs.pop("homepage", None)
        self.data_urls = kwargs.pop("data_urls", None)
        self.embeddings_urls = kwargs.pop("embeddings_urls", None)
        self.splits = kwargs.pop("splits", None)
        self.rename = kwargs.pop("rename", None)
        self.layout = kwargs.pop("layout", None)
        self.dataset = kwargs.pop("dataset", None)
        description = (
            f"Dataset for the DCASE 2023 Challenge Task 2 'First-Shot Unsupervised Anomalous Sound Detection "
            f"for Machine Condition Monitoring', released on {self.release_date}. Original data available under "
            f"{self.homepage}. "
            f"CONFIG: {name}."
        )
        super(DCASE2023Task2DatasetConfig, self).__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
        )

    def to_spotlight(self, data: Union[pd.DataFrame, datasets.Dataset]) -> pd.DataFrame:
        """Convert a dataset split (or a DataFrame) into a Spotlight-ready DataFrame."""

        def get_split(path: str) -> str:
            fn = os.path.basename(path)
            if "train" in fn:
                return "train"
            elif "test" in fn:
                return "test"
            else:
                raise NotImplementedError

        if isinstance(data, datasets.Dataset):
            # Remove embedding columns first: Array2D columns raise an error in .to_pandas().
            embeddings = {}
            emb_features = [key for key, val in data.features.items() if isinstance(val, datasets.Array2D)]
            if len(emb_features) > 0:
                embeddings = {
                    key: [np.asarray(emb).reshape(-1) for emb in data[key].copy()]
                    for key in emb_features
                }
                data = data.remove_columns(emb_features)

            # Retrieve the split; for concatenated splits ("train+test"), infer it per file.
            df = data.to_pandas()
            df["split"] = data.split._name if "+" not in data.split._name else df["path"].map(get_split)
            df["config"] = data.config_name

            # Map class indices to human-readable class names.
            class_names = data.features["class"].names
            df["class_name"] = df["class"].apply(lambda x: class_names[x])

            # Re-append the embedding columns as flat arrays.
            for emb_name, emb_list in embeddings.items():
                df[emb_name] = emb_list
        elif isinstance(data, pd.DataFrame):
            df = data
        else:
            raise TypeError("type(data) not in Union[pd.DataFrame, datasets.Dataset]")

        df["file_path"] = df["path"]
        df.rename(columns=self.rename, inplace=True)
        return df.copy()

    def get_layout(self):
        return self.layout
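

# A minimal usage sketch for the Spotlight helpers above, kept as an uncalled
# function. It assumes the optional `renumics-spotlight` package; `spotlight.show`
# and its `layout=` argument are assumptions about that package's API, not part
# of this loader.
def _spotlight_usage_example() -> None:
    from renumics import spotlight  # assumed optional dependency

    builder = datasets.load_dataset_builder("dcase23-task2-enriched.py", "dev")
    ds = load_dataset("dcase23-task2-enriched.py", "dev", split="train")
    df = builder.config.to_spotlight(ds)
    spotlight.show(df, layout=builder.config.get_layout())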


class DCASE2023Task2Dataset(datasets.GeneratorBasedBuilder):
    """Dataset for the DCASE 2023 Challenge Task 2 "First-Shot Unsupervised
    Anomalous Sound Detection for Machine Condition Monitoring"."""

    VERSION = datasets.Version("0.0.3")

    DEFAULT_CONFIG_NAME = "dev"

    BUILDER_CONFIGS = [
        DCASE2023Task2DatasetConfig(
            name=key,
            version=stats["version"],
            dataset=DATASET[key],
            homepage=_HOMEPAGE[key],
            data_urls=DATA_URLS[key],
            embeddings_urls=EMBEDDING_URLS[key],
            release_date=stats["date"],
            splits=stats["splits"],
            layout=_SPOTLIGHT_LAYOUT,
            rename=_SPOTLIGHT_RENAME,
        )
        for key, stats in STATS["configs"].items()
    ]

    def _info(self):
        features = {
            "audio": datasets.Audio(sampling_rate=16_000),
            "path": datasets.Value("string"),
            "section": datasets.Value("int64"),
            "d1p": datasets.Value("string"),
            "d1v": datasets.Value("string"),
            "d2p": datasets.Value("string"),
            "d2v": datasets.Value("string"),
            "d3p": datasets.Value("string"),
            "d3v": datasets.Value("string"),
            "domain": datasets.ClassLabel(num_classes=2, names=["source", "target"]),
            "label": datasets.ClassLabel(num_classes=_NUM_TARGETS, names=_TARGET_NAMES),
            "class": datasets.ClassLabel(num_classes=_NUM_CLASSES, names=_CLASS_NAMES),
        }
        # One Array2D column per precomputed embedding archive.
        features.update({
            emb_name: datasets.Array2D(shape=emb["size"], dtype=emb["dtype"])
            for emb_name, emb in self.config.embeddings_urls.items()
        })
        features = datasets.Features(features)

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=self.config.description,
            features=features,
            supervised_keys=datasets.info.SupervisedKeysData("label"),
            homepage=self.config.homepage,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators."""
        dl_manager.download_config.ignore_url_params = True

        audio_path = {}
        local_extracted_archive = {}
        split_type = {"train": datasets.Split.TRAIN, "test": datasets.Split.TEST}
        embeddings = {split: dict() for split in split_type}

        for split in split_type:
            if split in self.config.splits:
                audio_path[split] = dl_manager.download(self.config.data_urls[split])
                local_extracted_archive[split] = dl_manager.extract(
                    audio_path[split]) if not dl_manager.is_streaming else None
                for emb_name, emb_data in self.config.embeddings_urls.items():
                    # Each archive holds one pickled dict ("arr_0") mapping file paths to embeddings.
                    embeddings[split][emb_name] = np.load(
                        dl_manager.download_and_extract(emb_data[split]) + "/arr_0.npy",
                        allow_pickle=True,
                    ).item()

        return [
            datasets.SplitGenerator(
                name=split_type[split],
                gen_kwargs={
                    "split": split,
                    "local_extracted_archive": local_extracted_archive[split],
                    "audio_files": dl_manager.iter_archive(audio_path[split]),
                    "embeddings": embeddings[split],
                    "metadata_file": dl_manager.download_and_extract(self.config.data_urls["metadata"]),
                },
            )
            for split in split_type
            if split in self.config.splits
        ]

    def _generate_examples(
        self,
        split: str,
        local_extracted_archive: Optional[str],
        audio_files: Optional[Iterable],
        embeddings: Optional[Dict],
        metadata_file: Optional[str],
    ):
        """Yields examples."""
        metadata = pd.read_csv(metadata_file)
        data_fields = list(self._info().features.keys())

        id_ = 0
        for path, f in audio_files:
            lookup = Path(path).parent.name + "/" + Path(path).name
            # Only yield files that are listed in the metadata CSV.
            if lookup in metadata["path"].values:
                path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                audio = {"path": path, "bytes": f.read()}
                result = {field: None for field in data_fields}
                result.update(metadata[metadata["path"] == lookup].T.squeeze().to_dict())
                # Attach the precomputed embeddings for this file.
                for emb_key in embeddings.keys():
                    result[emb_key] = embeddings[emb_key][lookup]
                result["path"] = path
                yield id_, {**result, "audio": audio}
                id_ += 1


if __name__ == "__main__":
    ds = load_dataset("dcase23-task2-enriched.py", "dev", split="train", streaming=True)
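    # Smoke test: pull one example from the streaming dataset and list its
    # columns. This assumes the script is run from the repository root so the
    # relative data/ paths resolve.
    example = next(iter(ds))
    print(sorted(example.keys()))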