import os
import json
import datasets
import datasets.info
import pandas as pd
import numpy as np
import tempfile
import requests
import io
from pathlib import Path
from datasets import load_dataset
from typing import Iterable, Dict, Optional, Union, List

_CITATION = """\
@dataset{kota_dohi_2023_7882613,
  author    = {Kota Dohi and Keisuke Imoto and Noboru Harada and Daisuke Niizumi and
               Yuma Koizumi and Tomoya Nishida and Harsh Purohit and Takashi Endo and
               Yohei Kawaguchi},
  title     = {DCASE 2023 Challenge Task 2 Development Dataset},
  month     = mar,
  year      = 2023,
  publisher = {Zenodo},
  version   = {3.0},
  doi       = {10.5281/zenodo.7882613},
  url       = {https://doi.org/10.5281/zenodo.7882613}
}
"""

_LICENSE = "Creative Commons Attribution 4.0 International Public License"

_METADATA_REG = r"attributes_\d+.csv"

_NUM_TARGETS = 2
_NUM_CLASSES = 14

_TARGET_NAMES = ["normal", "anomaly"]
_CLASS_NAMES = [
    "gearbox", "fan", "bearing", "slider", "ToyCar", "ToyTrain", "valve",
    "bandsaw", "grinder", "shaker", "ToyDrone", "ToyNscale", "ToyTank", "Vacuum",
]

_HOMEPAGE = {
    "dev": "https://zenodo.org/record/7690157",
    "add": "",
    "eval": "",
}

DATA_URLS = {
    "dev": {
        "train": "data/dev_train.tar.gz",
        "test": "data/dev_test.tar.gz",
        "metadata": "data/dev_metadata.csv",
    },
    "add": {
        "train": "data/add_train.tar.gz",
        "metadata": "data/add_metadata.csv",
    },
    "eval": {
        "test": "data/eval_test.tar.gz",
        "metadata": None,
    },
}

EMBEDDING_URLS = {
    "dev": {
        "embeddings_ast-finetuned-audioset-10-10-0.4593": {
            "train": "data/MIT_ast-finetuned-audioset-10-10-0.4593-embeddings_dev_train.npz",
            "test": "data/MIT_ast-finetuned-audioset-10-10-0.4593-embeddings_dev_test.npz",
            "size": (1, 768),
            "dtype": "float32",
        },
    },
    "add": {
        "embeddings_ast-finetuned-audioset-10-10-0.4593": {
            "train": "data/MIT_ast-finetuned-audioset-10-10-0.4593-embeddings_add_train.npz",
            "size": (1, 768),
            "dtype": "float32",
        },
    },
    "eval": {
        "embeddings_ast-finetuned-audioset-10-10-0.4593": {
            "test": "data/MIT_ast-finetuned-audioset-10-10-0.4593-embeddings_eval_test.npz",
            "size": (1, 768),
            "dtype": "float32",
        },
    },
}
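

# Illustrative sketch (not part of the loader): how the embedding archives listed in
# EMBEDDING_URLS are organised. Each .npz stores a single pickled dict under "arr_0",
# keyed by "<section directory>/<file name>" and holding one (1, 768) float32 AST
# embedding per clip, which is exactly what _split_generators() unpacks further below.
# The default path is just an example taken from EMBEDDING_URLS.
def _inspect_embedding_archive(
    npz_path: str = "data/MIT_ast-finetuned-audioset-10-10-0.4593-embeddings_dev_train.npz",
) -> None:
    npz_file = np.load(npz_path, allow_pickle=True)
    embeddings = npz_file["arr_0"].item()  # dict: relative clip path -> np.ndarray of shape (1, 768)
    first_key = next(iter(embeddings))
    print(first_key, embeddings[first_key].shape, embeddings[first_key].dtype)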


STATS = {
    "name": "Enriched Dataset of 'DCASE 2023 Challenge Task 2'",
    "configs": {
        "dev": {
            "date": "Mar 1, 2023",
            "version": "3.0.0",
            "homepage": "https://zenodo.org/record/7882613",
            "splits": ["train", "test"],
        },
        "add": {
            "date": "Apr 15, 2023",
            "version": "1.0.0",
            "homepage": "https://zenodo.org/record/7830345",
            "splits": ["train"],
        },
        "eval": {
            "date": "May 1, 2023",
            "version": "1.0.0",
            "homepage": "https://zenodo.org/record/7860847",
            "splits": ["test"],
        },
    },
}

DATASET = {
    "dev": "DCASE 2023 Challenge Task 2 Development Dataset",
    "add": "DCASE 2023 Challenge Task 2 Additional Train Dataset",
    "eval": "DCASE 2023 Challenge Task 2 Evaluation Dataset",
}

SPOTLIGHT_LAYOUTS = {
    "standard": {"orientation":"vertical","children":[{"kind":"split","weight":51.96463654223969,"orientation":"horizontal","children":[{"kind":"tab","weight":30,"children":[{"kind":"widget","name":"Table","type":"table","config":{"tableView":"full","visibleColumns":["class","class_name","config","d1p","d1v","d2p","d2v","d3p","d3v","file_path","label","section","split"],"sorting":None,"orderByRelevance":False}}]},{"kind":"tab","weight":33.970588235294116,"children":[{"kind":"widget","name":"Similarity Map (2)","type":"similaritymap","config":{"placeBy":None,"reductionMethod":None,"colorBy":"label","sizeBy":None,"filter":False,"umapNNeighbors":20,"umapMetric":None,"umapMinDist":0.15,"pcaNormalization":None,"umapMenuLocalGlobalBalance":None,"umapMenuIsAdvanced":False}}]},{"kind":"tab","weight":36.029411764705884,"children":[{"kind":"widget","name":"Similarity Map","type":"similaritymap","config":{"placeBy":None,"reductionMethod":None,"colorBy":"class","sizeBy":None,"filter":False,"umapNNeighbors":20,"umapMetric":None,"umapMinDist":0.15,"pcaNormalization":None,"umapMenuLocalGlobalBalance":None,"umapMenuIsAdvanced":False}},{"kind":"widget","name":"Scatter Plot","type":"scatterplot","config":{"xAxisColumn":None,"yAxisColumn":None,"colorBy":None,"sizeBy":None,"filter":False}},{"kind":"widget","name":"Histogram","type":"histogram","config":{"columnKey":None,"stackByColumnKey":None,"filter":False}}]}]},{"kind":"tab","weight":48.03536345776031,"children":[{"kind":"widget","name":"Inspector","type":"inspector","config":{"views":[{"view":"AudioView","columns":["path"],"name":"view","key":"43a5beff-9423-41c9-a5ba-285a7ece7a02"},{"view":"SpectrogramView","columns":["path"],"name":"view","key":"5f035027-dd02-4587-ba77-defdf823c124"}],"visibleColumns":4}}]}]},
    "simple": {"orientation":"vertical","children":[{"kind":"split","weight":60.575296108291035,"orientation":"horizontal","children":[{"kind":"tab","weight":31.52260461369049,"children":[{"kind":"widget","name":"Table","type":"table","config":{"tableView":"filtered","visibleColumns":["class","d1p","d1v","d2p","d2v","d3p","d3v","dev_train_lof_anomaly","dev_train_lof_anomaly_score","domain","label","section"],"sorting":None,"orderByRelevance":False}}]},{"kind":"tab","weight":33.869200490640154,"children":[{"kind":"widget","name":"Similarity map with AST-lof anomaly score","type":"similaritymap","config":{"placeBy":None,"reductionMethod":None,"colorBy":"dev_train_lof_anomaly_score","sizeBy":"label","filter":False,"umapNNeighbors":20,"umapMetric":None,"umapMinDist":0.15,"pcaNormalization":None,"umapMenuLocalGlobalBalance":None,"umapMenuIsAdvanced":False}}]},{"kind":"tab","weight":34.60819489566936,"children":[{"kind":"widget","name":"Similarity map with classes","type":"similaritymap","config":{"placeBy":None,"reductionMethod":None,"colorBy":"class","sizeBy":None,"filter":False,"umapNNeighbors":20,"umapMetric":None,"umapMinDist":0.15,"pcaNormalization":None,"umapMenuLocalGlobalBalance":None,"umapMenuIsAdvanced":False}},{"kind":"widget","name":"Scatter Plot","type":"scatterplot","config":{"xAxisColumn":None,"yAxisColumn":None,"colorBy":None,"sizeBy":None,"filter":False}},{"kind":"widget","name":"Histogram","type":"histogram","config":{"columnKey":"domain","stackByColumnKey":"prediction_correct_dcase2023_task2_baseline_ae","filter":False}}]}]},{"kind":"tab","weight":39.424703891708965,"children":[{"kind":"widget","name":"Inspector","type":"inspector","config":{"views":[{"view":"AudioView","columns":["path"],"name":"view","key":"dea9a175-9582-412e-9f49-be729e8838fb"},{"view":"SpectrogramView","columns":["path"],"name":"view","key":"676bd937-226b-4632-ae2d-ec8bc37bcc5d"},{"view":"ScalarView","columns":["label"],"name":"view","key":"dbfcc0b1-9e96-4d31-8856-f0bd7f0b8144"},{"view":"ScalarView","columns":["domain"],"name":"view","key":"3e79654f-e017-402c-b136-6a13c4409ae4"}],"visibleColumns":4}}]}]},
    "extended": {"orientation":"vertical","children":[{"kind":"split","weight":54.145516074450086,"orientation":"horizontal","children":[{"kind":"tab","weight":31.52260461369049,"children":[{"kind":"widget","name":"Table","type":"table","config":{"tableView":"filtered","visibleColumns":["class","d1p","d1v","d2p","d2v","d3p","d3v","dev_train_lof_anomaly","dev_train_lof_anomaly_score","domain","label","section"],"sorting":None,"orderByRelevance":False}}]},{"kind":"tab","weight":33.869200490640154,"children":[{"kind":"widget","name":"Similarity map with AST-lof anomaly score","type":"similaritymap","config":{"placeBy":None,"reductionMethod":None,"colorBy":"dev_train_lof_anomaly_score","sizeBy":"label","filter":False,"umapNNeighbors":20,"umapMetric":None,"umapMinDist":0.15,"pcaNormalization":None,"umapMenuLocalGlobalBalance":None,"umapMenuIsAdvanced":False}}]},{"kind":"tab","weight":34.60819489566936,"children":[{"kind":"widget","name":"Similarity map with classes","type":"similaritymap","config":{"placeBy":None,"reductionMethod":None,"colorBy":"class","sizeBy":None,"filter":False,"umapNNeighbors":20,"umapMetric":None,"umapMinDist":0.15,"pcaNormalization":None,"umapMenuLocalGlobalBalance":None,"umapMenuIsAdvanced":False}},{"kind":"widget","name":"Scatter Plot","type":"scatterplot","config":{"xAxisColumn":None,"yAxisColumn":None,"colorBy":None,"sizeBy":None,"filter":False}}]}]},{"kind":"split","weight":45.854483925549914,"orientation":"horizontal","children":[{"kind":"tab","weight":58.581483486735245,"children":[{"kind":"widget","name":"Inspector","type":"inspector","config":{"views":[{"view":"AudioView","columns":["path"],"name":"view","key":"dea9a175-9582-412e-9f49-be729e8838fb"},{"view":"SpectrogramView","columns":["path"],"name":"view","key":"676bd937-226b-4632-ae2d-ec8bc37bcc5d"},{"view":"ScalarView","columns":["label"],"name":"view","key":"dbfcc0b1-9e96-4d31-8856-f0bd7f0b8144"},{"view":"ScalarView","columns":["domain"],"name":"view","key":"3e79654f-e017-402c-b136-6a13c4409ae4"}],"visibleColumns":4}}]},{"kind":"tab","weight":41.418516513264755,"children":[{"kind":"widget","name":"Histogram","type":"histogram","config":{"columnKey":"class","stackByColumnKey":"dev_train_lof_anomaly"}}]}]}]},
}

SPOTLIGHT_RENAME = {
    "audio": "original_audio",
    "path": "audio",
}
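

# Illustrative sketch (not part of the loader): the configs declared in STATS and the splits
# each one provides. Loading through the local script path mirrors the __main__ block at the
# end of this file; the published Hub repository id works the same way.
def _example_available_configs() -> None:
    for config_name, stats in STATS["configs"].items():
        print(config_name, DATASET[config_name], stats["splits"], stats["homepage"])
    # e.g. download the development training split (non-streaming):
    # ds = load_dataset("dcase23-task2-enriched.py", "dev", split="train")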


class DCASE2023Task2DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for DCASE2023Task2Dataset."""

    def __init__(self, name, version, **kwargs):
        self.release_date = kwargs.pop("release_date", None)
        self.homepage = kwargs.pop("homepage", None)
        self.dataset = kwargs.pop("dataset", None)
        self.data_urls = kwargs.pop("data_urls", None)
        self.embeddings_urls = kwargs.pop("embeddings_urls", None)
        self.splits = kwargs.pop("splits", None)
        self.rename = kwargs.pop("rename", None)
        self.layout = kwargs.pop("layout", None)
        description = (
            f"Dataset for the DCASE 2023 Challenge Task 2 'First-Shot Unsupervised Anomalous Sound Detection "
            f"for Machine Condition Monitoring', released on {self.release_date}. "
            f"Original data available at {self.homepage}. "
            f"CONFIG: {name}."
        )
        super(DCASE2023Task2DatasetConfig, self).__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
        )

    def to_spotlight(self, data: Union[pd.DataFrame, datasets.Dataset]) -> pd.DataFrame:
        """Convert a dataset split (or DataFrame) into a DataFrame suitable for Renumics Spotlight."""

        def get_split(path: str) -> str:
            fn = os.path.basename(path)
            if "train" in fn:
                return "train"
            elif "test" in fn:
                return "test"
            else:
                raise NotImplementedError

        if isinstance(data, datasets.Dataset):
            # retrieve split and config name
            df = data.to_pandas()
            df["split"] = data.split._name if "+" not in data.split._name else df["path"].map(get_split)
            df["config"] = data.config_name
            # map class indices to clear names
            class_names = data.features["class"].names
            df["class_name"] = df["class"].apply(lambda x: class_names[x])
        elif isinstance(data, pd.DataFrame):
            df = data
        else:
            raise TypeError("type(data) not in Union[pd.DataFrame, datasets.Dataset]")

        df["file_path"] = df["path"]
        df.rename(columns=self.rename, inplace=True)
        return df.copy()

    def get_layout(self, config: str = "standard") -> str:
        """Write the selected Spotlight layout to a temporary JSON file and return its path."""
        layout_json = tempfile.mktemp(".json")
        with open(layout_json, "w") as outfile:
            json.dump(self.layout[config], outfile)
        return layout_json
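

# Illustrative sketch (not part of the loader): inspecting a split in Renumics Spotlight with
# the helpers above. `spotlight.show`, `spotlight.Audio` and the `dtype`/`layout` keyword
# arguments are assumptions about the separate `renumics-spotlight` package and are therefore
# left commented out; only `to_spotlight` and `get_layout` are defined in this script.
def _example_spotlight_view() -> None:
    builder = datasets.load_dataset_builder("dcase23-task2-enriched.py", "dev")
    ds = load_dataset("dcase23-task2-enriched.py", "dev", split="train")
    df = builder.config.to_spotlight(ds)  # renames "path" -> "audio" via SPOTLIGHT_RENAME
    layout_path = builder.config.get_layout(config="standard")
    print(df.columns.tolist(), layout_path)
    # from renumics import spotlight                                            # assumed API
    # spotlight.show(df, dtype={"audio": spotlight.Audio}, layout=layout_path)  # assumed API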


class DCASE2023Task2Dataset(datasets.GeneratorBasedBuilder):
    """Dataset for the DCASE 2023 Challenge Task 2 "First-Shot Unsupervised Anomalous Sound
    Detection for Machine Condition Monitoring"."""

    VERSION = datasets.Version("0.1.0")

    DEFAULT_CONFIG_NAME = "dev"

    BUILDER_CONFIGS = [
        DCASE2023Task2DatasetConfig(
            name=key,
            version=stats["version"],
            dataset=DATASET[key],
            homepage=_HOMEPAGE[key],
            data_urls=DATA_URLS[key],
            embeddings_urls=EMBEDDING_URLS[key],
            release_date=stats["date"],
            splits=stats["splits"],
            layout=SPOTLIGHT_LAYOUTS,
            rename=SPOTLIGHT_RENAME,
        )
        for key, stats in STATS["configs"].items()
    ]

    def _info(self):
        features = {
            "audio": datasets.Audio(sampling_rate=16_000),
            "path": datasets.Value("string"),
            "section": datasets.Value("int64"),
            "domain": datasets.ClassLabel(num_classes=2, names=["source", "target"]),
            "label": datasets.ClassLabel(num_classes=_NUM_TARGETS, names=_TARGET_NAMES),
            "class": datasets.ClassLabel(num_classes=_NUM_CLASSES, names=_CLASS_NAMES),
            "d1p": datasets.Value("string"),
            "d1v": datasets.Value("string"),
            "d2p": datasets.Value("string"),
            "d2v": datasets.Value("string"),
            "d3p": datasets.Value("string"),
            "d3v": datasets.Value("string"),
            "dev_train_lof_anomaly": datasets.Value("int64"),
            "dev_train_lof_anomaly_score": datasets.Value("float32"),
            "add_train_lof_anomaly": datasets.Value("int64"),
            "add_train_lof_anomaly_score": datasets.Value("float32"),
        }
        if self.config.embeddings_urls is not None:
            # one list-of-float column per embedding model, named after the key in EMBEDDING_URLS
            features.update({
                emb_name: [datasets.Value(emb["dtype"])]
                for emb_name, emb in self.config.embeddings_urls.items()
            })
        features = datasets.Features(features)

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=self.config.description,
            features=features,
            supervised_keys=datasets.info.SupervisedKeysData("label"),
            homepage=self.config.homepage,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators."""
        dl_manager.download_config.ignore_url_params = True
        audio_path = {}
        local_extracted_archive = {}
        split_type = {"train": datasets.Split.TRAIN, "test": datasets.Split.TEST}
        embeddings = {split: dict() for split in split_type}

        for split in split_type:
            if split in self.config.splits:
                audio_path[split] = dl_manager.download(self.config.data_urls[split])
                local_extracted_archive[split] = (
                    dl_manager.extract(audio_path[split]) if not dl_manager.is_streaming else None
                )
                if self.config.embeddings_urls is not None:
                    for emb_name, emb_data in self.config.embeddings_urls.items():
                        downloaded_embeddings = dl_manager.download(emb_data[split])
                        if dl_manager.is_streaming:
                            # in streaming mode the download manager returns a URL; fetch the
                            # archive into memory so np.load can read it
                            response = requests.get(downloaded_embeddings)
                            response.raise_for_status()
                            downloaded_embeddings = io.BytesIO(response.content)
                        npz_file = np.load(downloaded_embeddings, allow_pickle=True)
                        embeddings[split][emb_name] = npz_file["arr_0"].item()

        return [
            datasets.SplitGenerator(
                name=split_type[split],
                gen_kwargs={
                    "split": split,
                    "local_extracted_archive": local_extracted_archive[split],
                    "audio_files": dl_manager.iter_archive(audio_path[split]),
                    "embeddings": embeddings[split],
                    "metadata_file": dl_manager.download_and_extract(self.config.data_urls["metadata"])
                    if self.config.data_urls["metadata"] is not None
                    else None,
                    "scores_file": dl_manager.download_and_extract("data/scores.csv"),
                    "is_streaming": dl_manager.is_streaming,
                },
            )
            for split in split_type
            if split in self.config.splits
        ]

    def _generate_examples(
        self,
        split: str,
        local_extracted_archive: Union[Dict, List],
        audio_files: Optional[Iterable],
        embeddings: Optional[Dict],
        metadata_file: Optional[str],
        scores_file: Optional[str],
        is_streaming: Optional[bool],
    ):
        """Yields examples."""
        metadata = pd.read_csv(metadata_file) if metadata_file is not None else None
        scores = pd.read_csv(scores_file) if scores_file is not None else None
        data_fields = list(self._info().features.keys())

        id_ = 0
        for path, f in audio_files:
            # metadata, scores and embeddings are keyed by "<section directory>/<file name>"
            lookup = Path(path).parent.name + "/" + Path(path).name
            if metadata is None or lookup in metadata["path"].values:
                path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                if is_streaming:
                    audio = {"path": path, "bytes": f.read()}
                else:
                    audio = {"path": path, "bytes": None}
                result = {field: None for field in data_fields}
                if metadata is not None:
                    result.update(metadata[metadata["path"] == lookup].T.squeeze().to_dict())
                if scores is not None:
                    result.update(scores[scores["path"] == lookup].T.squeeze().to_dict())
                for emb_key in embeddings.keys():
                    result[emb_key] = np.asarray(embeddings[emb_key][lookup]).squeeze().tolist()
                result["path"] = path
                yield id_, {**result, "audio": audio}
                id_ += 1
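

# Illustrative sketch (not part of the loader): decoding the ClassLabel columns declared in
# _info() and stacking the per-clip AST embedding column into a NumPy matrix. The column name
# matches the key used in EMBEDDING_URLS.
def _example_decode_features() -> None:
    ds = load_dataset("dcase23-task2-enriched.py", "dev", split="test")
    example = ds[0]
    print(ds.features["class"].int2str(example["class"]),   # machine type, e.g. "fan"
          ds.features["label"].int2str(example["label"]))   # "normal" or "anomaly"
    emb = np.asarray(ds["embeddings_ast-finetuned-audioset-10-10-0.4593"], dtype=np.float32)
    print(emb.shape)  # (num_clips, 768)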


if __name__ == "__main__":
    ds = load_dataset("dcase23-task2-enriched.py", "dev", split="train", streaming=True)
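    # Illustrative continuation: peek at the first streamed example without downloading the
    # full audio archives. The embedding column holds a flat list of 768 float32 values.
    first = next(iter(ds))
    print(first["path"], first["label"], len(first["embeddings_ast-finetuned-audioset-10-10-0.4593"]))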