agkphysics committed on
Commit
e1a0de8
1 Parent(s): c051cda

Add dataset loading script.

Browse files
Files changed (2) hide show
  1. AudioSet.py +157 -0
  2. README.md +22 -3
AudioSet.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2024 Aaron Keesing
2
+ #
3
+ # Permission is hereby granted, free of charge, to any person obtaining
4
+ # a copy of this software and associated documentation files (the
5
+ # “Software”), to deal in the Software without restriction, including
6
+ # without limitation the rights to use, copy, modify, merge, publish,
7
+ # distribute, sublicense, and/or sell copies of the Software, and to
8
+ # permit persons to whom the Software is furnished to do so, subject to
9
+ # the following conditions:
10
+ #
11
+ # The above copyright notice and this permission notice shall be
12
+ # included in all copies or substantial portions of the Software.
13
+ #
14
+ # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
15
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
17
+ # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
18
+ # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
19
+ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
20
+ # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21
+
22
+ from itertools import chain
23
+ import json
24
+ import os
25
+ import tarfile
26
+
27
+ import pandas as pd
28
+ import datasets
29
+
30
+
31
+ _CITATION = """\
32
+ @inproceedings{45857,
33
+ title = {Audio Set: An ontology and human-labeled dataset for audio events},
34
+ author = {Jort F. Gemmeke and Daniel P. W. Ellis and Dylan Freedman and Aren Jansen and Wade Lawrence and R. Channing Moore and Manoj Plakal and Marvin Ritter},
35
+ year = {2017},
36
+ booktitle = {Proc. IEEE ICASSP 2017},
37
+ address = {New Orleans, LA}
38
+ }
39
+ """
40
+
41
+ _DESCRIPTION = """\
42
+ This repository contains the balanced training set and evaluation set of the AudioSet
43
+ data, described here: https://research.google.com/audioset/dataset/index.html. The
44
+ YouTube videos were downloaded in March 2023, and so not all of the original audios are
45
+ available.
46
+ """
47
+
48
+ _HOMEPAGE = "https://research.google.com/audioset/dataset/index.html"
49
+
50
+ _LICENSE = "cc-by-4.0"
51
+
52
+ _URL_PREFIX = "https://huggingface.co/datasets/agkphysics/AudioSet/resolve/main"
53
+
54
+
55
+ def _iter_tar(path):
56
+ """Iterate through the tar archive, but without skipping some files, which the HF
57
+ DL does.
58
+ """
59
+ with open(path, "rb") as fid:
60
+ stream = tarfile.open(fileobj=fid, mode="r|*")
61
+ for tarinfo in stream:
62
+ file_obj = stream.extractfile(tarinfo)
63
+ yield tarinfo.name, file_obj
64
+ stream.members = []
65
+ del stream
66
+
67
+
68
class AudioSetDataset(datasets.GeneratorBasedBuilder):
    """Builder for the balanced train and eval subsets of Google AudioSet."""

    VERSION = datasets.Version("1.0.0")

    def _info(self) -> datasets.DatasetInfo:
        """Declare the dataset's features and card metadata."""
        features = datasets.Features(
            {
                "video_id": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=None, mono=True, decode=True),
                "labels": datasets.Sequence(datasets.Value("string")),
                "human_labels": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download label CSVs, the ontology, and the audio tars, then define splits."""
        # A user-supplied data_dir takes precedence over the hosted repo.
        base = self.config.data_dir if self.config.data_dir else _URL_PREFIX

        label_urls = {
            "bal_train": f"{base}/balanced_train_segments.csv",
            "eval": f"{base}/eval_segments.csv",
            "ontology": f"{base}/ontology.json",
        }
        data_urls = {
            "bal_train": [f"{base}/bal_train0{i}.tar" for i in range(10)],
            "eval": [f"{base}/eval0{i}.tar" for i in range(9)],
        }

        tar_files = dl_manager.download(data_urls)
        label_files = dl_manager.download(label_urls)

        def _audio_stream(paths):
            # Lazily chain (name, file_obj) pairs across all tar shards.
            return chain.from_iterable(_iter_tar(p) for p in paths)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "labels": label_files["bal_train"],
                    "ontology": label_files["ontology"],
                    "audio_files": _audio_stream(tar_files["bal_train"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "labels": label_files["eval"],
                    "ontology": label_files["ontology"],
                    "audio_files": _audio_stream(tar_files["eval"]),
                },
            ),
        ]

    def _generate_examples(self, labels, ontology, audio_files):
        """Yield ``(video_id, example)`` pairs for videos whose audio is present.

        Builds a label table from the segments CSV and the ontology, then walks
        the audio archives; only clips that appear in both are emitted.
        """
        segments = pd.read_csv(
            labels,
            skiprows=3,  # the AudioSet CSVs begin with three comment lines
            header=None,
            skipinitialspace=True,
            names=["vid_id", "start", "end", "labels"],
        )
        with open(ontology) as fid:
            # Map machine label ids (e.g. "/m/09x0r") to human-readable names.
            id_to_name = {node["id"]: node["name"] for node in json.load(fid)}

        examples = {}
        for _, row in segments.iterrows():
            label_ids = row["labels"].split(",")
            examples[row["vid_id"]] = {
                "video_id": row["vid_id"],
                "labels": label_ids,
                "human_labels": [id_to_name[lbl] for lbl in label_ids],
            }

        for path, fid in audio_files:
            # Archive member names are "<video_id>.<ext>"; strip the extension.
            vid_id = os.path.splitext(os.path.basename(path))[0]
            if vid_id in examples:
                examples[vid_id]["audio"] = {"path": path, "bytes": fid.read()}
                yield vid_id, examples[vid_id]
README.md CHANGED
@@ -1,9 +1,28 @@
1
  ---
2
  license: cc-by-4.0
3
- tags:
4
- - audio
5
  task_categories:
6
  - audio-classification
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  ---
8
 
9
  # AudioSet data
@@ -23,7 +42,7 @@ Most audio is sampled at 48 kHz 24 bit, but about 10% is sampled at
23
 
24
  ## Citation
25
  ```bibtex
26
- @inproceedings{45857,
27
  title = {Audio Set: An ontology and human-labeled dataset for audio events},
28
  author = {Jort F. Gemmeke and Daniel P. W. Ellis and Dylan Freedman and Aren Jansen and Wade Lawrence and R. Channing Moore and Manoj Plakal and Marvin Ritter},
29
  year = {2017},
 
1
  ---
2
  license: cc-by-4.0
 
 
3
  task_categories:
4
  - audio-classification
5
+ tags:
6
+ - audio
7
+ dataset_info:
8
+ features:
9
+ - name: video_id
10
+ dtype: string
11
+ - name: audio
12
+ dtype: audio
13
+ - name: labels
14
+ sequence: string
15
+ - name: human_labels
16
+ sequence: string
17
+ splits:
18
+ - name: train
19
+ num_bytes: 26016210987
20
+ num_examples: 18685
21
+ - name: test
22
+ num_bytes: 23763682278
23
+ num_examples: 17142
24
+ download_size: 49805654900
25
+ dataset_size: 49779893265
26
  ---
27
 
28
  # AudioSet data
 
42
 
43
  ## Citation
44
  ```bibtex
45
+ @inproceedings{jort_audioset_2017,
46
  title = {Audio Set: An ontology and human-labeled dataset for audio events},
47
  author = {Jort F. Gemmeke and Daniel P. W. Ellis and Dylan Freedman and Aren Jansen and Wade Lawrence and R. Channing Moore and Manoj Plakal and Marvin Ritter},
48
  year = {2017},