yangwang825 committed
Update magnatagatune.py

magnatagatune.py CHANGED (+57 -34)
Removed from the previous revision (most removed lines are truncated in the diff view):

@@ -1,6 +1,6 @@
-"""

@@ -18,13 +18,14 @@ import urllib.request
-SAMPLE_RATE =

@@ -36,38 +37,41 @@ DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets")
-INDEX2CLASS = {idx:cls for idx, cls in enumerate(CLASSES)}
-class
-"""BuilderConfig for
-super(
-class
-"
-"label": datasets.features.ClassLabel(names=CLASSES),
-name="
-DEFAULT_CONFIG_NAME = "

@@ -81,10 +85,10 @@ class MedleySolosDB(datasets.GeneratorBasedBuilder):
-zip_file_url = "https://
-HF_DATASETS_CACHE, '

@@ -104,46 +108,65 @@ class MedleySolosDB(datasets.GeneratorBasedBuilder):
-fileid = row['
-class_ =
-fileid = row['
-class_ =
-fileid = row['
-class_ =
-_walker = [fileid for fileid in _walker if not Path(fileid).name.startswith('._Medley')]
-fileid = fileid.split('_')[-1]
-if fileid not in fileid2class:
-"
-"label":
New revision (added lines are marked with +; unchanged context lines have no marker):

# coding=utf-8

+"""MagnaTagATune dataset."""


import os

...

from pathlib import Path
from copy import deepcopy
from tqdm.auto import tqdm
+from rich import print
from rich.logging import RichHandler

logger = logging.getLogger(__name__)
logger.addHandler(RichHandler())
logger.setLevel(logging.INFO)

+SAMPLE_RATE = 16_000

# Cache location
VERSION = "0.0.1"

...

HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE))

CLASSES = [
+    "guitar", "classical", "slow", "techno", "strings", "drums", "electronic", "rock", "fast", "piano",
+    "ambient", "beat", "violin", "vocal", "synth", "female", "indian", "opera", "male", "singing", "vocals",
+    "no vocals", "harpsichord", "loud", "quiet", "flute", "woman", "male vocal", "no vocal", "pop", "soft",
+    "sitar", "solo", "man", "classic", "choir", "voice", "new age", "dance", "male voice", "female vocal",
+    "beats", "harp", "cello", "no voice", "weird", "country", "metal", "female voice", "choral"
]
+CLASSES = sorted(CLASSES)


+class MagnaTagATuneConfig(datasets.BuilderConfig):
+    """BuilderConfig for MagnaTagATune."""

    def __init__(self, features, **kwargs):
+        super(MagnaTagATuneConfig, self).__init__(version=datasets.Version(VERSION, ""), **kwargs)
        self.features = features


+class MagnaTagATune(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
+        MagnaTagATuneConfig(
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=SAMPLE_RATE),
+                    "sound": datasets.Sequence(datasets.Value("string")),
+                    "label": datasets.Sequence(datasets.features.ClassLabel(names=CLASSES)),
                }
            ),
+            name="top50",
            description="",
        ),
    ]

+    DEFAULT_CONFIG_NAME = "top50"

    def _info(self):
        return datasets.DatasetInfo(

...

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
+        zip_file_url = "https://huggingface.co/datasets/confit/magnatagatune/resolve/main/mp3.zip"
        _filename = zip_file_url.split('/')[-1]
        _save_path = os.path.join(
+            HF_DATASETS_CACHE, 'confit___magnatagatune/top50', VERSION, _filename
        )
        download_file(zip_file_url, _save_path)
        logger.info(f"`{_filename}` is downloaded to {_save_path}")

...

        ]

    def _generate_examples(self, archive_path, split=None):
+        df = pd.read_csv(
+            'https://huggingface.co/datasets/confit/magnatagatune/resolve/main/annotations_final.csv', sep="\t"
+        )
+        # Filter only the songs that have at least one of the top 50 tags
+        df = df[df[CLASSES].sum(axis=1) > 0].reset_index(drop=True)
+        df = df[CLASSES + ["mp3_path", "clip_id"]]
+        train_ids_df = pd.read_csv(
+            "https://huggingface.co/datasets/confit/magnatagatune/resolve/main/train_gt_mtt.tsv", sep="\t", header=None
+        )
+        train_ids = train_ids_df[0].tolist()
+        train_df = df[df["clip_id"].isin(train_ids)].reset_index(drop=True)
+
+        validation_ids_df = pd.read_csv(
+            "https://huggingface.co/datasets/confit/magnatagatune/resolve/main/val_gt_mtt.tsv", sep="\t", header=None
+        )
+        validation_ids = validation_ids_df[0].tolist()
+        validation_df = df[df["clip_id"].isin(validation_ids)].reset_index(drop=True)

+        test_ids_df = pd.read_csv(
+            "https://huggingface.co/datasets/confit/magnatagatune/resolve/main/test_gt_mtt.tsv", sep="\t", header=None
+        )
+        test_ids = test_ids_df[0].tolist()
+        test_df = df[df["clip_id"].isin(test_ids)].reset_index(drop=True)
+
+        extensions = ['.mp3']
        _, _walker = fast_scandir(archive_path, extensions, recursive=True)

+        # Extract the list of column names where the value is 1 for each row
+        result = df.apply(lambda row: [col for col in df.columns if row[col] == 1], axis=1).tolist()
+
        if split == 'train':
            fileid2class = {}
            for idx, row in train_df.iterrows():
+                fileid = os.path.join(archive_path, str(row['mp3_path']))
+                class_ = result[idx]
                fileid2class[fileid] = class_
        elif split == 'validation':
            fileid2class = {}
            for idx, row in validation_df.iterrows():
+                fileid = os.path.join(archive_path, str(row['mp3_path']))
+                class_ = result[idx]
                fileid2class[fileid] = class_
        elif split == 'test':
            fileid2class = {}
            for idx, row in test_df.iterrows():
+                fileid = os.path.join(archive_path, str(row['mp3_path']))
+                class_ = result[idx]
                fileid2class[fileid] = class_

        for guid, audio_path in enumerate(_walker):
+            if audio_path not in fileid2class:
                continue
+            tags = fileid2class.get(audio_path)
            yield guid, {
                "id": str(guid),
                "file": audio_path,
                "audio": audio_path,
+                "sound": tags,
+                "label": tags,
            }
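One detail worth flagging in the new _generate_examples: result is indexed by row position in the full df, while idx in the per-split loops comes from train_df, validation_df, and test_df, each of which was filtered and re-indexed with reset_index(drop=True), so result[idx] can pick up the tags of a different clip. A minimal alternative sketch, not part of this commit and assuming the CLASSES columns hold 0/1 indicators as the filter above implies, reads the tags straight from the row:

# Hypothetical helper: derive the active tags directly from a row instead of
# indexing into `result`, so the lookup cannot drift after the per-split
# DataFrames are filtered and re-indexed.
def row_tags(row, classes):
    return [col for col in classes if row[col] == 1]

# Possible use inside the per-split loops, e.g. for the training split:
# for idx, row in train_df.iterrows():
#     fileid = os.path.join(archive_path, str(row['mp3_path']))
#     fileid2class[fileid] = row_tags(row, CLASSES)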
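For reference, a minimal usage sketch, assuming the script is served from the confit/magnatagatune repository referenced in the URLs above; the config name "top50" comes from DEFAULT_CONFIG_NAME, and the exact load_dataset arguments (such as trust_remote_code) depend on the installed version of the datasets library:

from datasets import load_dataset

# Assumed repo id; recent `datasets` releases require trust_remote_code=True
# to run script-based builders like this one.
ds = load_dataset("confit/magnatagatune", "top50", trust_remote_code=True)

example = ds["train"][0]
print(example["sound"])   # tag names as plain strings
print(example["label"])   # the same tags encoded as ClassLabel ids

label_feature = ds["train"].features["label"].feature  # underlying ClassLabel
print(label_feature.int2str(example["label"]))         # decode ids back to tag names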