Datasets: mb23/GraySpectrogram
Languages: English

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.

Files changed (50)
  1. GraySpectrogram.py +59 -444
  2. README.md +5 -140
  3. data/test/{metadata_0001.jsonl → metadata.jsonl} +0 -0
  4. data/test/metadata_0000.jsonl +0 -0
  5. data/test/metadata_0002.jsonl +0 -0
  6. data/test/metadata_0003.jsonl +0 -0
  7. data/test/metadata_0004.jsonl +0 -0
  8. data/test/metadata_0005.jsonl +0 -0
  9. data/test/metadata_0006.jsonl +0 -0
  10. data/test/metadata_0007.jsonl +0 -0
  11. data/test/metadata_0008.jsonl +0 -0
  12. data/test/metadata_0009.jsonl +0 -0
  13. data/test/metadata_0010.jsonl +0 -0
  14. data/test/metadata_0011.jsonl +0 -0
  15. data/test/metadata_0012.jsonl +0 -0
  16. data/test/metadata_0013.jsonl +0 -0
  17. data/test/metadata_0014.jsonl +0 -0
  18. data/test/metadata_0015.jsonl +0 -0
  19. data/test/metadata_0016.jsonl +0 -0
  20. data/test/metadata_0017.jsonl +0 -0
  21. data/test/metadata_0018.jsonl +0 -0
  22. data/test/metadata_0019.jsonl +0 -0
  23. data/test/metadata_0020.jsonl +0 -0
  24. data/test/metadata_0021.jsonl +0 -0
  25. data/test/test_0002.zip +0 -3
  26. data/test/test_0003.zip +0 -3
  27. data/test/test_0004.zip +0 -3
  28. data/test/test_0005.zip +0 -3
  29. data/test/test_0006.zip +0 -3
  30. data/test/test_0007.zip +0 -3
  31. data/test/test_0008.zip +0 -3
  32. data/test/test_0009.zip +0 -3
  33. data/test/test_0010.zip +0 -3
  34. data/test/test_0011.zip +0 -3
  35. data/test/test_0012.zip +0 -3
  36. data/test/test_0013.zip +0 -3
  37. data/test/test_0014.zip +0 -3
  38. data/test/test_0015.zip +0 -3
  39. data/test/test_0016.zip +0 -3
  40. data/test/test_0017.zip +0 -3
  41. data/test/test_0018.zip +0 -3
  42. data/test/test_0019.zip +0 -3
  43. data/test/test_0020.zip +0 -3
  44. data/test/test_0021.zip +0 -3
  45. data/train/{train_0000.zip → data_0000.zip} +0 -0
  46. data/train/{train_0001.zip → data_0001.zip} +0 -0
  47. data/train/{metadata_0001.jsonl → metadata.jsonl} +0 -0
  48. data/train/metadata_0000.jsonl +0 -0
  49. data/train/metadata_0002.jsonl +0 -0
  50. data/train/metadata_0003.jsonl +0 -0
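
This change set consolidates the sharded layout: in the files shown, each split keeps a single metadata.jsonl (renamed from metadata_0001.jsonl) while the other metadata shards and the extra data/test archives are deleted, and the two train archives are renamed to data_*.zip. A quick way to confirm the resulting layout, as a sketch (assumes `huggingface_hub` is installed; the repo id is taken from the script's old `_NAME`):

```python
# Sketch: list the data files that remain after this commit (not part of the PR).
from huggingface_hub import HfApi

files = HfApi().list_repo_files("mb23/GraySpectrogram", repo_type="dataset")
print(sorted(f for f in files if f.startswith("data/")))
```
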
GraySpectrogram.py CHANGED
@@ -10,13 +10,13 @@ from pathlib import Path
 
 
 # Fill in the configuration here
-_NAME = "mb23/GraySpectrogram"
+_NAME = "mickylan2367/LoadingScriptPractice"
 _EXTENSION = [".png"]
 _REVISION = "main"
 
 # _HOMEPAGE = "https://github.com/fastai/imagenette"
 # Once the place to host this script is decided, add the homepage URL here
-_HOMEPAGE = "https://huggingface.co/datasets/mb23/GraySpectrogram"
+_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"
 
 _DESCRIPTION = f"""\
 {_NAME} Datasets including spectrogram.png file from Google MusicCaps Datasets!
@@ -34,369 +34,20 @@ Using for Project Learning...
 # DatasetInfo : https://huggingface.co/docs/datasets/package_reference/main_classes
 
 
-def get_information():
-    # Organize the data?
-    hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
-
-    # Get the URLs of the jsonl metadata files
-    # Can the extraction method here be changed?
-    train_metadata_url = DataFilesDict.from_hf_repo(
-        {datasets.Split.TRAIN: ["data/train/**"]},
-        dataset_info=hfh_dataset_info,
-        allowed_extensions=["jsonl", ".jsonl"],
-    )
-
-    test_metadata_url = DataFilesDict.from_hf_repo(
-        {datasets.Split.TEST: ["data/test/**"]},
-        dataset_info=hfh_dataset_info,
-        allowed_extensions=["jsonl", ".jsonl"],
-    )
-
-
-    metadata_urls = dict()
-    metadata_urls["train"] = train_metadata_url["train"]
-    metadata_urls["test"] = test_metadata_url["test"]
-
-    # For the image data, get the **.zip URLs as a dict?
-    # Get the **.zip URLs as a dict?
-    train_data_url = DataFilesDict.from_hf_repo(
-        {datasets.Split.TRAIN: ["data/train/**"]},
-        dataset_info=hfh_dataset_info,
-        allowed_extensions=["zip", ".zip"],
-    )
-
-    test_data_url = DataFilesDict.from_hf_repo(
-        {datasets.Split.TEST: ["data/test/**"]},
-        dataset_info=hfh_dataset_info,
-        allowed_extensions=["zip", ".zip"]
-    )
-    data_urls = dict()
-    data_urls["train"] = train_data_url["train"]
-    data_urls["test"] = test_data_url["test"]
-    return (metadata_urls, data_urls)
-
-
-
-class GraySpectrogramConfig(datasets.BuilderConfig):
-    """BuilderConfig for Imagette."""
-
-    def __init__(self, data_url, metadata_url, **kwargs):
-        """BuilderConfig for Imagette.
-        Args:
-            data_url: `string`, url to download the zip file from.
-            metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(GraySpectrogramConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-        self.data_url = data_url
-        self.metadata_url = metadata_url
-
-class GraySpectrogram(datasets.GeneratorBasedBuilder):
+
+class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
 
     # Prepare the data subsets here
-    metadata_urls, data_urls = get_information()
-    subset_name_list = [
-        "data 0-200",
-        "data 200-600",
-        "data 600-1000",
-        "data 1000-1300",
-        "data 1300-1600",
-        "data 1600-2000",
-    ]
-    for i in range(2000, 2800, 200):
-        subset_name_list.append(f"data {i}-{i+200}")
-    for i in range(3000, 5200, 200):
-        subset_name_list.append(f"data {i}-{i+200}")
-    subset_name_list.append("data 5200-5520")
-
-    config_list = list()
-    for i in range(22):
-        config_list.append(
-            GraySpectrogramConfig(
-                name = subset_name_list[i],
-                description = _DESCRIPTION,
-                data_url = {
-                    "train" : data_urls["train"][i],
-                    "test" : data_urls["test"][i]
-                },
-                metadata_url = {
-                    "train" : metadata_urls["train"][i],
-                    "test" : metadata_urls["test"][i]
-                }
-            )
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="train",
+            description=_DESCRIPTION,
+            # data_url = train_data_url["train"][0],
+            # metadata_urls = {
+            #     "train" : train_metadata_paths["train"][0]
+            # }
         )
-
-    BUILDER_CONFIGS = config_list
-
-    # BUILDER_CONFIGS = [
-    #     GraySpectrogramConfig(
-    #         name="data 0-200",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][0],
-    #             "test" : data_urls["test"][0]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][0],
-    #             "test" : metadata_urls["test"][0]
-    #         }
-
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 200-600",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][1],
-    #             "test" : data_urls["test"][1]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][1],
-    #             "test" : metadata_urls["test"][1]
-    #         }
-
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 600-1000",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][2],
-    #             "test" : data_urls["test"][2]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][2],
-    #             "test" : metadata_urls["test"][2]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 1000-1300",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][3],
-    #             "test" : data_urls["test"][3]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][3],
-    #             "test" : metadata_urls["test"][3]
-    #         }
-
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 1300-1600",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][4],
-    #             "test" : data_urls["test"][4]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][4],
-    #             "test" : metadata_urls["test"][4]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 1600-2000",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][5],
-    #             "test" : data_urls["test"][5]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][5],
-    #             "test" : metadata_urls["test"][5]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 2000-2200",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][6],
-    #             "test" : data_urls["test"][6]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][6],
-    #             "test" : metadata_urls["test"][6]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 2200-2600",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][7],
-    #             "test" : data_urls["test"][7]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][7],
-    #             "test" : metadata_urls["test"][7]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 2600-2800",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][8],
-    #             "test" : data_urls["test"][8]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][8],
-    #             "test" : metadata_urls["test"][8]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 3000-3200",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][9],
-    #             "test" : data_urls["test"][9]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][9],
-    #             "test" : metadata_urls["test"][9]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 3200-3400",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][10],
-    #             "test" : data_urls["test"][10]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][11],
-    #             "test" : metadata_urls["test"][11]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 3400-3600",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][12],
-    #             "test" : data_urls["test"][12]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][12],
-    #             "test" : metadata_urls["test"][12]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 3600-3800",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][13],
-    #             "test" : data_urls["test"][1]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][14],
-    #             "test" : metadata_urls["test"][14]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 3800-4000",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][15],
-    #             "test" : data_urls["test"][15]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][15],
-    #             "test" : metadata_urls["test"][15]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 4000-4200",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][16],
-    #             "test" : data_urls["test"][16]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][16],
-    #             "test" : metadata_urls["test"][16]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 4200-4400",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][17],
-    #             "test" : data_urls["test"][17]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][17],
-    #             "test" : metadata_urls["test"][17]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 4400-4600",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][18],
-    #             "test" : data_urls["test"][18]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][18],
-    #             "test" : metadata_urls["test"][18]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 4600-4800",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][19],
-    #             "test" : data_urls["test"][19]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][19],
-    #             "test" : metadata_urls["test"][19]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 4800-5000",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][20],
-    #             "test" : data_urls["test"][20]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][20],
-    #             "test" : metadata_urls["test"][20]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 5000-5200",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][21],
-    #             "test" : data_urls["test"][21]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][4],
-    #             "test" : metadata_urls["test"][4]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 5200-5520",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][4],
-    #             "test" : data_urls["test"][4]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][4],
-    #             "test" : metadata_urls["test"][4]
-    #         }
-    #     ),
-    #     GraySpectrogramConfig(
-    #         name="data 2800-3000",
-    #         description=_DESCRIPTION,
-    #         data_url = {
-    #             "train" : data_urls["train"][4],
-    #             "test" : data_urls["test"][4]
-    #         },
-    #         metadata_url = {
-    #             "train" : metadata_urls["train"][4],
-    #             "test" : metadata_urls["test"][4]
-    #         }
-    #     )
-    # ]
+    ]
 
     def _info(self) -> DatasetInfo:
         return datasets.DatasetInfo(
@@ -431,95 +82,57 @@ class GraySpectrogram(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager: DownloadManager):
-
-        train_metadata_path = dl_manager.download_and_extract(self.config.metadata_url["train"])
-        test_metadata_path = dl_manager.download_and_extract(self.config.metadata_url["test"])
-        train_data_path = dl_manager.download(self.config.data_url["train"])
-        test_data_path = dl_manager.download(self.config.data_url["test"])
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "images": dl_manager.iter_archive(train_data_path),
-                    "metadata_path": train_metadata_path,
-                }
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "images": dl_manager.iter_archive(test_data_path),
-                    "metadata_path": test_metadata_path,
-                }
-            ),
-        ]
-
-        # # Fetch the data from the Hugging Face repository
-        # hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
-
-        # # Get the URLs of the jsonl metadata files
-        # # Can the extraction method here be changed?
-        # train_metadata_url = DataFilesDict.from_hf_repo(
-        #     {datasets.Split.TRAIN: ["data/train/**"]},
-        #     dataset_info=hfh_dataset_info,
-        #     allowed_extensions=["jsonl", ".jsonl"],
-        # )
-
-        # test_metadata_url = DataFilesDict.from_hf_repo(
-        #     {datasets.Split.TEST: ["data/test/**"]},
-        #     dataset_info=hfh_dataset_info,
-        #     allowed_extensions=["jsonl", ".jsonl"],
-        # )
-
-        # metadata_urls = dict()
-        # metadata_urls["train"] = train_metadata_url["train"]
-        # metadata_urls["test"] = test_metadata_url["test"]
-
-        # # For the image data, get the **.zip URLs as a dict?
-        # # Get the **.zip URLs as a dict?
-        # train_data_url = DataFilesDict.from_hf_repo(
-        #     {datasets.Split.TRAIN: ["data/train/**"]},
-        #     dataset_info=hfh_dataset_info,
-        #     allowed_extensions=["zip", ".zip"],
-        # )
-
-        # test_data_url = DataFilesDict.from_hf_repo(
-        #     {datasets.Split.TEST: ["data/test/**"]},
-        #     dataset_info=hfh_dataset_info,
-        #     allowed_extensions=["zip", ".zip"]
-        # )
-        # data_urls = dict()
-        # data_urls["train"] = train_data_url["train"]
-        # data_urls["test"] = test_data_url["test"]
-
-        # gs = []
-
-        # # for split, file_list in data_urls.items():
-        # #     metadata_list = metadata_urls[split]
-        # #     for i, file_ in enumerate(file_list):
-        # #         '''
-        # #         split : "train" or "test" or "val"
-        # #         files : zip files
-        # #         '''
-        # #         # print(file_)
-        # #         # print(metadata_list[0])
-        # #         # # Get the list of URLs downloaded from the repository and cached
-        # #         metadata_path = dl_manager.download_and_extract(metadata_list[i])
-        # #         downloaded_files = dl_manager.download(file_)
-        # #         # # The original code seems to pass the zip contents straight to _generate_examples as "filepath"?
-        # #         gs.append(
-        # #             datasets.SplitGenerator(
-        # #                 name = split,
-        # #                 gen_kwargs = {
-        # #                     # "images" : iter(iter_archive[split]),
-        # #                     "images" : dl_manager.iter_archive(downloaded_files),
-        # #                     "metadata_path": metadata_path # pass the metadata path
-        # #                 }
-        # #             )
-        # #         )
-
-        # return gs
-
+        # Fetch the data from the Hugging Face repository
+        hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
+
+        metadata_urls = DataFilesDict.from_hf_repo(
+            {datasets.Split.TRAIN: ["**"]},
+            dataset_info=hfh_dataset_info,
+            allowed_extensions=["jsonl", ".jsonl"],
+        )
+
+        # Get the **.zip URLs as a dict?
+        data_urls = DataFilesDict.from_hf_repo(
+            {datasets.Split.TRAIN: ["**"]},
+            dataset_info=hfh_dataset_info,
+            allowed_extensions=["zip", ".zip"],
+        )
+
+        data_paths = dict()
+        for path in data_urls["train"]:
+            dname = dirname(path)
+            folder = basename(Path(dname))
+            data_paths[folder] = path
+
+        metadata_paths = dict()
+        for path in metadata_urls["train"]:
+            dname = dirname(path)
+            folder = basename(Path(dname))
+            metadata_paths[folder] = path
+
+
+        gs = []
+        for split, files in data_paths.items():
+            '''
+            split : "train" or "test" or "val"
+            files : zip files
+            '''
+            # Get the list of URLs downloaded from the repository and cached
+            metadata_path = dl_manager.download_and_extract(metadata_paths[split])
+            downloaded_files_path = dl_manager.download(files)
+
+            # The original code seems to pass the zip contents straight to _generate_examples as "filepath"?
+            gs.append(
+                datasets.SplitGenerator(
+                    name = split,
+                    gen_kwargs={
+                        "images" : dl_manager.iter_archive(downloaded_files_path),
+                        "metadata_path": metadata_path
+                    }
+                )
+            )
+        return gs
+
     def _generate_examples(self, images, metadata_path):
         """Generate images and captions for splits."""
         # with open(metadata_path, encoding="utf-8") as f:
@@ -530,7 +143,7 @@ class GraySpectrogram(datasets.GeneratorBasedBuilder):
         num_list = list()
         label_list = list()
 
-        with open(metadata_path, encoding="utf-8") as fin:
+        with open(metadata_path) as fin:
            for line in fin:
                 data = json.loads(line)
                 file_list.append(data["file_name"])
@@ -550,3 +163,5 @@ class GraySpectrogram(datasets.GeneratorBasedBuilder):
                     "number" : num_list[idx],
                     "label": label_list[idx]
                 }
+
+
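The rewritten `_split_generators` derives each split name from the parent folder of a file URL via `basename(dirname(path))`, so `data/train/data_0000.zip` lands in a "train" split and files under `data/test/` in a "test" split; because the dict assignment overwrites repeated keys, only the last zip and last jsonl per folder are kept, which fits the commit's move to one archive and one metadata file per split. A minimal sketch of that grouping with illustrative paths (the script presumably imports `basename` and `dirname` from `os.path`, which is an assumption):

```python
# Folder-to-split grouping as in the new _split_generators (paths are hypothetical).
from os.path import basename, dirname

urls = [
    "https://example.org/repo/data/train/data_0000.zip",
    "https://example.org/repo/data/test/test_0000.zip",
]
data_paths = {}
for path in urls:
    folder = basename(dirname(path))  # "train" or "test"
    data_paths[folder] = path         # a later file in the same folder would overwrite this
print(data_paths)
```
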
README.md CHANGED
@@ -6,14 +6,12 @@ tags:
 - music
 - spectrogram
 size_categories:
-- 10K<n<100K
+- n<1K
 ---
 
-# Spectrogram data made from Google/MusicCaps.
+## Spectrogram data made from Google/MusicCaps.
 
-* <font color="red">The dataset viewer of this repository is truncated, so maybe you should look at <a href="https://huggingface.co/datasets/mb23/GraySpectrotram_example">this one</a> instead.</font>
-
-## Dataset information
+### Basic information about the data
 <table>
 <thead>
 <td>Image</td>
@@ -31,7 +29,7 @@ size_categories:
 </tbody>
 </table>
 
-## How this dataset was made
+### How the data was made
 
 * Code: https://colab.research.google.com/drive/13m792FEoXszj72viZuBtusYRUL1z6Cu2?usp=sharing
 * Kaggle Notebook used as reference: https://www.kaggle.com/code/osanseviero/musiccaps-explorer
@@ -50,7 +48,7 @@ image = Image.fromarray(np.uint8(D), mode='L') # 'L' is 1-channel grayscale
 image.save('spectrogram_{}.png')
 ```
 
-## Recover music (waveform) from spectrogram
+### ♪ How to recover the audio
 ```python
 im = Image.open("png file")
 db_ud = np.uint8(np.array(im))
@@ -63,136 +61,3 @@ print(amp.shape)
 y_inv = librosa.griffinlim(amp*200)
 display(IPython.display.Audio(y_inv, rate=sr))
 ```
-
-## Example : How to use this
-* <font color="red">Subsets <b>data 1300-1600</b> and <b>data 3400-3600</b> are not working now, so please build subset_name_list
-with those two removed first</font>.
-### 1 : get information about this dataset:
-* copy this code~~
-
-```python
-'''
-if you use Google Colab, remove the # to install the packages below.
-'''
-#!pip install datasets
-#!pip install huggingface-hub
-#!huggingface-cli login
-import datasets
-from datasets import load_dataset
-
-# make subset_name_list
-subset_name_list = [
-    'data 0-200',
-    'data 200-600',
-    'data 600-1000',
-    'data 1000-1300',
-    'data 1600-2000',
-    'data 2000-2200',
-    'data 2200-2400',
-    'data 2400-2600',
-    'data 2600-2800',
-    'data 3000-3200',
-    'data 3200-3400',
-    'data 3600-3800',
-    'data 3800-4000',
-    'data 4000-4200',
-    'data 4200-4400',
-    'data 4400-4600',
-    'data 4600-4800',
-    'data 4800-5000',
-    'data 5000-5200',
-    'data 5200-5520'
-]
-
-# load all datasets
-data = load_dataset("mb23/GraySpectrogram", subset_name_list[0])
-for subset in subset_name_list:
-    # Confirm subset_list doesn't include the "remove_list" datasets from the cell above.
-    print(subset)
-    new_ds = load_dataset("mb23/GraySpectrogram", subset)
-    new_dataset_train = datasets.concatenate_datasets([data["train"], new_ds["train"]])
-    new_dataset_test = datasets.concatenate_datasets([data["test"], new_ds["test"]])
-
-    # replace data[split]
-    data["train"] = new_dataset_train
-    data["test"] = new_dataset_test
-
-data
-```
-
-
-
-### 2 : load dataset and change to dataloader:
-* You can use the code below:
-* <font color="red">...but (;・∀・) I don't know whether this code works efficiently, because I haven't tried it so far</font>
-```python
-import datasets
-from datasets import load_dataset, Dataset, DatasetDict
-from torchvision import transforms
-from torch.utils.data import DataLoader
-# BATCH_SIZE = ???
-# IMG_SIZE = ???
-# TRAIN_SIZE = ??? # the number of training data
-# TEST_SIZE = ??? # the number of test data
-
-def load_datasets():
-
-    # Define data transforms
-    data_transforms = [
-        transforms.Resize((IMG_SIZE, IMG_SIZE)),
-        transforms.ToTensor(), # Scales data into [0,1]
-        transforms.Lambda(lambda t: (t * 2) - 1) # Scale between [-1, 1]
-    ]
-    data_transform = transforms.Compose(data_transforms)
-
-    data = load_dataset("mb23/GraySpectrogram", subset_name_list[0])
-    for subset in subset_name_list:
-        # Confirm subset_list doesn't include the "remove_list" datasets from the cell above.
-        print(subset)
-        new_ds = load_dataset("mb23/GraySpectrogram", subset)
-        new_dataset_train = datasets.concatenate_datasets([data["train"], new_ds["train"]])
-        new_dataset_test = datasets.concatenate_datasets([data["test"], new_ds["test"]])
-
-        # replace data[split]
-        data["train"] = new_dataset_train
-        data["test"] = new_dataset_test
-
-    # memo:
-    # I don't know a good way to extract just the features we need... this is brute force.
-    # I really wanted to extract them at load_dataset() time, but that seems impossible.
-    # It might be better to rebuild the repository and push_to_hub().
-
-    new_dataset = dict()
-    new_dataset["train"] = Dataset.from_dict({
-        "image" : data["train"]["image"],
-        "caption" : data["train"]["caption"]
-    })
-
-    new_dataset["test"] = Dataset.from_dict({
-        "image" : data["test"]["image"],
-        "caption" : data["test"]["caption"]
-    })
-    data = datasets.DatasetDict(new_dataset)
-    train = data["train"]
-    test = data["test"]
-
-    for idx in range(len(train["image"])):
-        train["image"][idx] = data_transform(train["image"][idx])
-        test["image"][idx] = data_transform(test["image"][idx])
-
-    train = Dataset.from_dict(train)
-    train = train.with_format("torch") # avoid plain Python lists
-    test = Dataset.from_dict(test)
-    test = test.with_format("torch") # avoid plain Python lists
-
-    # or
-    train_loader = DataLoader(train, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
-    test_loader = DataLoader(test, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
-    return train_loader, test_loader
-
-```
-* then try this?
-```
-train_loader, test_loader = load_datasets()
-```
-
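The README's removed "Example : How to use this" section built the dataset by concatenating 20 per-range subsets; with the script now exposing a single config, loading should reduce to one call. A hedged sketch (the old README used repo id `mb23/GraySpectrogram`, while the new script's `_NAME` points at `mickylan2367/LoadingScriptPractice`, so the repo id below is an assumption):

```python
# Sketch: one load call replaces the subset_name_list concatenation loop.
from datasets import load_dataset

ds = load_dataset("mb23/GraySpectrogram")  # repo id is an assumption, see note above
print(ds)
```
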
data/test/{metadata_0001.jsonl → metadata.jsonl} RENAMED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0000.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0002.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0003.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0004.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0005.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0006.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0007.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0008.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0009.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0010.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0011.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0012.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0013.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0014.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0015.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0016.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0017.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0018.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0019.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0020.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test/metadata_0021.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
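Note that the rename at the top of this group is `+0 -0`: data/test/metadata.jsonl is the old metadata_0001.jsonl byte for byte, and the other 21 shards are deleted outright rather than merged into it. A quick local sanity check of what survives (hypothetical checkout paths):

```python
# Count the test examples left in the single surviving metadata file.
import json

with open("data/test/metadata.jsonl", encoding="utf-8") as f:
    rows = [json.loads(line) for line in f]
print(len(rows), "test examples remain")
```
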
data/test/test_0002.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4b799a2c436036f92650f195914f8fccf9fc6fb81378438f0f94a97828cff70d
-size 95043607

data/test/test_0003.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:145c674bf0d7f296b96a9795e406d870e465eb3269f12c6ab7e98db3e9ea8015
-size 53560142

data/test/test_0004.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e04b965240af249eb5dbc3b4ead9822457b8ebca40b6160c43058912c5c645a8
-size 52906639

data/test/test_0005.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1539aa68c808ccbcb004260fdb3da655f3bbc8eee023a940030276bf22f50d5e
-size 74353414

data/test/test_0006.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1a0c13db058af5f47df44cb404af02628b54deb8eb9a471b54a12ee8cc61ea11
-size 37484003

data/test/test_0007.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d2b9ff313898a94ec1ae9e53db18a279b43c84670fb5bc02e4f032e03bb7c3b6
-size 34210880

data/test/test_0008.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e0aacc6ba9729330ab485dea19c805b415d0ceac849519074589f2daecae5ad8
-size 41155429

data/test/test_0009.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ecfea32c648323ee4e49e3f6d1c5278e10dddb77ee4d923c594911f49ca84369
-size 31324986

data/test/test_0010.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:52c69d274169047b8f366af3d353fcd9a452503e312483403e810ae0da488335
-size 31324986

data/test/test_0011.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:762da27c32b2b000703edd0a8b5edca8dd5dbec68ed06319f6254333a8e8dad0
-size 38007883

data/test/test_0012.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d10a195ad768908f365dd05b25fb74070c4f6e852afe5b230fa42d481201d0d7
-size 45046391

data/test/test_0013.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a03e9120ea7c0533b10037382cfc0e354a3eab73d5eb9328fdb0b92d78f5b287
-size 34731000

data/test/test_0014.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:10596a1fc01f601293ede2527833be3eecc1f3e5ae29ff42a1b6dbba807ac094
-size 34023077

data/test/test_0015.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5afe079007f0a95f5063f4969c1cb82e03e83d11d93d8d8710b60e5256240f4d
-size 38458202

data/test/test_0016.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:09dccc8404043a13c276a943a4e8caae4eb52ad1a895a628f075ba160df774a9
-size 39240930

data/test/test_0017.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ec7da1e2daca2dc5a3a29b1503a725cd32a9d8898dc6078c33ee0e2fd5acf92c
-size 44973307

data/test/test_0018.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6139ebb0fd5bec1cf1a35f1854cb9c52135c038706dec31c01982f6124845f8b
-size 44639983

data/test/test_0019.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e4d308fea5139b2b0b507368688d83949f3f10e657606725c2e2ee1fd581db97
-size 38335932

data/test/test_0020.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:483d0eb1b6da4930300d731a3ea75f6daf5256fddceb2d53edf29e377d71474e
-size 45066606

data/test/test_0021.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9056fecd66b7a61205739162d8b100c2eb81bb8e1713f0075db396a61e2d2f80
-size 65583226

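Each deleted test_*.zip entry above is a Git LFS pointer file (version, oid, size), so the diff only removes three-line pointers; the `size` fields record the actual payload dropped from data/test. Summing the values copied from the pointers:

```python
# Total payload of the 20 deleted data/test archives, from the LFS "size" fields.
sizes = [
    95043607, 53560142, 52906639, 74353414, 37484003,
    34210880, 41155429, 31324986, 31324986, 38007883,
    45046391, 34731000, 34023077, 38458202, 39240930,
    44973307, 44639983, 38335932, 45066606, 65583226,
]
print(f"{sum(sizes) / 1e9:.2f} GB")  # about 0.92 GB across 20 archives
```
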
data/train/{train_0000.zip → data_0000.zip} RENAMED
File without changes
data/train/{train_0001.zip → data_0001.zip} RENAMED
File without changes
data/train/{metadata_0001.jsonl → metadata.jsonl} RENAMED
The diff for this file is too large to render. See raw diff
 
data/train/metadata_0000.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/train/metadata_0002.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/train/metadata_0003.jsonl DELETED
The diff for this file is too large to render. See raw diff