mickylan2367 committed on
Commit e48cc21
1 Parent(s): 156906c

change repository structure

LoadingScriptPractice.py CHANGED
@@ -12,9 +12,8 @@ _NAME = "mickylan2367/LoadingScriptPractice"
 _EXTENSION = [".png"]
 _REVISION = "main"
 
-# _HOMEPAGE = "https://github.com/fastai/imagenette"
 # Once the program's final location is decided, set the homepage URL here
-_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"
+_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/LoadingScriptPractice"
 
 _DESCRIPTION = f"""\
 {_NAME} Datasets including spectrogram.png file from Google MusicCaps Datasets!
@@ -27,50 +26,61 @@ Using for Project Learning...
 
 # Potentially useful reference URLs
 # https://huggingface.co/docs/datasets/v1.1.1/_modules/datasets/utils/download_manager.html
+# https://huggingface.co/docs/datasets/package_reference/builder_classes
 # https://huggingface.co/datasets/animelover/danbooru2022/blob/main/danbooru2022.py
 # https://huggingface.co/datasets/food101/blob/main/food101.py
 # https://huggingface.co/docs/datasets/about_dataset_load
+# https://huggingface.co/datasets/frgfm/imagenette/blob/main/imagenette.py
+# https://huggingface.co/docs/datasets/v1.2.1/add_dataset.html
+# DatasetInfo : https://huggingface.co/docs/datasets/package_reference/main_classes
 
-
-class LoadingScriptPracticeConfig(datasets.BuilderConfig):
-    """Builder Config for spectrogram_MusicCaps"""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig
-        Args:
-            data_url: `string`, url to download the zip file from.
-            metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(LoadingScriptPracticeConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-        # self.data_url = data_url
-        # self.metadata_urls = metadata_urls
+# Dataset the class labels come from:
+# https://huggingface.co/datasets/marsyas/gtzan
 
 class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
 
     # Dataset subsets (configs) are defined here
     BUILDER_CONFIGS = [
-        LoadingScriptPracticeConfig(
+        datasets.BuilderConfig(
            name="MusicCaps data 0_3",
            description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
-        )
+        ),
+
+        # splits (dict, optional) — The mapping between split name and metadata.
+        # LoadingScriptPracticeConfig(
+        #     name="MusicCaps data ",
+        #     description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
+        # )
     ]
 
-    def _info(self):
+    def _info(self) -> DatasetInfo:
        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
+            description = self.config.description,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "caption": datasets.Value("string"),
                    "data_idx": datasets.Value("int32"),
                    "number" : datasets.Value("int32"),
-                    "label" : datasets.Value("string")
+                    "label" : datasets.ClassLabel(
+                        names=[
+                            "blues",
+                            "classical",
+                            "country",
+                            "disco",
+                            "hiphop",
+                            "metal",
+                            "pop",
+                            "reggae",
+                            "rock",
+                            "jazz"
+                        ]
+                    )
                }
            ),
-            supervised_keys=("image", "caption", "data_idx", "number", "label"),
+            supervised_keys=("image", "caption"),
            homepage=_HOMEPAGE,
-            # citation=_CITATION,
+            citation= "",
            # license=_LICENSE,
            # task_templates=[ImageClassification(image_column="image", label_column="label")],
        )
@@ -99,8 +109,8 @@ class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
            files : zip files
            '''
            # Download from the repository and get the locally cached paths
-            split_metadata_path = split_metadata_paths[split][0]
-            downloaded_files_path = dl_manager.download_and_extract(files)
+            split_metadata_path = dl_manager.download_and_extract(split_metadata_paths[split][0])
+            downloaded_files_path = dl_manager.download(files[0])
 
            # The original code seems to hand the zip contents straight to _generate_examples as "filepath"?
            gs.append(
@@ -111,10 +121,10 @@ class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
                        "metadata_path": split_metadata_path
                    }
                )
-                )
+            )
        return gs
 
-    def _generate_examples(self, images, metadata_path, dl_manager:DownloadManager):
+    def _generate_examples(self, images, metadata_path):
        """Generate images and captions for splits."""
        # with open(metadata_path, encoding="utf-8") as f:
        #     files_to_keep = set(f.read().split("\n"))
@@ -124,7 +134,7 @@ class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
        num_list = list()
        label_list = list()
 
-        with open(dl_manager.download_and_extract(metadata_path)) as fin:
+        with open(metadata_path) as fin:
            for line in fin:
                data = json.loads(line)
                file_list.append(data["file_name"])
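A quick usage sketch for the updated script (an illustration only, assuming the repository stays public, the config name "MusicCaps data 0_3" above is kept, and a datasets version that still executes loading scripts; recent releases additionally require trust_remote_code=True):

from datasets import load_dataset

# Builds the train split through LoadingScriptPractice.py; each example carries
# the image / caption / data_idx / number / label features declared in _info().
ds = load_dataset("mickylan2367/LoadingScriptPractice", "MusicCaps data 0_3", split="train")
print(ds[0]["caption"], ds[0]["label"])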
data/test/data-0000.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51f3fc0c7e3b8d072c84e40b8430d307372915e56f36e1d4c78fd8bd14d2dae2
+size 440052
data/test/metadata.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7249fa1e7e947587c4aff6ba30754d65b6e181f3b8d1aac6a11337322fb383b6
+size 2592
data/{data-0000.zip → train/data-0000.zip} RENAMED
File without changes
data/{metadata.jsonl → train/metadata.jsonl} RENAMED
File without changes
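Together with the two files added above, these renames give the repository the split-per-directory layout that the script's _split_generators resolves via DataFilesDict.from_hf_repo (file names from this commit, sizes from the LFS pointers above):

data/
  train/
    data-0000.zip
    metadata.jsonl
  test/
    data-0000.zip    (440052 bytes)
    metadata.jsonl   (2592 bytes)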
try.py ADDED
@@ -0,0 +1,191 @@
+
+import datasets
+from huggingface_hub import HfApi
+from datasets import DownloadManager, DatasetInfo
+from datasets.data_files import DataFilesDict
+import os
+import json
+
+
+# Fill in the configuration here
+_NAME = "mickylan2367/LoadingScriptPractice"
+_EXTENSION = [".png"]
+_REVISION = "main"
+
+# _HOMEPAGE = "https://github.com/fastai/imagenette"
+# Once the program's final location is decided, set the homepage URL here
+_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"
+
+_DESCRIPTION = f"""\
+{_NAME} Datasets including spectrogram.png file from Google MusicCaps Datasets!
+Using for Project Learning...
+"""
+
+# Fetch the URLs
+
+# class LoadingScriptPracticeConfig(datasets.BuilderConfig):
+#     """BuilderConfig for Imagette."""
+
+#     def __init__(self, data_url, metadata_urls, **kwargs):
+#         """BuilderConfig for Imagette.
+#         Args:
+#             data_url: `string`, url to download the zip file from.
+#             metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
+#             **kwargs: keyword arguments forwarded to super.
+#         """
+#         super(LoadingScriptPracticeConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
+#         self.data_url = data_url
+#         self.metadata_urls = metadata_urls
+
+
+class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
+
+    # Dataset subsets (configs) are defined here
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="train",
+            description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
+            # data_url = train_data_url["train"][0],
+            # metadata_urls = {
+            #     "train" : train_metadata_paths["train"][0]
+            # }
+        ),
+
+        # splits (dict, optional) — The mapping between split name and metadata.
+        datasets.BuilderConfig(
+            name="test",
+            description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
+            # data_url = test_data_url["test"][0],
+            # metadata_urls={
+            #     "test" : test_metadata_paths["test"][0]
+            # }
+        )
+    ]
+
+    def _info(self) -> DatasetInfo:
+        return datasets.DatasetInfo(
+            description = self.config.description,
+            features=datasets.Features(
+                {
+                    "image": datasets.Image(),
+                    "caption": datasets.Value("string"),
+                    "data_idx": datasets.Value("int32"),
+                    "number" : datasets.Value("int32"),
+                    "label" : datasets.ClassLabel(
+                        names=[
+                            "blues",
+                            "classical",
+                            "country",
+                            "disco",
+                            "hiphop",
+                            "metal",
+                            "pop",
+                            "reggae",
+                            "rock",
+                            "jazz"
+                        ]
+                    )
+                }
+            ),
+            supervised_keys=("image", "caption"),
+            homepage=_HOMEPAGE,
+            citation= "",
+            # license=_LICENSE,
+            # task_templates=[ImageClassification(image_column="image", label_column="label")],
+        )
+
+    def _split_generators(self, dl_manager: DownloadManager):
+        # Fetch the data files from the Hugging Face repository
+        hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
+
+        train_metadata_paths = DataFilesDict.from_hf_repo(
+            {datasets.Split.TRAIN: ["**"]},
+            dataset_info=hfh_dataset_info,
+            allowed_extensions=["jsonl", ".jsonl"],
+        )
+
+        test_metadata_paths = DataFilesDict.from_hf_repo(
+            {datasets.Split.TEST: ["**"]},
+            dataset_info=hfh_dataset_info,
+            allowed_extensions=["jsonl", ".jsonl"],
+        )
+
+        # Fetch the URLs of the **.zip files as a dict
+        train_data_url = DataFilesDict.from_hf_repo(
+            {datasets.Split.TRAIN: ["**"]},
+            dataset_info=hfh_dataset_info,
+            allowed_extensions=["zip", ".zip"],
+        )
+
+        test_data_url = DataFilesDict.from_hf_repo(
+            {datasets.Split.TEST: ["**"]},
+            dataset_info=hfh_dataset_info,
+            allowed_extensions=["zip", ".zip"],
+        )
+
+        # Restructure for easier handling
+        data_path = {
+            "train" : train_data_url["train"][0],
+            "test" : test_data_url["test"][0]
+        }
+
+        split_metadata_paths = {
+            "train" : train_metadata_paths["train"][0],
+            "test" : test_metadata_paths["test"][0]
+        }
+
+        gs = []
+        for split, files in data_path.items():
+            '''
+            split : "train" or "test" or "val"
+            files : zip files
+            '''
+            # Download from the repository and get the locally cached paths
+            split_metadata_path = dl_manager.download_and_extract(split_metadata_paths[split])
+            downloaded_files_path = dl_manager.download(files)
+
+            # The original code seems to hand the zip contents straight to _generate_examples as "filepath"?
+            gs.append(
+                datasets.SplitGenerator(
+                    name = split,
+                    gen_kwargs={
+                        "images" : dl_manager.iter_archive(downloaded_files_path),
+                        "metadata_path": split_metadata_path
+                    }
+                )
+            )
+        return gs
+
+    def _generate_examples(self, images, metadata_path):
+        """Generate images and captions for splits."""
+        # with open(metadata_path, encoding="utf-8") as f:
+        #     files_to_keep = set(f.read().split("\n"))
+        file_list = list()
+        caption_list = list()
+        dataIDX_list = list()
+        num_list = list()
+        label_list = list()
+
+        with open(metadata_path) as fin:
+            for line in fin:
+                data = json.loads(line)
+                file_list.append(data["file_name"])
+                caption_list.append(data["caption"])
+                dataIDX_list.append(data["data_idx"])
+                num_list.append(data["number"])
+                label_list.append(data["label"])
+
+        for idx, (file_path, file_obj) in enumerate(images):
+            yield file_path, {
+                "image": {
+                    "path": file_path,
+                    "bytes": file_obj.read()
+                },
+                "caption" : caption_list[idx],
+                "data_idx" : dataIDX_list[idx],
+                "number" : num_list[idx],
+                "label": label_list[idx]
+            }
+
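Note that _generate_examples pairs the i-th archive entry with the i-th metadata row by list index, so each data/*/metadata.jsonl is expected to be line-delimited JSON whose rows appear in the same order as the images inside the zip. An illustrative row under that assumption (the keys are the ones the script reads; the values here are hypothetical):

{"file_name": "spectrogram_000.png", "caption": "A mellow blues guitar riff over a slow drum groove.", "data_idx": 0, "number": 0, "label": "blues"}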