mickylan2367 committed
Commit b24185e
1 Parent(s): 208bac9

added another loading Script

Files changed (2)
  1. LoadingScriptPractice.py +52 -27
  2. try.py +0 -183
LoadingScriptPractice.py CHANGED
@@ -5,6 +5,8 @@ from datasets import DownloadManager, DatasetInfo
 from datasets.data_files import DataFilesDict
 import os
 import json
+from os.path import dirname, basename
+from pathlib import Path
 
 
 # Fill in the settings here
@@ -12,45 +14,55 @@ _NAME = "mickylan2367/LoadingScriptPractice"
 _EXTENSION = [".png"]
 _REVISION = "main"
 
+# _HOMEPAGE = "https://github.com/fastai/imagenette"
 # Once a home for this script is decided, add its homepage URL here
-_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/LoadingScriptPractice"
+_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"
 
 _DESCRIPTION = f"""\
 {_NAME} Datasets including spectrogram.png file from Google MusicCaps Datasets!
 Using for Project Learning...
 """
 
-# Huh... what is this? (;´・ω・)
-# _IMAGES_DIR = "mickylan2367/images/data/"
-# _REPO = "https://huggingface.co/datasets/frgfm/imagenette/resolve/main/metadata"
+# Get the URLs
+
+# class LoadingScriptPracticeConfig(datasets.BuilderConfig):
+#     """BuilderConfig for Imagette."""
+
+#     def __init__(self, data_url, metadata_urls, **kwargs):
+#         """BuilderConfig for Imagette.
+#         Args:
+#             data_url: `string`, url to download the zip file from.
+#             metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
+#             **kwargs: keyword arguments forwarded to super.
+#         """
+#         super(LoadingScriptPracticeConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
+#         self.data_url = data_url
+#         self.metadata_urls = metadata_urls
 
-# Potentially useful reference URLs
-# https://huggingface.co/docs/datasets/v1.1.1/_modules/datasets/utils/download_manager.html
-# https://huggingface.co/docs/datasets/package_reference/builder_classes
-# https://huggingface.co/datasets/animelover/danbooru2022/blob/main/danbooru2022.py
-# https://huggingface.co/datasets/food101/blob/main/food101.py
-# https://huggingface.co/docs/datasets/about_dataset_load
-# https://huggingface.co/datasets/frgfm/imagenette/blob/main/imagenette.py
-# https://huggingface.co/docs/datasets/v1.2.1/add_dataset.html
-# DatasetInfo : https://huggingface.co/docs/datasets/package_reference/main_classes
 
-# Dataset used for the class labels
-# https://huggingface.co/datasets/marsyas/gtzan
 
 class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
 
     # Define the data subsets (configs) here
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
-            name="MusicCaps data 0_3",
+            name="train",
             description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
+            # data_url = train_data_url["train"][0],
+            # metadata_urls = {
+            #     "train" : train_metadata_paths["train"][0]
+            # }
         ),
 
         # splits (dict, optional) — The mapping between split name and metadata.
-        # LoadingScriptPracticeConfig(
-        #     name="MusicCaps data ",
-        #     description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
-        # )
+        datasets.BuilderConfig(
+            name="test",
+            description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
+            # data_url = test_data_url["test"][0],
+            # metadata_urls={
+            #     "test" : test_metadata_paths["test"][0]
+            # }
+        )
     ]
 
     def _info(self) -> DatasetInfo:
@@ -89,28 +101,41 @@ class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
         # Fetch the data files from the Hugging Face repository
         hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
 
-        split_metadata_paths = DataFilesDict.from_hf_repo(
+        metadata_urls = DataFilesDict.from_hf_repo(
             {datasets.Split.TRAIN: ["**"]},
             dataset_info=hfh_dataset_info,
             allowed_extensions=["jsonl", ".jsonl"],
         )
-
+
         # Get the **.zip URLs as a dict?
-        data_path = DataFilesDict.from_hf_repo(
+        data_urls = DataFilesDict.from_hf_repo(
             {datasets.Split.TRAIN: ["**"]},
             dataset_info=hfh_dataset_info,
             allowed_extensions=["zip", ".zip"],
         )
+
+        data_paths = dict()
+        for path in data_urls["train"]:
+            dname = dirname(path)
+            folder = basename(Path(dname))
+            data_paths[folder] = path
+
+        metadata_paths = dict()
+        for path in metadata_urls["train"]:
+            dname = dirname(path)
+            folder = basename(Path(dname))
+            metadata_paths[folder] = path
+
 
         gs = []
-        for split, files in data_path.items():
+        for split, files in data_paths.items():
             '''
             split : "train" or "test" or "val"
            files : zip files
             '''
             # Get the list of URLs downloaded from the repository and cached for now
-            split_metadata_path = dl_manager.download_and_extract(split_metadata_paths[split][0])
-            downloaded_files_path = dl_manager.download(files[0])
+            metadata_path = dl_manager.download_and_extract(metadata_paths[split])
+            downloaded_files_path = dl_manager.download(files)
 
             # In the original code, the zip contents are passed straight to _generate_example as "filepath"?
             gs.append(
@@ -118,7 +143,7 @@ class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
                 name = split,
                 gen_kwargs={
                     "images" : dl_manager.iter_archive(downloaded_files_path),
-                    "metadata_path": split_metadata_path
+                    "metadata_path": metadata_path
                 }
             )
         )
 
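With this commit the script exposes two configs, "train" and "test". A minimal usage sketch, assuming the repo hosts per-split zip archives and metadata.jsonl files laid out the way _split_generators expects (the split keys come from folder names on the Hub, so the exact names depend on the repo layout):

```python
from datasets import load_dataset

# "train" is one of the BUILDER_CONFIGS defined above; the loading script
# resolves the zip and metadata.jsonl URLs from the repository itself.
ds = load_dataset("mickylan2367/LoadingScriptPractice", "train", split="train")

sample = ds[0]
print(sample["caption"], sample["label"])  # caption text and ClassLabel index
```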
try.py DELETED
@@ -1,183 +0,0 @@
-
-import datasets
-from huggingface_hub import HfApi
-from datasets import DownloadManager, DatasetInfo
-from datasets.data_files import DataFilesDict
-import os
-import json
-from os.path import dirname, basename
-from pathlib import Path
-
-
-# Fill in the settings here
-_NAME = "mickylan2367/LoadingScriptPractice"
-_EXTENSION = [".png"]
-_REVISION = "main"
-
-# _HOMEPAGE = "https://github.com/fastai/imagenette"
-# Once a home for this script is decided, add its homepage URL here
-_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"
-
-_DESCRIPTION = f"""\
-{_NAME} Datasets including spectrogram.png file from Google MusicCaps Datasets!
-Using for Project Learning...
-"""
-
-# Get the URLs
-
-# class LoadingScriptPracticeConfig(datasets.BuilderConfig):
-#     """BuilderConfig for Imagette."""
-
-#     def __init__(self, data_url, metadata_urls, **kwargs):
-#         """BuilderConfig for Imagette.
-#         Args:
-#             data_url: `string`, url to download the zip file from.
-#             metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
-#             **kwargs: keyword arguments forwarded to super.
-#         """
-#         super(LoadingScriptPracticeConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-#         self.data_url = data_url
-#         self.metadata_urls = metadata_urls
-
-
-
-class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
-
-    # Define the data subsets (configs) here
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="train",
-            description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
-            # data_url = train_data_url["train"][0],
-            # metadata_urls = {
-            #     "train" : train_metadata_paths["train"][0]
-            # }
-        ),
-
-        # splits (dict, optional) — The mapping between split name and metadata.
-        datasets.BuilderConfig(
-            name="test",
-            description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
-            # data_url = test_data_url["test"][0],
-            # metadata_urls={
-            #     "test" : test_metadata_paths["test"][0]
-            # }
-        )
-    ]
-
-    def _info(self) -> DatasetInfo:
-        return datasets.DatasetInfo(
-            description = self.config.description,
-            features=datasets.Features(
-                {
-                    "image": datasets.Image(),
-                    "caption": datasets.Value("string"),
-                    "data_idx": datasets.Value("int32"),
-                    "number" : datasets.Value("int32"),
-                    "label" : datasets.ClassLabel(
-                        names=[
-                            "blues",
-                            "classical",
-                            "country",
-                            "disco",
-                            "hiphop",
-                            "metal",
-                            "pop",
-                            "reggae",
-                            "rock",
-                            "jazz"
-                        ]
-                    )
-                }
-            ),
-            supervised_keys=("image", "caption"),
-            homepage=_HOMEPAGE,
-            citation= "",
-            # license=_LICENSE,
-            # task_templates=[ImageClassification(image_column="image", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager: DownloadManager):
-        # Fetch the data files from the Hugging Face repository
-        hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
-
-        metadata_urls = DataFilesDict.from_hf_repo(
-            {datasets.Split.TRAIN: ["**"]},
-            dataset_info=hfh_dataset_info,
-            allowed_extensions=["jsonl", ".jsonl"],
-        )
-
-        # Get the **.zip URLs as a dict?
-        data_urls = DataFilesDict.from_hf_repo(
-            {datasets.Split.TRAIN: ["**"]},
-            dataset_info=hfh_dataset_info,
-            allowed_extensions=["zip", ".zip"],
-        )
-
-        data_paths = dict()
-        for path in data_urls["train"]:
-            dname = dirname(path)
-            folder = basename(Path(dname))
-            data_paths[folder] = path
-
-        metadata_paths = dict()
-        for path in metadata_urls["train"]:
-            dname = dirname(path)
-            folder = basename(Path(dname))
-            metadata_paths[folder] = path
-
-
-        gs = []
-        for split, files in data_paths.items():
-            '''
-            split : "train" or "test" or "val"
-            files : zip files
-            '''
-            # Get the list of URLs downloaded from the repository and cached for now
-            metadata_path = dl_manager.download_and_extract(metadata_paths[split])
-            downloaded_files_path = dl_manager.download(files)
-
-            # In the original code, the zip contents are passed straight to _generate_example as "filepath"?
-            gs.append(
-                datasets.SplitGenerator(
-                    name = split,
-                    gen_kwargs={
-                        "images" : dl_manager.iter_archive(downloaded_files_path),
-                        "metadata_path": metadata_path
-                    }
-                )
-            )
-        return gs
-
-    def _generate_examples(self, images, metadata_path):
-        """Generate images and captions for splits."""
-        # with open(metadata_path, encoding="utf-8") as f:
-        #     files_to_keep = set(f.read().split("\n"))
-        file_list = list()
-        caption_list = list()
-        dataIDX_list = list()
-        num_list = list()
-        label_list = list()
-
-        with open(metadata_path) as fin:
-            for line in fin:
-                data = json.loads(line)
-                file_list.append(data["file_name"])
-                caption_list.append(data["caption"])
-                dataIDX_list.append(data["data_idx"])
-                num_list.append(data["number"])
-                label_list.append(data["label"])
-
-        for idx, (file_path, file_obj) in enumerate(images):
-            yield file_path, {
-                "image": {
-                    "path": file_path,
-                    "bytes": file_obj.read()
-                },
-                "caption" : caption_list[idx],
-                "data_idx" : dataIDX_list[idx],
-                "number" : num_list[idx],
-                "label": label_list[idx]
-            }
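For reference, `_generate_examples` (identical in the deleted try.py and the updated LoadingScriptPractice.py) reads five fields per line of metadata.jsonl. A sketch of one such row; the file name and caption are made up, and "label" must be one of the ten GTZAN genre names listed in the ClassLabel feature:

```python
import json

# Hypothetical metadata.jsonl row with the keys _generate_examples reads.
row = {
    "file_name": "train/spectrogram_0001.png",  # illustrative path
    "caption": "A mellow jazz track with brushed drums and upright bass.",
    "data_idx": 0,
    "number": 1,
    "label": "jazz",
}
print(json.dumps(row, ensure_ascii=False))  # one line of metadata.jsonl
```

Note that the generator pairs archive members with metadata purely by enumeration index, so the rows in metadata.jsonl must appear in the same order as the files inside the corresponding zip.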