mickylan2367 committed
Commit 0d98637
1 Parent(s): 491fec1

git add many files

Files changed (4)
  1. .gitattributes +1 -0
  2. data/metadata.jsonl +3 -0
  3. data/train.zip +3 -0
  4. try.py +153 -0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
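With this pattern added, any *.jsonl file pushed to the repo is stored as a Git LFS pointer rather than inline content. As a minimal sketch (assuming you are already authenticated; this upload call is illustrative and not part of the commit), re-uploading the metadata through huggingface_hub would look like:

from huggingface_hub import HfApi

api = HfApi()
# The Hub applies the LFS rules from .gitattributes server-side, so this
# upload is stored as an LFS object. The local path is hypothetical.
api.upload_file(
    path_or_fileobj="data/metadata.jsonl",
    path_in_repo="data/metadata.jsonl",
    repo_id="mickylan2367/LoadingScriptPractice",
    repo_type="dataset",
)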
data/metadata.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f4497176d180c649f6011dacb396580cb96b744894412ccdb3121593178d5cf
+ size 2580
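The LFS pointer hides the actual rows. Judging from the fields read in try.py below, each line of metadata.jsonl is presumably a JSON object with file_name, caption, data_idx, number, and label keys. A hypothetical record (values invented for illustration) could be written like this:

import json

record = {
    "file_name": "spectrogram_0.png",  # assumed naming scheme
    "caption": "A mellow piano piece with soft percussion.",
    "data_idx": 0,
    "number": 0,
    "label": "music",
}
with open("data/metadata.jsonl", "a", encoding="utf-8") as f:
    f.write(json.dumps(record, ensure_ascii=False) + "\n")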
data/train.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7305b55622d9b2562f24f414ab8d6784ffe25524f9d72270d8845798aa76d9f
+ size 947921
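try.py pairs zip members with metadata rows by position, so train.zip presumably has to be packed in the same order as metadata.jsonl. A sketch of that packing step (the local paths are assumptions; this script is not part of the commit):

import json
import zipfile

with open("data/metadata.jsonl", encoding="utf-8") as f:
    file_names = [json.loads(line)["file_name"] for line in f]

with zipfile.ZipFile("data/train.zip", "w") as zf:
    for name in file_names:
        # "images/" as the local source directory is hypothetical.
        zf.write(f"images/{name}", arcname=name)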
try.py ADDED
@@ -0,0 +1,153 @@
+
+ import datasets
+ from huggingface_hub import HfApi
+ from datasets import DownloadManager, DatasetInfo
+ from datasets.data_files import DataFilesDict
+ import os
+ import json
+
+
+ # Fill in the configuration here.
+ _NAME = "mickylan2367/LoadingScriptPractice"
+ _EXTENSION = [".png"]
+ _REVISION = "main"
+
+ # _HOMEPAGE = "https://github.com/fastai/imagenette"
+ # Once the script's permanent location is decided, put the homepage URL here.
+ _HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"
+
+ _DESCRIPTION = f"""\
+ {_NAME}: spectrogram PNG images derived from Google's MusicCaps dataset.
+ Used for project-based learning.
+ """
+
+ # Huh... what is this? (;´・ω・)
+ # _IMAGES_DIR = "mickylan2367/images/data/"
+ # _REPO = "https://huggingface.co/datasets/frgfm/imagenette/resolve/main/metadata"
+
+ # Possibly useful reference URLs:
+ # https://huggingface.co/docs/datasets/v1.1.1/_modules/datasets/utils/download_manager.html
+ # https://huggingface.co/datasets/animelover/danbooru2022/blob/main/danbooru2022.py
+ # https://huggingface.co/datasets/food101/blob/main/food101.py
+ # https://huggingface.co/docs/datasets/about_dataset_load
+
+
+ class spectrogram_musicCapsConfig(datasets.BuilderConfig):
+     """Builder Config for spectrogram_MusicCaps"""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig
+         Args:
+             data_url: `string`, url to download the zip file from.
+             metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(spectrogram_musicCapsConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
+         # self.data_url = data_url
+         # self.metadata_urls = metadata_urls
+
+
+ class spectrogram_musicCaps(datasets.GeneratorBasedBuilder):
+
+     # Dataset subsets (configs) are defined here.
+     BUILDER_CONFIGS = [
+         spectrogram_musicCapsConfig(
+             name="MusicCaps data 0_10",
+             description="Datasets from MusicCaps by Mikan",
+         ),
+         # spectrogram_musicCapsConfig(
+         #     name="MusicCaps data 10_100",
+         #     description="Datasets second action by Mikan",
+         # )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "image": datasets.Image(),
+                     "caption": datasets.Value("string"),
+                     "data_idx": datasets.Value("int32"),
+                     "number": datasets.Value("int32"),
+                     "label": datasets.Value("string"),
+                 }
+             ),
+             # supervised_keys must be an (input, target) pair, not a list of
+             # all columns, so only the image/caption pairing is declared.
+             supervised_keys=("image", "caption"),
+             homepage=_HOMEPAGE,
+             # citation=_CITATION,
+             # license=_LICENSE,
+             # task_templates=[ImageClassification(image_column="image", label_column="label")],
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager):
+         # Fetch the repository's file listing from the Hugging Face Hub.
+         hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
+
+         # Resolve the metadata (.jsonl) files per split.
+         split_metadata_paths = DataFilesDict.from_hf_repo(
+             {datasets.Split.TRAIN: ["**"]},
+             dataset_info=hfh_dataset_info,
+             allowed_extensions=[".jsonl"],
+         )
+
+         # Resolve the **.zip URLs as a dict keyed by split.
+         data_path = DataFilesDict.from_hf_repo(
+             {datasets.Split.TRAIN: ["**"]},
+             dataset_info=hfh_dataset_info,
+             allowed_extensions=[".zip"],
+         )
+
+         gs = []
+         for split, files in data_path.items():
+             # split : "train", "test", or "validation"
+             # files : the zip archives for that split
+
+             # Download the metadata file and the (first) zip archive to the
+             # local cache. The archive is only downloaded, not extracted,
+             # because its members are streamed via iter_archive below.
+             split_metadata_path = dl_manager.download_and_extract(split_metadata_paths[split][0])
+             downloaded_files_path = dl_manager.download(files[0])
+
+             # Hand the archive members to _generate_examples as an iterator
+             # of (path, file object) pairs.
+             gs.append(
+                 datasets.SplitGenerator(
+                     name=split,
+                     gen_kwargs={
+                         "images": dl_manager.iter_archive(downloaded_files_path),
+                         "metadata_path": split_metadata_path,
+                     },
+                 )
+             )
+         return gs
+
+     def _generate_examples(self, images, metadata_path):
+         """Generate images and captions for splits."""
+         # with open(metadata_path, encoding="utf-8") as f:
+         #     files_to_keep = set(f.read().split("\n"))
+         file_list = list()
+         caption_list = list()
+         dataIDX_list = list()
+         num_list = list()
+         label_list = list()
+
+         # Read every metadata row up front, keeping the file order.
+         with open(metadata_path, encoding="utf-8") as fin:
+             for line in fin:
+                 data = json.loads(line)
+                 file_list.append(data["file_name"])
+                 caption_list.append(data["caption"])
+                 dataIDX_list.append(data["data_idx"])
+                 num_list.append(data["number"])
+                 label_list.append(data["label"])
+
+         # Pair archive members with metadata rows by position; this assumes
+         # train.zip was packed in the same order as metadata.jsonl.
+         for idx, (file_path, file_obj) in enumerate(images):
+             yield file_path, {
+                 "image": {
+                     "path": file_path,
+                     "bytes": file_obj.read(),
+                 },
+                 "caption": caption_list[idx],
+                 "data_idx": dataIDX_list[idx],
+                 "number": num_list[idx],
+                 "label": label_list[idx],
+             }
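If the script behaves as intended, loading the dataset from the Hub should look roughly like the following (the split name and column access are inferred from the code above, not verified against a live run):

from datasets import load_dataset

ds = load_dataset("mickylan2367/LoadingScriptPractice", split="train")
print(ds[0]["caption"], ds[0]["number"], ds[0]["label"])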