inmortalkaktus committed
Commit: dc5787e
Parent: 1c8e134

add initial files

.gitattributes CHANGED
@@ -49,3 +49,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+data/train-00000-of-00001.parquet filter=lfs diff=lfs merge=lfs -text
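This new attribute line routes the Parquet shard through Git LFS, which is what produces the pointer file committed below; without it, the ~391 KB binary would land in git history directly.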
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8a313d4fdf65dbd1c286b2b8b28949982a110695de4e04a59bdb569b90b8fd7
+size 391065
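The pointer resolves to a ~391 KB Parquet shard. As a quick sanity check that the shard matches what the loader script below expects, a minimal sketch with pandas (assuming the file has been fetched locally, e.g. from the _URL in the script, and that the image column stores {bytes, path} structs as _generate_examples implies):

import pandas as pd

# Assumption: the shard was downloaded locally from _URL.
df = pd.read_parquet("train-00000-of-00001.parquet")
print(df.columns.tolist())         # expected: ['image', 'text']
row = df.iloc[0]
print(row["text"])                 # caption of the first sprite
print(len(row["image"]["bytes"]))  # size of the encoded image payload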
pokemon-pixel-art.py ADDED
@@ -0,0 +1,99 @@
+# datasets-cli test pokemon-pixel-art.py --save_info --all_configs
+#
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Pixel art Pokemon sprites paired with text captions."""
+
+
+import pandas as pd
+
+import datasets
+from PIL import Image
+from io import BytesIO
+
+
+_CITATION = """\
+@InProceedings{huggingface:dataset,
+  title = {A pixel art Pokemon sprites dataset},
+  author = {InmortalKaktus / aleoli},
+  year = {2022}
+}
+"""
+
+_DESCRIPTION = """\
+Pixel art Pokemon sprites paired with short text descriptions.
+"""
+
+# TODO: Add a link to an official homepage for the dataset here
+_HOMEPAGE = ""
+
+# TODO: Add the licence for the dataset here if you can find it
+_LICENSE = ""
+
+_URL = "https://huggingface.co/datasets/inmortalkaktus/pokemon-pixel-art/resolve/main/data/train-00000-of-00001.parquet"
+
+
+class PokemonPixelArt(datasets.GeneratorBasedBuilder):
+    """Pokemon pixel art sprites paired with text captions."""
+
+    VERSION = datasets.Version("1.1.0")
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "image": datasets.Image(decode=True),
+                "text": datasets.Value("string"),
+            }
+        )
+
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types.
+            features=features,
+            # Homepage of the dataset for documentation
+            homepage=_HOMEPAGE,
+            # License for the dataset if available
+            license=_LICENSE,
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        data_dir = dl_manager.download(_URL)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": data_dir,
+                    "split": "train",
+                },
+            ),
+        ]
+
+    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, filepath, split):
+        df = pd.read_parquet(filepath)
+
+        for index, row in df.iterrows():
+            # Each image cell stores the raw encoded bytes; decode them with PIL.
+            byte_img = BytesIO(row["image"]["bytes"])
+            img = Image.open(byte_img)
+            img = img.convert("RGB")
+
+            yield index, {
+                "image": img,
+                "text": row["text"],
+            }
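With the script and shard in place, the dataset should be loadable through the standard datasets API; a minimal usage sketch, assuming the repo id implied by _URL:

from datasets import load_dataset

# Assumption: repo id taken from _URL in the script above.
ds = load_dataset("inmortalkaktus/pokemon-pixel-art", split="train")
example = ds[0]
print(example["text"])   # caption
example["image"].show()  # decoded PIL.Image sprite

The comment at the top of the script gives the matching verification command: datasets-cli test pokemon-pixel-art.py --save_info --all_configs.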