Eugene Siow committed
Commit: c4d7073
Parent: 4a65909

Initial commit.

Files changed (2):
  1. Div2k.py +157 -0
  2. README.md +206 -0
Div2k.py ADDED
@@ -0,0 +1,157 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DIV2K dataset: DIVerse 2K resolution high quality images.
Adapted from TF Datasets: https://github.com/tensorflow/datasets/"""

import os
from pathlib import Path

import datasets

_CITATION = """
@InProceedings{Agustsson_2017_CVPR_Workshops,
    author = {Agustsson, Eirikur and Timofte, Radu},
    title = {NTIRE 2017 Challenge on Single Image Super-Resolution: Dataset and Study},
    booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
    url = "http://www.vision.ee.ethz.ch/~timofter/publications/Agustsson-CVPRW-2017.pdf",
    month = {July},
    year = {2017}
}
"""

_DESCRIPTION = """
DIV2K dataset: DIVerse 2K resolution high quality images as used for the challenges @ NTIRE (CVPR 2017 and
CVPR 2018) and @ PIRM (ECCV 2018)
"""

_HOMEPAGE = "https://data.vision.ee.ethz.ch/cvl/DIV2K/"

_LICENSE = """
Please notice that this dataset is made available for academic research purpose only. All the images are
collected from the Internet, and the copyright belongs to the original owners. If any of the images belongs to
you and you would like it removed, please kindly inform the authors, and they will remove it from the dataset
immediately.
"""

_DL_URL = "https://data.vision.ee.ethz.ch/cvl/DIV2K/"

_DL_URLS = {
    "train_hr": _DL_URL + "DIV2K_train_HR.zip",
    "valid_hr": _DL_URL + "DIV2K_valid_HR.zip",
    "train_bicubic_x2": _DL_URL + "DIV2K_train_LR_bicubic_X2.zip",
    "train_unknown_x2": _DL_URL + "DIV2K_train_LR_unknown_X2.zip",
    "valid_bicubic_x2": _DL_URL + "DIV2K_valid_LR_bicubic_X2.zip",
    "valid_unknown_x2": _DL_URL + "DIV2K_valid_LR_unknown_X2.zip",
    "train_bicubic_x3": _DL_URL + "DIV2K_train_LR_bicubic_X3.zip",
    "train_unknown_x3": _DL_URL + "DIV2K_train_LR_unknown_X3.zip",
    "valid_bicubic_x3": _DL_URL + "DIV2K_valid_LR_bicubic_X3.zip",
    "valid_unknown_x3": _DL_URL + "DIV2K_valid_LR_unknown_X3.zip",
    "train_bicubic_x4": _DL_URL + "DIV2K_train_LR_bicubic_X4.zip",
    "train_unknown_x4": _DL_URL + "DIV2K_train_LR_unknown_X4.zip",
    "valid_bicubic_x4": _DL_URL + "DIV2K_valid_LR_bicubic_X4.zip",
    "valid_unknown_x4": _DL_URL + "DIV2K_valid_LR_unknown_X4.zip",
    "train_bicubic_x8": _DL_URL + "DIV2K_train_LR_x8.zip",
    "valid_bicubic_x8": _DL_URL + "DIV2K_valid_LR_x8.zip",
    "train_realistic_mild_x4": _DL_URL + "DIV2K_train_LR_mild.zip",
    "valid_realistic_mild_x4": _DL_URL + "DIV2K_valid_LR_mild.zip",
    "train_realistic_difficult_x4": _DL_URL + "DIV2K_train_LR_difficult.zip",
    "valid_realistic_difficult_x4": _DL_URL + "DIV2K_valid_LR_difficult.zip",
    "train_realistic_wild_x4": _DL_URL + "DIV2K_train_LR_wild.zip",
    "valid_realistic_wild_x4": _DL_URL + "DIV2K_valid_LR_wild.zip",
}

_DATA_OPTIONS = [
    "bicubic_x2", "bicubic_x3", "bicubic_x4", "bicubic_x8", "unknown_x2",
    "unknown_x3", "unknown_x4", "realistic_mild_x4", "realistic_difficult_x4",
    "realistic_wild_x4"
]


class Div2kConfig(datasets.BuilderConfig):
    """BuilderConfig for Div2k."""

    def __init__(self, name, **kwargs):
        """Constructs a Div2kConfig."""
        if name not in _DATA_OPTIONS:
            raise ValueError("data must be one of %s" % _DATA_OPTIONS)

        super(Div2kConfig, self).__init__(name=name, **kwargs)
        self.data = name
        self.download_urls = {
            "train_lr_url": _DL_URLS["train_" + self.data],
            "valid_lr_url": _DL_URLS["valid_" + self.data],
            "train_hr_url": _DL_URLS["train_hr"],
            "valid_hr_url": _DL_URLS["valid_hr"],
        }


class Div2k(datasets.GeneratorBasedBuilder):
    """DIV2K dataset: DIVerse 2K resolution high quality images."""

    BUILDER_CONFIGS = [
        Div2kConfig(version=datasets.Version("2.0.0"), name=data) for data in _DATA_OPTIONS
    ]

    DEFAULT_CONFIG_NAME = "bicubic_x2"

    def _info(self):
        features = datasets.Features(
            {
                "lr": datasets.Value("string"),
                "hr": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        extracted_paths = dl_manager.download_and_extract(
            self.config.download_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "lr_path": extracted_paths["train_lr_url"],
                    "hr_path": os.path.join(extracted_paths["train_hr_url"], "DIV2K_train_HR"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "lr_path": extracted_paths["valid_lr_url"],
                    "hr_path": str(os.path.join(extracted_paths["valid_hr_url"], "DIV2K_valid_HR")),
                },
            ),
        ]

    def _generate_examples(self, lr_path, hr_path):
        """Yields examples as (key, example) tuples."""
        extensions = {'.png'}
        for file_path in sorted(Path(lr_path).glob("**/*")):
            if file_path.suffix in extensions:
                file_path_str = str(file_path.as_posix())
                yield file_path_str, {
                    "lr": file_path_str,
                    "hr": str((Path(hr_path) / f"{str(file_path.name)[:4]}.png").as_posix())
                }
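
The script exposes each example as a pair of file-path strings rather than decoded images, with one configuration per low-resolution degradation and scale, all sharing the same HR archives. A minimal hedged sketch of using it follows; it assumes the script is saved locally as `./Div2k.py` (loading by Hub name is shown in the README below):
```python
# Minimal usage sketch, assuming this script is saved locally as ./Div2k.py.
from datasets import load_dataset

train = load_dataset("./Div2k.py", "bicubic_x4", split="train")
valid = load_dataset("./Div2k.py", "bicubic_x4", split="validation")

print(train.features)          # both 'lr' and 'hr' are string paths to .png files
print(len(train), len(valid))  # 800 training pairs, 100 validation pairs
# Any name outside _DATA_OPTIONS (e.g. "bicubic_x5") raises ValueError in Div2kConfig.
```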
README.md ADDED
@@ -0,0 +1,206 @@
---
annotations_creators:
- machine-generated
language_creators:
- found
languages: []
licenses:
- other-academic-use
multilinguality:
- monolingual
pretty_name: Div2k
size_categories:
- unknown
source_datasets:
- original
task_categories:
- other
task_ids:
- other-other-image-super-resolution
---

# Dataset Card for Div2k

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** https://data.vision.ee.ethz.ch/cvl/DIV2K/
- **Repository:** https://huggingface.co/datasets/eugenesiow/Div2k
- **Paper:** http://www.vision.ee.ethz.ch/~timofter/publications/Agustsson-CVPRW-2017.pdf
- **Leaderboard:** https://github.com/eugenesiow/super-image#scale-x2
- **Point of Contact:** radu.timofte@vision.ee.ethz.ch

### Dataset Summary

DIV2K is a dataset of RGB images (2K resolution, high quality) with a large diversity of contents.

The DIV2K dataset is divided into:

- train data: starting from 800 high-definition, high-resolution images, corresponding low-resolution images are obtained, and both the high- and low-resolution images are provided for downscaling factors of 2, 3, and 4
- validation data: 100 high-definition, high-resolution images are used to generate the corresponding low-resolution images. The low-resolution images were provided from the beginning of the challenge so that participants could get online feedback from the validation server; the high-resolution images were released when the final phase of the challenge started.

Install with `pip`:
```bash
pip install datasets super-image
```

Evaluate a model with the [`super-image`](https://github.com/eugenesiow/super-image) library:
```python
from datasets import load_dataset
from super_image import EdsrModel
from super_image.data import EvalDataset, EvalMetrics

dataset = load_dataset('eugenesiow/Div2k', 'bicubic_x2', split='validation')
eval_dataset = EvalDataset(dataset)
model = EdsrModel.from_pretrained('eugenesiow/edsr-base', scale=2)  # any pretrained super-image model for the chosen scale
EvalMetrics.evaluate(model, eval_dataset)
```
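
To train a model on DIV2K with the same library, a hedged sketch follows; it assumes the training utilities documented in the `super-image` repository (`Trainer`, `TrainingArguments`, `TrainDataset`, `augment_five_crop`), so check that repository for the exact API and current argument names:
```python
from datasets import load_dataset
from super_image import Trainer, TrainingArguments, EdsrModel, EdsrConfig
from super_image.data import TrainDataset, EvalDataset, augment_five_crop

# Augment the 800 training images with five-crop patches, then wrap for training.
augmented = load_dataset('eugenesiow/Div2k', 'bicubic_x4', split='train') \
    .map(augment_five_crop, batched=True, desc="Augmenting Dataset")
train_dataset = TrainDataset(augmented)
eval_dataset = EvalDataset(load_dataset('eugenesiow/Div2k', 'bicubic_x4', split='validation'))

training_args = TrainingArguments(
    output_dir='./results',  # output directory for model checkpoints
    num_train_epochs=1000,   # total number of training epochs
)
model = EdsrModel(EdsrConfig(scale=4))  # train an EDSR model for 4x upscaling from scratch

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
)
trainer.train()
```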

### Supported Tasks and Leaderboards

The dataset is commonly used for training and evaluation of the `image-super-resolution` task.

Unofficial [`super-image`](https://github.com/eugenesiow/super-image) leaderboards:
- [Scale 2](https://github.com/eugenesiow/super-image#scale-x2)
- [Scale 3](https://github.com/eugenesiow/super-image#scale-x3)
- [Scale 4](https://github.com/eugenesiow/super-image#scale-x4)
- [Scale 8](https://github.com/eugenesiow/super-image#scale-x8)

### Languages

Not applicable. DIV2K is an image-only dataset and contains no natural-language data.

## Dataset Structure

### Data Instances

An example from the `validation` split of the `bicubic_x2` configuration looks as follows.
```
{
    "hr": "/.cache/huggingface/datasets/downloads/extracted/DIV2K_valid_HR/0801.png",
    "lr": "/.cache/huggingface/datasets/downloads/extracted/DIV2K_valid_LR_bicubic/X2/0801x2.png"
}
```
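
Both columns are plain file paths, so downstream code decodes the images itself. A minimal hedged sketch that decodes them on the fly with Pillow via `Dataset.with_transform` (the `decode` helper is illustrative, and `with_transform` assumes a reasonably recent version of `datasets`):
```python
from datasets import load_dataset
from PIL import Image

dataset = load_dataset('eugenesiow/Div2k', 'bicubic_x2', split='train')

def decode(batch):
    # Open the referenced .png files; applied lazily on access, batch is a dict of lists.
    return {
        "lr": [Image.open(path) for path in batch["lr"]],
        "hr": [Image.open(path) for path in batch["hr"]],
    }

dataset = dataset.with_transform(decode)
lr_image, hr_image = dataset[0]["lr"], dataset[0]["hr"]  # PIL images; HR is ~2x the LR size here
```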

### Data Fields

The data fields are the same among all splits.

- `hr`: a `string` containing the path to the High Resolution (HR) `.png` image.
- `lr`: a `string` containing the path to the Low Resolution (LR) `.png` image.

### Data Splits

| name                   | train | validation |
|------------------------|------:|-----------:|
| bicubic_x2             |   800 |        100 |
| bicubic_x3             |   800 |        100 |
| bicubic_x4             |   800 |        100 |
| bicubic_x8             |   800 |        100 |
| unknown_x2             |   800 |        100 |
| unknown_x3             |   800 |        100 |
| unknown_x4             |   800 |        100 |
| realistic_mild_x4      |   800 |        100 |
| realistic_difficult_x4 |   800 |        100 |
| realistic_wild_x4      |   800 |        100 |

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

All the images are collected from the Internet, and the copyright belongs to the original owners. If any of the images
belongs to you and you would like it removed, please kindly inform the authors, and they will remove it from the dataset
immediately.

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

Please notice that this dataset is made available for academic research purpose only. All the images are
collected from the Internet, and the copyright belongs to the original owners. If any of the images belongs to
you and you would like it removed, please kindly inform the authors, and they will remove it from the dataset
immediately.

### Citation Information

```bibtex
@InProceedings{Agustsson_2017_CVPR_Workshops,
    author = {Agustsson, Eirikur and Timofte, Radu},
    title = {NTIRE 2017 Challenge on Single Image Super-Resolution: Dataset and Study},
    booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
    url = "http://www.vision.ee.ethz.ch/~timofter/publications/Agustsson-CVPRW-2017.pdf",
    month = {July},
    year = {2017}
}
```

### Contributions

Thanks to [@eugenesiow](https://github.com/eugenesiow) for adding this dataset.