Jorgvt committed on
Commit
9e2121f
1 Parent(s): 9b96538

:sparkles: Custom script to load the data.

Browse files
Files changed (1) hide show
  1. tid2008.py +108 -0
tid2008.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import json
3
+ import os
4
+ from PIL import Image
5
+
6
+ import pandas as pd
7
+ from huggingface_hub import hf_hub_download, snapshot_download
8
+ import datasets
9
+ import cv2
10
+
11
# Citation template left commented out until a real BibTeX entry is written.
# _CITATION = """\
# @InProceedings{huggingface:dataset,
# title = {A great new dataset},
# author={huggingface, Inc.
# },
# year={2020}
# }
# """

# Human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
Image Quality Assessment Dataset consisting of 25 reference images, 17 different distortions and 4 intensities per distortion.
In total there are 1700 (reference, distortion, MOS) tuples.
"""

# _HOMEPAGE = ""

# _LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
# _URLS = {
#     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
#     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
# }

# _REPO = "https://huggingface.co/datasets/frgfm/imagenette/resolve/main/metadata" # Stolen from imagenette.py
# Base URL of this dataset's files on the Hugging Face Hub.
# NOTE(review): _REPO is never referenced by the builder below — _split_generators
# downloads "image_pairs_mos.csv" by relative path; confirm whether _REPO is still needed.
_REPO = "https://huggingface.co/datasets/Jorgvt/TID2008/resolve/main"
40
class TID2008(datasets.GeneratorBasedBuilder):
    """TID2008 Image Quality Assessment dataset builder.

    Produces a single TRAIN split of (reference image, distorted image, MOS)
    examples, driven by the `image_pairs_mos.csv` index file hosted alongside
    the `reference_images/` and `distorted_images/` directories.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the example schema: two images plus a float MOS score."""
        features = datasets.Features(
            {
                "reference": datasets.Image(),
                "distorted": datasets.Image(),
                "mos": datasets.Value("float"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # supervised_keys / homepage / license / citation intentionally omitted for now.
        )

    def _split_generators(self, dl_manager):
        """Download the CSV index, resolve absolute image paths, and define splits.

        The CSV has columns `Reference`, `Distorted`, and `MOS`; image file
        names are rewritten into absolute paths under the directory that
        contains the downloaded CSV.
        """
        data_path = dl_manager.download("image_pairs_mos.csv")
        data = pd.read_csv(data_path, index_col=0)

        # os.path.dirname is portable, unlike the previous manual
        # "/".join(data_path.split("/")[:-1]) which assumed POSIX separators.
        root_path = os.path.dirname(data_path)
        # Images are assumed to live next to the CSV index — TODO confirm the
        # Hub repo layout actually materializes these directories locally.
        reference_path = os.path.join(root_path, "reference_images")
        distorted_path = os.path.join(root_path, "distorted_images")

        data["Reference"] = data["Reference"].apply(lambda x: os.path.join(reference_path, x))
        data["Distorted"] = data["Distorted"].apply(lambda x: os.path.join(distorted_path, x))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "reference": data["Reference"],
                    "distorted": data["Distorted"],
                    "mos": data["MOS"],
                    "split": "train",
                },
            )
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, reference, distorted, mos, split):
        """Yield (key, example) pairs pairing each image path with its MOS.

        `split` is unused but kept so the signature matches `gen_kwargs`.
        """
        for key, (ref, dist, m) in enumerate(zip(reference, distorted, mos)):
            yield key, {
                "reference": ref,
                "distorted": dist,
                "mos": m,
            }