import os

import datasets
import pandas as pd
_DESCRIPTION = """\
Image quality assessment dataset consisting of 25 reference images, 17 distortion types, and 4 intensity levels per distortion.
In total there are 25 x 17 x 4 = 1700 (reference, distorted, MOS) tuples.
"""
_REPO = "https://huggingface.co/datasets/Jorgvt/TID2008/resolve/main"


class TID2008(datasets.GeneratorBasedBuilder):
    """TID2008 Image Quality Dataset."""

    VERSION = datasets.Version("1.0.0")
    def _info(self):
        features = datasets.Features(
            {
                "reference": datasets.Image(),
                "distorted": datasets.Image(),
                "mos": datasets.Value("float32"),  # mean opinion score
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )
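
    # Layout assumed by the download calls below: the Hub repo hosts
    # `image_pairs_mos.csv` alongside `reference_images/` and
    # `distorted_images/`, and the CSV's `Reference`, `Distorted`, and `MOS`
    # columns name each image pair and its mean opinion score.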
    def _split_generators(self, dl_manager):
        # The CSV lists one (reference image, distorted image, MOS) triplet per row.
        data_path = dl_manager.download("image_pairs_mos.csv")
        data = pd.read_csv(data_path, index_col=0)
        reference_path = dl_manager.download("reference_images")
        distorted_path = dl_manager.download("distorted_images")
        # Turn the bare file names from the CSV into full local paths.
        data["Reference"] = data["Reference"].apply(lambda x: os.path.join(reference_path, x))
        data["Distorted"] = data["Distorted"].apply(lambda x: os.path.join(distorted_path, x))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "reference": data["Reference"],
                    "distorted": data["Distorted"],
                    "mos": data["MOS"],
                    "split": "train",
                },
            )
        ]
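
    # Note: the image columns are yielded as file paths below; the
    # `datasets.Image()` feature decodes them into PIL images on access.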
    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, reference, distorted, mos, split):
        for key, (ref, dist, m) in enumerate(zip(reference, distorted, mos)):
            yield key, {
                "reference": ref,
                "distorted": dist,
                "mos": m,
            }
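

# A minimal usage sketch (an assumption, not part of this script): loading the
# dataset through the Hub repo that hosts this file. Recent `datasets`
# releases require `trust_remote_code=True` for script-based datasets.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("Jorgvt/TID2008", split="train", trust_remote_code=True)
#     example = ds[0]
#     example["reference"]  # PIL.Image.Image
#     example["distorted"]  # PIL.Image.Image
#     example["mos"]        # float (mean opinion score)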