import os

import pandas as pd

import datasets
_DESCRIPTION = """\
Image Quality Assessment dataset consisting of 25 reference images, 17 distortion types, and 4 intensity levels per
distortion, for a total of 1700 (reference, distorted, MOS) tuples.
"""
# Root of this dataset repository on the Hub. Kept for reference; the relative
# file names passed to `dl_manager.download` below are resolved against the
# repository automatically.
_REPO = "https://huggingface.co/datasets/Jorgvt/TID2008/resolve/main"

class TID2008(datasets.GeneratorBasedBuilder):
"""TID2008 Image Quality Dataset"""
VERSION = datasets.Version("1.0.0")
    def _info(self):
        features = datasets.Features(
            {
                "reference": datasets.Image(),
                "distorted": datasets.Image(),
                "mos": datasets.Value("float32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )
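
    # Note: `datasets.Image()` decodes a plain file path yielded by
    # `_generate_examples` into a `PIL.Image.Image` when an example is
    # accessed, so the generator below can pass paths straight through.
    # A rough sketch of a decoded record (values illustrative):
    #
    #     {"reference": <PIL.Image.Image>, "distorted": <PIL.Image.Image>, "mos": 5.51}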
    def _split_generators(self, dl_manager):
        # `image_pairs_mos.csv` sits at the root of the dataset repository;
        # `dl_manager.download` resolves the relative name and caches the file.
        data_path = dl_manager.download("image_pairs_mos.csv")
        data = pd.read_csv(data_path, index_col=0)
        # Build repo-relative paths for every reference/distorted image and
        # download (cache) them all.
        reference_paths = data["Reference"].apply(lambda x: os.path.join("reference_images", x)).to_list()
        distorted_paths = data["Distorted"].apply(lambda x: os.path.join("distorted_images", x)).to_list()
        reference_paths = dl_manager.download(reference_paths)
        distorted_paths = dl_manager.download(distorted_paths)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "reference": reference_paths,
                    "distorted": distorted_paths,
                    "mos": data["MOS"],
                    "split": "train",
                },
            )
        ]
    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, reference, distorted, mos, split):
        # Yield (key, example) tuples; the image paths are decoded on access.
        for key, (ref, dist, m) in enumerate(zip(reference, distorted, mos)):
            yield key, {
                "reference": ref,
                "distorted": dist,
                "mos": m,
            }
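

# Example usage (a minimal sketch; it assumes this script lives in the
# `Jorgvt/TID2008` dataset repository on the Hub):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("Jorgvt/TID2008", split="train")
#     sample = ds[0]
#     sample["reference"]  # PIL.Image.Image, the undistorted reference
#     sample["distorted"]  # PIL.Image.Image, the distorted version
#     sample["mos"]        # float, mean opinion score for the distorted image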