"""Visual similarities discovery (VSD) is an important task
with broad e-commerce applications. Given an image of a
certain object, the goal of VSD is to retrieve images of
different objects with high perceptual visual similarity.
Although it is a highly addressed problem, the evaluation of
proposed methods for VSD is often based on a proxy of an
identification-retrieval task, evaluating the ability of a
model to retrieve different images of the same object. We
posit that evaluating VSD methods based on identification
tasks is limited, and faithful evaluation must rely on expert
annotations. In this paper, we introduce the first large-scale
fashion visual similarity benchmark dataset, consisting of
more than 110K expert-annotated image pairs."""
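
# A minimal usage sketch (not part of the original script): the repo ships annotation
# pairs only, so the image files are assumed to live in a local folder that the caller
# passes as `image_folder`. The local path below is hypothetical, and
# `trust_remote_code=True` is assumed to be required by recent `datasets` releases for
# script-based datasets.
#
#     import datasets
#
#     ds = datasets.load_dataset(
#         "vsd-benchmark/vsd-fashion",
#         "in_catalog_retrieval_zero_shot",
#         image_folder="path/to/fashion/images",
#         trust_remote_code=True,
#     )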

import json
from pathlib import Path
from typing import Optional, Union

import datasets
from datasets.data_files import DataFilesDict
from datasets.features import Features
from datasets.info import DatasetInfo
from huggingface_hub import snapshot_download

_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {A great new dataset},
  author = {huggingface, Inc.},
  year   = {2020}
}
"""

_DESCRIPTION = """\
Visual similarities discovery (VSD) is an important task
with broad e-commerce applications. Given an image of a
certain object, the goal of VSD is to retrieve images of
different objects with high perceptual visual similarity.
Although it is a highly addressed problem, the evaluation of
proposed methods for VSD is often based on a proxy of an
identification-retrieval task, evaluating the ability of a
model to retrieve different images of the same object. We
posit that evaluating VSD methods based on identification
tasks is limited, and faithful evaluation must rely on expert
annotations. In this paper, we introduce the first large-scale
fashion visual similarity benchmark dataset, consisting of
more than 110K expert-annotated image pairs.
"""

_HOMEPAGE = "https://vsd-benchmark.github.io/vsd/"

_LICENSE = "MIT"

_URL = "https://huggingface.co/datasets/vsd-benchmark/vsd-fashion/tree/main"

_HF_DATASET_ID = "vsd-benchmark/vsd-fashion"

class VSDFashionConfig(datasets.BuilderConfig):
    """BuilderConfig for VSDFashion."""

    def __init__(self, dataset_folder, split_folder, image_folder=None, **kwargs):
        """BuilderConfig for VSDFashion.

        Args:
            dataset_folder: name of the dataset folder inside the Hugging Face repo.
            split_folder: folder holding the manifest/seeds/annotation files for this task.
            image_folder: optional local folder containing the image files.
            **kwargs: keyword arguments forwarded to super.
        """
        super(VSDFashionConfig, self).__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.dataset_folder = dataset_folder
        self.split_folder = split_folder
        self.image_folder = image_folder


class VSDFashion(datasets.GeneratorBasedBuilder):
    """Builder for the VSD fashion benchmark annotation pairs."""

    def __init__(
        self,
        cache_dir: Optional[str] = None,
        dataset_name: Optional[str] = None,
        config_name: Optional[str] = None,
        hash: Optional[str] = None,
        base_path: Optional[str] = None,
        info: Optional[DatasetInfo] = None,
        features: Optional[Features] = None,
        token: Optional[Union[bool, str]] = None,
        use_auth_token="deprecated",
        repo_id: Optional[str] = None,
        data_files: Optional[Union[str, list, dict, DataFilesDict]] = None,
        data_dir: Optional[str] = None,
        storage_options: Optional[dict] = None,
        writer_batch_size: Optional[int] = None,
        name="deprecated",
        image_folder: Optional[str] = None,
        **config_kwargs,
    ):
        super().__init__(
            cache_dir=cache_dir,
            dataset_name=dataset_name,
            config_name=config_name,
            hash=hash,
            base_path=base_path,
            info=info,
            features=features,
            token=token,
            use_auth_token=use_auth_token,
            repo_id=repo_id,
            data_files=data_files,
            data_dir=data_dir,
            storage_options=storage_options,
            writer_batch_size=writer_batch_size,
            name=name,
            **config_kwargs,
        )

        # Images are not bundled with the annotations, so the caller must point the
        # builder at a local image folder. Fall back to the one on the selected config
        # (if any) when no explicit folder is passed.
        image_folder = image_folder if image_folder is not None else self.config.image_folder
        self.image_folder = Path(image_folder) if image_folder is not None else None

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        VSDFashionConfig(
            name="in_catalog_retrieval_zero_shot",
            description="Zero-shot (no training) visual similarity between fashion catalog queries and candidates.",
            dataset_folder='in_fashion',
            split_folder='gt_tagging',
        ),
        VSDFashionConfig(
            name="in_catalog_open_catalog",
            description="Training task for VSD where the queries in the train and test sets may overlap.",
            dataset_folder='in_fashion',
            split_folder='gt_tagging_split_open_catalog',
        ),
        VSDFashionConfig(
            name="in_catalog_closed_catalog",
            description="Training task for VSD where the queries in the train and test sets DO NOT overlap.",
            dataset_folder='in_fashion',
            split_folder='gt_tagging_split_closed_catalog',
        ),
        VSDFashionConfig(
            name="consumer-catalog_wild_zero_shot",
            description="Zero-shot task for matching a consumer-taken photo of a clothing item against visually similar catalog items. Pretraining on any data is allowed, except the consumer photos (queries).",
            dataset_folder='in_fashion',
            split_folder='gt_tagging_wild',
        ),
    ]

    DEFAULT_CONFIG_NAME = "in_catalog_retrieval_zero_shot"
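
    # Note on splits (see `_split_generators` below): the two zero-shot configs expose
    # only a "test" split, while "in_catalog_open_catalog" and "in_catalog_closed_catalog"
    # expose both "train" and "test". A hedged example of selecting a training config
    # (the local image path is hypothetical):
    #
    #     datasets.load_dataset(
    #         "vsd-benchmark/vsd-fashion",
    #         "in_catalog_closed_catalog",
    #         image_folder="path/to/fashion/images",
    #     )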

    def _info(self):
        features = datasets.Features(
            {
                "query": datasets.Image(),
                "candidate": datasets.Image(),
                "value": datasets.ClassLabel(num_classes=2, names=["neg", "pos"]),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
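
    # Under the features declared above, every example pairs a query image with a
    # candidate image and a binary similarity label. A sketch of one decoded record
    # (file names are illustrative, not actual dataset entries):
    #
    #     {
    #         "query": <PIL image loaded from image_folder / "q_0001.jpg">,
    #         "candidate": <PIL image loaded from image_folder / "c_0042.jpg">,
    #         "value": 1,  # ClassLabel index: 0 = "neg", 1 = "pos"
    #     }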

    def _split_generators(self, dl_manager):
        # Download the annotation files (not the images) from the Hugging Face Hub.
        dataset_root_path = Path(snapshot_download(_HF_DATASET_ID, repo_type='dataset'))

        dataset_path = dataset_root_path / self.config.dataset_folder
        task_data_path = dataset_path / self.config.split_folder

        if self.config.name == 'in_catalog_retrieval_zero_shot':
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "manifest_file": task_data_path / 'manifest.json',
                        "seeds_file": task_data_path / 'seeds.json',
                        "annotations_file": task_data_path / 'in_fashion_tags_dict.json',
                        "split": "test",
                        "images_folder": self.image_folder,
                        "metadata_file": dataset_path / 'dataset_metadata.json',
                    },
                ),
            ]
        elif self.config.name == 'consumer-catalog_wild_zero_shot':
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "manifest_file": task_data_path / 'manifest.json',
                        "seeds_file": None,
                        "annotations_file": task_data_path / 'in_fashion_outshop_tags_dict.json',
                        "split": "test",
                        "images_folder": self.image_folder,
                        "metadata_file": dataset_path / 'dataset_metadata_wild.json',
                        "gallery_phases": ['train'],
                    },
                ),
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "manifest_file": task_data_path / 'manifest_train.json',
                        "seeds_file": task_data_path / 'seeds_train.json',
                        "annotations_file": task_data_path / 'in_fashion_tags_dict_train.json',
                        "split": "train",
                        "images_folder": self.image_folder,
                        "metadata_file": dataset_path / 'dataset_metadata.json',
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "manifest_file": task_data_path / 'manifest_test.json',
                        "seeds_file": task_data_path / 'seeds_test.json',
                        "annotations_file": task_data_path / 'in_fashion_tags_dict_test.json',
                        "split": "test",
                        "images_folder": self.image_folder,
                        "metadata_file": dataset_path / 'dataset_metadata.json',
                    },
                ),
            ]

    def _generate_examples(self, annotations_file, **kwargs):
        # Each annotation record pairs the relative paths of a query and a candidate
        # image with a binary similarity label; paths are resolved against the
        # user-supplied image folder.
        with open(annotations_file, encoding="utf-8") as f:
            data = json.load(f)

        for key, row in enumerate(data):
            yield key, {
                "query": str(self.image_folder / row['key'][0]),
                "candidate": str(self.image_folder / row['key'][1]),
                "value": row['value'],
            }
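
# The annotation files parsed in `_generate_examples` are assumed to be JSON arrays of
# records shaped roughly as below (file names are illustrative, not actual entries):
#
#     [
#         {"key": ["q_0001.jpg", "c_0042.jpg"], "value": 1},
#         ...
#     ]
#
# where `key` holds the query/candidate image paths relative to `image_folder` and
# `value` is the binary similarity label mapped onto the "neg"/"pos" ClassLabel
# declared in `_info`.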