|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
""" WinoGAViL Loading Script """ |
|
|
|
|
|
import json |
|
import os |
|
import pandas as pd |
|
import datasets |
|
from huggingface_hub import hf_hub_url |
|
|
|
|
|
# BibTeX citation emitted in the generated DatasetInfo.
_CITATION = """\

@article{bitton2022winogavil,

  title={WinoGAViL: Gamified Association Benchmark to Challenge Vision-and-Language Models},

  author={Bitton, Yonatan and Guetta, Nitzan Bitton and Yosef, Ron and Elovici, Yuval and Bansal, Mohit and Stanovsky, Gabriel and Schwartz, Roy},

  journal={arXiv preprint arXiv:2207.12576},

  year={2022}

}

"""


# Long-form dataset summary shown on the Hub dataset card.
_DESCRIPTION = """\

WinoGAViL is a challenging dataset for evaluating vision-and-language commonsense reasoning abilities. Given a set of images, a cue, and a number K, the task is to select the K images that best fits the association. This dataset was collected via the WinoGAViL online game to collect vision-and-language associations, (e.g., werewolves to a full moon). Inspired by the popular card game Codenames, a spymaster gives a textual cue related to several visual candidates, and another player has to identify them. Human players are rewarded for creating associations that are challenging for a rival AI model but still solvable by other human players. We evaluate several state-of-the-art vision-and-language models, finding that they are intuitive for humans (>90% Jaccard index) but challenging for state-of-the-art AI models, where the best model (ViLT) achieves a score of 52%, succeeding mostly where the cue is visually salient. Our analysis as well as the feedback we collect from players indicate that the collected associations require diverse reasoning skills, including general knowledge, common sense, abstraction, and more.

"""


_HOMEPAGE = "https://winogavil.github.io/"


_LICENSE = "https://creativecommons.org/licenses/by/4.0/"


# NOTE(review): not referenced anywhere in this script (downloads go through
# hf_hub_url instead), and it points at the HTML "blob" page rather than a raw
# file path — confirm whether it can be removed or should use "resolve/main".
_URL = "https://huggingface.co/datasets/nlphuji/winogavil/blob/main"
|
|
|
class Winogavil(datasets.GeneratorBasedBuilder):
    """Dataset builder for WinoGAViL.

    Downloads the dataset CSV from the Hugging Face Hub and yields one
    example per row, with the JSON-encoded list columns parsed and the
    per-candidate image URLs attached.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="TEST", version=VERSION, description="winogavil dataset"),
    ]

    def _info(self):
        """Return the DatasetInfo describing the example schema."""
        img = datasets.Image()
        features = datasets.Features(
            {
                "candidates": [datasets.Value("string")],
                "candidates_images": [img],
                "cue": datasets.Value("string"),
                "associations": [datasets.Value("string")],
                'score_fool_the_ai': datasets.Value("float64"),
                'num_associations': datasets.Value("int64"),
                'num_candidates': datasets.Value("int64"),
                'solvers_jaccard_mean': datasets.Value("float64"),
                'solvers_jaccard_std': datasets.Value("float64"),
                'ID': datasets.Value("int64"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the dataset CSV from the Hub; expose a single TEST split."""
        data_dir = dl_manager.download_and_extract({
            "examples_csv": hf_hub_url("datasets/nlphuji/winogavil", filename="winogavil_dataset.csv")})
        return [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=data_dir)]

    def _generate_examples(self, examples_csv):
        """Yield (ID, example) pairs from the downloaded CSV.

        The 'candidates' and 'associations' columns are stored as JSON strings
        in the CSV and are decoded into lists; 'candidates_images' is derived
        by mapping each candidate name to its public image URL.
        """
        df = pd.read_csv(examples_csv)
        for r_idx, r in df.iterrows():
            r_dict = r.to_dict()
            r_dict['candidates'] = json.loads(r_dict['candidates'])
            # Fix: build URLs from the *parsed* candidate list and store them on
            # the dict that gets yielded (the original truncated line assigned
            # onto the pandas Series, so the images never reached the example).
            r_dict['candidates_images'] = [get_img_url(x) for x in r_dict['candidates']]
            r_dict['associations'] = json.loads(r_dict['associations'])
            key = r_dict['ID']
            yield key, r_dict
|
|
|
def get_img_url(image_name):
    """Return the public S3 URL for a WinoGAViL image.

    *image_name* is the bare image identifier; the '.jpg' extension is
    appended here.
    """
    return f"https://winogavil.s3.eu-west-1.amazonaws.com/{image_name}.jpg"
|
|