Datasets:
Tasks:
Visual Question Answering
Formats:
parquet
Sub-tasks:
visual-question-answering
Languages:
English
Size:
100K - 1M
ArXiv:
License:
Update files from the datasets library (from 1.18.0)
Browse files
Release notes: https://github.com/huggingface/datasets/releases/tag/1.18.0
- README.md +2 -1
- create_dummy_data.py +6 -5
README.md
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
---
|
2 |
paperswithcode_id: compguesswhat
|
|
|
3 |
---
|
4 |
|
5 |
# Dataset Card for "compguesswhat"
|
@@ -259,4 +260,4 @@ The data fields are the same among all splits.
|
|
259 |
|
260 |
### Contributions
|
261 |
|
262 |
-
Thanks to [@thomwolf](https://github.com/thomwolf), [@aleSuglia](https://github.com/aleSuglia), [@lhoestq](https://github.com/lhoestq) for adding this dataset.
|
|
|
1 |
---
|
2 |
paperswithcode_id: compguesswhat
|
3 |
+
pretty_name: CompGuessWhat?!
|
4 |
---
|
5 |
|
6 |
# Dataset Card for "compguesswhat"
|
|
|
260 |
|
261 |
### Contributions
|
262 |
|
263 |
+
Thanks to [@thomwolf](https://github.com/thomwolf), [@aleSuglia](https://github.com/aleSuglia), [@lhoestq](https://github.com/lhoestq) for adding this dataset.
|
create_dummy_data.py
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
import gzip
|
2 |
import json
|
|
|
3 |
import os
|
4 |
from argparse import ArgumentParser
|
5 |
|
@@ -50,10 +51,10 @@ def create_dummy_data_for_split(data_path, dataset_name, dataset_version, data_f
|
|
50 |
os.makedirs(dummy_data_path)
|
51 |
|
52 |
for split_name, split_file in data_files.items():
|
53 |
-
|
54 |
|
55 |
split_filepath = os.path.join(data_path, full_dataset_name, dataset_version, split_file)
|
56 |
-
|
57 |
with gzip.open(split_filepath) as in_file:
|
58 |
dummy_filepath = os.path.join(dummy_data_path, split_file)
|
59 |
with gzip.open(dummy_filepath, mode="w") as out_file:
|
@@ -81,12 +82,12 @@ def main(args):
|
|
81 |
|
82 |
dataset_version = dataset_info["compguesswhat-original"]["version"]["version_str"]
|
83 |
|
84 |
-
|
85 |
|
86 |
-
|
87 |
create_dummy_data_for_split(args.data_path, "original", dataset_version, original_data_files)
|
88 |
|
89 |
-
|
90 |
create_dummy_data_for_split(args.data_path, "zero_shot", dataset_version, zs_data_files)
|
91 |
|
92 |
|
|
|
1 |
import gzip
|
2 |
import json
|
3 |
+
import logging
|
4 |
import os
|
5 |
from argparse import ArgumentParser
|
6 |
|
|
|
51 |
os.makedirs(dummy_data_path)
|
52 |
|
53 |
for split_name, split_file in data_files.items():
|
54 |
+
logging.info(f"Generating dummy data for split {split_name} (num. examples = {args.examples})")
|
55 |
|
56 |
split_filepath = os.path.join(data_path, full_dataset_name, dataset_version, split_file)
|
57 |
+
logging.info(f"Reading split file {split_filepath}")
|
58 |
with gzip.open(split_filepath) as in_file:
|
59 |
dummy_filepath = os.path.join(dummy_data_path, split_file)
|
60 |
with gzip.open(dummy_filepath, mode="w") as out_file:
|
|
|
82 |
|
83 |
dataset_version = dataset_info["compguesswhat-original"]["version"]["version_str"]
|
84 |
|
85 |
+
logging.info(f"Creating dummy data for CompGuessWhat?! {dataset_version}")
|
86 |
|
87 |
+
logging.info("Original dataset...")
|
88 |
create_dummy_data_for_split(args.data_path, "original", dataset_version, original_data_files)
|
89 |
|
90 |
+
logging.info("Zero-shot dataset...")
|
91 |
create_dummy_data_for_split(args.data_path, "zero_shot", dataset_version, zs_data_files)
|
92 |
|
93 |
|