Datasets:
Tasks: Image-to-Text
Formats: parquet
Sub-tasks: image-captioning
Languages: English
Size: 1M - 10M
License:
Commit ce994f5
Parent(s): e1a96a4
Delete legacy JSON metadata (#2)
- Delete legacy JSON metadata (7da48ca2d0aad6d9df9f3e8fcd2a6db38748de1e)
- dataset_infos.json +0 -1
dataset_infos.json
DELETED
@@ -1 +0,0 @@
-
{"default": {"description": "Image captioning dataset\nThe resulting dataset (version 1.1) has been split into Training, Validation, and Test splits. The Training split consists of 3,318,333 image-URL/caption pairs, with a total number of 51,201 total token types in the captions (i.e., total vocabulary). The average number of tokens per captions is 10.3 (standard deviation of 4.5), while the median is 9.0 tokens per caption. The Validation split consists of 15,840 image-URL/caption pairs, with similar statistics.\n", "citation": "@inproceedings{sharma-etal-2018-conceptual,\n title = \"Conceptual Captions: A Cleaned, Hypernymed, Image Alt-text Dataset For Automatic Image Captioning\",\n author = \"Sharma, Piyush and\n Ding, Nan and\n Goodman, Sebastian and\n Soricut, Radu\",\n booktitle = \"Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = jul,\n year = \"2018\",\n address = \"Melbourne, Australia\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P18-1238\",\n doi = \"10.18653/v1/P18-1238\",\n pages = \"2556--2565\",\n abstract = \"We present a new dataset of image caption annotations, Conceptual Captions, which contains an order of magnitude more images than the MS-COCO dataset (Lin et al., 2014) and represents a wider variety of both images and image caption styles. We achieve this by extracting and filtering image caption annotations from billions of webpages. We also present quantitative evaluations of a number of image captioning models and show that a model architecture based on Inception-ResNetv2 (Szegedy et al., 2016) for image-feature extraction and Transformer (Vaswani et al., 2017) for sequence modeling achieves the best performance when trained on the Conceptual Captions dataset.\",\n}\n", "homepage": "http://data.statmt.org/cc-100/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "caption": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "conceptual_captions", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 623230370, "num_examples": 3318333, "dataset_name": "conceptual_captions"}, "validation": {"name": "validation", "num_bytes": 2846024, "num_examples": 15840, "dataset_name": "conceptual_captions"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 626076394, "size_in_bytes": 626076394}, "unlabeled": {"description": "Google's Conceptual Captions dataset has more than 3 million images, paired with natural-language captions.\nIn contrast with the curated style of the MS-COCO images, Conceptual Captions images and their raw descriptions are harvested from the web,\nand therefore represent a wider variety of styles. 
The raw descriptions are harvested from the Alt-text HTML attribute associated with web images.\nThe authors developed an automatic pipeline that extracts, filters, and transforms candidate image/caption pairs, with the goal of achieving a balance of cleanliness,\ninformativeness, fluency, and learnability of the resulting captions.\n", "citation": "@inproceedings{sharma2018conceptual,\n title = {Conceptual Captions: A Cleaned, Hypernymed, Image Alt-text Dataset For Automatic Image Captioning},\n author = {Sharma, Piyush and Ding, Nan and Goodman, Sebastian and Soricut, Radu},\n booktitle = {Proceedings of ACL},\n year = {2018},\n}\n", "homepage": "http://data.statmt.org/cc-100/", "license": "The dataset may be freely used for any purpose, although acknowledgement of\nGoogle LLC (\"Google\") as the data source would be appreciated. The dataset is\nprovided \"AS IS\" without any warranty, express or implied. Google disclaims all\nliability for any damages, direct or indirect, resulting from the use of the\ndataset.\n", "features": {"image_url": {"dtype": "string", "id": null, "_type": "Value"}, "caption": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "conceptual_captions", "config_name": "unlabeled", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 584520156, "num_examples": 3318333, "dataset_name": "conceptual_captions"}, "validation": {"name": "validation", "num_bytes": 2698726, "num_examples": 15840, "dataset_name": "conceptual_captions"}}, "download_checksums": {"https://storage.googleapis.com/gcc-data/Train/GCC-training.tsv?_ga=2.191230122.-1896153081.1529438250": {"num_bytes": 564607502, "checksum": "eab84e5ebc713a41a6b1f6ae6fa3d6617821a13b03fe24e16004cc4aac189635"}, "https://storage.googleapis.com/gcc-data/Validation/GCC-1.1.0-Validation.tsv?_ga=2.141047602.-1896153081.1529438250": {"num_bytes": 2603670, "checksum": "528a0c939ec2ad8d1740bd3f459a51e9fe67643050e29f68fabb6da3f8ac985d"}}, "download_size": 567211172, "post_processing_size": null, "dataset_size": 587218882, "size_in_bytes": 1154430054}, "labeled": {"description": "Google's Conceptual Captions dataset has more than 3 million images, paired with natural-language captions.\nIn contrast with the curated style of the MS-COCO images, Conceptual Captions images and their raw descriptions are harvested from the web,\nand therefore represent a wider variety of styles. The raw descriptions are harvested from the Alt-text HTML attribute associated with web images.\nThe authors developed an automatic pipeline that extracts, filters, and transforms candidate image/caption pairs, with the goal of achieving a balance of cleanliness,\ninformativeness, fluency, and learnability of the resulting captions.\n", "citation": "@inproceedings{sharma2018conceptual,\n title = {Conceptual Captions: A Cleaned, Hypernymed, Image Alt-text Dataset For Automatic Image Captioning},\n author = {Sharma, Piyush and Ding, Nan and Goodman, Sebastian and Soricut, Radu},\n booktitle = {Proceedings of ACL},\n year = {2018},\n}\n", "homepage": "http://data.statmt.org/cc-100/", "license": "The dataset may be freely used for any purpose, although acknowledgement of\nGoogle LLC (\"Google\") as the data source would be appreciated. The dataset is\nprovided \"AS IS\" without any warranty, express or implied. 
Google disclaims all\nliability for any damages, direct or indirect, resulting from the use of the\ndataset.\n", "features": {"image_url": {"dtype": "string", "id": null, "_type": "Value"}, "caption": {"dtype": "string", "id": null, "_type": "Value"}, "labels": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "MIDs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "confidence_scores": {"feature": {"dtype": "float64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "conceptual_captions", "config_name": "labeled", "version": "0.0.0", "splits": {"train": {"name": "train", "num_bytes": 1199330856, "num_examples": 2007090, "dataset_name": "conceptual_captions"}}, "download_checksums": {"https://storage.googleapis.com/conceptual-captions-v1-1-labels/Image_Labels_Subset_Train_GCC-Labels-training.tsv?_ga=2.234395421.-20118413.1607637118": {"num_bytes": 1282463277, "checksum": "d63f475306f376e4df2d365003f321468032278cd241d4c9eefc3c3e232baa38"}}, "download_size": 1282463277, "post_processing_size": null, "dataset_size": 1199330856, "size_in_bytes": 2481794133}}
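
The deleted dataset_infos.json held the legacy per-config metadata (features, splits, sizes, download checksums) for the "default", "unlabeled", and "labeled" configs; the same information now typically lives in the dataset card's YAML metadata and the Parquet exports. For reference, a minimal sketch of inspecting that metadata programmatically; the repo ID "conceptual_captions" and the config name are illustrative assumptions, not part of this commit:

    from datasets import load_dataset_builder

    # Fetch builder metadata without downloading the actual data.
    # Repo ID and config name are assumptions for illustration.
    builder = load_dataset_builder("conceptual_captions", "unlabeled")

    # Expected from the card metadata: image_url (string), caption (string)
    print(builder.info.features)
    # Expected splits: train (~3.3M examples), validation (~15.8K examples)
    print(builder.info.splits)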