Datasets: textvqa

Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced
Annotations Creators: crowdsourced
Source Datasets: original
License: CC BY 4.0
Commit 30a47cf by albertvillanova (HF staff)
1 parent: a014c2c

Delete legacy JSON metadata (#2)


- Delete legacy JSON metadata (ee5a9227d8b24314a8bdc911749aa7c5bcc6e36f)
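With the legacy file removed, the same metadata (description, license, features, split sizes) remains available through the `datasets` library itself. A minimal sketch of reading it programmatically, assuming `datasets` is installed and the dataset resolves on the Hub under the `textvqa` id with the `textvqa` config recorded in the deleted file:

```python
# Minimal sketch: read TextVQA metadata without the legacy dataset_infos.json.
# Assumes the `datasets` library is installed and the id/config "textvqa" resolves on the Hub.
from datasets import load_dataset_builder

builder = load_dataset_builder("textvqa", "textvqa")
info = builder.info

print(info.license)       # "CC BY 4.0" according to the deleted metadata
print(info.features)      # image, question, question_tokens, answers, image_classes, ...
for name, split in (info.splits or {}).items():   # splits may be unset until prepared
    print(name, split.num_examples)
```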

Files changed (1)
  1. dataset_infos.json +0 -1
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"train": {"description": "TextVQA requires models to read and reason about text in images to answer questions about them. \nSpecifically, models need to incorporate a new modality of text present in the images and reason \nover it to answer TextVQA questions. TextVQA dataset contains 45,336 questions over 28,408 images\nfrom the OpenImages dataset. \n", "citation": "\n@inproceedings{singh2019towards,\n title={Towards VQA Models That Can Read},\n author={Singh, Amanpreet and Natarjan, Vivek and Shah, Meet and Jiang, Yu and Chen, Xinlei and Parikh, Devi and Rohrbach, Marcus},\n booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},\n pages={8317-8326},\n year={2019}\n}\n", "homepage": "https://textvqa.org", "license": "CC BY 4.0", "features": {"image_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "image": {"decode": true, "id": null, "_type": "Image"}, "image_width": {"dtype": "int32", "id": null, "_type": "Value"}, "image_height": {"dtype": "int32", "id": null, "_type": "Value"}, "flickr_original_url": {"dtype": "string", "id": null, "_type": "Value"}, "flickr_300k_url": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "image_classes": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "set_name": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "textvqa", "config_name": "train", "version": {"version_str": "0.5.1", "description": "", "major": 0, "minor": 5, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 21381310, "num_examples": 34602, "dataset_name": "textvqa"}, "validation": {"name": "validation", "num_bytes": 3077854, "num_examples": 5000, "dataset_name": "textvqa"}, "test": {"name": "test", "num_bytes": 3025046, "num_examples": 5734, "dataset_name": "textvqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_train.json": {"num_bytes": 21634937, "checksum": "95f5c407db56cba56a177799dcd685a7cc0ec7c0d851b59910acf7786d31b68a"}, "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_val.json": {"num_bytes": 3116162, "checksum": "4ceb5aadc1a41719d0a3e4dfdf06838bcfee1db569a9a65ee67d31c99893081d"}, "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_test.json": {"num_bytes": 2770520, "checksum": "d8d4b738101087bac5a6182d22d9aef3772e08e77827e6cf6116808910b75db2"}, "https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip": {"num_bytes": 7072297970, "checksum": "ecf35005640d0708eae185aab1c0a10f89b2db7420b29185a1ed92a8f4290498"}, "https://dl.fbaipublicfiles.com/textvqa/images/test_images.zip": {"num_bytes": 970296721, "checksum": "1276b908994c444c46484fb21e9e15fcda1be9c675f6ad727489e52eea68cbcd"}}, "download_size": 8070116310, "post_processing_size": null, "dataset_size": 27484210, "size_in_bytes": 8097600520}, "val": {"description": "TextVQA requires models to read and reason about text in images to answer questions about them. 
\nSpecifically, models need to incorporate a new modality of text present in the images and reason \nover it to answer TextVQA questions. TextVQA dataset contains 45,336 questions over 28,408 images\nfrom the OpenImages dataset. \n", "citation": "\n@inproceedings{singh2019towards,\n title={Towards VQA Models That Can Read},\n author={Singh, Amanpreet and Natarjan, Vivek and Shah, Meet and Jiang, Yu and Chen, Xinlei and Parikh, Devi and Rohrbach, Marcus},\n booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},\n pages={8317-8326},\n year={2019}\n}\n", "homepage": "https://textvqa.org", "license": "CC BY 4.0", "features": {"image_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "image": {"decode": true, "id": null, "_type": "Image"}, "image_width": {"dtype": "int32", "id": null, "_type": "Value"}, "image_height": {"dtype": "int32", "id": null, "_type": "Value"}, "flickr_original_url": {"dtype": "string", "id": null, "_type": "Value"}, "flickr_300k_url": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "image_classes": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "set_name": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "textvqa", "config_name": "val", "version": {"version_str": "0.5.1", "description": "", "major": 0, "minor": 5, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 21381310, "num_examples": 34602, "dataset_name": "textvqa"}, "validation": {"name": "validation", "num_bytes": 3077854, "num_examples": 5000, "dataset_name": "textvqa"}, "test": {"name": "test", "num_bytes": 3025046, "num_examples": 5734, "dataset_name": "textvqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_train.json": {"num_bytes": 21634937, "checksum": "95f5c407db56cba56a177799dcd685a7cc0ec7c0d851b59910acf7786d31b68a"}, "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_val.json": {"num_bytes": 3116162, "checksum": "4ceb5aadc1a41719d0a3e4dfdf06838bcfee1db569a9a65ee67d31c99893081d"}, "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_test.json": {"num_bytes": 2770520, "checksum": "d8d4b738101087bac5a6182d22d9aef3772e08e77827e6cf6116808910b75db2"}, "https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip": {"num_bytes": 7072297970, "checksum": "ecf35005640d0708eae185aab1c0a10f89b2db7420b29185a1ed92a8f4290498"}, "https://dl.fbaipublicfiles.com/textvqa/images/test_images.zip": {"num_bytes": 970296721, "checksum": "1276b908994c444c46484fb21e9e15fcda1be9c675f6ad727489e52eea68cbcd"}}, "download_size": 8070116310, "post_processing_size": null, "dataset_size": 27484210, "size_in_bytes": 8097600520}, "test": {"description": "TextVQA requires models to read and reason about text in images to answer questions about them. \nSpecifically, models need to incorporate a new modality of text present in the images and reason \nover it to answer TextVQA questions. TextVQA dataset contains 45,336 questions over 28,408 images\nfrom the OpenImages dataset. 
\n", "citation": "\n@inproceedings{singh2019towards,\n title={Towards VQA Models That Can Read},\n author={Singh, Amanpreet and Natarjan, Vivek and Shah, Meet and Jiang, Yu and Chen, Xinlei and Parikh, Devi and Rohrbach, Marcus},\n booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},\n pages={8317-8326},\n year={2019}\n}\n", "homepage": "https://textvqa.org", "license": "CC BY 4.0", "features": {"image_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "image": {"decode": true, "id": null, "_type": "Image"}, "image_width": {"dtype": "int32", "id": null, "_type": "Value"}, "image_height": {"dtype": "int32", "id": null, "_type": "Value"}, "flickr_original_url": {"dtype": "string", "id": null, "_type": "Value"}, "flickr_300k_url": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "image_classes": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "set_name": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "textvqa", "config_name": "test", "version": {"version_str": "0.5.1", "description": "", "major": 0, "minor": 5, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 21381310, "num_examples": 34602, "dataset_name": "textvqa"}, "validation": {"name": "validation", "num_bytes": 3077854, "num_examples": 5000, "dataset_name": "textvqa"}, "test": {"name": "test", "num_bytes": 3025046, "num_examples": 5734, "dataset_name": "textvqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_train.json": {"num_bytes": 21634937, "checksum": "95f5c407db56cba56a177799dcd685a7cc0ec7c0d851b59910acf7786d31b68a"}, "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_val.json": {"num_bytes": 3116162, "checksum": "4ceb5aadc1a41719d0a3e4dfdf06838bcfee1db569a9a65ee67d31c99893081d"}, "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_test.json": {"num_bytes": 2770520, "checksum": "d8d4b738101087bac5a6182d22d9aef3772e08e77827e6cf6116808910b75db2"}, "https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip": {"num_bytes": 7072297970, "checksum": "ecf35005640d0708eae185aab1c0a10f89b2db7420b29185a1ed92a8f4290498"}, "https://dl.fbaipublicfiles.com/textvqa/images/test_images.zip": {"num_bytes": 970296721, "checksum": "1276b908994c444c46484fb21e9e15fcda1be9c675f6ad727489e52eea68cbcd"}}, "download_size": 8070116310, "post_processing_size": null, "dataset_size": 27484210, "size_in_bytes": 8097600520}, "textvqa": {"description": "TextVQA requires models to read and reason about text in images to answer questions about them.\nSpecifically, models need to incorporate a new modality of text present in the images and reason\nover it to answer TextVQA questions. 
TextVQA dataset contains 45,336 questions over 28,408 images\nfrom the OpenImages dataset.\n", "citation": "\n@inproceedings{singh2019towards,\n title={Towards VQA Models That Can Read},\n author={Singh, Amanpreet and Natarjan, Vivek and Shah, Meet and Jiang, Yu and Chen, Xinlei and Batra, Dhruv and Parikh, Devi and Rohrbach, Marcus},\n booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},\n pages={8317-8326},\n year={2019}\n}\n", "homepage": "https://textvqa.org", "license": "CC BY 4.0", "features": {"image_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "image": {"decode": true, "id": null, "_type": "Image"}, "image_width": {"dtype": "int32", "id": null, "_type": "Value"}, "image_height": {"dtype": "int32", "id": null, "_type": "Value"}, "flickr_original_url": {"dtype": "string", "id": null, "_type": "Value"}, "flickr_300k_url": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "image_classes": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "set_name": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "textvqa", "config_name": "textvqa", "version": {"version_str": "0.5.1", "description": null, "major": 0, "minor": 5, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 22073350, "num_examples": 34602, "dataset_name": "textvqa"}, "validation": {"name": "validation", "num_bytes": 3177854, "num_examples": 5000, "dataset_name": "textvqa"}, "test": {"name": "test", "num_bytes": 3139726, "num_examples": 5734, "dataset_name": "textvqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_train.json": {"num_bytes": 21634937, "checksum": "95f5c407db56cba56a177799dcd685a7cc0ec7c0d851b59910acf7786d31b68a"}, "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_val.json": {"num_bytes": 3116162, "checksum": "4ceb5aadc1a41719d0a3e4dfdf06838bcfee1db569a9a65ee67d31c99893081d"}, "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_test.json": {"num_bytes": 2770520, "checksum": "d8d4b738101087bac5a6182d22d9aef3772e08e77827e6cf6116808910b75db2"}, "https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip": {"num_bytes": 7072297970, "checksum": "ecf35005640d0708eae185aab1c0a10f89b2db7420b29185a1ed92a8f4290498"}, "https://dl.fbaipublicfiles.com/textvqa/images/test_images.zip": {"num_bytes": 970296721, "checksum": "1276b908994c444c46484fb21e9e15fcda1be9c675f6ad727489e52eea68cbcd"}}, "download_size": 8070116310, "post_processing_size": null, "dataset_size": 28390930, "size_in_bytes": 8098507240}}
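For reference, the schema recorded above corresponds to per-example records with fields such as `image_id`, `question`, `question_tokens`, `answers`, `image`, `image_classes`, and `set_name`, split into 34,602 train / 5,000 validation / 5,734 test examples. A hedged usage sketch, again assuming the `textvqa` id and config still resolve on the Hub (the image archives total roughly 8 GB per the recorded `download_size`):

```python
# Hedged sketch of loading and inspecting one TextVQA example; field names follow
# the metadata recorded in the deleted dataset_infos.json.
from datasets import load_dataset

ds = load_dataset("textvqa", "textvqa", split="validation")
print(len(ds))                 # 5,000 examples per the recorded split sizes

example = ds[0]
print(example["question"])     # free-form question requiring reading text in the image
print(example["answers"])      # list of crowdsourced answer strings
img = example["image"]         # PIL image decoded by the Image feature
print(img.size, example["image_width"], example["image_height"])
```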