Dataset: tab_fact
Sub-tasks: fact-checking
Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: crowdsourced
Annotations Creators: crowdsourced
Source Datasets: original
Commit e9ee727, committed by albertvillanova
Parent: 00281d7

Delete legacy JSON metadata (#4)


- Delete legacy JSON metadata (56dfa096b4bfe4fb705d56557c9cbb81ffbff752)

Files changed (1)
  1. dataset_infos.json +0 -1
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"tab_fact": {"description": "The problem of verifying whether a textual hypothesis holds the truth based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are restricted to dealing with unstructured textual evidence (e.g., sentences and passages, a pool of passages), while verification using structured forms of evidence, such as tables, graphs, and databases, remains unexplored. TABFACT is large scale dataset with 16k Wikipedia tables as evidence for 118k human annotated statements designed for fact verification with semi-structured evidence. The statements are labeled as either ENTAILED or REFUTED. TABFACT is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning.\n", "citation": "@inproceedings{2019TabFactA,\n title={TabFact : A Large-scale Dataset for Table-based Fact Verification},\n author={Wenhu Chen, Hongmin Wang, Jianshu Chen, Yunkai Zhang, Hong Wang, Shiyang Li, Xiyou Zhou and William Yang Wang},\n booktitle = {International Conference on Learning Representations (ICLR)},\n address = {Addis Ababa, Ethiopia},\n month = {April},\n year = {2020}\n}\n", "homepage": "https://tabfact.github.io/", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "table_id": {"dtype": "string", "id": null, "_type": "Value"}, "table_text": {"dtype": "string", "id": null, "_type": "Value"}, "table_caption": {"dtype": "string", "id": null, "_type": "Value"}, "statement": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["refuted", "entailed"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "tab_fact", "config_name": "tab_fact", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 99852664, "num_examples": 92283, "dataset_name": "tab_fact"}, "validation": {"name": "validation", "num_bytes": 13846872, "num_examples": 12792, "dataset_name": "tab_fact"}, "test": {"name": "test", "num_bytes": 13493391, "num_examples": 12779, "dataset_name": "tab_fact"}}, "download_checksums": {"https://github.com/wenhuchen/Table-Fact-Checking/archive/948b5560e2f7f8c9139bd91c7f093346a2bb56a8.zip": {"num_bytes": 196508436, "checksum": "4f0bffb6e53b59760173dac82979a0e5272c2d97514659ac3f4b44c7a008df4a"}}, "download_size": 196508436, "post_processing_size": null, "dataset_size": 127192927, "size_in_bytes": 323701363}, "blind_test": {"description": "The problem of verifying whether a textual hypothesis holds the truth based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are restricted to dealing with unstructured textual evidence (e.g., sentences and passages, a pool of passages), while verification using structured forms of evidence, such as tables, graphs, and databases, remains unexplored. TABFACT is large scale dataset with 16k Wikipedia tables as evidence for 118k human annotated statements designed for fact verification with semi-structured evidence. The statements are labeled as either ENTAILED or REFUTED. 
TABFACT is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning.\n", "citation": "@inproceedings{2019TabFactA,\n title={TabFact : A Large-scale Dataset for Table-based Fact Verification},\n author={Wenhu Chen, Hongmin Wang, Jianshu Chen, Yunkai Zhang, Hong Wang, Shiyang Li, Xiyou Zhou and William Yang Wang},\n booktitle = {International Conference on Learning Representations (ICLR)},\n address = {Addis Ababa, Ethiopia},\n month = {April},\n year = {2020}\n}\n", "homepage": "https://tabfact.github.io/", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "table_id": {"dtype": "string", "id": null, "_type": "Value"}, "table_text": {"dtype": "string", "id": null, "_type": "Value"}, "table_caption": {"dtype": "string", "id": null, "_type": "Value"}, "statement": {"dtype": "string", "id": null, "_type": "Value"}, "test_id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "tab_fact", "config_name": "blind_test", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 10954442, "num_examples": 9750, "dataset_name": "tab_fact"}}, "download_checksums": {"https://github.com/wenhuchen/Table-Fact-Checking/archive/948b5560e2f7f8c9139bd91c7f093346a2bb56a8.zip": {"num_bytes": 196508436, "checksum": "4f0bffb6e53b59760173dac82979a0e5272c2d97514659ac3f4b44c7a008df4a"}}, "download_size": 196508436, "post_processing_size": null, "dataset_size": 10954442, "size_in_bytes": 207462878}}
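
For reference, a minimal sketch (not part of this commit) of how the features and splits recorded in the deleted dataset_infos.json show up when the dataset is loaded with the datasets library; the config names "tab_fact" and "blind_test" are taken from the JSON above, and the exact fields printed depend on the version of the loader in use.

    from datasets import load_dataset

    # Main config: train/validation/test splits; per the deleted metadata,
    # "label" is a ClassLabel with names ["refuted", "entailed"].
    tab_fact = load_dataset("tab_fact", "tab_fact")
    print(tab_fact)                    # split names and example counts
    print(tab_fact["train"].features)  # id, table_id, table_text, table_caption, statement, label

    # Unlabeled leaderboard config: a single "test" split with a "test_id"
    # string field in place of "label".
    blind_test = load_dataset("tab_fact", "blind_test")
    print(blind_test["test"].features)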