albertvillanova committed on
Commit
599e5d6
1 Parent(s): 8979c77

Delete legacy JSON metadata (#2)


- Delete legacy JSON metadata (b3c9c0f0609601dd154a611632b082e690ccaa9b)

Files changed (1)
dataset_infos.json +0 -1
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"xsum_factuality": {"description": "Neural abstractive summarization models are highly prone to hallucinate content that is unfaithful to the input\ndocument. The popular metric such as ROUGE fails to show the severity of the problem. The dataset consists of\nfaithfulness and factuality annotations of abstractive summaries for the XSum dataset. We have crowdsourced 3 judgements\n for each of 500 x 5 document-system pairs. This will be a valuable resource to the abstractive summarization community.\n", "citation": "@InProceedings{maynez_acl20,\n author = \"Joshua Maynez and Shashi Narayan and Bernd Bohnet and Ryan Thomas Mcdonald\",\n title = \"On Faithfulness and Factuality in Abstractive Summarization\",\n booktitle = \"Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics\",\n year = \"2020\",\n pages = \"1906--1919\",\n address = \"Online\",\n}\n", "homepage": "https://research.google/tools/datasets/xsum-hallucination-annotations/", "license": "https://creativecommons.org/licenses/by/4.0/", "features": {"bbcid": {"dtype": "int32", "id": null, "_type": "Value"}, "system": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "is_factual": {"num_classes": 2, "names": ["no", "yes"], "names_file": null, "id": null, "_type": "ClassLabel"}, "worker_id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xsum_factuality", "config_name": "xsum_factuality", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 800027, "num_examples": 5597, "dataset_name": "xsum_factuality"}}, "download_checksums": {"https://raw.githubusercontent.com/google-research-datasets/xsum_hallucination_annotations/master/factuality_annotations_xsum_summaries.csv": {"num_bytes": 759614, "checksum": "f0ace0a9b52cacaa632ded3d07a355b7991383ce28fdd9fcbbf08a8523695ecb"}, "https://raw.githubusercontent.com/google-research-datasets/xsum_hallucination_annotations/master/hallucination_annotations_xsum_summaries.csv": {"num_bytes": 2105145, "checksum": "fa7fb66a36cc0f32ede4135985d0d65591dc2a8d21103a0bacd0583d77d4c8ea"}}, "download_size": 2864759, "post_processing_size": null, "dataset_size": 800027, "size_in_bytes": 3664786}, "xsum_faithfulness": {"description": "Neural abstractive summarization models are highly prone to hallucinate content that is unfaithful to the input\ndocument. The popular metric such as ROUGE fails to show the severity of the problem. The dataset consists of\nfaithfulness and factuality annotations of abstractive summaries for the XSum dataset. We have crowdsourced 3 judgements\n for each of 500 x 5 document-system pairs. 
This will be a valuable resource to the abstractive summarization community.\n", "citation": "@InProceedings{maynez_acl20,\n author = \"Joshua Maynez and Shashi Narayan and Bernd Bohnet and Ryan Thomas Mcdonald\",\n title = \"On Faithfulness and Factuality in Abstractive Summarization\",\n booktitle = \"Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics\",\n year = \"2020\",\n pages = \"1906--1919\",\n address = \"Online\",\n}\n", "homepage": "https://research.google/tools/datasets/xsum-hallucination-annotations/", "license": "https://creativecommons.org/licenses/by/4.0/", "features": {"bbcid": {"dtype": "int32", "id": null, "_type": "Value"}, "system": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "hallucination_type": {"num_classes": 2, "names": ["intrinsic", "extrinsic"], "names_file": null, "id": null, "_type": "ClassLabel"}, "hallucinated_span_start": {"dtype": "int32", "id": null, "_type": "Value"}, "hallucinated_span_end": {"dtype": "int32", "id": null, "_type": "Value"}, "worker_id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xsum_factuality", "config_name": "xsum_faithfulness", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1750325, "num_examples": 11185, "dataset_name": "xsum_factuality"}}, "download_checksums": {"https://raw.githubusercontent.com/google-research-datasets/xsum_hallucination_annotations/master/factuality_annotations_xsum_summaries.csv": {"num_bytes": 759614, "checksum": "f0ace0a9b52cacaa632ded3d07a355b7991383ce28fdd9fcbbf08a8523695ecb"}, "https://raw.githubusercontent.com/google-research-datasets/xsum_hallucination_annotations/master/hallucination_annotations_xsum_summaries.csv": {"num_bytes": 2105145, "checksum": "fa7fb66a36cc0f32ede4135985d0d65591dc2a8d21103a0bacd0583d77d4c8ea"}}, "download_size": 2864759, "post_processing_size": null, "dataset_size": 1750325, "size_in_bytes": 4615084}}
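For reference, the deleted file only cached loader metadata; the dataset itself is unaffected. A minimal sketch of loading both configs described above, assuming the dataset remains available on the Hub under the same identifiers (config names, label sets, and split sizes are taken from the deleted JSON, not from this commit):

    from datasets import load_dataset

    # Both configs recorded in the deleted metadata still load by name.
    factuality = load_dataset("xsum_factuality", "xsum_factuality", split="train")
    faithfulness = load_dataset("xsum_factuality", "xsum_faithfulness", split="train")

    print(factuality.features["is_factual"].names)            # ['no', 'yes']
    print(faithfulness.features["hallucination_type"].names)  # ['intrinsic', 'extrinsic']
    print(len(factuality), len(faithfulness))                 # 5597 11185, matching the deleted split sizes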