Datasets:

Languages:
Indonesian
Multilinguality:
monolingual
Size Categories:
100K<n<1M
Language Creators:
found
Annotations Creators:
no-annotation
Source Datasets:
original
ArXiv:
License:
albertvillanova HF staff committed on
Commit
195d4cd
1 Parent(s): 955dbcb

Delete legacy JSON metadata (#2)

Browse files

- Delete legacy JSON metadata (b7df377021f77828b508905203fe2d78ec3978a1)

Files changed (1) hide show
  1. dataset_infos.json +0 -1
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"canonical": {"description": "In this paper, we introduce a large-scale Indonesian summarization dataset. We harvest articles from this http URL,\nan online news portal, and obtain 215,827 document-summary pairs. We leverage pre-trained language models to develop\nbenchmark extractive and abstractive summarization methods over the dataset with multilingual and monolingual\nBERT-based models. We include a thorough error analysis by examining machine-generated summaries that have\nlow ROUGE scores, and expose both issues with ROUGE it-self, as well as with extractive and abstractive\nsummarization models.\n", "citation": "@inproceedings{id_liputan6,\n author = {Fajri Koto, Jey Han Lau, Timothy Baldwin},\n title = {Liputan6: A Large-scale Indonesian Dataset for Text Summarization},\n year = {2020},\n url = {https://arxiv.org/abs/2011.00679},\n}\n", "homepage": "https://arxiv.org/abs/2011.00679", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "clean_article": {"dtype": "string", "id": null, "_type": "Value"}, "clean_summary": {"dtype": "string", "id": null, "_type": "Value"}, "extractive_summary": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "id_liputan6", "config_name": "canonical", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 20944658, "num_examples": 10972, "dataset_name": "id_liputan6"}, "test": {"name": "test", "num_bytes": 20526768, "num_examples": 10972, "dataset_name": "id_liputan6"}, "train": {"name": "train", "num_bytes": 382245586, "num_examples": 193883, "dataset_name": "id_liputan6"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 423717012, "size_in_bytes": 423717012}, "xtreme": {"description": "In this paper, we introduce a 
large-scale Indonesian summarization dataset. We harvest articles from this http URL,\nan online news portal, and obtain 215,827 document-summary pairs. We leverage pre-trained language models to develop\nbenchmark extractive and abstractive summarization methods over the dataset with multilingual and monolingual\nBERT-based models. We include a thorough error analysis by examining machine-generated summaries that have\nlow ROUGE scores, and expose both issues with ROUGE it-self, as well as with extractive and abstractive\nsummarization models.\n", "citation": "@inproceedings{id_liputan6,\n author = {Fajri Koto, Jey Han Lau, Timothy Baldwin},\n title = {Liputan6: A Large-scale Indonesian Dataset for Text Summarization},\n year = {2020},\n url = {https://arxiv.org/abs/2011.00679},\n}\n", "homepage": "https://arxiv.org/abs/2011.00679", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "clean_article": {"dtype": "string", "id": null, "_type": "Value"}, "clean_summary": {"dtype": "string", "id": null, "_type": "Value"}, "extractive_summary": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "id_liputan6", "config_name": "xtreme", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 9652946, "num_examples": 4948, "dataset_name": "id_liputan6"}, "test": {"name": "test", "num_bytes": 7574550, "num_examples": 3862, "dataset_name": "id_liputan6"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 17227496, "size_in_bytes": 17227496}}