naacl22_wikitext15M / dataset_infos.json
{"Reproducibility--naacl22_wikitext15M": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License.\n", "citation": "@InProceedings{wikitext,\n author={Stephen, Merity and Caiming ,Xiong and James, Bradbury and Richard Socher}\n year={2016}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext", "config_name": "wikitext-103-v1", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1295575, "num_examples": 4358, "dataset_name": "naacl22_wikitext15M"}, "train": {"name": "train", "num_bytes": 545141915, "num_examples": 1801350, "dataset_name": "naacl22_wikitext15M"}, "validation": {"name": "validation", "num_bytes": 1154751, "num_examples": 3760, "dataset_name": "naacl22_wikitext15M"}}, "download_checksums": null, "download_size": 309556868, "post_processing_size": null, "dataset_size": 547592241, "size_in_bytes": 857149109}}