yzhou992 committed on
Commit
3a99893
1 Parent(s): 43ea71a

Upload dataset_infos.json

Files changed (1)
  1. dataset_infos.json +1 -1
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"yzhou992--tokenize_wikitext103": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"attention_mask": {"feature": {"dtype": "int8", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "input_ids": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sentence_order_label": {"dtype": "int64", "id": null, "_type": "Value"}, "special_tokens_mask": {"feature": {"dtype": "int8", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "token_type_ids": {"feature": {"dtype": "int8", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": null, "config_name": null, "version": null, "splits": {"test": {"name": "test", "num_bytes": 2094025, "num_examples": 1723, "dataset_name": "tokenize_wikitext103"}, "train": {"name": "train", "num_bytes": 888342625, "num_examples": 711735, "dataset_name": "tokenize_wikitext103"}, "validation": {"name": "validation", "num_bytes": 1860210, "num_examples": 1557, "dataset_name": "tokenize_wikitext103"}}, "download_checksums": null, "download_size": 238733016, "post_processing_size": null, "dataset_size": 892296860, "size_in_bytes": 1131029876}}
+ {"yzhou992--tokenize_wikitext103": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"attention_mask": {"feature": {"dtype": "int8", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "input_ids": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sentence_order_label": {"dtype": "int64", "id": null, "_type": "Value"}, "special_tokens_mask": {"feature": {"dtype": "int8", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "token_type_ids": {"feature": {"dtype": "int8", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": null, "config_name": null, "version": null, "splits": {"test": {"name": "test", "num_bytes": 2094025, "num_examples": 1723, "dataset_name": "tokenize_wikitext103"}, "train": {"name": "train", "num_bytes": 888342625, "num_examples": 711735, "dataset_name": "tokenize_wikitext103"}, "validation": {"name": "validation", "num_bytes": 1860210, "num_examples": 1557, "dataset_name": "tokenize_wikitext103"}}, "download_checksums": null, "download_size": 239103762, "post_processing_size": null, "dataset_size": 892296860, "size_in_bytes": 1131400622}}