Dataset: wiki_split
Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: machine-generated
Source Datasets: original
ArXiv: 1808.09468
Tags: split-and-rephrase
License:
albertvillanova committed
Commit: 7f02232
Parent: 2cdf4fe

Delete legacy JSON metadata (#2)

- Delete legacy JSON metadata (9479e060d424ef078e4c54aca38f5eafe772054e)
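(Context: this deletion is part of the Hub-wide migration away from per-repository dataset_infos.json files; the `datasets` library now reads the same information from the dataset card and the Hub. A minimal sketch of inspecting it programmatically, assuming a recent `datasets` version; depending on the version, script-based datasets like this one may additionally require trust_remote_code=True:)

# Sketch: read the DatasetInfo that previously lived in dataset_infos.json.
# Assumes the `datasets` library is installed; exact behaviour varies by version.
from datasets import load_dataset_builder

builder = load_dataset_builder("wiki_split")
info = builder.info

print(info.features)                      # complex_sentence, simple_sentence_1, simple_sentence_2
print(info.splits["train"].num_examples)  # 989944, matching the deleted metadata
print(info.citation)                      # the BothaEtAl2018 BibTeX entry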

Files changed (1):
  1. dataset_infos.json +0 -1

dataset_infos.json DELETED
@@ -1 +0,0 @@
- {
-   "default": {
-     "description": "One million English sentences, each split into two sentences that together preserve the original meaning, extracted from Wikipedia \nGoogle's WikiSplit dataset was constructed automatically from the publicly available Wikipedia revision history. Although \nthe dataset contains some inherent noise, it can serve as valuable training data for models that split or merge sentences.\n",
-     "citation": "@InProceedings{BothaEtAl2018,\n title = {{Learning To Split and Rephrase From Wikipedia Edit History}},\n author = {Botha, Jan A and Faruqui, Manaal and Alex, John and Baldridge, Jason and Das, Dipanjan},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing},\n pages = {to appear},\n note = {arXiv preprint arXiv:1808.09468},\n year = {2018}\n}\n",
-     "homepage": "https://dataset-homepage/",
-     "license": "",
-     "features": {
-       "complex_sentence": {"dtype": "string", "id": null, "_type": "Value"},
-       "simple_sentence_1": {"dtype": "string", "id": null, "_type": "Value"},
-       "simple_sentence_2": {"dtype": "string", "id": null, "_type": "Value"}
-     },
-     "supervised_keys": null,
-     "builder_name": "wiki_split",
-     "config_name": "default",
-     "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0},
-     "splits": {
-       "test": {"name": "test", "num_bytes": 1949294, "num_examples": 5000, "dataset_name": "wiki_split"},
-       "train": {"name": "train", "num_bytes": 384513073, "num_examples": 989944, "dataset_name": "wiki_split"},
-       "validation": {"name": "validation", "num_bytes": 1935459, "num_examples": 5000, "dataset_name": "wiki_split"}
-     },
-     "download_checksums": {
-       "https://github.com/google-research-datasets/wiki-split/raw/master/train.tsv.zip": {"num_bytes": 96439435, "checksum": "42234243209fbb8f11405372284d634728faf47eafd5799cefb932c06c7b3a54"},
-       "https://github.com/google-research-datasets/wiki-split/raw/master/test.tsv": {"num_bytes": 1926782, "checksum": "88c3eff253f7ff3fcce9370bb54f750bc35f646c1b6d2a45df2b993c96670f7f"},
-       "https://github.com/google-research-datasets/wiki-split/raw/master/validation.tsv": {"num_bytes": 1912947, "checksum": "63931ff45b7d95d60480bdcd466d69894fc02a0ec975cab8d0955e9b8cceb922"}
-     },
-     "download_size": 100279164,
-     "dataset_size": 388397826,
-     "size_in_bytes": 488676990
-   }
- }