Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: found
Source Datasets: original
ArXiv: 1603.07771
License: CC BY-SA 3.0
albertvillanova committed
Commit 3a00c01
Parent: 78d8f38

Delete legacy JSON metadata (#3)


- Delete legacy JSON metadata (b53fa488aaf19fa5551a0d0b1e2d28e5919f1527)

Files changed (1):
  dataset_infos.json +0 -1
dataset_infos.json DELETED
{
  "default": {
    "description": "This dataset gathers 728,321 biographies from wikipedia. It aims at evaluating text generation\nalgorithms. For each article, we provide the first paragraph and the infobox (both tokenized).\nFor each article, we extracted the first paragraph (text), the infobox (structured data). Each\ninfobox is encoded as a list of (field name, field value) pairs. We used Stanford CoreNLP\n(http://stanfordnlp.github.io/CoreNLP/) to preprocess the data, i.e. we broke the text into\nsentences and tokenized both the text and the field values. The dataset was randomly split in\nthree subsets train (80%), valid (10%), test (10%).\n",
    "citation": "@article{DBLP:journals/corr/LebretGA16,\n author = {R{\\'{e}}mi Lebret and\n David Grangier and\n Michael Auli},\n title = {Generating Text from Structured Data with Application to the Biography\n Domain},\n journal = {CoRR},\n volume = {abs/1603.07771},\n year = {2016},\n url = {http://arxiv.org/abs/1603.07771},\n archivePrefix = {arXiv},\n eprint = {1603.07771},\n timestamp = {Mon, 13 Aug 2018 16:48:30 +0200},\n biburl = {https://dblp.org/rec/journals/corr/LebretGA16.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n",
    "homepage": "https://github.com/DavidGrangier/wikipedia-biography-dataset",
    "license": "CC BY-SA 3.0",
    "features": {
      "input_text": {
        "table": {
          "feature": {
            "column_header": {"dtype": "string", "id": null, "_type": "Value"},
            "row_number": {"dtype": "int16", "id": null, "_type": "Value"},
            "content": {"dtype": "string", "id": null, "_type": "Value"}
          },
          "length": -1,
          "id": null,
          "_type": "Sequence"
        },
        "context": {"dtype": "string", "id": null, "_type": "Value"}
      },
      "target_text": {"dtype": "string", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": {"input": "input_text", "output": "target_text"},
    "task_templates": null,
    "builder_name": "wiki_bio",
    "config_name": "default",
    "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0},
    "splits": {
      "train": {"name": "train", "num_bytes": 619269257, "num_examples": 582659, "dataset_name": "wiki_bio"},
      "test": {"name": "test", "num_bytes": 77264695, "num_examples": 72831, "dataset_name": "wiki_bio"},
      "val": {"name": "val", "num_bytes": 77335069, "num_examples": 72831, "dataset_name": "wiki_bio"}
    },
    "download_checksums": {
      "https://huggingface.co/datasets/wiki_bio/resolve/main/data/wikipedia-biography-dataset.zip": {
        "num_bytes": 333998704,
        "checksum": "0de0fef4cc6c9182138939134b81b6ac33ffbc989b6d23a2d9ef1e50c49b8032"
      }
    },
    "download_size": 333998704,
    "post_processing_size": null,
    "dataset_size": 773869021,
    "size_in_bytes": 1107867725
  }
}