Plim committed on
Commit 7a48e33
1 Parent(s): d278f89

Upload dataset_infos.json

Files changed (1)
  1. dataset_infos.json +1 -0
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
+ {"Plim--language_model_fr": {"description": "A parallel corpus extracted from the European Parliament web site by Philipp Koehn (University of Edinburgh). The main intended use is to aid statistical machine translation research.\n\n\nWikipedia dataset containing cleaned articles of all languages.\nThe datasets are built from the Wikipedia dump\n(https://dumps.wikimedia.org/) with one split per language. Each example\ncontains the content of one full Wikipedia article with cleaning to strip\nmarkdown and unwanted sections (references, etc.).\n", "citation": "http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf\n\n@ONLINE {wikidump,\n author = \"Wikimedia Foundation\",\n title = \"Wikimedia Downloads\",\n url = \"https://dumps.wikimedia.org\"\n}\n", "homepage": "https://opus.nlpl.eu/Europarl.php\n\nhttps://dumps.wikimedia.org", "license": "The data set comes with the same license\nas the original sources.\nPlease, check the information about the source\nthat is given on\nhttp://opus.nlpl.eu/Europarl-v8.php\n\n\n", "features": {"text_processed": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": null, "config_name": null, "version": null, "splits": {"train": {"name": "train", "num_bytes": 6258591941, "num_examples": 4261522, "dataset_name": "language_model_fr"}}, "download_checksums": null, "download_size": 3508337947, "post_processing_size": null, "dataset_size": 6258591941, "size_in_bytes": 9766929888}}