Multilinguality: translation
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: expert-generated, found
Source Datasets: original
system (HF staff) committed on
Commit 3a5ca83 (1 parent: 79e2b1d)

Update files from the datasets library (from 1.13.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.13.0

Files changed (3):
  1. README.md +1 -0
  2. dataset_infos.json +1 -1
  3. menyo20k_mt.py +1 -1
README.md CHANGED
@@ -20,6 +20,7 @@ task_categories:
  task_ids:
  - machine-translation
  paperswithcode_id: null
+ pretty_name: MENYO-20k
  ---

  # Dataset Card for MENYO-20k
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"menyo20k_mt": {"description": "MENYO-20k is a multi-domain parallel dataset with texts obtained from news articles, ted talks, movie transcripts, radio transcripts, science and technology texts, and other short articles curated from the web and professional translators. The dataset has 20,100 parallel sentences split into 10,070 training sentences, 3,397 development sentences, and 6,633 test sentences (3,419 multi-domain, 1,714 news domain, and 1,500 ted talks speech transcript domain). The development and test sets are available upon request.\n", "citation": "@dataset{david_ifeoluwa_adelani_2020_4297448,\n author = {David Ifeoluwa Adelani and\n Jesujoba O. Alabi and\n Damilola Adebonojo and\n Adesina Ayeni and\n Mofe Adeyemi and\n Ayodele Awokoya},\n title = {MENYO-20k: A Multi-domain English - Yor\u00f9b\u00e1 Corpus\n for Machine Translation},\n month = nov,\n year = 2020,\n publisher = {Zenodo},\n version = {1.0},\n doi = {10.5281/zenodo.4297448},\n url = {https://doi.org/10.5281/zenodo.4297448}\n}\n", "homepage": "https://zenodo.org/record/4297448#.X81G7s0zZPY", "license": "For non-commercial use because some of the data sources like Ted talks and JW news requires permission for commercial use.", "features": {"translation": {"languages": ["en", "yo"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "builder_name": "menyo20k_mt", "config_name": "menyo20k_mt", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2551273, "num_examples": 10070, "dataset_name": "menyo20k_mt"}}, "download_checksums": {"https://github.com/dadelani/menyo-20k_MT/raw/main/data/train.tsv": {"num_bytes": 2490852, "checksum": "3c152119d4dc1fba12ee9424f1e7fd11648acfa8e2ea7f6464a37a18e69d9a06"}}, "download_size": 2490852, "post_processing_size": null, "dataset_size": 2551273, "size_in_bytes": 5042125}}
 
+ {"menyo20k_mt": {"description": "MENYO-20k is a multi-domain parallel dataset with texts obtained from news articles, ted talks, movie transcripts, radio transcripts, science and technology texts, and other short articles curated from the web and professional translators. The dataset has 20,100 parallel sentences split into 10,070 training sentences, 3,397 development sentences, and 6,633 test sentences (3,419 multi-domain, 1,714 news domain, and 1,500 ted talks speech transcript domain). The development and test sets are available upon request.\n", "citation": "@dataset{david_ifeoluwa_adelani_2020_4297448,\n author = {David Ifeoluwa Adelani and\n Jesujoba O. Alabi and\n Damilola Adebonojo and\n Adesina Ayeni and\n Mofe Adeyemi and\n Ayodele Awokoya},\n title = {MENYO-20k: A Multi-domain English - Yor\u00f9b\u00e1 Corpus\n for Machine Translation},\n month = nov,\n year = 2020,\n publisher = {Zenodo},\n version = {1.0},\n doi = {10.5281/zenodo.4297448},\n url = {https://doi.org/10.5281/zenodo.4297448}\n}\n", "homepage": "https://zenodo.org/record/4297448#.X81G7s0zZPY", "license": "For non-commercial use because some of the data sources like Ted talks and JW news requires permission for commercial use.", "features": {"translation": {"languages": ["en", "yo"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "menyo20k_mt", "config_name": "menyo20k_mt", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2551273, "num_examples": 10070, "dataset_name": "menyo20k_mt"}}, "download_checksums": {"https://raw.githubusercontent.com/uds-lsv/menyo-20k_MT/master/data/train.tsv": {"num_bytes": 2490852, "checksum": "3c152119d4dc1fba12ee9424f1e7fd11648acfa8e2ea7f6464a37a18e69d9a06"}}, "download_size": 2490852, "post_processing_size": null, "dataset_size": 2551273, "size_in_bytes": 5042125}}
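For context, the metadata above advertises a single public train split (10,070 examples) with a Translation feature over en and yo. A minimal usage sketch, not part of this commit; the Hub dataset id "menyo20k_mt" is assumed to match the config name above:

```python
# Minimal sketch: load MENYO-20k's public train split with the datasets library.
# The dataset id "menyo20k_mt" is an assumption based on the config name above.
from datasets import load_dataset

ds = load_dataset("menyo20k_mt", split="train")
print(ds.num_rows)           # 10070, per dataset_infos.json
print(ds[0]["translation"])  # {"en": "...", "yo": "..."} from the Translation feature
```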
menyo20k_mt.py CHANGED
@@ -55,7 +55,7 @@ _LICENSE = "For non-commercial use because some of the data sources like Ted tal

  # The HuggingFace dataset library don't host the datasets but only point to the original files
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URL = "https://github.com/dadelani/menyo-20k_MT/raw/main/data/train.tsv"
+ _URL = "https://raw.githubusercontent.com/uds-lsv/menyo-20k_MT/master/data/train.tsv"


  class Menyo20kMt(datasets.GeneratorBasedBuilder):
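The comment in the hunk above points to `_split_generators`, which this diff does not show. A rough sketch, assuming the builder follows the usual GeneratorBasedBuilder pattern of downloading `_URL` and yielding en/yo pairs from the TSV; the class body here is illustrative, not the repository's actual code beyond the lines in the diff:

```python
# Illustrative sketch only: how a datasets.GeneratorBasedBuilder typically consumes
# _URL in _split_generators/_generate_examples. The real menyo20k_mt.py may differ
# in details such as header handling, quoting, or the DatasetInfo fields it fills in.
import csv

import datasets

_URL = "https://raw.githubusercontent.com/uds-lsv/menyo-20k_MT/master/data/train.tsv"


class Menyo20kMtSketch(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # Mirrors the "translation" feature over ["en", "yo"] declared in dataset_infos.json.
        return datasets.DatasetInfo(
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=["en", "yo"])}
            ),
        )

    def _split_generators(self, dl_manager):
        # Only the train TSV is publicly downloadable; dev/test are available on request.
        path = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": path},
            )
        ]

    def _generate_examples(self, filepath):
        # Each TSV row is assumed to hold an English sentence and its Yoruba translation.
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            next(reader)  # assumed header row
            for idx, row in enumerate(reader):
                yield idx, {"translation": {"en": row[0], "yo": row[1]}}
```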