kpackxs commited on
Commit
58e2235
1 Parent(s): eb09ed7

added details for downloading the dev and test sets

Browse files
Files changed (3) hide show
  1. README.md +14 -8
  2. dataset_infos.json +1 -1
  3. menyo20k_mt.py +12 -5
README.md CHANGED
@@ -33,8 +33,14 @@ dataset_info:
33
  - name: train
34
  num_bytes: 2551273
35
  num_examples: 10070
36
- download_size: 2490852
37
- dataset_size: 2551273
 
 
 
 
 
 
38
  ---
39
 
40
  # Dataset Card for MENYO-20k
@@ -66,8 +72,8 @@ dataset_info:
66
  ## Dataset Description
67
 
68
  - **Homepage:** [Homepage for Menyo-20k](https://zenodo.org/record/4297448#.X81G7s0zZPY)
69
- - **Repository:**[Github Repo](https://github.com/dadelani/menyo-20k_MT)
70
- - **Paper:**
71
  - **Leaderboard:**
72
  - **Point of Contact:**
73
 
@@ -90,7 +96,7 @@ Languages are English and Yorùbá
90
  The data consists of tab separated entries
91
 
92
  ```
93
- {'translation':
94
  {'en': 'Unit 1: What is Creative Commons?',
95
  'yo': 'Ìdá 1: Kín ni Creative Commons?'
96
  }
@@ -106,7 +112,7 @@ The data consists of tab seperated entries
106
 
107
  ### Data Splits
108
 
109
- Only training dataset available
110
 
111
  ## Dataset Creation
112
 
@@ -171,7 +177,7 @@ The dataset is open but for non-commercial use because some of the data sources
171
  Adesina Ayeni and
172
  Mofe Adeyemi and
173
  Ayodele Awokoya},
174
- title = {{MENYO-20k: A Multi-domain English - Yorùbá Corpus
175
  for Machine Translation}},
176
  month = nov,
177
  year = 2020,
@@ -183,4 +189,4 @@ The dataset is open but for non-commercial use because some of the data sources
183
  ```
184
  ### Contributions
185
 
186
- Thanks to [@yvonnegitau](https://github.com/yvonnegitau) for adding this dataset.
 
33
  - name: train
34
  num_bytes: 2551273
35
  num_examples: 10070
36
+ - name: dev
37
+ num_bytes: 849789
38
+ num_examples: 3397
39
+ - name: test
40
+ num_bytes: 1865593
41
+ num_examples: 6633
42
+ download_size: 5206234
43
+ dataset_size: 5266655
44
  ---
45
 
46
  # Dataset Card for MENYO-20k
 
72
  ## Dataset Description
73
 
74
  - **Homepage:** [Homepage for Menyo-20k](https://zenodo.org/record/4297448#.X81G7s0zZPY)
75
+ - **Repository:** [Github Repo](https://github.com/uds-lsv/menyo-20k_MT/)
76
+ - **Paper:** [African NLP 2021](https://www.lsv.uni-saarland.de/wp-content/uploads/2022/06/africanlp2021_EACL_menyo20k.pdf)
77
  - **Leaderboard:**
78
  - **Point of Contact:**
79
 
 
96
  The data consists of tab separated entries
97
 
98
  ```
99
+ {'translation':
100
  {'en': 'Unit 1: What is Creative Commons?',
101
  'yo': 'Ìdá 1: Kín ni Creative Commons?'
102
  }
 
112
 
113
  ### Data Splits
114
 
115
+ Training, Validation and Test datasets are available.
116
 
117
  ## Dataset Creation
118
 
 
177
  Adesina Ayeni and
178
  Mofe Adeyemi and
179
  Ayodele Awokoya},
180
+ title = {{MENYO-20k: A Multi-domain English - Yorùbá Corpus
181
  for Machine Translation}},
182
  month = nov,
183
  year = 2020,
 
189
  ```
190
  ### Contributions
191
 
192
+ Thanks to [@yvonnegitau](https://github.com/yvonnegitau) for adding this dataset.
dataset_infos.json CHANGED
@@ -1 +1 @@
1
- {"menyo20k_mt": {"description": "MENYO-20k is a multi-domain parallel dataset with texts obtained from news articles, ted talks, movie transcripts, radio transcripts, science and technology texts, and other short articles curated from the web and professional translators. The dataset has 20,100 parallel sentences split into 10,070 training sentences, 3,397 development sentences, and 6,633 test sentences (3,419 multi-domain, 1,714 news domain, and 1,500 ted talks speech transcript domain). The development and test sets are available upon request.\n", "citation": "@dataset{david_ifeoluwa_adelani_2020_4297448,\n author = {David Ifeoluwa Adelani and\n Jesujoba O. Alabi and\n Damilola Adebonojo and\n Adesina Ayeni and\n Mofe Adeyemi and\n Ayodele Awokoya},\n title = {MENYO-20k: A Multi-domain English - Yor\u00f9b\u00e1 Corpus\n for Machine Translation},\n month = nov,\n year = 2020,\n publisher = {Zenodo},\n version = {1.0},\n doi = {10.5281/zenodo.4297448},\n url = {https://doi.org/10.5281/zenodo.4297448}\n}\n", "homepage": "https://zenodo.org/record/4297448#.X81G7s0zZPY", "license": "For non-commercial use because some of the data sources like Ted talks and JW news requires permission for commercial use.", "features": {"translation": {"languages": ["en", "yo"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "menyo20k_mt", "config_name": "menyo20k_mt", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2551273, "num_examples": 10070, "dataset_name": "menyo20k_mt"}}, "download_checksums": {"https://raw.githubusercontent.com/uds-lsv/menyo-20k_MT/master/data/train.tsv": {"num_bytes": 2490852, "checksum": "3c152119d4dc1fba12ee9424f1e7fd11648acfa8e2ea7f6464a37a18e69d9a06"}}, "download_size": 2490852, "post_processing_size": null, "dataset_size": 2551273, "size_in_bytes": 5042125}}
 
1
+ {"menyo20k_mt": {"description": "MENYO-20k is a multi-domain parallel dataset with texts obtained from news articles, ted talks, movie transcripts, radio transcripts, science and technology texts, and other short articles curated from the web and professional translators. The dataset has 20,100 parallel sentences split into 10,070 training sentences, 3,397 development sentences, and 6,633 test sentences (3,419 multi-domain, 1,714 news domain, and 1,500 ted talks speech transcript domain). The development and test sets are available upon request.\n", "citation": "@dataset{david_ifeoluwa_adelani_2020_4297448,\n author = {David Ifeoluwa Adelani and\n Jesujoba O. Alabi and\n Damilola Adebonojo and\n Adesina Ayeni and\n Mofe Adeyemi and\n Ayodele Awokoya},\n title = {MENYO-20k: A Multi-domain English - Yor\u00f9b\u00e1 Corpus\n for Machine Translation},\n month = nov,\n year = 2020,\n publisher = {Zenodo},\n version = {1.0},\n doi = {10.5281/zenodo.4297448},\n url = {https://doi.org/10.5281/zenodo.4297448}\n}\n", "homepage": "https://zenodo.org/record/4297448#.X81G7s0zZPY", "license": "For non-commercial use because some of the data sources like Ted talks and JW news requires permission for commercial use.", "features": {"translation": {"languages": ["en", "yo"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "menyo20k_mt", "config_name": "menyo20k_mt", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2551273, "num_examples": 10070, "dataset_name": "menyo20k_mt"},"dev": {"name": "dev", "num_bytes": 849789, "num_examples": 3397, "dataset_name": "menyo20k_mt"},"test": {"name": "test", "num_bytes": 1865593, "num_examples": 6633, "dataset_name": "menyo20k_mt"} }, "download_checksums": {"https://raw.githubusercontent.com/uds-lsv/menyo-20k_MT/master/data/train.tsv": {"num_bytes": 2490852, "checksum": 
"3c152119d4dc1fba12ee9424f1e7fd11648acfa8e2ea7f6464a37a18e69d9a06"}}, "download_size": 2490852, "post_processing_size": null, "dataset_size": 5266655, "size_in_bytes": 7757507}}
menyo20k_mt.py CHANGED
@@ -55,13 +55,19 @@ _LICENSE = "For non-commercial use because some of the data sources like Ted tal
55
 
56
  # The HuggingFace dataset library don't host the datasets but only point to the original files
57
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
58
- _URL = "https://raw.githubusercontent.com/uds-lsv/menyo-20k_MT/master/data/train.tsv"
 
 
 
 
 
 
59
 
60
 
61
  class Menyo20kMt(datasets.GeneratorBasedBuilder):
62
  """MENYO-20k: A Multi-domain English - Yorùbá Corpus for Machine Translations"""
63
 
64
- VERSION = datasets.Version("1.0.0")
65
 
66
  BUILDER_CONFIGS = [
67
  datasets.BuilderConfig(
@@ -89,10 +95,11 @@ class Menyo20kMt(datasets.GeneratorBasedBuilder):
89
 
90
  def _split_generators(self, dl_manager):
91
  """Returns SplitGenerators."""
92
- train_path = dl_manager.download_and_extract(_URL)
93
-
94
  return [
95
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
 
 
96
  ]
97
 
98
  def _generate_examples(self, filepath):
 
55
 
56
  # The HuggingFace dataset library don't host the datasets but only point to the original files
57
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
58
+ _URLS = {
59
+ "train": "https://raw.githubusercontent.com/uds-lsv/menyo-20k_MT/master/data/train.tsv",
60
+ "dev": "https://raw.githubusercontent.com/uds-lsv/menyo-20k_MT/master/data/dev.tsv",
61
+ "test": "https://raw.githubusercontent.com/uds-lsv/menyo-20k_MT/master/data/test.tsv",
62
+ }
63
+
64
+
65
 
66
 
67
  class Menyo20kMt(datasets.GeneratorBasedBuilder):
68
  """MENYO-20k: A Multi-domain English - Yorùbá Corpus for Machine Translations"""
69
 
70
+ VERSION = datasets.Version("1.1.0")
71
 
72
  BUILDER_CONFIGS = [
73
  datasets.BuilderConfig(
 
95
 
96
  def _split_generators(self, dl_manager):
97
  """Returns SplitGenerators."""
98
+ data_files = dl_manager.download(_URLS)
 
99
  return [
100
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
101
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
102
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
103
  ]
104
 
105
  def _generate_examples(self, filepath):