PontifexMaximus committed on
Commit
44149c0
1 Parent(s): e15538f

Update en-as.py

Files changed (1)
  1. en-as.py +158 -35
en-as.py CHANGED
@@ -1,45 +1,168 @@
 import datasets
 
-from .wmt_utils import Wmt, WmtConfig
-
-
-_URL = "http://www.statmt.org/wmt16/translation-task.html"
-_CITATION = """
-@InProceedings{bojar-EtAl:2016:WMT1,
-  author = {Bojar, Ond\v{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huck, Matthias and Jimeno Yepes, Antonio and Koehn, Philipp and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Neveol, Aurelie and Neves, Mariana and Popel, Martin and Post, Matt and Rubino, Raphael and Scarton, Carolina and Specia, Lucia and Turchi, Marco and Verspoor, Karin and Zampieri, Marcos},
-  title = {Findings of the 2016 Conference on Machine Translation},
-  booktitle = {Proceedings of the First Conference on Machine Translation},
-  month = {August},
-  year = {2016},
-  address = {Berlin, Germany},
-  publisher = {Association for Computational Linguistics},
-  pages = {131--198},
-  url = {http://www.aclweb.org/anthology/W/W16/W16-2301}
 }
 """
 
-_LANGUAGE_PAIRS = [(lang, "as") for lang in ["or"]]
 
 
-class Wmt16(Wmt):
-    """WMT 16 translation datasets for all {xx, "en"} language pairs."""
 
-    BUILDER_CONFIGS = [
-        WmtConfig(  # pylint:disable=g-complex-comprehension
-            description="WMT 2016 %s-%s translation task dataset." % (l1, l2),
-            url=_URL,
             citation=_CITATION,
-            language_pair=(l1, l2),
-            version=datasets.Version("1.0.0"),
         )
-        for l1, l2 in _LANGUAGE_PAIRS
-    ]
-
-    @property
-    def _subsets(self):
-        return {
-            datasets.Split.TRAIN: [
-                "as-or"
-            ],
-
-        }
 import datasets
 
+
+_CITATION = """\
+@inproceedings{siripragada-etal-2020-multilingual,
+    title = "A Multilingual Parallel Corpora Collection Effort for {I}ndian Languages",
+    author = "Siripragada, Shashank and
+      Philip, Jerin and
+      Namboodiri, Vinay P. and
+      Jawahar, C V",
+    booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
+    month = may,
+    year = "2020",
+    address = "Marseille, France",
+    publisher = "European Language Resources Association",
+    url = "https://aclanthology.org/2020.lrec-1.462",
+    pages = "3743--3751",
+    language = "English",
+    ISBN = "979-10-95546-34-4",
+}
+@article{2020,
+    title={Revisiting Low Resource Status of Indian Languages in Machine Translation},
+    url={http://dx.doi.org/10.1145/3430984.3431026},
+    DOI={10.1145/3430984.3431026},
+    journal={8th ACM IKDD CODS and 26th COMAD},
+    publisher={ACM},
+    author={Philip, Jerin and Siripragada, Shashank and Namboodiri, Vinay P. and Jawahar, C. V.},
+    year={2020},
+    month={Dec}
 }
 """
 
+_DESCRIPTION = """\
+Sentence-aligned parallel corpus between 11 Indian languages, crawled and extracted from the Press Information Bureau
+website.
+"""
+
+_HOMEPAGE = "http://preon.iiit.ac.in/~jerin/bhasha/"
+
+_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International"
+
+_URL = {
+    "0.0.0": "http://preon.iiit.ac.in/~jerin/resources/datasets/pib-v0.tar",
+    "1.3.0": "http://preon.iiit.ac.in/~jerin/resources/datasets/pib_v1.3.tar.gz",
+}
+_ROOT_DIR = {
+    "0.0.0": "pib",
+    "1.3.0": "pib-v1.3",
+}
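+# Each release unpacks to a different top-level directory ("pib" for v0,
+# "pib-v1.3" for v1.3); _generate_examples joins this root with "<src>-<tgt>"
+# to locate a pair's files inside the archive.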
 
+_LanguagePairs = [
+    "or-ur",
+    "ml-or",
+    "bn-ta",
+    "gu-mr",
+    "hi-or",
+    "en-or",
+    "mr-ur",
+    "en-ta",
+    "hi-ta",
+    "bn-en",
+    "bn-or",
+    "ml-ta",
+    "gu-ur",
+    "bn-ml",
+    "ml-pa",
+    "en-pa",
+    "bn-hi",
+    "hi-pa",
+    "gu-te",
+    "pa-ta",
+    "hi-ml",
+    "or-te",
+    "en-ml",
+    "en-hi",
+    "bn-pa",
+    "mr-te",
+    "mr-pa",
+    "bn-te",
+    "gu-hi",
+    "ta-ur",
+    "te-ur",
+    "or-pa",
+    "gu-ml",
+    "gu-pa",
+    "hi-te",
+    "en-te",
+    "ml-te",
+    "pa-ur",
+    "hi-ur",
+    "mr-or",
+    "en-ur",
+    "ml-ur",
+    "bn-mr",
+    "gu-ta",
+    "pa-te",
+    "bn-gu",
+    "bn-ur",
+    "ml-mr",
+    "or-ta",
+    "ta-te",
+    "gu-or",
+    "en-gu",
+    "hi-mr",
+    "mr-ta",
+    "en-mr",
+    "as-or",
+]
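+# Pair names join two ISO 639-1 codes in alphabetical order; a config name
+# passed to load_dataset must match one of the entries above exactly.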
+
+
+class PibConfig(datasets.BuilderConfig):
+    """BuilderConfig for PIB"""
+
+    def __init__(self, language_pair, version=datasets.Version("1.3.0"), **kwargs):
+        """
+        Args:
+            language_pair: the language pair to load, e.g. "en-or".
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super().__init__(version=version, **kwargs)
+        self.src, self.tgt = language_pair.split("-")
+
+
+class Pib(datasets.GeneratorBasedBuilder):
+    """The CVIT-PIB corpus is a large-scale sentence-aligned parallel corpus in 11 Indian
+    languages, the largest multilingual corpus available for Indian languages.
+    """
+
+    BUILDER_CONFIG_CLASS = PibConfig
+    BUILDER_CONFIGS = [PibConfig(name=pair, description=_DESCRIPTION, language_pair=pair) for pair in _LanguagePairs]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {"translation": datasets.features.Translation(languages=[self.config.src, self.config.tgt])}
+            ),
+            supervised_keys=(self.config.src, self.config.tgt),
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
             citation=_CITATION,
         )
+
+    def _split_generators(self, dl_manager):
+        # Download the release archive and stream its members rather than extracting it.
+        archive = dl_manager.download(_URL[str(self.config.version)])
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "archive": dl_manager.iter_archive(archive),
+                },
+            ),
+        ]
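+
+    # The archive stores each pair under "<root>/<src>-<tgt>/" as two
+    # line-aligned files, train.<src> and train.<tgt>, read below.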
+    def _generate_examples(self, archive):
+        root_dir = _ROOT_DIR[str(self.config.version)]
+        data_dir = f"{root_dir}/{self.config.src}-{self.config.tgt}"
+        src = tgt = None
+        for path, file in archive:
+            if data_dir in path:
+                if f"{data_dir}/train.{self.config.src}" in path:
+                    # split("\n")[:-1] drops the empty string left by the trailing newline
+                    src = file.read().decode("utf-8").split("\n")[:-1]
+                if f"{data_dir}/train.{self.config.tgt}" in path:
+                    tgt = file.read().decode("utf-8").split("\n")[:-1]
+                if src and tgt:
+                    break
+        for idx, (s, t) in enumerate(zip(src, tgt)):
+            yield idx, {"translation": {self.config.src: s, self.config.tgt: t}}
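
A minimal usage sketch, assuming the script is saved locally as en-as.py and a datasets version that still supports loading local dataset scripts; the config name must be one of the entries in _LanguagePairs:

    import datasets

    # Load the "as-or" pair defined by this script and inspect one example.
    ds = datasets.load_dataset("./en-as.py", "as-or", split="train")
    print(ds[0]["translation"])  # e.g. {"as": "...", "or": "..."}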