jaygala24 committed on
Commit 1924d9e
1 Parent(s): f948138

Upload IN22-Gen.py with huggingface_hub

Files changed (1)
  1. IN22-Gen.py +198 -0
IN22-Gen.py ADDED
@@ -0,0 +1,198 @@
+ # coding=utf-8
+ """The IN-22 Gen benchmark for evaluating machine translation for Indic languages."""
+
+ import os
+ from itertools import permutations
+ from typing import List, Union
+
+ import datasets
+
+
+ _CITATION = """
+ @article{ai4bharat2023indictrans2,
+     title   = {IndicTrans2: Towards High-Quality and Accessible Machine Translation Models for all 22 Scheduled Indian Languages},
+     author  = {AI4Bharat and Jay Gala and Pranjal A. Chitale and Raghavan AK and Sumanth Doddapaneni and Varun Gumma and Aswanth Kumar and Janki Nawale and Anupama Sujatha and Ratish Puduppully and Vivek Raghavan and Pratyush Kumar and Mitesh M. Khapra and Raj Dabre and Anoop Kunchukuttan},
+     year    = {2023},
+     journal = {arXiv preprint arXiv:2305.16307}
+ }
+ """
+
+ _DESCRIPTION = """\
+ IN-22 is a newly created comprehensive benchmark for evaluating machine translation performance in multi-domain, n-way parallel contexts across 22 Indic languages.
+ IN22-Gen is a general-purpose multi-domain evaluation subset of IN22. It was created from two sources, Wikipedia and web sources, offering diverse content spanning news, entertainment, culture, legal, and India-centric topics.
+ """
+
+ _HOMEPAGE = "https://github.com/AI4Bharat/IndicTrans2"
+
+ _LICENSE = "CC-BY-4.0"
+
+ # Language codes follow the <iso639-3>_<script> convention
+ # (e.g. hin_Deva is Hindi in the Devanagari script).
+ _LANGUAGES = [
+     "asm_Beng", "ben_Beng", "brx_Deva",
+     "doi_Deva", "eng_Latn", "gom_Deva",
+     "guj_Gujr", "hin_Deva", "kan_Knda",
+     "kas_Arab", "mai_Deva", "mal_Mlym",
+     "mar_Deva", "mni_Beng", "mni_Mtei",
+     "npi_Deva", "ory_Orya", "pan_Guru",
+     "san_Deva", "sat_Olck", "snd_Arab",
+     "snd_Deva", "tam_Taml", "tel_Telu",
+     "urd_Arab"
+ ]
+
+ _URL = "https://indictrans2-public.objectstore.e2enetworks.net/IN22_benchmark.tar.gz"
+
+ _SPLITS = ["gen"]
+
+ # Per-language sentence files inside the extracted archive.
+ _SENTENCES_PATHS = {
+     lang: {
+         split: os.path.join("IN22_benchmark", split, f"test.{lang}")
+         for split in _SPLITS
+     } for lang in _LANGUAGES
+ }
+
+ # Per-split TSV files holding the sentence-level metadata.
+ _METADATA_PATHS = {
+     split: os.path.join("IN22_benchmark", f"metadata_{split}.tsv")
+     for split in _SPLITS
+ }
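+ # For instance, _SENTENCES_PATHS["hin_Deva"]["gen"] resolves (on POSIX) to
+ # "IN22_benchmark/gen/test.hin_Deva", and _METADATA_PATHS["gen"] to
+ # "IN22_benchmark/metadata_gen.tsv".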
+
+
+ def _pairings(iterable, r=2):
+     """Yield the distinct ordered r-tuples of items from iterable, in lexicographic order."""
+     previous = tuple()
+     for p in permutations(sorted(iterable), r):
+         if p > previous:
+             previous = p
+             yield p
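+ # e.g. _pairings(["a", "b", "c"]) yields ("a", "b"), ("a", "c"), ("b", "a"),
+ # ("b", "c"), ("c", "a"), ("c", "b"): both directions of every pair, which is
+ # what gives the builder one config per translation direction.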
+
+
+ class IN22GenConfig(datasets.BuilderConfig):
+     """BuilderConfig for the IN-22 Gen evaluation subset."""
+
+     def __init__(self, lang: str, lang2: str = None, **kwargs):
+         """
+         Args:
+             lang: code of the (first) language, or None for the "all" config.
+             lang2: code of the second language, set only for aligned pair configs.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(version=datasets.Version("1.0.0"), **kwargs)
+         self.lang = lang
+         self.lang2 = lang2
+
+
+ class IN22Gen(datasets.GeneratorBasedBuilder):
+     """IN-22 Gen evaluation subset."""
+
+     # One config per language, one "all" config covering every language, and
+     # one config per ordered language pair.
+     BUILDER_CONFIGS = [
+         IN22GenConfig(
+             name=lang,
+             description=f"IN-22: {lang} subset.",
+             lang=lang
+         )
+         for lang in _LANGUAGES
+     ] + [
+         IN22GenConfig(
+             name="all",
+             description="IN-22: all language pairs.",
+             lang=None
+         )
+     ] + [
+         IN22GenConfig(
+             name=f"{l1}-{l2}",
+             description=f"IN-22: {l1}-{l2} aligned subset.",
+             lang=l1,
+             lang2=l2
+         ) for (l1, l2) in _pairings(_LANGUAGES)
+     ]
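+ # Valid config names are therefore a single code such as "hin_Deva", a
+ # directed pair such as "eng_Latn-hin_Deva", or "all".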
+
+     def _info(self):
+         features = {
+             "id": datasets.Value("int32"),
+             "context": datasets.Value("string"),
+             "source": datasets.Value("string"),
+             "url": datasets.Value("string"),
+             "domain": datasets.Value("string"),
+             "num_words": datasets.Value("int32"),
+             "bucket": datasets.Value("string")
+         }
+         if self.config.name != "all" and "-" not in self.config.name:
+             # Single-language config: one sentence column.
+             features["sentence"] = datasets.Value("string")
+         elif "-" in self.config.name:
+             # Pair config: one sentence column per language in the pair.
+             for lang in [self.config.lang, self.config.lang2]:
+                 features[f"sentence_{lang}"] = datasets.Value("string")
+         else:
+             # "all" config: one sentence column per language.
+             for lang in _LANGUAGES:
+                 features[f"sentence_{lang}"] = datasets.Value("string")
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(features),
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
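+ # For the "eng_Latn-hin_Deva" config, for example, the schema gains the two
+ # columns sentence_eng_Latn and sentence_hin_Deva on top of the metadata
+ # columns above.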
+
+     def _split_generators(self, dl_manager):
+         dl_dir = dl_manager.download_and_extract(_URL)
+
+         def _get_sentence_paths(split):
+             # Pair config: a list of two paths; single-language config: one
+             # path (a plain string); "all" config: one path per language.
+             if isinstance(self.config.lang, str) and isinstance(self.config.lang2, str):
+                 sentence_paths = [os.path.join(dl_dir, _SENTENCES_PATHS[lang][split]) for lang in (self.config.lang, self.config.lang2)]
+             elif isinstance(self.config.lang, str):
+                 sentence_paths = os.path.join(dl_dir, _SENTENCES_PATHS[self.config.lang][split])
+             else:
+                 sentence_paths = [os.path.join(dl_dir, _SENTENCES_PATHS[lang][split]) for lang in _LANGUAGES]
+             return sentence_paths
+
+         return [
+             datasets.SplitGenerator(
+                 name=split,
+                 gen_kwargs={
+                     "sentence_paths": _get_sentence_paths(split),
+                     "metadata_path": os.path.join(dl_dir, _METADATA_PATHS[split]),
+                 }
+             ) for split in _SPLITS
+         ]
+
+     def _generate_examples(self, sentence_paths: Union[str, List[str]], metadata_path: str):
+         """Yields examples as (key, example) tuples."""
+         if isinstance(sentence_paths, str):
+             # Single-language config: pair each sentence with its metadata row.
+             with open(sentence_paths, "r", encoding="utf-8") as sentences_file:
+                 with open(metadata_path, "r", encoding="utf-8") as metadata_file:
+                     # Skip the TSV header row.
+                     metadata_lines = [l.strip() for l in metadata_file.readlines()[1:]]
+                     for id_, (sentence, metadata) in enumerate(
+                         zip(sentences_file, metadata_lines)
+                     ):
+                         sentence = sentence.strip()
+                         metadata = metadata.split("\t")
+                         yield id_, {
+                             "id": id_ + 1,
+                             "sentence": sentence,
+                             "context": metadata[0],
+                             "source": metadata[1],
+                             "url": metadata[2],
+                             "domain": metadata[3],
+                             "num_words": int(metadata[4]),
+                             "bucket": metadata[5]
+                         }
+         else:
+             # Pair or "all" config: read every language file, then emit one
+             # n-way aligned example per metadata row.
+             sentences = {}
+             if len(sentence_paths) == len(_LANGUAGES):
+                 langs = _LANGUAGES
+             else:
+                 langs = [self.config.lang, self.config.lang2]
+             for path, lang in zip(sentence_paths, langs):
+                 with open(path, "r", encoding="utf-8") as sent_file:
+                     sentences[lang] = [l.strip() for l in sent_file.readlines()]
+             with open(metadata_path, "r", encoding="utf-8") as metadata_file:
+                 metadata_lines = [l.strip() for l in metadata_file.readlines()[1:]]
+             for id_, metadata in enumerate(metadata_lines):
+                 metadata = metadata.split("\t")
+                 yield id_, {
+                     **{
+                         "id": id_ + 1,
+                         "context": metadata[0],
+                         "source": metadata[1],
+                         "url": metadata[2],
+                         "domain": metadata[3],
+                         "num_words": int(metadata[4]),
+                         "bucket": metadata[5]
+                     }, **{
+                         f"sentence_{lang}": sentences[lang][id_]
+                         for lang in langs
+                     }
+                 }
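
A loading script like this is normally consumed through datasets.load_dataset. A minimal sketch, assuming the dataset is hosted on the Hub as ai4bharat/IN22-Gen (the repo id is an assumption here, and trust_remote_code is only needed on recent datasets releases for script-backed datasets):

from datasets import load_dataset

# Single language: each row carries one "sentence" column plus metadata.
hin = load_dataset("ai4bharat/IN22-Gen", "hin_Deva", trust_remote_code=True)

# Directed pair: rows carry sentence_eng_Latn and sentence_hin_Deva.
pair = load_dataset("ai4bharat/IN22-Gen", "eng_Latn-hin_Deva", trust_remote_code=True)

print(pair["gen"][0]["sentence_eng_Latn"])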