satpalsr committed
Commit 86edf25
Parent: 472345b

Upload indicCorpv2.py

Files changed (1):
  1. indicCorpv2.py (+191 -0, new file)

indicCorpv2.py:
"""The IndicCorpV2 benchmark."""

import datasets

_INDIC_CORPV2_CITATION = """\
@article{Doddapaneni2022towards,
  title={Towards Leaving No Indic Language Behind: Building Monolingual Corpora, Benchmark and Models for Indic Languages},
  author={Sumanth Doddapaneni and Rahul Aralikatte and Gowtham Ramesh and Shreyansh Goyal and Mitesh M. Khapra and Anoop Kunchukuttan and Pratyush Kumar},
  journal={ArXiv},
  year={2022},
  volume={abs/2212.05409}
}
"""

_INDIC_CORPV2_DESCRIPTION = """\
IndicCorpV2 is the largest collection of texts for Indic languages, consisting of 20.9 billion tokens, of which 14.4B tokens correspond to 23 Indic languages and 6.5B tokens to Indian English content curated from Indian websites.
"""

# Language code -> language name; one builder config is created per code.
_DESCRIPTIONS = {
    "as": "Assamese",
    "bd": "Bodo",
    "bn": "Bengali",
    "dg": "Dogri",
    "en": "English",
    "gom": "Konkani",
    "gu": "Gujarati",
    "hi": "Hindi",
    "kha": "Khasi",
    "kn": "Kannada",
    "ks": "Kashmiri",
    "mai": "Maithili",
    "ml": "Malayalam",
    "mni": "Manipuri",
    "mr": "Marathi",
    "ne": "Nepali",
    "or": "Odia",
    "pa": "Punjabi",
    "sa": "Sanskrit",
    "sat": "Santali",
    "sd": "Sindhi",
    "ta": "Tamil",
    "te": "Telugu",
    "ur": "Urdu",
}

# Each language is a single plain-text file, one example per line.
_URL = "https://objectstore.e2enetworks.net/ai4b-public-nlu-nlg/indic-corp-frozen-for-the-paper-oct-2022/{language}.txt"

_VERSION = datasets.Version("2.0.0", "Second version of IndicCorp")


class IndicCorpv2(datasets.GeneratorBasedBuilder):
    """IndicCorpV2 dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            description=f"IndicCorpv2 for {lang}",
            version=_VERSION,
        )
        for lang in _DESCRIPTIONS
    ]

    def _info(self):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_INDIC_CORPV2_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://github.com/AI4Bharat/IndicBERT/tree/main#indiccorp-v2",
            citation=_INDIC_CORPV2_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        language = self.config.name
        splits = {datasets.Split.TRAIN: "train"}
        data_urls = {
            split: _URL.format(language=language) for split in splits.values()
        }
        download_paths = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(
                name=split,
                # Index with the string key used in `data_urls`, not the
                # NamedSplit object itself.
                gen_kwargs={"filepath": download_paths[splits[split]]},
            )
            for split in splits
        ]

    def _generate_examples(self, filepath):
        """Yields examples, one per line of the corpus file."""
        with open(filepath, encoding="utf-8") as f:
            for idx, row in enumerate(f):
                yield idx, {"text": row.strip()}
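
For reference, a minimal usage sketch of the script above. The repository id "satpalsr/indicCorpv2" is assumed from the commit author and may differ; streaming is used because the per-language corpus files are large, and recent `datasets` releases additionally require `trust_remote_code=True` for script-based datasets.

# Minimal usage sketch (assumption: the script is hosted at the Hub repo
# "satpalsr/indicCorpv2"; adjust the id to wherever it actually lives).
from datasets import load_dataset

# Stream the Hindi config so the full corpus file is not downloaded upfront.
ds = load_dataset("satpalsr/indicCorpv2", "hi", split="train", streaming=True)

for example in ds:
    print(example["text"])  # each row is {"text": "..."}, one line of corpus
    break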