---
language:
- bn
- en
- gu
- hi
- kn
- ta
- ur
license: cc-by-3.0
size_categories:
- 1M<n<10M
task_categories:
- text-generation
- fill-mask
task_ids:
- language-modeling
- masked-language-modeling
configs:
- config_name: 20231101.bn
  data_files:
  - split: train
    path: ben_Beng/train-*
- config_name: 20231101.en
  data_files:
  - split: train
    path: eng_Latn/train-*
- config_name: 20231101.gu
  data_files:
  - split: train
    path: guj_Gujr/train-*
- config_name: 20231101.hi
  data_files:
  - split: train
    path: hin_Deva/train-*
- config_name: 20231101.kn
  data_files:
  - split: train
    path: kan_Knda/train-*
- config_name: 20231101.ta
  data_files:
  - split: train
    path: tam_Taml/train-*
- config_name: 20231101.ur
  data_files:
  - split: train
    path: urd_Arab/train-*
dataset_info:
- config_name: 20231101.bn
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  - name: sents
    dtype: int32
  - name: chars
    dtype: int32
  - name: words
    dtype: int32
  - name: tokens
    dtype: int32
  splits:
  - name: train
    num_bytes: 674539757
    num_examples: 200820
  download_size: 652782434
  dataset_size: 652782434
- config_name: 20231101.en
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  - name: sents
    dtype: int32
  - name: chars
    dtype: int32
  - name: words
    dtype: int32
  - name: tokens
    dtype: int32
  splits:
  - name: train
    num_bytes: 703955598
    num_examples: 200820
  download_size: 426488108
  dataset_size: 426488108
- config_name: 20231101.gu
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  - name: sents
    dtype: int32
  - name: chars
    dtype: int32
  - name: words
    dtype: int32
  - name: tokens
    dtype: int32
  splits:
  - name: train
    num_bytes: 668666407
    num_examples: 200820
  download_size: 658661502
  dataset_size: 658661502
- config_name: 20231101.hi
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  - name: sents
    dtype: int32
  - name: chars
    dtype: int32
  - name: words
    dtype: int32
  - name: tokens
    dtype: int32
  splits:
  - name: train
    num_bytes: 678769726
    num_examples: 200820
  download_size: 640983312
  dataset_size: 640983312
- config_name: 20231101.kn
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  - name: sents
    dtype: int32
  - name: chars
    dtype: int32
  - name: words
    dtype: int32
  - name: tokens
    dtype: int32
  splits:
  - name: train
    num_bytes: 708769566
    num_examples: 200820
  download_size: 689888426
  dataset_size: 689888426
- config_name: 20231101.ta
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  - name: sents
    dtype: int32
  - name: chars
    dtype: int32
  - name: words
    dtype: int32
  - name: tokens
    dtype: int32
  splits:
  - name: train
    num_bytes: 781041863
    num_examples: 200820
  download_size: 721062888
  dataset_size: 721062888
- config_name: 20231101.ur
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  - name: sents
    dtype: int32
  - name: chars
    dtype: int32
  - name: words
    dtype: int32
  - name: tokens
    dtype: int32
  splits:
  - name: train
    num_bytes: 655510379
    num_examples: 200820
  download_size: 543259766
  dataset_size: 543259766
---

# Bhasha Wiki Indic 

<!-- Provide a quick summary of the dataset. -->
This dataset contains Wikipedia articles pertaining to the Indian context.
## Dataset Details

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->
The dataset is built from Wikipedia articles taken from [wikimedia/wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia).
We filtered, cleaned, and translated English articles related to India and the Indian context out of the full dataset.

Each example contains the full cleaned text of a Wikipedia article and its translations into 6 Indian languages.


- **Curated by:** [Soket AI Labs](https://soket.ai/)
- **Language(s) (NLP):** [English, Hindi, Bengali, Gujarati, Tamil, Kannada, Urdu]
- **License:** [cc-by-sa-3.0]

## Uses

<!-- Address questions around how the dataset is intended to be used. -->
The dataset is focused on Indian factual content for pre-training LLMs where Indian knowledge and contextual understanding are required.

## Dataset Structure

<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
Total number of rows: 200,820

The dataset contains approximately **1.56 billion** tokens across all languages. The per-language token counts are roughly equal when tokenized with the Indic tokenizer we created, which can be found in our model repository [Pragna-1b](https://huggingface.co/soketlabs/pragna-1b).
Here are the token counts for each language:
- English: 197.7 million
- Hindi: 227.5 million
- Bengali: 289.1 million
- Gujarati: 206.2 million
- Tamil: 233.8 million
- Kannada: 203.5 million
- Urdu: 207 million
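
For a rough reproduction of these counts, the sketch below tokenizes a text with that tokenizer. It assumes the tokenizer is published in the Pragna-1b repository linked above:

```python
from transformers import AutoTokenizer

# Repo id taken from the Pragna-1b link above; assumes the tokenizer
# is published alongside the model.
tok = AutoTokenizer.from_pretrained("soketlabs/pragna-1b")

def count_tokens(text: str) -> int:
    # add_special_tokens=False counts only content tokens
    return len(tok.encode(text, add_special_tokens=False))

print(count_tokens("यह एक नमूना है"))
```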

Each row corresponds to a Wikipedia article, with the article's description in the source language (English) and translations in 6 Indian languages.
The title is in English, and each description column is named in the format "language_code"_"script".
Each description column holds a list of sentences (or multi-sentence chunks) which can be concatenated to recover the cleaned article description.

Each row is of the format:
```yaml
{'id': '1',
 'url': 'https://simple.wikipedia.org/sample_article',
 'title': 'Sample article',
 'eng_Latn': ['This is a sample...', 'and more information'],
 'hin_Deva': ['यह एक नमूना है...', 'और अधिक जानकारी'],
 'kan_Knda': ['ಇದು ಒಂದು ಮಾದರಿ...', 'ಮತ್ತು ಹೆಚ್ಚಿನ ಮಾಹಿತಿ'],
 'ben_Beng': ['এটি একটি নমুনা...', 'এবং আরও তথ্য'],
 'guj_Gujr': ['આ એક નમૂનો છે...', 'અને વધુ માહિતી'],
 'tam_Taml': ['இது ஒரு மாதிரி...', 'மேலும் தகவல்'],
 'urd_Arab': ['...یہ ایک نمونہ ہے۔', 'اور مزید معلومات']
}
```
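
A minimal loading sketch with the `datasets` library is shown below. The config name `20231101.hi` comes from the metadata above, while the repo id `soketlabs/bhasha-wiki-indic` is an assumption inferred from this card's links; adjust it to the actual dataset path:

```python
from datasets import load_dataset

# Hypothetical repo id inferred from this card; adjust to the actual path.
ds = load_dataset("soketlabs/bhasha-wiki-indic", "20231101.hi", split="train")

row = ds[0]
print(row["id"], row["title"], row["url"])

# Per-language configs expose the article under `text` (see the features in
# the metadata above). With the multi-column layout shown in the example row,
# join the sentence list instead, e.g. " ".join(row["hin_Deva"]).
print(row["text"][:300])
```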


## Dataset Creation

### Curation Rationale

<!-- Motivation for the creation of this dataset. -->
We needed to induce knowledge of India and the Indian context while training our LLM, so we gathered available Indic
content and also filtered factual data from Wikipedia.


### Source Data

<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
Wikipedia English articles from [wikimedia/wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia).

#### Data Collection and Processing

<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
We filtered Indian-context data out of the English articles in the [wikimedia/wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia) dataset using a set of keywords.
We then trained a few-shot classification model to distinguish Indian from non-Indian content, further narrowing down the filtered English
articles.
We cleaned the articles and removed unwanted sections such as References.
Finally, we translated the articles into 6 Indian languages (Hindi, Bengali, Gujarati, Tamil, Kannada, Urdu) using AI4Bharat's [IndicTrans2](https://huggingface.co/ai4bharat/indictrans2-en-indic-1B). The dataset has been cleaned and can be used for pre-training multilingual LLMs.
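
For illustration, here is a minimal sketch of the first, keyword-based pass. The keyword list, threshold, and function name are hypothetical stand-ins rather than the exact ones we used:

```python
import re

# Hypothetical keyword list; the real filter used a much larger set of terms.
INDIA_KEYWORDS = ["india", "indian", "delhi", "mumbai", "bengal", "tamil nadu"]
PATTERN = re.compile(r"\b(" + "|".join(INDIA_KEYWORDS) + r")\b", re.IGNORECASE)

def looks_indian(article_text: str, min_hits: int = 2) -> bool:
    """Cheap first pass; borderline articles go to the few-shot classifier."""
    return len(PATTERN.findall(article_text)) >= min_hits

print(looks_indian("The Ganges flows through northern India into West Bengal."))
```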




### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Although we tried to filter Indic-context articles with as high recall as possible, some non-Indic articles may be mixed in as well.


### Citation Information

```
@ONLINE{bhasha-wiki-indic,
    author = "Soket Labs Technology and Research Private Limited",
    title  = "Bhasha-Wiki-Indic",
    url    = "https://soket.ai"
}
```