---
annotations_creators:
- no-annotation
language_creators:
- crowdsourced
language:
- ace
- ban
- bjn
- bug
- gor
- km
- id
- jv
- lo
- mad
- mnw
- min
- ms
- my
- nia
- shn
- su
- tet
- th
- vi
license:
- cc-by-sa-3.0
- gfdl
multilinguality:
- multilingual
source_datasets:
- Wikipedia-HF
task_categories:
- text-generation
- fill-mask
task_ids:
- language-modeling
- masked-language-modeling
pretty_name: Wikipedia Archive for SEA Languages
tags:
- Wikipedia
- Southeast Asia (SEA)
- Dialect
- Banyumasan Dialect of Javanese (Ngapak)
- SEA-related Languages
- SEA Local Languages
dataset_info:
- config_name: seawiki_all
features:
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
splits:
- name: ace
num_bytes: 4952102
num_examples: 13003
- name: ban
num_bytes: 18198909
num_examples: 20987
- name: bjn
num_bytes: 6792259
num_examples: 10519
- name: bug
num_bytes: 3298561
num_examples: 15880
- name: gor
num_bytes: 6239133
num_examples: 15359
- name: id
num_bytes: 1118834498
num_examples: 665622
- name: jv
num_bytes: 72101470
num_examples: 73380
- name: km
num_bytes: 103146669
num_examples: 11994
- name: lo
num_bytes: 15240262
num_examples: 5014
- name: mad
num_bytes: 1612542
num_examples: 1192
- name: map_bms
num_bytes: 5221506
num_examples: 13580
- name: min
num_bytes: 116824020
num_examples: 227143
- name: mnw
num_bytes: 47321734
num_examples: 3296
- name: ms
num_bytes: 419662356
num_examples: 368628
- name: my
num_bytes: 313370839
num_examples: 109310
- name: nia
num_bytes: 2153274
num_examples: 1714
- name: shn
num_bytes: 33754296
num_examples: 13945
- name: su
num_bytes: 47516268
num_examples: 61555
- name: tet
num_bytes: 1454499
num_examples: 1468
- name: th
num_bytes: 1012930269
num_examples: 159719
- name: vi
num_bytes: 1603057632
num_examples: 1288680
download_size: 4959860254
dataset_size: 4953683098
- config_name: seawiki_dedup_all
features:
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
splits:
- name: ace
num_bytes: 4944916
num_examples: 12979
- name: ban
num_bytes: 18025267
num_examples: 20611
- name: bjn
num_bytes: 6786207
num_examples: 10503
- name: bug
num_bytes: 2182435
num_examples: 9969
- name: gor
num_bytes: 6217480
num_examples: 15290
- name: id
num_bytes: 1117891512
num_examples: 662443
- name: jv
num_bytes: 71997517
num_examples: 73080
- name: km
num_bytes: 102698901
num_examples: 11466
- name: lo
num_bytes: 14908444
num_examples: 4897
- name: mad
num_bytes: 1612542
num_examples: 1192
- name: map_bms
num_bytes: 5067489
num_examples: 11839
- name: min
num_bytes: 116721269
num_examples: 225972
- name: mnw
num_bytes: 47243333
num_examples: 3271
- name: ms
num_bytes: 414783365
num_examples: 348045
- name: my
num_bytes: 312990457
num_examples: 108819
- name: nia
num_bytes: 2153274
num_examples: 1714
- name: shn
num_bytes: 33616591
num_examples: 13662
- name: su
num_bytes: 47512744
num_examples: 61529
- name: tet
num_bytes: 1452151
num_examples: 1464
- name: th
num_bytes: 1012868861
num_examples: 159666
- name: vi
num_bytes: 1602828123
num_examples: 1287910
download_size: 4950689052
dataset_size: 4944502878
- config_name: seawiki_with_countries_all
features:
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
splits:
- name: idn_ace
num_bytes: 4952102
num_examples: 13003
- name: idn_ban
num_bytes: 18198909
num_examples: 20987
- name: idn_bjn
num_bytes: 6792259
num_examples: 10519
- name: idn_bug
num_bytes: 3298561
num_examples: 15880
- name: idn_gor
num_bytes: 6239133
num_examples: 15359
- name: idn_id
num_bytes: 1118834498
num_examples: 665622
- name: idn_jv
num_bytes: 72101470
num_examples: 73380
- name: idn_mad
num_bytes: 1612542
num_examples: 1192
- name: idn_map_bms
num_bytes: 5221506
num_examples: 13580
- name: idn_min
num_bytes: 116824020
num_examples: 227143
- name: idn_ms
num_bytes: 419662356
num_examples: 368628
- name: idn_nia
num_bytes: 2153274
num_examples: 1714
- name: idn_su
num_bytes: 47516268
num_examples: 61555
- name: idn_tet
num_bytes: 1454499
num_examples: 1468
- name: sgp_ms
num_bytes: 419662356
num_examples: 368628
- name: mys_ms
num_bytes: 419662356
num_examples: 368628
- name: brn_ms
num_bytes: 419662356
num_examples: 368628
- name: tha_th
num_bytes: 1012930269
num_examples: 159719
- name: mmr_my
num_bytes: 313370839
num_examples: 109310
- name: mmr_shn
num_bytes: 33754296
num_examples: 13945
- name: mmr_mnw
num_bytes: 47321734
num_examples: 3296
- name: lao_lo
num_bytes: 15240262
num_examples: 5014
- name: vnm_vi
num_bytes: 1603057632
num_examples: 1288680
- name: khm_km
num_bytes: 103146669
num_examples: 11994
- name: tls_tet
num_bytes: 1454499
num_examples: 1468
download_size: 4959860254
dataset_size: 6214124665
- config_name: seawiki_with_countries_dedup_all
features:
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
splits:
- name: idn_ace
num_bytes: 4944916
num_examples: 12979
- name: idn_ban
num_bytes: 18025267
num_examples: 20611
- name: idn_bjn
num_bytes: 6786207
num_examples: 10503
- name: idn_bug
num_bytes: 2182435
num_examples: 9969
- name: idn_gor
num_bytes: 6217480
num_examples: 15290
- name: idn_id
num_bytes: 1117891512
num_examples: 662443
- name: idn_jv
num_bytes: 71997517
num_examples: 73080
- name: idn_mad
num_bytes: 1612542
num_examples: 1192
- name: idn_map_bms
num_bytes: 5067489
num_examples: 11839
- name: idn_min
num_bytes: 116721269
num_examples: 225972
- name: idn_ms
num_bytes: 414783365
num_examples: 348045
- name: idn_nia
num_bytes: 2153274
num_examples: 1714
- name: idn_su
num_bytes: 47512744
num_examples: 61529
- name: idn_tet
num_bytes: 1452151
num_examples: 1464
- name: sgp_ms
num_bytes: 414783365
num_examples: 348045
- name: mys_ms
num_bytes: 414783365
num_examples: 348045
- name: brn_ms
num_bytes: 414783365
num_examples: 348045
- name: tha_th
num_bytes: 1012868861
num_examples: 159666
- name: mmr_my
num_bytes: 312990457
num_examples: 108819
- name: mmr_shn
num_bytes: 33616591
num_examples: 13662
- name: mmr_mnw
num_bytes: 47243333
num_examples: 3271
- name: lao_lo
num_bytes: 14908444
num_examples: 4897
- name: vnm_vi
num_bytes: 1602828123
num_examples: 1287910
- name: khm_km
num_bytes: 102698901
num_examples: 11466
- name: tls_tet
num_bytes: 1452151
num_examples: 1464
download_size: 4950689052
dataset_size: 6190305124
---
# **SEA Wikipedia Data Repository**
Welcome to the SEA Wikipedia Data Repository. The datasets are extracted from [Wikipedia HF](https://huggingface.co/datasets/wikipedia) and processed using the scripts available in this repository for reproducibility purposes.
# Getting Started #
### To read the datasets directly ###
Use one of the following code chunks to load the dataset from the Hugging Face Hub.
You can pass the config name as the second argument, as in the following script:
```
from datasets import load_dataset

dataset = load_dataset(
    "sabilmakbar/sea_wiki",
    "seawiki_dedup_all"  # config name; can be "seawiki_all", "seawiki_dedup_all", "seawiki_with_countries_all", or "seawiki_with_countries_dedup_all"; defaults to "seawiki_dedup_all"
)
```
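If you prefer to list the available config names programmatically instead of copying them from the comment above, the ```datasets``` library offers a helper for that (a minimal sketch; because this repository ships a loading script, recent versions of ```datasets``` may additionally require ```trust_remote_code=True```):
```
from datasets import get_dataset_config_names

# List every config exposed by this repository's loading script.
configs = get_dataset_config_names("sabilmakbar/sea_wiki")
print(configs)  # e.g. ["seawiki_all", "seawiki_dedup_all", ...]
```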
Or you can provide both ```lang``` and ```date_stamp``` (or only ```lang```, in which case ```date_stamp``` defaults to the newest available dump):
```
dataset = load_dataset(
    "sabilmakbar/sea_wiki",
    lang="id",  # see the language table below for complete lang choices
    date_stamp="20230901"
)
```
Or you can provide a ```country``` param in a similar fashion to the ```lang``` arg (if both ```country``` and ```lang``` are provided, ```lang``` takes priority):
```
dataset = load_dataset(
    "sabilmakbar/sea_wiki",
    country="idn",  # see the country table below for complete country choices
    date_stamp="20230901"
)
```
# **FAQs**
### What languages are available in this dataset, and in which countries are they spoken?
You may check the following tables to understand the current coverage of this dataset (languages, countries, data size & volume).
#### 1. Table of Countries and their Country Codes
| Country Code | Country Name | Wiki Info |
| :---: | :---: | :---: |
| brn | Brunei | [Wiki Link](https://en.wikipedia.org/wiki/Brunei) |
| idn | Indonesia | [Wiki Link](https://en.wikipedia.org/wiki/Indonesia) |
| khm | Cambodia | [Wiki Link](https://en.wikipedia.org/wiki/Cambodia) |
| lao | Laos | [Wiki Link](https://en.wikipedia.org/wiki/Laos) |
| mmr | Myanmar | [Wiki Link](https://en.wikipedia.org/wiki/Myanmar) |
| mys | Malaysia | [Wiki Link](https://en.wikipedia.org/wiki/Malaysia) |
| sgp | Singapore | [Wiki Link](https://en.wikipedia.org/wiki/Singapore) |
| tha | Thailand | [Wiki Link](https://en.wikipedia.org/wiki/Thailand) |
| tls | East Timor | [Wiki Link](https://en.wikipedia.org/wiki/East_Timor) |
| vnm | Vietnam | [Wiki Link](https://en.wikipedia.org/wiki/Vietnam) |
#### 2. Table of Languages and the Countries of their Speakers
| Lang Code | Lang Name | Country Codes Spoken | Wiki Info | Total Articles | Total Size (MiB, rounded) |
| :---: | :---: | :---: | :--- | ---: | ---: |
| ace | Acehnese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Acehnese_language) | 12904 | 4.64 |
| ban | Balinese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Balinese_language) | 19837 | 16.56 |
| bjn | Banjarese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Banjarese_language) | 10437 | 6.35 |
| bug | Buginese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Buginese_language) | 9793 | 1.98 |
| gor | Gorontalo | idn | [Wiki Link](https://en.wikipedia.org/wiki/Gorontalo_language) | 14514 | 5.71 |
| km | Khmer | khm | [Wiki Link](https://en.wikipedia.org/wiki/Khmer_language) | 11994 | 98.37 |
| id | Indonesian | idn | [Wiki Link](https://en.wikipedia.org/wiki/Indonesian_language) | 654287 | 1049.93 |
| jv | Javanese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Javanese_language) | 72667 | 66.54 |
| lo | Lao | lao | [Wiki Link](https://en.wikipedia.org/wiki/Lao_language) | 5014 | 14.53 |
| mad | Madurese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Madurese_language) | 1192 | 1.54 |
| map_bms | Banyumasan <br>(Dialect of Javanese) | idn | [Wiki Link](https://en.wikipedia.org/wiki/Banyumasan_dialect) | 11832 | 4.83 |
| mnw | Mon | mmr | [Wiki Link](https://en.wikipedia.org/wiki/Mon_language) | 3296 | 45.13 |
| min | Minangkabau | idn | [Wiki Link](https://en.wikipedia.org/wiki/Minangkabau_language) | 225858 | 110.99 |
| ms | Malay | mys, sgp, brn, idn | [Wiki Link](https://en.wikipedia.org/wiki/Malay_language) | 346186 | 391.43 |
| my | Burmese | mmr | [Wiki Link](https://en.wikipedia.org/wiki/Burmese_language) | 109310 | 298.85 |
| nia | Nias | idn | [Wiki Link](https://en.wikipedia.org/wiki/Nias_language) | 1650 | 1.85 |
| shn | Shan | mmr | [Wiki Link](https://en.wikipedia.org/wiki/Shan_language) | 13945 | 32.19 |
| su | Sundanese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Sundanese_language) | 61494 | 45.21 |
| tet | Tetum | tls, idn | [Wiki Link](https://en.wikipedia.org/wiki/Tetum_language) | 1465 | 1.39 |
| th | Thai | tha | [Wiki Link](https://en.wikipedia.org/wiki/Thai_language) | 159719 | 966.00 |
| vi | Vietnamese | vnm | [Wiki Link](https://en.wikipedia.org/wiki/Vietnamese_language) | 1288680 | 1528.79 |
#### 3. Table of Token Statistics for Covered Languages
The token statistics are generated with ```tiktoken``` using the GPT-4 encoder. A minimal sketch of how such statistics can be computed is shown after the table.
| Lang Code | Total Tokens | Avg Tokens per Article | Min Tokens | Max Tokens | Token Deciles |
| :---: | ---: | ---: | ---: | ---: | :--- |
| ace | 1,370,829 | 105.61899992295247 | 3 | 9,659 | [38.0, 52.0, 54.0, 69.0, 76.0, 84.0, 90.0, 123.0, 126.0] |
| ban | 5,924,610 | 287.44893503469024 | 5 | 24,364 | [97.0, 144.0, 165.0, 187.0, 209.0, 245.0, 276.0, 315.0, 421.0] |
| bjn | 1,935,505 | 184.28115776444827 | 2 | 30,170 | [36.0, 38.0, 39.0, 40.0, 42.0, 51.0, 82.0, 151.0, 367.0] |
| bug | 553,693 | 55.54147858360919 | 1 | 13,951 | [31.0, 42.0, 43.0, 46.0, 48.0, 50.0, 52.0, 55.0, 57.0] |
| gor | 1,575,766 | 103.05860039241334 | 2 | 5,525 | [55.0, 58.0, 60.0, 62.0, 64.0, 66.0, 69.0, 75.0, 96.0] |
| id | 325,411,713 | 491.22975561670967 | 1 | 198,597 | [54.0, 93.0, 123.0, 145.0, 180.0, 226.0, 332.0, 543.0, 1068.0] |
| jv | 23,528,314 | 321.95284619594963 | 2 | 342,156 | [48.0, 60.0, 75.0, 88.0, 117.0, 175.0, 270.0, 420.0, 772.0] |
| km | 54,559,721 | 4,758.391854177568 | 1 | 1,110,771 | [160.0, 293.0, 452.0, 693.0, 1032.0, 1609.0, 2644.0, 4745.0, 9607.0] |
| lo | 9,395,636 | 1,918.6514192362672 | 3 | 107,154 | [134.0, 184.2, 285.0, 494.0, 658.0, 894.6, 1258.0, 1971.2, 4153.8] |
| mad | 611,736 | 513.2013422818792 | 14 | 17,093 | [80.1, 110.2, 135.0, 161.0, 194.0, 242.0, 302.7, 531.4, 1167.1] |
| map_bms | 1,307,244 | 110.41844750401216 | 1 | 20,629 | [20.0, 21.0, 22.0, 24.0, 30.0, 35.0, 36.0, 38.0, 111.0] |
| min | 33,114,184 | 146.54109358681606 | 3 | 58,387 | [81.0, 91.0, 96.0, 108.0, 119.0, 135.0, 156.0, 168.0, 170.0] |
| mnw | 31,595,647 | 9,659.3234484867 | 6 | 1,450,765 | [425.0, 601.0, 629.0, 682.0, 763.0, 2103.0, 4255.0, 7724.0, 14517.0] |
| ms | 121,343,673 | 348.64363228892813 | 1 | 68,545 | [32.0, 40.0, 49.0, 63.0, 105.0, 138.0, 216.0, 362.0, 788.0] |
| my | 189,439,447 | 1,740.8673761015998 | 10 | 1,376,658 | [164.0, 269.0, 350.0, 508.0, 559.0, 578.0, 605.0, 892.4, 3369.0] |
| nia | 795,527 | 464.134772462077 | 8 | 18,650 | [59.0, 61.0, 63.0, 65.0, 67.0, 86.0, 239.1, 623.4, 1249.7] |
| shn | 23,125,637 | 1,692.6977748499487 | 2 | 204,094 | [460.0, 480.0, 585.0, 679.0, 715.0, 740.0, 756.0, 780.0, 1580.9] |
| su | 14,710,124 | 239.07627297697022 | 1 | 99,456 | [41.0, 43.0, 45.0, 49.0, 70.0, 146.0, 216.0, 219.0, 419.0] |
| tet | 487,016 | 332.6612021857924 | 4 | 24,287 | [30.3, 47.0, 66.9, 101.0, 164.0, 177.0, 187.0, 248.6, 604.4] |
| th | 330,964,733 | 2,072.8566695476807 | 1 | 289,150 | [231.0, 390.0, 546.0, 727.0, 969.0, 1276.0, 1741.0, 2533.0, 4361.0] |
| vi | 546,481,258 | 424.3163404275143 | 3 | 246,463 | [46.0, 64.0, 71.0, 80.0, 86.0, 92.0, 120.0, 240.0, 824.0] |
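The sketch below illustrates how statistics like the ones above can be computed. It is not the exact script used in this repository; it assumes the GPT-4 encoding from ```tiktoken``` and the per-language split naming of the ```seawiki_dedup_all``` config:
```
import numpy as np
import tiktoken
from datasets import load_dataset

enc = tiktoken.encoding_for_model("gpt-4")  # resolves to the cl100k_base encoding

# Load one language split (here "ace") from the deduplicated config.
articles = load_dataset("sabilmakbar/sea_wiki", "seawiki_dedup_all")["ace"]
token_counts = np.array([len(enc.encode(article["text"])) for article in articles])

print("total tokens:", token_counts.sum())
print("avg tokens per article:", token_counts.mean())
print("min/max tokens:", token_counts.min(), token_counts.max())
print("token deciles:", np.percentile(token_counts, list(range(10, 100, 10))).tolist())
```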
Some other SEA languages that already have a Wiki index at Wikimedia may be missing from this list. Any PR adding a language is greatly appreciated!
### How is the data preprocessed? What makes it different from loading it directly from Wikipedia HF?
The data available here is processed with the following flow:
1. The raw data is deduplicated on ```title``` and ```text``` (the text content of a given article) to remove articles containing boilerplate text (template text typically used for missing information or for requesting contributions to an article), which is usually considered noise in NLP data.
2. Furthermore, the ```title``` and ```text``` data are checked for string-matching duplication after light pre-processing (symbols removed, HTML tags stripped, and ASCII/UTF-8 characters validated). You may check the [```dedup_raw_wiki_data.py```](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/dedup_raw_wiki_data.py) script to understand its implementation; an illustrative sketch follows below.
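As an illustration only (the authoritative logic lives in ```dedup_raw_wiki_data.py```), the two deduplication passes described above amount to something like the following, assuming the raw extraction is a pandas DataFrame with ```title``` and ```text``` columns:
```
import re
import pandas as pd

def normalize(text: str) -> str:
    # Strip HTML tags, drop symbols, and collapse whitespace so that
    # near-identical articles compare equal on a plain string match.
    text = re.sub(r"<[^>]+>", " ", text)
    text = re.sub(r"[^\w\s]", " ", text, flags=re.UNICODE)
    return re.sub(r"\s+", " ", text).strip().lower()

def dedup_articles(df: pd.DataFrame) -> pd.DataFrame:
    # 1. Exact deduplication on title and text.
    df = df.drop_duplicates(subset=["title", "text"])
    # 2. String-matching deduplication on the normalized text.
    df = df.assign(_norm_text=df["text"].map(normalize))
    return df.drop_duplicates(subset=["_norm_text"]).drop(columns="_norm_text")
```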
### How do I extract a new Wikipedia dataset of SEA languages?
You may check the script [_```extract_raw_wiki_data.py```_](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/extract_raw_wiki_data.py) to understand its implementation, or you can adjust the bash script provided in [_```extract_raw_wiki_data_sea.sh```_](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/extract_raw_wiki_data_sea.sh) to run the extraction on your own.
### Where can I check the latest available dumps and language coverage?
You may visit this [Wikipedia Dump Index](https://dumps.wikimedia.org/backup-index.html) to check the latest available data, and this [Wikipedia Language Coverage](https://meta.wikimedia.org/wiki/List_of_Wikipedias#All_Wikipedias_ordered_by_number_of_articles) page to map it to any languages you want to extract. Please note that this dataset is extensible to any languages of your choice.
### To replicate the whole dataset generation process ###
1. Set up a new Python/Conda environment (recommended Python version: 3.9.6 to 3.9.18 or 3.10.0 to 3.10.13) and install the requirements of this codebase via ```pip install -r requirements.txt```.
2. Activate the chosen Python/Conda environment in which the requirements were installed.
3. Force-install ```multiprocess==0.70.15``` using ```pip install multiprocess==0.70.15``` to avoid [this issue](https://github.com/huggingface/datasets/issues/5613#issuecomment-1703169594) (there is no other workaround for now).
4. Run the ```sh``` script for extraction from Wikipedia HF using ```sh extract_raw_wiki_data_sea.sh```.<br>
This script runs [_```extract_raw_wiki_data.py```_](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/extract_raw_wiki_data.py) to construct the Wiki dataset (a simplified sketch of this step is shown after this list).
5. Run the ```sh``` script for deduplication of the data extracted in step 4 using ```sh dedup_raw_wiki_data_sea.sh```.<br>
This script runs [_```dedup_raw_wiki_data.py```_](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/dedup_raw_wiki_data.py) to perform Wiki dataset cleansing. Please note that the cleansing process can be language/dialect-specific.
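For a rough idea of what the extraction step does, here is a simplified sketch (not the actual implementation in ```extract_raw_wiki_data.py```; the language subset and dump date below are illustrative). Each language dump is pulled via the Wikipedia HF loader and saved to disk:
```
from datasets import load_dataset

sea_langs = ["ace", "ban", "id", "jv"]  # illustrative subset; see the language table above
date_stamp = "20231101"                 # illustrative dump date

for lang in sea_langs:
    # Pull the per-language dump via the Wikipedia HF loader and save it as CSV.
    wiki = load_dataset(
        "wikipedia", language=lang, date=date_stamp,
        beam_runner="DirectRunner", split="train")
    wiki.to_csv(f"raw_wiki_data_{lang}.csv", index=False)
```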
## Citation Info:
```
@ONLINE{wikidump,
author = "Wikimedia Foundation",
title = "Wikimedia Downloads",
url = "https://dumps.wikimedia.org"}
@ONLINE{wikipedia-hf,
title = "Huggingface Wikipedia Dataset",
url = "https://huggingface.co/datasets/wikipedia"}
```