sabilmakbar committed on
Commit
5fdddf9
1 Parent(s): a91c8a9

Update the data w/ compressed files to speed up the download process and reduce download size (#6)


- Update data and lfs tracker (dae695eb27dbf0ef7f14ac24b69deb8545cd00d3)
- Update dset loader and README (444ec85a46f9483636b739c5da57af19d334e11b)
- Delete moved files into GH repo (cb3b2e7896a4b45648266b2cd541aed6695a5de2)
- Slightly reformat count_data_stats.py (bf3791d47b40f801a4b3700f20b989af5d1ace9d)
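
The compression step itself is not part of this view; as a rough illustration (an assumption, not code from this commit), the per-language CSVs under `sea_wiki_dedup_data/` could be gzip-compressed with a few lines of Python before being re-uploaded:

```python
# Hypothetical helper: gzip each dedup CSV; the glob pattern mirrors the
# file naming visible in the file list below.
import glob
import gzip
import shutil

for csv_path in glob.glob("sea_wiki_dedup_data/*_dataset_dedup_cleansed.csv"):
    with open(csv_path, "rb") as src, gzip.open(csv_path + ".gz", "wb") as dst:
        shutil.copyfileobj(src, dst)
```

The payoff is visible in the LFS pointers further down: for example, the Cebuano dump shrinks from roughly 4.35 GB (`.csv`) to about 344 MB (`.csv.gz`).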

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +2 -1
  2. README.md +53 -52
  3. count_data_stats.py +1 -1
  4. dedup_raw_wiki_data.py +0 -414
  5. dedup_raw_wiki_data_sea.sh +0 -62
  6. extract_raw_wiki_data.py +0 -73
  7. extract_raw_wiki_data_batched.py +0 -87
  8. extract_raw_wiki_data_sea.sh +0 -38
  9. sea_wiki.py +18 -5
  10. sea_wiki_dedup_data/{wiki_pag_20231101_dataset_dedup_cleansed.csv → wiki_ace_20231101_dataset_dedup_cleansed.csv.gz} +2 -2
  11. sea_wiki_dedup_data/{wiki_ace_20231101_dataset_dedup_cleansed.csv → wiki_ban_20231101_dataset_dedup_cleansed.csv.gz} +2 -2
  12. sea_wiki_dedup_data/{wiki_bug_20231101_dataset_dedup_cleansed.csv → wiki_bcl_20231101_dataset_dedup_cleansed.csv.gz} +2 -2
  13. sea_wiki_dedup_data/{wiki_cbk-zam_20231101_dataset_dedup_cleansed.csv → wiki_bjn_20231101_dataset_dedup_cleansed.csv.gz} +2 -2
  14. sea_wiki_dedup_data/{wiki_bjn_20231101_dataset_dedup_cleansed.csv → wiki_bug_20231101_dataset_dedup_cleansed.csv.gz} +2 -2
  15. sea_wiki_dedup_data/wiki_cbk-zam_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  16. sea_wiki_dedup_data/wiki_ceb_20231101_dataset_dedup_cleansed.csv +0 -3
  17. sea_wiki_dedup_data/wiki_ceb_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  18. sea_wiki_dedup_data/wiki_gor_20231101_dataset_dedup_cleansed.csv +0 -3
  19. sea_wiki_dedup_data/wiki_gor_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  20. sea_wiki_dedup_data/wiki_id_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  21. sea_wiki_dedup_data/wiki_ilo_20231101_dataset_dedup_cleansed.csv +0 -3
  22. sea_wiki_dedup_data/wiki_ilo_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  23. sea_wiki_dedup_data/wiki_jv_20231101_dataset_dedup_cleansed.csv +0 -3
  24. sea_wiki_dedup_data/{wiki_ban_20231101_dataset_dedup_cleansed.csv → wiki_jv_20231101_dataset_dedup_cleansed.csv.gz} +2 -2
  25. sea_wiki_dedup_data/wiki_km_20231101_dataset_dedup_cleansed.csv +0 -3
  26. sea_wiki_dedup_data/{wiki_bcl_20231101_dataset_dedup_cleansed.csv → wiki_km_20231101_dataset_dedup_cleansed.csv.gz} +2 -2
  27. sea_wiki_dedup_data/wiki_lo_20231101_dataset_dedup_cleansed.csv +0 -3
  28. sea_wiki_dedup_data/wiki_lo_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  29. sea_wiki_dedup_data/wiki_mad_20231101_dataset_dedup_cleansed.csv +0 -3
  30. sea_wiki_dedup_data/wiki_mad_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  31. sea_wiki_dedup_data/wiki_map-bms_20231101_dataset_dedup_cleansed.csv +0 -3
  32. sea_wiki_dedup_data/wiki_map-bms_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  33. sea_wiki_dedup_data/wiki_min_20231101_dataset_dedup_cleansed.csv +0 -3
  34. sea_wiki_dedup_data/wiki_min_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  35. sea_wiki_dedup_data/wiki_mnw_20231101_dataset_dedup_cleansed.csv +0 -3
  36. sea_wiki_dedup_data/wiki_mnw_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  37. sea_wiki_dedup_data/wiki_ms_20231101_dataset_dedup_cleansed.csv +0 -3
  38. sea_wiki_dedup_data/wiki_ms_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  39. sea_wiki_dedup_data/wiki_my_20231101_dataset_dedup_cleansed.csv +0 -3
  40. sea_wiki_dedup_data/wiki_my_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  41. sea_wiki_dedup_data/wiki_nia_20231101_dataset_dedup_cleansed.csv +0 -3
  42. sea_wiki_dedup_data/wiki_nia_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  43. sea_wiki_dedup_data/wiki_pag_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  44. sea_wiki_dedup_data/wiki_pam_20231101_dataset_dedup_cleansed.csv +0 -3
  45. sea_wiki_dedup_data/wiki_pam_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  46. sea_wiki_dedup_data/wiki_shn_20231101_dataset_dedup_cleansed.csv +0 -3
  47. sea_wiki_dedup_data/wiki_shn_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  48. sea_wiki_dedup_data/wiki_su_20231101_dataset_dedup_cleansed.csv +0 -3
  49. sea_wiki_dedup_data/wiki_su_20231101_dataset_dedup_cleansed.csv.gz +3 -0
  50. sea_wiki_dedup_data/wiki_ta_20231101_dataset_dedup_cleansed.csv +0 -3
.gitattributes CHANGED
@@ -53,5 +53,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
- # Wiki data files CSV
+ # Wiki data files CSV gz-compressed
  *.csv filter=lfs diff=lfs merge=lfs -text
+ *.csv.gz filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -3,6 +3,36 @@ annotations_creators:
  - no-annotation
  language_creators:
  - crowdsourced
+ language:
+ - ace
+ - ban
+ - bcl
+ - bjn
+ - bug
+ - cbk
+ - ceb
+ - gor
+ - id
+ - ilo
+ - jv
+ - km
+ - lo
+ - mad
+ - min
+ - mnw
+ - ms
+ - my
+ - nia
+ - pag
+ - pam
+ - shn
+ - su
+ - ta
+ - th
+ - tl
+ - tet
+ - vi
+ - war
  license:
  - cc-by-sa-4.0
  multilinguality:
@@ -12,36 +42,6 @@ source_datasets:
  task_categories:
  - text-generation
  - fill-mask
- language:
- - ace
- - ban
- - bcl
- - bjn
- - bug
- - cbk
- - ceb
- - gor
- - id
- - ilo
- - jv
- - km
- - lo
- - mad
- - min
- - mnw
- - ms
- - my
- - nia
- - pag
- - pam
- - shn
- - su
- - ta
- - th
- - tl
- - tet
- - vi
- - war
  task_ids:
  - language-modeling
  - masked-language-modeling
@@ -82,7 +82,7 @@ dataset_info:
  num_bytes: 2033238
  num_examples: 3285
  - name: ceb
- num_bytes: 4572804909
+ num_bytes: 4572804910
  num_examples: 6302896
  - name: gor
  num_bytes: 6239133
@@ -148,13 +148,13 @@ dataset_info:
  num_bytes: 85356818
  num_examples: 45341
  - name: vi
- num_bytes: 1603057632
+ num_bytes: 1603057633
  num_examples: 1288680
  - name: war
  num_bytes: 454304567
  num_examples: 1266394
- download_size: 10940051715
- dataset_size: 10923905689
+ download_size: 1829748651
+ dataset_size: 10923905691
  - config_name: seawiki_dedup_all
  features:
  - name: url
@@ -183,7 +183,7 @@ dataset_info:
  num_bytes: 1579651
  num_examples: 2242
  - name: ceb
- num_bytes: 4346511152
+ num_bytes: 4346511153
  num_examples: 5815254
  - name: gor
  num_bytes: 6217480
@@ -249,13 +249,13 @@ dataset_info:
  num_bytes: 85286023
  num_examples: 45121
  - name: vi
- num_bytes: 1602828123
- num_examples: 1287910
+ num_bytes: 1602830022
+ num_examples: 1287912
  - name: war
  num_bytes: 454266479
  num_examples: 1266204
- download_size: 10701952694
- dataset_size: 10686874347
+ download_size: 1811459996
+ dataset_size: 10686876247
  - config_name: seawiki_with_countries_all
  features:
  - name: url
@@ -353,7 +353,7 @@ dataset_info:
  num_bytes: 1370162
  num_examples: 2665
  - name: phl_ceb
- num_bytes: 4572804909
+ num_bytes: 4572804910
  num_examples: 6302896
  - name: sgp_ms
  num_bytes: 419662356
@@ -374,10 +374,10 @@ dataset_info:
  num_bytes: 1454499
  num_examples: 1468
  - name: vnm_vi
- num_bytes: 1603057632
+ num_bytes: 1603057633
  num_examples: 1288680
- download_size: 10940051715
- dataset_size: 13074580032
+ download_size: 1829748651
+ dataset_size: 13074580034
  - config_name: seawiki_with_countries_dedup_all
  features:
  - name: url
@@ -475,7 +475,7 @@ dataset_info:
  num_bytes: 764869
  num_examples: 1108
  - name: phl_ceb
- num_bytes: 4346511152
+ num_bytes: 4346511153
  num_examples: 5815254
  - name: sgp_ms
  num_bytes: 414783365
@@ -496,10 +496,10 @@ dataset_info:
  num_bytes: 1452151
  num_examples: 1464
  - name: vnm_vi
- num_bytes: 1602828123
- num_examples: 1287910
+ num_bytes: 1602830022
+ num_examples: 1287912
- download_size: 10701952694
- dataset_size: 12822597856
+ download_size: 1811459996
+ dataset_size: 12822599756
  ---

  # **SEA Wikipedia Data Repository**
@@ -582,7 +582,7 @@ You may check the following tables to understand the current coverage of this da
  | tgl | tl | Tagalog | phl | [Wiki Link](https://en.wikipedia.org/wiki/Tagalog_language) | 45121 | 81.34 |
  | tha | th | Thai | tha | [Wiki Link](https://en.wikipedia.org/wiki/Thai_language) | 159666 | 965.95 |
  | tet | tet | Tetum | tls, idn | [Wiki Link](https://en.wikipedia.org/wiki/Tetum_language) | 1464 | 1.38 |
- | vie | vi | Vietnamese | vnm | [Wiki Link](https://en.wikipedia.org/wiki/Vietnamese_language) | 1287910 | 1,528.58 |
+ | vie | vi | Vietnamese | vnm | [Wiki Link](https://en.wikipedia.org/wiki/Vietnamese_language) | 1287912 | 1,528.58 |
  | war | war | Waray | phl | [Wiki Link](https://en.wikipedia.org/wiki/Waray_language) | 1266204 | 433.22 |
  | (dialect) | map_bms | Banyumasan <br>(Dialect of Javanese) | idn | [Wiki Link](https://en.wikipedia.org/wiki/Banyumasan_dialect) | 11839 | 4.83 |

@@ -590,7 +590,7 @@ You may check the following tables to understand the current coverage of this da
  #### 3. Table of Token Statistics for Covered Languages
  The token statistics is generated using ```tiktoken``` using encoder for GPT-4.

- | Lang Code | Total Token | Avg Token per Article | Min Token | Max Token | Token Deciles List |
+ | Dataset Lang Code | Total Token | Avg Token per Article | Min Token | Max Token | Token Deciles List |
  | :---: | ---: | ---: | ---: | ---: | :--- |
  | ace | 1,370,829 | 105.61899992295247 | 3 | 9,659 | [38.0, 52.0, 54.0, 69.0, 76.0, 84.0, 90.0, 123.0, 126.0] |
  | ban | 5,924,610 | 287.44893503469024 | 5 | 24,364 | [97.0, 144.0, 165.0, 187.0, 209.0, 245.0, 276.0, 315.0, 421.0] |
@@ -620,7 +620,7 @@ The token statistics is generated using ```tiktoken``` using encoder for GPT-4.
  | tet | 487,016 | 332.6612021857924 | 4 | 24,287 | [30.3, 47.0, 66.9, 101.0, 164.0, 177.0, 187.0, 248.6, 604.4] |
  | th | 330,964,733 | 2,072.8566695476807 | 1 | 289,150 | [231.0, 390.0, 546.0, 727.0, 969.0, 1276.0, 1741.0, 2533.0, 4361.0] |
  | tl | 27,789,730 | 615.8934864032269 | 7 | 60,728 | [73.0, 116.0, 161.0, 214.0, 281.0, 360.0, 465.0, 666.0, 1136.0] |
- | vi | 546,481,258 | 424.3163404275143 | 3 | 246,463 | [46.0, 64.0, 71.0, 80.0, 86.0, 92.0, 120.0, 240.0, 824.0] |
+ | vi | 546,481,913 | 424.3161900813099 | 3 | 246,463 | [46.0, 64.0, 71.0, 80.0, 86.0, 92.0, 120.0, 240.0, 824.0] |
  | war | 117,438,315 | 92.74833676090108 | 1 | 25,689 | [60.0, 77.0, 81.0, 84.0, 87.0, 90.0, 94.0, 99.0, 110.0] |

  Some other languages in SEA that are already exists its Wiki Index at Wikimedia might be missing from this list. Any lang update PR is greatly appreciated!
@@ -628,7 +628,8 @@ Some other languages in SEA that are already exists its Wiki Index at Wikimedia
  ### How does the data being preprocessed? What makes it different from loading it directly from Wikipedia HF?
  The data available in here are processed with following flows:
  1. Raw data is being deduplicated on ```title``` and ```text``` (text-content from a given article), to remove articles containing boilerplate text (template text that are used usually for unavailable informations or asking for contributions of content in that article), which usually deemed noisy for NLP data.
- 2. Furthermore, the ```title``` and ```text``` data are being checked for string-matching duplication (duplication of text that are being pre-processed, i.e symbols removed, HTML tags striped, or ASCII-chars/UTF-8 chars validated). You may check this [ ```dedup_raw_wiki_data.py```](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/dedup_raw_wiki_data.py) script to understand its implementation.
+ 2. Furthermore, the ```title``` and ```text``` data are being checked for string-matching duplication (duplication of text that are being pre-processed, i.e symbols removed, HTML tags striped, or ASCII-chars/UTF-8 chars validated).
+ The source code can be found on this Github Repo [SEA Wiki Github Source Code](https://github.com/sabilmakbar/sea_wiki)

  ### How do I extract new Wikipedia Dataset of SEA languages?
  Please refer to the corresponding Github Repo for more detailed info [SEA Wiki Github Source Code](https://github.com/sabilmakbar/sea_wiki)
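
For consumers of the dataset the loading code stays the same; only the transferred bytes change. A minimal usage sketch, assuming the Hub repo id `sabilmakbar/sea_wiki` referenced in this README and the `seawiki_dedup_all` config listed in the metadata above (on recent `datasets` releases `trust_remote_code=True` may be required, since the repo ships a loading script):

```python
from datasets import load_dataset

# Downloads the gzip-compressed CSVs tracked by LFS; decompression is handled
# inside the loading script rather than by the download manager.
dset = load_dataset("sabilmakbar/sea_wiki", "seawiki_dedup_all")
print(dset)  # expected: one split per language, per the split metadata above
```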
count_data_stats.py CHANGED
@@ -35,7 +35,7 @@ if __name__ == "__main__":
      stat_dict[split] = {"total": total_token, "avg": avg_token, "min": min_token, "max": max_token, "deciles": deciles}
 
      # for markdown table format
-     print("| Lang Code | Total Token | Avg Token per Article | Min Token | Max Token | Token Deciles List |")
+     print("| Dataset Lang Code | Total Token | Avg Token per Article | Min Token | Max Token | Token Deciles List |")
      print("| :---: | ---: | ---: | ---: | ---: | :--- |")
      for key, data in stat_dict.items():
          print(f"| {key} | {data['total']:,} | {data['avg']:,} | {data['min']:,} | {data['max']:,} | {[round(num,2) for num in data['deciles']]} |")
dedup_raw_wiki_data.py DELETED
@@ -1,414 +0,0 @@
1
- # %%
2
- '''
3
- Script on Cleansing Wikipedia Data that has been extracted from extract_raw_wiki_data.py
4
- '''
5
- #core functionality modules
6
- import os, gc
7
- import logging
8
- import argparse
9
- import warnings
10
-
11
- from functools import partial
12
-
13
- #text preprocess modules
14
- import re
15
- import urllib
16
- from xml.etree import ElementTree as ET
17
-
18
- #dataset related modules
19
- import numpy as np
20
- import pandas as pd
21
-
22
-
23
- ### MODULES DEFINITION ###
24
- #create custom type-checking of incoming ArgParse
25
- def argparse_bool_check(value: str):
26
- #cast str with value like float into actual float
27
- try:
28
- value = float(value)
29
- #can't be parsed as float, keep as it is
30
- except ValueError:
31
- pass
32
-
33
- #cast float-like value (incl int) into str
34
- if isinstance(value, float) and int(value) == value:
35
- value = str(int(value))
36
- #raise ArgumentTypeError if the value isn't in string already
37
- else:
38
- if not isinstance(value, str):
39
- raise argparse.ArgumentTypeError(f"Not the correct value (args: {value})! Expected is cast-able to '1' or '0' or already in string. Please rectify!")
40
- #check for these combinations of values
41
- if value.lower() in ("yes", "true", "t", "y", "1"):
42
- return True
43
- elif value.lower() in ("no", "false", "f", "n", "0"):
44
- return False
45
- else:
46
- raise argparse.ArgumentTypeError(f"Value Error! Not the correct value (args: {value})! Please rectify!")
47
-
48
-
49
- def text_processing_args_checker(value: str):
50
- if value not in ["all", "text", "title", "neither"]:
51
- raise argparse.ArgumentTypeError(f"Value Error! Not the correct value (args: {value})! Please rectify!")
52
- else:
53
- return value
54
-
55
-
56
- def set_logger():
57
- # Set up the logger
58
- logging.basicConfig(
59
- level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
60
- format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format
61
- datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format
62
- )
63
-
64
- # Create a file handler to write logs into a file
65
- file_handler = logging.FileHandler('app.log')
66
-
67
- # Set the log level for the file handler
68
- file_handler.setLevel(logging.INFO)
69
-
70
- # Create a formatter for the file handler (customize the log format for the file)
71
- file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
72
- file_handler.setFormatter(file_formatter)
73
-
74
- logger = logging.getLogger("Wiki Dataset Generation")
75
- logger.addHandler(file_handler)
76
-
77
- return logger
78
-
79
-
80
- #wrapper fn of text-cleansing
81
- def text_cleansing_wrapper(fn, exception_class_names = []):
82
-
83
- #ensure caught exception class names passed to decorator is a list (if provided)
84
- if not isinstance(exception_class_names, list):
85
- raise TypeError("Exception Class Name for Wrapper is not a list!")
86
- #ensure all values of caught exception class name list is a string
87
- if not all([isinstance(val, str) for val in exception_class_names]):
88
- raise ValueError("Found an element of Exception Class Name for Wrapper that is not a string!")
89
-
90
- #lowercase all exception class name
91
- exception_class_names = [val.lower() for val in exception_class_names]
92
- if len(exception_class_names) == 0:
93
- warnings.warn("The wrapper receives 0 `exception_class_names` to be warned! Will return the function value with its input!")
94
-
95
- def text_fn_wrapper(text: str, *args, **kwargs):
96
- try:
97
- return fn(text, *args, **kwargs)
98
- except Exception as e:
99
- _exc_name = type(e).__name__
100
- if _exc_name.lower() not in exception_class_names and len(exception_class_names)>0:
101
- raise Exception(f"Exception Occured of {_exc_name} in {fn.__name__}!") from e
102
- else:
103
- _followup_msg = "Returning the input as it is..."
104
- _text_warn = f"An exception of {_exc_name} occured in {fn.__name__}! {_followup_msg}"
105
- warnings.warn(_text_warn)
106
- return text
107
-
108
- return text_fn_wrapper
109
-
110
-
111
- #create html tags cleanser of a given text
112
- partial_decorator = partial(text_cleansing_wrapper, exception_class_names=["parseerror"])
113
- @partial_decorator
114
- def remove_html_tags(text: str):
115
- #extracted from "https://stackoverflow.com/a/9662410", w/ additional decorator of error handler
116
- return (''.join(ET.fromstring(text).itertext())).strip()
117
-
118
-
119
- #create url decoder of text
120
- @text_cleansing_wrapper
121
- def decode_url(text: str):
122
- # return (urllib.parse.unquote(text)).encode('utf8', errors='ignore').decode().strip()
123
- return (urllib.parse.unquote(text)).strip()
124
-
125
- #create encoder check of text
126
- @text_cleansing_wrapper
127
- def check_text_by_encoder(text: str, encoder: str="utf8"):
128
- return text.encode(encoder, errors='ignore').decode().strip()
129
-
130
- #create excessive whitespace removal of text
131
- @text_cleansing_wrapper
132
- def remove_excessive_whitespace(text: str):
133
- return re.sub("(\s)(\s+)", r"\1", text).strip()
134
-
135
- #create non-alphanumeric removal of text
136
- @text_cleansing_wrapper
137
- def remove_non_alphanumeric(text: str):
138
- return re.sub("[^a-z0-9\s]", "", text, flags=re.I).strip()
139
-
140
- # def cleanse_wiki_text(text: str):
141
- # return remove_html_tags(decode_url_and_remove_non_ascii(text))
142
-
143
- # def normalize_wiki_title(text: str):
144
- # return remove_non_alphanumeric(remove_excessive_whitespace(text.lower()))
145
-
146
-
147
- def _text_normalizer_constructor(
148
- remove_non_alphanumeric_bool: bool, remove_excessive_whitespace_bool: bool,
149
- remove_html_tags_bool: bool, decode_url_bool: bool, encoder_check_bool: bool,
150
- encoder: str="utf8"):
151
-
152
- _lambda_fn_1 = partial(check_text_by_encoder, encoder=encoder) if encoder_check_bool else lambda x: x
153
- _lambda_fn_2 = lambda x: remove_non_alphanumeric(_lambda_fn_1(x)) if remove_non_alphanumeric_bool else _lambda_fn_1(x)
154
- _lambda_fn_3 = lambda x: remove_excessive_whitespace(_lambda_fn_2(x)) if remove_excessive_whitespace_bool else _lambda_fn_2(x)
155
- _lambda_fn_4 = lambda x: remove_html_tags(_lambda_fn_3(x)) if remove_html_tags_bool else _lambda_fn_3(x)
156
- _lambda_fn_5 = lambda x: decode_url(_lambda_fn_4(x)) if decode_url_bool else _lambda_fn_4(x)
157
-
158
- return _lambda_fn_5
159
-
160
-
161
- def _args_to_text_constructor_fn(**kwargs):
162
-
163
- def _decode_options(opt: str):
164
- # return decoded options with format `text_opt`, `title_opt`
165
- # possible values are ["all", "text", "title", "neither"]
166
- if opt == "all":
167
- return True, True
168
- elif opt == "text":
169
- return True, False
170
- elif opt == "title":
171
- return False, True
172
- else:
173
- return False, False
174
-
175
- kwargs_title, kwargs_text = {}, {}
176
-
177
- kwargs_title["encoder"] = kwargs["text_encoder_choice_title"]
178
- kwargs_text["encoder"] = kwargs["text_encoder_choice_text"]
179
-
180
- for key, val in kwargs.items():
181
- if key not in [
182
- "remove_non_alphanumeric_option", "remove_excessive_whitespace_option",
183
- "remove_html_tags_option", "decode_url_option", "encoder_check_option"]:
184
- continue
185
- new_key = "_".join(key.split("_")[:-1]) + "_bool"
186
- text_opt_val, title_opt_val = _decode_options(val)
187
- kwargs_text[new_key], kwargs_title[new_key] = text_opt_val, title_opt_val
188
-
189
- return _text_normalizer_constructor(**kwargs_text), _text_normalizer_constructor(**kwargs_title)
190
-
191
-
192
- def _text_processing_wrapper(text: str, _fn, mode: str="text"):
193
- if mode not in ["text", "title"]:
194
- raise ValueError(f"Provided `mode` isn't either 'text' or 'title'! Received: {mode}")
195
- return _fn(text.lower()) if mode=="title" else _fn(text)
196
-
197
-
198
- ### MAIN CODE ###
199
- if __name__ == "__main__":
200
- parser = argparse.ArgumentParser()
201
-
202
- parser.add_argument("--raw-csv-path", help="Relative location of csv file containing raw Wikipedia data")
203
-
204
- parser.add_argument("--drop-hard-dupl", help="""Flag whether to drop hard duplicates
205
- (exact values of data of relevant text fields, Titles & Desc)""",
206
- default=True, type=argparse_bool_check)
207
-
208
- parser.add_argument("--drop-soft-dupl", help="""Flag whether to drop soft duplicates
209
- (duplicates after cleansed and normalized relevant text fields, Titles & Desc)""",
210
- default=True, type=argparse_bool_check)
211
-
212
- parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data
213
- to the `dedup_raw_wiki_data.py` script dir""",
214
- default=os.path.dirname(os.path.abspath(__file__)))
215
-
216
- ### THE FOLLOWING ARGUMENTS ONLY TEMPORARILY ALTER THE TEXT DATA ONLY FOR SOFT-DEDUP CHECK ###
217
- ### THE INITIAL TEXT DATA WON'T BE OVERWRITTEN AFTER BEING PREPROCESSED ###
218
- ### UNLESS YOU ARE SPECIFYING IN ARGS `overwrite-initial-title-data` AND `overwrite-initial-text-data` ###
219
-
220
- ### ARGS TO OVERWRITTE INITIAL TEXT DATA WITH PROCESSED ONES ###
221
- parser.add_argument("--overwrite-initial-title-data", help="""Flag whether to overwrite title
222
- init data w/ processed data (True) or keep it as it is (False)""",
223
- default=False, type=argparse_bool_check)
224
-
225
- parser.add_argument("--overwrite-initial-text-data", help="""Flag whether to overwrite text
226
- init data w/ processed data (True) or keep it as it is (False)""",
227
- default=False, type=argparse_bool_check)
228
-
229
- ### INSTANTIATOR ARGS FOR CONSTRUCTING TEXT PROCESSING FN TO BE APPLIED ###
230
- parser.add_argument("--remove-non-alphanumeric-option", help="""Identifier which columns to be preprocessed
231
- using `remove_non_alphanumeric` for soft duplicates detection
232
- (Choices are "all", "text", "title", "neither")""",
233
- default="neither", type=text_processing_args_checker)
234
-
235
- parser.add_argument("--remove-excessive-whitespace-option", help="""Identifier which columns to be preprocessed
236
- using `remove_excessive_whitespace` for soft duplicates detection
237
- (Choices are "all", "text", "title", "neither")""",
238
- default="all", type=text_processing_args_checker)
239
-
240
- parser.add_argument("--remove-html-tags-option", help="""Identifier which columns to be preprocessed
241
- using `remove_html_tags` for soft duplicates detection
242
- (Choices are "all", "text", "title", "neither")""",
243
- default="all", type=text_processing_args_checker)
244
-
245
- parser.add_argument("--decode-url-option", help="""Identifier which columns to be preprocessed
246
- using `decode_url` for soft duplicates detection
247
- (Choices are "all", "text", "title", "neither")""",
248
- default="all", type=text_processing_args_checker)
249
-
250
- ### ARGS TO CHOOSE ENCODER CHECKING AND ITS CONFIG INITIALIZATION ###
251
- parser.add_argument("--encoder-check-option", help="""Identifier which columns to be preprocessed
252
- using `check_text_by_encoder` for soft duplicates detection
253
- (Choices are "all", "text", "title", "neither")""",
254
- default="all", type=text_processing_args_checker)
255
-
256
- parser.add_argument("--text-encoder-choice-title", help="""Identifier of title encoder type
257
- to be applied into `check_text_by_encoder` for soft duplicates detection""",
258
- default="utf8", type=str)
259
-
260
- parser.add_argument("--text-encoder-choice-text", help="""Identifier of text encoder type
261
- to be applied into `check_text_by_encoder` for soft duplicates detection""",
262
- default="utf8", type=str)
263
-
264
-
265
- _EXPECTED_COLNAMES = ["id", "url", "title", "text"]
266
-
267
- logger = set_logger()
268
- logger.info("Parsing arguments...")
269
-
270
- args = parser.parse_args()
271
-
272
- # class dotdict(dict):
273
- # """dot.notation access to dictionary attributes"""
274
- # __getattr__ = dict.get
275
- # __setattr__ = dict.__setitem__
276
- # __delattr__ = dict.__delitem__
277
-
278
- # args = dotdict({
279
- # "raw_csv_path":"",
280
- # "drop_hard_dupl": True,
281
- # "drop_soft_dupl": True,
282
- # "save_dir_path": os.path.dirname(os.path.abspath(__file__)),
283
- # "overwrite_initial_title_data": False,
284
- # "overwrite_initial_text_data": False,
285
- # "remove_non_alphanumeric_option":"neither",
286
- # "remove_excessive_whitespace_option": "neither",
287
- # "remove_html_tags_option":"neither",
288
- # "decode_url_option":"neither",
289
- # "encoder_check_option":"all",
290
- # "text_encoder_choice_title":"utf8",
291
- # "text_encoder_choice_text":"utf8"
292
- # })
293
-
294
- _TEXT_PROCESSING_FN, _TITLE_PROCESSING_FN = _args_to_text_constructor_fn(
295
- remove_non_alphanumeric_option = args.remove_non_alphanumeric_option,
296
- remove_excessive_whitespace_option = args.remove_excessive_whitespace_option,
297
- remove_html_tags_option = args.remove_html_tags_option,
298
- decode_url_option = args.text_encoder_choice_title,
299
- encoder_check_option = args.encoder_check_option,
300
- text_encoder_choice_title = args.text_encoder_choice_title,
301
- text_encoder_choice_text = args.text_encoder_choice_text
302
- )
303
-
304
- raw_data_path = args.raw_csv_path
305
- drop_hard_dupl = args.drop_hard_dupl
306
- drop_soft_dupl = args.drop_soft_dupl
307
- save_dir = args.save_dir_path
308
-
309
- overwrite_initial_title_data = args.overwrite_initial_title_data
310
- overwrite_initial_text_data = args.overwrite_initial_text_data
311
-
312
-
313
- df = pd.read_csv(raw_data_path)
314
- if len(set(df.columns).difference(set(_EXPECTED_COLNAMES))) != 0 or len(set(_EXPECTED_COLNAMES).difference(set(df.columns))) != 0:
315
- raise ValueError(f"The data schema expected, consist of columns: {', '.join(df.columns.to_list())} doesn't match with expected column values of {', '.join(_EXPECTED_COLNAMES)}!")
316
-
317
- if (not drop_hard_dupl) and (not drop_soft_dupl):
318
- raise AssertionError("The script won't run with both `drop-hard-dupl` and `drop-soft-dupl` args turned off!")
319
- elif (not drop_hard_dupl):
320
- warnings.warn("The args of `drop_hard_dupl` isn't turned off! Possibly the data will contain one template value of Wikipedia (usually no contribution text!)")
321
-
322
- #will save id identifier colname first (popping first list val)
323
- id_colname = _EXPECTED_COLNAMES.pop(0)
324
-
325
- # if any of the data has duplicate values from columns checked (url, title, or text),
326
- # it means the data integrity is questionable
327
- # i.e. copied from other article or filled with template text
328
- # hence, we will delete those duplicated datasets
329
-
330
- #hard duplicate drop (drop all duplicate values that has exact same text on expected unique colnames)
331
- if drop_hard_dupl:
332
-
333
- for colname in _EXPECTED_COLNAMES:
334
- logger.info(f"Checking data integrity on column {colname} on removing hard-duplicate(s)...")
335
- dupl_text_df = df[df.duplicated(subset=colname,keep=False)]
336
- shape_of_dupl_data = dupl_text_df.shape[0]
337
-
338
- if shape_of_dupl_data > 0:
339
- logger.info(f"Found {shape_of_dupl_data} data duplicated! Will be dropped")
340
- df.drop_duplicates(subset=colname, keep=False, inplace=True)
341
-
342
-
343
- #check id/idx of the cleansed data, whether it has duplicate
344
- # (the duplication of id/idx should came from the very first extraction, not from the cleansing)
345
-
346
- if df[df.duplicated(subset=id_colname,keep=False)].shape[0] > 0:
347
- logger.info("Duplicated ID found! Re-assigning ID to the new ones based on `df.reset_index` method!")
348
- df[id_colname] = df.reset_index().index
349
-
350
- #soft duplicate drop (drop all except one duplicate values that has exact same text on expected unique colnames)
351
- #keep the data that has longest value of its raw form
352
- if drop_soft_dupl:
353
-
354
- idx_to_keep = set(df.index.to_list())
355
- #clean from text & title only, url isn't needed for this process
356
- _EXPECTED_COLNAMES.remove("url")
357
-
358
- for colname in _EXPECTED_COLNAMES:
359
- #Construct Text Cleanser Fn for soft-duplicate cleansing
360
- _PROCESSING_FN = _TEXT_PROCESSING_FN if colname == "text" else _TITLE_PROCESSING_FN
361
- text_processing_fn = partial(_text_processing_wrapper, _fn=_PROCESSING_FN, mode=colname)
362
- logger.info(f"Checking data integrity on column {colname} on removing soft-duplicate(s)...")
363
- _df = df.copy(deep=True)
364
-
365
- #Setting up DF cols as String so it can be text-processed
366
- _df = _df[[colname]]
367
- _df[colname] = _df[colname].astype("str")
368
- logger.info(f"Cleansing the data based on {colname}")
369
-
370
- #applying text processing
371
- _df[colname+"_raw_len"] = _df[colname].apply(len)
372
- _df[colname+"_cleansed"] = _df[colname].apply(lambda row_text: text_processing_fn(text=row_text))
373
-
374
- #overwrite its text data if set as true
375
- if overwrite_initial_title_data and colname == "title":
376
- df[colname] = _df[colname+"_cleansed"]
377
- elif overwrite_initial_text_data and colname == "text":
378
- df[colname] = _df[colname+"_cleansed"]
379
-
380
- #choose the data to keep by "ranking" it according to len of its raw text (greatest to keep)
381
- logger.info(f"Ranking and grouping the data based on {colname}")
382
- _df["rk"] = _df.groupby(colname+"_cleansed")[colname+"_raw_len"].rank(method="min", ascending=False)
383
- shape_of_dupl_data = _df[_df["rk"]>1].shape[0]
384
-
385
- if shape_of_dupl_data > 0:
386
- logger.info(f"Found {shape_of_dupl_data} data duplicated! Will be dropped")
387
- _idx_to_keep = _df[_df["rk"]==1].index.to_list()
388
- if len(_idx_to_keep)+shape_of_dupl_data != df.shape[0]:
389
- raise AssertionError("Mismatch of data number!")
390
- idx_to_keep = idx_to_keep.intersection(set(_idx_to_keep))
391
- else:
392
- logger.info(f"No soft-duplicate found in colname {colname}. Continuing")
393
-
394
- del _df
395
- gc.collect()
396
-
397
- logger.info(f"The final data kept is {len(idx_to_keep)} from {df.shape[0]}")
398
- df = df.loc[list(idx_to_keep),:]
399
-
400
- logger.info("Saving dataset cleansed form...")
401
- #input path splitted by ("/") for the last entry should return filename
402
- #whereas the filename splitted by (".") except the last value should return the filename w/o ".csv" extension
403
-
404
- _override_suffix_identifier = ""
405
- if overwrite_initial_title_data or overwrite_initial_text_data:
406
- _override_suffix_identifier = "_overwritten"
407
- if overwrite_initial_text_data:
408
- _override_suffix_identifier = "_text"+_override_suffix_identifier
409
- if overwrite_initial_title_data:
410
- _override_suffix_identifier = "_title"+_override_suffix_identifier
411
-
412
- _save_file_name = ".".join(raw_data_path.split("/")[-1].split(".")[:-1]) + "_dedup_cleansed" + _override_suffix_identifier + ".csv"
413
- _save_file_name = _save_file_name.replace("_raw", "")
414
- df.to_csv(f"{save_dir}/{_save_file_name}", index=False)
dedup_raw_wiki_data_sea.sh DELETED
@@ -1,62 +0,0 @@
1
- #!/bin/bash
2
-
3
- # all available lang codes in SEA local-languages or linguistically-related to following countries in SEA:
4
- # Indonesia: "ace" (Acehnese), "ban" (Balinese), "bjn" (Banjarese), "bug" (Buginese), "gor" (Gorontalo), "id" (Indonesian), "jv" (Javanese), "mad" (Madurese), "map-bms" (Banyumasan, Dialect of Javanese), "min" (Minangkabau), "ms" (Malay), "nia" (Nias), "su" (Sundanese), "tet" (Tetum)
5
- # Singapore: "ms" (Malay), "ta" (Tamil)
6
- # Malaysia: "ms" (Malay), "ta" (Tamil)
7
- # Brunei: "ms" (Malay)
8
- # Thailand: "mnw" (Mon), "shn" (Shan), "th" (Thai)
9
- # Myanmar: "my" (Burmese), "mnw" (Mon), "shn" (Shan)
10
- # Laos: "lo" (Lao)
11
- # Vietnam: "vi" (Vietnamese)
12
- # Cambodia: "km" (Khmer)
13
- # East Timor: "tet" (Tetum)
14
- # Philippines: "bcl" (Central Bicolano), "cbk-zam" (Chavacano), "ceb" (Cebuano), "ilo" (Ilokano), "pag" (Pangasinan), "pam" (Kapampangan), "tl" (Tagalog), "war" (Waray)
15
-
16
- #params of executions
17
- folder_dir_to_save=./sea_wiki_dedup_data
18
- input_folder_to_be_dedup=./sea_wiki_raw_data
19
-
20
- drop_hard_dupl=True
21
- drop_soft_dupl=True
22
-
23
-
24
- # main executions
25
-
26
- # src: https://stackoverflow.com/a/18887210 (to list all files under a dir)
27
- shopt -s nullglob
28
- file_name_array=($input_folder_to_be_dedup/*)
29
- shopt -u nullglob # Turn off nullglob to make sure it doesn't interfere with anything later
30
- file_name_array="${file_name_array}"
31
-
32
- if [ ${#file_name_array[@]} == 0 ]; then
33
- echo "No files found under directory $input_folder_to_be_dedup" >&2
34
- fi
35
-
36
- if [ ! -d $folder_dir_to_save ];
37
- then
38
- echo "Dir $folder_dir_to_save not exists! Creating the dir..."
39
- mkdir $folder_dir_to_save
40
- fi
41
-
42
- echo "The params hard-dedup drop is set as $drop_hard_dupl"
43
- echo "The params soft-dedup drop is set as $drop_soft_dupl"
44
-
45
- for val in ${!file_name_array[@]}; do
46
- csv_path=${file_name_array[$val]}
47
-
48
- if [[ ${csv_path} != *".csv" ]]; then
49
- echo "The extracted file name isn't a CSV! Skipping! Received $csv_path"
50
- continue
51
- fi
52
-
53
- echo "Executing Dedup on iteration no "$((val+1))" of total ${#file_name_array[@]} for input data $csv_path"
54
- #see the script bcs there are more args than this command is using
55
- python dedup_raw_wiki_data.py \
56
- --raw-csv-path $csv_path \
57
- --drop-hard-dupl $drop_hard_dupl \
58
- --drop-soft-dupl $drop_soft_dupl \
59
- --save-dir-path $folder_dir_to_save
60
- echo "Done Execution"
61
- done
62
- echo "Done Dedup Process"
extract_raw_wiki_data.py DELETED
@@ -1,73 +0,0 @@
1
- '''
2
- Script on Generating Wikipedia Data that are dumped into https://dumps.wikimedia.org/
3
- More info can be read on https://huggingface.co/datasets/wikipedia
4
- -------------------
5
- Check here to see available indexed data: https://dumps.wikimedia.org/backup-index.html
6
- Also check here to see language meta from its code: https://meta.wikimedia.org/wiki/List_of_Wikipedias
7
- '''
8
-
9
- import os, gc
10
- import logging
11
- import argparse
12
-
13
- import pandas as pd
14
- from datasets import load_dataset
15
-
16
-
17
- def set_logger():
18
- # Set up the logger
19
- logging.basicConfig(
20
- level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
21
- format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format
22
- datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format
23
- )
24
-
25
- # Create a file handler to write logs into a file
26
- file_handler = logging.FileHandler('app.log')
27
-
28
- # Set the log level for the file handler
29
- file_handler.setLevel(logging.INFO)
30
-
31
- # Create a formatter for the file handler (customize the log format for the file)
32
- file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
33
- file_handler.setFormatter(file_formatter)
34
-
35
- logger = logging.getLogger("Wiki Dataset Generation")
36
- logger.addHandler(file_handler)
37
-
38
- return logger
39
-
40
-
41
- #only executed if called directly
42
- if __name__ == "__main__":
43
- parser = argparse.ArgumentParser()
44
-
45
- parser.add_argument("--lang-id", help="Lang ID from Wikipedia Data to extract")
46
-
47
- parser.add_argument("--date-ver", help="Date of Wikipedia Data (YYYYMMDD) generation to extract")
48
-
49
- parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data
50
- to the `extract_raw_wiki_data.py` script dir""",
51
- default=os.path.dirname(os.path.abspath(__file__)))
52
-
53
- args = parser.parse_args()
54
-
55
-
56
- dset_name = "wikipedia"
57
-
58
- logger = set_logger()
59
- logger.info("Parsing arguments...")
60
-
61
- lang_id = args.lang_id
62
- date_ver = args.date_ver
63
- save_dir = args.save_dir_path
64
-
65
- logger.info("Loading the dataset from Wikipedia...")
66
- df = load_dataset(dset_name, language=lang_id, date=date_ver, beam_runner='DirectRunner', split="train").to_pandas()
67
- logger.info("Loading done!")
68
- logger.info(f"#Data collected: {df.shape[0]}")
69
- logger.info("Saving dataset raw form...")
70
- df.to_csv(f"{save_dir}/wiki_{lang_id}_{date_ver}_raw_dataset.csv", index=False)
71
-
72
- del df
73
- gc.collect()
extract_raw_wiki_data_batched.py DELETED
@@ -1,87 +0,0 @@
1
- '''
2
- Script on Generating Wikipedia Data that are dumped into https://dumps.wikimedia.org/
3
- More info can be read on https://huggingface.co/datasets/wikipedia
4
- -------------------
5
- Check here to see available indexed data: https://dumps.wikimedia.org/backup-index.html
6
- Also check here to see language meta from its code: https://meta.wikimedia.org/wiki/List_of_Wikipedias
7
- '''
8
-
9
- import os, gc
10
- import logging
11
- import argparse
12
-
13
- import pandas as pd
14
- from datasets import load_dataset
15
-
16
-
17
- def set_logger():
18
- # Set up the logger
19
- logging.basicConfig(
20
- level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
21
- format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format
22
- datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format
23
- )
24
-
25
- # Create a file handler to write logs into a file
26
- file_handler = logging.FileHandler('app.log')
27
-
28
- # Set the log level for the file handler
29
- file_handler.setLevel(logging.INFO)
30
-
31
- # Create a formatter for the file handler (customize the log format for the file)
32
- file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
33
- file_handler.setFormatter(file_formatter)
34
-
35
- logger = logging.getLogger("Wiki Dataset Generation")
36
- logger.addHandler(file_handler)
37
-
38
- return logger
39
-
40
-
41
- #only executed if called directly
42
- if __name__ == "__main__":
43
- parser = argparse.ArgumentParser()
44
-
45
- parser.add_argument("--lang-id", help="Lang ID from Wikipedia Data to extract")
46
-
47
- parser.add_argument("--date-ver", help="Date of Wikipedia Data (YYYYMMDD) generation to extract")
48
-
49
- #default: all
50
- parser.add_argument("--split-extr", help="""Split extraction config for choosing
51
- subsets of data to process. It follows python list slicing string args""",
52
- default=":")
53
-
54
- #default: all
55
- parser.add_argument("--force_rerun_split", help="""Flag to identify whether to check existing
56
- splits or forcing to re-create it""",
57
- default=False)
58
-
59
- parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data
60
- to the `extract_raw_wiki_data.py` script dir""",
61
- default=os.path.dirname(os.path.abspath(__file__)))
62
-
63
- args = parser.parse_args()
64
-
65
-
66
- dset_name = "sea_loader_batched/wiki_loader.py"
67
-
68
- logger = set_logger()
69
- logger.info("Parsing arguments...")
70
-
71
- lang_id = args.lang_id
72
- date_ver = args.date_ver
73
- generated_split_extraction = args.split_extr
74
- force_rerun_split_generation = args.force_rerun_split
75
- save_dir = args.save_dir_path
76
-
77
- logger.info("Loading the dataset from Wikipedia...")
78
- df = load_dataset(dset_name, language=lang_id, date=date_ver, beam_runner='DirectRunner',
79
- split="train", subset_file_to_process=generated_split_extraction,
80
- force_rerun_split=force_rerun_split_generation).to_pandas()
81
- logger.info("Loading done!")
82
- logger.info(f"#Data collected: {df.shape[0]}")
83
- logger.info("Saving dataset raw form...")
84
- df.to_csv(f"{save_dir}/wiki_{lang_id}_{date_ver}_raw_dataset_splitted.csv", index=False)
85
-
86
- del df
87
- gc.collect()
extract_raw_wiki_data_sea.sh DELETED
@@ -1,38 +0,0 @@
1
- #!/bin/bash
2
-
3
- # all available lang codes in SEA local-languages or linguistically-related to following countries in SEA:
4
- # Indonesia: "ace" (Acehnese), "ban" (Balinese), "bjn" (Banjarese), "bug" (Buginese), "gor" (Gorontalo), "id" (Indonesian), "jv" (Javanese), "mad" (Madurese), "map-bms" (Banyumasan, Dialect of Javanese), "min" (Minangkabau), "ms" (Malay), "nia" (Nias), "su" (Sundanese), "tet" (Tetum)
5
- # Singapore: "ms" (Malay), "ta" (Tamil)
6
- # Malaysia: "ms" (Malay), "ta" (Tamil)
7
- # Brunei: "ms" (Malay)
8
- # Thailand: "mnw" (Mon), "shn" (Shan), "th" (Thai)
9
- # Myanmar: "my" (Burmese), "mnw" (Mon), "shn" (Shan)
10
- # Laos: "lo" (Lao)
11
- # Vietnam: "vi" (Vietnamese)
12
- # Cambodia: "km" (Khmer)
13
- # East Timor: "tet" (Tetum)
14
- # Philippines: "bcl" (Central Bicolano), "cbk-zam" (Chavacano), "ceb" (Cebuano), "ilo" (Ilokano), "pag" (Pangasinan), "pam" (Kapampangan), "tl" (Tagalog), "war" (Waray)
15
-
16
- #params of executions
17
- date_ver=20231101
18
- folder_dir_to_save=./sea_wiki_raw_data
19
- lang_list=(ace ban bcl bjn bug cbk-zam ceb gor id ilo jv km lo mad map-bms min mnw ms my nia pag pam shn su tet ta th tl vi war)
20
-
21
-
22
- #main executions
23
-
24
- if [ ! -d $folder_dir_to_save ]; then
25
- echo "Dir $folder_dir_to_save not exists! Creating the dir..."
26
- mkdir $folder_dir_to_save
27
- fi
28
-
29
- for val in ${!lang_list[@]}; do
30
- lang=${lang_list[$val]}
31
- echo "Executing Extractor on iteration no $((val+1)) of total ${#lang_list[@]} for language $lang and date version of $date_ver"
32
- python extract_raw_wiki_data.py \
33
- --lang-id $lang \
34
- --date-ver $date_ver \
35
- --save-dir-path $folder_dir_to_save
36
- echo "Done Execution"
37
- done
38
- echo "Done Extraction Process"
sea_wiki.py CHANGED
@@ -66,11 +66,24 @@ _LATEST_DUMP_VERSION_DATE = sorted(_AVAILABLE_DUMP_VERSION_DATE)[-1]
 
  def _construct_dset_url_from_dset_version_and_lang(date_ver: str, lang: str, mode: str):
      _mode_to_folder_mapper = {"dedup": "sea_wiki_dedup_data", "raw": "sea_wiki_raw_data"}
-     _mode_to_file_suffix_mapper = {"dedup": "dataset_dedup_cleansed.csv", "raw": "raw_dataset.csv"}
+     _mode_to_file_suffix_mapper = {"dedup": "dataset_dedup_cleansed.csv.gz", "raw": "raw_dataset.csv.gz"}
 
      return os.path.join(_mode_to_folder_mapper[mode], f"wiki_{lang}_{date_ver}_{_mode_to_file_suffix_mapper[mode]}")
 
 
+ def read_csv_ignore_some_nulls(path: str, null_list_data: list=None, *args, **kwargs):
+     '''
+     Wrapper of `pd.read_csv` fn that ignores some of null data
+     '''
+     #values of pd._libs.parsers.STR_NA_VALUES: {'', '<NA>', 'NaN', 'N/A', 'null', '1.#QNAN', 'None', '#NA', 'nan', '-NaN', '#N/A N/A', '-1.#QNAN', 'NA', '-1.#IND', 'n/a', 'NULL', '-nan', '1.#IND', '#N/A'}
+     _unconsidered_for_null_list = ['NA', 'NULL', 'null', 'nan', 'null', 'NaN', 'None', 'N/A']
+     if null_list_data is not None:
+         _unconsidered_for_null_list.extend(null_list_data)
+
+     values_to_considered_missing_data = [val for val in pd._libs.parsers.STR_NA_VALUES if val not in _unconsidered_for_null_list]
+     return pd.read_csv(path, keep_default_na=False, na_values=values_to_considered_missing_data, *args, **kwargs)
+
+
  class SEAWikiConfig(datasets.BuilderConfig):
      """BuilderConfig for SEAWiki."""
 
@@ -199,7 +212,7 @@ class SEAWiki(datasets.GeneratorBasedBuilder):
      # handle cases of config "seawiki_all", "seawiki_dedup_all", and custom config where only country is provided (take all langs in a country)
      if self.config.name in ("seawiki_all", "seawiki_dedup_all") or (self.config.country is not None and self.config.lang is None):
          file_dict = {self._get_lang_name_from_data_url(file): file for file in self.config.data_url}
-         dl_dir = dl_manager.download_and_extract(file_dict)
+         dl_dir = dl_manager.download(file_dict)
 
          return [
              datasets.SplitGenerator(
@@ -218,7 +231,7 @@ class SEAWiki(datasets.GeneratorBasedBuilder):
          for file in file_list:
              file_dict[country + "_" + self._get_lang_name_from_data_url(file)] = file
 
-         dl_dir = dl_manager.download_and_extract(file_dict)
+         dl_dir = dl_manager.download(file_dict)
 
          return [
              datasets.SplitGenerator(
@@ -231,7 +244,7 @@ class SEAWiki(datasets.GeneratorBasedBuilder):
 
      # handle custom config where only country is provided
      elif self.config.lang is not None:
-         dl_dir = dl_manager.download_and_extract(self.config.data_url)
+         dl_dir = dl_manager.download(self.config.data_url)
          return [
              datasets.SplitGenerator(
                  name=datasets.Split.TRAIN,
@@ -243,7 +256,7 @@ class SEAWiki(datasets.GeneratorBasedBuilder):
 
 
      def _generate_examples(self, data_file):
-         pd_df = pd.read_csv(data_file)
+         pd_df = read_csv_ignore_some_nulls(data_file, compression='gzip')
          for _, row in pd_df.iterrows():
              example = {feature: row[feature] for feature in self.config.features}
              idx = row["id"]
sea_wiki_dedup_data/{wiki_pag_20231101_dataset_dedup_cleansed.csv → wiki_ace_20231101_dataset_dedup_cleansed.csv.gz} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2bc9b5c45ddbf2288944abf5bc1ffecb0c3ba28fdd28db335031cf6ec52c4f6b
- size 764394
+ oid sha256:cfaba4f6590aa0259c3d4d6086768fd74fdfadc687bb0f674f9e3a0aee365b83
+ size 813603
sea_wiki_dedup_data/{wiki_ace_20231101_dataset_dedup_cleansed.csv → wiki_ban_20231101_dataset_dedup_cleansed.csv.gz} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f52ce2ae7c8d340a30014bd036bc0806f5782f5a0856f80ffe3bf80f71b33152
- size 4938934
+ oid sha256:e5c32ae0977f94f6c3d7d99516d30909eec686383ddffa71d599255c4f39184d
+ size 3957396
sea_wiki_dedup_data/{wiki_bug_20231101_dataset_dedup_cleansed.csv → wiki_bcl_20231101_dataset_dedup_cleansed.csv.gz} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cc2a1d85b4306eb2d0cf74db4ca2d997b0ea20e2f4016167aeb66394ac5b9b59
- size 2172844
+ oid sha256:7b125d1585cbf0cb1fda5c4124de07d3bf2e7752943108f3bbba56905f8cf456
+ size 6967103
sea_wiki_dedup_data/{wiki_cbk-zam_20231101_dataset_dedup_cleansed.csv → wiki_bjn_20231101_dataset_dedup_cleansed.csv.gz} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e089c0accfbb22e26567995e4341b4f5c0756ef3598c5966a624922af77ce9f0
- size 1578089
+ oid sha256:baa5ed89e351ce165e04214c4512b9e2264a352a154ec11fcebac6d78b91c8af
+ size 1980505
sea_wiki_dedup_data/{wiki_bjn_20231101_dataset_dedup_cleansed.csv → wiki_bug_20231101_dataset_dedup_cleansed.csv.gz} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0a0914f929502fa632e764dadeddf49969b45ccf35d9933743bd4e9e2b16ea0f
- size 6791315
+ oid sha256:38f22a07979c260b77ba2a6bdd84b2402c68a197520bbeaabdcf1e930304feee
+ size 243060
sea_wiki_dedup_data/wiki_cbk-zam_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6767c2c1800f961b5a218082554c18e0531ac4897c795dc089ef192fe1e41af5
3
+ size 461691
sea_wiki_dedup_data/wiki_ceb_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:23c5d20ac09b5777b50062b6113eb6ab3cf51d03f68a506319913ce56b4f338e
3
- size 4354355472
sea_wiki_dedup_data/wiki_ceb_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d30269963df1ad3e5a228cd5ca86e5e00db152e94709e1589d578b10fc5403bd
3
+ size 344090450
sea_wiki_dedup_data/wiki_gor_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0607ee607dd6c00c5eb6729efc0c38a21ea9e10da39005e34373959e179e7707
- size 6222508
sea_wiki_dedup_data/wiki_gor_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6327f3ce7e3ba5b049f161a28747aa892dbe93b6c1293829ec3a591b4f29d77d
+ size 1060119
sea_wiki_dedup_data/wiki_id_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d5fa79da4fe764d1ddafb3d025123be9542ac246c4956b64421ea464d50f33a
+ size 362225834
sea_wiki_dedup_data/wiki_ilo_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f95c04563bc1a824d29d6701e664b8b86535c8a148a3a453949441661e7bfe99
- size 16778313
sea_wiki_dedup_data/wiki_ilo_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfdbac9788c4b44479c7481a5738b563447545a310f5bf4a63cd6f88a1209cea
+ size 4301462
sea_wiki_dedup_data/wiki_jv_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8f36dba2f186c2a710cac07918a218ca74006d8940fc8fd1955f4122725efd43
- size 72052487
sea_wiki_dedup_data/{wiki_ban_20231101_dataset_dedup_cleansed.csv → wiki_jv_20231101_dataset_dedup_cleansed.csv.gz} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:325006efe32de9ee07f718cb187094a358e9a50577a4945470471c798bfa4d2b
- size 18034158
+ oid sha256:e22f661da1235a865ac0af2b725e4d23d28bd86c8b8cf796191aee653214ecd1
+ size 22618188
sea_wiki_dedup_data/wiki_km_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e0f043cd2dddb22a3076abe0af5805b6b8130678aa6650bbdb158f91cb6e1b30
- size 102709279
sea_wiki_dedup_data/{wiki_bcl_20231101_dataset_dedup_cleansed.csv → wiki_km_20231101_dataset_dedup_cleansed.csv.gz} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:31578e60c6811f9649ffe5250da492faf427c057183b1361d9aebf7e9150b93e
- size 19988184
+ oid sha256:08121b434e6958fa44d24385f63479b38247972dcb10907d611f815735ba7315
+ size 18408737
sea_wiki_dedup_data/wiki_lo_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:47d4132398f9002803a88454ad4318a9f2f41c11281a9f085c23255810beccd6
- size 14905688
sea_wiki_dedup_data/wiki_lo_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3b7bc8f254c5a29984f20063359317074b150bae99934e88265897bdee5650f
+ size 2991380
sea_wiki_dedup_data/wiki_mad_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:517777f9ec0da20ce5ccf1f70cca6d2a8192452745afefc6288ff360dad4ee7c
- size 1610155
sea_wiki_dedup_data/wiki_mad_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:526d47b7b3b1d06cd9f181ab5235e469e3dd383d872e5ec7bf29b97819099c53
+ size 565391
sea_wiki_dedup_data/wiki_map-bms_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:49bee130293507159198f83422c654d5e0000e6e20d27ed5f5afeb378a663967
- size 5076335
sea_wiki_dedup_data/wiki_map-bms_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b45121f0eb9a2fbe5683c1d7b6f1ea0cb89df5b9828f1fe61617bc054347d65
+ size 1338732
sea_wiki_dedup_data/wiki_min_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6debf5f9204262a3bcdcb37966873f877158882c435a3b5cae55a01ad1418a3f
- size 116663617
sea_wiki_dedup_data/wiki_min_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d5422b6f9971693e4dfee585b600870b0c1f6828ddff3b7b6748bb23745a1e5
+ size 11098162
sea_wiki_dedup_data/wiki_mnw_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:daf4ec4e087fcf46c2ebf71a3db8e11b8bdbfbbe0eacd9e5efc6d9f9c5b6b6d2
- size 47243726
sea_wiki_dedup_data/wiki_mnw_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0dbe2abe6a89b29195578ee29663c6adf4cad38bd4bc908df504f1b8cb44b21d
+ size 7030477
sea_wiki_dedup_data/wiki_ms_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8c1aca444ed1a161d30069d3106f604597cc9e3e48267b223d2ef3cf7b52fa7c
- size 415339805
sea_wiki_dedup_data/wiki_ms_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9d6418149b867aaed13bca953c99f3759cdc12a72f8944952f037a52167d281
+ size 128679448
sea_wiki_dedup_data/wiki_my_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ffd9ad7c34a340d4ae62820dccede45d515dc69b145340f4dc5485f01d83745f
- size 312976625
sea_wiki_dedup_data/wiki_my_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28e722b249c16f453a989431ad2c46d12501b5ca561b598eda9f0d95868e0017
+ size 42231705
sea_wiki_dedup_data/wiki_nia_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3895f612ef2366104e669eac8acd5ec13646ab4dd388a9372422bbcdfbbe45d6
- size 2151317
sea_wiki_dedup_data/wiki_nia_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb463394b65b36ea36e9f4861edbe78905aed275d3ad8d32dbd66c0f69bfcbd4
+ size 677839
sea_wiki_dedup_data/wiki_pag_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d36c1ad39a3466be6740d1a9de2ea47c58af4efccab637e3cc9a192f25ab9abe
+ size 234098
sea_wiki_dedup_data/wiki_pam_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d4600cf6211b6811a46d6099c411b95ca3125c8bc89f5a86855c43205a1520e0
- size 8201263
sea_wiki_dedup_data/wiki_pam_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b25cf56f6260b4511b42b556860411297a4e32e22f22476ce53a96c072df206
+ size 2607510
sea_wiki_dedup_data/wiki_shn_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:05d1589795708541a95366f18528a47c5ac95f4e287291a19d5589f03183cf8f
- size 33599756
sea_wiki_dedup_data/wiki_shn_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca1893fa99fda0b50802e7af6c7662f963c883cc7d6af4bf2f704f4e9682aaa1
+ size 3687592
sea_wiki_dedup_data/wiki_su_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:32b758f801e232d271882a619b7107237b15964dc467b16a5867493cfdb1b655
- size 47525184
sea_wiki_dedup_data/wiki_su_20231101_dataset_dedup_cleansed.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8890bc3a05d83ad0a7062edde9317fa1040ad12fff859c1879124c1fbdf28a9d
+ size 11654891
sea_wiki_dedup_data/wiki_ta_20231101_dataset_dedup_cleansed.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:15fc78ac817f48d7a1dcc64c79899b5b72298fd0b0ab36d20bc563723893050c
- size 809226177