sabilmakbar committed on
Commit
7725656
1 Parent(s): 3f7dd84

- Init commit (15a95e7ccfd3c7d5daf755e12db8d1fcb865b030)
- Syncing the docs and the codes (7d396d138f4e531364402280ff17c5bba020abe1)
- Update markdown to activate table (48bfa3a9b49e3ad646d0473b225f9bd7f52f536c)
- Update markdown to activate table (607e083038d0e45e4a5b3591a480dee17c93c8ec)

Files changed (34)
  1. .gitattributes +2 -0
  2. README.md +234 -0
  3. cleanse_wiki_data.py +246 -0
  4. dedup_raw_wiki_data_indo.sh +56 -0
  5. extract_raw_wiki_data.py +68 -0
  6. extract_raw_wiki_data_indo.sh +29 -0
  7. indo_wiki.py +181 -0
  8. indo_wiki_dedup_data/wiki_ace_20230901_dataset_soft_hard_cleansed.csv +3 -0
  9. indo_wiki_dedup_data/wiki_ban_20230901_dataset_soft_hard_cleansed.csv +3 -0
  10. indo_wiki_dedup_data/wiki_bjn_20230901_dataset_soft_hard_cleansed.csv +3 -0
  11. indo_wiki_dedup_data/wiki_bug_20230901_dataset_soft_hard_cleansed.csv +3 -0
  12. indo_wiki_dedup_data/wiki_gor_20230901_dataset_soft_hard_cleansed.csv +3 -0
  13. indo_wiki_dedup_data/wiki_id_20230901_dataset_soft_hard_cleansed.csv +3 -0
  14. indo_wiki_dedup_data/wiki_jv_20230901_dataset_soft_hard_cleansed.csv +3 -0
  15. indo_wiki_dedup_data/wiki_map-bms_20230901_dataset_soft_hard_cleansed.csv +3 -0
  16. indo_wiki_dedup_data/wiki_min_20230901_dataset_soft_hard_cleansed.csv +3 -0
  17. indo_wiki_dedup_data/wiki_ms_20230901_dataset_soft_hard_cleansed.csv +3 -0
  18. indo_wiki_dedup_data/wiki_nia_20230901_dataset_soft_hard_cleansed.csv +3 -0
  19. indo_wiki_dedup_data/wiki_su_20230901_dataset_soft_hard_cleansed.csv +3 -0
  20. indo_wiki_dedup_data/wiki_tet_20230901_dataset_soft_hard_cleansed.csv +3 -0
  21. indo_wiki_raw_data/wiki_ace_20230901_raw_dataset.csv +3 -0
  22. indo_wiki_raw_data/wiki_ban_20230901_raw_dataset.csv +3 -0
  23. indo_wiki_raw_data/wiki_bjn_20230901_raw_dataset.csv +3 -0
  24. indo_wiki_raw_data/wiki_bug_20230901_raw_dataset.csv +3 -0
  25. indo_wiki_raw_data/wiki_gor_20230901_raw_dataset.csv +3 -0
  26. indo_wiki_raw_data/wiki_id_20230901_raw_dataset.csv +3 -0
  27. indo_wiki_raw_data/wiki_jv_20230901_raw_dataset.csv +3 -0
  28. indo_wiki_raw_data/wiki_map-bms_20230901_raw_dataset.csv +3 -0
  29. indo_wiki_raw_data/wiki_min_20230901_raw_dataset.csv +3 -0
  30. indo_wiki_raw_data/wiki_ms_20230901_raw_dataset.csv +3 -0
  31. indo_wiki_raw_data/wiki_nia_20230901_raw_dataset.csv +3 -0
  32. indo_wiki_raw_data/wiki_su_20230901_raw_dataset.csv +3 -0
  33. indo_wiki_raw_data/wiki_tet_20230901_raw_dataset.csv +3 -0
  34. requirements.txt +6 -0
.gitattributes CHANGED
@@ -53,3 +53,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ # Wiki data files CSV
57
+ *.csv filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,237 @@
1
  ---
2
+ annotations_creators:
3
+ - no-annotation
4
+ language_creators:
5
+ - crowdsourced
6
+ language:
7
+ - ace
8
+ - ban
9
+ - bjn
10
+ - bug
11
+ - gor
12
+ - id
13
+ - jv
14
+ - mis
15
+ - min
16
+ - ms
17
+ - nia
18
+ - su
19
+ - tet
20
+ license:
21
+ - cc-by-sa-3.0
22
+ - gfdl
23
+ multilinguality:
24
+ - multilingual
25
+ source_datasets:
26
+ - Wikipedia-HF
27
+ task_categories:
28
+ - text-generation
29
+ - fill-mask
30
+ task_ids:
31
+ - language-modeling
32
+ - masked-language-modeling
33
+ pretty_name: Wikipedia Archive for Indonesian Languages & Local Languages
34
+ tags:
35
+ - Wikipedia
36
+ - Indonesian
37
+ - Sundanese
38
+ - Javanese
39
+ - Malay
40
+ - Dialect
41
+ - Javanese Dialect (Banyumase/Ngapak)
42
+ - Indonesian Language
43
+ - Malay Language
44
+ - Indonesia-related Languages
45
+ - Indonesian Local Languages
46
+ dataset_info:
47
+ - config_name: indowiki_all
48
+ features:
49
+ - name: url
50
+ dtype: string
51
+ - name: title
52
+ dtype: string
53
+ - name: text
54
+ dtype: string
55
+ splits:
56
+ - name: ace
57
+ num_bytes: 4875688
58
+ num_examples: 12932
59
+ - name: ban
60
+ num_bytes: 17561379
61
+ num_examples: 20243
62
+ - name: bjn
63
+ num_bytes: 6669628
64
+ num_examples: 10460
65
+ - name: bug
66
+ num_bytes: 3297641
67
+ num_examples: 15877
68
+ - name: gor
69
+ num_bytes: 6007726
70
+ num_examples: 14572
71
+ - name: id
72
+ num_bytes: 1103106307
73
+ num_examples: 657990
74
+ - name: jv
75
+ num_bytes: 70335030
76
+ num_examples: 73150
77
+ - name: map_bms
78
+ num_bytes: 5215803
79
+ num_examples: 13574
80
+ - name: min
81
+ num_bytes: 116481049
82
+ num_examples: 227024
83
+ - name: ms
84
+ num_bytes: 416001194
85
+ num_examples: 367463
86
+ - name: nia
87
+ num_bytes: 1938378
88
+ num_examples: 1651
89
+ - name: su
90
+ num_bytes: 47489084
91
+ num_examples: 61557
92
+ - name: tet
93
+ num_bytes: 1452716
94
+ num_examples: 1465
95
+ download_size: 1803193334
96
+ dataset_size: 1800431623
97
+ - config_name: indowiki_dedup_all
98
+ features:
99
+ - name: url
100
+ dtype: string
101
+ - name: title
102
+ dtype: string
103
+ - name: text
104
+ dtype: string
105
+ splits:
106
+ - name: ace
107
+ num_bytes: 4867838
108
+ num_examples: 12904
109
+ - name: ban
110
+ num_bytes: 17366080
111
+ num_examples: 19837
112
+ - name: bjn
113
+ num_bytes: 6655378
114
+ num_examples: 10437
115
+ - name: bug
116
+ num_bytes: 2072609
117
+ num_examples: 9793
118
+ - name: gor
119
+ num_bytes: 5989252
120
+ num_examples: 14514
121
+ - name: id
122
+ num_bytes: 1100932403
123
+ num_examples: 654287
124
+ - name: jv
125
+ num_bytes: 69774853
126
+ num_examples: 72667
127
+ - name: map_bms
128
+ num_bytes: 5060989
129
+ num_examples: 11832
130
+ - name: min
131
+ num_bytes: 116376870
132
+ num_examples: 225858
133
+ - name: ms
134
+ num_bytes: 410443550
135
+ num_examples: 346186
136
+ - name: nia
137
+ num_bytes: 1938121
138
+ num_examples: 1650
139
+ - name: su
140
+ num_bytes: 47410439
141
+ num_examples: 61494
142
+ - name: tet
143
+ num_bytes: 1447926
144
+ num_examples: 1460
145
+ download_size: 1793103024
146
+ dataset_size: 1790336308
147
+ - config_name: indowiki_dedup_id_only
148
+ features:
149
+ - name: url
150
+ dtype: string
151
+ - name: title
152
+ dtype: string
153
+ - name: text
154
+ dtype: string
155
+ splits:
156
+ - name: train
157
+ num_bytes: 1100932403
158
+ num_examples: 654287
159
+ download_size: 1103131493
160
+ dataset_size: 1100932403
161
+ ---
162
+
163
+ # **Indonesian Wikipedia Data Repository**
164
+ ---
165
  license: cc-by-sa-3.0
166
  ---
167
+ Welcome to the Indonesian Wikipedia Data Repository. The datasets are extracted from [Wikipedia HF](https://huggingface.co/datasets/wikipedia) and processed using the scripts available in this repository for reproducibility purposes.
168
+
169
+ # **FAQs**
170
+ ### What languages are available in this dataset?
171
+ Please check the following table.
172
+
173
+ | Lang Code | Lang Desc | Wiki Info | Total Data | Total Size (bytes) |
174
+ | :---: | :----: | :--- | ---: | ---: |
175
+ | ace | Acehnese | [Wiki Link](https://en.wikipedia.org/wiki/Acehnese_language) | 12904 | 4867838 |
176
+ | ban | Balinese | [Wiki Link](https://en.wikipedia.org/wiki/Balinese_language) | 19837 | 17366080 |
177
+ | bjn | Banjarese | [Wiki Link](https://en.wikipedia.org/wiki/Banjarese_language) | 10437 | 6655378 |
178
+ | bug | Buginese | [Wiki Link](https://en.wikipedia.org/wiki/Buginese_language) | 9793 | 2072609 |
179
+ | gor | Gorontalo | [Wiki Link](https://en.wikipedia.org/wiki/Gorontalo_language) | 14514 | 5989252 |
180
+ | id | Indonesian | [Wiki Link](https://en.wikipedia.org/wiki/Indonesian_language) | 654287 | 1100932403 |
181
+ | jv | Javanese | [Wiki Link](https://en.wikipedia.org/wiki/Javanese_language) | 72667 | 69774853 |
182
+ | map_bms | Banyumasan <br />(Dialect of Javanese) | [Wiki Link](https://en.wikipedia.org/wiki/Banyumasan_dialect) | 11832 | 5060989 |
183
+ | min | Minangkabau | [Wiki Link](https://en.wikipedia.org/wiki/Minangkabau_language) | 225858 | 116376870 |
184
+ | ms | Malay | [Wiki Link](https://en.wikipedia.org/wiki/Malay_language) | 346186 | 410443550 |
185
+ | nia | Nias | [Wiki Link](https://en.wikipedia.org/wiki/Nias_language) | 1650 | 1938121 |
186
+ | su | Sundanese | [Wiki Link](https://en.wikipedia.org/wiki/Sundanese_language) | 61494 | 47410439 |
187
+ | tet | Tetum | [Wiki Link](https://en.wikipedia.org/wiki/Tetum_language) | 1460 | 1447926 |
188
+
189
+
190
+ ### How do I extract a new Wikipedia dataset of Indonesian languages?
191
+ You may check the script [_```extract_raw_wiki_data.py```_](https://huggingface.co/datasets/sabilmakbar/indo_wiki/blob/main/extract_raw_wiki_data.py) to understand its implementation, or adjust the bash script provided in [_```extract_raw_wiki_data_indo.sh```_](https://huggingface.co/datasets/sabilmakbar/indo_wiki/blob/main/extract_raw_wiki_data_indo.sh) to extract it on your own. Please note that this extraction process is extensible to any language of your choice.
192
+
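+ As an illustration, here is a minimal sketch of what the extraction script does under the hood (the language code and dump date below are just examples; the flow assumes the ```datasets``` and ```apache-beam``` versions pinned in ```requirements.txt```):
+ ```
+ from datasets import load_dataset
+
+ lang_id = "ace"        # example Wikipedia language code
+ date_ver = "20230901"  # example dump date (YYYYMMDD)
+
+ # Mirrors extract_raw_wiki_data.py: pull the dump via the HF "wikipedia" builder
+ # and persist it as a raw CSV for the later cleansing/dedup step.
+ df = load_dataset(
+     "wikipedia",
+     language=lang_id,
+     date=date_ver,
+     beam_runner="DirectRunner",
+     split="train",
+ ).to_pandas()
+
+ df.to_csv(f"wiki_{lang_id}_{date_ver}_raw_dataset.csv", index=False)
+ ```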
193
+ ### How do I find the latest available Wikipedia dumps and their language coverage?
194
+ You may visit this [Wikipedia Dump Index](https://dumps.wikimedia.org/backup-index.html) to check the latest available dumps, and this [Wikipedia Language Coverage](https://meta.wikimedia.org/wiki/List_of_Wikipedias#All_Wikipedias_ordered_by_number_of_articles) list to map the language codes of any languages you want to extract.
195
+
196
+ ### How is the data preprocessed? What makes it different from loading it directly from Wikipedia HF?
197
+ The data available here is processed with the following flow:
198
+ 1. Raw data is deduplicated on ```title``` and ```text``` (the text content of a given article) to remove articles containing boilerplate text (template text usually used when no information is available yet, or asking for content contributions to that article), which is usually deemed noisy for NLP data.
199
+ 2. Furthermore, the ```title``` and ```text``` data are checked for string-matching duplication after light pre-processing (symbols removed, HTML tags stripped, non-ASCII chars removed). You may check the [```cleanse_wiki_data.py```](https://huggingface.co/datasets/sabilmakbar/indo_wiki/blob/main/cleanse_wiki_data.py) script to understand its implementation (a condensed sketch of both passes is shown below).
200
+
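+ A condensed, simplified sketch of the two passes on one raw CSV (the actual script also strips HTML tags and URL-encodings, and keeps the longest raw variant among soft duplicates):
+ ```
+ import re
+ import pandas as pd
+
+ df = pd.read_csv("wiki_ace_20230901_raw_dataset.csv")  # example raw file
+
+ # 1. Hard dedup: drop every row whose url/title/text appears more than once verbatim.
+ for col in ["url", "title", "text"]:
+     df = df.drop_duplicates(subset=col, keep=False)
+
+ # 2. Soft dedup: normalize the text, then keep only the longest raw variant per normalized value.
+ def normalize(text: str) -> str:
+     text = re.sub(r"\s+", " ", str(text).lower())
+     return re.sub(r"[^a-z0-9\s]", "", text).strip()
+
+ for col in ["title", "text"]:
+     kept = (df.assign(_key=df[col].map(normalize), _len=df[col].astype(str).str.len())
+               .sort_values("_len", ascending=False)
+               .drop_duplicates(subset="_key", keep="first"))
+     df = df.loc[kept.index.sort_values()]
+
+ df.to_csv("wiki_ace_20230901_dataset_soft_hard_cleansed.csv", index=False)
+ ```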
201
+ # Getting Started #
202
+ ### To read the datasets directly ###
203
+ Use one of the following code chunks to load it from HuggingFace Hub:
204
+ You can pass a ```config name``` as the second argument, as in the following script:
205
+ ```
206
+ from datasets import load_dataset
+
+ dataset = load_dataset(
207
+ "sabilmakbar/indo_wiki",
208
+ "indowiki_dedup_all" # a config name, can be "indowiki_all" or "indowiki_dedup_id_only"; defaults to "indowiki_dedup_all"
209
+ )
210
+ ```
211
+ Or you can provide both ```lang``` and ```date_stamp``` (providing only one will throw an error):
212
+ ```
213
+ dataset = load_dataset(
214
+ "sabilmakbar/indo_wiki",
215
+ lang = "id", # see the splits for complete lang choices
216
+ date_stamp="20230901"
217
+ )
218
+ ```
219
+
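+ Note that with the ```indowiki_all``` / ```indowiki_dedup_all``` configs each language is exposed as its own split (see the split names in the dataset card header above), while ```indowiki_dedup_id_only``` uses a single ```train``` split. A quick illustrative sketch:
+ ```
+ from datasets import load_dataset
+
+ dataset = load_dataset("sabilmakbar/indo_wiki", "indowiki_dedup_all")
+ print(dataset)            # one split per language code: ace, ban, ..., tet
+ print(dataset["ace"][0])  # a single article: {"url": ..., "title": ..., "text": ...}
+
+ id_only = load_dataset("sabilmakbar/indo_wiki", "indowiki_dedup_id_only")
+ print(id_only["train"][0])
+ ```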
220
+ ### To replicate the whole dataset generation process ###
221
+ 1. Set up a new Python/Conda environment (recommended Python version: 3.9.6 to 3.9.18 or 3.10.0 to 3.10.13) and install the requirements of this codebase from ```requirements.txt``` via ```pip install -r requirements.txt```.
222
+ 2. Activate the chosen Python/Conda environment in which the requirements were installed.
223
+ 3. Run this ```sh``` script to extract the raw data from the Wikimedia dump:
224
+ ```sh extract_raw_wiki_data_indo.sh```.
225
+ 4. Run this ```sh``` script to deduplicate the extracted data:
226
+ ```sh dedup_raw_wiki_data_indo.sh```.
227
+
228
+ ## Citation Info:
229
+ ```
230
+ @ONLINE{wikidump,
231
+ author = "Wikimedia Foundation",
232
+ title = "Wikimedia Downloads",
233
+ url = "https://dumps.wikimedia.org"}
234
+ @ONLINE{wikipedia-hf,
235
+ title = "Huggingface Wikipedia Dataset",
236
+ url = "https://huggingface.co/datasets/wikipedia"}
237
+ ```
cleanse_wiki_data.py ADDED
@@ -0,0 +1,246 @@
1
+ '''
2
+ Script for cleansing the Wikipedia data that has been extracted by extract_raw_wiki_data.py
3
+ '''
4
+ #core functionality modules
5
+ import os, gc
6
+ import logging
7
+ import argparse
8
+ import warnings
9
+
10
+ from functools import partial
11
+
12
+ #text preprocess modules
13
+ import re
14
+ import urllib
15
+ from xml.etree import ElementTree as ET
16
+
17
+ #dataset related modules
18
+ import numpy as np
19
+ import pandas as pd
20
+
21
+
22
+ ### MODULES DEFINITION ###
23
+ #create custom type-checking of incoming ArgParse
24
+ def argparse_bool_check(value: str):
25
+ #cast str with value like float into actual float
26
+ try:
27
+ value = float(value)
28
+ #can't be parsed as float, keep as it is
29
+ except ValueError:
30
+ pass
31
+
32
+ #cast float-like value (incl int) into str
33
+ if isinstance(value, float) and int(value) == value:
34
+ value = str(int(value))
35
+ #raise ArgumentTypeError if the value isn't in string already
36
+ else:
37
+ if not isinstance(value, str):
38
+ raise argparse.ArgumentTypeError(f"Incorrect value (args: {value})! Expected a value cast-able to '1'/'0' or already a string. Please rectify!")
39
+ #check for these combinations of values
40
+ if value.lower() in ("yes", "true", "t", "y", "1"):
41
+ return True
42
+ elif value.lower() in ("no", "false", "f", "n", "0"):
43
+ return False
44
+ else:
45
+ raise argparse.ArgumentTypeError(f"Value Error! Not the correct value (args: {value})! Please rectify!")
46
+
47
+
48
+ def set_logger():
49
+ # Set up the logger
50
+ logging.basicConfig(
51
+ level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
52
+ format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format
53
+ datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format
54
+ )
55
+
56
+ # Create a file handler to write logs into a file
57
+ file_handler = logging.FileHandler('app.log')
58
+
59
+ # Set the log level for the file handler
60
+ file_handler.setLevel(logging.INFO)
61
+
62
+ # Create a formatter for the file handler (customize the log format for the file)
63
+ file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
64
+ file_handler.setFormatter(file_formatter)
65
+
66
+ logger = logging.getLogger("Wiki Dataset Generation")
67
+ logger.addHandler(file_handler)
68
+
69
+ return logger
70
+
71
+
72
+ #wrapper fn of text-cleansing
73
+ def text_cleansing_wrapper(fn, exception_class_names = []):
74
+
75
+ #ensure caught exception class names passed to decorator is a list (if provided)
76
+ if not isinstance(exception_class_names, list):
77
+ raise TypeError("Exception Class Name for Wrapper is not a list!")
78
+ #ensure all values of caught exception class name list is a string
79
+ if not all([isinstance(val, str) for val in exception_class_names]):
80
+ raise ValueError("Found an element of Exception Class Name for Wrapper that is not a string!")
81
+
82
+ #lowercase all exception class name
83
+ exception_class_names = [val.lower() for val in exception_class_names]
84
+ if len(exception_class_names) == 0:
85
+ warnings.warn("The wrapper received 0 `exception_class_names` to catch! On any exception, the input will be returned as-is!")
86
+
87
+ def text_fn_wrapper(text: str):
88
+ try:
89
+ return fn(text)
90
+ except Exception as e:
91
+ _exc_name = type(e).__name__
92
+ if _exc_name.lower() not in exception_class_names and len(exception_class_names)>0:
93
+ raise Exception(f"An exception of {_exc_name} occurred!") from e
94
+ else:
95
+ _followup_msg = "Returning the input as it is..."
96
+ _text_warn = f"An exception of {_exc_name} occurred! {_followup_msg}"
97
+ warnings.warn(_text_warn)
98
+ return text
99
+
100
+ return text_fn_wrapper
101
+
102
+
103
+ #create html tags cleanser of a given text
104
+ partial_decorator = partial(text_cleansing_wrapper, exception_class_names=["parseerror"])
105
+ @partial_decorator
106
+ def remove_html_tags(text: str):
107
+ #extracted from "https://stackoverflow.com/a/9662410", w/ additional decorator of error handler
108
+ return ''.join(ET.fromstring(text).itertext())
109
+
110
+
111
+ #create non-ascii removal of text
112
+ @text_cleansing_wrapper
113
+ def decode_url_and_remove_non_ascii(text: str):
114
+ # return (urllib.parse.unquote(text)).encode('utf8', errors='ignore').decode().strip()
115
+ return (urllib.parse.unquote(text)).encode('ascii', errors='ignore').decode().strip()
116
+
117
+ #create excessive whitespace removal of text
118
+ @text_cleansing_wrapper
119
+ def remove_excessive_whitespace(text: str):
120
+ return re.sub(r"(\s)(\s+)", r"\1", text).strip()
121
+
122
+ #create non-alphanumeric removal of text
123
+ @text_cleansing_wrapper
124
+ def remove_non_alphanumeric(text: str):
125
+ return re.sub(r"[^a-z0-9\s]", "", text, flags=re.I).strip()
126
+
127
+ def cleanse_wiki_text(text: str):
128
+ return remove_html_tags(decode_url_and_remove_non_ascii(text))
129
+
130
+ def normalize_wiki_title(text: str):
131
+ return remove_non_alphanumeric(remove_excessive_whitespace(text.lower()))
132
+
133
+
134
+ ### MAIN CODE ###
135
+ if __name__ == "__main__":
136
+ parser = argparse.ArgumentParser()
137
+
138
+ parser.add_argument("--raw-csv-path", help="Relative location of csv file containing raw Wikipedia data")
139
+ parser.add_argument("--drop-hard-dupl", help="""Flag whether to drop hard duplicates
140
+ (exact values of data of relevant text fields, Titles & Desc)""",
141
+ default=True, type=argparse_bool_check)
142
+ parser.add_argument("--drop-soft-dupl", help="""Flag whether to drop soft duplicates
143
+ (duplicates after cleansed and normalized relevant text fields, Titles & Desc)""",
144
+ default=True, type=argparse_bool_check)
145
+ parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data
146
+ to the `cleanse_wiki_data.py` script dir""",
147
+ default=os.path.dirname(os.path.abspath(__file__)))
148
+
149
+ args = parser.parse_args()
150
+
151
+
152
+ expected_colnames = ["id", "url", "title", "text"]
153
+
154
+ logger = set_logger()
155
+ logger.info("Parsing arguments...")
156
+
157
+ raw_data_path = args.raw_csv_path
158
+ drop_hard_dupl = args.drop_hard_dupl
159
+ drop_soft_dupl = args.drop_soft_dupl
160
+ save_dir = args.save_dir_path
161
+
162
+ df = pd.read_csv(raw_data_path)
163
+ if len(set(df.columns).difference(set(expected_colnames))) != 0 or len(set(expected_colnames).difference(set(df.columns))) != 0:
164
+ raise ValueError(f"The received data schema (columns: {', '.join(df.columns.to_list())}) doesn't match the expected columns: {', '.join(expected_colnames)}!")
165
+
166
+ if (not drop_hard_dupl) and (not drop_soft_dupl):
167
+ raise AssertionError("The script won't run with both `drop-hard-dupl` and `drop-soft-dupl` args turned off!")
168
+ elif (not drop_hard_dupl):
169
+ warnings.warn("The arg `drop_hard_dupl` is turned off! The data may still contain Wikipedia template boilerplate (usually articles with no contributed text)!")
170
+
171
+ #will save id identifier colname first (popping first list val)
172
+ id_colname = expected_colnames.pop(0)
173
+
174
+ # if any of the data has duplicate values from columns checked (url, title, or text),
175
+ # it means the data integrity is questionable
176
+ # i.e. copied from other article or filled with template text
177
+ # hence, we will delete those duplicated datasets
178
+
179
+ suffix_file = "_dedup_cleansed"
180
+ #hard duplicate drop (drop all duplicate values that has exact same text on expected unique colnames)
181
+ if drop_hard_dupl:
182
+
183
+ for colname in expected_colnames:
184
+ logger.info(f"Checking data integrity on column {colname} on removing hard-duplicate(s)...")
185
+ dupl_text_df = df[df.duplicated(subset=colname,keep=False)]
186
+ shape_of_dupl_data = dupl_text_df.shape[0]
187
+
188
+ if shape_of_dupl_data > 0:
189
+ logger.info(f"Found {shape_of_dupl_data} data duplicated! Will be dropped")
190
+ df.drop_duplicates(subset=colname, keep=False, inplace=True)
191
+
192
+
193
+ #check id/idx of the cleansed data, whether it has duplicate
194
+ # (the duplication of id/idx should came from the very first extraction, not from the cleansing)
195
+
196
+ if df[df.duplicated(subset=id_colname,keep=False)].shape[0] > 0:
197
+ logger.info("Duplicated ID found! Re-assigning ID to the new ones based on `df.reset_index` method!")
198
+ df[id_colname] = df.reset_index().index
199
+
200
+ #soft duplicate drop (drop all except one duplicate values that has exact same text on expected unique colnames)
201
+ #keep the data that has longest value of its raw form
202
+ if drop_soft_dupl:
203
+
204
+ idx_to_keep = set(df.index.to_list())
205
+ #clean from text & title only, url isn't needed for this process
206
+ expected_colnames.remove("url")
207
+
208
+ for colname in expected_colnames:
209
+ logger.info(f"Checking data integrity on column {colname} on removing soft-duplicate(s)...")
210
+ _df = df.copy(deep=True)
211
+
212
+ #define text processing fn for soft-duplicate cleansing
213
+ #text processing for all colums (text & title)
214
+ _df = _df[[colname]]
215
+ _df[colname] = _df[colname].astype("str")
216
+ logger.info(f"Cleansing the data based on {colname}")
217
+ _df[colname+"_raw_len"] = _df[colname].apply(len)
218
+ _df[colname+"_cleansed"] = _df[colname].apply(cleanse_wiki_text)
219
+ if colname == "title":
220
+ # title text has been cleansed by `cleanse_wiki_text` fn, but needs to normalized on
221
+ # whitespaces, non alphanum syms (incl. punctuations), and case (all lowercased)
222
+ _df[colname+"_cleansed"] = _df[colname+"_cleansed"].apply(normalize_wiki_title)
223
+
224
+ #choose the data to keep by "ranking" it according to len of its raw text (greatest to keep)
225
+ logger.info(f"Ranking and grouping the data based on {colname}")
226
+ _df["rk"] = _df.groupby(colname+"_cleansed")[colname+"_raw_len"].rank(method="min", ascending=False)
227
+ shape_of_dupl_data = _df[_df["rk"]>1].shape[0]
228
+
229
+ if shape_of_dupl_data > 0:
230
+ logger.info(f"Found {shape_of_dupl_data} data duplicated! Will be dropped")
231
+ _idx_to_keep = _df[_df["rk"]==1].index.to_list()
232
+ if len(_idx_to_keep)+shape_of_dupl_data != df.shape[0]:
233
+ raise AssertionError("Mismatch of data number!")
234
+ idx_to_keep = idx_to_keep.intersection(set(_idx_to_keep))
235
+ del _df
236
+ gc.collect()
237
+
238
+ logger.info(f"The final data kept is {len(idx_to_keep)} from {df.shape[0]}")
239
+ df = df.loc[list(idx_to_keep),:]
240
+
241
+ logger.info("Saving dataset cleansed form...")
242
+ #the input path split by ("/"): the last entry is the filename
243
+ #whereas the filename split by (".") excluding the last value gives the filename w/o the ".csv" extension
244
+
245
+ _save_fn = ".".join(raw_data_path.split("/")[-1].split(".")[:-1]) + suffix_file + ".csv"
246
+ df.to_csv(f"{save_dir}/{_save_fn}", index=False)
dedup_raw_wiki_data_indo.sh ADDED
@@ -0,0 +1,56 @@
1
+ #!/bin/bash
2
+
3
+ # all available lang codes in Indonesia local-languages or linguistically-related to Indonesian Language:
4
+ # "ace", "ban", "bjn", "bug", "gor", "id", "jv", "map-bms", "min", "ms", "nia", "su", "tet"
5
+
6
+ #params of executions
7
+ folder_dir_to_save=./indo_wiki_dedup
8
+ input_folder_to_be_dedup=./indo_wiki_raw
9
+
10
+ drop_hard_dupl=True
11
+ drop_soft_dupl=True
12
+
13
+
14
+ # main executions
15
+
16
+ # src: https://stackoverflow.com/a/18887210 (to list all files under a dir)
17
+ shopt -s nullglob
18
+ file_name_array=($input_folder_to_be_dedup/*)
19
+ shopt -u nullglob # Turn off nullglob to make sure it doesn't interfere with anything later
20
+ file_name_array="${file_name_array}"
21
+
22
+ if [ ${#file_name_array[@]} == 0 ]; then
23
+ echo "No files found under directory $input_folder_to_be_dedup" >&2
24
+ fi
25
+
26
+ if [ ! -d $folder_dir_to_save ];
27
+ then
28
+ echo "Dir $folder_dir_to_save does not exist! Creating the dir..."
29
+ mkdir $folder_dir_to_save
30
+ fi
31
+
32
+ echo "The params hard-dedup drop is set as $drop_hard_dupl"
33
+ echo "The params soft-dedup drop is set as $drop_soft_dupl"
34
+
35
+ # echo "${file_name_array[0]}"
36
+ # echo "${file_name_array[1]}"
37
+ # echo "${file_name_array[2]}"
38
+ # echo "${file_name_array[3]}"
39
+
40
+ for val in ${!file_name_array[@]}; do
41
+ csv_path=${file_name_array[$val]}
42
+
43
+ if [[ ${csv_path} != *".csv" ]]; then
44
+ echo "The extracted file name isn't a CSV! Skipping! Received $csv_path"
45
+ continue
46
+ fi
47
+
48
+ echo "Executing Dedup on iteration no "$((val+1))" of total ${#file_name_array[@]} for input data $csv_path"
49
+ python cleanse_wiki_data.py \
50
+ --raw-csv-path $csv_path \
51
+ --drop-hard-dupl $drop_hard_dupl \
52
+ --drop-soft-dupl $drop_soft_dupl \
53
+ --save-dir-path $folder_dir_to_save
54
+ echo "Done Execution"
55
+ done
56
+ echo "Done Dedup Process"
extract_raw_wiki_data.py ADDED
@@ -0,0 +1,68 @@
1
+ '''
2
+ Script for generating Wikipedia data that is dumped into https://dumps.wikimedia.org/
3
+ More info can be read on https://huggingface.co/datasets/wikipedia
4
+ -------------------
5
+ Check here to see available indexed data: https://dumps.wikimedia.org/backup-index.html
6
+ Also check here to see language meta from its code: https://meta.wikimedia.org/wiki/List_of_Wikipedias
7
+ '''
8
+
9
+ import os
10
+ import logging
11
+ import argparse
12
+
13
+ import pandas as pd
14
+ from datasets import load_dataset
15
+
16
+
17
+ def set_logger():
18
+ # Set up the logger
19
+ logging.basicConfig(
20
+ level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
21
+ format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format
22
+ datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format
23
+ )
24
+
25
+ # Create a file handler to write logs into a file
26
+ file_handler = logging.FileHandler('app.log')
27
+
28
+ # Set the log level for the file handler
29
+ file_handler.setLevel(logging.INFO)
30
+
31
+ # Create a formatter for the file handler (customize the log format for the file)
32
+ file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
33
+ file_handler.setFormatter(file_formatter)
34
+
35
+ logger = logging.getLogger("Wiki Dataset Generation")
36
+ logger.addHandler(file_handler)
37
+
38
+ return logger
39
+
40
+
41
+ #only executed if called directly
42
+ if __name__ == "__main__":
43
+ parser = argparse.ArgumentParser()
44
+
45
+ parser.add_argument("--lang-id", help="Lang ID from Wikipedia Data to extract")
46
+ parser.add_argument("--date-ver", help="Date of Wikipedia Data (YYYYMMDD) generation to extract")
47
+ parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data
48
+ to the `extract_raw_wiki_data.py` script dir""",
49
+ default=os.path.dirname(os.path.abspath(__file__)))
50
+
51
+ args = parser.parse_args()
52
+
53
+
54
+ dset_name = "wikipedia"
55
+
56
+ logger = set_logger()
57
+ logger.info("Parsing arguments...")
58
+
59
+ lang_id = args.lang_id
60
+ date_ver = args.date_ver
61
+ save_dir = args.save_dir_path
62
+
63
+ logger.info("Loading the dataset from Wikipedia...")
64
+ df = load_dataset(dset_name, language=lang_id, date=date_ver, beam_runner='DirectRunner', split="train").to_pandas()
65
+ logger.info("Loading done!")
66
+ logger.info(f"#Data collected: {df.shape[0]}")
67
+ logger.info("Saving dataset raw form...")
68
+ df.to_csv(f"{save_dir}/wiki_{lang_id}_{date_ver}_raw_dataset.csv", index=False)
extract_raw_wiki_data_indo.sh ADDED
@@ -0,0 +1,29 @@
1
+ #!/bin/bash
2
+
3
+ # all available lang codes in Indonesia local-languages or linguistically-related to Indonesian Language:
4
+ # "ace", "ban", "bjn", "bug", "gor", "id", "jv", "map-bms", "min", "ms", "nia", "su", "tet"
5
+
6
+ #params of executions
7
+ date_ver=20230901
8
+ folder_dir_to_save=./indo_wiki_raw
9
+ lang_list=(ace ban bjn)
10
+ # bug gor id jv map-bms min ms nia su tet)
11
+
12
+
13
+ #main executions
14
+
15
+ if [ ! -d $folder_dir_to_save ]; then
16
+ echo "Dir $folder_dir_to_save does not exist! Creating the dir..."
17
+ mkdir $folder_dir_to_save
18
+ fi
19
+
20
+ for val in ${!lang_list[@]}; do
21
+ lang=${lang_list[$val]}
22
+ echo "Executing Extractor on iteration no $((val+1)) of total ${#lang_list[@]} for language $lang and date version of $date_ver"
23
+ python extract_raw_wiki_data.py \
24
+ --lang-id $lang \
25
+ --date-ver $date_ver \
26
+ --save-dir-path $folder_dir_to_save
27
+ echo "Done Execution"
28
+ done
29
+ echo "Done Extraction Process"
indo_wiki.py ADDED
@@ -0,0 +1,181 @@
1
+ """The Indonesian Wiki Loader"""
2
+
3
+ import os
4
+ import re
5
+
6
+ import pandas as pd
7
+
8
+ import datasets
9
+
10
+
11
+ _CITATIONS = """\
12
+ @ONLINE{wikidump,
13
+ author = "Wikimedia Foundation",
14
+ title = "Wikimedia Downloads",
15
+ url = "https://dumps.wikimedia.org"}
16
+
17
+ @ONLINE{wikipedia-hf,
18
+ title = "Huggingface Wikipedia Dataset",
19
+ url = "https://huggingface.co/datasets/wikipedia"}"""
20
+
21
+ _REPO_URL = "https://huggingface.co/datasets/sabilmakbar/indo_wiki"
22
+
23
+ _LICENSE = (
24
+ "This work is licensed under the Creative Commons Attribution-ShareAlike "
25
+ "3.0 Unported License. To view a copy of this license, visit "
26
+ "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
27
+ "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
28
+ )
29
+
30
+
31
+ _INDO_WIKI_RAW_DESCRIPTION = """\
32
+ Indonesian Wikipedia Data Repository contains Wikipedia Data from Wikipedia HF that focuses
33
+ on extraction of the Indonesian language and Indonesian local languages, some of which
34
+ are considered low-resource or extremely low-resource languages"""
35
+
36
+ _INDO_WIKI_DEDUP_DESCRIPTION = """\
37
+ This is a derivative of Indonesian Wikipedia Data Repository which is already pre-processed
38
+ by identifying and dropping duplicates to prevent boilerplate texts occurring in the dataset"""
39
+
40
+ _AVAILABLE_DUMP_VERSION_DATE = ["20230901"]
41
+ _AVAILABLE_DUMP_LANGUAGES = ["ace", "ban", "bjn", "bug", "gor", "id", "jv", "map-bms", "min", "ms", "nia", "su", "tet"]
42
+
43
+
44
+ def _construct_dataset_from_dset_version_and_lang(date_ver: str, lang: str, mode: str):
45
+ _mode_to_folder_mapper = {"dedup": "indo_wiki_dedup_data", "raw": "indo_wiki_raw_data"}
46
+ _mode_to_file_suffix_mapper = {"dedup": "dataset_soft_hard_cleansed.csv", "raw": "raw_dataset.csv"}
47
+
48
+ return os.path.join(_mode_to_folder_mapper[mode], f"wiki_{lang}_{date_ver}_{_mode_to_file_suffix_mapper[mode]}")
49
+
50
+
51
+ class IndoWikiConfig(datasets.BuilderConfig):
52
+ """BuilderConfig for IndoWiki."""
53
+
54
+ def __init__(self, description: str=None, features: list=['url', 'title', 'text'],
55
+ data_url: str=None, date_stamp: str=None, lang: str=None,
56
+ mode = "dedup", **kwargs):
57
+ """BuilderConfig for IndoWiki.
58
+
59
+ Args:
60
+ description: `string`, description of dataset
61
+ features: `list[string]`, list of the features that will appear in the
62
+ feature dict. Should not include "label" if it's a supervised.
63
+ data_url: `string`, url to download the data.
64
+ date_stamp: `string`, wikidump date_stamp for data available in repo.
65
+ lang: `string`, language to be loaded.
66
+ **kwargs: keyword arguments forwarded to super.
67
+ """
68
+ # validate configs
69
+ if mode not in ["dedup", "raw"]:
70
+ raise ValueError(f"Error occurred! Expected values are 'dedup' or 'raw' for arg `mode`, received {mode}!")
71
+
72
+ if (lang is None or date_stamp is None) and data_url is None:
73
+ raise ValueError("Expected `data_url` is provided or both `date_stamp` and `lang` are provided!")
74
+
75
+ _mode_to_desc_mapper = {"dedup": _INDO_WIKI_DEDUP_DESCRIPTION, "raw":_INDO_WIKI_RAW_DESCRIPTION}
76
+
77
+ if date_stamp is not None and date_stamp not in _AVAILABLE_DUMP_VERSION_DATE:
78
+ raise ValueError("Provided `date_stamp` dataset versioning doesn't match! Please re-check")
79
+
80
+ if lang is not None and lang not in _AVAILABLE_DUMP_LANGUAGES:
81
+ raise ValueError("Provided `lang` doesn't match! Please re-check")
82
+
83
+ super(IndoWikiConfig, self).__init__(**kwargs)
84
+ self.features = features
85
+
86
+ # prioritize kwargs data_url
87
+ if data_url is not None:
88
+ self.data_url = data_url
89
+ else:
90
+ self.data_url = _construct_dataset_from_dset_version_and_lang(date_ver=date_stamp, lang=lang, mode=mode)
91
+
92
+ # auto-construct desc if not provided
93
+ if description is None:
94
+ self.description = _mode_to_desc_mapper[mode] + "\n" + f"From file path {self.data_url}"
95
+
96
+ #define citations & info URL internally in config class
97
+ self.citation = _CITATIONS
98
+ self.url = _REPO_URL
99
+
100
+
101
+ class IndoWiki(datasets.GeneratorBasedBuilder):
102
+ """The IndoWiki Dataset."""
103
+
104
+ #if name isn't provided, will create a dataset of all languages
105
+ DEFAULT_CONFIG_NAME = "indowiki_dedup_all"
106
+ BUILDER_CONFIG_CLASS = IndoWikiConfig
107
+
108
+ _newest_data_raw_all_langs = [_construct_dataset_from_dset_version_and_lang(
109
+ date_ver=sorted(_AVAILABLE_DUMP_VERSION_DATE)[-1], lang=lang, mode="raw") for lang in _AVAILABLE_DUMP_LANGUAGES]
110
+ _newest_data_dedup_all_langs = [_construct_dataset_from_dset_version_and_lang(
111
+ date_ver=sorted(_AVAILABLE_DUMP_VERSION_DATE)[-1], lang=lang, mode="dedup") for lang in _AVAILABLE_DUMP_LANGUAGES]
112
+
113
+ BUILDER_CONFIGS = [
114
+ IndoWikiConfig(
115
+ name="indowiki_all",
116
+ description=_INDO_WIKI_RAW_DESCRIPTION,
117
+ data_url=_newest_data_raw_all_langs
118
+ ),
119
+ IndoWikiConfig(
120
+ name="indowiki_dedup_all",
121
+ description=_INDO_WIKI_DEDUP_DESCRIPTION,
122
+ data_url=_newest_data_dedup_all_langs
123
+ ),
124
+ IndoWikiConfig(
125
+ name="indowiki_dedup_id_only",
126
+ lang="id",
127
+ date_stamp="20230901"
128
+ )
129
+ ]
130
+
131
+
132
+ def _info(self):
133
+ features = {feature: datasets.Value("string") for feature in self.config.features}
134
+
135
+ return datasets.DatasetInfo(
136
+ description = self.config.description,
137
+ features = datasets.Features(features),
138
+ homepage = self.config.url,
139
+ citation = self.config.citation,
140
+ license=_LICENSE)
141
+
142
+
143
+ @staticmethod
144
+ def _get_lang_name_from_data_url(data_url: str):
145
+ #lang code occurred after "wiki_" and before date versioning (using 8len date)
146
+ _list_folder_sep = data_url.split("/")[-1].split("_")
147
+ _min_pos = min([pos for pos, data in enumerate(_list_folder_sep) if bool(re.search(r"\d{8}", data))])
148
+ return re.sub(r"[^\w\.]", "_", "_".join(_list_folder_sep[1:_min_pos]))
149
+
150
+
151
+ def _split_generators(self, dl_manager):
152
+ if self.config.name in ("indowiki_all", "indowiki_dedup_all"):
153
+ file_dict = {self._get_lang_name_from_data_url(file): file for file in self.config.data_url}
154
+ dl_dir = dl_manager.download_and_extract(file_dict)
155
+
156
+ return [
157
+ datasets.SplitGenerator(
158
+ name=datasets.Split(split_name),
159
+ gen_kwargs={
160
+ "data_file": file_name
161
+ }
162
+ )
163
+ #dl_dir is a dictionary containing lang or split as keyname and file path as value
164
+ for split_name, file_name in dl_dir.items()]
165
+ else:
166
+ dl_dir = dl_manager.download_and_extract(self.config.data_url)
167
+ return [
168
+ datasets.SplitGenerator(
169
+ name=datasets.Split.TRAIN,
170
+ gen_kwargs={
171
+ "data_file": dl_dir
172
+ },
173
+ )
174
+ ]
175
+
176
+ def _generate_examples(self, data_file):
177
+ pd_df = pd.read_csv(data_file)
178
+ for _, row in pd_df.iterrows():
179
+ example = {feature: row[feature] for feature in self.config.features}
180
+ idx = row["id"]
181
+ yield idx, example
indo_wiki_dedup_data/wiki_ace_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b7a05edb06e63806bb9f036f11952f11b1619e83aa179a237fa0aac095b27a0
3
+ size 4861916
indo_wiki_dedup_data/wiki_ban_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ab8ecdf127c87def8aec96ba9491dc303551e6c306e04f87aea4d6c31c23671
3
+ size 17374796
indo_wiki_dedup_data/wiki_bjn_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8a9a7fae9899da428857f06f7a61f9776492982c3ba490d4a67412f9bcef8cd
3
+ size 6660505
indo_wiki_dedup_data/wiki_bug_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8408abd47cd96f87c4ff1dcbe9c8ce561c6edb99fe3778ab9d0e0ae30ad866d
3
+ size 2063266
indo_wiki_dedup_data/wiki_gor_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e419898eb86d3aba756335934c520b79638bb8f345e6a97b12ae6696390c4281
3
+ size 5993525
indo_wiki_dedup_data/wiki_id_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c216961666d8e72672e096c7f82da3181c0f4c86f70788a95eb494cbcb3052ff
3
+ size 1103131493
indo_wiki_dedup_data/wiki_jv_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45e5a55de3d79ddf4c9839d9eddda7a15ed04dbe30dba043a9a08d635efe7b11
3
+ size 69829565
indo_wiki_dedup_data/wiki_map-bms_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd1f106fb38507875ca3cf6eb6b58a5a10770b80e1359b777b9d9b79d9b1a087
3
+ size 5069820
indo_wiki_dedup_data/wiki_min_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:406d1e88795af506b45f0f990cf308121e30b38a3806675e26d7b0bbc3190859
3
+ size 116318697
indo_wiki_dedup_data/wiki_ms_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b43492d3c9e2feb969475c6248de8d3b5c4170ba5ce42315efee39b20f157432
3
+ size 410994494
indo_wiki_dedup_data/wiki_nia_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30ce1cd5c0d582a00b4af46235daa6def81184a1733b61046c3463eb483c61c4
3
+ size 1935863
indo_wiki_dedup_data/wiki_su_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52a370f6792988947613fa85b1c62e1314be1187b4ff6eff437d268f7dc70646
3
+ size 47422800
indo_wiki_dedup_data/wiki_tet_20230901_dataset_soft_hard_cleansed.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8af08d93ff441117fdd72bbd3108a7bd544e553671494575b5b81fb750f0fde8
3
+ size 1446284
indo_wiki_raw_data/wiki_ace_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98efd18b41e63ec4ff44308c45f62a179c15a2c945fe7b8b542276413cbe57ee
3
+ size 4869755
indo_wiki_raw_data/wiki_ban_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18d8ebbafe81b487234a7928042aa9cb584d85ddffc325f7f6165b4d2b3d37f7
3
+ size 17569316
indo_wiki_raw_data/wiki_bjn_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e82f0dd75bc5a385ad7f064e735fd086229180c0c61daa88b9dd986017b07f71
3
+ size 6674754
indo_wiki_raw_data/wiki_bug_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aaa41d93b239ff4b93538c1d13b4c833265a7c9e649783bef2faef033078710d
3
+ size 3280082
indo_wiki_raw_data/wiki_gor_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c83d1c72bea58c6c70a440b25345e148eeed810610df560385106afea55424c3
3
+ size 6012019
indo_wiki_raw_data/wiki_id_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0dc9f33d9ebbe016cff9a24748892e649240a8bd84725f5ea61de143b501325
3
+ size 1105309135
indo_wiki_raw_data/wiki_jv_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be9e6abb7f829f23301c23c6744a4c0b4fcf1e9fb0b9966b22abd2e1f03e696d
3
+ size 70389991
indo_wiki_raw_data/wiki_map-bms_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75642ea2175456e3b8012927b98f7735c745c4bb8e70f3e9bfef7ad7e5b12215
3
+ size 5221783
indo_wiki_raw_data/wiki_min_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d843ad1428a3a5ec1b8975c01501d94adc3082ef72d3ee52092bfae1829b56c
3
+ size 116421140
indo_wiki_raw_data/wiki_ms_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ae57d23fef7caab3f964c4c19f2ec707d4af6bca200df011e9ae36f4b304ee1
3
+ size 416556742
indo_wiki_raw_data/wiki_nia_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0577ffe755aa08c4e99223c1d8dafc3fa5eb327b3363cfe888401581b1053ee0
3
+ size 1936118
indo_wiki_raw_data/wiki_su_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9f6897d6f75a8c39f2f8c9610fe52f9524784a2d26ff76eca92381040c2672d
3
+ size 47501421
indo_wiki_raw_data/wiki_tet_20230901_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4cff4e04bf7eaeb1873b05dadce1c5e6bc4ba9eb93cf991c346df5e347d884d7
3
+ size 1451078
requirements.txt ADDED
@@ -0,0 +1,6 @@
1
+ datasets==2.14.6
2
+ pandas==2.1.0
3
+ fsspec==2023.9.1
4
+ apache-beam==2.50.0
5
+ dill~=0.3.1
6
+ numpy~=1.24