sabilmakbar committed
Commit cb3b2e7
1 Parent(s): 444ec85

Delete moved files into GH repo

dedup_raw_wiki_data.py DELETED
@@ -1,414 +0,0 @@
- # %%
- '''
- Script for cleansing the Wikipedia data extracted by extract_raw_wiki_data.py
- '''
- #core functionality modules
- import os, gc
- import logging
- import argparse
- import warnings
-
- from functools import partial
-
- #text preprocessing modules
- import re
- import urllib.parse
- from xml.etree import ElementTree as ET
-
- #dataset related modules
- import numpy as np
- import pandas as pd
-
-
- ### MODULES DEFINITION ###
- #create custom type-checking of incoming ArgParse
- def argparse_bool_check(value: str):
-     #cast str with a float-like value into an actual float
-     try:
-         value = float(value)
-     #can't be parsed as float, keep as it is
-     except ValueError:
-         pass
-
-     #cast float-like value (incl int) into str
-     if isinstance(value, float) and int(value) == value:
-         value = str(int(value))
-     #raise ArgumentTypeError if the value isn't a string already
-     else:
-         if not isinstance(value, str):
-             raise argparse.ArgumentTypeError(f"Incorrect value (args: {value})! Expected a value castable to '1'/'0' or a boolean-like string. Please rectify!")
-     #check for these combinations of values
-     if value.lower() in ("yes", "true", "t", "y", "1"):
-         return True
-     elif value.lower() in ("no", "false", "f", "n", "0"):
-         return False
-     else:
-         raise argparse.ArgumentTypeError(f"Value Error! Incorrect value (args: {value})! Please rectify!")
-
-
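As a quick reference, this is how the custom type-checker above behaves when plugged into argparse. A minimal sketch only, assuming `argparse_bool_check` from this file is in scope; the flag name mirrors the `--drop-hard-dupl` argument defined further below:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--drop-hard-dupl", type=argparse_bool_check, default=True)

print(parser.parse_args(["--drop-hard-dupl", "1"]).drop_hard_dupl)    # True
print(parser.parse_args(["--drop-hard-dupl", "no"]).drop_hard_dupl)   # False
print(parser.parse_args(["--drop-hard-dupl", "1.0"]).drop_hard_dupl)  # True ("1.0" is cast to "1" first)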
- def text_processing_args_checker(value: str):
-     if value not in ["all", "text", "title", "neither"]:
-         raise argparse.ArgumentTypeError(f"Value Error! Incorrect value (args: {value})! Please rectify!")
-     else:
-         return value
-
-
- def set_logger():
-     # Set up the logger
-     logging.basicConfig(
-         level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
-         format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format
-         datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format
-     )
-
-     # Create a file handler to write logs into a file
-     file_handler = logging.FileHandler('app.log')
-
-     # Set the log level for the file handler
-     file_handler.setLevel(logging.INFO)
-
-     # Create a formatter for the file handler (customize the log format for the file)
-     file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
-     file_handler.setFormatter(file_formatter)
-
-     logger = logging.getLogger("Wiki Dataset Generation")
-     logger.addHandler(file_handler)
-
-     return logger
-
-
- #wrapper fn of text-cleansing
- def text_cleansing_wrapper(fn, exception_class_names = []):
-
-     #ensure the caught exception class names passed to the decorator form a list (if provided)
-     if not isinstance(exception_class_names, list):
-         raise TypeError("Exception Class Name for Wrapper is not a list!")
-     #ensure all values of the caught exception class name list are strings
-     if not all([isinstance(val, str) for val in exception_class_names]):
-         raise ValueError("Found an element of Exception Class Name for Wrapper that is not a string!")
-
-     #lowercase all exception class names
-     exception_class_names = [val.lower() for val in exception_class_names]
-     if len(exception_class_names) == 0:
-         warnings.warn("The wrapper received 0 `exception_class_names` to warn on! On any exception, the input text will be returned unchanged!")
-
-     def text_fn_wrapper(text: str, *args, **kwargs):
-         try:
-             return fn(text, *args, **kwargs)
-         except Exception as e:
-             _exc_name = type(e).__name__
-             if _exc_name.lower() not in exception_class_names and len(exception_class_names) > 0:
-                 raise Exception(f"Exception of {_exc_name} occurred in {fn.__name__}!") from e
-             else:
-                 _followup_msg = "Returning the input as it is..."
-                 _text_warn = f"An exception of {_exc_name} occurred in {fn.__name__}! {_followup_msg}"
-                 warnings.warn(_text_warn)
-                 return text
-
-     return text_fn_wrapper
-
-
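For reference, a minimal sketch of how this decorator is meant to be applied to a custom cleanser, in the same way the functions below use it. The `strip_pipes` helper and its inputs are hypothetical, not part of the deleted file:

@partial(text_cleansing_wrapper, exception_class_names=["attributeerror"])
def strip_pipes(text: str):
    # hypothetical cleanser for illustration only;
    # an AttributeError (e.g. a non-string input) only triggers a warning and the input is returned as-is
    return text.replace("|", " ").strip()

print(strip_pipes("foo|bar "))  # "foo bar"
print(strip_pipes(None))        # warns, returns None unchanged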
- #create html tags cleanser of a given text
- partial_decorator = partial(text_cleansing_wrapper, exception_class_names=["parseerror"])
- @partial_decorator
- def remove_html_tags(text: str):
-     #extracted from "https://stackoverflow.com/a/9662410", w/ additional decorator of error handler
-     return (''.join(ET.fromstring(text).itertext())).strip()
-
-
- #create url decoder of text
- @text_cleansing_wrapper
- def decode_url(text: str):
-     # return (urllib.parse.unquote(text)).encode('utf8', errors='ignore').decode().strip()
-     return (urllib.parse.unquote(text)).strip()
-
- #create encoder check of text
- @text_cleansing_wrapper
- def check_text_by_encoder(text: str, encoder: str="utf8"):
-     return text.encode(encoder, errors='ignore').decode().strip()
-
- #create excessive whitespace removal of text
- @text_cleansing_wrapper
- def remove_excessive_whitespace(text: str):
-     return re.sub(r"(\s)(\s+)", r"\1", text).strip()
-
- #create non-alphanumeric removal of text
- @text_cleansing_wrapper
- def remove_non_alphanumeric(text: str):
-     return re.sub(r"[^a-z0-9\s]", "", text, flags=re.I).strip()
-
- # def cleanse_wiki_text(text: str):
- #     return remove_html_tags(decode_url_and_remove_non_ascii(text))
-
- # def normalize_wiki_title(text: str):
- #     return remove_non_alphanumeric(remove_excessive_whitespace(text.lower()))
-
-
- def _text_normalizer_constructor(
-         remove_non_alphanumeric_bool: bool, remove_excessive_whitespace_bool: bool,
-         remove_html_tags_bool: bool, decode_url_bool: bool, encoder_check_bool: bool,
-         encoder: str="utf8"):
-
-     _lambda_fn_1 = partial(check_text_by_encoder, encoder=encoder) if encoder_check_bool else lambda x: x
-     _lambda_fn_2 = lambda x: remove_non_alphanumeric(_lambda_fn_1(x)) if remove_non_alphanumeric_bool else _lambda_fn_1(x)
-     _lambda_fn_3 = lambda x: remove_excessive_whitespace(_lambda_fn_2(x)) if remove_excessive_whitespace_bool else _lambda_fn_2(x)
-     _lambda_fn_4 = lambda x: remove_html_tags(_lambda_fn_3(x)) if remove_html_tags_bool else _lambda_fn_3(x)
-     _lambda_fn_5 = lambda x: decode_url(_lambda_fn_4(x)) if decode_url_bool else _lambda_fn_4(x)
-
-     return _lambda_fn_5
-
-
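To make the chaining above concrete, here is a minimal sketch of building one normalizer by hand and applying it to a sample string (the sample text is made up for illustration):

normalize = _text_normalizer_constructor(
    remove_non_alphanumeric_bool=False, remove_excessive_whitespace_bool=True,
    remove_html_tags_bool=True, decode_url_bool=True, encoder_check_bool=True,
    encoder="utf8")

sample = "<p>Soft-dedup%20check   of  a title</p>"
# encoder check -> whitespace collapse -> HTML tag removal -> URL decoding
print(normalize(sample))  # "Soft-dedup check of a title"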
- def _args_to_text_constructor_fn(**kwargs):
-
-     def _decode_options(opt: str):
-         # return decoded options with format `text_opt`, `title_opt`
-         # possible values are ["all", "text", "title", "neither"]
-         if opt == "all":
-             return True, True
-         elif opt == "text":
-             return True, False
-         elif opt == "title":
-             return False, True
-         else:
-             return False, False
-
-     kwargs_title, kwargs_text = {}, {}
-
-     kwargs_title["encoder"] = kwargs["text_encoder_choice_title"]
-     kwargs_text["encoder"] = kwargs["text_encoder_choice_text"]
-
-     for key, val in kwargs.items():
-         if key not in [
-                 "remove_non_alphanumeric_option", "remove_excessive_whitespace_option",
-                 "remove_html_tags_option", "decode_url_option", "encoder_check_option"]:
-             continue
-         new_key = "_".join(key.split("_")[:-1]) + "_bool"
-         text_opt_val, title_opt_val = _decode_options(val)
-         kwargs_text[new_key], kwargs_title[new_key] = text_opt_val, title_opt_val
-
-     return _text_normalizer_constructor(**kwargs_text), _text_normalizer_constructor(**kwargs_title)
-
-
- def _text_processing_wrapper(text: str, _fn, mode: str="text"):
-     if mode not in ["text", "title"]:
-         raise ValueError(f"Provided `mode` isn't either 'text' or 'title'! Received: {mode}")
-     return _fn(text.lower()) if mode=="title" else _fn(text)
-
-
- ### MAIN CODE ###
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser()
-
-     parser.add_argument("--raw-csv-path", help="Relative location of csv file containing raw Wikipedia data")
-
-     parser.add_argument("--drop-hard-dupl", help="""Flag whether to drop hard duplicates
-                         (exact values of data of relevant text fields, Titles & Desc)""",
-                         default=True, type=argparse_bool_check)
-
-     parser.add_argument("--drop-soft-dupl", help="""Flag whether to drop soft duplicates
-                         (duplicates after cleansing and normalizing relevant text fields, Titles & Desc)""",
-                         default=True, type=argparse_bool_check)
-
-     parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data
-                         to the `dedup_raw_wiki_data.py` script dir""",
-                         default=os.path.dirname(os.path.abspath(__file__)))
-
-     ### THE FOLLOWING ARGUMENTS ONLY TEMPORARILY ALTER THE TEXT DATA FOR THE SOFT-DEDUP CHECK ###
-     ### THE INITIAL TEXT DATA WON'T BE OVERWRITTEN AFTER BEING PREPROCESSED ###
-     ### UNLESS YOU SPECIFY THE ARGS `overwrite-initial-title-data` AND `overwrite-initial-text-data` ###
-
-     ### ARGS TO OVERWRITE INITIAL TEXT DATA WITH PROCESSED ONES ###
-     parser.add_argument("--overwrite-initial-title-data", help="""Flag whether to overwrite title
-                         init data w/ processed data (True) or keep it as it is (False)""",
-                         default=False, type=argparse_bool_check)
-
-     parser.add_argument("--overwrite-initial-text-data", help="""Flag whether to overwrite text
-                         init data w/ processed data (True) or keep it as it is (False)""",
-                         default=False, type=argparse_bool_check)
-
-     ### INSTANTIATOR ARGS FOR CONSTRUCTING TEXT PROCESSING FN TO BE APPLIED ###
-     parser.add_argument("--remove-non-alphanumeric-option", help="""Identifier of which columns to preprocess
-                         using `remove_non_alphanumeric` for soft duplicates detection
-                         (Choices are "all", "text", "title", "neither")""",
-                         default="neither", type=text_processing_args_checker)
-
-     parser.add_argument("--remove-excessive-whitespace-option", help="""Identifier of which columns to preprocess
-                         using `remove_excessive_whitespace` for soft duplicates detection
-                         (Choices are "all", "text", "title", "neither")""",
-                         default="all", type=text_processing_args_checker)
-
-     parser.add_argument("--remove-html-tags-option", help="""Identifier of which columns to preprocess
-                         using `remove_html_tags` for soft duplicates detection
-                         (Choices are "all", "text", "title", "neither")""",
-                         default="all", type=text_processing_args_checker)
-
-     parser.add_argument("--decode-url-option", help="""Identifier of which columns to preprocess
-                         using `decode_url` for soft duplicates detection
-                         (Choices are "all", "text", "title", "neither")""",
-                         default="all", type=text_processing_args_checker)
-
-     ### ARGS TO CHOOSE ENCODER CHECKING AND ITS CONFIG INITIALIZATION ###
-     parser.add_argument("--encoder-check-option", help="""Identifier of which columns to preprocess
-                         using `check_text_by_encoder` for soft duplicates detection
-                         (Choices are "all", "text", "title", "neither")""",
-                         default="all", type=text_processing_args_checker)
-
-     parser.add_argument("--text-encoder-choice-title", help="""Identifier of the title encoder type
-                         to be applied in `check_text_by_encoder` for soft duplicates detection""",
-                         default="utf8", type=str)
-
-     parser.add_argument("--text-encoder-choice-text", help="""Identifier of the text encoder type
-                         to be applied in `check_text_by_encoder` for soft duplicates detection""",
-                         default="utf8", type=str)
-
-
-     _EXPECTED_COLNAMES = ["id", "url", "title", "text"]
-
-     logger = set_logger()
-     logger.info("Parsing arguments...")
-
-     args = parser.parse_args()
-
-     # class dotdict(dict):
-     #     """dot.notation access to dictionary attributes"""
-     #     __getattr__ = dict.get
-     #     __setattr__ = dict.__setitem__
-     #     __delattr__ = dict.__delitem__
-
-     # args = dotdict({
-     #     "raw_csv_path": "",
-     #     "drop_hard_dupl": True,
-     #     "drop_soft_dupl": True,
-     #     "save_dir_path": os.path.dirname(os.path.abspath(__file__)),
-     #     "overwrite_initial_title_data": False,
-     #     "overwrite_initial_text_data": False,
-     #     "remove_non_alphanumeric_option": "neither",
-     #     "remove_excessive_whitespace_option": "neither",
-     #     "remove_html_tags_option": "neither",
-     #     "decode_url_option": "neither",
-     #     "encoder_check_option": "all",
-     #     "text_encoder_choice_title": "utf8",
-     #     "text_encoder_choice_text": "utf8"
-     # })
-
-     _TEXT_PROCESSING_FN, _TITLE_PROCESSING_FN = _args_to_text_constructor_fn(
-         remove_non_alphanumeric_option = args.remove_non_alphanumeric_option,
-         remove_excessive_whitespace_option = args.remove_excessive_whitespace_option,
-         remove_html_tags_option = args.remove_html_tags_option,
-         decode_url_option = args.decode_url_option,
-         encoder_check_option = args.encoder_check_option,
-         text_encoder_choice_title = args.text_encoder_choice_title,
-         text_encoder_choice_text = args.text_encoder_choice_text
-     )
-
-     raw_data_path = args.raw_csv_path
-     drop_hard_dupl = args.drop_hard_dupl
-     drop_soft_dupl = args.drop_soft_dupl
-     save_dir = args.save_dir_path
-
-     overwrite_initial_title_data = args.overwrite_initial_title_data
-     overwrite_initial_text_data = args.overwrite_initial_text_data
-
-
-     df = pd.read_csv(raw_data_path)
-     if len(set(df.columns).difference(set(_EXPECTED_COLNAMES))) != 0 or len(set(_EXPECTED_COLNAMES).difference(set(df.columns))) != 0:
-         raise ValueError(f"The received data schema (columns: {', '.join(df.columns.to_list())}) doesn't match the expected columns: {', '.join(_EXPECTED_COLNAMES)}!")
-
-     if (not drop_hard_dupl) and (not drop_soft_dupl):
-         raise AssertionError("The script won't run with both `drop-hard-dupl` and `drop-soft-dupl` args turned off!")
-     elif (not drop_hard_dupl):
-         warnings.warn("The `drop_hard_dupl` arg is turned off! The data will possibly contain Wikipedia template rows (usually with no contributed text!)")
-
-     #save the id identifier colname first (popping the first list val)
-     id_colname = _EXPECTED_COLNAMES.pop(0)
-
-     # if any of the data has duplicate values in the columns checked (url, title, or text),
-     # it means the data integrity is questionable,
-     # i.e. copied from another article or filled with template text;
-     # hence, we will delete those duplicated rows
-
-     #hard duplicate drop (drop all duplicated rows that have the exact same text on expected unique colnames)
-     if drop_hard_dupl:
-
-         for colname in _EXPECTED_COLNAMES:
-             logger.info(f"Checking data integrity on column {colname} on removing hard-duplicate(s)...")
-             dupl_text_df = df[df.duplicated(subset=colname, keep=False)]
-             shape_of_dupl_data = dupl_text_df.shape[0]
-
-             if shape_of_dupl_data > 0:
-                 logger.info(f"Found {shape_of_dupl_data} duplicated rows! They will be dropped")
-                 df.drop_duplicates(subset=colname, keep=False, inplace=True)
-
-
-     #check the id/idx of the cleansed data for duplicates
-     # (any duplication of id/idx should come from the very first extraction, not from the cleansing)
-
-     if df[df.duplicated(subset=id_colname, keep=False)].shape[0] > 0:
-         logger.info("Duplicated ID found! Re-assigning IDs to new ones based on the `df.reset_index` method!")
-         df[id_colname] = df.reset_index().index
-
-     #soft duplicate drop (drop all but one of the rows whose normalized text is identical on expected unique colnames)
-     #keep the row that has the longest value in its raw form
-     if drop_soft_dupl:
-
-         idx_to_keep = set(df.index.to_list())
-         #clean from text & title only, url isn't needed for this process
-         _EXPECTED_COLNAMES.remove("url")
-
-         for colname in _EXPECTED_COLNAMES:
-             #Construct Text Cleanser Fn for soft-duplicate cleansing
-             _PROCESSING_FN = _TEXT_PROCESSING_FN if colname == "text" else _TITLE_PROCESSING_FN
-             text_processing_fn = partial(_text_processing_wrapper, _fn=_PROCESSING_FN, mode=colname)
-             logger.info(f"Checking data integrity on column {colname} on removing soft-duplicate(s)...")
-             _df = df.copy(deep=True)
-
-             #Setting up DF cols as String so they can be text-processed
-             _df = _df[[colname]]
-             _df[colname] = _df[colname].astype("str")
-             logger.info(f"Cleansing the data based on {colname}")
-
-             #applying text processing
-             _df[colname+"_raw_len"] = _df[colname].apply(len)
-             _df[colname+"_cleansed"] = _df[colname].apply(lambda row_text: text_processing_fn(text=row_text))
-
-             #overwrite its text data if set as true
-             if overwrite_initial_title_data and colname == "title":
-                 df[colname] = _df[colname+"_cleansed"]
-             elif overwrite_initial_text_data and colname == "text":
-                 df[colname] = _df[colname+"_cleansed"]
-
-             #choose the data to keep by "ranking" it according to the length of its raw text (keep the greatest)
-             logger.info(f"Ranking and grouping the data based on {colname}")
-             _df["rk"] = _df.groupby(colname+"_cleansed")[colname+"_raw_len"].rank(method="min", ascending=False)
-             shape_of_dupl_data = _df[_df["rk"] > 1].shape[0]
-
-             if shape_of_dupl_data > 0:
-                 logger.info(f"Found {shape_of_dupl_data} duplicated rows! They will be dropped")
-                 _idx_to_keep = _df[_df["rk"] == 1].index.to_list()
-                 if len(_idx_to_keep) + shape_of_dupl_data != df.shape[0]:
-                     raise AssertionError("Mismatch of data number!")
-                 idx_to_keep = idx_to_keep.intersection(set(_idx_to_keep))
-             else:
-                 logger.info(f"No soft-duplicate found in colname {colname}. Continuing")
-
-             del _df
-             gc.collect()
-
-         logger.info(f"The final data kept is {len(idx_to_keep)} out of {df.shape[0]} rows")
-         df = df.loc[list(idx_to_keep), :]
-
-     logger.info("Saving the cleansed dataset...")
-     #the input path split by "/" for the last entry should return the filename,
-     #whereas the filename split by "." except the last value should return the filename w/o the ".csv" extension
-
-     _override_suffix_identifier = ""
-     if overwrite_initial_title_data or overwrite_initial_text_data:
-         _override_suffix_identifier = "_overwritten"
-         if overwrite_initial_text_data:
-             _override_suffix_identifier = "_text" + _override_suffix_identifier
-         if overwrite_initial_title_data:
-             _override_suffix_identifier = "_title" + _override_suffix_identifier
-
-     _save_file_name = ".".join(raw_data_path.split("/")[-1].split(".")[:-1]) + "_dedup_cleansed" + _override_suffix_identifier + ".csv"
-     _save_file_name = _save_file_name.replace("_raw", "")
-     df.to_csv(f"{save_dir}/{_save_file_name}", index=False)
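To illustrate the soft-dedup "keep the longest raw form" ranking used above on its own, here is a toy sketch with made-up data; the column names mirror the `*_cleansed`/`*_raw_len` helper columns built inside the loop:

import pandas as pd

toy = pd.DataFrame({
    "title_cleansed": ["jakarta", "jakarta", "bali"],
    "title_raw_len": [18, 25, 10],
})
# rank each cleansed-duplicate group by raw length, longest first;
# rows with rank 1 are kept, the rest are treated as soft duplicates
toy["rk"] = toy.groupby("title_cleansed")["title_raw_len"].rank(method="min", ascending=False)
print(toy[toy["rk"] == 1])  # keeps the 25-char "jakarta" row and the "bali" row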
dedup_raw_wiki_data_sea.sh DELETED
@@ -1,62 +0,0 @@
- #!/bin/bash
-
- # all available lang codes that are local to, or linguistically related to, the following SEA countries:
- # Indonesia: "ace" (Acehnese), "ban" (Balinese), "bjn" (Banjarese), "bug" (Buginese), "gor" (Gorontalo), "id" (Indonesian), "jv" (Javanese), "mad" (Madurese), "map-bms" (Banyumasan, Dialect of Javanese), "min" (Minangkabau), "ms" (Malay), "nia" (Nias), "su" (Sundanese), "tet" (Tetum)
- # Singapore: "ms" (Malay), "ta" (Tamil)
- # Malaysia: "ms" (Malay), "ta" (Tamil)
- # Brunei: "ms" (Malay)
- # Thailand: "mnw" (Mon), "shn" (Shan), "th" (Thai)
- # Myanmar: "my" (Burmese), "mnw" (Mon), "shn" (Shan)
- # Laos: "lo" (Lao)
- # Vietnam: "vi" (Vietnamese)
- # Cambodia: "km" (Khmer)
- # East Timor: "tet" (Tetum)
- # Philippines: "bcl" (Central Bicolano), "cbk-zam" (Chavacano), "ceb" (Cebuano), "ilo" (Ilokano), "pag" (Pangasinan), "pam" (Kapampangan), "tl" (Tagalog), "war" (Waray)
-
- #params of executions
- folder_dir_to_save=./sea_wiki_dedup_data
- input_folder_to_be_dedup=./sea_wiki_raw_data
-
- drop_hard_dupl=True
- drop_soft_dupl=True
-
-
- # main executions
-
- # src: https://stackoverflow.com/a/18887210 (to list all files under a dir)
- shopt -s nullglob
- file_name_array=($input_folder_to_be_dedup/*)
- shopt -u nullglob # Turn off nullglob to make sure it doesn't interfere with anything later
- file_name_array="${file_name_array}"
-
- if [ ${#file_name_array[@]} == 0 ]; then
-     echo "No files found under directory $input_folder_to_be_dedup" >&2
- fi
-
- if [ ! -d $folder_dir_to_save ];
- then
-     echo "Dir $folder_dir_to_save doesn't exist! Creating the dir..."
-     mkdir $folder_dir_to_save
- fi
-
- echo "The param hard-dedup drop is set as $drop_hard_dupl"
- echo "The param soft-dedup drop is set as $drop_soft_dupl"
-
- for val in ${!file_name_array[@]}; do
-     csv_path=${file_name_array[$val]}
-
-     if [[ ${csv_path} != *".csv" ]]; then
-         echo "The extracted file name isn't a CSV! Skipping! Received $csv_path"
-         continue
-     fi
-
-     echo "Executing Dedup on iteration no "$((val+1))" of total ${#file_name_array[@]} for input data $csv_path"
-     #see the script because there are more args than this command is using
-     python dedup_raw_wiki_data.py \
-         --raw-csv-path $csv_path \
-         --drop-hard-dupl $drop_hard_dupl \
-         --drop-soft-dupl $drop_soft_dupl \
-         --save-dir-path $folder_dir_to_save
-     echo "Done Execution"
- done
- echo "Done Dedup Process"
extract_raw_wiki_data.py DELETED
@@ -1,73 +0,0 @@
- '''
- Script for generating Wikipedia data from the dumps published at https://dumps.wikimedia.org/
- More info can be found at https://huggingface.co/datasets/wikipedia
- -------------------
- Check here to see available indexed data: https://dumps.wikimedia.org/backup-index.html
- Also check here to see the language meta for each code: https://meta.wikimedia.org/wiki/List_of_Wikipedias
- '''
-
- import os, gc
- import logging
- import argparse
-
- import pandas as pd
- from datasets import load_dataset
-
-
- def set_logger():
-     # Set up the logger
-     logging.basicConfig(
-         level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
-         format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format
-         datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format
-     )
-
-     # Create a file handler to write logs into a file
-     file_handler = logging.FileHandler('app.log')
-
-     # Set the log level for the file handler
-     file_handler.setLevel(logging.INFO)
-
-     # Create a formatter for the file handler (customize the log format for the file)
-     file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
-     file_handler.setFormatter(file_formatter)
-
-     logger = logging.getLogger("Wiki Dataset Generation")
-     logger.addHandler(file_handler)
-
-     return logger
-
-
- #only executed if called directly
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser()
-
-     parser.add_argument("--lang-id", help="Lang ID from Wikipedia Data to extract")
-
-     parser.add_argument("--date-ver", help="Date of Wikipedia Data (YYYYMMDD) generation to extract")
-
-     parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data
-                         to the `extract_raw_wiki_data.py` script dir""",
-                         default=os.path.dirname(os.path.abspath(__file__)))
-
-     args = parser.parse_args()
-
-
-     dset_name = "wikipedia"
-
-     logger = set_logger()
-     logger.info("Parsing arguments...")
-
-     lang_id = args.lang_id
-     date_ver = args.date_ver
-     save_dir = args.save_dir_path
-
-     logger.info("Loading the dataset from Wikipedia...")
-     df = load_dataset(dset_name, language=lang_id, date=date_ver, beam_runner='DirectRunner', split="train").to_pandas()
-     logger.info("Loading done!")
-     logger.info(f"#Data collected: {df.shape[0]}")
-     logger.info("Saving dataset raw form...")
-     df.to_csv(f"{save_dir}/wiki_{lang_id}_{date_ver}_raw_dataset.csv", index=False)
-
-     del df
-     gc.collect()
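As a point of reference, this is roughly what the script does for a single small wiki, as a minimal sketch; the language code and date below are taken from the SEA shell script ("tet", 20231101) and are illustrative, and the call downloads a real dump, so it can take a while:

from datasets import load_dataset

dset = load_dataset("wikipedia", language="tet", date="20231101",
                    beam_runner="DirectRunner", split="train")
df = dset.to_pandas()
# the dedup script later expects exactly these columns: id, url, title, text
print(df.columns.to_list())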
extract_raw_wiki_data_batched.py DELETED
@@ -1,87 +0,0 @@
- '''
- Script for generating Wikipedia data from the dumps published at https://dumps.wikimedia.org/
- More info can be found at https://huggingface.co/datasets/wikipedia
- -------------------
- Check here to see available indexed data: https://dumps.wikimedia.org/backup-index.html
- Also check here to see the language meta for each code: https://meta.wikimedia.org/wiki/List_of_Wikipedias
- '''
-
- import os, gc
- import logging
- import argparse
-
- import pandas as pd
- from datasets import load_dataset
-
-
- def set_logger():
-     # Set up the logger
-     logging.basicConfig(
-         level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
-         format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format
-         datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format
-     )
-
-     # Create a file handler to write logs into a file
-     file_handler = logging.FileHandler('app.log')
-
-     # Set the log level for the file handler
-     file_handler.setLevel(logging.INFO)
-
-     # Create a formatter for the file handler (customize the log format for the file)
-     file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
-     file_handler.setFormatter(file_formatter)
-
-     logger = logging.getLogger("Wiki Dataset Generation")
-     logger.addHandler(file_handler)
-
-     return logger
-
-
- #only executed if called directly
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser()
-
-     parser.add_argument("--lang-id", help="Lang ID from Wikipedia Data to extract")
-
-     parser.add_argument("--date-ver", help="Date of Wikipedia Data (YYYYMMDD) generation to extract")
-
-     #default: all
-     parser.add_argument("--split-extr", help="""Split extraction config for choosing
-                         subsets of data to process. It follows Python list-slicing string syntax""",
-                         default=":")
-
-     #default: all
-     parser.add_argument("--force_rerun_split", help="""Flag indicating whether to reuse existing
-                         splits or force re-creating them""",
-                         default=False)
-
-     parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data
-                         to the `extract_raw_wiki_data.py` script dir""",
-                         default=os.path.dirname(os.path.abspath(__file__)))
-
-     args = parser.parse_args()
-
-
-     dset_name = "sea_loader_batched/wiki_loader.py"
-
-     logger = set_logger()
-     logger.info("Parsing arguments...")
-
-     lang_id = args.lang_id
-     date_ver = args.date_ver
-     generated_split_extraction = args.split_extr
-     force_rerun_split_generation = args.force_rerun_split
-     save_dir = args.save_dir_path
-
-     logger.info("Loading the dataset from Wikipedia...")
-     df = load_dataset(dset_name, language=lang_id, date=date_ver, beam_runner='DirectRunner',
-                       split="train", subset_file_to_process=generated_split_extraction,
-                       force_rerun_split=force_rerun_split_generation).to_pandas()
-     logger.info("Loading done!")
-     logger.info(f"#Data collected: {df.shape[0]}")
-     logger.info("Saving dataset raw form...")
-     df.to_csv(f"{save_dir}/wiki_{lang_id}_{date_ver}_raw_dataset_splitted.csv", index=False)
-
-     del df
-     gc.collect()
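The `--split-extr` value is passed straight through to the custom loader (sea_loader_batched/wiki_loader.py), which is not part of this diff, so the exact handling lives there. Purely as an illustration of the slicing-string idea, a value like "0:10" could be turned into a Python slice along these lines; the `parse_slice` helper and the dummy chunk list below are hypothetical, not taken from the loader:

def parse_slice(slice_str: str) -> slice:
    # hypothetical helper: "0:10" -> slice(0, 10), ":" -> slice(None, None), "5:" -> slice(5, None)
    parts = [int(p) if p else None for p in slice_str.split(":")]
    return slice(*parts)

dump_file_chunks = [f"chunk_{i}" for i in range(100)]  # dummy stand-in for the dump subsets
print(dump_file_chunks[parse_slice("0:10")])  # first 10 chunks only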
extract_raw_wiki_data_sea.sh DELETED
@@ -1,38 +0,0 @@
- #!/bin/bash
-
- # all available lang codes that are local to, or linguistically related to, the following SEA countries:
- # Indonesia: "ace" (Acehnese), "ban" (Balinese), "bjn" (Banjarese), "bug" (Buginese), "gor" (Gorontalo), "id" (Indonesian), "jv" (Javanese), "mad" (Madurese), "map-bms" (Banyumasan, Dialect of Javanese), "min" (Minangkabau), "ms" (Malay), "nia" (Nias), "su" (Sundanese), "tet" (Tetum)
- # Singapore: "ms" (Malay), "ta" (Tamil)
- # Malaysia: "ms" (Malay), "ta" (Tamil)
- # Brunei: "ms" (Malay)
- # Thailand: "mnw" (Mon), "shn" (Shan), "th" (Thai)
- # Myanmar: "my" (Burmese), "mnw" (Mon), "shn" (Shan)
- # Laos: "lo" (Lao)
- # Vietnam: "vi" (Vietnamese)
- # Cambodia: "km" (Khmer)
- # East Timor: "tet" (Tetum)
- # Philippines: "bcl" (Central Bicolano), "cbk-zam" (Chavacano), "ceb" (Cebuano), "ilo" (Ilokano), "pag" (Pangasinan), "pam" (Kapampangan), "tl" (Tagalog), "war" (Waray)
-
- #params of executions
- date_ver=20231101
- folder_dir_to_save=./sea_wiki_raw_data
- lang_list=(ace ban bcl bjn bug cbk-zam ceb gor id ilo jv km lo mad map-bms min mnw ms my nia pag pam shn su tet ta th tl vi war)
-
-
- #main executions
-
- if [ ! -d $folder_dir_to_save ]; then
-     echo "Dir $folder_dir_to_save doesn't exist! Creating the dir..."
-     mkdir $folder_dir_to_save
- fi
-
- for val in ${!lang_list[@]}; do
-     lang=${lang_list[$val]}
-     echo "Executing Extractor on iteration no $((val+1)) of total ${#lang_list[@]} for language $lang and date version of $date_ver"
-     python extract_raw_wiki_data.py \
-         --lang-id $lang \
-         --date-ver $date_ver \
-         --save-dir-path $folder_dir_to_save
-     echo "Done Execution"
- done
- echo "Done Extraction Process"