|
|
|
'''
Script for cleansing the raw Wikipedia data extracted by extract_raw_wiki_data.py
'''
|
|
|
import os, gc |
|
import logging |
|
import argparse |
|
import warnings |
|
|
|
from functools import partial |
|
|
|
|
|
import re |
|
import urllib.parse  # imported explicitly so that `urllib.parse.unquote` is guaranteed to be available
|
from xml.etree import ElementTree as ET |
|
|
|
|
|
import numpy as np |
|
import pandas as pd |
|
|
|
|
|
|
|
|
|
def argparse_bool_check(value: str): |
|
|
|
try: |
|
value = float(value) |
|
|
|
except ValueError: |
|
pass |
|
|
|
|
|
if isinstance(value, float) and int(value) == value: |
|
value = str(int(value)) |
|
|
|
else: |
|
if not isinstance(value, str): |
|
raise argparse.ArgumentTypeError(f"Not the correct value (args: {value})! Expected is cast-able to '1' or '0' or already in string. Please rectify!") |
|
|
|
if value.lower() in ("yes", "true", "t", "y", "1"): |
|
return True |
|
elif value.lower() in ("no", "false", "f", "n", "0"): |
|
return False |
|
else: |
|
raise argparse.ArgumentTypeError(f"Value Error! Not the correct value (args: {value})! Please rectify!") |
|
|
|
|
|
def text_processing_args_checker(value: str): |
|
if value not in ["all", "text", "title", "neither"]: |
|
raise argparse.ArgumentTypeError(f"Value Error! Not the correct value (args: {value})! Please rectify!") |
|
else: |
|
return value |
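
# Illustrative usage (not executed):
#   >>> text_processing_args_checker("title")  # -> "title"
#   >>> text_processing_args_checker("both")   # raises argparse.ArgumentTypeError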
|
|
|
|
|
def set_logger(): |
|
|
|
logging.basicConfig( |
|
level=logging.INFO, |
|
format='%(asctime)s [%(levelname)s]: %(message)s', |
|
datefmt='%Y-%m-%d %H:%M:%S' |
|
) |
|
|
|
|
|
file_handler = logging.FileHandler('app.log') |
|
|
|
|
|
file_handler.setLevel(logging.INFO) |
|
|
|
|
|
file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') |
|
file_handler.setFormatter(file_formatter) |
|
|
|
logger = logging.getLogger("Wiki Dataset Generation") |
|
logger.addHandler(file_handler) |
|
|
|
return logger |
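
# Note on the logger above: records propagate to the root logger configured by
# `logging.basicConfig`, so each message is emitted both to the console stream
# and to `app.log`. Illustrative usage (not executed):
#   >>> logger = set_logger()
#   >>> logger.info("hello")  # printed to the console and appended to app.log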
|
|
|
|
|
|
|
def text_cleansing_wrapper(fn, exception_class_names = []): |
|
|
|
|
|
if not isinstance(exception_class_names, list): |
|
raise TypeError("Exception Class Name for Wrapper is not a list!") |
|
|
|
if not all([isinstance(val, str) for val in exception_class_names]): |
|
raise ValueError("Found an element of Exception Class Name for Wrapper that is not a string!") |
|
|
|
|
|
exception_class_names = [val.lower() for val in exception_class_names] |
|
if len(exception_class_names) == 0: |
|
warnings.warn("The wrapper receives 0 `exception_class_names` to be warned! Will return the function value with its input!") |
|
|
|
def text_fn_wrapper(text: str, *args, **kwargs): |
|
try: |
|
return fn(text, *args, **kwargs) |
|
except Exception as e: |
|
_exc_name = type(e).__name__ |
|
if _exc_name.lower() not in exception_class_names and len(exception_class_names)>0: |
|
raise Exception(f"Exception Occured of {_exc_name} in {fn.__name__}!") from e |
|
else: |
|
_followup_msg = "Returning the input as it is..." |
|
_text_warn = f"An exception of {_exc_name} occured in {fn.__name__}! {_followup_msg}" |
|
warnings.warn(_text_warn) |
|
return text |
|
|
|
return text_fn_wrapper |
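
# Minimal sketch of how the wrapper behaves (hypothetical function, not part of
# the pipeline): listed exception class names are swallowed with a warning and
# the input is returned unchanged; any other exception is re-raised.
#   >>> @partial(text_cleansing_wrapper, exception_class_names=["valueerror"])
#   ... def must_be_short(text: str):
#   ...     if len(text) > 5:
#   ...         raise ValueError("too long")
#   ...     return text
#   >>> must_be_short("abcdef")  # warns and returns "abcdef" unchanged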
|
|
|
|
|
|
|
partial_decorator = partial(text_cleansing_wrapper, exception_class_names=["parseerror"]) |
|
@partial_decorator |
|
def remove_html_tags(text: str): |
|
|
|
return (''.join(ET.fromstring(text).itertext())).strip() |
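
# Illustrative behavior (not executed): the text must be parseable as XML; if it
# isn't, the wrapper catches the ParseError and returns the input as-is.
#   >>> remove_html_tags("<p>Hello <b>world</b></p>")  # -> "Hello world"
#   >>> remove_html_tags("plain text, no tags")        # warns, returns input unchanged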
|
|
|
|
|
|
|
@text_cleansing_wrapper |
|
def decode_url(text: str): |
|
|
|
return (urllib.parse.unquote(text)).strip() |
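
# Illustrative behavior (not executed):
#   >>> decode_url("Hello%20World%21")  # -> "Hello World!"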
|
|
|
|
|
@text_cleansing_wrapper |
|
def check_text_by_encoder(text: str, encoder: str="utf8"): |
|
    return text.encode(encoder, errors='ignore').decode(encoding=encoder).strip()
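
# Illustrative behavior (not executed): characters that cannot be represented
# in the chosen encoding are silently dropped.
#   >>> check_text_by_encoder("Café", encoder="ascii")  # -> "Caf"
#   >>> check_text_by_encoder("Café")                   # utf8 keeps it -> "Café"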
|
|
|
|
|
@text_cleansing_wrapper |
|
def remove_excessive_whitespace(text: str): |
|
return re.sub("(\s)(\s+)", r"\1", text).strip() |
|
|
|
|
|
@text_cleansing_wrapper |
|
def remove_non_alphanumeric(text: str): |
|
return re.sub("[^a-z0-9\s]", "", text, flags=re.I).strip() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _text_normalizer_constructor( |
|
remove_non_alphanumeric_bool: bool, remove_excessive_whitespace_bool: bool, |
|
remove_html_tags_bool: bool, decode_url_bool: bool, encoder_check_bool: bool, |
|
encoder: str="utf8"): |
|
|
|
_lambda_fn_1 = partial(check_text_by_encoder, encoder=encoder) if encoder_check_bool else lambda x: x |
|
_lambda_fn_2 = lambda x: remove_non_alphanumeric(_lambda_fn_1(x)) if remove_non_alphanumeric_bool else _lambda_fn_1(x) |
|
_lambda_fn_3 = lambda x: remove_excessive_whitespace(_lambda_fn_2(x)) if remove_excessive_whitespace_bool else _lambda_fn_2(x) |
|
_lambda_fn_4 = lambda x: remove_html_tags(_lambda_fn_3(x)) if remove_html_tags_bool else _lambda_fn_3(x) |
|
_lambda_fn_5 = lambda x: decode_url(_lambda_fn_4(x)) if decode_url_bool else _lambda_fn_4(x) |
|
|
|
return _lambda_fn_5 |
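
# Minimal sketch of the constructor (assumed flags, not executed): the returned
# callable applies the enabled steps innermost-first, i.e. encoder check ->
# remove_non_alphanumeric -> remove_excessive_whitespace -> remove_html_tags ->
# decode_url.
#   >>> normalize = _text_normalizer_constructor(
#   ...     remove_non_alphanumeric_bool=False, remove_excessive_whitespace_bool=True,
#   ...     remove_html_tags_bool=True, decode_url_bool=True, encoder_check_bool=True)
#   >>> normalize("<p>Hello%20  World</p>")  # -> "Hello World"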
|
|
|
|
|
def _args_to_text_constructor_fn(**kwargs): |
|
|
|
def _decode_options(opt: str): |
|
|
|
|
|
if opt == "all": |
|
return True, True |
|
elif opt == "text": |
|
return True, False |
|
elif opt == "title": |
|
return False, True |
|
else: |
|
return False, False |
|
|
|
kwargs_title, kwargs_text = {}, {} |
|
|
|
kwargs_title["encoder"] = kwargs["text_encoder_choice_title"] |
|
kwargs_text["encoder"] = kwargs["text_encoder_choice_text"] |
|
|
|
for key, val in kwargs.items(): |
|
if key not in [ |
|
"remove_non_alphanumeric_option", "remove_excessive_whitespace_option", |
|
"remove_html_tags_option", "decode_url_option", "encoder_check_option"]: |
|
continue |
|
new_key = "_".join(key.split("_")[:-1]) + "_bool" |
|
text_opt_val, title_opt_val = _decode_options(val) |
|
kwargs_text[new_key], kwargs_title[new_key] = text_opt_val, title_opt_val |
|
|
|
return _text_normalizer_constructor(**kwargs_text), _text_normalizer_constructor(**kwargs_title) |
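
# Illustrative call (assumed option values, not executed): each "*_option"
# string is decoded per column -- "all" enables a step for both text and title,
# "text"/"title" for one of them, "neither" for none -- and two normalizer
# callables (text, title) are returned.
#   >>> text_fn, title_fn = _args_to_text_constructor_fn(
#   ...     remove_non_alphanumeric_option="neither",
#   ...     remove_excessive_whitespace_option="all",
#   ...     remove_html_tags_option="all",
#   ...     decode_url_option="all",
#   ...     encoder_check_option="all",
#   ...     text_encoder_choice_title="utf8",
#   ...     text_encoder_choice_text="utf8")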
|
|
|
|
|
def _text_processing_wrapper(text: str, _fn, mode: str="text"): |
|
if mode not in ["text", "title"]: |
|
raise ValueError(f"Provided `mode` isn't either 'text' or 'title'! Received: {mode}") |
|
return _fn(text.lower()) if mode=="title" else _fn(text) |
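
# Illustrative usage (hypothetical stand-in normalizer, not executed): titles
# are lower-cased before normalization, texts are passed through as-is.
#   >>> _text_processing_wrapper("Some Title", _fn=str.strip, mode="title")  # -> "some title"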
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
parser = argparse.ArgumentParser() |
|
|
|
parser.add_argument("--raw-csv-path", help="Relative location of csv file containing raw Wikipedia data") |
|
|
|
parser.add_argument("--drop-hard-dupl", help="""Flag whether to drop hard duplicates |
|
(exact values of data of relevant text fields, Titles & Desc)""", |
|
default=True, type=argparse_bool_check) |
|
|
|
parser.add_argument("--drop-soft-dupl", help="""Flag whether to drop soft duplicates |
|
(duplicates after cleansed and normalized relevant text fields, Titles & Desc)""", |
|
default=True, type=argparse_bool_check) |
|
|
|
parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data |
|
to the `dedup_raw_wiki_data.py` script dir""", |
|
default=os.path.dirname(os.path.abspath(__file__))) |
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument("--overwrite-initial-title-data", help="""Flag whether to overwrite title |
|
init data w/ processed data (True) or keep it as it is (False)""", |
|
default=False, type=argparse_bool_check) |
|
|
|
parser.add_argument("--overwrite-initial-text-data", help="""Flag whether to overwrite text |
|
init data w/ processed data (True) or keep it as it is (False)""", |
|
default=False, type=argparse_bool_check) |
|
|
|
|
|
parser.add_argument("--remove-non-alphanumeric-option", help="""Identifier which columns to be preprocessed |
|
using `remove_non_alphanumeric` for soft duplicates detection |
|
(Choices are "all", "text", "title", "neither")""", |
|
default="neither", type=text_processing_args_checker) |
|
|
|
parser.add_argument("--remove-excessive-whitespace-option", help="""Identifier which columns to be preprocessed |
|
using `remove_excessive_whitespace` for soft duplicates detection |
|
(Choices are "all", "text", "title", "neither")""", |
|
default="all", type=text_processing_args_checker) |
|
|
|
parser.add_argument("--remove-html-tags-option", help="""Identifier which columns to be preprocessed |
|
using `remove_html_tags` for soft duplicates detection |
|
(Choices are "all", "text", "title", "neither")""", |
|
default="all", type=text_processing_args_checker) |
|
|
|
parser.add_argument("--decode-url-option", help="""Identifier which columns to be preprocessed |
|
using `decode_url` for soft duplicates detection |
|
(Choices are "all", "text", "title", "neither")""", |
|
default="all", type=text_processing_args_checker) |
|
|
|
|
|
parser.add_argument("--encoder-check-option", help="""Identifier which columns to be preprocessed |
|
using `check_text_by_encoder` for soft duplicates detection |
|
(Choices are "all", "text", "title", "neither")""", |
|
default="all", type=text_processing_args_checker) |
|
|
|
parser.add_argument("--text-encoder-choice-title", help="""Identifier of title encoder type |
|
to be applied into `check_text_by_encoder` for soft duplicates detection""", |
|
default="utf8", type=str) |
|
|
|
parser.add_argument("--text-encoder-choice-text", help="""Identifier of text encoder type |
|
to be applied into `check_text_by_encoder` for soft duplicates detection""", |
|
default="utf8", type=str) |
|
|
|
|
|
_EXPECTED_COLNAMES = ["id", "url", "title", "text"] |
|
|
|
logger = set_logger() |
|
logger.info("Parsing arguments...") |
|
|
|
args = parser.parse_args() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
_TEXT_PROCESSING_FN, _TITLE_PROCESSING_FN = _args_to_text_constructor_fn( |
|
remove_non_alphanumeric_option = args.remove_non_alphanumeric_option, |
|
remove_excessive_whitespace_option = args.remove_excessive_whitespace_option, |
|
remove_html_tags_option = args.remove_html_tags_option, |
|
        decode_url_option = args.decode_url_option,
|
encoder_check_option = args.encoder_check_option, |
|
text_encoder_choice_title = args.text_encoder_choice_title, |
|
text_encoder_choice_text = args.text_encoder_choice_text |
|
) |
|
|
|
raw_data_path = args.raw_csv_path |
|
drop_hard_dupl = args.drop_hard_dupl |
|
drop_soft_dupl = args.drop_soft_dupl |
|
save_dir = args.save_dir_path |
|
|
|
overwrite_initial_title_data = args.overwrite_initial_title_data |
|
overwrite_initial_text_data = args.overwrite_initial_text_data |
|
|
|
|
|
df = pd.read_csv(raw_data_path) |
|
    if set(df.columns) != set(_EXPECTED_COLNAMES):
        raise ValueError(f"Data schema mismatch! The loaded CSV has columns: {', '.join(df.columns.to_list())}, but the expected columns are: {', '.join(_EXPECTED_COLNAMES)}!")
|
|
|
if (not drop_hard_dupl) and (not drop_soft_dupl): |
|
raise AssertionError("The script won't run with both `drop-hard-dupl` and `drop-soft-dupl` args turned off!") |
|
elif (not drop_hard_dupl): |
|
warnings.warn("The args of `drop_hard_dupl` isn't turned off! Possibly the data will contain one template value of Wikipedia (usually no contribution text!)") |
|
|
|
|
|
id_colname = _EXPECTED_COLNAMES.pop(0) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if drop_hard_dupl: |
|
|
|
for colname in _EXPECTED_COLNAMES: |
|
logger.info(f"Checking data integrity on column {colname} on removing hard-duplicate(s)...") |
|
dupl_text_df = df[df.duplicated(subset=colname,keep=False)] |
|
shape_of_dupl_data = dupl_text_df.shape[0] |
|
|
|
if shape_of_dupl_data > 0: |
|
logger.info(f"Found {shape_of_dupl_data} data duplicated! Will be dropped") |
|
df.drop_duplicates(subset=colname, keep=False, inplace=True) |
|
|
|
|
|
|
|
|
|
|
|
if df[df.duplicated(subset=id_colname,keep=False)].shape[0] > 0: |
|
logger.info("Duplicated ID found! Re-assigning ID to the new ones based on `df.reset_index` method!") |
|
df[id_colname] = df.reset_index().index |
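
    # Sketch of the soft-dedup strategy applied below: rows are grouped by their
    # cleansed text, ranked within each group by raw text length (longest first,
    # method="min"), and only rank-1 rows are kept. E.g. with cleansed values
    # ["a", "a", "b"] and raw lengths [10, 3, 7] the ranks are [1, 2, 1], so the
    # second row is dropped.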
|
|
|
|
|
|
|
if drop_soft_dupl: |
|
|
|
idx_to_keep = set(df.index.to_list()) |
|
|
|
_EXPECTED_COLNAMES.remove("url") |
|
|
|
for colname in _EXPECTED_COLNAMES: |
|
|
|
_PROCESSING_FN = _TEXT_PROCESSING_FN if colname == "text" else _TITLE_PROCESSING_FN |
|
text_processing_fn = partial(_text_processing_wrapper, _fn=_PROCESSING_FN, mode=colname) |
|
logger.info(f"Checking data integrity on column {colname} on removing soft-duplicate(s)...") |
|
_df = df.copy(deep=True) |
|
|
|
|
|
_df = _df[[colname]] |
|
_df[colname] = _df[colname].astype("str") |
|
logger.info(f"Cleansing the data based on {colname}") |
|
|
|
|
|
_df[colname+"_raw_len"] = _df[colname].apply(len) |
|
_df[colname+"_cleansed"] = _df[colname].apply(lambda row_text: text_processing_fn(text=row_text)) |
|
|
|
|
|
if overwrite_initial_title_data and colname == "title": |
|
df[colname] = _df[colname+"_cleansed"] |
|
elif overwrite_initial_text_data and colname == "text": |
|
df[colname] = _df[colname+"_cleansed"] |
|
|
|
|
|
logger.info(f"Ranking and grouping the data based on {colname}") |
|
_df["rk"] = _df.groupby(colname+"_cleansed")[colname+"_raw_len"].rank(method="min", ascending=False) |
|
shape_of_dupl_data = _df[_df["rk"]>1].shape[0] |
|
|
|
if shape_of_dupl_data > 0: |
|
logger.info(f"Found {shape_of_dupl_data} data duplicated! Will be dropped") |
|
_idx_to_keep = _df[_df["rk"]==1].index.to_list() |
|
if len(_idx_to_keep)+shape_of_dupl_data != df.shape[0]: |
|
raise AssertionError("Mismatch of data number!") |
|
idx_to_keep = idx_to_keep.intersection(set(_idx_to_keep)) |
|
else: |
|
logger.info(f"No soft-duplicate found in colname {colname}. Continuing") |
|
|
|
del _df |
|
gc.collect() |
|
|
|
logger.info(f"The final data kept is {len(idx_to_keep)} from {df.shape[0]}") |
|
df = df.loc[list(idx_to_keep),:] |
|
|
|
logger.info("Saving dataset cleansed form...") |
|
|
|
|
|
|
|
_override_suffix_identifier = "" |
|
if overwrite_initial_title_data or overwrite_initial_text_data: |
|
_override_suffix_identifier = "_overwritten" |
|
if overwrite_initial_text_data: |
|
_override_suffix_identifier = "_text"+_override_suffix_identifier |
|
if overwrite_initial_title_data: |
|
_override_suffix_identifier = "_title"+_override_suffix_identifier |
|
|
|
_save_file_name = ".".join(raw_data_path.split("/")[-1].split(".")[:-1]) + "_dedup_cleansed" + _override_suffix_identifier + ".csv" |
|
_save_file_name = _save_file_name.replace("_raw", "") |
|
df.to_csv(f"{save_dir}/{_save_file_name}", index=False) |
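
# Illustrative CLI invocation (assumed script name and file paths, not part of the script):
#   python dedup_raw_wiki_data.py \
#       --raw-csv-path ./wiki_raw_data.csv \
#       --drop-hard-dupl True \
#       --drop-soft-dupl True \
#       --save-dir-path ./cleansed_data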
|
|