|
|
|
|
|
import argparse |
|
from pathlib import Path |
|
import re |
|
import shutil |
|
import string |
|
|
|
import pandas as pd |
|
|
|
from project_settings import project_path |
|
|
|
|
|
def get_args():
    """Build and parse the command-line arguments for this script.

    Returns the parsed argparse.Namespace with ``data_dir`` (directory of
    raw novel text files) and ``output_file`` (metadata CSV name).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str,
                        default=(project_path / "data2").as_posix())
    parser.add_argument("--output_file", type=str, default="metadata.csv")
    return parser.parse_args()
|
|
|
|
|
class SentenceType(object):
    """Classify a line of text as an advertisement, a URL line, or neither."""

    def __init__(self):
        # Substrings whose presence marks a line as an advertisement.
        self.advertise_keywords = [
            "请访问",
        ]
        # Substrings whose presence marks a line as containing a URL.
        self.url_keywords = [
            "www.",
        ]

        # Regex pattern -> label.  Dots are escaped (raw string) so "."
        # only matches a literal dot; the original unescaped pattern
        # matched any character, e.g. "wwwXfooYcom" was flagged as a URL.
        self.pattern_predictor_map = {
            r"www\.(?:.*?)\.(?:net|com|cn)": "url"
        }

    def contains_advertise(self, text: str):
        """Return "advertise" if any advertise keyword occurs in *text*, else None."""
        for keyword in self.advertise_keywords:
            if keyword in text:
                return "advertise"
        return None

    def contains_url(self, text: str):
        """Return "url" if any URL keyword occurs in *text*, else None."""
        for keyword in self.url_keywords:
            if keyword in text:
                return "url"
        return None

    def pattern_predict(self, text: str):
        """Return the label of the first regex that matches *text*, else None."""
        for pattern, label in self.pattern_predictor_map.items():
            if re.search(pattern, text, flags=re.IGNORECASE) is not None:
                return label
        return None

    def predict(self, text: str):
        """Label *text*: regex patterns first, then advertise keywords; None otherwise.

        NOTE(review): contains_url() is never consulted here — the regex in
        pattern_predictor_map covers the "www." case; confirm this is intended.
        """
        result = self.pattern_predict(text)
        if result is not None:
            return result

        result = self.contains_advertise(text)
        if result is not None:
            return result
        return None
|
|
|
|
|
class ContentPreprocess(object):
    """Normalize raw novel text and pack it into bounded-size output chunks.

    process() normalizes one input line (full->half width conversion,
    punctuation/string/regex replacement) and feeds it to an internal queue;
    whenever buffered text would exceed ``max_seq_length`` characters, the
    buffer is joined and appended to ``output_list``.  The caller is
    expected to drain and reset ``output_list``.
    """

    def __init__(self):
        # Classifier used by in_queue() to drop advertisement / URL lines.
        self.s_type = SentenceType()

        # Full-width / CJK punctuation -> ASCII replacement; whitespace and
        # some characters map to "" (removed).  Applied repeatedly by
        # replace_punctuation() until the text stops changing.
        self.punctuation_map = {
            ",": ",",
            "。": ".",
            "、": ",",
            "?": "?",
            ":": ":",
            "{": "{",
            "}": "}",
            "(": "(",
            ")": ")",
            "【": "(",
            "】": ")",
            "「": "\"",
            "」": "\"",
            "『": "\"",
            "』": "\"",
            "《": "(",
            "》": ")",
            "”": "\"",
            "“": "\"",
            "‘": "\'",
            "’": "\'",
            "=": "",

            # NOTE(review): the next key renders as an empty string — it was
            # presumably an invisible/zero-width character originally; verify
            # it survived the file's encoding.  The two space-like keys below
            # may be distinct space code points (e.g. NBSP / full-width).
            "": "",
            " ": "",
            " ": "",
            "\t": "",
            "\n": "",
            "\r": "",
            "\v": "",
            "\f": "",

        }

        # Literal substring replacements, applied in one dict-order pass by
        # replace_string(): emoticons, site watermarks, HTML entity residue.
        self.string_map = {
            "^_^": "",
            "◆": "",
            "☆": "",
            "...": "…",

            # Empty bracket pairs left over after other removals.
            "()": "",
            "{}": "",
            "『』": "",
            "《》": "",

            # NOTE(review): these keys look like already-decoded HTML
            # entities ("&lt;", "&gt;", "&amp;", "&nbsp;") that may have been
            # mangled by a previous encoding step — confirm against the raw
            # data before relying on them.
            "<": "<",
            ">": ">",
            "&": " ",
            " ": " ",
            "quot;": "\"",
            "nbsp;": " ",
            "amp;": "",
            "✘": "✘",

            # Pirate-site watermarks stripped from the novel dumps.
            "龙腾小说网ltxs520.com": "",
            "龙腾小说吧www.ltxsba.net": "",
            "龙腾小说网 ltxs520.com": "",
            "龙腾小说 ltxs520.com": "",
            "龙腾小说ltxs520.com": "",

            "(..)免费": "",

            # Split back-to-back quotes onto separate lines.
            "\"\"": "\"\n\"",

        }

        # Regex substitutions applied by replace_by_pattern().
        self.pattern_map = {
            r"</(?:.*?)>": "",
            r"⌒{4,}": "###",
            r"#{4,}": "###"
        }

        # Buffer of text fragments awaiting output and its character count.
        # NOTE(review): queue_length only counts fragment characters, not
        # the "." / "\n" separators appended in in_queue() (see note there).
        self.queue = list()
        self.queue_length = 0
        self.max_seq_length = 128

        # Completed chunks; drained (and reset) by the caller.
        self.output_list = list()

    def char_b2q(self, uchar):
        """Convert a single half-width (ASCII) character to full-width.

        Characters outside printable ASCII are returned unchanged.
        (Not used by process(); kept as the inverse utility of char_q2b.)
        """
        inside_code = ord(uchar)
        if inside_code < 0x0020 or inside_code > 0x7e:
            return uchar
        if inside_code == 0x0020:
            inside_code = 0x3000
        else:
            inside_code += 0xfee0
        return chr(inside_code)

    def char_q2b(self, uchar):
        """Convert a single full-width character to half-width.

        Full-width space (U+3000) maps to ASCII space; other characters are
        shifted down by 0xFEE0 and kept only if the result is printable
        ASCII, otherwise the original character is returned unchanged.
        """
        inside_code = ord(uchar)
        if inside_code == 0x3000:
            inside_code = 0x0020
        else:
            inside_code -= 0xfee0
        if inside_code < 0x0020 or inside_code > 0x7e:
            return uchar
        return chr(inside_code)

    def q2b(self, text: str):
        """Convert every full-width character in *text* to half-width."""
        result = ""
        for c in text:
            c = self.char_q2b(c)
            result += c
        return result

    def remove_space(self, text: str):
        """Remove all ASCII spaces from *text*.  (Not used by process().)"""
        text = text.replace(" ", "")
        return text

    def replace_punctuation(self, text: str):
        """Apply punctuation_map repeatedly until *text* stops changing.

        Recurses to a fixed point so replacements whose output contains
        further replaceable characters are fully resolved.
        """
        text_ = text
        for k, v in self.punctuation_map.items():
            text_ = text_.replace(k, v)

        if text_ != text:
            text_ = self.replace_punctuation(text_)
        return text_

    def replace_string(self, text: str):
        """Apply the literal replacements in string_map, one pass, dict order."""
        for k, v in self.string_map.items():
            text = text.replace(k, v)
        return text

    def replace_by_pattern(self, text: str):
        """Apply the regex substitutions in pattern_map, one pass, dict order."""
        for k, v in self.pattern_map.items():
            text = re.sub(k, v, text)
        return text

    def process(self, text: str):
        """Normalize *text* and feed it into the chunk buffer.

        Returns the normalized text; completed chunks accumulate in
        output_list as a side effect of in_queue().
        """
        text = self.q2b(text)
        text = self.replace_punctuation(text)
        text = self.replace_by_pattern(text)
        text = self.replace_string(text)

        self.in_queue(text)
        return text

    def in_queue(self, text: str):
        """Buffer *text*, emitting a joined chunk to output_list on overflow.

        Advertisement/URL lines and empty lines are dropped.  The text is
        split on "." so chunk boundaries fall at sentence ends; a "\\n" is
        inserted between consecutive input lines sharing one chunk.
        """
        # Drop advertisement / URL lines entirely.
        if self.s_type.predict(text) is not None:
            return
        if len(text) == 0:
            return

        text_splits = text.split(".")
        num_splits = len(text_splits)
        for idx, text_split in enumerate(text_splits):
            char_num = len(text_split)
            # Flush the buffer before this fragment would exceed the limit.
            if self.queue_length + char_num > self.max_seq_length:
                text_ = "".join(self.queue)
                self.output_list.append(text_)

                self.queue = list()
                self.queue_length = 0

            # First fragment of a new input line: separate from buffered
            # text of the previous line(s).
            if idx == 0 and len(self.queue) != 0:
                self.queue.append("\n")

            if num_splits == 1:
                self.queue.append(text_split)
                self.queue_length += char_num
            else:
                if idx + 1 != num_splits:
                    # Re-attach the "." that split() removed.
                    # NOTE(review): the appended "." (and the "\n" above) are
                    # not counted in queue_length, so emitted chunks can
                    # slightly exceed max_seq_length — confirm acceptable.
                    self.queue.append("{}.".format(text_split))
                    self.queue_length += char_num

                else:
                    self.queue.append(text_split)
                    self.queue_length += char_num

        # NOTE(review): text still buffered here is only emitted by a later
        # overflow — there is no explicit flush; callers should be aware.
        return
|
|
|
|
|
def main():
    """Preprocess every ``*/*.txt`` novel file under ``--data_dir``.

    Each file is cleaned line-by-line with ContentPreprocess and the
    resulting chunks are written to a mirrored path with ``/data2/``
    replaced by ``/data3/``.

    NOTE(review): ``args.output_file`` is parsed but never used here —
    confirm whether a metadata CSV was meant to be written.
    """
    args = get_args()

    data_dir = Path(args.data_dir)
    filename_list = data_dir.glob("*/*.txt")

    c_preprocess = ContentPreprocess()

    for filename in filename_list:
        # Mirror the source layout under a sibling "data3" directory.
        to_filename = Path(filename.as_posix().replace("/data2/", "/data3/"))
        to_filename.parent.mkdir(parents=True, exist_ok=True)

        with open(filename.as_posix(), "r", encoding="utf-8") as fin, \
                open(to_filename.as_posix(), "w", encoding="utf-8") as fout:
            for row in fin:
                row = row.strip()

                # process() buffers text internally; completed chunks
                # appear in output_list.
                c_preprocess.process(row)

                for chunk in c_preprocess.output_list:
                    fout.write("{}\n".format(chunk))

                c_preprocess.output_list = list()

        # NOTE(review): any text still buffered in c_preprocess.queue at
        # end of file is carried over into the NEXT file instead of being
        # flushed here — confirm this is intended.

    return
|
|
|
|
|
# Script entry point: run the preprocessing pipeline when executed directly.
if __name__ == '__main__':
    main()
|
|