|
import concurrent.futures as fut |
|
import pathlib |
|
import ftfy |
|
import orjson |
|
import re |
|
import psutil |
|
from bs4 import BeautifulSoup, Comment |
|
import markdownify |
|
from text_toolkit import normalize_puncts, clean_newlines, merge_consecutive_lines |
|
|
|
|
|
class md_nolinks(markdownify.MarkdownConverter):
    """MarkdownConverter variant that drops hyperlinks and images.

    Anchors are reduced to their visible text (no URL emitted) and
    ``<img>`` tags are removed entirely.
    """

    def convert_a(self, _, text, __):
        # Keep only the link's visible text; chomp() splits off the
        # surrounding whitespace markdownify would normally preserve.
        stripped = markdownify.chomp(text)[2]
        return stripped if stripped else ""

    def convert_img(self, el, text, convert_as_inline):
        # Images carry no useful plain-text content here; drop them.
        return ""
|
|
|
|
|
# Shared module-level converter used by blackbox(); strips links and images.
md = md_nolinks()
|
|
|
|
|
def blackbox(soup: pathlib.Path, filename: pathlib.Path):
    """Convert one Project Gutenberg HTML file into cleaned plain text.

    Reads the HTML at *soup* (a path, despite the name), repairs its text
    encoding with ftfy, strips Gutenberg boilerplate / transcriber notes /
    tables of contents / page numbers, converts the remaining markup to
    link-free Markdown via the module-level ``md`` converter, normalizes
    the text, and writes it to *filename*.

    Parameters
    ----------
    soup : path-like
        Path to the source ``.htm`` file.
    filename : path-like
        Destination path for the cleaned ``.txt`` output.

    Returns
    -------
    str
        The cleaned text that was also written to *filename*.

    Raises
    ------
    Exception
        If the document structure defeats the heuristics (orphaned
        license ``<pre>``, missing ``<hr>`` separator, missing ``<body>``).
    """
    # Un-pin this worker: pool children can inherit a narrow CPU-affinity
    # mask.  The original hard-coded range(0, 64) raises ValueError on
    # hosts with fewer than 64 logical CPUs; use the real CPU count.
    p = psutil.Process()
    p.cpu_affinity(list(range(psutil.cpu_count() or 1)))

    path = pathlib.Path(soup)
    data_bytes = path.read_bytes()
    try:
        utf8 = data_bytes.decode("utf-8")
        utf8, explain = ftfy.fix_and_explain(utf8)
    except UnicodeDecodeError:
        # Not valid UTF-8: let ftfy guess the encoding, then repair mojibake.
        magic_data, magic_explain = ftfy.guess_bytes(data_bytes)
        utf8, explain = ftfy.fix_and_explain(magic_data)

    soup = BeautifulSoup(utf8, "lxml")
    pre = soup.find("pre")
    # Remove page-number spans and HTML comments in place.  Plain loops
    # replace the original side-effecting list comprehensions, and
    # `find_all(string=...)` replaces the deprecated `findAll(text=...)`.
    for span in soup.find_all("span", attrs={"class": "pagenum"}):
        span.decompose()
    for comment in soup.find_all(string=lambda t: isinstance(t, Comment)):
        comment.extract()

    composed_content = []
    if pre:
        txt: str = pre.get_text()
        if "gutenberg e" in txt.lower():
            # Old-style files: the first <pre> holds the Gutenberg license
            # header and the book body follows as its siblings.
            if not pre.parent:
                raise Exception("license <pre> has no parent element")
            has_pre = False
            for child in pre.next_siblings:
                if isinstance(child, str):
                    composed_content.append(child.lstrip())
                if child.name == "pre":
                    # A second <pre> marks the closing boilerplate.
                    has_pre = True
                    break
                if child.name == "table":
                    continue
                if child.name is None:
                    continue
                child_filt = child.get_text().lower()
                if (
                    child.name in ["div", "p"]
                    and "transcriber" in child_filt
                    and "note" in child_filt
                ):
                    continue
                if "pagenum" in child.get("class", ""):
                    continue
                composed_content.append(child)
        else:
            # Newer layout: the body text starts after the first <hr> that
            # follows the <pre>.  (The original kept a `look_ahead` counter
            # that was never decremented and an unused `sibs` list, so the
            # scan was always unbounded; that dead bookkeeping is dropped.)
            rule = None
            for sibling in pre.next_siblings:
                if sibling.name == "hr":
                    rule = sibling
                    break
            if not rule:
                raise Exception("no <hr> separator found after <pre>")
            for t_block in rule.next_siblings:
                if isinstance(t_block, str):
                    composed_content.append(t_block.lstrip(" "))
                    continue
                if not t_block.get_text().strip():
                    continue
                # A full-width rule or the closing boilerplate ends the book.
                if t_block.name == "hr" and "full" in t_block.get("class", ""):
                    break
                gt = t_block.get_text().lower()
                if t_block.name == "pre" and "gutenberg" in gt:
                    break
                elif t_block.name == "p" and "end of" in gt and "gutenberg" in gt:
                    break
                else:
                    composed_content.append(t_block)
    else:
        print("no pre", filename)

    body = soup.find("body")
    if not body:
        raise Exception("Invisible man?")

    # Second pass: scan <body> with a small state machine.
    #   mode 0   -> before the "start of ... gutenberg" marker
    #   mode 0.5 -> on the start-marker element itself
    #   mode 1   -> inside the book proper
    #   mode 2   -> after the "end of ... gutenberg" marker
    # NOTE(review): whatever the <pre> branch collected above is discarded
    # below (`composed_content = z_ctx`); it is kept only for its validation
    # side effects.  pre_ctx/post_ctx are likewise never read afterwards.
    pre_ctx = []
    ctx = []
    post_ctx = []
    mode = 0
    for children in body:
        if isinstance(children, str):
            continue
        if isinstance(children, type(None)):
            continue
        gt = children.get_text()
        if children.name == "table":
            # NOTE(review): the decomposed table is still appended to one of
            # the mode lists below; preserved as-is to avoid output changes.
            children.decompose()
        if (
            (children.name == "div" or children.name == "p")
            and "start of" in gt.lower()
            and "gutenberg" in gt.lower()
        ):
            mode = 0.5
            pre_ctx.append(children)
        if (
            (children.name == "div" or children.name == "p")
            and "end of" in gt.lower()
            and "gutenberg" in gt.lower()
            and mode == 1
        ):
            mode = 2
            post_ctx.append(children)
        if mode == 0:
            pre_ctx.append(children)
        elif mode == 0.5:
            mode = 1
            pre_ctx.append(children)
        elif mode == 1:
            ctx.append(children)
        elif mode == 2:
            post_ctx.append(children)

    # Keep only real tags (name is None for strings/comments) from the
    # in-book region.
    z_ctx = []
    for pp in ctx:
        if pp.name is None:
            continue
        z_ctx.append(pp)
    composed_content = z_ctx

    # Drop tables of contents; keep everything else.
    pure = []
    for content in composed_content:
        if isinstance(content, str):
            pure.append(content)
            continue
        if not content.name:
            continue
        if "toc" in content.get("class", "") and content.name in ["ul", "ol"]:
            continue
        pure.append(content)

    # Markdown-ify (links/images stripped by `md`), then normalize
    # punctuation and collapse whitespace.
    pp = normalize_puncts(
        "\n".join(
            md.convert_soup(soup_content)
            if not isinstance(soup_content, str)
            else soup_content
            for soup_content in pure
        )
    )
    pp = clean_newlines(pp)
    pp = merge_consecutive_lines(pp)
    pp = clean_newlines(pp)
    pathlib.Path(filename).write_text(pp, encoding="utf-8")

    return pp
|
|
|
|
|
# Output directory; created at import time so every pool worker (which
# re-imports this module) sees it before blackbox() writes into it.
pathlib.Path("gutenberg_processed").mkdir(exist_ok=True)
|
|
|
|
|
if __name__ == "__main__":
    # Fan each HTML book out to a worker process.  final.json is expected to
    # be a list of [path, file_type] pairs from an earlier pipeline stage.
    with fut.ProcessPoolExecutor(max_workers=64) as pool:
        futures = []
        for gutenbook in orjson.loads(pathlib.Path("final.json").read_bytes()):
            path, f_type = gutenbook
            if f_type == "html":
                path = pathlib.Path(path)
                list_html = list(path.glob("*.htm"))
                # Fix: the original indexed [0] unconditionally and crashed
                # with IndexError when a book directory had no .htm file.
                if not list_html:
                    print("no .htm file in", path)
                    continue
                if len(list_html) > 1:
                    # Multiple candidates: report them, then take the first.
                    print(list_html)
                list_html = list_html[0]

                futures.append(
                    pool.submit(
                        blackbox,
                        list_html,
                        str(
                            pathlib.Path("gutenberg_processed")
                            / list_html.with_suffix(".txt").name
                        ),
                    )
                )
        print("Waiting for the future to arrive...")
        # fut.wait() silently swallowed worker exceptions; collect results
        # so failed conversions are at least reported.
        for future in fut.as_completed(futures):
            try:
                future.result()
            except Exception as e:
                print("worker failed:", e)
|
|