|
|
|
|
|
import argparse |
|
from pathlib import Path |
|
import re |
|
from typing import Set |
|
|
|
import requests |
|
from tqdm import tqdm |
|
|
|
from project_settings import project_path |
|
|
|
|
|
def get_args():
    """Parse command-line options.

    --data_dir: directory where downloaded articles are stored
    (defaults to <project root>/data/yazhou).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        type=str,
        default=(project_path / "data/yazhou").as_posix(),
    )
    return parser.parse_args()
|
|
|
|
|
# Matches one entry on an article index page.
# Group 1: relative article URL, group 2: article title.
# (Direct constant: the old str(...).strip() round-trip on a raw string
# was redundant.)
page_list_pattern = (
    r'<div class="articleList"><p><span>(?:.*?)</span>'
    r'<a class="img-center" target="_blank" href="(.*?)">(.*?)</a></p></div>'
)
|
|
|
|
|
# Matches the article body between the leading <hr> and the trailing
# "hot-search" footer block. Group 1: the raw HTML body.
# (Direct constant: the old str(...).strip() round-trip was redundant.)
content_pattern = r'<hr>(.*?)<div class="hot-search bottom">'
|
|
|
|
|
# Matches any single HTML tag; used to strip markup from article bodies.
# (Direct constant: the old str(...).strip() round-trip was redundant.)
paragraph_pattern = r"<(?:.*?)>"
|
|
|
|
|
def parse_page_list(text: str):
    """Return every (relative URL, title) tuple found on an index page."""
    return re.findall(page_list_pattern, text, flags=re.DOTALL)
|
|
|
|
|
def parse_content(text: str):
    """Locate the article body in a page; return the Match, or None."""
    return re.search(content_pattern, text, flags=re.DOTALL)
|
|
|
|
|
def parse_paragraph(text: str):
    """Turn <br> tags into newlines, then strip all remaining HTML tags."""
    with_breaks = text.replace("<br>", "\n")
    return re.sub(paragraph_pattern, "", with_breaks, flags=re.DOTALL)
|
|
|
|
|
def _clean_text(text: str) -> str:
    """Normalize scraped article text after tag stripping.

    Collapses runs of whitespace, then decodes the HTML entities that
    survive tag removal. NOTE: "&" is replaced by a space FIRST, so
    e.g. "&quot;" arrives at the later rules as " quot;" — the entity
    names are therefore matched without their leading "&". The order of
    these substitutions is load-bearing; do not reorder.
    """
    text = re.sub(r"[\u0020]{4,}", "", text)
    text = re.sub(r"[\t]", "", text)
    text = re.sub(r" ", "", text)
    text = re.sub(r"\n\n", "\n", text)
    text = re.sub(r"&", " ", text)
    text = re.sub(r" ", " ", text)
    text = re.sub(r"quot;", "\"", text)
    text = re.sub(r"nbsp;", " ", text)
    text = re.sub(r"amp;", " ", text)
    text = re.sub(r"✘", "✘", text)
    text = re.sub(r"lt;", "<", text)
    text = re.sub(r"gt;", ">", text)
    # Collapse decorative character runs.
    text = re.sub(r"⌒{4,}", "⌒⌒⌒", text)
    text = re.sub(r"#{4,}", "###", text)
    return text


def _sanitize_name(name: str) -> str:
    """Make an article title safe to use as a filename.

    Filesystem-hostile characters become "_" or are dropped; HTML
    entities are decoded the same way as in _clean_text (leading "&"
    becomes a space first), then spaces and "#" are removed entirely.
    """
    # Characters replaced with "_" (the original also replaced "~" twice;
    # once is enough).
    for ch in ("|", "\\", "/", ":", "~", "?"):
        name = name.replace(ch, "_")
    # Characters dropped outright.
    for ch in ("*", "\"", "<", ">"):
        name = name.replace(ch, "")
    name = re.sub(r"&", " ", name)
    name = re.sub(r" ", " ", name)
    name = re.sub(r"quot;", "\"", name)
    name = re.sub(r"nbsp;", " ", name)
    name = re.sub(r"amp;", " ", name)
    name = re.sub(r"✘", "✘", name)
    name = re.sub(r"lt;", "<", name)
    name = re.sub(r"gt;", ">", name)
    name = name.replace(" ", "")
    name = name.replace("#", "")
    return name


def main():
    """Crawl article pages from yazhouse8.com.

    Each article is saved as "<title>_<id>.txt" under --data_dir.
    Articles whose id already exists on disk are skipped, so the crawl
    is resumable.
    """
    args = get_args()

    data_dir = Path(args.data_dir)
    data_dir.mkdir(parents=True, exist_ok=True)

    # Ids already downloaded; filenames look like "<name>_<idx>.txt".
    finished_idx: Set[int] = set()
    for filename in data_dir.glob("*.txt"):
        finished_idx.add(int(filename.stem.split("_")[-1]))

    print("finished idx: {}".format(len(finished_idx)))

    for page_idx in tqdm(range(1, 1000)):
        page_url = "https://yazhouse8.com/article.php?page={}".format(page_idx)
        # timeout so a stalled server cannot hang the crawl forever
        resp = requests.get(page_url, timeout=30)

        for rel_url, name in parse_page_list(resp.text):
            # Relative URLs look like ".../<idx>.html".
            idx = int(rel_url.split("/")[-1].split(".")[0])
            article_url = "https://yazhouse8.com/{}".format(rel_url)

            if idx in finished_idx:
                continue
            finished_idx.add(idx)

            resp = requests.get(article_url, timeout=30)

            match = parse_content(resp.text)
            if match is None:
                # Layout changed or article removed — skip rather than
                # crash on match.group(0) (AttributeError in original).
                continue
            text = parse_paragraph(match.group(0))
            text = _clean_text(text)
            name = _sanitize_name(name)

            with open(data_dir / "{}_{}.txt".format(name, idx), "w", encoding="utf-8") as f:
                f.write(text)

    return
|
|
|
|
|
if __name__ == '__main__': |
|
main() |
|
|