
# Base URL of the site being crawled (novel table-of-contents page)
import os
import re
from multiprocessing.pool import Pool

import requests

start_url = 'https://www.kanunu8.com/book2/11138/'  # table-of-contents page; chapter links are relative to it

def get_source(url):
    """Download *url* and return its decoded HTML, or "" on any failure.

    Args:
        url: Absolute URL of the page to fetch.

    Returns:
        The page text decoded as GBK (the site serves GBK-encoded pages),
        or an empty string when the request fails or the server answers
        with an HTTP error status.
    """
    try:
        # timeout prevents a stalled connection from hanging a pool worker
        # forever (the original call had no timeout at all).
        response = requests.get(url, timeout=10)
        # Treat HTTP 4xx/5xx as failures instead of parsing the error page.
        response.raise_for_status()
        response.encoding = 'gbk'  # force correct decoding; site is GBK
        return response.text
    except requests.RequestException as e:
        print(f"Error fetching {url}: {e}")
        return ""

def get_article_url(html, base_url=None):
    """Extract absolute chapter URLs from the table-of-contents HTML.

    Args:
        html: Source of the table-of-contents page.
        base_url: Prefix joined to each relative chapter link. Defaults to
            the module-level ``start_url`` (kept as ``None`` default so the
            original call signature still works).

    Returns:
        List of absolute chapter URLs; empty list when the expected page
        structure is not found.
    """
    if base_url is None:
        base_url = start_url
    article_url_list = []
    try:
        # Chapter links sit between the "正文" heading and <div class="clear">.
        article_block = re.findall(r'正文(.*?)<div class="clear">', html, re.S)[0]
        # Raw string with \d+ and an escaped dot: the original '\d*.html'
        # let '.' match any character and allowed zero digits, so junk like
        # '.html' or '5xhtml' slipped through.
        article_url = re.findall(r'<a href="(\d+\.html)">', article_block, re.S)
        for url in article_url:
            article_url_list.append(base_url + url)
    except IndexError:
        print("Error parsing article URLs")
    return article_url_list

def get_article(html):
    """Parse a chapter page into ``(chapter_name, body_text)``.

    Joins ALL ``<p>…</p>`` blocks — the original used ``re.search`` and
    kept only the first paragraph, silently dropping the rest of the
    chapter. ``<br>`` tags become newlines and ``&nbsp;`` becomes a space
    so the saved text is plain prose.

    Returns:
        ``(chapter_name, text)`` on success, ``(None, None)`` when the
        page does not have the expected layout.
    """
    try:
        chapter_name = re.findall('<h1>(.*?)<br>', html, re.S)[0]
        paragraphs = re.findall('<p>(.*?)</p>', html, re.S)
        if not paragraphs:
            # Keep the original "parse failed" path for pages without body text.
            raise IndexError('no <p> blocks found')
        text_block = '\n'.join(paragraphs)
        text_block = re.sub(r'<br\s*/?>', '\n', text_block)  # line breaks -> newlines
        text_block = text_block.replace('&nbsp;', ' ')
        return chapter_name, text_block
    except (IndexError, AttributeError):
        print("Error parsing article content")
        return None, None

def save(chapter, article):
    """Write *article* to ``北欧众神/<chapter>.txt``; no-op if either is falsy.

    Characters that are reserved in file names (``\\ / : * ? " < > |``) are
    replaced with ``_`` so a chapter title containing e.g. ``/`` cannot make
    ``open()`` fail or write outside the output directory.

    Args:
        chapter: Chapter title, used as the file name stem.
        article: Chapter body text, written as UTF-8.
    """
    if not (chapter and article):
        return
    safe_name = re.sub(r'[\\/:*?"<>|]', '_', chapter)
    os.makedirs('北欧众神', exist_ok=True)
    with open(os.path.join('北欧众神', safe_name + '.txt'), 'w', encoding='utf-8') as f:
        f.write(article)

def query_article(url):
    """Fetch one chapter page, parse it, and persist the chapter to disk.

    Silently does nothing when the download fails or the page cannot be
    parsed (the helpers already report the error).
    """
    page_html = get_source(url)
    if not page_html:
        return
    title, body = get_article(page_html)
    if title and body:
        save(title, body)

def _main():
    """Fetch the table of contents, then download every chapter in parallel."""
    toc_html = get_source(start_url)
    if not toc_html:
        return
    chapter_urls = get_article_url(toc_html)
    if not chapter_urls:
        return
    pool = Pool(4)  # four worker processes, as before
    pool.map(query_article, chapter_urls)
    pool.close()
    pool.join()


if __name__ == '__main__':
    _main()
