# Scrape a novel from beqege.cc: read the book's metadata from its detail
# page, then download every chapter into a per-chapter .txt file under
# static/笔趣阁/<title>-<category>-<author>/.
import os
# curl_cffi's requests supports the `impersonate=` browser-fingerprint option used below
from curl_cffi import requests
from util.useragent import get_headers_init_user_agent
import re
from util.logger import get_logger
logger = get_logger("笔趣阁小说.log")


try:
    logger.info("程序开始执行")
    # Novel detail page to crawl (hard-coded book id 1076).
    url = 'https://www.beqege.cc/1076/'
    headers = {
        'User-Agent': get_headers_init_user_agent(),
    }

    response = requests.get(url, headers=headers, impersonate="chrome100")
    if response.status_code == 200:
        # Book metadata comes from the page's og:novel meta tags.
        info_title = re.search(r'<meta property="og:novel:book_name" content="(.*?)"/>', response.text).group(1)
        info_author = re.search(r'<meta property="og:novel:author" content="(.*?)"/>', response.text).group(1)
        info_category = re.search(r'<meta property="og:novel:category" content="(.*?)"/>', response.text).group(1)
        # Collect (href, title) pairs for every chapter in the table of contents.
        logger.debug(f"开始爬取 {info_title}-{info_category}-{info_author} 小说章节链接和标题")
        items = re.findall(r'<dd><a href="(.*?)">(.*?)</a></dd>', response.text)
        book_dir = f"static/笔趣阁/{info_title}-{info_category}-{info_author}"
        # exist_ok=True avoids the check-then-create race of exists() + makedirs().
        os.makedirs(book_dir, exist_ok=True)
        for href, detail_title in items:
            detail_url = f'https://www.beqege.cc{href}'
            logger.debug(f"开始保存 {detail_title}.txt")
            detail_response = requests.get(detail_url, headers=headers, impersonate="chrome100")
            if detail_response.status_code != 200:
                # Skip a failed chapter rather than saving an error page or
                # aborting the whole book.
                logger.error(f"章节请求失败 {detail_url} status={detail_response.status_code}")
                continue
            # Restrict the paragraph search to the chapter's #content div.
            # (Searching the full page text, as before, could also match
            # nav/footer <p> tags outside the chapter body.)
            detail_content = re.search(r'<div id="content">(.*?)</div>', detail_response.text, re.S).group(1)
            detail_lines = re.findall(r'<p>(.*?)</p>', detail_content, re.S)
            # Chapter titles may contain characters that are illegal in file
            # names (e.g. / \ : * ? " < > |); replace them for the path only.
            safe_title = re.sub(r'[\\/:*?"<>|]', '_', detail_title)
            with open(f'{book_dir}/{safe_title}.txt', 'w', encoding='utf-8') as f:
                f.writelines(line + '\n' for line in detail_lines)
            logger.debug(f"保存 {detail_title}.txt 完成")
        logger.debug(f"爬取 {info_title}-{info_category}-{info_author} 小说章节链接和标题结束")
    else:
        # Previously a non-200 detail page failed silently with no log entry.
        logger.error(f"详情页请求失败 {url} status={response.status_code}")
except Exception as e:
    # logger.exception records the full traceback; logger.error(e) only
    # logged the message text and discarded where the failure happened.
    logger.exception(e)

logger.info("程序执行完毕")
