import os
import re

import requests
from lxml import etree

from util.logger import get_logger

# Project logger writing to 小说.log ("novel.log").
logger = get_logger('小说.log', __file__)

# Table-of-contents page of one novel on wenku8 (novel id 5).
url = "https://www.wenku8.net/novel/0/5/index.htm"

# NOTE(review): the cookie below embeds a live login session
# (PHPSESSID / jieqiUserInfo for a specific account). It will expire,
# and credentials should not be hardcoded in source — load them from an
# environment variable or a gitignored config file instead.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36',
    'referer': 'https://www.wenku8.net/book/5.htm',
    'cookie': 'Hm_lvt_b74ae3cad0d17bb613b120c400dcef59=1762344899; Hm_lpvt_b74ae3cad0d17bb613b120c400dcef59=1762344935; Hm_lvt_d72896ddbf8d27c750e3b365ea2fc902=1762344421; HMACCOUNT=075CB1840AAE9424; _clck=swifr7%5E2%5Eg0r%5E0%5E2135; cf_clearance=yMnWnJZKYb2NhXIwYPqUFguzuMHzRaqA.H9H.BzRgos-1762344424-1.2.1.1-UMYAIBA0AtHQl4PWuKGH7tJi8OVwEArcIdDPovGeq0bZUdSbvK0G6hdzXd0Dfq8_5urKd_qASiPSjSveYV7QPp3v0GVmMOSMaBfISK54sJ7Z3Ny0tauDMyU5uao4aTrXnWl_k8TlCSve31SppSHz8vIbHlsrZ0rtJawNxHUJUUe4rDQDlVTQg4laZKd2z9rxF0.Hfw70P3ojg9tMNGfwC9_V0oy4ljhcrItAUlR5uAI; PHPSESSID=758763d4cb8114328609195857a4ed1d; jieqiUserInfo=jieqiUserId%3D1603723%2CjieqiUserName%3D842411430%2CjieqiUserGroup%3D3%2CjieqiUserVip%3D0%2CjieqiUserName_un%3D842411430%2CjieqiUserHonor_un%3D%26%23x65B0%3B%26%23x624B%3B%26%23x4E0A%3B%26%23x8DEF%3B%2CjieqiUserGroupName_un%3D%26%23x666E%3B%26%23x901A%3B%26%23x4F1A%3B%26%23x5458%3B%2CjieqiUserLogin%3D1762344436; jieqiVisitInfo=jieqiUserLogin%3D1762344436%2CjieqiUserId%3D1603723; jieqiVisitId=article_articleviews%3D5%2Cjieqi_article_reviews_views%3D302822; jieqiVisitTime=jieqiArticleReviewTime%3D1762344804; Hm_lvt_acfbfe93830e0272a88e1cc73d4d6d0f=1762344896; Hm_lpvt_d72896ddbf8d27c750e3b365ea2fc902=1762344936; Hm_lpvt_acfbfe93830e0272a88e1cc73d4d6d0f=1762344936; _clsk=dnxhax%5E1762344936650%5E22%5E1%5Ek.clarity.ms%2Fcollect'
}


# Fetch the table-of-contents page. A timeout keeps the script from
# hanging forever on a stalled connection (requests has no default timeout).
response = requests.get(url, headers=headers, timeout=15)
response.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
# wenku8 serves GBK-encoded pages. gb18030 is a strict superset of gbk,
# so it decodes the same text but does not raise UnicodeDecodeError on
# the occasional character outside the gbk range.
datas = response.content.decode('gb18030')
tree = etree.HTML(datas)
# Chapter links sit in <td class="ccss"> cells of the index table;
# the hrefs are relative (e.g. "123.htm").
word_urls = tree.xpath('//table[@class="css"]/tr/td[@class="ccss"]/a/@href')



# Download every chapter page and save its text as a UTF-8 .txt file.
for word_url in word_urls:
    detail_url = f'https://www.wenku8.net/novel/0/5/{word_url}'
    logger.info(detail_url)

    # timeout added so one stalled chapter request cannot hang the whole run
    detail_response = requests.get(detail_url, headers=headers, timeout=15)
    # gb18030 is a superset of gbk: same decoding, tolerant of rare chars
    detail_datas = detail_response.content.decode('gb18030')
    detail_tree = etree.HTML(detail_datas)

    # xpath() always returns a LIST of text nodes, even for a single match.
    titles = detail_tree.xpath('//div[@id="title"][1]/text()')
    word = detail_tree.xpath('//div[@id="contentmain"]/div[@id="content"]/text()')

    if not word:  # no chapter body found (e.g. picture-only page) — skip
        continue

    # BUG FIX: the original formatted the raw list into the filename,
    # producing names like "['\xa0标题'].txt". Take the first match,
    # normalize NBSPs, and strip surrounding whitespace.
    title = titles[0].replace('\xa0', ' ').strip() if titles else 'untitled'
    # Replace characters that are illegal in Windows filenames.
    safe_title = re.sub(r'[\\/:*?"<>|]', '_', title)

    full_text = ''.join(word).replace('\xa0', ' ')

    # BUG FIX: open() raises FileNotFoundError if the directory is missing.
    out_dir = 'static/轻小说'
    os.makedirs(out_dir, exist_ok=True)
    # encoding='utf-8' so the GBK-decoded text round-trips without mojibake
    with open(f'{out_dir}/{safe_title}.txt', 'w', encoding='utf-8') as f:
        f.write(full_text)

    logger.info(f"已保存：{safe_title}")



