"""
    请求地址：https://www.bqka.cc/top/
    1.发起请求
    2.获取玄幻排行榜
    3.获取目录链接
    4.获取每一章内容
    5.保存数据
"""

import re
import os
import time
import requests
from lxml import etree
from multiprocessing import Pool

# Request helper
def getHTML(url):
    """Send a GET request to *url* with browser-like headers.

    Returns the response body as text (with the encoding auto-detected
    from the content), or None when the request fails or the server
    answers with an error status.
    """
    header = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Alt-Used": "www.bqka.cc",
        "Connection": "keep-alive",
        "Cookie": "Hm_lvt_b147be33903fb4b5cd5f16843ab81a1d=1717460174; Hm_lpvt_b147be33903fb4b5cd5f16843ab81a1d=1717461621",
        "Host": "www.bqka.cc",
        "Priority": "u=1",
        "Referer": "https://www.bqka.cc/xuanhuan/",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:126.0) Gecko/20100101 Firefox/126.0"
    }

    try:
        # timeout keeps a dead connection from hanging a pool worker forever
        response = requests.get(url=url, headers=header, timeout=10)
        response.raise_for_status()
        # the site does not always declare a charset; trust detection
        response.encoding = response.apparent_encoding
        return response.text
    except requests.RequestException:
        # narrow catch: a bare except would also swallow KeyboardInterrupt
        print('爬取失败')
        return None


# Get the books on the ranking page
def getFictionName():
    """Parse https://www.bqka.cc/top/ for the ranking list.

    Returns a pair of parallel lists ``(fiction_id, fiction_link)``.
    Both lists are empty when the page could not be fetched, so callers
    can always unpack the result safely (the old code returned None
    here, which crashed the tuple-unpack in the caller).
    """
    fiction_id = []
    fiction_link = []
    response = getHTML(url='https://www.bqka.cc/top/')
    if response:
        tree = etree.HTML(response)
        node_list = tree.xpath('//div[@class="wrap rank"]/div[1]//li')
        for node in node_list:
            href = node.xpath('./a/@href')[0]  # hoisted: used twice below
            fiction_link.append('https://www.bqka.cc' + href)
            fiction_id.append(re.findall('/book/(.*?)/', href)[0])
    else:
        print('排行榜解析失败')
    return fiction_id, fiction_link

# Walk a book's catalogue and download every chapter
def getFictionos(fiction):
    """Download all chapters of one book and save them to disk.

    *fiction* is a ``(book_id, detail_url)`` pair as produced by
    ``getFictionName``.  Chapter pages live at ``/book/<id>/<n>.html``;
    the last catalogue link carries the highest chapter number.
    """
    print(fiction)
    book_id, book_url = fiction
    html = getHTML(url=book_url)
    if not html:
        # detail page unreachable; skip this book instead of crashing
        return
    tree = etree.HTML(html)
    # book title
    fiction_name = tree.xpath('//div[@class="info"]/h1/text()')[0]
    # author line
    author = tree.xpath('//div[@class="small"]/span[1]/text()')[0]
    # the last catalogue entry's href gives the total chapter count
    last_href = tree.xpath('//div[@class="listmain"]//dd/a/@href')[-1]
    # escape both the id and the literal dot (old regex had a bare ".html")
    match = re.findall(rf'/book/{re.escape(book_id)}/(.*?)\.html', last_href)
    if not match:
        return
    # NOTE: the old code reused the name "page" for both the regex result
    # and the loop variable; distinct names avoid that shadowing.
    for chapter in range(1, int(match[0]) + 1):
        page_html = getHTML(url=f'https://www.bqka.cc/book/{book_id}/{chapter}.html')
        parsed = getParse(page_html)
        if parsed is None:
            continue  # skip chapters that failed to download or parse
        title, text = parsed
        saveText(fiction_name, author, title, text)

# Chapter-page parser
def getParse(content):
    """Extract the chapter title and body text from one chapter page.

    Returns ``(title, lines)`` where *lines* is the list of raw text
    nodes of the chapter body, or ``None`` (after printing a notice)
    when *content* is empty/None.
    """
    if not content:
        print('内容解析失败')
        return None
    tree = etree.HTML(content)
    # chapter heading
    title = tree.xpath('//div[@class="content"]/h1/text()')[0]
    # body: direct text nodes of the #chaptercontent div
    lines = tree.xpath('//div[@class="content"]//div[@id="chaptercontent"]/text()')
    return title, lines

# Persistence helper
def saveText(fictionName, author, title, text):
    """Write one chapter to ``./小说/<fictionName>/<title>.txt`` as UTF-8.

    The file starts with the title and author lines, followed by the
    chapter body joined with newlines.  The directory (including the
    top-level ``./小说`` folder, which the old ``os.mkdir`` could not
    create) is made on demand; ``exist_ok`` also makes this safe when
    several pool workers race to create it.
    """
    folder = f'./小说/{fictionName}'
    os.makedirs(folder, exist_ok=True)
    # single write path — the old if/else duplicated this block verbatim
    with open(f'{folder}/{title}.txt', 'w', encoding='utf-8') as fp:
        fp.write(title + '\n' + author + '\n')
        fp.write('\n'.join(text))

if __name__ == '__main__':
    # getFictionName may legitimately come back empty/None on a failed
    # fetch; guard before unpacking instead of crashing.
    ranking = getFictionName()
    if ranking:
        fiction_id, fiction_link = ranking
        fiction = list(zip(fiction_id, fiction_link))
        # one (id, link) pair per worker task, 5 books in parallel
        pool = Pool(5)
        try:
            pool.map(getFictionos, fiction)
        finally:
            # guarantee shutdown even if a worker raises
            pool.close()
            pool.join()