from typing import List, Union, Any
from urllib.parse import urljoin
from xml import etree
from lxml import html
import requests

# Landing page of the target novel on xbiqu6.com.
url = 'https://www.xbiqu6.com/books/4783423/'
# Browser-like User-Agent so the site does not reject the scraper's requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}


# Fetch the novel landing page and locate the link to the full chapter catalog.
def get_novel_catalog(url):
    """Return the href of the "all chapters" button on the landing page.

    Returns None (after printing an error) when the HTTP status is not 200.
    """
    resp = requests.get(url, headers=headers)
    resp.encoding = 'utf-8'
    if resp.status_code != 200:
        print('请求失败')
        return None
    tree = html.fromstring(resp.text)
    return tree.xpath('//a[@class="btn-tobtm"]/@href')[0]

# Build the URL of every paginated catalog page.
def get_next_page(catalog_url, last_page=32):
    """Return the list of catalog-page URLs for the novel.

    The site splits the chapter catalog across pages named
    ``index_2.html`` .. ``index_<last_page>.html`` that are siblings of
    the first catalog page.

    Args:
        catalog_url: URL of the first catalog page.
        last_page: number of the final ``index_N.html`` page; the default
            of 32 preserves the previously hard-coded page count.

    Returns:
        List of absolute page URLs, starting with ``catalog_url`` itself.
    """
    # Page 1 is the catalog URL itself; later pages resolve relative to it.
    return [catalog_url] + [
        urljoin(catalog_url, 'index_' + str(i) + '.html')
        for i in range(2, last_page + 1)
    ]



# Collect the chapter links listed on one catalog page.
def get_novel_chapter(page_url):
    """Return the absolute chapter URLs found on a catalog page.

    Args:
        page_url: URL of one paginated catalog page.

    Returns:
        List of absolute chapter URLs. On a non-200 response an empty
        list is returned (previously ``None``, which made callers doing
        ``chapter_urls.extend(get_novel_chapter(...))`` raise TypeError).
    """
    response = requests.get(page_url, headers=headers)
    response.encoding = 'utf-8'
    if response.status_code != 200:
        print('请求失败')
        return []
    selector = html.fromstring(response.text)
    # Hrefs in the chapter list are relative; resolve them against the page URL.
    return [urljoin(page_url, href)
            for href in selector.xpath('//div[2]/ul/li[*]/a/@href')]

# Download one chapter page and extract its title and body paragraphs.
def get_novel_content(chapter_url):
    """Return ``(paragraphs, title)`` for a chapter page.

    ``paragraphs`` is the list of text nodes from the content div and
    ``title`` the chapter heading. Returns None (after printing an
    error) when the HTTP status is not 200.
    """
    resp = requests.get(chapter_url, headers=headers)
    resp.encoding = 'utf-8'
    if resp.status_code != 200:
        print('请求失败')
        return None
    tree = html.fromstring(resp.text)
    paragraphs = tree.xpath('//div[@class="content"]/p/text()')
    chapter_title = tree.xpath('//h1[@class="title"]/text()')[0]
    return paragraphs, chapter_title



if __name__ == '__main__':
    catalog_url = get_novel_catalog(url)
    if catalog_url is None:
        # Without the catalog link nothing else can run; urljoin(None, ...)
        # would otherwise raise a confusing TypeError below.
        raise SystemExit('failed to fetch the novel catalog link')

    # Gather chapter links from every paginated catalog page.
    chapter_urls = []
    for page_url in get_next_page(catalog_url):
        chapters = get_novel_chapter(page_url)
        if chapters:  # a falsy result (None or []) means the request failed
            chapter_urls.extend(chapters)

    novel_content = {}
    for chapter_url in chapter_urls:
        result = get_novel_content(chapter_url)
        if result is None:  # request failed; skip this chapter
            continue
        content, title = result
        novel_content[title] = content
        # Strip characters that are invalid in filenames (notably on Windows).
        safe_title = ''.join(c for c in title if c not in '\\/:*?"<>|')
        with open(safe_title + '.txt', 'w', encoding='utf-8') as f:
            f.write('\n'.join(content))  # save the chapter text to its own file
        print(title, '保存成功')