# user-agent
# pip install requests bs4 lxml -i https://mirror.baidu.com/pypi/simple/
import os
import re
import time
from typing import TextIO

import requests
from bs4 import BeautifulSoup

# Browser-like User-Agent so the site does not reject the requests.
header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36"
}

# TODO base address of the site
baseurl = 'https://www.jingwuxs.com'
# TODO start address: URL of the first chapter to download
nextPageUrl = 'https://www.jingwuxs.com/xiaoshuo/94373053/194605459.html'
# 病弱少女不死于丧尸模拟器
# Directory where the txt output is stored.
filePath = os.path.abspath(os.path.join(os.getcwd(), 'novel'))

# TODO output file name
fileName = '病弱少女不死于丧尸模拟器.txt'

# Full path of the novel output file.
fileNovelPath = os.path.join(filePath, fileName)

# makedirs(exist_ok=True) avoids the exists-then-mkdir race and also creates
# missing parent directories (the previous check + os.mkdir failed on both).
os.makedirs(filePath, exist_ok=True)

# Chapter titles already written, used for de-duplication
# (multi-page chapters repeat the same title on every page).
titleList = []

# HTML of the page currently being processed.
nextPageHtml = ''


# Characters that are unwanted in the saved text (punctuation, brackets,
# path-hostile symbols, and their full-width CJK counterparts).
_SPECIAL_CHARS = re.compile(r'[\[\]#\/\\:*\,;\?\"\'<>\|\(\)《》&\^!~=%\{\}@！：。·！￥……（） ]')


def deletetag(text):
    """Return *text* with all special/punctuation characters removed."""
    return _SPECIAL_CHARS.sub('', text)


def getNextPageUrl(pageHtml: str):
    """
    Extract the href of the next-page link from a chapter page.

    :param pageHtml: raw HTML of the current chapter page
    :return: href of the '#next_url' anchor (a site-relative path)
    """
    document = BeautifulSoup(pageHtml, 'lxml')
    next_anchor = document.select("#next_url")[0]
    return next_anchor['href']


def savePage(pageHtml: str, f: TextIO):
    """
    Write one chapter page (title plus body paragraphs) to the output file.

    :param pageHtml: raw HTML of the chapter page
    :param f: writable text file handle of the novel output file
    :return: None
    """
    document = BeautifulSoup(pageHtml, 'lxml')
    # The chapter title is the <h1> inside the article wrapper.
    title = document.select('#wrapper > article > h1')[0].text
    # Multi-page chapters repeat the title; only write it the first time.
    if title not in titleList:
        titleList.append(title)
        f.write('\n\n' + title + '\n\n')
        print(f"当前章节: {title}")
    # Each <p> inside #booktxt is one paragraph of body text.
    for paragraph in document.select("#booktxt p"):
        f.write(paragraph.text + '\n')


def tryQuit(text):
    """
    Decide whether the crawl has reached the last chapter.

    On the final chapter the site renders the next-page anchor with the
    text '没有了' ("there is no more") instead of a chapter link.

    :param text: raw HTML of the current chapter page
    :return: True if this is the last page, otherwise False
    """
    soup = BeautifulSoup(text, 'lxml')
    # Return the comparison directly instead of if/return True/return False.
    return soup.select("#next_url")[0].text == '没有了'


if __name__ == '__main__':
    # Fetch the first chapter page. A timeout prevents the script from
    # hanging forever on a stalled connection.
    response = requests.get(url=nextPageUrl, headers=header, timeout=30)
    response.encoding = "utf-8"  # the site serves utf-8; set it explicitly
    nextPageHtml = response.text
    with open(fileNovelPath, 'w', encoding='utf-8') as f:
        savePage(nextPageHtml, f)
        # Follow the next-page link until the site reports the last chapter.
        # Checking BEFORE fetching also handles the case where the start
        # page is already the final chapter, instead of building a bogus
        # URL from its dead '没有了' link.
        while not tryQuit(nextPageHtml):
            # Site-relative href of the next chapter page.
            nextPageUrl = getNextPageUrl(nextPageHtml)
            response = requests.get(url=baseurl + nextPageUrl, headers=header, timeout=30)
            response.encoding = 'utf-8'
            nextPageHtml = response.text
            savePage(nextPageHtml, f)
            # Be polite to the server between requests.
            time.sleep(0.4)
        print("爬虫结束~")
