# Scrape novel data from the web
# How to send HTTP requests
# pip install requests
# pip install lxml  -- strips tags from HTML pages; best used together with the XPath Helper browser extension
import os
import re

import requests
from lxml import etree

# Initialize the output file: delete any result left over from a previous run.
# Guard the removal — os.remove raises FileNotFoundError when the file does
# not exist (e.g. on the very first run), which crashed the original script.
if os.path.exists('道诡异仙.txt'):
    os.remove('道诡异仙.txt')

# URL of the first chapter to scrape; updated in the loop below to walk
# chapter-by-chapter via each page's "next" link.
url = 'https://www.paomov.com/13/13306/14619074.html'


def remove_space(text):
    """Return *text* with all whitespace removed.

    A single character-class substitution suffices: in Python 3, ``\\s``
    already matches the ASCII space and the non-breaking space ``\\xa0``
    (it covers Unicode whitespace for str patterns), so the original
    separate ``text.replace(' ', '')`` pass was redundant.  ``\\xa0`` is
    kept in the class explicitly to document the intent — these pages pad
    paragraphs with NBSP entities.
    """
    return re.sub(r'[\s\xa0]', '', text)


# Spoof a desktop-browser User-Agent so the site serves normal pages.
# NOTE: the original dict literal repeated the 'User-Agent' key; in a
# Python dict the later duplicate silently overwrites the first, so only
# a single entry is needed.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
}


while True:
    # Fetch the current chapter page.  A timeout is essential: without one
    # a stalled connection would hang the scraper forever.
    response = requests.get(url, headers=headers, timeout=30)
    # The site serves UTF-8; set it explicitly so response.text decodes
    # correctly regardless of the (often missing) charset header.
    response.encoding = 'utf-8'

    # Parse the page once and run all XPath queries against it.
    # NOTE: lxml's xpath() has no `default=` parameter — unknown keyword
    # arguments are bound as XPath variables, so the original `default=''`
    # was a silent no-op and has been dropped.
    e = etree.HTML(response.text)

    # Table-of-contents link (2nd anchor in the bottom nav bar).  The last
    # chapter's "next" link points back to the TOC, which is the loop's
    # termination condition.  Guard against an empty result (error page /
    # changed layout) instead of crashing with IndexError.
    # NOTE(review): the blanket '//'-> 'https://' replace assumes the href
    # is protocol-relative (//host/path) — confirm against the live site.
    toc_links = e.xpath('//div[@class="bottem2"]/a[2]/@href', smart_strings=False)
    if not toc_links:
        break
    mulvUrl = toc_links[0].replace('//', 'https://')

    # Chapter body: every text node of the content div, whitespace/NBSP
    # stripped per paragraph.
    content = [remove_space(text)
               for text in e.xpath('//div[@id="content"]/text()', smart_strings=False)]
    # Chapter title (list of text nodes; normally a single element).
    title = e.xpath('//h1/text()', smart_strings=False)

    # URL of the next chapter (3rd anchor in the bottom nav bar).
    next_links = e.xpath('//div[@class="bottem2"]/a[3]/@href', smart_strings=False)
    if not next_links:
        break
    url = next_links[0].replace('//', 'https://')

    # Progress report: print the title string itself (the original printed
    # the raw list, e.g. "['第一章 ...']").
    print(title[0] if title else '(no title)', end='\t\t\t\t\t\t')
    print(url)

    # Append this chapter to the output file.
    with open('道诡异仙.txt', 'a', encoding='utf-8') as f:
        f.write('\n' + (title[0] if title else '') + '\n' + '\n'.join(content))

    # Done: the "next" link of the final chapter loops back to the TOC.
    if url == mulvUrl:
        break
