# Python practice
# Author: Han Li
# Created: 2024/07/26 21:22
import random
import re
from re import fullmatch
import time
import numpy as np
from lxml import etree
import requests


# Fetch one page's data and package it into context / href_next:
# context: body text and chapter title
# href_next: link to the next page
# The title and each paragraph are returned as a list (not merged) so the
# chapter title's size can be styled differently when writing to the file.
def getContextAndHref_next(url, head):
    """Fetch one chapter page and extract its text and the next-page link.

    Parameters:
        url:  absolute URL of the chapter page to scrape.
        head: dict of HTTP headers (expects a "User-Agent" entry).

    Returns:
        dict with keys:
            "context":   list of strings — chapter title plus one entry per
                         paragraph (kept as a list so the caller can style
                         the title separately when writing to file).
            "href_next": absolute URL of the next page/chapter.

    Raises:
        ValueError: if the expected page structure is not found.
        requests.RequestException: on network errors or timeout.
    """
    # Timeout so a stalled server cannot hang the scraper forever.
    repo = requests.get(url, headers=head, timeout=10)
    repo.encoding = "utf-8"

    et = etree.HTML(repo.text)

    # 1. Body text (includes the chapter title).
    context = et.xpath("/html/body/div[4]/div/div/div[2]/div[3]//text()")
    if len(context) < 2:
        raise ValueError("unexpected page layout: no chapter text found at " + url)
    # Drop the fixed leading/trailing whitespace nodes the site always emits
    # (slicing replaces the original unguarded pop(0)/pop(-1)).
    context = context[1:-1]

    # 2. Link to the next page/chapter (site serves a relative path only,
    #    so it must be joined with the site root).
    href_next = et.xpath("/html/body/div[4]/div/div/div[2]/div[4]/a[3]/@href")
    if not href_next:
        raise ValueError("unexpected page layout: no next-page link found at " + url)
    href_next = "https://www.22shuquge.net" + href_next[0]

    return {"context": context, "href_next": href_next}

def getBegin_href(page_url, head):
    """Fetch the book's index page and return the first chapter's absolute URL.

    Parameters:
        page_url: absolute URL of the book's index (chapter-list) page.
        head:     dict of HTTP headers (expects a "User-Agent" entry).

    Returns:
        Absolute URL (str) of the first chapter.

    Raises:
        ValueError: if no chapter link is found at the expected location.
        requests.RequestException: on network errors or timeout.
    """
    # Timeout so a stalled server cannot hang the scraper forever.
    repo = requests.get(page_url, headers=head, timeout=10)
    repo.encoding = "utf-8"

    et = etree.HTML(repo.text)
    # The site serves a relative path; join it with the site root.
    begin_url = et.xpath("/html/body/div[4]/div[2]/div[1]/div[2]/ul/li[1]/a/@href")
    if not begin_url:
        raise ValueError("unexpected page layout: no chapter list found at " + page_url)
    return "https://www.22shuquge.net" + begin_url[0]

def write_page(context, file_name=None):
    """Append one page's paragraphs to the output text file.

    Each entry of *context* is written as a tab-indented line, followed by a
    blank double-newline separator after the whole page.

    Parameters:
        context:   iterable of strings (chapter title / paragraphs).
        file_name: path to append to; defaults to the module-level
                   ``file_save_name`` (set in the __main__ block) for
                   backward compatibility with existing callers.
    """
    if file_name is None:
        file_name = file_save_name  # global set in the __main__ block
    with open(file_name, mode="a", encoding="utf-8") as f:
        # One writelines call instead of many tiny write calls.
        f.writelines("\t" + item + "\n" for item in context)
        f.write("\n\n")



def head_random():
    """Return a headers dict with a randomly chosen User-Agent string.

    Picks uniformly from a small pool of real browser UA strings so
    consecutive requests do not all carry the same fingerprint.

    Returns:
        dict with a single "User-Agent" key.
    """
    user_agents = (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0 ",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 "
        "Safari/537.36 Core/1.94.265.400 QQBrowser/12.7.5765.400 ",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 "
        "Safari/537.36 115Browser/25.0.6.5 ",
    )
    # random.choice replaces the original numpy-randint-into-string-keyed-dict
    # round-trip; the uniform distribution over the three UAs is unchanged.
    return {"User-Agent": random.choice(user_agents)}


if __name__ == '__main__':
    # Output file the scraped chapters are appended to (read as a global
    # by write_page).
    file_save_name = "大宣武圣.txt"
    # Number of pages to scrape before stopping.
    page = 5
    # Index page of the book; also used as the loop-termination sentinel.
    page_url = "https://www.22shuquge.net/b/568/568620/"
    begin_url = getBegin_href(page_url, head=head_random())
    print("begin_url: " + begin_url)
    for i in range(page):
        # Fresh random User-Agent for every request.
        context_and_href_next = getContextAndHref_next(begin_url, head=head_random())
        context = context_and_href_next.get("context", "No value assigned")
        href_next = context_and_href_next.get("href_next", "No value assigned")
        write_page(context)
        print("第" + str(i+1) + "页爬取成功！！！！———地址为：" + begin_url)
        # Stop when the "next" link loops back to the book's index page.
        # NOTE(review): the [27:-1] slicing compares the URL path after the
        # domain with the last character dropped — fragile; confirm it can
        # only match the index URL before relying on it.
        if href_next[27:-1] == page_url[27:-1]:
            break
        begin_url = href_next
        # Sleep 0-2 seconds between requests to avoid hammering the server.
        time.sleep(random.uniform(0, 2))
