# -*-coding:utf-8-*-
"""
@project: python-project
@Time: 2023/12/27 14:51
@Auth: Loneliness_Burial
@File: 08.爬小说.py
@Motto: 深情不改必坠死海
"""
# 使用网站www.bicuix.com & http://www.365kk.cc/
# 定位方式：xpath
# 书名：//*[@id="info"]/h1/text()
# 作者：//*[@id="info"]/p[1]/text()
# 最后更新时间：//*[@id="info"]/p[3]/text()
# 简介：//*[@id="intro"]/p/text()
# 下一章：//*[@id="wrapper"]/div[5]/div/div[4]/a[4]/@href

import requests
from lxml import etree
import random
import time
import sys


# 获取下一页链接的函数
# def next_url(next_url_element):
#     nxturl = 'http://www.tycqzw.net/57_57672/'
#     # rfind('/') 获取最后一个'/'字符的索引
#     index = next_url_element.rfind('/') + 1
#     nxturl += next_url_element[index:]
#     return nxturl


# http://www.tycqzw.net/57_57672/21597160.html  http://www.tycqzw.net/57_57672/21597161.html
# 数据清洗函数
def clean_data(filename, info):
    """Post-process a scraped novel text file.

    Reads *filename*, writes a cleaned copy to ``'new' + filename``:
    a metadata banner is prepended, runs of two or more consecutive blank
    lines are collapsed into one, full-width indentation (a leading
    U+3000 pair) is stripped from paragraphs, and chapter headings
    (lines starting with "第") are set apart with a dashed separator.

    :param filename: path of the raw scraped text file (relative names
                     keep the output next to the input)
    :param info: [bookTitle, author, update, introduction]
    """
    print("\n==== 数据清洗开始 ====")
    new_filename = 'new' + filename
    # Context managers guarantee both handles are closed even if a write
    # raises (the original open()/close() pair leaked on any exception).
    with open(filename, 'r', encoding='utf-8') as f_old, \
            open(new_filename, 'w', encoding='utf-8') as f_new:
        # Metadata banner at the top of the cleaned file.
        f_new.write('==  《' + info[0] + '》\r\n')  # title
        f_new.write('==  ' + info[1] + '\r\n')  # author
        f_new.write('==  ' + info[2] + '\r\n')  # last-updated timestamp
        f_new.write("=" * 10)
        f_new.write('\r\n')
        f_new.write('==  ' + info[3] + '\r\n')  # introduction
        f_new.write("=" * 10)
        f_new.write('\r\n')
        empty_cnt = 0  # number of consecutive blank lines seen so far
        # Stream the input line by line instead of readlines() — same
        # output, no whole-file buffer for large novels.
        for line in f_old:
            if line == '\n':  # blank line
                empty_cnt += 1
                if empty_cnt >= 2:
                    continue  # keep only the first blank line of a run
            else:
                empty_cnt = 0
            if line.startswith("\u3000\u3000"):
                # Drop the full-width paragraph indent before writing.
                f_new.write(line[2:])
            elif line.startswith("第"):
                # Chapter heading: set it off with a dashed rule.
                f_new.write("\r\n")
                f_new.write("-" * 20)
                f_new.write("\r\n")
                f_new.write(line)
            else:
                # Ordinary un-indented paragraph: write unchanged.
                f_new.write(line)


# Browser-like headers: the site rejects requests without a plausible
# User-Agent/Cookie.  NOTE(review): the Cookie values are session-specific
# and will eventually expire — refresh them if requests start failing.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
    'Cookie': 'Hm_lvt_e3e247ef5fc6ddf680656d5841ad7fe0=1720524034,1720526676; HMACCOUNT=661F62E80A2DA3F0; '
              'clickbids=37363%2C57672%2C29163%2C77021; Hm_lpvt_e3e247ef5fc6ddf680656d5841ad7fe0=1720527458',
    'Host': 'www.tycqzw.net',
    'Connection': 'keep-alive'
}
# Novel index page.
main_url = "http://www.tycqzw.net/57_57672/"
main_resp = requests.get(main_url, headers=headers)
# Raw response bytes; lxml sniffs the charset from the document itself,
# so no explicit decode is needed here.
main_text = main_resp.content
main_html = etree.HTML(main_text)
# Book metadata.  xpath() returns a list, so take the first match.
bookTitle = main_html.xpath('//*[@id="info"]/h1/text()')[0]
author = main_html.xpath('//*[@id="info"]/p[1]/text()')[0]
update = main_html.xpath('//*[@id="info"]/p[3]/text()')[0]
introduction = main_html.xpath('//*[@id="intro"]/p/text()')[0]
# Safety cap on pages (currently unused; re-enable the check below to debug).
maxPages = 1624
cnt = 0  # number of pages fetched so far
# Title of the previous chapter, to avoid writing duplicate headings.
lastTitle = ''
# First chapter page.
start_url = 'http://www.tycqzw.net/57_57672/21597158.html'
# Last chapter page (inclusive).
end_url = 'http://www.tycqzw.net/57_57672/21599080.html'
# Numeric page ids parsed out of the URLs.
start_num = int(start_url.split('/')[-1].split('.')[0])
end_num = int(end_url.split('/')[-1].split('.')[0])

url = start_url
# BUG FIX: the original looped on `start_url != end_url`, but neither
# variable was ever reassigned, so the condition was permanently true and
# the loop could only exit through the break.  Compare the numeric ids,
# which ARE advanced each iteration.
while start_num <= end_num:
    cnt += 1
    # if cnt > maxPages:
    #     break  # stop once the page budget is exhausted (debug aid)
    # BUG FIX: the original called requests.get(url, headers) — the dict
    # was bound positionally to the `params` argument, so every chapter
    # request went out WITHOUT the custom headers.
    resp = requests.get(url, headers=headers)
    html = etree.HTML(resp.content)
    # Chapter title (first text node of the reader heading).
    title = html.xpath('//*[@class="content_read"]/div/div[2]/h1/text()')[0]
    # Chapter body paragraphs.
    contents = html.xpath('//*[@id="content"]/text()')
    # Progress log.
    print("cnt: {},title: {},url: {}".format(cnt, title, url))
    with open(bookTitle + '.txt', 'a', encoding='utf-8') as f_new:
        if title != lastTitle:  # chapter changed
            f_new.write(title + '\n')
            lastTitle = title
        for content in contents:
            f_new.write(content)
            f_new.write('\n\n')
    # Chapter ids are not strictly consecutive: probe +1, then +2, then +4
    # until one of the candidate URLs responds 200.
    for step in (1, 2, 4):
        candidate_num = start_num + step
        candidate_url = f'http://www.tycqzw.net/57_57672/{candidate_num}.html'
        if requests.get(candidate_url, headers=headers).status_code == 200:
            url = candidate_url
            start_num = candidate_num
            break
    else:
        print("未找到下一章链接")
        break
    # Polite crawl delay: pause 2–5 seconds between pages.
    # (The original slept twice per iteration — once is enough.)
    time.sleep(random.randint(2, 5))
clean_data(bookTitle + '.txt', [bookTitle, author, update, introduction])
print("爬取完毕")
