# !/usr/bin/python
# -*- coding: utf-8 -*-
"""
@FileName:  re_DouLuo.py
@Time    :  2021/10/19 13:03
@Author  :  Alan_1999
@Version :  1.0
@License :  (C)Copyright 2021-2022
@Desc    :  数据来源：https://www.nitianxieshen.com/douluodalu1/
            全书总计分为688章，第一章地址为/29108.html，终章为29795.html
"""
import re, requests, json, time
import sys, os
from lxml import etree


def mkdir(name):
    """Create an output folder named *name* under the current working directory.

    No-op (apart from a status message) when the folder already exists.
    """
    dir = os.path.join(os.getcwd(), name)  # portable join instead of "\\" concat
    if not os.path.exists(dir):
        os.makedirs(dir)
        print(f"创建《{name}》文件夹成功!!!\n")
    else:
        # Bug fix: this message used to print unconditionally, so it appeared
        # even immediately after the folder was freshly created. It should
        # only appear when the folder pre-existed.
        print(f"已存在《{name}》文件夹，爬虫继续中>>>>>>>>>")


def write_text(book_name, chap_name, text, count):
    """Write one chapter to <cwd>/<book_name>/<count>-<chap_name>.txt.

    The first element of *text* is skipped (presumably a non-content
    paragraph on the source site — TODO confirm); each remaining paragraph
    is written on its own line. Any existing file is overwritten.
    """
    print(f">>>>> 正在写入 {chap_name} 章节 >>>>>")
    path = os.path.join(os.getcwd(), book_name, str(count) + "-" + chap_name + ".txt")
    # Fixes vs. original:
    #  - "w" mode already truncates, so the prior os.remove() was redundant;
    #  - encoding pinned to UTF-8: the default locale codec varies per
    #    machine and can fail on this Chinese text;
    #  - "with" guarantees the handle is closed even if a write fails.
    with open(path, "w", encoding="utf-8") as chap_file:
        for item in text[1:]:
            print(item, file=chap_file)
    print(f">>>>>{count}-{chap_name}写入完成，即将开始下一章节 >>>>>")


if __name__ == '__main__':
    # Browser-like request headers: the site may reject the default
    # python-requests User-Agent, so a Firefox fingerprint is sent instead.
    header = {"Host": "www.nitianxieshen.com",
              "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0",
              "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
              "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
              "Accept-Encoding": "gzip, deflate, br",
              "Connection": "keep-alive",
              "Referer": "https://www.nitianxieshen.com/douluodalu1/29109.html"
              }
    calo_html = "https://www.nitianxieshen.com/douluodalu1/"
    # NOTE(review): the latin1-encode / utf-8-decode round trip suggests the
    # server omits a charset header, so requests falls back to ISO-8859-1 and
    # this re-decodes the raw bytes as UTF-8. Fragile — breaks if the site
    # ever starts declaring its charset. Confirm against a live response.
    sc_calo_html = requests.get(calo_html, headers=header).text.encode("latin1").decode("utf-8")
    # print(sc_calo_html)       # inspect the fetched table-of-contents source

    xp_calo_html = etree.HTML(sc_calo_html)
    # Book title scraped from the index page; used to name the output folder.
    book_name = [item.text for item in xp_calo_html.xpath("/html/body/div[1]/div[2]/div[1]/div[1]/div[2]/h1")][0]
    # print([item.text for item in book_name])  # book title used for the output folder/files
    mkdir(book_name)

    # Chapter page IDs run 29108..29795 per the site's index (see module docstring).
    chap_begin, chap_end, count_flag = 29108, 29795, 0
    for i in range(chap_begin, chap_end+1):
        chap_html = calo_html + str(i) + ".html"
        # chap_html = calo_html + str(29108) + ".html"
        # print(chap_html)  # verify the chapter URL before fetching
        # Same latin1→utf-8 re-decode trick as for the index page above.
        sc_chap_html = requests.get(chap_html, headers=header).text.encode("latin1").decode("utf-8")
        # print(sc_chap_html)  # inspect the page source to locate the text nodes
        xp_chap_html = etree.HTML(sc_chap_html)
        # Chapter title from the page header; also used in the output filename.
        chap_name = [item.text for item in xp_chap_html.xpath("/html/body/div/div/div[2]/h1")][0]
        # print(chap_name)  # chapter title
        text = [item.text for item in xp_chap_html.xpath('/html/body/div/div/div[3]/p')]
        # print(text)  # chapter paragraphs, ready for writing
        count_flag += 1
        write_text(book_name, chap_name, text, count_flag)
    print(f"\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"
          f"{book_name}全书爬取成功，共计{count_flag}章\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")

