# A small novel-scraping script; target site: https://www.kubiji.org/
import requests
from bs4 import BeautifulSoup
import bs4
import re


def get_one_page(url):
    """Fetch one page and return its decoded HTML text.

    Returns an empty string on any network/HTTP failure so the caller can
    skip the chapter instead of crashing.
    """
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'zh-CN,zh;q=0.9',
    }
    try:
        # NOTE(review): verify=False disables TLS certificate validation —
        # presumably the target site has certificate problems; confirm.
        response = requests.get(url, headers=headers, verify=False, timeout=30)
        response.raise_for_status()
        # Let requests guess the real encoding (pages are often mislabelled).
        response.encoding = response.apparent_encoding
        return response.text
    except requests.RequestException as exc:
        # Narrowed from a bare `except:`: only network/HTTP errors are
        # expected here; report which one so failures are diagnosable.
        print("error!", exc)
        return ""


def parse_one_page(part_contents, html, book_name):
    """Parse one chapter page out of *html*.

    Appends the chapter title and each body paragraph to *part_contents*,
    extracts the book name, and returns ``(part_contents, book_name)``.
    If the expected markup is missing (e.g. the fetch failed and *html*
    is empty), the inputs are returned unchanged instead of raising.
    """
    soup = BeautifulSoup(html, 'html.parser')
    # Chapter title: <div class="...read_title..."><h1>title</h1></div>.
    chapter_name_ = None
    for chapter_name_ in soup.find_all('div', re.compile('read_title')):
        if isinstance(chapter_name_, bs4.element.Tag):
            print(chapter_name_.h1.contents[0])
    if chapter_name_ is None:
        # Bug fix: the original read the leaked loop variable after the
        # loop, raising NameError when no title div matched. Bail out
        # gracefully so the crawl can continue with the next chapter.
        return part_contents, book_name
    part_contents = part_contents+(chapter_name_.h1.contents[0]+'\n')
    print(chapter_name_.h1.contents[0], "完成采集！")
    # Chapter body: children of the first <div class="...content...">.
    # Children longer than 5 characters are treated as text paragraphs;
    # shorter ones are tag/whitespace noise and skipped.
    chapter_contents_ = soup.find_all('div', re.compile('content'))[0]
    chapter_length = (len(chapter_contents_))
    if isinstance(chapter_contents_, bs4.element.Tag):
        for i in range(1, chapter_length, 1):
            if len(chapter_contents_.contents[i]) > 5:
                part_contents = part_contents + \
                    (chapter_contents_.contents[i]+'\n')
    # Book name: <div class="...read_top..."><h2><a>name</a></h2></div>.
    book_name = (soup.find_all('div', re.compile('read_top'))
                 [0].h2.contents[0].contents[0])
    # Return the accumulated chapter text and the book name.
    return part_contents, book_name


def createFiles(book_name, part_contents):
    """Append *part_contents* to the text file ``<book_name>.txt``.

    The file is opened in append mode with utf-8 encoding (required so
    the Chinese text can be written). Returns ``"error"`` on failure and
    ``None`` on success — the original interface, kept for callers.
    """
    book_name = book_name + '.txt'
    try:
        with open(book_name, 'a', encoding='utf-8') as fo:
            print("保存URL到文件:", fo.name)
            fo.write(part_contents)
    except OSError:
        # Narrowed from a bare `except:`: only file-system errors are
        # expected when opening/writing the output file.
        print("write file failed!")
        return "error"


def write_all_chapter(url, chapter_number):
    """Crawl *chapter_number* consecutive chapters starting at *url*.

    Chapter URLs are assumed to look like ``<prefix>/<number>.html`` with
    consecutive numbers; each chapter is fetched, parsed and appended to
    the book's text file.
    """
    url_parts = url.split("/")
    # Numeric id of the first chapter, taken from '<id>.html'.
    first_chapter_id = int(url_parts[-1].split(".")[0])

    # Shared URL prefix: everything up to and including the last '/'.
    prefix = "".join(part + "/" for part in url_parts[:-1])

    for offset in range(chapter_number):
        # Build this chapter's URL from the prefix and its running id.
        chapter_url = prefix + str(first_chapter_id + offset) + ".html"

        # Fetch and parse the single chapter (accumulators start empty),
        # then append the result to the book's file.
        page_html = get_one_page(chapter_url)
        chapter_text, book_title = parse_one_page("", page_html, "")

        createFiles(book_title, chapter_text)
        print(chapter_url, "章节写入完成！")


def main():
    """Entry point: crawl the configured novel from its first chapter."""
    # URL of the novel's first chapter and the total number of chapters.
    first_chapter_url = 'https://www.kubiji.org/77839/867138.html'
    total_chapters = 410
    write_all_chapter(first_chapter_url, total_chapters)


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
