# 项目名：小说爬取
# 作者：张见葛

import requests
import novelSource as sources
from cusProgress import set_progress
from cusProgress import update_progress
from custPrint import get
from custPrint import print_welcome_info
from custPrint import print_search_result
from custPrint import print_chapter_list
from lxml import etree
from rich.console import Console
import threading
import sys
import os
import time
from requests.packages.urllib3.exceptions import InsecureRequestWarning

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)  # silence unverified-HTTPS warnings from misconfigured sources
# Rich console used for colored log output (see print_log)
console = Console()
# Shared mutable crawl state; 'thread_num' is the size of the download thread pool
Global = {'thread_num': 20}


# Worker thread used to crawl chapters in parallel
class myThread(threading.Thread):
    """Downloads every chapter index assigned to this worker, in order."""

    def __init__(self, cha_list):
        # cha_list: the chapter indices this worker is responsible for
        super().__init__()
        self.cha_list = cha_list

    def run(self):
        # Crawl each assigned chapter; parse_content writes it to disk
        for chapter_index in self.cha_list:
            parse_content(chapter_index)


# Colored log printing
def print_log(info, color='red'):
    """Print *info* to the shared rich console in the given style (default red)."""
    console.print(info, style=color)


# Fetch the page at a URL
def get_html_code(url, method, headers, data, timeout=None):
    """Issue an HTTP request and return the requests.Response.

    :param url: target URL
    :param method: HTTP verb, e.g. 'GET' or 'POST'
    :param headers: header dict (may be None)
    :param data: form/body payload (may be None)
    :param timeout: optional seconds before giving up; default None keeps the
                    original behavior (requests waits indefinitely)
    """
    return requests.request(method, url, headers=headers, data=data, timeout=timeout)


# Parse HTML text and extract the elements matched by an XPath expression
def get_xpath_value(html, xpath):
    """Return the list of nodes/strings matched by *xpath* in *html*.

    Returns an empty list for empty or unparseable documents. (The original
    replaced empty input with "" and then called etree.HTML("").xpath(...),
    which crashes because lxml yields no document for empty input.)
    """
    if not html:
        return []
    tree = etree.HTML(html)
    if tree is None:  # lxml could not build a document from this input
        return []
    return tree.xpath(xpath)


# Normalize a URL: absolute inputs pass through, relative paths like
# '/book/24333/' are joined onto the domain name
def parse_url(domain_name, url):
    """Return an absolute URL for *url* relative to *domain_name*."""
    if url.startswith(('http', 'www')):
        return url
    separator = '' if url.startswith('/') else '/'
    return domain_name + separator + url


# Search every configured source for the novel and collect all hits
def search_novel(novel_sources):
    """Query each source's search engine and return a list of hit dicts.

    Each hit carries: novel_name, author, url (made absolute), source_name,
    and the full source config (needed later for detail/content pages).

    BUG FIX: the original `return response` was indented inside the
    for-loop, so only the FIRST configured source was ever searched.
    """
    response = []
    for novel_source in novel_sources:
        s_o = novel_source.get('search-engines')
        # XPath expressions for pulling fields out of the results page
        xpath_expression = get(s_o, 'xpath')
        # Fetch the search results page
        search_text = get_html_code(get(s_o, 'url'), get(s_o, 'method'), get(s_o, 'headers'), get(s_o, 'data'))
        # One node per novel in the result list
        search_novels = get_xpath_value(search_text.text, get(xpath_expression, 'novel-list'))
        for novel_info in search_novels:
            novel_info_html = etree.tostring(novel_info, method='html')
            novel_name = get_xpath_value(novel_info_html, get(xpath_expression, 'novel-info/novel-name'))
            author = get_xpath_value(novel_info_html, get(xpath_expression, 'novel-info/author'))
            url = get_xpath_value(novel_info_html, get(xpath_expression, 'novel-info/url'))
            source_name = get(novel_source, 'name')
            # Keep only fully-formed hits: exactly one non-empty url, one name, one author
            if len(url) == 1 and len(url[0]) > 0 and len(novel_name) == 1 and len(author) == 1:
                response.append({"novel_name": novel_name[0], "author": author[0],
                                 "url": parse_url(novel_source.get('web-url'), url[0]),
                                 "source_name": source_name,
                                 "source": novel_source})
    return response


# Scrape the chapter list from the novel's detail page
def get_chapter_list(novel):
    """Return a list of {'chapter_name', 'url'} dicts for *novel*.

    The URLs are made absolute against the novel's own page URL.
    """
    details = 'source/details-page/'
    page = get_html_code(get(novel, 'url'), get(novel, details + 'method'),
                         get(novel, details + 'headers'), None)
    nodes = get_xpath_value(page.text, get(novel, details + 'list-xpath'))
    chapters = []
    for node in nodes:
        node_html = etree.tostring(node, method='html')
        names = get_xpath_value(node_html, get(novel, details + 'chapter-name'))
        urls = get_xpath_value(node_html, get(novel, details + 'url'))
        # Keep only entries where exactly one name and one URL matched
        if len(names) == 1 and len(urls) == 1:
            chapters.append({"chapter_name": names[0],
                             "url": parse_url(novel.get('url'), urls[0])})
    return chapters


# Crawl the user-selected range of chapters with a thread pool, then merge
def parse_novel_content(chapters, novel_info):
    """Interactively pick a chapter range, crawl it in parallel, merge to one file.

    :param chapters: list of {'chapter_name', 'url'} dicts from get_chapter_list
    :param novel_info: the selected hit dict from search_novel
    Exits the process when no chapters were found.
    """
    # Share crawl state with the worker threads (parse_content reads these)
    Global['chapters'] = chapters
    Global['novel'] = novel_info
    if not chapters:
        # (fixed typo: 推出 -> 退出)
        print_log('>> 未爬取到目录信息, 开始退出程序！')
        sys.exit(0)
    print_chapter_list(chapters)
    Global['start'] = int(input("请输入想要下载的开始章节序号: "))
    Global['end'] = int(input("请输入想要下载的结束章节序号: "))
    # Create the output directory and record paths in Global
    mk_main_dir(novel_info.get('novel_name'))
    # NOTE(review): an inclusive start..end range contains end-start+1 chapters,
    # so this total looks off by one — confirm against cusProgress.set_progress
    # before changing.
    set_progress(Global['end'] - Global['start'])
    thread_num = Global['thread_num']
    chapters_list = [[] for _ in range(thread_num)]
    # Round-robin the 0-based chapter indices across the workers. Using range()
    # also fixes a hang in the original while-True loop when end < start.
    for offset, index in enumerate(range(Global['start'] - 1, Global['end'])):
        chapters_list[offset % thread_num].append(index)
    # Only spawn workers that actually have chapters assigned
    thread_list = [myThread(batch) for batch in chapters_list if batch]
    for th in thread_list:
        th.start()
    for th in thread_list:
        th.join()
    time.sleep(3)  # give the filesystem a moment before merging
    # Merge the per-chapter files into one novel file
    merge_files()


def merge_files():
    """Concatenate all per-chapter files under Global['parent_path'] into one file.

    Any previous merged output is deleted first so it is not re-merged into
    itself. Chapter files are named with a zero-padded index, so a plain
    lexical sort restores reading order.
    """
    output_path = Global['parent_path'] + Global['novel_name']
    if os.path.exists(output_path):
        print_log('>> 删除已合并内容')
        os.remove(output_path)
    # Collect the chapter files before creating the output file, so the
    # output itself is never swept up by the walk
    files_path = []
    for root, _dirs, files in os.walk(Global['parent_path']):
        for name in files:
            files_path.append(os.path.join(root, name).replace('\\', '/'))
    files_path.sort()
    # 'with' guarantees handles are closed even if a read/write fails
    # (the original leaked handles on error and left a debug print here)
    with open(output_path, "w", encoding='utf-8') as novel_file:
        for path in files_path:
            with open(path, 'r', encoding='utf-8') as chapter_file:
                novel_file.write(chapter_file.read())


# Crawl one chapter's content and write it to its own numbered file
def parse_content(index):
    """Download chapter *index* (0-based into Global['chapters']) to a .txt file.

    Retries up to 5 times when the page comes back empty, then writes whatever
    was obtained (possibly just the title) and updates the progress bar.
    """
    chapter = Global['chapters'][index]
    chapter_name = chapter.get('chapter_name')
    chapter_url = chapter.get('url')
    method = get(Global['novel'], 'source/details-page/method')
    headers = get(Global['novel'], 'source/details-page/headers')
    # Some sources intermittently return empty pages — retry a few times
    contents = []
    for _attempt in range(5):
        contents = get_xpath_value(get_html_code(chapter_url, method, headers, None).text,
                                   get(Global['novel'], 'source/details-page/content-xpath'))
        if contents:
            break
    # Zero-padded index keeps per-chapter files lexically sortable for merge_files.
    # NOTE(review): chapter_name may contain characters invalid in filenames — confirm.
    file_path = Global['parent_path'] + str(index).zfill(6) + ' ' + chapter_name + '.txt'
    # BUG FIX: the original ended with a bare `f.close` (no parentheses), so the
    # handle was never explicitly closed; `with` guarantees it.
    with open(file_path, "w", encoding='utf-8') as f:
        f.write('\n\n\t\t' + chapter_name)
        if contents and isinstance(contents, list):
            for content in contents:
                # \u00a0 (non-breaking space) renders badly in plain text
                f.write(content.replace("\u00a0", " "))
    update_progress(chapter_name)


# Create the novel's download directory and record its paths in Global
def mk_main_dir(dir_name, parent_path='./'):
    """Ensure the per-novel directory exists under *parent_path*.

    Side effects: sets Global['parent_path'] (directory chapters are written
    into, with trailing '/') and Global['novel_name'] (merged output filename).
    """
    path = dir_name.strip().rstrip("\\")  # drop surrounding spaces and a trailing backslash
    full_path = parent_path + path
    if not os.path.exists(full_path):
        print_log('>> 开始创建小说文件夹')
        # BUG FIX: the original called os.makedirs(path), ignoring parent_path,
        # so any non-default parent_path created the directory in the wrong place
        os.makedirs(full_path)
        print_log('>> 【' + path + '】文件夹创建成功')
    # Record where chapter files go and what the merged file is called
    Global['parent_path'] = full_path + '/'
    Global['novel_name'] = dir_name + '.txt'


# Entry point: interactive flow from search to merged download
if __name__ == '__main__':
    # Show the welcome banner
    print_welcome_info()

    # Ask for a title, then search every configured source for it
    novels = search_novel(sources.get_sources(input("请输入想要下载的小说名: ")))
    # novels = search_novel(sources.get_sources("官居一品"))
    # print_search_result prints the hits and returns the user's 1-based choice
    index = print_search_result(novels) - 1
    # get_chapter_list(novels[index])
    # Scrape the chosen novel's chapter list from its detail page
    result_list = get_chapter_list(novels[index])
    # The commented block below is a sample of the source-config schema:
    # result_list = [{'chapter_name': '第一章 一梦五百年 (上)', 'url': 'https://www.bqg9527.com/book/24333//12831690.html'},
    #                {'chapter_name': '第二章 一梦五百年 (中)', 'url': 'https://www.bqg9527.com/book/24333//12831691.html'}]
    # novel = {'novel_name': '官居一品', 'author': '三戒大师', 'url': 'https://www.bqg9527.com/book/24333/', 'source_name': '笔趣阁',
    #          'source': {'name': '笔趣阁', 'web-url': 'https://www.bqg9527.com',
    #                     'search-engines': {'url': 'https://www.bqg9527.com/s.php', 'method': 'POST',
    #                                        'headers': {'origin': 'https://www.bqg9527.com',
    #                                                    'referer': 'https://www.bqg9527.com/',
    #                                                    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
    #                                                    'Accept': '*/*', 'Cache-Control': 'no-cache',
    #                                                    'Host': 'www.bqg9527.com',
    #                                                    'Content-Type': 'application/x-www-form-urlencoded'},
    #                                        'data': {'type': 'articlename', 'language': 'zh_cn', 's': '官居一品'},
    #                                        'xpath': {'novel-list': '//*[@id="history"]/li[position()>1]',
    #                                                  'novel-info': {'novel-name': '//span[@class="t2"]/a/text()',
    #                                                                 'author': '//span[@class="t3"]/text()',
    #                                                                 'new-chapter': '//span[@class="t4"]//text()',
    #                                                                 'url': '//span[@class="t2"]/a/attribute::href'}}},
    #                     'details-page': {'list-xpath': '//*[@id="list"]/dl/dt[2]/following-sibling::dd',
    #                                      'url': '//a/attribute::href', 'chapter-name': '//a/text()',
    #                                      'content-xpath': '//*[@id="content"]/text()', 'method': 'GET', 'headers': {
    #                             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
    #                             'Accept': '*/*', 'Cache-Control': 'no-cache',
    #                             'Content-Type': 'application/x-www-form-urlencoded'}}}}
    # Pick a range interactively, crawl it in threads, and merge the output
    parse_novel_content(result_list, novels[index])
