#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@File Name  : cl_txtDown_v1.py
@Author     : LeeCQ
@Date-Time  : 2019/12/13 18:02

1.建立对照表：
    文件名：章节名
2.下载文件： 多线程
3. 拼接文件；

"""
import os
import sys, logging

logger = logging.getLogger("logger")  # module-level logger shared by the whole script
formatter = logging.Formatter("[%(asctime)s] < %(threadName)s: %(thread)d > [%(levelname)s] %(message)s")
# Console handler: mirror every record to stdout.
# NOTE(review): "consle_handler" is a typo for "console_handler" — kept as-is.
consle_handler = logging.StreamHandler(sys.stdout)
consle_handler.setFormatter(formatter)  # same format as the file handler added later by DownloadTXT.log()

logger.setLevel(logging.DEBUG)  # log everything from DEBUG upward
logger.addHandler(consle_handler)

import warnings
import requests
import urllib3
import time
import bs4
import re
import threading
from HTTP.Download_txt.Error import *
from SQL.MySQL.localhost_mysql import LocalhostMySQL


class TxtMySQL(LocalhostMySQL):
    """MySQL wrapper that stores one downloaded chapter per row."""

    def __init__(self, user, passwd, db, **kwargs):
        super().__init__(user, passwd, db, **kwargs)

    def create_txt(self, table_name):
        """Create the per-book chapter table if it does not exist yet.

        Columns: idd (chapter id), url (chapter page), title, body.
        """
        columns = ", ".join((
            "idd     INT(10)     NOT NULL    UNIQUE",
            "url     VARCHAR(50)     NOT NULL    UNIQUE",
            "title   VARCHAR(99)",
            "body    VARCHAR(20000)",
        ))
        statement = f"CREATE TABLE IF NOT EXISTS `{table_name}` ( {columns} ) "
        return self.create_table(statement)


class DownloadTXT:
    """Download a web novel: fetch the catalogue, download every chapter with
    a throttled thread pool into MySQL, then join the rows into one .txt file.

    :param retry: retry count on I/O / network errors, default: 5.
    :param timeout: per-request timeout for requests.get, default: 10 s.
    :param max_thread: maximum number of concurrent download threads, default: 30.
    :param debug: whether to stop on a network transfer error, default: False.
        NOTE(review): __requests_200 RAISES when debug is False and merely
        logs when it is True — this looks inverted w.r.t. this description;
        confirm the intended polarity before relying on it.
    :param warning: whether warning output is allowed, default: False.
    :param log_path: log file location, default: ./sup/txtDownload.log.
    :param html_encode: page encoding, default: utf8.
    """
    # Static request headers; 'Host' is filled in per-URL by __requests_get.
    HEADER = {
        'Accept': 'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-Hans-CN, zh-Hans; q=0.5',
        'Cache-Control': 'max-age=0',
        'Connection': 'Keep-Alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362'
        }
    # Site boilerplate stripped from every chapter body (each replaced with '').
    # The full-width space '　' is listed twice; the duplicate is harmless.
    replace = ['天才一秒记住本站地址：www.biqumo.com　笔趣阁手机版阅读网址：m.biqumo.com',
               'https://www.biqumo.com/',
               '天才一秒记住本站地址：',
               'www.biqumo.com',
               '笔趣阁手机版阅读网址：m.biqumo.com',
               'chaptererror();',
               'app2();', 'app1();', '　', '　', ' ',
               ]

    def __init__(self, retry=5, timeout=10, max_thread=30,
                 debug=False, warning=False, log_path=None,
                 html_encode='utf8'):
        self.retry = retry
        self.debug = debug
        self.encode = html_encode
        self.timeout = timeout
        self.max_thread = max_thread
        self.log_path = log_path if log_path else './sup/txtDownload.log'

        # makedirs(exist_ok=True) instead of mkdir: supports nested paths and
        # removes the check-then-create race of the original code.
        os.makedirs(os.path.dirname(self.log_path), exist_ok=True)
        self.log()

        if not warning:
            warnings.filterwarnings("ignore")

        self.not_download = []  # URLs that still failed after all retries
        self.__sql = TxtMySQL('test', '123456', 'test')

    def log(self):
        """Attach a file handler to the shared module-level logger.

        Bug fix: the original passed f"{self.log_path}.log", doubling the
        suffix (e.g. 'txtDownload.log.log'); the path is now used as given.
        """
        file_handler = logging.FileHandler(self.log_path, encoding='utf8')
        file_handler.setFormatter(formatter)  # same format as the console handler
        logger.setLevel(logging.DEBUG)
        logger.addHandler(file_handler)

    def set_timeout(self, timeout):
        """Override the per-request timeout (seconds)."""
        self.timeout = timeout

    def set_maxThread(self, max_thread):
        """Override the maximum number of download threads."""
        self.max_thread = max_thread

    def set_retryTimes(self, retry):
        """Override the retry count for failed requests."""
        self.retry = retry

    @staticmethod
    def re_chapter(_text):
        """Parse one chapter page — intended to be overridden per site.

        :param _text: HTML source of a chapter page
        :return: (chapter title, chapter body) as strings
        """
        _bs4 = bs4.BeautifulSoup(_text, 'lxml')
        title = _bs4.select('div.content > h1')[0].text
        body = _bs4.select('div.content > div.showtxt')[0].text  # selector list -> str
        return title, body

    @staticmethod
    def re_catalogue(_text):
        """Parse the catalogue page.

        :param _text: HTML source of the catalogue page
        :return: (book name, list of (relative url, chapter title) tuples)
        """
        _bs4 = bs4.BeautifulSoup(_text, 'lxml')
        book_name = _bs4.select('div.book > div.info > h2')[0].text
        _text = str(_bs4.select('div.listmain > dl')[0])
        # Drop everything up to the last </dt>: the leading part is the
        # "latest chapters" teaser block, which would duplicate entries.
        _text = _text[_text.rfind('</dt>') + 4:]
        return book_name, re.findall('<a href="(.*?)">(.*?)</a>', _text)

    @staticmethod
    def __format_replace(s: str):
        """Split *s* on newlines, strip each line and drop the empty ones.

        Bug fix: the original tested `x is not ''` (identity comparison on a
        literal, a SyntaxWarning since Python 3.8); compare by value instead.
        """
        return [line for line in (part.strip() for part in s.split('\n')) if line != '']

    def __requests_get(self, _url: str, header: dict):
        """Issue one GET and measure how long it took.
        !! Only __requests_200() may call this function !!

        :return: (status code, response object, elapsed seconds);
                 the sentinel (-1, '超时', 0) on any network failure.
        """
        start: float = time.time()
        # NOTE(review): setdefault mutates the shared HEADER dict, so 'Host'
        # sticks to the first URL's netloc for all later requests — confirm.
        header.setdefault('Host', _url.split('/')[2])

        urllib3.disable_warnings()
        try:
            _res = requests.get(_url, headers=header, verify=False, timeout=self.timeout)
            return _res.status_code, _res, time.time() - start
        except requests.RequestException:
            # Narrowed from a bare `except:` so programming errors and
            # KeyboardInterrupt are no longer swallowed; the sentinel keeps
            # the caller's tuple-unpacking intact.
            return -1, '超时', 0

    def __requests_200(self, _url, header):
        """GET with retries until a 200 response.

        :return: (page text, elapsed seconds); on exhaustion either raises
            RetryTimeout (debug False) or logs, records the URL in
            self.not_download and returns (message, -1).
        """
        attempts = self.retry
        while attempts:
            attempts -= 1
            status_code, __page, _runtime = self.__requests_get(_url, header)
            if status_code == 200:
                __page.encoding = self.encode
                return __page.text, _runtime
            logger.warning(f'非200返回：{status_code}')
            time.sleep(1)  # brief back-off before the next attempt
        # NOTE(review): polarity looks inverted vs. the class docstring —
        # debug=False raises, debug=True continues. Kept as-is.
        if not self.debug:
            raise RetryTimeout(f"重试次数用尽:{_url}")
        else:
            logger.error(f"重试次数用尽:{_url}")
            self.not_download.append(_url)
            return f"重试次数用尽:{_url}", -1

    def __re_mainBody(self, _text):
        """Parse a chapter page and normalise line breaks in its body."""
        title, body = self.re_chapter(_text)

        body = str(body.replace('<br>', '\n'))
        body = body.replace('\r', '\n')
        body = body.replace('  ', '')  # collapse double spaces left by the site layout
        return title, body

    @staticmethod
    def __get_idd(url_):
        """Chapter id from its URL: '.../8_8521/123.html' -> '123'."""
        return url_.split('/')[-1].split('.')[0]

    def __format_sql(self, chap):
        """Turn [(url, title), ...] into parallel (idd, url, title) lists for INSERT."""
        _d = dict(chap)
        urls_, titles_ = list(_d.keys()), list(_d.values())
        return [self.__get_idd(url_) for url_ in urls_], urls_, titles_

    def __combine(self):
        """Concatenate every chapter row (ordered by idd) into '<book>.txt'."""
        result = self.__sql.select(self.book_name, 'title', 'body', ORDER='`idd`')
        _s = ''
        for title, body in result:
            # Prepend the title only when the body does not already start with
            # it.  (A leftover debug print(body) was removed here — it dumped
            # entire chapters to stdout.)
            if not body.startswith(title):
                _s += f'{title}\n{body}\n\n'
            else:
                _s += f'{body}\n\n'
        # NOTE(review): 'gbk' raises on characters outside the GBK charset —
        # confirm whether errors='ignore' or utf-8 output is acceptable.
        with open(f'{self.book_name}.txt', 'w', encoding='gbk') as f:
            f.write(_s)

    def _run_init(self, catalogue_url):
        """Fetch the catalogue, create the book table, insert the chapter index.

        :return: the chapter list, or [] when the catalogue request failed.
        """
        logger.info("正在采集书籍信息...")
        text, runtime = self.__requests_200(catalogue_url, self.HEADER)
        if runtime >= 0:
            book_name, chapter_list = self.re_catalogue(text)
            self.book_name = book_name
        else:
            return []
        self.__sql.create_txt(book_name)
        _idd, _url, _title = self.__format_sql(chapter_list)
        logger.info(f"书名：{book_name}，共找到{len(_title)}章。")
        logger.info(f"数据库更新——_idd, _url, _title")
        self.__sql.insert(book_name,
                          ignore=True,
                          idd=_idd,
                          url=_url,
                          title=_title
                          )
        return chapter_list

    def _run_body(self, url_: str):
        """Worker-thread entry point: download one chapter body.

        1. fetch the page
        2. parse it and strip site boilerplate
        3. write the body into the chapter's row
        :param url_: chapter URL
        :raises BodyLenError: when the parsed body is implausibly short
        :raises TimeoutError: when the request failed after all retries
        """
        # Fresh connection per thread — the shared one is not thread-safe.
        sql = TxtMySQL('test', '123456', 'test')
        text_, runtime = self.__requests_200(url_, self.HEADER)
        if runtime >= 0:
            title, body = self.__re_mainBody(text_)
            if len(body) < 5:
                logger.error(f"文章的Body长度：{len(body)}小于预期长度{5}")
                raise BodyLenError(f"文章的Body长度：{len(body)}小于预期长度{5}")
            body = body.replace(url_, '')
            for _r in self.replace:
                body = body.replace(_r, '')
        else:
            self.not_download.append(url_)
            logger.error(f"URL:{url_}请求超时，错误返回！")
            raise TimeoutError(f"URL:{url_}请求超时，错误返回！")
        sql.update(table=self.book_name,
                   where_key='idd',
                   where_value=self.__get_idd(url_),
                   body=str(body)
                   )

    def __thread_wait(self, option: bool):
        """Poll once a second, printing a progress line.

        :param option: truthy — return as soon as a worker slot is free;
            falsy — return only when all worker threads have finished.
        """
        while True:
            # Thread.name replaces the deprecated getName() (same value).
            print(f"\r[{time.strftime('%H:%M:%S')}]正在下载："
                  f"{', '.join([t.name for t in threading.enumerate()])}",
                  end=' ')
            if option:  # +1 accounts for the main thread itself
                if len(threading.enumerate()) <= self.max_thread + 1:
                    return 0
            else:  # only the main thread remains
                if len(threading.enumerate()) == 1:
                    return 0
            time.sleep(1)

    def _run_thread(self, host, catalogue_list):
        """Spawn one download thread per chapter, throttled to max_thread."""
        for url, title in catalogue_list:
            threading.Thread(target=self._run_body, args=(host + url,), name=title).start()
            self.__thread_wait(1)  # block while the pool is full
        self.__thread_wait(0)  # wait for every worker to finish
        return 0

    def run(self, catalogue_url):
        """Top-level entry point: download the whole book behind *catalogue_url*.

        :return: -1 when the catalogue could not be fetched.
        :raises DownloadTXTError: on an unexpected thread-pool return code.
        """
        host = '/'.join(catalogue_url.split('/')[:3])  # scheme://netloc
        catalogue_list = self._run_init(catalogue_url)
        if len(catalogue_list) <= 0:
            return -1
        # Re-query so chapters that already have a body are skipped (resume).
        catalogue_list = self.__sql.select(self.book_name, 'url', 'title', WHERE='body is null', )
        # download every body with the thread pool
        status = self._run_thread(host, catalogue_list)

        if status == 0:
            self.__combine()
        else:
            raise DownloadTXTError(f'{self.book_name}.run()-出现错误的返回码：应该：0 得到：{status}')

    def test(self):
        """Manual smoke test: exercise the thread-wait loop once."""
        self.__thread_wait(1)


if __name__ == '__main__':
    # _url = 'https://www.biqumo.com/8_8521/'
    # DownloadTXT(html_encode='gbk').test()
    # Download one GBK-encoded book from biqumo with a longer timeout and
    # more retries than the defaults.
    a = DownloadTXT(html_encode='gbk', timeout=20, retry=10)
    a.run('https://www.biqumo.com/8_8521/')
