import os
import random
import time
from multiprocessing.dummy import Pool
import requests
from bs4 import BeautifulSoup, NavigableString, Tag
from tqdm import tqdm


class Chapter:
    """A downloaded chapter: its title and its body text."""

    def __init__(self, name, content):
        self.name = name        # chapter title
        self.content = content  # chapter body text (paragraphs separated by blank lines)

    def __repr__(self):
        # Debug-friendly repr; content omitted because it can be very long.
        return f'{type(self).__name__}(name={self.name!r})'


class Args:
    """A (chapter name, chapter URL) pair passed to Downloader.get_contents."""

    def __init__(self, name, url):
        self.name = name  # chapter title
        self.url = url    # absolute URL of the chapter page

    def __repr__(self):
        return f'{type(self).__name__}(name={self.name!r}, url={self.url!r})'


# Pool of desktop-browser User-Agent strings.  Downloader picks one at random
# per instance so repeated scraping requests look less uniform to the server.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/61.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
]


class Downloader(object):
    """Scrapes a novel site: collects chapter links from a table-of-contents
    page, downloads each chapter's text, and appends chapters to a local file.

    CSS selectors (tag / class / id) and other knobs come from ``config``,
    read via ``config.get(section, option)`` (ConfigParser-style — assumed;
    verify against the caller that builds it).
    """

    def __init__(self, server, target, config):
        # Browser-like headers; the UA is rotated per instance to make the
        # scraper's traffic look less uniform.
        self.headers = {
            'User-Agent': random.choice(user_agent_list),
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1'
        }
        self.server = server    # site root, prefixed to relative chapter hrefs
        self.target = target    # URL of the table-of-contents page
        self.config = config
        self.names = []         # chapter titles, aligned with self.urls
        self.urls = []          # absolute chapter URLs
        self.nums = 0           # number of chapters found

    # Collect every chapter's download URL and title from the TOC page.
    def get_download_url(self):
        father = self.config.get('Urls', 'father')
        id_ = self.config.get('Urls', 'id')
        class_name = self.config.get('Urls', 'class')
        start_url = self.config.get('Urls', 'start_url')

        # BUG FIX: the request previously ignored self.headers entirely,
        # so the randomized User-Agent was never sent.
        req = requests.get(url=self.target, headers=self.headers)
        req.encoding = req.apparent_encoding
        # gbk is a superset of gb2312, so rare characters still decode.
        # BUG FIX: compare case-insensitively — apparent_encoding's casing
        # is not guaranteed.
        if req.encoding and req.encoding.upper() == "GB2312":
            req.encoding = "gbk"
        # Parse once and reuse (the original built two BeautifulSoup trees
        # when both selectors were configured).
        soup = BeautifulSoup(req.text, 'html.parser')
        div = None
        if class_name != '':
            div = soup.find(father, class_=class_name)
        if id_ != '':
            div = soup.find(father, id=id_)
        # NOTE: if neither selector matched, div is None and find_all below
        # raises AttributeError — same observable failure as the original.
        all_a = div.find_all('a')
        start = int(start_url)
        # Skip the first `start` links (site navigation / "latest chapters").
        self.nums = len(all_a[start:])
        for a in all_a[start:]:
            self.names.append(a.string)
            self.urls.append(self.server + a.get('href'))

    # Download one chapter; returns a Chapter, or None on failure.
    def get_contents(self, arg):
        # BUG FIX: headers were not passed here either.
        req = requests.get(url=arg.url, headers=self.headers)
        req.encoding = req.apparent_encoding
        # Selector / formatting options from the config file.
        div = self.config.get('Content', 'father')
        id_ = self.config.get('Content', 'id')
        class_name = self.config.get('Content', 'class')
        print_name = self.config.get('Content', 'print_name')
        ad = self.config.get('Content', 'ad')

        soup = BeautifulSoup(req.text, 'html.parser')
        content = None
        if class_name != '':
            content = soup.find(div, class_=class_name)
        if id_ != '':
            content = soup.find(div, id=id_)
        try:
            # Each paragraph is a direct child node of the content element.
            contents = content.contents
        except AttributeError as e:
            # BUG FIX: was `except BaseException`, which also swallowed
            # KeyboardInterrupt/SystemExit.  Only AttributeError can occur
            # here (content is None when the selectors matched nothing).
            print('获取的响应码：：'+str(req.status_code)+'----异常：'+e.__str__()+'\n')
            return None
        parts = []
        for con in contents:
            # Plain text node longer than one character (skips bare newlines).
            if isinstance(con, NavigableString) and len(con) > 1:
                parts.append(str(con))
            # Wrapped paragraph tag with non-empty text.
            elif isinstance(con, Tag) and len(con.text) > 0:
                parts.append(con.text)
        # Blank line between paragraphs; ''.join avoids quadratic `+=`.
        text = ''.join(p + '\n\n' for p in parts)
        # Drop advertisement text the site appends after the chapter body.
        if ad and ad in text:
            text = text.split(ad)[0]
        name = arg.name
        # Replace the Unicode replacement char from bad decodes.
        # Guarded: a.string (stored in names) can be None for nested markup.
        if name and '\ufffd' in name:
            name = name.replace('\ufffd', "零")
        if print_name == 'True':
            print('下载：' + name)

        return Chapter(name, text)

    # Sequential download with an optional per-request delay (throttling).
    def singleDown(self, datas, sleepSec=0):
        results = []
        for arg in datas:
            chapter = self.get_contents(arg)
            if sleepSec > 0:
                time.sleep(sleepSec)  # be polite / avoid server-side bans
            results.append(chapter)
        return results

    @staticmethod
    def writer(name, text, abspath):
        """Append one chapter (title, blank line, body, blank line) to abspath."""
        path = os.path.dirname(abspath)
        # BUG FIX: dirname of a bare filename is '' and os.makedirs('')
        # raises FileNotFoundError — only create a directory if there is one.
        if path and not os.path.exists(path):
            os.makedirs(path)
        with open(abspath, 'a', encoding='utf-8') as f:
            f.write(name + '\n\n')
            f.writelines(text)
            f.write('\n\n')


def down_one_novel(server, target, abspath, config):
    """Download one whole novel and assemble it into a single text file.

    server:  site root used to build absolute chapter URLs.
    target:  table-of-contents page URL.
    abspath: output file path; the novel is skipped if it already exists.
    config:  ConfigParser-style settings ([Thread]/[Urls]/[Content] sections).
    """
    # BUG FIX: bool(config.get(...)) is True for ANY non-empty string,
    # including 'False', so multi-thread mode could never be selected.
    # Compare against 'True' like the rest of the file (see print_name).
    is_single = config.get('Thread', 'single') == 'True'
    core_num = int(config.get('Thread', 'coreNum'))
    sleep_sec = float(config.get('Thread', 'sleepSec'))

    # Check before any network work — the original fetched the whole TOC
    # page first and only then noticed the file already existed.
    if os.path.exists(abspath):
        print(abspath + ':已存在')
        return

    dl = Downloader(server, target, config)
    # 1. Collect every chapter URL from the TOC page.
    dl.get_download_url()
    data_list = [Args(dl.names[i], dl.urls[i]) for i in range(dl.nums)]
    print('\n开始下载:' + abspath)
    start_time = time.time()
    if is_single:
        # Sequential download with throttling.
        print('单线程下载')
        chapters = dl.singleDown(data_list, sleep_sec)
    else:
        # Thread-pool download.  BUG FIX: the pool was previously created
        # even in single-thread mode and was never closed (leaked threads).
        print('多线程下载')
        pool = Pool(core_num if core_num else 64)
        try:
            chapters = pool.map(dl.get_contents, data_list)
        finally:
            pool.close()
            pool.join()
    print(abspath + ' 下载全部章节完成' + '->准备整合')
    for chapter in tqdm(chapters, desc=abspath):
        # BUG FIX: get_contents returns None for failed chapters; writing
        # them crashed on chapter.name.  Skip failures instead.
        if chapter is not None:
            dl.writer(chapter.name, chapter.content, abspath)
    end_time = time.time()
    print(abspath + ':下载用时' + str(end_time - start_time) + 's')
