import os
import random
import time
from multiprocessing.dummy import Pool
import requests
from bs4 import BeautifulSoup, NavigableString, Tag
from tqdm import tqdm
from lxml import etree


class Chapter:
    """One downloaded chapter: its title and its extracted body text."""

    def __init__(self, name, content):
        # Title as shown in the novel's table of contents.
        self.name = name
        # Full chapter text, ready to be appended to the output file.
        self.content = content


class Args:
    """A (chapter-name, chapter-url) pair handed to Downloader.get_contents."""

    def __init__(self, name, url):
        # Display name of the chapter.
        self.name = name
        # Absolute URL of the chapter page.
        self.url = url


# Pool of desktop-browser User-Agent strings.  Downloader.__init__ picks one
# at random per instance so scraping requests look like ordinary traffic.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/61.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
]


class Downloader(object):
    """Fetches novel chapter pages and extracts their text.

    The chapter body is located in a ``<div>`` selected either by HTML id
    (``content_id``) or by CSS class (``content_class``); exactly one of the
    two is expected to be non-empty.
    """

    def __init__(self, target, content_id, content_class):
        # One random User-Agent per instance, so requests look less bot-like.
        self.headers = {
            'User-Agent': random.choice(user_agent_list),
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1'
        }
        self.target = target  # base URL of the novel (kept for callers)
        self.content_id = content_id
        self.content_class = content_class

    # 获取章节内容 — fetch one chapter page and extract its text.
    def get_contents(self, arg):
        """Download ``arg.url`` and return ``Chapter(arg.name, text)``.

        Returns None when no selector is configured or when the content
        ``<div>`` cannot be found on the page.
        """
        # BUG FIX: the spoofed headers built in __init__ were never sent with
        # the request; pass them along, and add a timeout so a stalled server
        # cannot hang a pool worker forever.
        req = requests.get(url=arg.url, headers=self.headers, timeout=30)
        req.encoding = req.apparent_encoding
        if self.content_id:
            content = BeautifulSoup(req.text, 'html.parser').find('div', id=self.content_id)
        elif self.content_class:
            content = BeautifulSoup(req.text, 'html.parser').find('div', class_=self.content_class)
        else:
            return None
        if content is None:
            # Explicit check replaces the former broad `except BaseException`
            # around `content.contents` (which only ever caught this case).
            print('content div not found: ' + arg.url)
            return None
        pieces = []
        for node in content.contents:
            # Bare text nodes longer than one char (skips lone newlines).
            if isinstance(node, NavigableString) and len(node) > 1:
                pieces.append(str(node))
            # Nested tags (<p> wrappers etc.) contribute their inner text.
            elif isinstance(node, Tag) and len(node.text) > 0:
                pieces.append(node.text)
        text = ''.join(piece + '\n\n' for piece in pieces)
        # NOTE(review): the original called text.replace('\s{2,3}', "\n");
        # str.replace is literal, not regex, so the line never matched
        # anything — dropped as dead code.  Use re.sub if whitespace
        # collapsing is actually wanted.
        name = arg.name
        print(name)
        # BUG FIX: the original tested *text* for U+FFFD (bad-decode marker)
        # but performed the replacement on *name*; test the string that is
        # actually rewritten.
        if '\ufffd' in name:
            name = name.replace('\ufffd', "零")
        return Chapter(name, text)

    @staticmethod
    def writer(name, text, abspath):
        """Append one chapter (title, blank line, body) to *abspath*.

        Creates the parent directory on demand.
        """
        parent = os.path.dirname(abspath)
        # exist_ok avoids the check-then-create race; the guard handles the
        # '' dirname of a bare filename, which makedirs would reject.
        if parent:
            os.makedirs(parent, exist_ok=True)
        with open(abspath, 'a', encoding='utf-8') as f:
            f.write(name + '\n\n')
            f.writelines(text)
            f.write('\n\n')

def down_one_novel(target, abspath, data_list, content_id, content_class):
    """Download every chapter of one novel and merge them into *abspath*.

    Args:
        target: base URL of the novel, forwarded to Downloader.
        abspath: path of the output text file; if it already exists the
            novel is skipped entirely.
        data_list: iterable of Args(name, url), one entry per chapter.
        content_id: HTML id of the content div (or falsy).
        content_class: CSS class of the content div (or falsy).
    """
    # Guard first: skip before constructing anything or printing the banner.
    if os.path.exists(abspath):
        print(abspath + ':已存在')
        return
    dl = Downloader(target, content_id, content_class)
    print('\n开始下载:' + abspath)
    start_time = time.time()
    # BUG FIX: the pool of 16 worker threads was never closed/joined; the
    # context manager releases them even if a download raises.
    with Pool(16) as my_pool:
        chapters = my_pool.map(dl.get_contents, data_list)
    print(abspath + ' 下载全部章节完成' + '->准备整合')
    for chapter in tqdm(chapters, desc=abspath):
        if chapter:  # get_contents returns None for failed chapters
            dl.writer(chapter.name, chapter.content, abspath)
    use_time = str(time.time() - start_time)
    print(abspath + ':下载用时' + use_time + 's')


if __name__ == '__main__':
    # Ad-hoc smoke test: fetch a single chapter from a sample novel page.
    downloader = Downloader('https://www.wqge.cc/133_133248/', 'content', '')
    sample = Args("hh", 'https://www.wqge.cc/133_133248/172299048.html')
    downloader.get_contents(arg=sample)
