# -*- coding: utf-8 -*-
"""
------------------------------------------------------------------------------
    File Name:  content_downloader
    Author   :  wanwei1029
    Date     :  2018/10/9
    Desc     : 此类主要下载小说的正文
------------------------------------------------------------------------------
"""
import time
import requests
import os
import traceback
from selenium import webdriver
# from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import samp.samp_logging as sl
import samp.spider.aikantxt.spider_utils as utils
from samp.spider.aikantxt.spider_utils import RedisClient
from requests.packages import urllib3

# Module-level logger; writes to <SAVE_DIR>/downloader.log via the project's logging helper.
logger = sl.get_logger("content_downloader", os.path.join(utils.SAVE_DIR, "downloader.log"))

# Site root that relative chapter hrefs (e.g. "/aikan1/123.html") are joined against.
BASE_URL = "https://www.aikantxt.la"


class ContentDownloader(object):
    """Downloads novel chapter text from aikantxt.la.

    Chapter content is fetched by POSTing the book id (``nbid``) and
    chapter id (``crid``), parsed out of the chapter page URL, to the
    site's ``content.php`` endpoint.
    """

    def __init__(self):
        # Browser-like User-Agent so the site does not reject the
        # default python-requests identification.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'
        }
        self.content_url = "https://www.aikantxt.la/content.php"
        # TLS verification is disabled on every request (verify=False);
        # silence the InsecureRequestWarning once here instead of on
        # every download() call.
        urllib3.disable_warnings()

    def download(self, url):
        """Fetch the chapter text behind *url*.

        :param url: chapter page URL shaped like
            ``https://www.aikantxt.la/aikan<nbid>/<crid>.html``
        :return: response body text on HTTP 200, otherwise ``None``.
        """
        if url is None:
            return None
        try:
            splits = url.split("/")
            # ".../aikan<nbid>/<crid>.html" -> chapter id and book id.
            crid = splits[-1].split(".")[0]
            nbid = splits[-2][5:]  # strip the "aikan" prefix
            # NOTE(review): "fid" looks like a fixed site-specific token;
            # confirm it is still accepted by the endpoint.
            data = {"nbid": nbid, "crid": crid, "fid": "fb96549631c835eb239cd614cc6b5cb7d295121a"}
            r = requests.post(self.content_url, data=data, headers=self.headers, verify=False, timeout=5)
            if r.status_code == 200:
                logger.info("first success download content: {0}".format(url))
                return r.text
            logger.error("error download content: {0}, status_code={1}".format(url, r.status_code))
        except Exception:
            # Network/parsing failures are logged and swallowed; callers
            # treat a None result as "download failed".
            logger.error("error download content: {0}".format(url))
            logger.error(traceback.format_exc())
        return None

    def get_flag(self, flag):
        """Return *flag* suffixed with a 10-minute time-bucket key.

        The suffix is ``<day>:<hour>:<minute // 10>`` from local time,
        e.g. ``"err:09:14:3"`` — presumably used to bucket Redis counters.
        """
        current_time = time.localtime()
        time_flag = time.strftime("%d:%H:", current_time)
        cur_min = int(current_time.tm_min / 10)
        return flag + time_flag + str(cur_min)


class ChapterListDownloader(object):
    """Parses a book's index page into a list of chapter descriptors."""

    @staticmethod
    def download(url, book_id):
        """Fetch and parse the chapter list for the book index page at *url*.

        :param url: book index page URL (e.g. ``https://www.aikantxt.la/aikan1``)
        :param book_id: numeric id prefixed onto the on-disk book folder name
        :return: list of chapter dicts still to be downloaded — chapters whose
            JSON file already exists on disk are skipped. ``None`` when *url*
            is ``None`` or the HTTP request does not return 200.
        """
        if url is None:
            return
        heads = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
        r = requests.get(url, headers=heads, timeout=10)
        if r.status_code != 200:
            return None
        # The site serves GBK-encoded pages.
        r.encoding = 'gbk'
        soup = BeautifulSoup(r.text, "html.parser")
        head_node = soup.find("head")
        # Book metadata is exposed through Open Graph novel meta tags.
        category = head_node.find(attrs={"property": "og:novel:category"})['content']
        book_name = head_node.find(attrs={"property": "og:novel:book_name"})['content']
        author = head_node.find(attrs={"property": "og:novel:author"})['content']
        update_time = head_node.find(attrs={"property": "og:novel:update_time"})['content']
        # Each chapter link sits inside a <dd> element.
        chapter_list = soup.find_all("dd")
        processed_chapter_list = list()
        short_url_set = set()
        # BUGFIX: build paths with os.path.join instead of hard-coded "\\"
        # separators so the already-downloaded check also works on
        # non-Windows hosts (identical result on Windows).
        book_path = os.path.join(utils.SAVE_DIR,
                                 utils.rem_special_char(str(book_id) + "_" + book_name))
        for chapter_item in chapter_list:
            short_url = chapter_item.contents[0]['href']
            # Index pages may repeat chapters; keep only the first occurrence.
            if short_url in short_url_set:
                continue
            short_url_set.add(short_url)
            chapter_dict = {
                'url': BASE_URL + short_url,
                'title': chapter_item.contents[0].string,
                'book_name': str(book_id) + "_" + book_name,
                'author': author,
                'update_time': update_time,
                'category': category,
            }
            # Skip chapters whose JSON file was already written to disk.
            title_replace = utils.rem_special_char(chapter_dict['title'])
            file_name = os.path.join(book_path, title_replace + ".json")
            if not os.path.exists(file_name):
                processed_chapter_list.append(chapter_dict)
        return processed_chapter_list


def demo():
    """Download one sample chapter and print its raw content."""
    sample_url = "https://www.aikantxt.la/aikan18681/12452994.html"
    downloader = ContentDownloader()
    print(downloader.download(sample_url))


def chapter():
    """Fetch the chapter list for book id 1 and print how many chapters remain."""
    index_url = "https://www.aikantxt.la/aikan1"
    chapters = ChapterListDownloader.download(index_url, 1)
    if chapters:
        print(len(chapters))


if __name__ == '__main__':
    # Pick which manual smoke test to run when executing this module directly.
    test_method = "chapter"
    dispatch = {"demo": demo, "chapter": chapter}
    if test_method in dispatch:
        dispatch[test_method]()
