"""Exhentai 漫画下载
"""

import asyncio
import logging
import re
from threading import Thread as Worker
from typing import Union
from typing import Iterable

import requests as r
from bs4 import BeautifulSoup

from zom_spider_lib.abc import WebsiteSpiderABC
from zom_spider_lib.tools import cookie_str_to_dict

EXHENTAI_TIMEOUT = 10


class ExhentaiException(r.RequestException):
    """Raised when an exhentai request ultimately fails.

    Subclasses ``requests.RequestException`` so callers that already
    catch the requests exception hierarchy keep working unchanged.
    """


class ExhentaiGallarySpider(WebsiteSpiderABC):
    """Spider for a single exhentai gallery.

    Workflow:

    1. Fetch the gallery's first index page.
    2. Parse the titles and the per-image subpage links.
    3. Follow the remaining index pages until the whole range is covered.

    Required configuration

    :param str url: gallery address, e.g. https://exhentai.org/g/*/*
    :param str cookie: cookie string in "key=value; " style

    Result layout

    |  key   | type | meaning                      |
    |:------:|:----:|------------------------------|
    | title  | str  | gallery title                |
    | title2 | str  | gallery secondary title      |
    |  url   | str  | gallery address              |
    | pages  | dict | page number -> image address |

    Example

    >>> egs = ExhentaiGallarySpider()
    >>> egs.config(cookie=("ipb_member_id=*******; "
    ...                    "ipb_pass_hash=36ea***************4d1b; "
    ...                    "igneous=c0****a; sk=8ab*********jql0u7vc; "
    ...                    "u=4********g")
    ...           )
    >>> egs.run("https://exhentai.org/g/*/*")
    {
        "title": ...,
        "title2": ...,
        "url": ...,
        "pages": {
            1: ...,
            2: ...,
            ...
        }
    }
    """
    __version__ = "2019.11.23-1"
    # CSS selector for the element holding the gallery title
    PAGE_TITLE_SELECTOR = "#gn"
    PAGE_TITLE2_SELECTOR = "#gj"
    # CSS selector for the element showing the page range ("Showing f - t of total")
    PAGE_RANGE_SELETOR = "body > div.gtb > p.gpc"
    # CSS selector for the thumbnail anchors in the gallery index
    PAGE_MENU_SELECTOR = "#gdt > .gdtm > div > a"
    # CSS selector for the image element on a comic subpage
    PAGE_IMG_URL_SELECTOR = "#img"
    # Extracts the covered range from the gallery's page-range banner
    PAGE_RANGE_RE = re.compile(
        r"Showing (?P<f>\d+) - (?P<t>\d+) of (?P<total>\d+) images")
    # How many attempts each HTTP request gets before giving up
    MAX_RETRIES = 3

    def __init__(self, name=None):
        super().__init__(name=name)
        self.cookie = dict()
        self.session = r.Session()

        # NOTE: a private Logger instance (not logging.getLogger) is kept on
        # purpose: it has no parent, so records only go to the handler that
        # config() attaches and never propagate to the root logger.
        self.logger = logging.Logger(self.name)

        # Populated by run(); see the class docstring for the layout.
        self.result = dict()

    def config(self, cookie: Union[str, dict], log_level: Union[str, int] = 0):
        """Configure this spider.

        :param str|dict cookie: an exhentai cookie that is able to log in,
            either as a "key=value; " string or an already-parsed dict
        :param str|int log_level: level for the attached stream handler
        :raises TypeError: when *cookie* is neither str nor dict
        """
        if isinstance(cookie, str):
            self.cookie = cookie_str_to_dict(cookie)
        elif isinstance(cookie, dict):
            self.cookie = cookie
        else:
            raise TypeError(
                f"cookie must be str or dict, got {type(cookie).__name__}")

        logger_handler = logging.StreamHandler()
        logger_handler.setLevel(log_level)
        logger_handler.setFormatter(
            logging.Formatter(
                fmt="%(levelname)s %(asctime)s - %(pathname)s:%(lineno)s %(msg)s")
        )
        self.logger.addHandler(logger_handler)

        # Browser-like headers; exhentai rejects obviously scripted clients.
        self.session.headers.update(
            {
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0",
                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                "accept-encoding": "gzip, deflate, br",
                "dnt": "1",
                "connection": "keep-alive",
                "upgrade-insecure-requests": "1",
            }
        )
        self.session.cookies.update(self.cookie)

    def run(self, url: str) -> dict:
        """Collect every comic subpage address of one exhentai gallery.

        :param str url: gallery address
        :rtype: dict
        :return: the result dict (title, title2, url, pages); ``pages``
            maps page number -> image address
        :raises ExhentaiException: on repeated network failure or an
            unparseable gallery page
        """
        self.result = {
            "title": str(),
            "title2": str(),
            "url": url,
            "pages": dict()
        }
        # Index pages after the first are addressed as ?p=1, ?p=2, ...
        if not self.get_first_page():
            index = 1
            while not self.get_other_page(index):
                index += 1
        self.logger.info(f"success for {url}")
        return self.result

    def get_first_page(self) -> bool:
        """Fetch and parse the gallery's first index page.

        On success the discovered subpage addresses are merged into
        ``self.result``; on failure an ExhentaiException is raised.

        :return: True when the whole page range is covered, False when
            more index pages remain
        """
        resp = self._fetch(self.result["url"])
        return self._parse_index_page(resp.text)

    def get_other_page(self, num: int) -> bool:
        """Fetch and parse index page *num* (``?p=num``) of the gallery.

        :param int num: index-page number, starting at 1 for the second page
        :return: True once the final index page has been parsed, else False
        """
        url = self.result["url"]
        resp = self._fetch(f"{url}/?p={num}")
        return self._parse_index_page(resp.text)

    def _fetch(self, url: str) -> r.Response:
        """GET *url* through the configured session with retries.

        :raises ExhentaiException: when all retries fail or the final
            response status is not 200
        """
        for attempt in range(self.MAX_RETRIES):
            try:
                resp = self.session.get(url, timeout=EXHENTAI_TIMEOUT)
                self.logger.info(f"get page {resp.url}.")
                break
            except r.Timeout:
                self.logger.warning(f"request {url} timeout, retry {attempt}...")
            except r.ConnectionError:
                self.logger.warning(
                    f"request {url} with connection error, retry {attempt}...")
        else:
            # Loop exhausted without a break: every attempt failed.
            self.logger.error(f"{url} request max retry hit, quit")
            raise ExhentaiException(url)

        if resp.status_code != 200:
            raise ExhentaiException(resp.url, resp.status_code)
        return resp

    def _parse_index_page(self, html: str) -> bool:
        """Parse one gallery index page into ``self.result``.

        Page numbers are derived from the "Showing f - t of total"
        banner, so entries from later index pages extend ``pages``
        instead of overwriting the keys written by earlier ones.

        :return: True when this page's range reaches the gallery total
        """
        gallary = BeautifulSoup(html, "lxml")
        self.result["title"] = gallary.select_one(
            self.PAGE_TITLE_SELECTOR).text
        self.result["title2"] = gallary.select_one(
            self.PAGE_TITLE2_SELECTOR).text

        page_range = gallary.select_one(self.PAGE_RANGE_SELETOR).text
        page_status = self.PAGE_RANGE_RE.match(page_range)
        if page_status is None:
            raise ExhentaiException(
                f"cannot parse page range banner: {page_range!r}")

        # Key pages by their absolute position in the gallery.
        first = int(page_status["f"])
        for pn, purl in enumerate(
                self.yield_subpages_unwraped_url(gallary), start=first):
            self.result["pages"][pn] = purl

        return page_status["t"] == page_status["total"]

    def yield_subpages_unwraped_url(self, index_html: BeautifulSoup) -> Iterable[str]:
        """Yield the real image address for every subpage linked from *index_html*."""
        for anchor in index_html.select(self.PAGE_MENU_SELECTOR):
            yield self.unwrap_subpage_url(anchor["href"])

    def unwrap_subpage_url(self, wrapped_url: str) -> str:
        """Resolve a comic subpage address to the actual image address.

        Subpages obtained from the index live under the exhentai domain,
        while the image itself is hosted elsewhere; this fetches the
        subpage and extracts the image element's ``src``.

        :raises ExhentaiException: on repeated network failure or a
            non-200 response
        """
        resp = self._fetch(wrapped_url)
        subpage = BeautifulSoup(resp.text, "lxml")
        img = subpage.select_one(self.PAGE_IMG_URL_SELECTOR)
        return img["src"]
