# coding: utf-8
import json
from typing import List
from urllib.parse import urlparse, parse_qs

from bs4 import element
from loguru import logger

from app.common import APIBase, USER_AGENT, BookItemModel, HomeItemModel, SearchResultModel, BookInfoModel, \
    ChapterPhotoModel


def get_book(soup: element.Tag, url: str) -> BookItemModel:
    """Extract one comic entry from a listing/search card element.

    Args:
        soup: the card container tag holding a single comic's markup.
        url: site base URL, prepended to the card's relative ``href``.

    Returns:
        A ``BookItemModel`` with title, cover, info URL, tags and
        description scraped from the card.
    """
    # Prefer the dedicated poster anchor; some cards omit the class,
    # so fall back to the first <a> inside the card.
    book_a = soup.find('a', attrs={'class': 'comics-card__poster text-decoration-none'})
    if not book_a:
        book_a = soup.find('a')
    # Covers are lazy-loaded AMP images; the real URL sits in ``src``.
    cover_url = book_a.find('amp-img').get('src')
    tags = book_a.find_all('span', attrs={'class': 'tab fl text-truncate'})
    description = soup.find('small', attrs={'class': 'tags text-truncate'})
    return BookItemModel(
        title=book_a.get('title').strip(),
        cover=cover_url,
        info_url=url + book_a.get('href'),
        tags=[tag.text.strip() for tag in tags],
        # Some cards carry no description block; avoid AttributeError on None.
        desc=description.text.strip() if description else '',
        source=Plugin
    )


def url_parse(url: str) -> str:
    """Rewrite a chapter link into the site's canonical reader path.

    Chapter anchors carry the identifying pieces as query parameters;
    this extracts them and builds the stable
    ``/comic/chapter/<comic_id>/<section>_<chapter>.html`` path.

    Args:
        url: chapter URL containing ``comic_id``, ``section_slot`` and
            ``chapter_slot`` query parameters.

    Returns:
        The relative reader path for the chapter.

    Raises:
        KeyError: if any of the three expected parameters is missing.
    """
    query = parse_qs(urlparse(url).query)
    # parse_qs maps every key to a list of values; keep only the first.
    params = {key: values[0] for key, values in query.items()}
    return '/comic/chapter/%s/%s_%s.html' % (
        params['comic_id'], params['section_slot'], params['chapter_slot'])


class Plugin(APIBase):
    """Scraper plugin for cn.baozimhcn.com (包子漫画)."""

    name = '包子漫画'
    description = '包子漫画网，全网最全的包子漫画资源网站。'
    link = 'https://cn.baozimhcn.com/'
    icon = './plugins/baozi.png'

    base_url = 'https://cn.baozimhcn.com'
    headers = {'user-agent': USER_AGENT, 'Referer': 'https://cn.baozimhcn.com'}

    @classmethod
    def home(cls) -> List[HomeItemModel]:
        """Fetch the landing page and return its comic sections."""
        logger.info('获取包子漫画首页列表')
        soup = cls.url_to_bs4(cls.base_url)
        content_list = soup.find('div', {'class': 'mt-5 index'}).find_all('div', {'class': 'l-content'})
        contents = []
        # The last l-content block is not a comic section; skip it.
        for content in content_list[:-1]:
            title = content.find('div', {'class': 'catalog-title'}).text.strip()
            more = content.find('a', {'class': 'more fr'})
            # Not every section offers a "more" link.
            more_url = more.get('href') if more else ''
            items = content.find_all('div', {'class': 'comics-card pure-u-1-3 pure-u-md-1-4 pure-u-lg-1-6'})
            contents.append(HomeItemModel(
                title=title,
                more_url=cls.base_url + more_url,
                comicList=[get_book(item, cls.base_url) for item in items],
                source=cls
            ))
        logger.info('获取包子漫画首页列表成功')
        return contents

    @classmethod
    def search(cls, key: str) -> SearchResultModel:
        """Search the site for *key* and return the matching comics."""
        url = cls.base_url + '/search'
        querystring = {"q": key}
        logger.info(f'搜索{cls.name}漫画：{key}')
        soup = cls.url_to_bs4(url, params=querystring)
        pures = soup.select_one('#layout > div.l-content.search.mt-5 > div.pure-g.classify-items').find_all('div', {
            'class': 'comics-card pure-u-1-2 pure-u-sm-1-2 pure-u-md-1-4 pure-u-lg-1-6'})
        comicList = [get_book(pure, cls.base_url) for pure in pures]
        logger.info(f'搜索{cls.name}漫画：{key}成功')
        return SearchResultModel(
            keyword=key,
            source=cls,
            comicList=comicList,
        )

    @classmethod
    def info(cls, url: str) -> BookInfoModel:
        """Fetch a comic's detail page: metadata plus its chapter list.

        Bug fix vs. the original: ``title`` was reused for both the book
        and each chapter, so the returned model carried the last
        chapter's name instead of the book title. Chapter fields now use
        distinct names.
        """
        logger.info(f'获取{cls.name}漫画章节信息：{url}')
        soup = cls.url_to_bs4(url)
        detail_div = soup.select_one('#layout > div.comics-detail > div.de-info-wr > div.l-content > div')
        cover_url = detail_div.find('amp-img').get('src')
        title = detail_div.find('h1', attrs={'class': 'comics-detail__title'}).text.strip()
        author = detail_div.find('h2', attrs={'class': 'comics-detail__author'}).text.strip()
        tags = [span.text.strip() for span in detail_div.find_all('span', attrs={'class': 'tag'})]
        latestChapter = soup.select_one(
            '#layout > div.comics-detail > div.de-info-wr > div.l-content > div > div.pure-u-1-1.pure-u-sm-2-3.pure-u-md-3-4 > div > div.supporting-text.mt-2 > div:nth-child(2) > span > a')
        try:
            chapter_items = soup.find_all('a', {'class': 'comics-chapters__item'})
        except AttributeError:
            chapter_items = []
        try:
            latestChapterName = latestChapter.text.strip()
            latestChapterUrl = cls.base_url + url_parse(latestChapter.get('href'))
        except AttributeError:
            # Detail page has no "latest chapter" link.
            latestChapterName = ''
            latestChapterUrl = ''
        # Chapters appear more than once on the page; deduplicate via a set,
        # then sort lexicographically by chapter name (original ordering).
        chapter_pairs = {
            (item.text.strip(), cls.base_url + url_parse(item.get('href')))
            for item in chapter_items
        }
        new_chapters = [
            BookItemModel(title=chapter_title, photo_url=chapter_url, source=cls)
            for chapter_title, chapter_url in sorted(chapter_pairs, key=lambda pair: pair[0])
        ]

        logger.info(f'获取{cls.name}漫画章节信息：{url}成功')
        return BookInfoModel(
            title=title,
            author=author,
            cover=cover_url,
            url=url,
            tags=tags,
            desc=detail_div.find('p', attrs={'class': 'comics-detail__desc overflow-hidden'}).text.strip(),
            source=cls,
            chapters=new_chapters,
            latestChapterName=latestChapterName,
            latestChapterUrl=latestChapterUrl
        )

    @classmethod
    def photos(cls, url: str) -> ChapterPhotoModel:
        """Fetch a chapter page and return its image URLs in reading order."""
        logger.info(f'获取{cls.name}漫画图片：{url}')
        soup = cls.url_to_bs4(url)
        ul = soup.find('ul', {'class': 'comic-contain'})
        # Each image is described by a JSON payload embedded in a <script>.
        urls = [json.loads(script.text)['url'] for script in ul.find_all('script')]
        logger.info(f'获取{cls.name}漫画图片：{url}成功')
        return ChapterPhotoModel(
            width=970,
            min_height=760,
            urls=urls,
            source=cls
        )

    # @classmethod
    # def classify(cls) -> List[ComicTypeModel]:
    #     result = [
    #         [
    #             ComicTypeModel(name='全部', key='region', value='all'),
    #             ComicTypeModel(name='国漫', key='region', value='en'),
    #             ComicTypeModel(name='日本', key='region', value='jp'),
    #             ComicTypeModel(name='韩国', key='region', value='kr'),
    #             ComicTypeModel(name='欧美', key='region', value='en')
    #         ],
    #         [
    #             ComicTypeModel(name='全部', key='state', value='all'),
    #             ComicTypeModel(name='连载中', key='state', value='serial'),
    #             ComicTypeModel(name='已完结', key='state', value='pub')
    #         ],
    #         [
    #             ComicTypeModel(name='全部', key='type', value='all'),
    #             ComicTypeModel(name='恋爱', key='type', value='lianai'),
    #             ComicTypeModel(name='纯爱', key='type', value='chunai'),
    #             ComicTypeModel(name='古风', key='type', value='gufeng'),
    #             ComicTypeModel(name='异能', key='type', value='yineng'),
    #             ComicTypeModel(name='悬疑', key='type', value='xuanyi'),
    #             ComicTypeModel(name='剧情', key='type', value='juqing'),
    #             ComicTypeModel(name='科幻', key='type', value='kehuan'),
    #             ComicTypeModel(name='奇幻', key='type', value='qihuan'),
    #             ComicTypeModel(name='玄幻', key='type', value='xuanhuan'),
    #             ComicTypeModel(name='穿越', key='type', value='chuanyue'),
    #             ComicTypeModel(name='冒险', key='type', value='mouxian'),
    #             ComicTypeModel(name='推理', key='type', value='tuili'),
    #             ComicTypeModel(name='武侠', key='type', value='wuxia'),
    #             ComicTypeModel(name='格斗', key='type', value='gedou'),
    #             ComicTypeModel(name='战争', key='type', value='zhanzheng'),
    #             ComicTypeModel(name='热血', key='type', value='rexie'),
    #             ComicTypeModel(name='搞笑', key='type', value='gaoxiao'),
    #             ComicTypeModel(name='大女主', key='type', value='danuzhu'),
    #             ComicTypeModel(name='都市', key='type', value='dushi'),
    #             ComicTypeModel(name='总裁', key='type', value='zongcai'),
    #             ComicTypeModel(name='后宫', key='type', value='hougong'),
    #             ComicTypeModel(name='日常', key='type', value='richang'),
    #             ComicTypeModel(name='韩漫', key='type', value='hanman'),
    #             ComicTypeModel(name='少年', key='type', value='shaonian'),
    #             ComicTypeModel(name='其它', key='type', value='qita')
    #         ],
    #         [
    #             ComicTypeModel(name='全部', key='filter', value='*'),
    #             ComicTypeModel(name='ABCD', key='filter', value='ABCD'),
    #             ComicTypeModel(name='EFGH', key='filter', value='EFGH'),
    #             ComicTypeModel(name='IJKL', key='filter', value='IJKL'),
    #             ComicTypeModel(name='MNOP', key='filter', value='MNOP'),
    #             ComicTypeModel(name='QRST', key='filter', value='QRST'),
    #             ComicTypeModel(name='UVW', key='filter', value='UVW'),
    #             ComicTypeModel(name='XYZ', key='filter', value='XYZ'),
    #             ComicTypeModel(name='0-9', key='filter', value='0-9')
    #         ]
    #     ]
    #     return result

    # @classmethod
    # def classify_api(cls, params: dict = None, *args, **kwargs) -> ComicClassifyModel:
    #     logger.info(f'获取{cls.name}漫画分类信息：{params}')
    #     url = kwargs.pop('url', None)
    #     if not url:
    #         url = cls.base_url + '/api/bzmhq/amp_comic_list'
    #         params.update({'__amp_source_origin': cls.base_url, 'limit': 36})
    #     bs_url = 'https://static-tw.baozimhcn.com/cover/'
    #     response = cls.send(url).json()
    #     result = []
    #     for item in response['items']:
    #         result.append(
    #             ComicChapterModel(
    #                 title=item['name'],
    #                 url=cls.base_url + '/comic/' + item['comic_id'],
    #                 cover_url=bs_url + item['topic_img'],
    #                 author=item['author'],
    #                 tags=item['type_names'],
    #                 source=cls.name
    #             )
    #         )
    #     params['page'] += 1
    #     next_url = f'{cls.base_url}/api/bzmhq/amp_comic_list?{urlencode(params)}'
    #     return ComicClassifyModel(items=result, next_url=next_url)
