# coding: utf-8
import json
import re
from typing import List

from bs4 import BeautifulSoup, element
from loguru import logger

from app.common import APIBase, USER_AGENT, BookItemModel, HomeItemModel, SearchResultModel, BookInfoModel, \
    ChapterPhotoModel, getGMTTime


def getBookItem(bookList: element.Tag, base_url: str):
    """Extract comic entries from a listing container.

    :param bookList: tag containing ``div.item.ib`` comic cards.
    :param base_url: site root, prepended to relative detail-page links.
    :return: list of BookItemModel, one per card found.
    """
    models = []
    for card in bookList.find_all('div', attrs={'class': 'item ib'}):
        title_node = card.find('p', attrs={'class': 'title'})
        models.append(BookItemModel(
            title=title_node.text.strip(),
            info_url=base_url + title_node.find('a').get('href'),
            cover=card.find('img', attrs={'class': 'cover'}).get('src'),
            desc=card.find('p', attrs={'class': 'tip'}).text.strip(),
            source=Plugin
        ))
    return models


def container_to_model(container: element.Tag, base_url: str):
    """Build a HomeItemModel from one home-page section container.

    The section title comes from the logo image's ``alt`` text; some
    sections nest the image inside a ``span.logo``, others place it
    directly under the top bar. The optional subtitle span supplies
    the description.
    """
    top_bar = container.find('div', attrs={'class': 'topBar'})
    try:
        title = top_bar.find('span', attrs={'class': 'logo'}).find('img').get('alt')
    except AttributeError:
        # No span.logo wrapper — fall back to the bare <img> in the bar.
        title = top_bar.find('img').get('alt')
    subtitle = top_bar.find('span', attrs={'class': 'title ib'})
    desc = subtitle.text.strip() if subtitle is not None else ''
    return HomeItemModel(
        title=title,
        desc=desc,
        source=Plugin,
        comicList=getBookItem(container, base_url)
    )


class Plugin(APIBase):
    """Comic source plugin for manben.com.

    Scrapes the site's HTML (home, search, detail, and reader pages) and
    maps the results onto the app's shared models.
    """

    name = '漫本漫画'
    description = '漫本提供精品在线漫画阅读、推荐、评论、排行榜和数据分析。漫本是古惑仔官方发行平台，拥有古惑仔全球的电子版权，目前古惑仔已连载到89话。漫本是华人原创漫画发行平台，也是一个在线漫画的阅读平台。'
    author = '赤鸢'
    version = '1.0.0'
    icon = './plugins/manben.png'
    link = 'https://www.manben.com/'

    base_url = 'https://www.manben.com'
    headers = {
        'User-Agent': USER_AGENT,
        'Referer': 'https://www.manben.com'
    }

    @classmethod
    def home(cls) -> List[HomeItemModel]:
        """Scrape the front page into home sections.

        :return: sections in display order: originals, latest updates,
            boys' comics, girls' comics, newly listed.
        """
        soup = cls.url_to_bs4(cls.base_url)
        # Original works (原创精品)
        original = soup.select_one('body > div.original > div')
        original_model = container_to_model(original, cls.base_url)
        # Recently updated (最近更新)
        latest = soup.select_one('body > div.series > div')
        latest_model = container_to_model(latest, cls.base_url)
        # Left-hand category column: its header bars and book lists are
        # sibling elements, so stitch each bar/list pair back together into
        # one standalone document for container_to_model to parse.
        leftBar = soup.select_one('body > div.topic_1 > div > div.leftBar.ib')
        topBars = leftBar.find_all('div', attrs={'class': 'topBar'})
        bookList = leftBar.find_all('div', attrs={'class': 'bookList_1'})
        # Boys' comics (少年漫画)
        young_model = container_to_model(BeautifulSoup(topBars[0].prettify() + bookList[0].prettify(), 'lxml'),
                                         cls.base_url)
        # Girls' comics (少女漫画)
        girl_model = container_to_model(BeautifulSoup(topBars[1].prettify() + bookList[1].prettify(), 'lxml'),
                                        cls.base_url)
        # Newly listed (最新上架)
        newly = soup.select_one('body > div.rankListBak > div.topic_2 > div > div')
        newly_model = container_to_model(newly, cls.base_url)
        return [original_model, latest_model, young_model, girl_model, newly_model]

    @classmethod
    def search(cls, key: str) -> SearchResultModel:
        """Search the site by title keyword.

        :param key: search keyword, appended to the query string as-is.
        :return: SearchResultModel; comicList is empty when the site shows
            its "no results" placeholder image.
        """
        url = cls.base_url + '/search?title=' + key
        soup = cls.url_to_bs4(url)
        noneImg = soup.find('div', {'class': 'noneImg'})
        if noneImg:
            comicList = []
        else:
            bookList = soup.find('div', attrs={'class': 'bookList_2'})
            comicList = getBookItem(bookList, cls.base_url)
        return SearchResultModel(
            keyword=key,
            source=cls,
            comicList=comicList
        )

    @classmethod
    def info(cls, url: str) -> BookInfoModel:
        """Fetch a comic's detail page: metadata plus chapter list.

        :param url: absolute detail-page URL (as produced by home/search).
        :return: populated BookInfoModel with de-duplicated, title-sorted
            chapters.
        """
        logger.info(f'获取漫画章节信息: {url}')
        soup = cls.url_to_bs4(url)
        comicInfo = soup.find('div', attrs={'class': 'comicInfo'})
        div_img = comicInfo.find('div', {'class': 'img'})
        img = div_img.find('img')
        latest_a = div_img.find('a')

        cover_url = img.get('src')
        # Kept distinct from the chapter-loop variable below: the original
        # reused `title` and the loop clobbered the book title.
        book_title = img.get('title')
        # The author row reads like "作 者：X Y"; split on spaces, rejoin,
        # then strip the rejoined label fragment.
        author = ' , '.join(list(i for i in filter(None, comicInfo.select_one(
            'div.ib.info > p:nth-child(3) > span.ib.l').text.split(' ')))).replace('作 , 者： ,', '')
        gray = comicInfo.select_one('div.ib.info > p:nth-child(5)')
        tags = [s.text.strip() for s in gray.find_all('span')]
        description = comicInfo.select_one('div.ib.info > p.content').text.strip()

        updated_at = soup.select_one('#chapterList > div.topBar > span.fr')
        if updated_at:
            updated_at = updated_at.text.strip()
        else:
            updated_at = ''

        chapterlistload = soup.find('div', attrs={'id': 'chapterlistload'})
        chapters = []
        if chapterlistload:
            ibs = chapterlistload.find_all('a', attrs={'class': 'ib', 'target': '_blank'})
            for ib in ibs:
                # BUG FIX: the original had trailing commas on these two
                # assignments, turning each value into a 1-tuple and feeding
                # tuples into BookItemModel.
                chapter_title = ib.text.strip()
                chapter_href = cls.base_url + ib.get('href')
                chapters.append((chapter_title, chapter_href))
        # De-duplicate, then sort by chapter title for a stable order.
        new_chapters = [
            BookItemModel(title=chapter_title, photo_url=chapter_href, source=cls)
            for chapter_title, chapter_href in sorted(set(chapters), key=lambda c: c[0])
        ]
        return BookInfoModel(
            title=book_title,
            url=url,
            cover=cover_url,
            author=author,
            tags=tags,
            source=cls,
            desc=description,
            lastUpdate=updated_at,
            latestChapterName=latest_a.text.strip(),
            latestChapterUrl=cls.base_url + latest_a.get('href'),
            chapters=new_chapters,
            useBrowser=True
        )

    @classmethod
    def photos(cls, url: str) -> ChapterPhotoModel:
        """Resolve a chapter page to its list of image URLs.

        Two site layouts exist: a paginated reader whose images come from an
        ``imageshow.ashx`` endpoint, and a single-page reader whose image
        host/path are obfuscated inside inline JavaScript.

        :param url: absolute chapter URL.
        :return: ChapterPhotoModel with ordered image URLs.
        """
        logger.info(f'获取漫画章节图片: {url}')
        # NOTE: mutates the class-level headers dict — the image host checks
        # the Referer, so every subsequent request carries this chapter URL.
        cls.headers['Referer'] = url
        urls = []
        soup = cls.url_to_bs4(url)
        pagelist = soup.find('span', attrs={'class': 'pagelist'})
        # Concatenate all inline <script> bodies for the obfuscated branch.
        js_codes = ''
        for script in soup.find_all('script'):
            string = script.string
            if string:
                js_codes += string
        if pagelist:
            # Paginated reader: one ajax call per page.
            length = len(pagelist.find_all('a'))
            m = url.replace(cls.base_url, '')
            cid = re.search(r'/m(\d+)/', m).group(1)
            for i in range(1, length + 1):
                params = {
                    'd': getGMTTime(),
                    'cid': cid,
                    'page': i,
                    'showtype': '1',
                    'ispre': '0'
                }
                imageshow_url = cls.base_url + '/imageshow.ashx'
                response = cls.get(imageshow_url, params=params)
                # Response is a JS assignment: "var chapterimage={...};"
                chapterimage = response.text.replace('var chapterimage=', '').replace(';', '')
                chapterimage = json.loads(chapterimage)
                img_url = chapterimage['ImagePix'] + chapterimage['Images'][0]
                urls.append(img_url)
        else:
            # Single-page reader: host fragments and file names are packed
            # into a pipe-separated token list inside the inline JS.
            com = re.search(r"com\|\|cdndm5\|(.*?)'", js_codes).group(1)
            if com:
                com_list = com.split('|')
                # Fixed positions in the token list form the CDN base URL.
                u1 = com_list[3]
                u2 = com_list[13]
                u3 = com_list[6]
                u4 = com_list[5]
                u5 = com_list[2]
                u6 = com_list[1]
                u7 = com_list[0]
                u8 = com_list[7]
                bu = f'https://{u1}-{u2}-{u3}-{u4}-{u5}.cdndm5.com/{u6}/{u7}/{u8}/'
                com_list.sort()
                new_list = []
                for c in com_list:
                    l = c.split('_')
                    # Two-part tokens are plain pages; three-part tokens live
                    # under the "bar/" directory.
                    if len(l) == 2:
                        new_list.append(f'{c}.jpg')
                    elif len(l) == 3:
                        new_list.append(f'bar/{c}.jpg')
                for n in new_list:
                    urls.append(bu + n)
        return ChapterPhotoModel(
            width=970,
            min_height=760,
            urls=urls,
            source=cls
        )
