# "Simple Anime" (36dm.club) crawler
from myfun.crawler.soup import getHtmlSoup, UrlItem, Crawler

# Display name and tab tag
TAB_NAME = '简单动漫'
TAB_TAG = 'jddm'
# Site base URL
URL_MAIN = 'https://www.36dm.club/'
# Search page (keyword, page number)
URL_SEARCH = URL_MAIN + 'search.php?keyword=%s&page=%d'


class JDDM(Crawler):
    """Crawler for the "Simple Anime" site (36dm.club).

    Fetches movie listing pages (front page, numbered pages, or keyword
    search) and resolves the download/attachment link on a detail page.
    """

    def __init__(self):
        super(JDDM, self).__init__()
        self.title = TAB_NAME
        self.tab = TAB_TAG

    # Fetch the movie list into `result.list` and the total page count
    # into `result.ttp`; returns the (mutated) result object.
    def getMovies(self, result):
        super(JDDM, self).getMovies(result)
        result.main = URL_MAIN
        # Choose the listing URL: keyword search > explicit page > front page.
        if result.kwd:
            url = URL_SEARCH % (result.kwd, result.page)
        elif result.page > 1:
            url = URL_MAIN + str(result.page) + '.html'
        else:
            url = URL_MAIN
        soup = getHtmlSoup(url)
        # Result rows alternate between the 'alt1' and 'alt2' CSS classes.
        for tr in soup.find_all('tr', attrs={'class': ['alt1', 'alt2']}):
            title = tr.text
            td = tr.find_all('td', attrs={'style': 'text-align:left;'}, limit=1)
            if not td:
                continue
            a = td[0].find_all('a', limit=1)
            if not a:
                continue
            result.list.append(UrlItem(title, a[0]['href']))

        # Derive the total page count from pagination links shaped like 'N.html'.
        div = soup.find_all('div', attrs={'class': 'pages clear'}, limit=1)
        if div:
            for aa in div[0].find_all('a'):
                href = aa['href']
                if not href:
                    continue
                try:
                    page_no = int(href.split('/')[-1].replace('.html', ''))
                except ValueError:
                    # BUG FIX: non-numeric pagination hrefs (e.g. search-mode
                    # query strings like 'search.php?keyword=..&page=2')
                    # previously raised an unhandled ValueError and aborted
                    # the whole listing; skip them instead.
                    continue
                result.ttp = max(page_no, result.ttp)

        return result

    # Resolve the attachment (download) URL from a detail page into
    # `result.one`; sets `result.isNone` when no download anchor exists.
    def getAttach(self, result):
        super(JDDM, self).getAttach(result)
        result.main = URL_MAIN
        # NOTE(review): assumes result.url is a path relative to URL_MAIN —
        # confirm it never arrives with a leading slash (double '//').
        soup = getHtmlSoup(URL_MAIN + result.url)
        a = soup.find_all('a', attrs={'id': 'download'}, limit=1)
        if len(a) == 1:
            result.one = URL_MAIN + a[0]['href']
        else:
            # No download anchor found on the page.
            result.isNone = True
        return result
