# BT之家
from myfun.crawler.soup import getHtmlSoup, UrlItem, Crawler


# Display name and tag of this crawler tab
TAB_NAME = 'BT之家'
TAB_TAG = 'btbbt'
# Site root URL
URL_MAIN = 'http://www.btbtt10.com/'
# Search page template: %s = keyword, %d = page number
URL_SEARCH = URL_MAIN + 'search-index-fid-0-orderby-timedesc-daterage-0-keyword-%s-page-%d.htm'
# "Pure movies" board template: %d = page number
URL_PURE = URL_MAIN + 'forum-index-fid-951-typeid1-3-typeid2-0-typeid3-0-typeid4-0-page-%d.htm'
# Topic categories a thread title must carry (inside 【】 or []) to be kept
INCLUDE_TITLES = ('剧集', '电影', '高清电影', '动漫', '游戏', '综艺', '纯净版')


# 包含主题
# Return True when *title* contains one of the allowed topic categories,
# wrapped either in full-width 【】 brackets or plain ASCII [] brackets.
def isInclude(title):
    return any(
        '【%s】' % tag in title or '[%s]' % tag in title
        for tag in INCLUDE_TITLES
    )


class BTBBT(Crawler):
    """Crawler for the BT之家 (btbtt) torrent forum.

    Implements the two-stage Crawler contract: getMovies() fills a result
    with thread titles/links, getAttach() resolves a thread into its
    downloadable attachment links.
    """

    def __init__(self):
        super(BTBBT, self).__init__()
        self.title = TAB_NAME
        self.tab = TAB_TAG

    # Fetch the movie list: keyword search when result.kwd is set,
    # otherwise the "pure movies" board. Mutates and returns *result*.
    def getMovies(self, result):
        super(BTBBT, self).getMovies(result)
        result.main = URL_MAIN
        if result.kwd:
            url = URL_SEARCH % (result.kwd, result.page)
        else:
            url = URL_PURE % result.page
        soup = getHtmlSoup(url)
        for td in soup.find_all('td', attrs={'class': 'subject'}):
            title = ''
            href = ''
            aas = td.find_all('a')
            # Rows with fewer than 4 links are not real thread rows — skip.
            if len(aas) < 4:
                continue
            for aa in aas:
                # .get() avoids a KeyError on <a> tags without a class
                # attribute (the original aa['class'] would crash there).
                if 'thread_icon' in aa.get('class', ()):
                    continue
                title += aa.text
                href = aa['href']
            if isInclude(title):  # keep only whitelisted topic categories
                # Strip search-result highlight markup left in the text.
                title = title.replace('<span class=red>', '')
                title = title.replace('</span>', '')
                result.list.append(UrlItem(title, href))

        # Derive the total page count from pagination links. Guard against
        # a missing pagination bar (the original indexed find_all(...)[0]
        # unconditionally -> IndexError on single-page results).
        pages = soup.find_all('div', attrs={'class': 'page'}, limit=1)
        if pages:
            # find_all('a') skips the NavigableString text nodes the
            # original child iteration would hit (no ['href'] on text).
            for aa in pages[0].find_all('a'):
                href = aa.get('href', '')
                if len(href) > 0:
                    # Page links end in '-<n>.htm'; ignore anything that
                    # does not parse as a number instead of raising.
                    tail = href.split('-')[-1].replace('.htm', '')
                    if tail.isdigit():
                        result.ttp = max(int(tail), result.ttp)

        return result

    # Resolve a thread page (result.url, relative to URL_MAIN) into its
    # attachment download links. Mutates and returns *result*.
    def getAttach(self, result):
        super(BTBBT, self).getAttach(result)
        result.main = URL_MAIN
        soup = getHtmlSoup(URL_MAIN + result.url)
        attach = soup.find_all('div', attrs={'class': 'attachlist'})

        if len(attach) == 1:
            for aa in attach[0].find_all('a', attrs={'target': '_blank', 'rel': 'nofollow'}):
                title = aa.text
                # The page links to a confirm dialog; rewrite it to the
                # direct download endpoint.
                href = aa['href'].replace('dialog', 'download')
                result.list.append(UrlItem(title, href))

        # With exactly one attachment, expose a ready-to-use absolute URL.
        if len(result.list) == 1:
            result.one = URL_MAIN + result.list[0].href

        result.isNone = len(result.list) == 0
        return result
