# coding=utf-8
#!/usr/bin/python
# by eaudouce
import sys, requests, time, json, re
sys.path.append('..')
from base.spider import Spider

class Spider(Spider):  # intentionally shadows the imported base class (plugin convention for this loader)
    """Scraper plugin for the Bumimi (布米) video site.

    Class-level attributes hold static site configuration: the publisher
    URL used to discover the current mirror, default request headers, the
    category/filter definitions shown in the UI, and the mapping of
    two-letter play-line codes to human-readable line names.
    """
    site_name = 'xk_布米'  # display name reported by getName()

    # Publisher page that always links to the current mirror; the real
    # site address is resolved lazily by getSiteUrl().
    pub_url = 'http://www.bumimi.vip/'
    site_url = ''  # resolved mirror base URL; empty until getSiteUrl() runs
    cookies = ''
    # NOTE(review): shared class-level dict — callers must copy before
    # adding per-request headers such as Origin/Referer.
    headers = {
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36'
    }

    # UI configuration consumed by homeContent(): 'class' lists the four
    # top-level categories; 'filter' lists, per category, the year / type /
    # area options. The value '0' (or 'all' for area at request time) means
    # "no filter". Year options run from the current year back to 2015,
    # plus a catch-all "更早" ("earlier") bucket.
    config = {
        'player': {},
        'class': [
            {'type_name':'电视剧', 'type_id':'tv'},
            {'type_name':'电影', 'type_id':'mov'},
            {'type_name':'综艺', 'type_id':'zongyi'},
            {'type_name':'动漫', 'type_id':'acg'}
        ],
        'filter': {
            'tv': [{
                'key': 'year',
                'name': '年代',
                'value': [{'n': '年代', 'v': '0'}] + [{'n': str(i), 'v': str(i)} for i in range(int(time.localtime().tm_year), 2014, -1)] + [{'n': '更早', 'v': '2000'}]
            }, {
                'key': 'cate',
                'name': '类型',
                'value': [{'n': '类型', 'v': '0'}] + [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in '古装,19#言情,20#武侠,21#偶像,22#家庭,23#青春,24#都市,25#爱情,26#喜剧,27#战争,28#军旅,29#谍战,30#悬疑,31#罪案,32#穿越,33#宫廷,34#历史,35#神话,36#科幻,37#年代,38#农村,39#商战,40#剧情,41#奇幻,42'.split('#')]
            }, {
                'key': 'area',
                'name': '地区',
                'value': [{'n': '地区', 'v': '0'}] + [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in '国产,china#港剧,hk#台剧,tw#美剧,american#韩剧,korea#泰剧,thailand#日剧,japan#英剧,england'.split('#')]
            }],
            'mov': [{
                'key': 'year',
                'name': '年代',
                'value': [{'n': '年代', 'v': '0'}] + [{'n': str(i), 'v': str(i)} for i in range(int(time.localtime().tm_year), 2014, -1)] + [{'n': '更早', 'v': '2000'}]
            }, {
                'key': 'cate',
                'name': '类型',
                'value': [{'n': '类型', 'v': '0'}] + [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in '喜剧,1#悲剧,2#爱情,3#动作,4#枪战,5#犯罪,6#惊悚,7#恐怖,8#悬疑,9#动画,10#家庭,11#奇幻,12#魔幻,13#科幻,14#战争,15#青春,16#剧情,17'.split('#')]
            }, {
                'key': 'area',
                'name': '地区',
                'value': [{'n': '地区', 'v': '0'}] + [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in '国产,china#香港,hk#美国,american#英国,england#韩国,korea#泰国,thailand#日本,japan#印度,india'.split('#')]
            }],
            'zongyi': [{
                'key': 'year',
                'name': '年代',
                'value': [{'n': '年代', 'v': '0'}] + [{'n': str(i), 'v': str(i)} for i in range(int(time.localtime().tm_year), 2014, -1)] + [{'n': '更早', 'v': '2000'}]
            }, {
                'key': 'cate',
                'name': '类型',
                'value': [{'n': '类型', 'v': '0'}] + [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in '播报,43#访谈,44#搞笑,45#游戏,46#选秀,47#时尚,48#情感,49#晚会,50#曲艺,51#美食,52#少儿,53#脱口秀,54#职场,55#相亲,56#音乐,57#伦理,58#真人秀,59#舞蹈,60#亲子,61#财经,62#旅游,63#益智,64#竞技,65#纪实,66#生活,67#盛会,68#歌舞,69#其它,70'.split('#')]
            }, {
                'key': 'area',
                'name': '地区',
                'value': [{'n': '地区', 'v': '0'}] + [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in '中国,china#英国,england#美国,american#韩国,korea#泰国,thailand#日本,japan'.split('#')]
            }],
            'acg': [{
                'key': 'year',
                'name': '年代',
                'value': [{'n': '年代', 'v': '0'}] + [{'n': str(i), 'v': str(i)} for i in range(int(time.localtime().tm_year), 2014, -1)] + [{'n': '更早', 'v': '2000'}]
            }, {
                'key': 'cate',
                'name': '类型',
                'value': [{'n': '类型', 'v': '0'}] + [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in '搞笑,71#经典,72#热血,73#催泪,74#治愈,75#猎奇,76#励志,78#战斗,80#后宫,81#机战,82#恋爱,84#百合,85#科幻,86#奇幻,88#推理,89#校园,90#运动,91#魔法,94#历史,95#伪娘,101#美少女,102#萝莉,103#亲子,105#青春,107#冒险,108#竞技,109'.split('#')]
            }, {
                'key': 'area',
                'name': '地区',
                'value': [{'n': '地区', 'v': '0'}] + [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in '国产,china#日本,japan#英国,england#美国,american#韩国,korea'.split('#')]
            }]
        }
    }

    # Play-line code (as it appears in the site's playarr_<code> JS vars,
    # matched in detailContent) -> human-readable line name.
    lines = {'zd':'最大','yj':'永久','hn':'牛牛','gs':'光波','sn':'新朗','wl':'涡轮','lz':'良子','fs':'F速','ff':'飞飞','bd':'百度','uk':'酷U','wj':'无天','bj':'八戒','tk':'天空','ss':'速速','kb':'酷播','sd':'闪电','xk':'看看','tp':'淘淘','jy':'精英','sl':'sl线'}

    def getSiteUrl(self):
        """Resolve the live mirror domain and cache it on self.site_url.

        The publisher page's first anchor points at the current mirror;
        surrounding slashes are stripped so callers can append paths.
        """
        resp = self.fetch(self.pub_url, headers=self.headers)
        doc = self.html(self.cleanText(resp.text))
        site = doc.xpath('//a/@href')[0].strip('/')
        self.site_url = site
        return site

    def getName(self):
        """Return the human-readable name of this spider/site."""
        return self.site_name

    def init(self, extend=''):
        """No per-instance initialisation is required for this site."""
        pass

    def homeContent(self, filter):
        """Return the category list, plus filter definitions when requested."""
        data = {'class': self.config['class']}
        if filter:
            data['filters'] = self.config['filter']
        return data

    def homeVideoContent(self):
        """Scrape the site home page and return its featured video list."""
        if not self.site_url:
            self.getSiteUrl()

        resp = self.fetch(self.site_url, headers=self.headers)
        doc = self.html(self.cleanText(resp.text))
        videos = []
        for anchor in doc.xpath("//li[@class='y-newfigure']/a"):
            title = anchor.xpath("./div/p/span[@class='s1']/text()")[0].strip()
            cover = anchor.xpath("./div/img/@data-img")[0].strip()
            note = anchor.xpath("./div[@class='y-newfigure-detail']/p[@class='y-newfigure-desc']/text()")[0].strip()
            link = anchor.xpath("./@href")[0].strip()
            # Normalise to an absolute URL with a trailing slash (the id
            # format the detail page expects).
            if not link.startswith('http'):
                link = self.site_url + link
            if not link.endswith('/'):
                link = link + '/'
            videos.append({
                'vod_id': link,
                'vod_name': title,
                'vod_pic': cover,
                'vod_remarks': note
            })
        return {'list': videos}

    def categoryContent(self, tid, pg, filter, extend):
        """List videos for one category page.

        tid: category id ('tv' | 'mov' | 'zongyi' | 'acg').
        pg: page number as a string.
        filter: unused UI flag (kept for interface compatibility).
        extend: dict of selected filter values keyed by 'year'/'cate'/'area'.

        The category HTML page only embeds (in JS vars) the real AJAX list
        endpoint and a token; a second request to that endpoint returns the
        actual item markup.
        """
        if '' == self.site_url:
            self.getSiteUrl()

        limit = 30  # items per page served by the list endpoint
        params = {
            'year': '0',
            'cate': '0',
            'area': 'all'
        }
        for key in params:
            if key in extend and len(extend[key]) > 0:
                params[key] = extend[key]
        params['site_url'] = self.site_url
        params['page'] = pg
        params['tid'] = tid
        url = '{site_url}/{tid}/{year}/{cate}/{area}/{page}.html'.format(**params)

        rsp = self.fetch(url, headers=self.headers)
        # Pull the AJAX endpoint and its anti-scrape token out of the page JS.
        list_url = self.regStr(rsp.text, 'var _yu_gda_s="(\\S*?)"')
        dect = self.regStr(rsp.text, 'var dect="(\\S*?)"')

        # Copy the shared class-level headers so Origin/Referer do not leak
        # into self.headers for every subsequent request.
        headers = dict(self.headers)
        headers['Origin'] = self.site_url
        headers['Referer'] = self.site_url
        rsp = self.fetch('{}?dec={}&action={tid}&year={year}&class={cate}&area={area}&page={page}&id='.format(list_url, dect, **params), headers=headers)
        root = self.html(self.cleanText(rsp.text))
        videos = []
        for a in root.xpath("//li/a"):
            name = a.xpath('.//span[@class="s1"]/text()')[0]
            pic = a.xpath('.//img/@data-img')[0]
            mark = ' '.join(a.xpath(".//span[@class='hint']/text()")).strip()
            sid = a.xpath("./@href")[0]
            if not sid.startswith('http'):
                sid = self.site_url + sid
            if not sid.endswith('/'):
                sid = sid + '/'
            videos.append({
                'vod_id': sid,
                'vod_name': name,
                'vod_pic': pic,
                'vod_remarks': mark
            })

        # The endpoint does not report totals; infer them from page fill:
        # a short page is the last one, a full page implies at least one more.
        if len(videos) < limit:
            pagecount = int(pg)
            total = (int(pg) - 1) * limit + len(videos)
        else:
            pagecount = int(pg) + 1
            total = int(pg) * limit + 1

        return {
            'list': videos,
            'page': pg,
            'pagecount': pagecount,
            'limit': limit,
            'total': total
        }

    def detailContent(self, array):
        """Build the detail record (metadata plus play lists) for one video.

        array[0] is the vod_id produced by the listing/search methods — a
        site URL (normalised here to absolute with a trailing slash). The
        episode lists are scraped from the per-video s<id>.js script that
        the detail page references.
        """
        if '' == self.site_url:
            self.getSiteUrl()

        url = array[0]
        if not url.startswith('http'):
            url = self.site_url + url
        if not url.endswith('/'):
            url = url + '/'

        # http://host/<type>/<numeric id>/ -> element 4 is the numeric id
        # that names the episode script (s<id>.js).
        tid = url.split('/')[4]
        rsp = self.fetch(url, headers=self.headers)
        root = self.html(self.cleanText(rsp.text))

        # Some mirrors serve only a "备用线路" (backup line) link; rebuild
        # the detail URL on the backup host and re-fetch.
        bak_url = root.xpath('//body/a/*[contains(text(), "备用线路")]/../@href')
        if len(bak_url) > 0:
            url = '/'.join(bak_url[0].split('/')[:3]) + '/' + '/'.join(url.split('/')[3:5]) + '/'
            rsp = self.fetch(url, headers=self.headers)
            root = self.html(self.cleanText(rsp.text))

        # Two page layouts exist; detect which one we got.
        info = root.xpath("//div[contains(@class,'p-top-main')]")
        detail_type = 1
        if len(info) < 1:
            info = root.xpath("//div[contains(@class,'content')]")
            detail_type = 2
        if len(info) < 1:
            return {'list':[]}

        # Locate and fetch the per-video episode script.
        line_url = self.regStr(rsp.text, r'src="([^"]+s{}\.js[^"]*)"'.format(tid))
        # Copy the shared class-level headers so the per-request Referer
        # does not leak into self.headers.
        headers = dict(self.headers)
        headers['Referer'] = url
        rsp = self.fetch(line_url, headers=headers)

        # Each play line is declared as: var playarr_<code> = new Array(...)
        lines = re.findall(r'var\s+playarr_([a-z]{2})\s*=\s*new\s+Array', rsp.text)
        vod_play_from = []
        vod_play_url = []
        for i in lines:
            if i in self.lines:
                vod_play_from.append(self.lines[i])
                vod_list = re.findall(r'playarr_{}\[(\d+)\]\s*=\s*"(.*?)\,[\-\d]+\,(.*?)"'.format(i), rsp.text)
                vod_items = []
                for v in vod_list:
                    # Episode titles are %uXXXX-escaped; rewrite to \uXXXX and
                    # decode via latin-1 + unicode-escape to real text.
                    vod_items.append('{}${}'.format(v[2].replace('%u', '\\u').encode('latin-1').decode('unicode-escape'), v[1]))
                vod_play_url.append('#'.join(vod_items))

        info = info[0]
        # The episode count is declared in the same JS file (lianzaijs_* var).
        remarks = self.regStr(rsp.text, 'lianzaijs_.*?=(.*?);')
        if len(remarks) > 0:
            remarks = '更新至' + remarks + '集'
        if 1 == detail_type:
            pic = info.xpath(".//div[contains(@class,'b-detailcover')]/img/@data-img")[0].strip()
            title = info.xpath(".//h1/text()")[0].strip()
            top_info = info.xpath(".//div[@id='js-desc-switch']")[0]
            type_name = '/'.join(top_info.xpath(".//p/span[contains(text(), '类型')]/following-sibling::a/text()")).strip()
            area = '/'.join(top_info.xpath(".//p/span[contains(text(), '地区')]/following-sibling::a/text()")).strip()
            actor = '/'.join(top_info.xpath(".//p/span[contains(text(), '主演') or contains(text(), '主持人')]/text()")).replace('主演：', '').replace('主持人：', '').strip()
            director = '/'.join(top_info.xpath(".//p/span[contains(text(), '导演')]/following-sibling::a/text()")).strip()
            year = '/'.join(top_info.xpath(".//p/span[contains(text(), '年代')]/following-sibling::a/text()")).strip()
            detail = ''.join(top_info.xpath(".//p[contains(@class, 'item-desc')]/text()")).strip()
        else:
            pic = info.xpath(".//div[contains(@class,'pic')]/img/@data-original")[0].strip()
            title = info.xpath(".//div[contains(@class,'pic')]/img/@alt")[0].strip()
            type_name = '/'.join(info.xpath(".//div[contains(@class,'info')]/dl/dd/b[contains(text(), '类型')]/following-sibling::a/text()")).strip()
            area = info.xpath(".//div[contains(@class,'info')]/dl/dd/b[contains(text(), '地区')]/../text()")[0].strip()
            actor = info.xpath(".//div[contains(@class,'info')]/dl/dd[contains(text(), '演员') or contains(text(), '主持人')]/text()")
            if len(actor) > 0:
                actor = actor[0].replace('演员：', '').replace('主持人：', '').strip()
            else:
                actor = ''
            director = info.xpath(".//div[contains(@class,'info')]/dl/dd[contains(text(), '导演')]/text()")
            if len(director) > 0:
                director = director[0].replace('导演：', '').strip()
            else:
                director = ''
            year = info.xpath(".//div[contains(@class,'info')]/dl/dd/b[contains(text(), '年代')]/../text()")[-1].strip()
            detail = info.xpath("//div[contains(@class,'des2')]/text()")
            if len(detail) > 0:
                detail = detail[0].replace('剧情：　　', '').replace('剧情：', '').strip()
            else:
                detail = ''

        vod = {
            'vod_id': url,
            'vod_name': title,
            'vod_pic': pic,
            'type_name': type_name,
            'vod_year': year,
            'vod_area': area,
            'vod_remarks': remarks,
            'vod_actor': actor,
            'vod_director': director,
            'vod_content': detail,
            'vod_play_from': '$$$'.join(vod_play_from),
            'vod_play_url': '$$$'.join(vod_play_url)
        }

        result = {
            'list': [
                vod
            ]
        }
        return result

    def searchContent(self, key, quick):
        """Search the site for `key` via its JSON suggest endpoint.

        The search HTML page embeds the real endpoint in a JS var
        (my_search); a second request to it returns a JSON array of hits.
        `quick` is unused (interface compatibility).
        """
        if '' == self.site_url:
            self.getSiteUrl()

        rsp = self.fetch('{0}/search/{1}'.format(self.site_url, key), headers=self.headers)
        search_url = self.regStr(rsp.text, "var my_search='(\\S*?)'")

        # Copy the shared class-level headers so Origin/Referer do not leak
        # into self.headers for every subsequent request.
        headers = dict(self.headers)
        headers['Origin'] = self.site_url
        headers['Referer'] = self.site_url
        rsp = self.fetch('{}?top=10&q={}'.format(search_url, key), headers=headers)

        content = rsp.text.strip()
        # Strip a UTF-8 BOM if present; once decoded it is the single
        # character '\ufeff', so dropping one char is enough.
        if content.startswith(u'\ufeff'):
            content = content[1:]
        vodList = json.loads(content)

        videos = []
        for vod in vodList:
            sid = vod['url']
            if not sid.startswith('http'):
                sid = self.site_url + sid
            if not sid.endswith('/'):
                sid = sid + '/'
            videos.append({
                'vod_id': sid,
                'vod_name': vod['title'],
                'vod_pic': vod['thumb'],
                'vod_remarks': (vod['lianzaijs'] if len(vod['lianzaijs']) > 0 else '1') + '集'
            })
        return {'list': videos}

    def playerContent(self, flag, id, vipFlags):
        """Return the play descriptor for one episode.

        The URLs assembled in detailContent are passed through unchanged
        ('parse' == '0' means the player should not re-parse them).
        """
        return {
            'parse': '0',
            'playUrl': '',
            'url': id,
            'header': ''
        }

    def loadVtt(self, url):
        """Subtitle loading is not supported for this site."""
        pass

    def isVideoFormat(self, url):
        """Video-format sniffing is not used by this spider."""
        pass

    def manualVideoCheck(self):
        """Manual video checking is not needed for this site."""
        pass

    def localProxy(self, param):
        """Local proxy hook; nothing is proxied, so answer an empty 200."""
        empty_action = {}
        return [200, 'video/MP2T', empty_action, '']
