# coding=utf-8
# !/usr/bin/python
# by eaudouce
import sys, requests, time, json, re, urllib.parse, base64
sys.path.append('..')
from lxml import etree
from base.spider import Spider
from pprint import pprint

class Spider(Spider):  # intentionally shadows the imported base Spider; metaclass is the default `type`
    # Source display name, returned by getName().
    site_name = 'xk_88看球'

    # Publisher page that lists the site's current mirror domains.
    pub_url = 'http://www.88kq.net/' # http://www.88kq.in/ is the backup publisher page
    # Working mirror domain; resolved lazily by getSiteUrl() on first use.
    site_url = ''
    # Default HTTP headers sent with every request.
    headers = {
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
    }

    # Static catalogue: top-level categories ('class') plus per-category 'cid'
    # filters. Filter options are expanded from compact 'name,id#name,id#...'
    # strings; the ids feed the /match/<cid>/live listing URLs.
    config = {
        'player': {},
        'class': [
            {'type_name':'正在直播', 'type_id':'0'},
            {'type_name':'篮球直播', 'type_id':'1'},
            {'type_name':'足球直播', 'type_id':'2'},
            {'type_name':'其他直播', 'type_id':'99'}
        ],
        'filter': {
            '1': [{
                'key': 'cid',
                'name': '分类',
                'value': [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in 'NBA,1#CBA,2#WNBA,20#篮球综合,4'.split('#')]
            }],
            '2': [{
                'key': 'cid',
                'name': '分类',
                'value': [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in '英超,8#西甲,9#意甲,10#德甲,14#法甲,15#欧冠,12#欧联,13#中超,7#亚冠,11#足总杯,27#美职联,26#中甲,31#足球综合,23'.split('#')]
            }],
            '99': [{
                'key': 'cid',
                'name': '分类',
                'value': [{'n': i.split(',')[0], 'v': i.split(',')[1]} for i in 'CCTV5,18#纬来体育,21#棒球,38#网球,29#NFL,25#羽毛球,19'.split('#')]
            }]
        }
    }

    def getSiteUrl(self):
        """Resolve a working mirror domain from the publisher page.

        Scrapes pub_url for candidate mirror links, keeps the first one that
        answers HTTP 200 (falling back to the last candidate probed), caches
        it — without its trailing slash — in self.site_url, and returns it.
        """
        page = self.fetch(self.pub_url, headers=self.headers)
        doc = self.html(self.cleanText(page.text))
        candidates = doc.xpath('//div[contains(@class, "site test-speed")]/a[contains(@href, "88")]/@href')
        chosen = ''
        for chosen in candidates:
            probe = self.fetch(chosen, headers=self.headers)
            if probe.status_code == 200:
                break
        self.site_url = chosen.strip('/')
        return self.site_url

    def getName(self):
        return self.site_name

    def init(self, extend=''):
        pass

    def homeContent(self, filter):
        result = {'class': self.config['class']}
        if filter:
            result['filters'] = self.config['filter']
        return result

    def homeVideoContent(self):
        return {'list': []}

    def categoryContent(self, tid, pg, filter, extend):
        """Build the match list for one category page.

        tid '0' lists everything currently live (the site front page); any
        other tid maps to /match/<cid>/live, where cid is taken from the
        selected filter in `extend` or defaults to the first configured
        value. Pagination is faked: one page of up to 99 items.
        """
        if '' == self.site_url:
            self.getSiteUrl()

        tid, pg = str(tid), int(pg)
        if tid == '0':
            target = self.site_url
        else:
            # Fall back to the first filter value when no cid was chosen.
            if len(str(extend.get('cid', ''))) > 0:
                cid = extend['cid']
            else:
                cid = self.config['filter'][tid][0]['value'][0]['v']
            target = '{0}/match/{1}/live'.format(self.site_url, cid)

        return {
            'page': pg,
            'pagecount': 1,
            'limit': 99,
            'total': 99,
            'list': self.getVideos(target),
        }

    def getVideos(self, url):
        """Scrape one listing page and return vod entries for every game
        row that exposes a live-stream button."""
        rsp = self.fetch(url, headers=self.headers)
        doc = self.html(self.cleanText(rsp.text))
        rows = doc.xpath('//ul[@class="list-group"]/li[@class="list-group-item group-game-item"]//div[@data-id]/a[contains(text(), "直播")]/../..')
        videos = []
        for row in rows:
            league = ''.join(row.xpath('.//span[@class="game-type"][1]/text()')).strip()
            teams = ' VS '.join(t.strip() for t in row.xpath('.//span[@class="team-name"]/text()')).strip()
            logo = ''.join(row.xpath('.//img[@class="team-logo boss"][1]/@src')).strip()
            game_time = ''.join(row.xpath('.//div[contains(@class, "category-game-time")][1]/text()')).strip()
            button = ''.join(row.xpath('.//a[contains(@class, "btn")][1]/text()')).strip()
            link = ''.join(row.xpath('.//a[contains(@class, "btn")][1]/@href')).strip()
            videos.append({
                'vod_id': self.genFullUrl(link, url),
                'vod_name': league + ' ' + teams,
                'vod_pic': self.genFullUrl(logo, url),
                'vod_remarks': game_time + ' ' + button
            })
        return videos

    def detailContent(self, array):
        """Fetch one match page and assemble its vod detail plus play links.

        array[0] is the full match-page URL. The play links live behind the
        companion '<url>-url' endpoint, whose 'data' field wraps a base64
        payload in a 6-char prefix / 2-char suffix of obfuscation.
        NOTE(review): the [6:-2] slice mirrors the site's current wrapping —
        re-verify if decoding starts failing.

        Fix: removed `site_domain`, which was computed but never used.
        """
        url = array[0]
        rsp = self.fetch(url, headers=self.headers)
        root = self.html(self.cleanText(rsp.text))

        vod = {
            'vod_id': url,
            'vod_name': ''.join(root.xpath('//div[@class="game-info-container"]//p[@class="game-name"][1]/text()')).strip() + ' ' + ' VS '.join([i.strip() for i in root.xpath('//div[@class="game-info-container"]//span[@class="team-name"]/text()')]).strip(),
            'vod_pic': ''.join(root.xpath('//div[@class="game-info-container"]/div[1]/img[1]/@src')).strip(),
            'vod_remarks': ''.join(root.xpath('//div[@class="game-info-container"]//p[@class="game-time"]/text()')).strip(),
            'vod_content': ' '.join(root.xpath('//div[@class="game-info-container"]//p[@class="game-time"]/text()')).strip() + ' ' + ' '.join(root.xpath('//div[@class="game-info-container"]//p[contains(@class, "game-status")]/text()')).strip()
        }

        # Decode the obfuscated link list: strip wrapper, base64-decode, parse.
        rsp = self.fetch(url + '-url', headers=self.headers)
        payload = json.loads(rsp.text)['data'][6:-2]
        json_data = json.loads(base64.b64decode(payload.encode('utf-8')).decode('utf-8'))

        vod_items = []
        for ep in json_data['links']:
            # Keep only links playerContent knows how to unwrap.
            if ep['url'].find('embed=') < 0 and ep['url'].find('?url=') < 0:
                continue
            vod_items.append('{}${}'.format(ep['name'], ep['url'].split('#')[0]))

        vod['vod_play_from'] = '$$$'.join([self.site_name])
        vod['vod_play_url'] = '$$$'.join(['#'.join(vod_items)])
        return {'list': [vod]}

    def searchContent(self, key, quick):
        return {'list': []}

    def playerContent(self, flag, id, vipFlags):
        if id.find('embed=') > -1:
            url = base64.b64decode(id.split('embed=')[1].split('&')[0].encode('utf-8')).decode('utf-8')
        elif id.find('?url=') > -1:
            url = id.split('?url=')[1]
        else:
            url = id

        result = {
            'parse': '0',
            'playUrl': '',
            'url': url,
            'header': ''
        }
        return result

    def loadVtt(self, url):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def localProxy(self, param):
        action = {}
        return [200, 'video/MP2T', action, '']

    def genFullUrl(self, url, page_url):
        url = url.strip()
        page_url = page_url.strip()
        result = urllib.parse.urlparse(page_url)
        if len(url) < 1:
            return url
        elif url.startswith('http://') or url.startswith('https://'):
            return url
        elif url.startswith('//'):
            return result.scheme + ':' + url
        elif url.startswith('/'):
            return result.scheme + '://' + result.netloc + url
        elif url.startswith('./'):
            return result.scheme + '://' + result.netloc + '/'.join(result.path.split('/')[:-1]).rstrip('/') + '/' + url[2:]
        elif url.startswith('../'):
            url_arr = url.split('../')
            pos = -len(url_arr)
            return result.scheme + '://' + result.netloc + '/'.join(result.path.split('/')[:pos]).rstrip('/') + '/' + url_arr[-1]
        else:
            return result.scheme + '://' + result.netloc + '/'.join(result.path.split('/')[:-1]).rstrip('/') + '/' + url
