# coding=utf-8
# !/usr/bin/python
# by eaudouce
import sys, requests, time, json, re, hashlib, urllib.parse
sys.path.append('..')
from lxml import etree
from base.spider import Spider
from pprint import pprint

class Spider(Spider):  # subclasses the framework Spider base (default metaclass: type); intentionally shadows the imported name
    # Display name of this source, returned by getName().
    site_name = 'xk_金牌'

    # Publisher/landing domains (currently unused fallback candidates).
    pub_url_list = ['https://ivdy.com', 'https://www.sxbfxk.com']
    # Mirror domains probed by getSiteUrl(); the first one answering 200 wins.
    site_url_list = ['https://www.gs4x7nq4.com', 'https://www.cfkj86.com']
    # Resolved site root; lazily filled in by getSiteUrl().
    site_url = ''
    cookies = ''
    # Baseline request headers shared by every HTTP call.
    headers = {
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
    }
    # Static key and device id fed into the sha1(md5(...)) request signature.
    verify_key = 'cb808529bae6b6be45ecfab29a4889bc'
    device_id = '58a80c52-138c-48fd-8edb-138fd74d12c8'

    # Category and filter metadata served by homeContent().
    config = {
        'player': {},
        'class': [
            {'type_name':'电影', 'type_id':'1'},
            {'type_name':'剧集', 'type_id':'2'},
            {'type_name':'综艺', 'type_id':'3'},
            {'type_name':'动漫', 'type_id':'4'}
        ],
        'filter': {}
    }

    def getSiteUrl(self):
        """Probe the mirror list and cache the first reachable site url.

        Sends a cheap HEAD request to each candidate in ``site_url_list``.
        Stops at the first 200 response; if none succeed, falls back to the
        last candidate (preserving the original best-effort behavior).

        Returns:
            str: the chosen site root, also stored in ``self.site_url``.
        """
        url = self.site_url_list[-1]
        for url in self.site_url_list:
            try:
                # Timeout so one dead mirror cannot hang the whole spider;
                # a network error just moves on to the next candidate
                # (previously it aborted the probe with an exception).
                rsp = requests.head(url, headers=self.headers, timeout=5)
            except requests.RequestException:
                continue
            if rsp.status_code == 200:
                break
        self.site_url = url
        return url

    def getName(self):
        return self.site_name

    def init(self, extend=''):
        pass

    def homeContent(self, filter):
        result = {'class': self.config['class']}
        if filter:
            result['filters'] = self.config['filter']
        return result

    def homeVideoContent(self):
        """Scrape the landing page for its recommended videos.

        The page embeds backslash-escaped JSON arrays ("list": [{...}]);
        every entry of every match is converted into one vod dict.

        Returns:
            dict: {'list': [vod, ...]} for the host app.
        """
        if '' == self.site_url:
            self.getSiteUrl()

        rsp = self.fetch(self.site_url, headers=self.headers)
        content = rsp.text
        # Match escaped JSON arrays embedded in the page source.
        pattern = r'\\"list\\"\s*\:\s*(\[\{.*?\}\])'
        matches = re.findall(pattern, content)

        videos = []
        for json_str in matches:
            # Strip the backslash escaping before parsing the embedded JSON.
            json_data = json.loads(re.sub(r'\\', '', json_str))
            for vod in json_data:
                sid = '{0}/detail/{1}'.format(self.site_url, vod['vodId'])
                pic = vod['vodPic']
                if not pic.startswith('http'):
                    pic = self.site_url + pic
                videos.append({
                    'vod_id': sid,
                    'vod_name': vod['vodName'],
                    'vod_pic': pic,
                    'vod_remarks': vod['vodRemarks'] if vod.get('vodRemarks') else vod['vodVersion']
                })
        # NOTE(review): an older xpath-based scrape used to follow the return
        # here and was unreachable; it has been removed.
        result = {
            'list': videos
        }
        return result

    def categoryContent(self, tid, pg, filter, extend):
        """Return one page of the video listing for category ``tid``.

        Args:
            tid: type id ('1'..'4', matching ``config['class']``).
            pg: 1-based page number (str or int).
            filter: unused (no filters are configured).
            extend: unused; kept for the base spider interface.

        Returns:
            dict with 'list', 'page', 'pagecount', 'limit' and 'total'.
        """
        if '' == self.site_url:
            self.getSiteUrl()

        result = {}
        limit = 24
        pg = int(pg)
        # Millisecond timestamp; part of both the signature and the headers.
        t = str(int(time.time()*1000))
        params = {
            'site_url': self.site_url,
            'tid': tid,
            'page': pg,
            'limit': limit,
            'verify_key': self.verify_key,
            't': t
        }
        # sign = sha1(md5(query-string + key + t)) — mirrors the site's own JS.
        signkey = hashlib.sha1(hashlib.new('md5', 'pageNum={page}&pageSize={limit}&sort=1&sortBy=1&type1={tid}&key={verify_key}&t={t}'.format(**params).encode('utf-8')).hexdigest().encode('utf-8')).hexdigest()

        # FIX: copy the shared headers. The original bound the class-level
        # dict and mutated it, leaking deviceid/sign/t into every subsequent
        # request made with self.headers.
        headers = dict(self.headers)
        headers['deviceid'] = self.device_id
        headers['sign'] = signkey
        headers['t'] = t
        rsp = self.fetch('{site_url}/api/mw-movie/anonymous/video/list?pageNum={page}&pageSize={limit}&sort=1&sortBy=1&type1={tid}'.format(**params), headers=headers)

        json_data = json.loads(rsp.text)
        videos = []
        # FIX: tolerate a missing/None 'data' payload on API errors instead of
        # raising AttributeError.
        vod_list = (json_data.get('data') or {}).get('list') or []
        for vod in vod_list:
            sid = '{0}/detail/{1}'.format(self.site_url, vod['vodId'])
            pic = vod['vodPic']
            if not pic.startswith('http'):
                pic = self.site_url + pic
            videos.append({
                'vod_id': sid,
                'vod_name': vod['vodName'],
                'vod_pic': pic,
                'vod_remarks': vod['vodRemarks'] if vod.get('vodRemarks') else vod['vodVersion']
            })

        # The API exposes no total count: advertise "one more page" while a
        # full page comes back, otherwise close the listing at this page.
        if len(videos) == limit:
            pagecount = pg + 1
            total = pg*limit + 1
        else:
            pagecount = pg
            total = (pg - 1)*limit + len(videos)

        result['list'] = videos
        result['page'] = pg
        result['pagecount'] = pagecount
        result['limit'] = limit
        result['total'] = total
        return result

    def detailContent(self, array):
        """Fetch the detail page for ``array[0]`` and build the vod record.

        ``array[0]`` is a full detail-page url (``<site>/detail/<vodId>``).
        Metadata is scraped from the rendered page; episode links come from
        the escaped ``episodeList`` JSON embedded in the page source and are
        encoded as ``name$<play-id>`` pairs joined by '#'.
        """
        url = array[0]
        # vod id is the last path segment; site_domain is scheme://host.
        vod_id = url.strip('/').split('/')[-1]
        site_domain = '/'.join(url.split('/')[:3])
        rsp = self.fetch(url, headers=self.headers)
        content = self.cleanText(rsp.text)
        root = self.html(content)

        # Scrape the metadata fields off the detail page markup.
        vod = {
            'vod_id': url,
            'vod_name': ''.join(root.xpath('//h1[@class="title"]/text()')).strip(),
            'vod_pic': ''.join(root.xpath('//div[contains(@class, "detail__InfoBox-sc-")]//img[1]/@src')).strip(),
            'vod_year': ''.join(root.xpath('//div[contains(@class, "detail__InfoBox-sc-")]//div[@class="item-bottom" and contains(text(), "上映时间")]/../div[@class="item-top"]/text()')).strip(),
            'vod_area': ''.join(root.xpath('//div[contains(@class, "detail__InfoBox-sc-")]//div[@class="item-bottom" and contains(text(), "语言")]/../div[@class="item-top"]/text()')).strip(),
            'vod_remarks': ''.join(root.xpath('//div[@class="score"]/text()')).strip(),
            'vod_actor': '/'.join(root.xpath('//div[@class="director"]/div[@class="name" and contains(text(), "主演")]/../a/text()')).strip(),
            'vod_director': '/'.join(root.xpath('//div[@class="director"]/div[@class="name" and contains(text(), "导演")]/../a/text()')).strip(),
            'vod_content': ''.join(root.xpath('//div[@class="intro"]//label[@class="wrapper_more_btn"]/parent::*/text()')).strip()
        }

        # The episode list lives in an escaped JSON blob: "episodeList":[...].
        ep_pattern = r'"episodeList.*?(\[.*?\])'
        match = re.search(ep_pattern, content)
        vod_items = []
        if match:
            # Drop the backslash escaping, then parse the array.
            eps = json.loads(re.sub(r'\\', '', match.group(1)))
            for ep in eps:
                # Play id format consumed later by playerContent():
                # <domain>/vod/play/<vod_id>/sid/<nid>
                vod_items.append('{0}${1}/vod/play/{2}/sid/{3}'.format(ep['name'], site_domain, vod_id, ep['nid']))
        vod_play_from = [self.site_name]
        vod_play_url = ['#'.join(vod_items)]

        # '$$$' separates play sources; this site exposes exactly one.
        vod['vod_play_from'] = '$$$'.join(vod_play_from)
        vod['vod_play_url'] = '$$$'.join(vod_play_url)
        result = {
            'list': [
                vod
            ]
        }
        return result

    def searchContent(self, key, quick):
        """Search the site for ``key`` and return the first page of matches.

        Args:
            key: search keyword.
            quick: unused; kept for the base spider interface.

        Returns:
            dict: {'list': [vod, ...]}.
        """
        if '' == self.site_url:
            self.getSiteUrl()

        limit = 24
        t = str(int(time.time()*1000))
        params = {
            'site_url': self.site_url,
            'key': key,
            'limit': limit,
            'verify_key': self.verify_key,
            't': t
        }
        # sign = sha1(md5(query-string + key + t)) — mirrors the site's own JS.
        signkey = hashlib.sha1(hashlib.new('md5', 'keyword={key}&pageNum=1&pageSize={limit}&type=false&key={verify_key}&t={t}'.format(**params).encode('utf-8')).hexdigest().encode('utf-8')).hexdigest()

        # FIX: copy the shared headers instead of mutating the class-level
        # dict, which previously leaked deviceid/sign/t into every subsequent
        # request made with self.headers.
        headers = dict(self.headers)
        headers['deviceid'] = self.device_id
        headers['sign'] = signkey
        headers['t'] = t
        rsp = self.fetch('{site_url}/api/mw-movie/anonymous/video/searchByWordPageable?keyword={key}&pageNum=1&pageSize={limit}&type=false'.format(**params), headers=headers)

        json_data = json.loads(rsp.text)
        videos = []
        # FIX: tolerate a missing/None 'data' payload on API errors.
        vod_list = (json_data.get('data') or {}).get('list') or []
        for vod in vod_list:
            sid = '{0}/detail/{1}'.format(self.site_url, vod['vodId'])
            pic = vod['vodPic']
            if not pic.startswith('http'):
                pic = self.site_url + pic
            videos.append({
                'vod_id': sid,
                'vod_name': vod['vodName'],
                'vod_pic': pic,
                'vod_remarks': vod['vodRemarks'] if vod.get('vodRemarks') else '暂无更新'
            })

        return {'list': videos}

    def playerContent(self, flag, id, vipFlags):
        """Resolve the direct play url for one episode.

        Args:
            flag: play-source flag (unused; this site has a single source).
            id: play id built by detailContent(), shaped like
                ``<scheme>://<host>/vod/play/<pid>/sid/<nid>``.
            vipFlags: unused; kept for the base spider interface.

        Returns:
            dict with 'parse' == '0' (direct url, no sniffing) and 'url'.
        """
        parts = id.split('/')
        site_domain = '/'.join(parts[:3])
        # Positions fixed by the id layout produced in detailContent().
        pid = parts[5]
        nid = parts[7]
        t = str(int(time.time()*1000))
        # sign = sha1(md5(query-string + key + t)) — mirrors the site's own JS.
        signkey = hashlib.sha1(hashlib.new('md5', 'id={0}&nid={1}&key={3}&t={2}'.format(pid, nid, t, self.verify_key).encode('utf-8')).hexdigest().encode('utf-8')).hexdigest()

        # FIX: copy the shared headers instead of mutating the class-level
        # dict, which previously leaked deviceid/sign/t into every subsequent
        # request made with self.headers.
        headers = dict(self.headers)
        headers['deviceid'] = self.device_id
        headers['sign'] = signkey
        headers['t'] = t
        rsp = self.fetch('{0}/api/mw-movie/anonymous/v1/video/episode/url?id={1}&nid={2}'.format(site_domain, pid, nid), headers=headers)
        json_data = json.loads(rsp.text)
        if json_data.get('data'):
            url = json_data['data']['playUrl']
        else:
            url = ''
        result = {
            'parse': '0',
            'playUrl': '',
            'url': url,
            'header': ''
        }
        return result

    def loadVtt(self, url):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def localProxy(self, param):
        action = {}
        return [200, 'video/MP2T', action, '']

    def genFullUrl(self, url, page_url):
        url = url.strip()
        page_url = page_url.strip()
        result = urllib.parse.urlparse(page_url)
        if len(url) < 1:
            return url
        elif url.startswith('http://') or url.startswith('https://'):
            return url
        elif url.startswith('//'):
            return result.scheme + ':' + url
        elif url.startswith('/'):
            return result.scheme + '://' + result.netloc + url
        elif url.startswith('./'):
            return result.scheme + '://' + result.netloc + '/'.join(result.path.split('/')[:-1]).rstrip('/') + '/' + url[2:]
        elif url.startswith('../'):
            url_arr = url.split('../')
            pos = -len(url_arr)
            return result.scheme + '://' + result.netloc + '/'.join(result.path.split('/')[:pos]).rstrip('/') + '/' + url_arr[-1]
        else:
            return result.scheme + '://' + result.netloc + '/'.join(result.path.split('/')[:-1]).rstrip('/') + '/' + url
