# coding=utf-8
#!/usr/bin/python
# by eaudouce
import sys, requests, time, json, re, urllib.parse
sys.path.append('..')
from lxml import etree
from base.spider import Spider
from pprint import pprint

class Spider(Spider):  # intentionally shadows base.spider.Spider — framework loads this class by name
    """Site adapter for the '如意' video CMS (MacCMS v10 URL layout).

    Implements the standard spider contract: category browsing, search,
    detail scraping and play-url resolution against a small list of
    mirror domains.
    """

    site_name = 'xk_如意'

    # Known mirror domains; the first one that answers HTTP 200 wins.
    site_url_list = ['https://ryzyw.com', 'https://ryzy.tv']
    site_url = 'https://ryzyw.com'
    # MacCMS v10 route templates: {site}/...?wd={keyword} and {site}/.../id/{type}/page/{pg}.html
    search_url = '{}/index.php/vod/search.html?wd={}'
    type_url = '{}/index.php/vod/type/id/{}/page/{}.html'
    headers = {
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
    }

    # Static category tree and per-category sub-type filters.
    # Filter values are encoded as '名称$$id' pairs joined by '#'.
    config = {
        'player': {},
        'class': [
            {'type_name':'电影', 'type_id':'1'},
            {'type_name':'剧集', 'type_id':'2'},
            {'type_name':'综艺', 'type_id':'3'},
            {'type_name':'动漫', 'type_id':'4'},
            {'type_name':'电影解说', 'type_id':'35'},
            {'type_name':'体育', 'type_id':'36'}
        ],
        'filter': {
            '1': [{
                'key': 'type',
                'name': '分类',
                'value': [{'n': '全部', 'v': '1'}] + [{'n': i.split('$$')[0], 'v': i.split('$$')[1]} for i in '动作$$6#喜剧$$7#科幻$$9#恐怖$$10#爱情$$8#剧情$$11#战争$$12#记录$$20#动画$$47#伦理$$34#预告$$45'.split('#')]
            }],
            '2': [{
                'key': 'type',
                'name': '分类',
                'value': [{'n': '全部', 'v': '2'}] + [{'n': i.split('$$')[0], 'v': i.split('$$')[1]} for i in '国产$$13#欧美$$16#香港$$14#韩国$$15#台湾$$21#日本$$22#海外$$23#泰国$$24#短剧$$46'.split('#')]
            }],
            '3': [{
                'key': 'type',
                'name': '分类',
                'value': [{'n': '全部', 'v': '3'}] + [{'n': i.split('$$')[0], 'v': i.split('$$')[1]} for i in '大陆$$25#港台$$26#日韩$$27#欧美$$28'.split('#')]
            }],
            '4': [{
                'key': 'type',
                'name': '分类',
                'value': [{'n': '全部', 'v': '4'}] + [{'n': i.split('$$')[0], 'v': i.split('$$')[1]} for i in '国产$$29#日韩$$30#欧美$$31#港台$$32#海外$$33'.split('#')]
            }],
            '36': [{
                'key': 'type',
                'name': '分类',
                'value': [{'n': '全部', 'v': '36'}] + [{'n': i.split('$$')[0], 'v': i.split('$$')[1]} for i in '足球$$37#篮球$$38#网球$$39#斯诺克$$40'.split('#')]
            }]
        }
    }

    def getSiteUrl(self):
        """Probe the mirror list and cache the first reachable domain.

        Falls back to the last candidate if none answers 200 (same
        behaviour as before), but an unreachable mirror no longer raises —
        connection errors are swallowed and the next mirror is tried.
        """
        url = self.site_url
        for candidate in self.site_url_list:
            url = candidate
            try:
                # HEAD keeps the probe cheap; timeout stops a dead mirror hanging us.
                rsp = requests.head(candidate, headers=self.headers, timeout=5)
            except requests.RequestException:
                continue  # mirror unreachable — try the next one
            if rsp.status_code == 200:
                break
        self.site_url = url
        return url

    def getName(self):
        """Return the display name of this source."""
        return self.site_name

    def init(self, extend=''):
        # No per-instance initialisation needed; mirrors are resolved lazily.
        pass

    def homeContent(self, filter):
        """Return the category list, plus sub-type filters when requested."""
        result = {'class': self.config['class']}
        if filter:
            result['filters'] = self.config['filter']
        return result

    def homeVideoContent(self):
        """Scrape the recommended videos from the site's front page."""
        if '' == self.site_url:
            self.getSiteUrl()

        result = self.getVodList(self.site_url, 1)
        return {'list': result['list']}

    def categoryContent(self, tid, pg, filter, extend):
        """List one page of a category; a selected sub-type filter overrides tid."""
        if '' == self.site_url:
            self.getSiteUrl()

        type_id = extend.get('type') or tid
        url = self.type_url.format(self.site_url, type_id, pg)
        return self.getVodList(url, pg)

    def detailContent(self, array):
        """Scrape a detail page (vod_id is the full page URL) into one vod dict."""
        url = array[0]
        rsp = self.fetch(url, headers=self.headers)
        root = self.html(self.cleanText(rsp.text))

        def info_field(label):
            # Text of the first info-panel <p> starting with `label`, label stripped off.
            xp = '//div[@class="people"]/div[@class="right"]/p[contains(text(), "{}")][1]/text()'.format(label)
            return ''.join(root.xpath(xp)).strip().replace(label, '')

        vod = {
            'vod_id': url,
            'vod_name': info_field('片名：'),
            'vod_pic': ''.join(root.xpath('//div[@class="people"]/div[@class="left"]/img[1]/@src')).strip(),
            'vod_year': info_field('年代：'),
            'vod_area': info_field('地区：'),
            'vod_remarks': info_field('状态：'),
            'vod_actor': info_field('演员：'),
            'vod_director': info_field('导演：'),
            # Collapse all whitespace runs inside the synopsis.
            'vod_content': ''.join(''.join(root.xpath('//div[@class="vod_content"][1]/p/text()')).split()).strip()
        }

        # Each matching node is one play source; episodes are '#'-joined,
        # sources '$$$'-joined, per the spider contract.
        vod_play_from = []
        vod_play_url = []
        playlists = root.xpath('//*[contains(@class, "playlist") and contains(@class, "rym3u8")]')
        for line_no, playlist in enumerate(playlists, start=1):
            episodes = []
            for item in playlist.xpath('./li'):
                ep_info = ''.join(item.xpath('.//input[@name="copy_rym3u8[]"][1]/@value')).strip()
                if ep_info:
                    episodes.append(ep_info)
            vod_play_from.append('{}_线路{}'.format(self.site_name, line_no))
            vod_play_url.append('#'.join(episodes))

        vod['vod_play_from'] = '$$$'.join(vod_play_from)
        vod['vod_play_url'] = '$$$'.join(vod_play_url)
        return {'list': [vod]}

    def searchContent(self, key, quick):
        """Search by keyword; always returns the first result page."""
        if '' == self.site_url:
            self.getSiteUrl()

        url = self.search_url.format(self.site_url, key)
        result = self.getVodList(url, 1)
        return {'list': result['list']}

    def getVodList(self, url, pg):
        """Fetch a listing page and parse it into the standard paged result dict."""
        limit = 50
        pagecount = 1
        total = limit
        pg = int(pg)
        videos = []

        rsp = self.fetch(url, headers=self.headers)
        root = self.html(rsp.text)
        for ep in root.xpath('//ul[@class="videoContent"]/li'):
            videos.append({
                'vod_id': self.genFullUrl(''.join(ep.xpath('.//a[@class="videoName"]/@href')).strip(), url),
                'vod_name': ''.join(ep.xpath('.//a[@class="videoName"]/text()')).strip(),
                'vod_pic': '',
                'vod_remarks': ''.join(ep.xpath('.//span[@class="region"]/text()')).strip()
            })

        # Pager text carries three numbers: total items, current page, page count.
        count_text = ''.join(root.xpath('//div[@class="pages"]/span[@class="disabled"]/text()')).strip()
        match = re.search(r'(\d+).*?(\d+).*?(\d+)', count_text)
        if match:
            total = int(match.group(1))
            pg = int(match.group(2))
            pagecount = int(match.group(3))

        return {
            'list': videos,
            'page': pg,
            'pagecount': pagecount,
            'limit': limit,
            'total': total
        }

    def playerContent(self, flag, id, vipFlags):
        """Return the play info; URLs here are direct (no external parser needed)."""
        return {
            'parse': '0',
            'playUrl': '',
            'url': id,
            'header': ''
        }

    def loadVtt(self, url):
        # Subtitles are not provided by this site.
        pass

    def isVideoFormat(self, url):
        # No sniffing needed — play URLs are already direct media links.
        pass

    def manualVideoCheck(self):
        pass

    def localProxy(self, param):
        # No local proxying; return an empty MPEG-TS response.
        action = {}
        return [200, 'video/MP2T', action, '']

    def genFullUrl(self, url, page_url):
        """Resolve a possibly-relative href against the page it was scraped from.

        Handles absolute, protocol-relative (//), root-relative (/),
        same-dir (./), parent-dir (../) and bare relative forms.
        """
        url = url.strip()
        page_url = page_url.strip()
        base = urllib.parse.urlparse(page_url)
        if len(url) < 1:
            return url
        elif url.startswith('http://') or url.startswith('https://'):
            return url
        elif url.startswith('//'):
            return base.scheme + ':' + url
        elif url.startswith('/'):
            return base.scheme + '://' + base.netloc + url
        elif url.startswith('./'):
            return base.scheme + '://' + base.netloc + '/'.join(base.path.split('/')[:-1]).rstrip('/') + '/' + url[2:]
        elif url.startswith('../'):
            # Each '../' climbs one path segment above the page's directory.
            url_arr = url.split('../')
            pos = -len(url_arr)
            return base.scheme + '://' + base.netloc + '/'.join(base.path.split('/')[:pos]).rstrip('/') + '/' + url_arr[-1]
        else:
            return base.scheme + '://' + base.netloc + '/'.join(base.path.split('/')[:-1]).rstrip('/') + '/' + url
