# coding=utf-8
#!/usr/bin/python
# by eaudouce
import sys, requests, time, json, re, hashlib, urllib.parse
sys.path.append('..')
from lxml import etree
from base.spider import Spider
from pprint import pprint

class Spider(Spider):  # intentionally shadows the imported base class: the loader looks up a class named "Spider"
    """CatVod/TVBox-style spider for https://www.hbotv2.com."""

    site_name = 'xk_HBOTV'

    site_url = 'https://www.hbotv2.com'
    cookies = ''
    # Shared base request headers.
    # NOTE: always copy (dict(self.headers)) before adding per-request fields;
    # mutating this class-level dict would leak headers into later requests.
    headers = {
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-User': '?1',
        'Sec-Fetch-Dest': 'document'
    }

    # Static site configuration: the category tabs shown in the app.
    # type_id doubles as the URL path segment; the site exposes no filters.
    config = {
        'player': {},
        'class': [
            {'type_name':'电影', 'type_id':'movie'},
            {'type_name':'剧集', 'type_id':'tv'},
            {'type_name':'综艺', 'type_id':'variety'},
            {'type_name':'动漫', 'type_id':'cartoon'},
            {'type_name':'短剧', 'type_id':'shorts'}
        ],
        'filter': {}
    }

    def getName(self):
        """Return the display name of this spider."""
        return self.site_name

    def init(self, extend=''):
        """No per-instance initialisation is required for this site."""
        pass

    def homeContent(self, filter):
        """Return the category list (plus the filter config when *filter* is truthy)."""
        result = {'class': self.config['class']}
        if filter:
            result['filters'] = self.config['filter']
        return result

    def homeVideoContent(self):
        """Scrape the recommended videos from the site's front page (at most 24)."""
        rsp = self.fetch(self.site_url, headers=self.headers)
        root = self.html(self.cleanText(rsp.text))
        aList = root.xpath('.//ul[contains(@class, "stui-vodlist") and position() < 3]/li')

        limit = 24
        videos = []
        # Slice up front instead of the original manual counter/break loop.
        for a in aList[:limit]:
            name = ''.join(a.xpath('.//h4[1]/a/text()')).strip()
            pic = ''.join(a.xpath('.//img[@class="lazyload"][1]/@data-original')).strip()
            mark = ''.join(a.xpath('.//span[@class="pic-text text-right"][1]/text()')).strip()
            sid = ''.join(a.xpath(".//h4[1]/a/@href")).strip()
            videos.append({
                'vod_id': self.genFullUrl(sid, self.site_url),
                'vod_name': name,
                'vod_pic': self.genFullUrl(pic, self.site_url),
                'vod_remarks': mark
            })
        result = {
            'list': videos
        }
        return result

    def categoryContent(self, tid, pg, filter, extend):
        """List page *pg* (1-based) of category *tid*.

        The site does not expose a total count, so pagination is probed by
        looking for a "下一页" (next page) link; ``total``/``pagecount`` are
        estimates assuming *limit* items per full page.
        """
        result = {}
        limit = 30
        pg = int(pg)

        # Page 1 lives at /<tid>/; later pages at /<tid>/index_<pg>.html.
        if pg > 1:
            url = '{}/{}/index_{}.html'.format(self.site_url, tid, pg)
            referer = '{}/{}/index_{}.html'.format(self.site_url, tid, pg-1) if pg > 2 else '{}/{}/'.format(self.site_url, tid)
        else:
            url = '{}/{}/'.format(self.site_url, tid)
            referer = self.site_url

        # BUGFIX: copy before mutating. The original aliased the class-level
        # dict (`headers = self.headers`) and wrote Referer/Origin into it,
        # permanently polluting every subsequent request's headers.
        headers = dict(self.headers)
        headers['Referer'] = referer
        headers['Origin'] = self.site_url
        rsp = self.fetch(url, headers=headers)
        root = self.html(self.cleanText(rsp.text))
        aList = root.xpath('.//ul[contains(@class, "stui-vodlist") and position() < 3]/li')

        videos = []
        for a in aList:
            name = ''.join(a.xpath('.//h4[1]/a/text()')).strip()
            pic = ''.join(a.xpath('.//*[contains(@class, "lazyload")][1]/@data-original')).strip()
            mark = ''.join(a.xpath('.//span[@class="pic-text text-right"][1]/text()')).strip()
            sid = ''.join(a.xpath('.//h4[1]/a/@href')).strip()
            videos.append({
                'vod_id': self.genFullUrl(sid, self.site_url),
                'vod_name': name,
                'vod_pic': self.genFullUrl(pic, self.site_url),
                'vod_remarks': mark
            })

        # A "next page" link means at least one more page/item exists.
        if len(root.xpath('//ul[contains(@class, "stui-page")]/li/a[contains(text(), "下一页")]')) > 0:
            pagecount = pg + 1
            total = pg*limit + 1
        else:
            pagecount = pg
            total = (pg - 1)*limit + len(videos)

        result['list'] = videos
        result['page'] = pg
        result['pagecount'] = pagecount
        result['limit'] = limit
        result['total'] = total
        return result

    def detailContent(self, array):
        """Fetch the detail page for ``array[0]`` and return vod metadata plus play lists."""
        url = array[0]
        rsp = self.fetch(url, headers=self.headers)
        root = self.html(self.cleanText(rsp.text))

        # Metadata fields are label/value pairs inside the detail "data" rows;
        # each is located via its Chinese label (年份/地区/更新/主演/导演).
        vod = {
            'vod_id': url,
            'vod_name': ''.join(root.xpath('//div[@class="stui-content__detail"]/h3[@class="title"]/text()')).strip(),
            'vod_pic': ''.join(root.xpath('//div[@class="stui-vodlist__thumb v-thumb"]/img[1]/@data-original')).strip(),
            'vod_year': ''.join(root.xpath('//div[@class="stui-content__detail"]/p[@class="data"]/span[contains(@class, "text-muted") and contains(text(), "年份")]/following-sibling::*[1]/text()')).strip(),
            'vod_area': ''.join(root.xpath('//div[@class="stui-content__detail"]/p[@class="data"]/span[contains(@class, "text-muted") and contains(text(), "地区")]/following-sibling::*[1]/text()')).strip(),
            'vod_remarks': ''.join(root.xpath('//div[@class="stui-content__detail"]/p[@class="data"]/span[contains(@class, "text-muted") and contains(text(), "更新")]/following-sibling::*[1]/text()')).strip(),
            'vod_actor': '/'.join(root.xpath('//div[@class="stui-content__detail"]/p[@class="data"]/span[contains(@class, "text-muted") and contains(text(), "主演")]/following-sibling::*/text()')).strip(),
            'vod_director': '/'.join(root.xpath('//div[@class="stui-content__detail"]/p[@class="data"]/span[contains(@class, "text-muted") and contains(text(), "导演")]/following-sibling::*/text()')).strip(),
            'vod_content': ''.join(root.xpath('//div[@class="detail-content"]/text()')).strip()
        }

        # One "line" panel per play source; episodes are joined "name$url"
        # with '#' between episodes and '$$$' between sources.
        lines = root.xpath('//div[@class="stui-pannel-box"]/div/ul[contains(@class, "stui-content__playlist")]/parent::*/parent::*')
        vod_play_from = []
        vod_play_url = []
        for line in lines:
            vod_play_from.append(line.xpath('.//h3[@class="title"]/text()')[0].strip())
            vod_items = []
            eps = line.xpath('.//ul[contains(@class, "stui-content__playlist")]/li/a')
            for ep in eps:
                # tostring(method='text') keeps the full visible episode label.
                vod_items.append('{}${}'.format(etree.tostring(ep, encoding=str, method='text').strip(), self.genFullUrl(ep.get('href', ''), url)))
            vod_play_url.append('#'.join(vod_items))

        vod['vod_play_from'] = '$$$'.join(vod_play_from)
        vod['vod_play_url'] = '$$$'.join(vod_play_url)
        result = {
            'list': [
                vod
            ]
        }
        return result

    def searchContent(self, key, quick):
        """POST the site's search form with *key* and return matching videos."""
        # BUGFIX: copy the shared headers; the original mutated the class dict,
        # so the form content-type leaked into every later (GET) request.
        headers = dict(self.headers)
        headers['Referer'] = self.site_url
        headers['Origin'] = self.site_url
        headers['content-type'] = 'application/x-www-form-urlencoded'
        data = 'keyboard=' + urllib.parse.quote(key, encoding='UTF-8') + '&show=title&tempid=1&tbname=news&mid=1&dopost=search'
        rsp = self.post(self.site_url+'/e/search/index.php', data, headers=headers)
        root = self.html(self.cleanText(rsp.text))
        aList = root.xpath('//div[contains(@class, "col-lg-wide-75")]//ul[contains(@class, "stui-vodlist__media")]/li')

        videos = []
        for a in aList:
            name = ''.join(a.xpath('.//h3[@class="title"][1]/a/text()')).strip()
            pic = ''.join(a.xpath('.//*[contains(@class, "lazyload")][1]/@data-original')).strip()
            mark = ''.join(a.xpath('.//span[@class="pic-text text-right"][1]/text()')).strip()
            sid = ''.join(a.xpath('.//h3[@class="title"][1]/a/@href')).strip()
            videos.append({
                'vod_id': self.genFullUrl(sid, self.site_url),
                'vod_name': name,
                'vod_pic': self.genFullUrl(pic, self.site_url),
                'vod_remarks': mark
            })

        return {'list': videos}

    def playerContent(self, flag, id, vipFlags):
        """Resolve the real stream URL for episode page *id*.

        Resolution chain: episode page (`var a0 = "..."`) -> /player/ iframe
        page -> inner iframe-player src -> `var url = "..."` in the player.
        Returns a direct-play (parse=0) result; *url* stays '' on failure.
        """
        url = ''
        site_domain = '/'.join(id.split('/')[:3])
        # BUGFIX: copy the shared headers before adding iframe/Referer fields.
        headers = dict(self.headers)
        rsp = self.fetch(id, headers=headers)
        matches = re.search(r'var\s*a0\s*=\s*"(.*?)"', rsp.text)
        if matches:
            iframe_url = '{}/player/?url={}'.format(site_domain, matches.group(1))
            headers['sec-fetch-dest'] = 'iframe'
            headers['Referer'] = id
            rsp = self.fetch(iframe_url, headers=headers)
            matches = re.search(r'<iframe\s*name\s*="iframe-player"\s*src="(.*?)"', rsp.text)
            if matches:
                headers['Referer'] = iframe_url
                rsp = self.fetch(self.genFullUrl(matches.group(1), id), headers=headers)
                matches = re.search(r'var\s*url\s*=\s*"(.*?)"', rsp.text)
                if matches:
                    url = self.genFullUrl(matches.group(1), id)
        result = {
            'parse': '0',
            'playUrl': '',
            'url': url,
            'header': {
                'Accept': '*/*',
                'Origin': site_domain,
                'Accept-Encoding': 'gzip, deflate',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
                'Sec-Fetch-Site': 'cross-site',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Dest': 'empty'
            }
        }
        return result

    def loadVtt(self, url):
        """Subtitles are not supported for this site."""
        pass

    def isVideoFormat(self, url):
        """No custom video-format detection; defer to the player."""
        pass

    def manualVideoCheck(self):
        """No manual video check is needed."""
        pass

    def localProxy(self, param):
        """No local proxying; return an empty MPEG-TS response."""
        action = {}
        return [200, 'video/MP2T', action, '']

    def genFullUrl(self, url, page_url):
        """Resolve *url* against *page_url* into an absolute URL.

        Handles absolute, protocol-relative (//), root-relative (/),
        same-directory (./), parent-directory (../) and bare relative forms.
        An empty *url* is returned unchanged (as '').
        """
        url = url.strip()
        page_url = page_url.strip()
        result = urllib.parse.urlparse(page_url)
        if len(url) < 1:
            return url
        elif url.startswith('http://') or url.startswith('https://'):
            return url
        elif url.startswith('//'):
            # Protocol-relative: inherit only the scheme.
            return result.scheme + ':' + url
        elif url.startswith('/'):
            # Root-relative: scheme + host, path as given.
            return result.scheme + '://' + result.netloc + url
        elif url.startswith('./'):
            # Same directory as the page.
            return result.scheme + '://' + result.netloc + '/'.join(result.path.split('/')[:-1]).rstrip('/') + '/' + url[2:]
        elif url.startswith('../'):
            # Climb one directory per '../' segment.
            url_arr = url.split('../')
            pos = -len(url_arr)
            return result.scheme + '://' + result.netloc + '/'.join(result.path.split('/')[:pos]).rstrip('/') + '/' + url_arr[-1]
        else:
            # Bare relative name: sibling of the page.
            return result.scheme + '://' + result.netloc + '/'.join(result.path.split('/')[:-1]).rstrip('/') + '/' + url
