# -*- coding: utf-8 -*-
"""
KNVOD网站视频爬虫
用于抓取https://www.knvod.com网站的视频内容
使用时请遵守网站规则及相关法律法规
"""

import sys
import time
import json
import re
from urllib.parse import quote, urlparse
from Crypto.Hash import SHA256

sys.path.append("..")
from base.spider import Spider
from pyquery import PyQuery as pq


class Spider(Spider):
    """Crawler for the KNVOD video site (https://www.knvod.com).

    Implements the standard spider interface used by this framework
    (home page, category listing, detail page, search, player resolution).
    NOTE: the class deliberately shadows the imported ``base.spider.Spider``
    name — that is the convention this spider framework expects.
    """

    def __init__(self):
        super().__init__()
        # Site entry point; all relative links are resolved against it.
        self.host = 'https://www.knvod.com'
        # Browser-like headers: the site checks Origin/Referer and a
        # CDN verification cookie before serving content.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Sec-Fetch-Dest': 'document',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
            'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
            'sec-ch-ua-platform': '"macOS"',
            'Origin': self.host,
            'Referer': f"{self.host}/",
            'Cookie': 'X-Robots-Tag=CDN-VERIFY'
        }

    def init(self, extend=""):
        """Framework hook: no extra initialisation is required."""
        pass

    def getName(self):
        """Return the human-readable spider name."""
        return "KNVOD爬虫"

    def isVideoFormat(self, url):
        """Return True when *url* contains a known video file extension.

        Substring (not suffix) matching is deliberate: playable URLs
        frequently carry query strings after the extension.
        """
        video_formats = ('.mp4', '.m3u8', '.flv', '.avi', '.mov', '.mkv')
        return any(fmt in url for fmt in video_formats)

    def manualVideoCheck(self):
        """Framework hook: no manual video check is needed."""
        return False

    def action(self, action):
        """Framework hook: no custom actions are supported."""
        pass

    def destroy(self):
        """Framework hook: nothing to clean up."""
        pass

    def homeContent(self, filter):
        """Build the home page payload: category list plus featured videos.

        Returns a dict with ``class`` (categories) and ``list`` (videos);
        on any failure both are empty so the caller never crashes.
        """
        try:
            response = self.fetch(self.host, headers=self.headers)
            data = self.get_pyquery(response.text)

            classes = []
            # Categories live in the ".head-more.box" nav; keep only the
            # "/show..." links and use their numeric id as type_id.
            for item in data('.head-more.box a').items():
                href = item.attr('href')
                if href and '/show' in href:
                    match = re.search(r'\d+', href)
                    if match:
                        classes.append({
                            'type_name': item.text(),
                            'type_id': match.group(0)
                        })

            return {
                'class': classes,
                'list': self.get_video_list(data('.border-box.public-r .public-list-div'))
            }
        except Exception as e:
            print(f"获取首页内容失败: {str(e)}")
            return {'class': [], 'list': []}

    def homeVideoContent(self):
        """Home page videos are already delivered by homeContent()."""
        return []

    def categoryContent(self, tid, pg, filter, extend):
        """Return one page of videos for category *tid*.

        ``pagecount``/``total`` are sentinels: the site does not expose
        real totals, so large values keep the client paging until a page
        comes back empty.
        """
        try:
            url = f"{self.host}/show/{tid}--------{pg}---/"
            response = self.fetch(url, headers=self.headers)
            data = self.get_pyquery(response.text)

            return {
                'list': self.get_video_list(data('.border-box.public-r .public-list-div')),
                'page': pg,
                'pagecount': 9999,
                'limit': 90,
                'total': 999999
            }
        except Exception as e:
            print(f"获取分类内容失败: {str(e)}")
            return {'list': [], 'page': pg, 'pagecount': 0, 'limit': 0, 'total': 0}

    def detailContent(self, ids):
        """Fetch the detail page for ids[0] and build the vod record.

        Play sources and episodes are joined with the framework separators:
        "$$$" between sources, "#" between episodes, "$" between an
        episode name and its URL.
        """
        try:
            url = f"{self.host}/list/{ids[0]}/"
            response = self.fetch(url, headers=self.headers)
            data = self.get_pyquery(response.text)

            v = data('.detail-info.lightSpeedIn .slide-info')
            vod = {
                'vod_id': ids[0],
                # Page title is "<name>-<site suffix>"; keep the name part.
                'vod_name': data('head title').text().split('-')[0].strip(),
                'vod_year': v.eq(-1).text().split(':', 1)[-1].strip() if v.eq(-1) else '',
                'vod_remarks': v.eq(0).text() if v.eq(0) else '',
                'vod_actor': v.eq(3).text().split(':', 1)[-1].strip() if len(v) > 3 else '',
                'vod_director': v.eq(2).text().split(':', 1)[-1].strip() if len(v) > 2 else '',
                'vod_content': data('.switch-box #height_limit').text().strip(),
                'vod_play_from': '',
                'vod_play_url': ''
            }

            # Source tabs and episode lists are parallel collections;
            # iterate only over the pairs present in both.
            np = data('.anthology.wow.fadeInUp')
            ndata = np('.anthology-tab .swiper-wrapper .swiper-slide')
            pdata = np('.anthology-list .anthology-list-box ul')

            play, names = [], []
            for i in range(min(len(ndata), len(pdata))):
                n = ndata.eq(i)('a')
                n('span').remove()  # drop the episode-count badge from the tab label
                names.append(n.text().strip())

                vs = []
                for v_item in pdata.eq(i)('li').items():
                    vs.append(f"{v_item.text()}${v_item('a').attr('href')}")

                play.append('#'.join(vs))

            if names and play:
                vod["vod_play_from"] = "$$$".join(names)
                vod["vod_play_url"] = "$$$".join(play)

            return {"list": [vod]}
        except Exception as e:
            print(f"获取详情内容失败: {str(e)}")
            return {"list": []}

    def searchContent(self, key, quick, pg="1"):
        """Search via the site's AJAX suggest endpoint (returns JSON)."""
        try:
            timestamp = int(time.time() * 1000)
            # URL-encode the keyword: raw CJK/spaces would corrupt the query.
            url = f"{self.host}/index.php/ajax/suggest?mid=1&wd={quote(key)}&limit=9999&timestamp={timestamp}"
            response = self.fetch(url, headers=self.headers)
            data = response.json()

            videos = [{
                'vod_id': item.get('id', ''),
                'vod_name': item.get('name', ''),
                'vod_pic': item.get('pic', '')
            } for item in data.get('list', [])]

            return {'list': videos, 'page': pg}
        except Exception as e:
            print(f"搜索失败: {str(e)}")
            return {'list': [], 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Resolve the real playback URL for play-page path *id*.

        Pipeline: load the play page, pull the JSON player config from an
        inline <script>, and — when the URL is not a direct media file —
        drive the site's external resolver (see _resolve_encrypted_url).
        Falls back to parse=1 (client sniffs the raw page) on any failure.
        """
        # Mobile UA returned to the player; defined *before* the try block
        # so the error fallback below can always reference it.
        headers = {
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.8 Mobile/15E148 Safari/604.1',
            'Origin': self.host
        }
        try:
            response = self.fetch(f"{self.host}{id}", headers=self.headers)
            data = self.get_pyquery(response.text)

            # The player config is assigned in an inline script
            # ("var player_xx = {...}"); take the JSON after the first "=".
            scripts = data('.player-box .player-left script')
            jsdata = None
            for script in scripts.items():
                script_text = script.text()
                if '=' in script_text and ('url' in script_text or 'from' in script_text):
                    try:
                        js_str = script_text.split('=', 1)[-1].strip()
                        jsdata = json.loads(js_str)
                        break
                    except ValueError:
                        continue  # not JSON; try the next script tag

            if not jsdata:
                raise Exception("未找到播放信息")

            url = jsdata.get('url', '')

            # Indirect URL: run it through the site's resolver chain.
            if not re.search(r'\.m3u8|\.mp4', url):
                url = self._resolve_encrypted_url(data, jsdata, url)

            if not url:
                raise Exception('未找到播放地址')

            return {"parse": 0, "url": url, "header": headers}

        except Exception as e:
            print(f'获取播放内容失败: {e}')
            # Fall back to handing the raw play page to the client player.
            return {"parse": 1, "url": f"{self.host}{id}", "header": headers}

    def _resolve_encrypted_url(self, data, jsdata, url):
        """Resolve an indirect player URL via the site's external resolver.

        *data* is the PyQuery of the play page, *jsdata* the inline player
        config, *url* the encrypted value from it.  Returns the decrypted
        media URL ('' when the resolver answers without one); raises when
        any link of the chain is missing.
        """
        from_val = jsdata.get('from', '')
        if not from_val:
            raise Exception("未找到from参数")

        # The per-source player script embeds the resolver endpoint
        # ("http...url=") that the real page would redirect through.
        jx_url = f"{self.host}/static/player/{from_val}.js"
        jx_content = self.fetch(jx_url, headers=self.headers).text

        jx_match = re.search(r'http.*?url=', jx_content)
        if not jx_match:
            raise Exception('未找到解密URL')

        parsed_url = urlparse(jx_match.group())
        jx_host = f"{parsed_url.scheme}://{parsed_url.netloc}"

        title = data('head title').eq(0).text().split('-')[0].strip()
        next_url = f"{self.host.split('//')[-1]}{jsdata.get('link_next', '')}" if jsdata.get('link_next') else ''

        # Load the resolver page; it defines "var config = {...}" which we
        # must POST back (with a time-keyed hash) to obtain the media URL.
        cd_url = f"{jx_match.group()}{url}&next=//{next_url}&title={quote(title)}"
        cd_content = self.fetch(cd_url, headers=self.headers).text

        config_match = re.search(r'var\s+config\s*=\s*(\{[\s\S]*?\})', cd_content)
        if not config_match:
            raise Exception('未找到配置信息')

        # The inline JS object may carry a trailing comma json rejects.
        config_str = re.sub(r',\s*}(?=\s*$)', '}', config_match.group(1))
        config = json.loads(config_str)

        # Key = sha256(<hour-aligned unix time> + "knvod"); 'next' must
        # not be echoed back or the resolver rejects the request.
        config.update({'key': self.sha256(f"{self.get_hourly_timestamp()}knvod")})
        config.pop('next', None)

        post_headers = {
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.8 Mobile/15E148 Safari/604.1',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Cache-Control': 'no-cache',
            'DNT': '1',
            'Origin': jx_host,
            'Pragma': 'no-cache',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-Storage-Access': 'active',
            'X-Requested-With': 'XMLHttpRequest',
            'Content-Type': 'application/json'
        }

        jd_response = self.post(f"{jx_host}/post.php", headers=post_headers, data=json.dumps(config))
        # utf-8-sig strips the BOM the resolver sometimes prepends.
        jd_data = json.loads(jd_response.content.decode('utf-8-sig'))
        return jd_data.get('knvod', '')

    def localProxy(self, param):
        """Framework hook: local proxying is not used."""
        pass

    def get_video_list(self, data):
        """Convert a PyQuery selection of list cards into vod dicts.

        Cards without an href or numeric id are skipped; relative image
        URLs are made absolute against self.host.
        """
        videos = []
        for item in data.items():
            href = item('a').attr('href')
            if not href:
                continue

            # The numeric part of the href is the vod id.
            id_match = re.search(r'\d+', href)
            if not id_match:
                continue
            vid = id_match.group(0)

            # Prefer the lazy-loaded image; fall back to the plain src.
            img = item('img').attr('data-src')
            if img and 'url=' in img and 'http' not in img:
                img = f'{self.host}{img}'
            elif not img:
                img = item('img').attr('src')
                if img and not img.startswith('http'):
                    img = f'{self.host}{img}'

            # Title from the link attribute, else from the card caption.
            title = item('a').attr('title')
            if not title:
                title = item('.public-list-pt').text() or item('.public-list-ptb').text()

            remarks = item('.public-prt').text() or item('.public-list-prb').text()

            videos.append({
                'vod_id': vid,
                'vod_name': title,
                'vod_pic': img,
                'vod_remarks': remarks
            })

        return videos

    def get_pyquery(self, data):
        """Parse *data* into a PyQuery document, degrading gracefully.

        Retries with explicit UTF-8 bytes on a parse failure, and as a
        last resort returns an empty document so callers never crash.
        """
        try:
            return pq(data)
        except Exception as e:
            print(f"解析HTML失败: {str(e)}")
            try:
                return pq(data.encode('utf-8'))
            except Exception:
                return pq('<html></html>')  # empty doc keeps callers safe

    def get_hourly_timestamp(self):
        """Return the current unix time truncated to the start of the hour."""
        current_time = int(time.time())
        return current_time - (current_time % 3600)

    def sha256(self, text):
        """Return the hex SHA-256 digest of *text* (UTF-8 encoded)."""
        import hashlib  # stdlib equivalent of the pycryptodome SHA256 used before
        return hashlib.sha256(text.encode()).hexdigest()


# Manual smoke test: only runs when the module is executed directly.
if __name__ == '__main__':
    crawler = Spider()
    print("爬虫初始化完成")