# -*- coding: utf-8 -*-
import requests
import re
import json
import traceback
from urllib.parse import quote

class Spider:
    """TVBox/CatVod-style spider for 蜂蜜影视 (xifanys.com).

    Implements the standard spider contract: homeContent, categoryContent,
    searchContent, detailContent and playerContent, all driven by regex
    scraping of the site's HTML. Network failures are best-effort: every
    entry point returns a well-formed (possibly empty) result dict.
    """

    # Video-card pattern for the home page and category lists:
    # captures (detail path, title, cover image URL, status tag).
    _CARD_RE = r'<a class="video-item" href="(/detail/\d+)".*?title="(.*?)".*?data-original="(.*?)".*?<span class="tag">(.*?)</span>'
    # Search results carry a <p class="desc"> instead of the tag span.
    _SEARCH_RE = r'<a class="video-item" href="(/detail/\d+)".*?title="(.*?)".*?data-original="(.*?)".*?<p class="desc">(.*?)</p>'

    def __init__(self):
        # Site root; all relative paths scraped from pages are joined to it.
        self.siteUrl = "https://xifanys.com"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Referer": self.siteUrl
        }
        # Display-name -> type_id mapping for the fixed category tabs.
        self.cateManual = {
            "电影": "1",
            "电视剧": "2",
            "动漫": "3",
            "综艺": "4"
        }

    def getName(self):
        """Return the display name of this spider."""
        return "蜂蜜影视"

    def init(self, extend=""):
        """Framework hook; no extra initialisation is needed."""
        pass

    def fetch(self, url, headers=None, params=None):
        """GET *url* and return the response body as text, or None on any failure.

        Deliberately best-effort: every caller treats None as "no data",
        so all exceptions are logged and swallowed here.
        """
        try:
            if headers is None:
                headers = self.headers
            response = requests.get(url, headers=headers, params=params, timeout=10)
            response.raise_for_status()
            return response.text
        except Exception as e:
            print(f"[ERROR] 请求失败: {url}, {str(e)}")
            return None

    def _parse_items(self, html, pattern):
        """Extract video cards from *html* with *pattern*.

        Returns a list of vod dicts (vod_id / vod_name / vod_pic /
        vod_remarks), one per regex match, in page order.
        """
        items = []
        for href, name, pic, remarks in re.findall(pattern, html, re.S):
            items.append({
                "vod_id": href,
                "vod_name": name,
                "vod_pic": pic,
                "vod_remarks": remarks
            })
        return items

    def homeContent(self, filter):
        """Return the category tabs plus up to 12 recommended items."""
        result = {
            "class": [],
            "list": []
        }

        # Fixed category tabs.
        for name, tid in self.cateManual.items():
            result["class"].append({
                "type_name": name,
                "type_id": tid
            })

        # Home-page recommendations (latest updates); empty on any failure.
        try:
            html = self.fetch(self.siteUrl)
            if html:
                result["list"] = self._parse_items(html, self._CARD_RE)[:12]
        except Exception as e:
            print(f"首页解析失败: {str(e)}")

        return result

    def categoryContent(self, tid, pg, filter, extend):
        """Return page *pg* of category *tid*.

        pagecount/limit/total are placeholders — the site exposes no
        reliable totals, so fixed values keep the client paging.
        """
        result = {
            "list": [],
            "page": pg,
            "pagecount": 10,  # placeholder: real total unknown
            "limit": 20,
            "total": 200
        }

        try:
            url = f"{self.siteUrl}/list/{tid}/{pg}"
            html = self.fetch(url)
            if html:
                result["list"] = self._parse_items(html, self._CARD_RE)
        except Exception as e:
            print(f"分类页解析失败: {str(e)}")

        return result

    def searchContent(self, key, quick, pg=1):
        """Search the site for *key* and return page *pg* of results."""
        result = {
            "list": [],
            "page": pg
        }

        try:
            url = f"{self.siteUrl}/search?keyword={quote(key)}&page={pg}"
            html = self.fetch(url)
            if html:
                result["list"] = self._parse_items(html, self._SEARCH_RE)
        except Exception as e:
            print(f"搜索失败: {str(e)}")

        return result

    def detailContent(self, ids):
        """Return detail info (metadata + play lists) for ids[0].

        Each regex is guarded individually so a single missing field no
        longer aborts the whole parse (the original crashed on
        ``re.search(...).group`` when any pattern failed to match).
        """
        vod_id = ids[0]
        result = {"list": []}

        try:
            url = f"{self.siteUrl}{vod_id}"
            html = self.fetch(url)
            if html:
                title_m = re.search(r'<h1 class="title">(.*?)</h1>', html, re.S)
                pic_m = re.search(r'class="cover" src="(.*?)"', html, re.S)
                desc_m = re.search(r'class="desc">(.*?)</div>', html, re.S)

                # Play sources: one name + episode list per source block.
                play_from = []
                play_url = []
                sources = re.findall(r'<div class="play-source">.*?<span>(.*?)</span>(.*?)</div>', html, re.S)
                for source_name, source_html in sources:
                    episodes = re.findall(r'href="(.*?)".*?>(.*?)<', source_html, re.S)
                    play_from.append(source_name)
                    # TVBox format: "name$url" joined by "#" per source.
                    play_url.append("#".join([f"{ep[1]}${ep[0]}" for ep in episodes]))

                vod = {
                    "vod_id": vod_id,
                    "vod_name": title_m.group(1) if title_m else "",
                    "vod_pic": pic_m.group(1) if pic_m else "",
                    "vod_content": desc_m.group(1) if desc_m else "",
                    "vod_play_from": "$$$".join(play_from),
                    "vod_play_url": "$$$".join(play_url)
                }
                result["list"].append(vod)
        except Exception as e:
            print(f"详情页解析失败: {str(e)}")
            print(traceback.format_exc())

        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve *id* (absolute URL or site-relative play path) to a playable URL.

        Returns parse=0 with a direct URL when one is found; otherwise falls
        back to parse=1 with the raw id so the client-side parser can try.
        (Fix: the original passed relative paths straight to requests —
        which rejects schemeless URLs — and then returned an empty dict.)
        """
        result = {}

        try:
            if id.startswith("http"):
                # Already a direct link.
                result["parse"] = 0  # 0 = play directly
                result["url"] = id
                result["header"] = json.dumps(self.headers)
            else:
                # Relative play path: resolve against the site root first.
                page_url = f"{self.siteUrl}{id}"
                html = self.fetch(page_url)
                if html:
                    # Extract the real stream URL from the play page.
                    video_m = re.search(r'var video_url = "(.*?)"', html)
                    if video_m:
                        result["parse"] = 0
                        result["url"] = video_m.group(1)
                        result["header"] = json.dumps(self.headers)
        except Exception as e:
            print(f"播放页解析失败: {str(e)}")

        if "url" not in result:
            # Fallback for any failure path: let the client parse it.
            result["parse"] = 1  # 1 = needs client-side parsing
            result["url"] = id

        return result

    def isVideoFormat(self, url):
        """Return True if *url* looks like a raw video/stream resource."""
        return any(ext in url for ext in ['.mp4', '.m3u8', '.flv'])

    def manualVideoCheck(self):
        """No manual sniffing is required for this site."""
        return False

    def localProxy(self, param):
        """Trivial local-proxy hook: echo *param* back as an MPEG-TS body."""
        return [200, "video/MP2T", {}, param]