import json
import re
import os
from bs4 import BeautifulSoup
from base.spider import Spider
import requests
import binascii

# Request headers for the leijing1.com JSON API (sent as an XHR).
# NOTE(review): the Cookie below carries hard-coded CMS session/auth tokens
# (cms_token / JSESSIONID / access+refresh tokens) — these presumably expire
# and will need to be refreshed; confirm how they are obtained.
headerx = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    'referer': "https://www.leijing1.com",
    'x-requested-with': "XMLHttpRequest",
    'accept-language': "zh-CN,zh;q=0.9,en;q=0.8",
    'Cookie': "cms_token=91643744d93a42f98e15b6784f0bd310; JSESSIONID=D92622710E6342FD11574AC11CF9DFC6; cms_accessToken=7dab23285f764ee3a0b8d84abdff2fe1; cms_refreshToken=2f4d43120dcc4c258c80d0a1cd2df559"
}
cookie = ""   # cookie is expected in list format
# Headers for the share-link resolver / 189-cloud side requests.
header = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.0",
    'Accept': "application/json;charset=UTF-8",
    # 'referer': 'https://cloud.189.cn/',
    'Accept-Encoding': "gzip"
}


class Spider(Spider):
    """CatVod-style spider for www.leijing1.com.

    Categories/search hit the site's JSON endpoints with the fixed ``headerx``
    headers; a topic's detail page contains an HTML blob from which metadata
    and a cloud-share URL are scraped.  The share URL is then resolved through
    a relay service (``nm.4688888.xyz``) into playable 189-cloud links.
    """

    # Request timeout (seconds) for every outbound HTTP call so a dead
    # endpoint cannot hang the player indefinitely.
    _TIMEOUT = 15

    def getName(self):
        """Return the display name of this source."""
        return "首页"

    def init(self, extend):
        # No per-instance initialization required.
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def homeContent(self, filter):
        """Return the static category list shown on the home screen."""
        result = {
            "class": [
                {"type_id": "42204681950354", "type_name": "电影"},
                {"type_id": "42204684250355", "type_name": "剧集"},
                {"type_id": "42212287587456", "type_name": "影视原盘"},
                {"type_id": "42204792950357", "type_name": "动漫"},
                {"type_id": "42210356650363", "type_name": "综艺"},
                {"type_id": "42204810750358", "type_name": "音乐"},
                {"type_id": "42204697150356", "type_name": "纪录"},
                {"type_id": "42317879720298", "type_name": "演唱会"},
                {"type_id": "42204811950359", "type_name": "4K音乐"},
                {"type_id": "47949118582143", "type_name": "4K8K演示区"},
            ],
        }
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, cid, pg, filter, ext):
        """List topics for category ``cid``, page ``pg``.

        The site does not report a real page count, so pagecount/total are
        large sentinels and the client simply pages until the list is empty.
        """
        videos = []
        url = "https://www.leijing1.com/queryTopicList"
        params = {
            'tagId': cid,
            'page': pg,
        }
        response = requests.get(url, params=params, headers=headerx,
                                timeout=self._TIMEOUT)
        kjson = response.json()
        for record in kjson['records']:
            videos.append({
                "vod_id": record['id'],
                "vod_name": record['title'],
                # Site provides no poster; use a fixed placeholder image.
                "vod_pic": "https://web.wya6.com/d/wuyi/yxlz/Pictures/189.jpg",
                "vod_remarks": "",
                "style": {
                    "type": "list"
                }
            })
        result = {'list': videos}
        result['page'] = pg
        result['pagecount'] = 999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def parse_video_info(self, text):
        """Parse a topic's HTML blob and extract video metadata.

        Returns a dict with keys ``year``/``director``/``actor``/``area``/
        ``remark``/``content`` (empty strings when a field is absent).
        Later extractors may deliberately overwrite earlier ones (e.g. a
        类别 line overrides the file-size remark), matching the page layout.
        """
        video_info = {
            'year': '',
            'director': '',
            'actor': '',
            'area': '',
            'remark': '',
            'content': '',
        }

        # Nothing to parse — return the empty defaults.
        if not text or text.strip() == '':
            return video_info

        soup = BeautifulSoup(text, 'html.parser')

        # Synopsis lives in a div with a distinctive pink background style.
        intro_div = soup.find('div', style=lambda x: x and 'background-color: rgba(255, 230, 234, 0.6)' in x)
        if intro_div:
            video_info['content'] = intro_div.get_text().replace('◎剧情简介：', '').strip()

        # The bold title div may carry "(YYYY)" and "#tag" fragments.
        title_div = soup.find('div', style=lambda x: x and 'font-weight: bold' in x)
        if title_div:
            title_text = title_div.get_text().strip()
            year_match = re.search(r'\((\d{4})\)', title_text)
            if year_match:
                video_info['year'] = year_match.group(1)

            tags_match = re.search(r'#([^#\s]+)', title_text)
            if tags_match:
                video_info['remark'] = tags_match.group(1)

        # A <code> element, when present, holds the file size (📂-prefixed).
        size_div = soup.find('code')
        if size_div:
            video_info['remark'] = size_div.get_text().replace('📂', '').strip()

        # Fall back to plain-text regex scans over the whole document.
        text_content = soup.get_text()

        if not video_info['year']:
            year_match = re.search(r'\((\d{4})\)', text_content)
            if year_match:
                video_info['year'] = year_match.group(1)

        # 产地 (region of origin).
        area_match = re.search(r'产\s*地[：:]?\s*([^\s◎]+)', text_content)
        if area_match:
            video_info['area'] = area_match.group(1).strip()

        # 导演 (director): join CJK/Latin name tokens with " / ".
        director_match = re.search(r'导\s*演[：:]?\s*([^◎]+)', text_content)
        if director_match:
            director_names = re.findall(r'[\u4e00-\u9fa5a-zA-Z·]+',
                                        director_match.group(1).strip())
            video_info['director'] = ' / '.join(director_names)

        # 演员 (cast), same tokenization as the director field.
        actor_match = re.search(r'演\s*员[：:]?\s*([^◎]+)', text_content)
        if actor_match:
            actor_names = re.findall(r'[\u4e00-\u9fa5a-zA-Z·]+',
                                     actor_match.group(1).strip())
            video_info['actor'] = ' / '.join(actor_names)

        # 类别 (genre) overrides any earlier remark, per page precedence.
        category_match = re.search(r'类\s*别[：:]?\s*([^◎]+)', text_content)
        if category_match:
            video_info['remark'] = category_match.group(1).strip()

        return video_info

    def detailContent(self, ids):
        """Build the detail record for topic ``ids[0]``.

        Fetches the topic HTML, extracts metadata and the first share link,
        then resolves that link through the relay into "name(size)$url"
        entries joined with '#'. Resolution failures degrade to an empty
        play list rather than raising.
        """
        videos = []
        did = ids[0]
        url = "https://www.leijing1.com/queryTopicContent"
        params = {
            'topicId': did,
        }
        response = requests.get(url, params=params, headers=headerx,
                                timeout=self._TIMEOUT)
        detail = response.json()['content']

        video_info = self.parse_video_info(detail)

        # First <a href> in the blob is the cloud share link; fall back to a
        # bare URL regex if the page had no anchor tag.
        soup = BeautifulSoup(detail, 'html.parser')
        first_url = soup.find('a', href=True)
        if first_url:
            share_url = first_url['href']
        else:
            url_match = re.search(r'https?://[^\s"\']+', detail)
            share_url = url_match.group() if url_match else ''

        final_result = ""
        if share_url:
            resolve_url = 'https://nm.4688888.xyz/vod/ty_share.php?url=' + share_url
            res = requests.get(url=resolve_url, headers=header,
                               timeout=self._TIMEOUT)
            # Best-effort: any bad/missing JSON from the relay yields an
            # empty play list instead of crashing the detail page.
            try:
                rjson = res.json()
                if 'data' in rjson and rjson['data']:
                    formatted_items = []
                    for item in rjson['data']:
                        name = item['name'].replace(' ', '')
                        size = item['size'].replace(' ', '')
                        file_url = item['url'].replace(' ', '')
                        # Entry format: name(size)$play-url ; entries joined by '#'.
                        formatted_items.append(
                            f"{name}({size})$https://www.cloud189.cn/hex={file_url}")
                    final_result = '#'.join(formatted_items)
            except Exception:
                final_result = ""

        videos.append({
            "vod_id": did,
            "vod_actor": video_info['actor'],
            "vod_director": video_info['director'],
            "vod_content": video_info['content'].replace('\n', '') if video_info['content'] else "暂无简介",
            "vod_remarks": video_info['remark'],
            "vod_year": video_info['year'],
            "vod_area": video_info['area'],
            "vod_play_from": '天翼',
            "vod_play_url": final_result
        })

        return {'list': videos}

    def playerContent(self, flag, id, vipFlags):
        """Hand the already-resolved URL to the client-side parser."""
        return {'jx': 1, 'parse': 1, 'url': id, 'header': ''}

    def searchContentPage(self, key, quick, page):
        """Search the site for ``key`` and return one page of results."""
        videos = []
        url = "https://www.leijing1.com/search"
        params = {
            'keyword': key,
            'page': page,
        }
        response = requests.get(url, params=params, headers=headerx,
                                timeout=self._TIMEOUT)
        kjson = response.json()
        for hit in kjson['searchResultPage']['records']:
            topic = hit['topic']
            # Titles come back with <B> highlight markup around the match.
            name = topic['title'].replace('</B>', '').replace('<B>', '')
            videos.append({
                "vod_id": topic['id'],
                "vod_name": name,
                "vod_pic": 'https://web.wya6.com/d/wuyi/yxlz/Pictures/189.jpg',
                "vod_remarks": topic['tagName'],
                "style": {
                    "type": "list"
                }
            })
        result = {'list': videos}
        result['page'] = page
        result['pagecount'] = 999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def searchContent(self, key, quick, pg="1"):
        # Forward the requested page instead of always fetching page 1,
        # so search pagination actually works.
        return self.searchContentPage(key, quick, pg)

    def localProxy(self, params):
        """Dispatch local-proxy requests by their 'type' field."""
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        elif params['type'] == "media":
            return self.proxyMedia(params)
        elif params['type'] == "ts":
            return self.proxyTs(params)
        return None

