import json
import re
import os
from bs4 import BeautifulSoup
from base.spider import Spider
import requests
import binascii

# Headers used for requests against the forum site itself (www.suenen.com).
# NOTE(review): the referer points at hmxz.org while requests target
# suenen.com — presumably mirrored domains; confirm against the site.
# The x-requested-with marker presumably makes search.htm answer with JSON
# (searchContentPage parses the response as JSON) — TODO confirm.
headerx = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    'referer': "https://www.hmxz.org/",
    'x-requested-with': "XMLHttpRequest",
    'accept-language': "zh-CN,zh;q=0.9,en;q=0.8",
}
# Session cookie placeholder; not referenced by the visible code.
cookie = ""
# Headers used for the share-link resolving API (nm.4688888.xyz).
header = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.0",
    'Accept': "application/json;charset=UTF-8",
    'Accept-Encoding': "gzip"
}


class Spider(Spider):
    """Spider for www.suenen.com forum threads.

    Lists threads as "videos", extracts a net-disk share link from a thread
    page and resolves it into playable entries through an external API
    (nm.4688888.xyz).

    NOTE(review): the class intentionally shadows the imported base ``Spider``
    name; this is the convention used by this plugin framework.
    """

    # Hosts whose links are treated as net-disk share URLs.
    PAN_DOMAINS = ('cloud.189.cn', 'pan.baidu.com', 'aliyundrive.com', 'quark.cn')
    # Fallback cover image used when no avatar can be resolved.
    DEFAULT_PIC = "https://www.hmxz.org/upload/avatar/000/2.png?1700069882"
    # Timeout (seconds) for every outbound HTTP request, so a stalled host
    # cannot hang the player indefinitely.
    TIMEOUT = 15

    def getName(self):
        # Display name of this source in the app.
        return "首页"

    def init(self, extend):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def homeContent(self, filter):
        """Return the static category (tag) list shown on the home page."""
        return {
            "class": [
                {"type_id": "100_0_0_0", "type_name": "影视"},
                {"type_id": "102_0_0_0", "type_name": "音频"},
                {"type_id": "104_0_0_0", "type_name": "学习"},
            ],
        }

    def homeVideoContent(self):
        pass

    def categoryContent(self, cid, pg, filter, ext):
        """List threads of forum 1 filtered by tag id.

        Args:
            cid: tag id string such as "100_0_0_0" (see homeContent).
            pg: 1-based page number (string or int).
            filter, ext: unused; kept for framework compatibility.

        Returns:
            dict with 'list' of vod entries plus fixed paging metadata
            (the real page count is unknown, so large dummies are used).
        """
        videos = []

        url = f"https://www.suenen.com/forum-1-{pg}.htm?tagids={cid}"
        response = requests.get(url, headers=headerx, timeout=self.TIMEOUT)
        soup = BeautifulSoup(response.text, 'html.parser')
        video_items = soup.find_all('li', class_='media thread tap')

        for item in video_items:
            vod_id = item.get('data-href', '')
            tid = item.get('data-tid', '')

            # Thread title: the first <a> WITHOUT a class attribute inside the
            # subject container (class-bearing anchors are badges/tags).
            vod_name = "未知标题"
            subject_div = item.find('div', class_='subject') or item.find('div', class_='style3_subject')
            if subject_div:
                name_link = subject_div.find('a')
                if name_link and not name_link.get('class'):
                    vod_name = name_link.get_text(strip=True)
                else:
                    for link in subject_div.find_all('a'):
                        if not link.get('class'):
                            vod_name = link.get_text(strip=True)
                            break

            # Poster avatar doubles as the cover picture; relative paths are
            # made absolute against the site root.
            vod_pic = self.DEFAULT_PIC
            avatar_img = item.find('img', class_='avatar-3')
            if avatar_img and avatar_img.get('src'):
                avatar_src = avatar_img.get('src')
                if avatar_src.startswith('http'):
                    vod_pic = avatar_src
                else:
                    vod_pic = "https://www.suenen.com/" + avatar_src

            username = "未知用户"
            username_span = item.find('span', class_='haya-post-info-username')
            if username_span:
                username_link = username_span.find('a')
                if username_link:
                    username = username_link.get_text(strip=True)

            date_span = item.find('span', class_='date')
            publish_time = date_span.get_text(strip=True) if date_span else "时间未知"

            # View counter: try a span whose parent markup mentions the eye
            # icon, then fall back to the icon element's parent text.
            view_span = item.find('span', string=lambda text: text and 'icon-eye' in str(text.parent))
            if view_span:
                view_count = view_span.get_text(strip=True).replace(' ', '')
            else:
                view_icon = item.find('i', class_='icon-eye')
                if view_icon and view_icon.parent:
                    view_count = view_icon.parent.get_text(strip=True).replace(' ', '')
                else:
                    view_count = "0"

            category_badge = item.find('a', class_='badge badge-pill badge-danger')
            category = category_badge.get_text(strip=True) if category_badge else ""

            videos.append({
                "vod_id": "https://www.suenen.com/" + vod_id,
                "vod_name": vod_name,
                "vod_pic": vod_pic,
                "vod_remarks": f"{publish_time} | 浏览:{view_count}",
                "vod_author": username,
                "vod_category": category,
                "vod_tid": tid,
                "style": {
                    "type": "list"
                }
            })

        return {
            'list': videos,
            'page': pg,
            'pagecount': 999,
            'limit': 90,
            'total': 999999
        }

    def parse_video_info(self, text):
        """Parse movie metadata out of an HTML fragment.

        Extracts year, director, actors, area, a remark and the synopsis from
        a "◎"-delimited description block. ``remark`` is deliberately
        overwritten by later, higher-priority matches (title tag, then file
        size, then category).

        NOTE(review): not called by any visible code in this file; kept for
        API compatibility.
        """
        video_info = {
            'year': '',
            'director': '',
            'actor': '',
            'area': '',
            'remark': '',
            'content': '',
        }

        if not text or text.strip() == '':
            return video_info

        soup = BeautifulSoup(text, 'html.parser')

        # Synopsis lives in a specifically-styled pink box.
        intro_div = soup.find('div', style=lambda x: x and 'background-color: rgba(255, 230, 234, 0.6)' in x)
        if intro_div:
            video_info['content'] = intro_div.get_text().replace('◎剧情简介：', '').strip()

        title_div = soup.find('div', style=lambda x: x and 'font-weight: bold' in x)
        if title_div:
            title_text = title_div.get_text().strip()
            year_match = re.search(r'\((\d{4})\)', title_text)
            if year_match:
                video_info['year'] = year_match.group(1)

            tags_match = re.search(r'#([^#\s]+)', title_text)
            if tags_match:
                video_info['remark'] = tags_match.group(1)

        # A <code> tag carries the file size, e.g. "📂 1.5GB".
        size_div = soup.find('code')
        if size_div:
            video_info['remark'] = size_div.get_text().replace('📂', '').strip()

        text_content = soup.get_text()

        if not video_info['year']:
            year_match = re.search(r'\((\d{4})\)', text_content)
            if year_match:
                video_info['year'] = year_match.group(1)

        area_match = re.search(r'产\s*地[：:]?\s*([^\s◎]+)', text_content)
        if area_match:
            video_info['area'] = area_match.group(1).strip()

        director_match = re.search(r'导\s*演[：:]?\s*([^◎]+)', text_content)
        if director_match:
            director_names = re.findall(r'[\u4e00-\u9fa5a-zA-Z·]+', director_match.group(1).strip())
            video_info['director'] = ' / '.join(director_names)

        actor_match = re.search(r'演\s*员[：:]?\s*([^◎]+)', text_content)
        if actor_match:
            actor_names = re.findall(r'[\u4e00-\u9fa5a-zA-Z·]+', actor_match.group(1).strip())
            video_info['actor'] = ' / '.join(actor_names)

        category_match = re.search(r'类\s*别[：:]?\s*([^◎]+)', text_content)
        if category_match:
            video_info['remark'] = category_match.group(1).strip()

        return video_info

    def _find_share_url(self, soup, html_content):
        """Return the first net-disk share link in the page, or ''.

        Anchor hrefs are scanned first; if none matches, fall back to a raw
        regex scan of the whole HTML (links sometimes appear as plain text).
        """
        for link in soup.find_all('a', href=True):
            href = link['href']
            if any(domain in href for domain in self.PAN_DOMAINS):
                return href

        url_pattern = r'https?://[^\s<>"\'{}|\\^`\[\]]+'
        for url_match in re.findall(url_pattern, html_content):
            if any(domain in url_match for domain in self.PAN_DOMAINS):
                return url_match
        return ""

    def _resolve_share(self, share_url):
        """Resolve a share URL into 'name(size)$play_url' items joined by '#'.

        Returns '' when the resolver API yields no data or non-JSON output.
        """
        # `api_url` instead of the original builtin-shadowing `id`.
        api_url = 'https://nm.4688888.xyz/vod/ty_share.php?url=' + share_url
        res = requests.get(url=api_url, headers=header, timeout=self.TIMEOUT)
        try:
            kjson = res.json()
        except ValueError:
            # Resolver returned non-JSON (error page); treat as unresolvable
            # instead of crashing the whole detail view.
            return ""

        formatted_items = []
        if 'data' in kjson and kjson['data']:
            for item in kjson['data']:
                name = item['name'].replace(' ', '')
                size = item['size'].replace(' ', '')
                file_url = item['url'].replace(' ', '')
                formatted_items.append(f"{name}({size})$https://cloud189.cn/hex={file_url}")
        return '#'.join(formatted_items)

    def detailContent(self, ids):
        """Build the detail view for one thread.

        Args:
            ids: list whose first element is the absolute thread URL.

        Returns:
            dict with a single-entry 'list'; vod_play_url holds resolved
            episodes ('' when no share link was found or resolving failed).
        """
        did = ids[0]

        response = requests.get(did, headers=headerx, timeout=self.TIMEOUT)
        html_content = response.text
        soup = BeautifulSoup(html_content, 'html.parser')

        title = ''
        title_elem = soup.find('h1') or soup.find('div', class_='thread-title') or soup.find('title')
        if title_elem:
            title = title_elem.get_text(strip=True)

        content = ''
        content_elem = soup.find('div', class_='message') or soup.find('div', class_='content') or soup.find('div', class_='post-content')
        if content_elem:
            content = content_elem.get_text(strip=True)

        share_url = self._find_share_url(soup, html_content)
        final_result = self._resolve_share(share_url) if share_url else ""

        video = {
            "vod_id": did,
            "vod_name": title,
            "vod_actor": '',
            "vod_director": '',
            "vod_content": content,
            "vod_remarks": '',
            "vod_year": '',
            "vod_area": '',
            "vod_play_from": '至臻天翼',
            "vod_play_url": final_result
        }
        return {'list': [video]}

    def playerContent(self, flag, id, vipFlags):
        # The app-side parser handles the cloud189.cn/hex= URL; just hand it over.
        return {'jx': 1, 'parse': 1, 'url': id, 'header': ''}

    def searchContentPage(self, key, quick, page):
        """Search the forum; only threads in the "天翼" forum are returned.

        search.htm answers JSON (code "0", results under "message") when
        queried with the XHR headers. Non-JSON responses (e.g. a login wall)
        degrade to an empty result instead of raising.
        """
        videos = []
        url = f"https://www.suenen.com/search.htm?keyword={key}"
        response = requests.get(url, headers=headerx, timeout=self.TIMEOUT)
        try:
            data = response.json()
        except ValueError:
            data = {}

        if data.get("code") == "0" and "message" in data:
            for item in data["message"]:
                if item.get("forumname") != "天翼":
                    continue

                vod_id = item.get("url", "")
                # Strip the keyword-highlight markup from the title.
                vod_name = item.get("subject", "")
                if "<span class=\"text-danger\">" in vod_name:
                    vod_name = vod_name.replace("<span class=\"text-danger\">", "").replace("</span>", "")

                username = item.get("username", "未知用户")
                user_avatar_url = item.get("user_avatar_url", "")
                create_date_fmt = item.get("create_date_fmt", "时间未知")
                views = item.get("views", "0")

                taglist = item.get("taglist", [])
                category = ""
                if taglist:
                    category = "/".join(tag.get("name", "") for tag in taglist)

                vod_pic = self.DEFAULT_PIC
                if user_avatar_url:
                    if user_avatar_url.startswith('http'):
                        vod_pic = user_avatar_url
                    else:
                        vod_pic = "https://www.suenen.com/" + user_avatar_url

                videos.append({
                    "vod_id": "https://www.suenen.com/" + vod_id,
                    "vod_name": vod_name,
                    "vod_pic": vod_pic,
                    "vod_remarks": f"{create_date_fmt} | 浏览:{views}",
                    "vod_author": username,
                    "vod_category": category,
                    "vod_tid": item.get("tid", ""),
                    "style": {
                        "type": "list"
                    }
                })

        return {
            'list': videos,
            'pagecount': 999,
            'limit': 90,
            'total': 999999
        }

    def searchContent(self, key, quick, pg="1"):
        # The site search endpoint is not paginated; always fetch page 1.
        return self.searchContentPage(key, quick, '1')

    def localProxy(self, params):
        """Dispatch proxy requests by type; handlers live in the base class."""
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        elif params['type'] == "media":
            return self.proxyMedia(params)
        elif params['type'] == "ts":
            return self.proxyTs(params)
        return None

