# -*- coding: utf-8 -*-
import asyncio
import importlib
import json
import logging
import os
import random
import re
import threading
import urllib
from collections import Counter
from configparser import ConfigParser
from functools import lru_cache
from typing import List, Tuple, Dict, Any, Optional, Union
from urllib.parse import unquote

import aiohttp
from bs4 import BeautifulSoup

from core.config.platform_parsers.weibo_parser import parse_weibo

# Configure root logging once at import time; module code logs via `logger`.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.INFO
)
logger = logging.getLogger(__name__)


class DataProcessor:
    """Singleton that discovers platform parsers, loads cookie/regex config,
    and asynchronously fetches and parses social-media post pages.

    All state is shared process-wide: ``DataProcessor()`` always returns the
    same instance, and ``__init__`` is a no-op after the first call.
    """

    _instance = None
    _lock = threading.Lock()
    _parsers_loaded = False  # NOTE(review): never read anywhere visible — candidate for removal

    def __new__(cls):
        """Create/return the singleton (double-checked locking for thread safety)."""
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        """Initialise shared state; guaranteed to run only once on the singleton."""
        if self._initialized:
            return

        self._initialized = True
        self.platform_patterns = self._load_platform_patterns()
        # BUG FIX: default BasicInterpolation chokes (InterpolationSyntaxError)
        # on literal '%' sequences, which URL-encoded cookie values always
        # contain (e.g. XSRF-TOKEN=abc%3D). Cookies are opaque strings, so
        # disable interpolation entirely.
        self.cookie_config = ConfigParser(interpolation=None)
        self._safe_load_cookies()
        # BUG FIX: `self.user_agents` was assigned twice — the list returned by
        # `_load_user_agents` (with '...'-truncated UA strings) was immediately
        # overwritten by an inline list. The full list now lives in
        # `_load_user_agents` and is assigned exactly once.
        self.user_agents = self._load_user_agents()
        self.platform_parsers = {}
        self._load_platform_parsers()
        if "微博" in self.cookie_config:
            # Lazy import: avoids a circular import at module load time.
            from core.config.platform_parsers.weibo_parser import WeiboClient
            cookie_str = self.cookie_config["微博"].get("cookie", "")
            WeiboClient.validate_cookie(cookie_str)
        # Debug output goes through the module logger instead of print().
        logger.debug("已加载的解析器: %s", list(self.platform_parsers.keys()))
        logger.debug("当前文件位置: %s", __file__)

    def _load_platform_parsers(self):
        """Register a ``parse_<platform>`` callable from every ``*_parser.py``
        module under core/config/platform_parsers/, keyed by the module's
        ``PLATFORM_NAME`` (falling back to the file-name stem)."""
        parser_dir = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),  # core/ directory
                'config',
                'platform_parsers'
            )
        )
        logger.debug("解析器目录绝对路径: %s", parser_dir)
        for filename in os.listdir(parser_dir):
            if not filename.endswith('_parser.py'):
                continue
            platform = filename[:-10]  # strip the trailing '_parser.py'
            try:
                module = importlib.import_module(
                    f'core.config.platform_parsers.{platform}_parser'
                )
                # Prefer the module-declared display name when present.
                platform_name = getattr(module, 'PLATFORM_NAME', platform)
                parse_func = getattr(module, f'parse_{platform}')
                self.platform_parsers[platform_name] = parse_func
                logger.debug("成功注册平台解析器: %s", platform_name)
            except Exception as e:
                # BUG FIX: the original message printed the literal text
                # "(unknown)" instead of the file that failed to load.
                logger.error("加载失败: %s - %s", filename, e)

    def _load_platform_patterns(self) -> Dict[str, re.Pattern]:
        """Load per-platform URL regexes from ../configs/platforms.json.

        Returns an empty dict (after logging) on any failure so the processor
        keeps working — every link is then labelled "未知平台".
        """
        config_path = os.path.join(
            os.path.dirname(__file__),
            '../configs/platforms.json'
        )
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                platforms = json.load(f)
            return {p['name']: re.compile(p['pattern'], re.IGNORECASE) for p in platforms}
        except Exception as e:
            logger.error(f"加载平台匹配规则失败: {str(e)}")
            return {}

    @staticmethod
    def _load_user_agents() -> List[str]:
        """Return the pool of desktop User-Agent strings.

        BUG FIX: the previous version returned strings truncated with '...'
        and was shadowed by a second assignment in ``__init__``; the complete
        list now lives here.
        """
        return [
            # Chrome
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            # Firefox
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0',
            # Edge
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
        ]

    def _safe_load_cookies(self):
        """Load cookies.ini (next to this file) into ``self.cookie_config``.

        Tolerates a missing file, a UTF-8 BOM, undecodable bytes, and a
        missing section header; falls back to an empty [Default] section on
        any error.
        """
        config_path = os.path.join(os.path.dirname(__file__), 'cookies.ini')
        try:
            if not os.path.exists(config_path):
                self._init_empty_config()
                return

            with open(config_path, 'rb') as f:
                content = f.read()
            # Strip a UTF-8 BOM (files saved by e.g. Windows Notepad).
            if content.startswith(b'\xef\xbb\xbf'):
                content = content[3:]
            text = content.decode('utf-8', errors='replace')
            self.cookie_config.read_string(self._repair_config_text(text))
        except Exception as e:
            logger.error(f"加载Cookie配置失败: {str(e)}")
            self._init_empty_config()

    @staticmethod
    def is_likely_url(text: str) -> bool:
        """Heuristically decide whether ``text`` looks like a URL.

        Rejects empty/short strings, anything with whitespace or CJK
        characters; accepts common scheme/www prefixes, known TLD substrings,
        or a leading IPv4-like pattern.
        """
        text = text.strip().lower()

        # Obviously not a URL.
        if not text or len(text) < 6 or ' ' in text:
            return False

        # Contains CJK characters.
        if re.search(r'[\u4e00-\u9fff]', text):
            return False

        # Common URL prefixes.
        if text.startswith(('http://', 'https://', 'www.', 'ftp://')):
            return True

        # Contains a common domain suffix.
        domain_exts = ['.com', '.cn', '.net', '.org', '.io', '.gov']
        if any(ext in text for ext in domain_exts):
            return True

        # Starts with an IPv4-like pattern.
        if re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', text):
            return True

        return False

    def _repair_config_text(self, text: str) -> str:
        """Prepend a [Default] header when the INI text has no section at all,
        so ConfigParser.read_string does not raise."""
        if not any(line.strip().startswith('[') for line in text.splitlines()):
            text = '[Default]\n' + text
        return text

    def _init_empty_config(self):
        """Reset the cookie config to a single empty [Default] section."""
        self.cookie_config['Default'] = {}

    @classmethod
    def validate_links(cls, raw_links: List[str]) -> List[Tuple[str, str]]:
        """Extract, normalise and de-duplicate URLs from raw input lines.

        Returns ``(url, platform)`` tuples; platform comes from the configured
        regex rules and defaults to "未知平台".
        """
        processor = cls()
        valid_links = []
        seen_urls = set()

        for line in raw_links:
            line = line.strip()
            url_match = re.search(r'(https?://[^\s\u4e00-\u9fff]+|www\.[^\s\u4e00-\u9fff]+)', line)
            if not url_match:
                continue

            url = url_match.group(0)
            if not url.startswith(('http://', 'https://')):
                url = 'http://' + url
            url = re.sub(r'/$', '', url)  # drop a single trailing slash for dedup

            if url in seen_urls:
                continue
            seen_urls.add(url)

            # Match against the configured platform rules; first hit wins.
            platform = "未知平台"
            for plat_name, pattern in processor.platform_patterns.items():
                if pattern.search(url):
                    platform = plat_name
                    break

            valid_links.append((url, platform))
        return valid_links

    async def fetch_data(self, session, url, platform, timeout=30):
        """Fetch ``url`` with platform-specific headers and parse the response.

        Returns the parser's dict on success, or a zeroed error dict on any
        failure. NOTE(review): the original docstring claimed a retry
        mechanism, but none exists — failures return immediately.
        """
        try:
            async with session.get(
                    url,
                    headers=self._build_headers(platform),
                    timeout=aiohttp.ClientTimeout(total=timeout)
            ) as response:
                return await self.parse_response(response, platform)
        except Exception as e:
            logger.error(f"请求失败: {url} - {str(e)}")
            return {
                'status': 'error',
                'message': str(e),
                'likes': 0,
                'comments': 0,
                'shares': 0
            }

    async def parse_response(self, response: 'aiohttp.ClientResponse', platform: str) -> Dict[str, Any]:
        """Dispatch the HTTP response to the appropriate platform parser.

        Weibo parsing is synchronous, so it runs in the default executor to
        keep the event loop unblocked; all other platforms use the parsers
        registered by ``_load_platform_parsers`` (an unregistered platform
        raises KeyError and falls into the error template).
        """

        def _error_template(e: Exception) -> Dict[str, Any]:
            # Uniform error payload, matching fetch_data's failure shape.
            return {
                'status': 'error',
                'message': str(e),
                'likes': 0,
                'comments': 0,
                'shares': 0
            }

        try:
            if platform == "微博":
                # BUG FIX: ConfigParser.get(section, ..., fallback=...) still
                # raises NoSectionError when the *section* is missing; the
                # existing helper handles that case and returns ''.
                cookie_str = self._get_raw_cookie(platform)
                html = await response.text()

                # BUG FIX: get_running_loop() replaces the deprecated
                # get_event_loop() — we are always inside a running loop here.
                return await asyncio.get_running_loop().run_in_executor(
                    None,
                    lambda: parse_weibo(html, str(response.url), cookie_str)
                )
            # Other platforms.
            html = await self._get_response_content(response)
            return self.platform_parsers[platform](html)

        except Exception as e:
            logger.error(f"响应处理失败: {str(e)}")
            return _error_template(e)

    async def _get_response_content(self, response: 'aiohttp.ClientResponse') -> str:
        """Read the body and decode it safely.

        Prefers the charset declared by the response, then falls back to
        chardet sniffing; returns '' on unrecoverable failure.
        """
        try:
            content = await response.read()
            encoding = response.charset or 'utf-8'

            try:
                return content.decode(encoding, errors='replace')
            except (UnicodeDecodeError, LookupError):
                # BUG FIX: decode(..., errors='replace') never raises
                # UnicodeDecodeError — the realistic failure is LookupError
                # from a bogus server-declared charset, so catch it too;
                # the chardet fallback was previously unreachable.
                # NOTE: chardet is third-party; an ImportError here is
                # swallowed by the outer handler and yields ''.
                import chardet
                detected = chardet.detect(content)
                return content.decode(detected['encoding'] or 'utf-8', errors='replace')

        except Exception as e:
            logger.error(f"内容解码失败: {str(e)}")
            return ""

    def _build_headers(self, platform: str) -> Dict[str, str]:
        """Build browser-like request headers with the platform's Cookie injected."""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            # CONSISTENCY FIX: use the per-platform referer helper instead of
            # hard-coding Weibo's referer for every platform.
            'Referer': self._get_platform_referer(platform),
            'X-Requested-With': 'XMLHttpRequest'  # presumably required by AJAX endpoints — TODO confirm
        }

        # Inject cookies parsed from the config file (see _get_cookies).
        cookies = self._get_cookies(platform)
        if cookies:
            headers['Cookie'] = '; '.join(f'{k}={v}' for k, v in cookies.items())
            logger.debug("Cookie注入成功 | 长度: %d", len(headers['Cookie']))

        return headers

    def _get_raw_cookie(self, platform: str) -> str:
        """Return the unprocessed cookie string for ``platform`` ('' when absent)."""
        if platform in self.cookie_config:
            return self.cookie_config[platform].get('cookie', '')
        return ''

    def _get_xsrf_token(self) -> str:
        """Extract the (URL-decoded) XSRF-TOKEN from the Weibo cookie, or ''."""
        # BUG FIX: direct indexing raised KeyError when the 微博 section was
        # missing; reuse the guarded helper instead.
        cookie = self._get_raw_cookie("微博")
        match = re.search(r'XSRF-TOKEN=([^;]+)', cookie)
        return unquote(match.group(1)) if match else ''

    def _get_cookies(self, platform: str) -> Dict[str, str]:
        """Parse the platform's cookie string into a requests-style dict.

        Handles both ';' and '; ' separators and URL-decodes each value.
        """
        cookies = {}
        if self.cookie_config.has_section(platform):
            cookie_str = self.cookie_config[platform].get('cookie', '')
            for pair in cookie_str.replace('; ', ';').split(';'):
                if '=' in pair:
                    key, value = pair.split('=', 1)
                    cookies[key.strip()] = unquote(value.strip())
        return cookies

    def _get_platform_referer(self, platform: str) -> str:
        """Return the platform-specific Referer header value."""
        referer_map = {
            "微博": "https://weibo.com/",
            "抖音": "https://www.douyin.com/",
            "哔哩哔哩": "https://www.bilibili.com/",
            "知乎": "https://www.zhihu.com/",
            "小红书": "https://www.xiaohongshu.com/"
        }
        # Neutral default to avoid anti-scraping referer checks.
        return referer_map.get(platform, "https://www.google.com/")

    def _get_platform_cookie(self, platform: str) -> Optional[str]:
        """Return the platform's cookie string after a basic sanity check.

        Returns None (after logging) when the section/cookie is missing or
        lacks the keys the platform is known to require.
        """
        try:
            if not self.cookie_config.has_section(platform):
                logger.warning("未找到 %s 的Cookie配置", platform)
                return None

            cookie_str = self.cookie_config[platform].get('cookie', '').strip()
            if not cookie_str:
                logger.warning("%s 的Cookie为空", platform)
                return None

            # Minimal required keys per platform (substring check only).
            required_keys = {
                "微博": ["SUB", "XSRF-TOKEN"],
                "抖音": ["passport_csrf_token"],
                "哔哩哔哩": ["SESSDATA"]
            }

            if platform in required_keys:
                missing = [k for k in required_keys[platform] if k not in cookie_str]
                if missing:
                    logger.error("%s Cookie缺少关键字段: %s", platform, missing)
                    return None

            return cookie_str
        except Exception as e:
            logger.error("获取 %s Cookie失败: %s", platform, e)
            return None

    @staticmethod
    def _map_status_code(code: int) -> str:
        """Map an HTTP status code to a business status label."""
        status_map = {
            404: '已删除',
            403: '失效',
            401: '失效',
        }
        return status_map.get(code, '无法抓取')

    @staticmethod
    def _convert_num(text: str) -> int:
        """Convert count text like '1,234' or '1.5万' to an int; 0 on failure."""
        try:
            text = text.replace(',', '').strip()
            if '万' in text:
                # '万' = 10,000; take the leading numeric part.
                return int(float(re.search(r'[\d.]+', text).group()) * 10000)
            return int(text)
        except (ValueError, AttributeError):
            return 0


if __name__ == '__main__':
    # Manual smoke test: fetch a couple of known URLs and print the results.
    async def _smoke_test():
        processor = DataProcessor()
        print("已加载解析器:", list(processor.platform_parsers.keys()))

        sample_targets = [
            ('https://weibo.com/123', '微博'),
            ('https://www.douyin.com/video/123', '抖音')
        ]

        async with aiohttp.ClientSession() as session:
            for target_url, target_platform in sample_targets:
                outcome = await processor.fetch_data(session, target_url, target_platform)
                print(f"{target_platform} 结果:", outcome)


    asyncio.run(_smoke_test())