import json
import logging
import re
from typing import Dict, List, Any, Optional
from urllib.parse import urljoin

import aiohttp

from app.core.config import settings

# Module-level logger named after this module so log output can be
# filtered/configured per package via the standard logging hierarchy.
logger = logging.getLogger(__name__)

class ModelMgtClient:
    """
    Client for the model_mgt LLM management system.

    Talks to the model_mgt service over its unified HTTP API to check
    service health, list available models, run chat completions, and
    perform LLM-based PUA (manipulative-language) detection with a
    rule-based fallback when the LLM is unavailable.
    """

    # Rule table for the fallback detector used when the LLM call fails.
    # Patterns are compiled once at class-definition time instead of being
    # rebuilt (and `re` re-imported) on every call.
    _FALLBACK_PATTERNS: List[Dict[str, Any]] = [
        {
            "pattern": re.compile(r"你太敏感了|你想太多了|我没那个意思", re.IGNORECASE),
            "category": "gaslighting",
            "description": "煤气灯效应，让对方怀疑自己的感受和判断",
            "severity": 0.7,
        },
        {
            "pattern": re.compile(r"我从没见过像你这么好的人|你是我遇到的最特别的人", re.IGNORECASE),
            "category": "love_bombing",
            "description": "爱情轰炸，过度赞美以快速建立亲密感",
            "severity": 0.5,
        },
    ]

    def __init__(self):
        """Initialize the client from application settings, with hard-coded defaults."""
        self.base_url = settings.MODEL_MGT_BASE_URL or "http://127.0.0.1:8001"
        self.timeout = aiohttp.ClientTimeout(total=60)  # 60-second overall request timeout
        self.default_model = settings.MODEL_MGT_DEFAULT_MODEL or "deepseek-r1"
        self.default_scenario = settings.MODEL_MGT_DEFAULT_SCENARIO or "general_chat"
        self.default_strategy = settings.MODEL_MGT_DEFAULT_STRATEGY or "响应最快"
        logger.info("初始化ModelMgtClient: %s", self.base_url)

    def _build_url(self, endpoint: str) -> str:
        """
        Join an absolute endpoint path onto the configured base URL.

        Unlike ``urljoin(base, "/path")``, this preserves any path prefix
        present in the base URL (urljoin discards the base's path when the
        second argument starts with "/").

        Args:
            endpoint: Absolute API path, e.g. "/api/v1/health".

        Returns:
            str: Full request URL.
        """
        return f"{self.base_url.rstrip('/')}{endpoint}"

    async def health_check(self) -> Dict[str, Any]:
        """
        Check the health status of the model_mgt service.

        Returns:
            Dict[str, Any]: The service's health payload on HTTP 200;
            otherwise ``{"status": "unhealthy", "error": ...}``. Network
            errors are caught and reported the same way, never raised.
        """
        url = self._build_url("/api/v1/health")
        try:
            async with aiohttp.ClientSession(timeout=self.timeout) as session:
                async with session.get(url) as response:
                    if response.status == 200:
                        return await response.json()
                    logger.error(f"Health check failed: {response.status}")
                    return {"status": "unhealthy", "error": f"HTTP {response.status}"}
        except Exception as e:  # connectivity/timeout problems must not propagate
            logger.error(f"Health check error: {str(e)}")
            return {"status": "unhealthy", "error": str(e)}

    async def get_models(self) -> List[Dict[str, Any]]:
        """
        Fetch the list of available models.

        Returns:
            List[Dict[str, Any]]: The "data" list from the service response,
            or an empty list on any HTTP or network failure.
        """
        url = self._build_url("/api/v1/models")
        try:
            async with aiohttp.ClientSession(timeout=self.timeout) as session:
                async with session.get(url) as response:
                    if response.status == 200:
                        result = await response.json()
                        return result.get("data", [])
                    logger.error(f"Get models failed: {response.status}")
                    return []
        except Exception as e:
            logger.error(f"Get models error: {str(e)}")
            return []

    async def chat_completion(
        self,
        messages: List[Dict[str, str]],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: int = 500,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Call the chat-completions API to generate a model reply.

        Args:
            messages: Conversation messages, e.g.
                ``[{"role": "user", "content": "..."}]``.
            model: Model ID to use; defaults to the configured model.
            temperature: Sampling temperature (higher = more random).
            max_tokens: Maximum number of tokens to generate.
            **kwargs: Extra fields merged into the request payload.

        Returns:
            Dict[str, Any]: The raw service response on HTTP 200; otherwise
            ``{"error": True, ...}`` with status/message details. Never raises.
        """
        url = self._build_url("/api/v1/chat/completions")
        payload = {
            "model": model or self.default_model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            **kwargs,
        }
        try:
            async with aiohttp.ClientSession(timeout=self.timeout) as session:
                async with session.post(url, json=payload) as response:
                    if response.status == 200:
                        return await response.json()
                    error_text = await response.text()
                    logger.error(f"Chat completion failed: {response.status}, {error_text}")
                    return {
                        "error": True,
                        "status": response.status,
                        "message": error_text,
                    }
        except Exception as e:
            logger.error(f"Chat completion error: {str(e)}")
            return {
                "error": True,
                "message": str(e),
            }

    async def detect_pua_with_llm(self, text: str) -> Dict[str, Any]:
        """
        Detect PUA (manipulative) content in *text* using the LLM.

        Falls back to rule-based detection whenever the LLM call fails or
        its reply cannot be parsed/validated.

        Args:
            text: Text to analyze.

        Returns:
            Dict[str, Any]: Detection result with ``is_pua``, ``score``,
            ``risk_level``, ``detected_patterns``, ``explanation`` and
            ``response_suggestions`` fields.
        """
        # System prompt defining the detection task and required JSON schema.
        system_prompt = """你是一个专业的PUA言论检测专家。你需要识别文本中可能包含的PUA(Pick-Up Artist)话术和操控性语言。
        
PUA话术通常包含以下几种类型：
1. 煤气灯效应(Gaslighting): 让对方怀疑自己的感受和判断
2. 爱情轰炸(Love Bombing): 过度赞美以快速建立亲密感
3. 负面评价(Negging): 通过批评降低对方自尊心
4. 隔离控制(Isolation): 试图切断对方与他人的联系
5. 三角关系(Triangulation): 引入第三者制造不安全感
6. 冷暴力(Silent Treatment): 通过沉默来惩罚对方
7. 罪恶感操纵(Guilt Tripping): 制造罪恶感来控制对方行为
8. 面包屑喂养(Breadcrumbing): 给予最低限度关注维持关系

请分析提供的文本，判断是否包含PUA内容，并提供以下信息：
1. is_pua: 是否包含PUA内容(true/false)
2. score: PUA程度评分(0.0-1.0)
3. risk_level: 风险等级("none"/"low"/"medium"/"high")
4. detected_patterns: 检测到的PUA模式列表，每项包含category(类别)、description(描述)、severity(严重程度0.0-1.0)
5. explanation: 解释为什么文本包含或不包含PUA内容
6. response_suggestions: 如果面对这样的言论，建议如何回应(提供3条建议)

以JSON格式返回结果。"""

        user_prompt = f"请分析以下文本是否包含PUA内容:\n\n{text}"

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

        try:
            response = await self.chat_completion(
                messages=messages,
                temperature=0.2,  # low temperature for more deterministic classification
                max_tokens=1000,
            )

            if response.get("error"):
                logger.error(f"PUA检测失败: {response.get('message')}")
                return self._fallback_pua_detection(text)

            # Guard against a present-but-empty "choices" list; the previous
            # `.get("choices", [{}])[0]` raised IndexError in that case.
            choices = response.get("choices") or []
            message = choices[0].get("message", {}) if choices else {}
            assistant_message = message.get("content", "")

            return self._parse_pua_response(assistant_message, text)

        except Exception as e:
            logger.error(f"PUA检测异常: {str(e)}")
            return self._fallback_pua_detection(text)

    def _parse_pua_response(self, assistant_message: str, text: str) -> Dict[str, Any]:
        """
        Extract and validate the JSON verdict embedded in the LLM reply.

        Args:
            assistant_message: Raw assistant message content.
            text: Original analyzed text (needed for the fallback path).

        Returns:
            Dict[str, Any]: Parsed result, or the rule-based fallback result
            on any extraction/parse/validation failure.
        """
        # The model may wrap the JSON in prose; take the outermost braces.
        json_start = assistant_message.find("{")
        json_end = assistant_message.rfind("}")
        if json_start < 0 or json_end <= json_start:
            logger.warning(f"无法从响应中提取JSON: {assistant_message}")
            return self._fallback_pua_detection(text)

        try:
            result = json.loads(assistant_message[json_start:json_end + 1])
        except json.JSONDecodeError:
            logger.error(f"无法解析JSON响应: {assistant_message}")
            return self._fallback_pua_detection(text)

        if self._validate_pua_result(result):
            return result
        logger.warning("PUA检测结果缺少必要字段，使用后备检测")
        return self._fallback_pua_detection(text)

    def _validate_pua_result(self, result: Dict[str, Any]) -> bool:
        """
        Check that a PUA detection result contains all required fields.

        Args:
            result: Candidate detection result.

        Returns:
            bool: True if every required field is present.
        """
        required_fields = ["is_pua", "score", "risk_level", "detected_patterns", "explanation", "response_suggestions"]
        return all(field in result for field in required_fields)

    def _fallback_pua_detection(self, text: str) -> Dict[str, Any]:
        """
        Rule-based fallback PUA detection, used when the LLM call fails.

        Args:
            text: Text to analyze.

        Returns:
            Dict[str, Any]: Detection result in the same shape as the
            LLM-based result.
        """
        detected: List[Dict[str, Any]] = []
        total_score = 0.0

        for pattern_info in self._FALLBACK_PATTERNS:
            if pattern_info["pattern"].search(text):
                detected.append({
                    "category": pattern_info["category"],
                    "description": pattern_info["description"],
                    "severity": pattern_info["severity"],
                })
                total_score += pattern_info["severity"]

        # Normalize the accumulated severity into a 0.0-1.0 score.
        score = min(total_score / 3.0, 1.0) if detected else 0.0

        if score == 0:
            risk_level = "none"
        elif score < 0.3:
            risk_level = "low"
        elif score < 0.7:
            risk_level = "medium"
        else:
            risk_level = "high"

        # Build response suggestions keyed on the detected categories.
        detected_categories = {p["category"] for p in detected}
        suggestions: List[str] = []
        if "gaslighting" in detected_categories:
            suggestions.append("我相信我的感受是真实的，这不是我想太多。")
        if "love_bombing" in detected_categories:
            suggestions.append("谢谢你的赞美，不过我觉得我们应该多了解一下彼此。")

        if not suggestions and detected:
            suggestions = ["我需要考虑一下这个问题。", "让我们换个话题吧。"]

        return {
            "is_pua": len(detected) > 0,
            "score": score,
            "risk_level": risk_level,
            "detected_patterns": detected,
            "explanation": "基于简单规则匹配的后备检测" if detected else "未检测到明显PUA内容",
            "response_suggestions": suggestions or ["看起来这段对话没有明显问题。"],
        }

# Module-level singleton client shared by importers of this module.
# NOTE(review): instantiation reads `settings` at import time — verify the
# application config is fully loaded before this module is first imported.
model_mgt_client = ModelMgtClient() 