from __future__ import annotations

import json
from collections import Counter
from typing import Any, Dict, List, Optional

from app.core.config import settings
from app.core.logging import logger
from app.schemas.analysis import PUADetectionResult
from app.services.nlp.model_mgt_client import model_mgt_client
from app.services.nlp.virtual_model import virtual_nlp_model


class PUADetector:
    """Service for detecting PUA (manipulative "pick-up artist") language.

    When ``settings.USE_LLM_FOR_PUA_DETECTION`` is enabled, detection is
    delegated to a large language model through ``model_mgt_client``; any
    failure on that path automatically falls back to the local
    ``virtual_nlp_model``. All public methods are defensive: they log and
    return a neutral result instead of raising.
    """

    # Human-readable descriptions for known PUA pattern categories, used
    # when composing user-facing recommendations. Unknown categories fall
    # back to the raw category name.
    _PATTERN_DESCRIPTIONS: Dict[str, str] = {
        "gaslighting": "煤气灯效应(让你怀疑自己的判断)",
        "love_bombing": "爱情轰炸(过度赞美以快速建立亲密感)",
        "negging": "负面评价(通过批评降低你的自尊)",
        "isolation": "隔离控制(试图切断你与他人的联系)",
        "triangulation": "三角关系(引入第三者制造不安全感)",
    }

    # Closing advice appended to a recommendation, keyed by risk level.
    _RISK_ADVICE: Dict[str, str] = {
        "low": " 建议保持警惕，但可以继续交流。",
        "medium": " 建议在回复时注意保护自己的边界，不要被情绪操控。",
        "high": " 建议谨慎交流，保持距离，必要时寻求支持或终止交流。",
    }

    # Display names for relationship contexts, used in the LLM prompt.
    _RELATION_TYPES: Dict[str, str] = {
        "dating": "约会/恋爱关系",
        "friendship": "朋友关系",
        "professional": "职场关系",
        "family": "家庭关系",
    }

    # Long-term advice per relationship context for enhanced results.
    _LONG_TERM_ADVICE: Dict[str, str] = {
        "dating": "在恋爱关系中，重要的是建立健康的边界和沟通方式。如果操控行为持续出现，考虑寻求专业咨询或重新评估关系。",
        "friendship": "真正的友谊应基于相互尊重和支持。如果这些模式持续存在，可能需要与对方坦诚交流或减少接触。",
        "professional": "在职场中，保持专业界限很重要。考虑记录这些互动，必要时咨询HR或管理层。",
        "family": "家庭关系中的不健康模式通常更为复杂。设立健康界限，必要时寻求家庭治疗可能会有所帮助。",
    }
    _DEFAULT_LONG_TERM_ADVICE = "持续关注这些互动模式，如果它们形成模式，考虑设立更强的界限或寻求专业支持。"

    # System prompt for context-aware LLM detection (kept verbatim).
    _CONTEXT_SYSTEM_PROMPT = """你是一个专业的PUA言论检测专家。你需要识别文本中可能包含的PUA(Pick-Up Artist)话术和操控性语言，并提供深度分析。
            
PUA话术通常包含以下几种类型：
1. 煤气灯效应(Gaslighting): 让对方怀疑自己的感受和判断
2. 爱情轰炸(Love Bombing): 过度赞美以快速建立亲密感
3. 负面评价(Negging): 通过批评降低对方自尊心
4. 隔离控制(Isolation): 试图切断对方与他人的联系
5. 三角关系(Triangulation): 引入第三者制造不安全感
6. 冷暴力(Silent Treatment): 通过沉默来惩罚对方
7. 罪恶感操纵(Guilt Tripping): 制造罪恶感来控制对方行为
8. 面包屑喂养(Breadcrumbing): 给予最低限度关注维持关系
9. 移情变位(Shifting Blame): 将责任推卸给他人
10. 受害者扮演(Playing Victim): 伪装成受害者来获取同情

请根据提供的文本和上下文，进行全面分析，判断是否包含PUA内容，并提供以下信息：
1. is_pua: 是否包含PUA内容(true/false)
2. score: PUA程度评分(0.0-1.0)
3. risk_level: 风险等级("none"/"low"/"medium"/"high")
4. detected_patterns: 检测到的PUA模式列表，每项包含:
   - category: 类别
   - description: 描述
   - severity: 严重程度(0.0-1.0)
   - evidence: 文本中的具体证据
   - manipulation_tactics: 使用的操控策略
5. explanation: 详细解释为什么文本包含或不包含PUA内容
6. response_suggestions: 针对性的回应建议(提供3-5条)
7. long_term_advice: 长期应对此类模式的建议
8. context_analysis: 分析上下文中的模式(如果提供了上下文)
9. emotional_impact: 评估此类言论可能对接收者造成的情感影响

以JSON格式返回结果。确保分析深入、全面，并针对具体的关系背景提供相关建议。"""

    def __init__(self) -> None:
        """Initialize the PUA detection service."""
        logger.info("Initializing PUA Detector Service")
        # Feature flag: prefer the LLM backend when enabled.
        self.use_llm = settings.USE_LLM_FOR_PUA_DETECTION

    @staticmethod
    def _error_payload(explanation: str) -> Dict[str, Any]:
        """Return a neutral "not PUA" payload used for errors and non-text input.

        Args:
            explanation: Human-readable reason stored in the payload.

        Returns:
            Dict[str, Any]: Payload with all detection fields zeroed out.
        """
        return {
            "is_pua": False,
            "score": 0.0,
            "detected_patterns": [],
            "risk_level": "none",
            "explanation": explanation,
            "response_suggestions": [],
        }

    @staticmethod
    def _to_result(data: Dict[str, Any]) -> PUADetectionResult:
        """Build a PUADetectionResult from a raw detection dict.

        Raises:
            KeyError: If any expected detection field is missing.
        """
        return PUADetectionResult(
            is_pua=data["is_pua"],
            score=data["score"],
            detected_patterns=data["detected_patterns"],
            risk_level=data["risk_level"],
            explanation=data["explanation"],
            response_suggestions=data["response_suggestions"],
        )

    @staticmethod
    def _result_to_dict(result: PUADetectionResult) -> Dict[str, Any]:
        """Convert a PUADetectionResult back into a plain dict."""
        return {
            "is_pua": result.is_pua,
            "score": result.score,
            "detected_patterns": result.detected_patterns,
            "risk_level": result.risk_level,
            "explanation": result.explanation,
            "response_suggestions": result.response_suggestions,
        }

    async def detect_pua(self, text: str) -> PUADetectionResult:
        """Detect PUA language in a single text.

        Args:
            text: Text to analyze.

        Returns:
            PUADetectionResult: Detection outcome; a neutral result is
            returned if detection fails entirely.
        """
        try:
            if self.use_llm:
                try:
                    # Preferred path: model_mgt LLM-based detection.
                    llm_result = await model_mgt_client.detect_pua_with_llm(text)
                    result = self._to_result(llm_result)
                    logger.info(f"使用大语言模型检测PUA成功: is_pua={result.is_pua}, score={result.score}")
                    return result
                except Exception as e:
                    # LLM failed: fall back to the local virtual model.
                    logger.error(f"大语言模型PUA检测失败，将使用虚拟模型: {str(e)}")
                    return await self._detect_with_virtual_model(text)
            # LLM disabled by configuration: use the virtual model directly.
            return await self._detect_with_virtual_model(text)
        except Exception as e:
            logger.error(f"PUA检测失败: {str(e)}")
            return PUADetectionResult(**self._error_payload("检测过程发生错误"))

    async def _detect_with_virtual_model(self, text: str) -> PUADetectionResult:
        """Detect PUA content with the local virtual NLP model.

        Args:
            text: Text to analyze.

        Returns:
            PUADetectionResult: Detection outcome from the virtual model.
        """
        detection_result = await virtual_nlp_model.detect_pua(text)
        result = self._to_result(detection_result)
        logger.info(f"使用虚拟模型检测PUA: is_pua={result.is_pua}, score={result.score}")
        return result

    async def detect_pua_in_messages(
        self, 
        messages: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Detect PUA language in each message of a list.

        Only messages with ``msg_type == "text"`` and non-empty ``content``
        are analyzed; all returned copies carry a ``pua_detection`` dict.

        Args:
            messages: Message dicts (read keys: ``msg_type``, ``content``).

        Returns:
            List[Dict[str, Any]]: Shallow copies of the input messages with
            a ``pua_detection`` entry added to each.
        """
        annotated: List[Dict[str, Any]] = []

        for message in messages:
            message_copy = message.copy()

            if message.get("msg_type") == "text" and message.get("content"):
                try:
                    detection = await self.detect_pua(message["content"])
                    message_copy["pua_detection"] = self._result_to_dict(detection)
                except Exception as e:
                    logger.error(f"检测消息中的PUA失败: {str(e)}")
                    message_copy["pua_detection"] = self._error_payload("检测过程发生错误")
            else:
                # Non-text messages are marked as PUA-free.
                message_copy["pua_detection"] = self._error_payload("非文本消息")

            annotated.append(message_copy)

        return annotated

    async def analyze_pua_history(
        self, 
        messages: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """Analyze PUA trends across a message history.

        Args:
            messages: Message dicts to analyze.

        Returns:
            Dict[str, Any]: Aggregate statistics (totals, ratio, risk level,
            top-3 pattern categories, trend, recommendation).
        """
        if not messages:
            return {
                "total_messages": 0,
                "pua_messages": 0,
                "pua_ratio": 0.0,
                "risk_level": "none",
                "common_patterns": [],
                "trend": "stable",
                "recommendation": "暂无历史数据"
            }

        try:
            # Annotate every message with a per-message detection first.
            annotated = await self.detect_pua_in_messages(messages)

            total_messages = len(annotated)
            pua_messages = sum(1 for msg in annotated if msg["pua_detection"]["is_pua"])
            pua_ratio = pua_messages / total_messages if total_messages > 0 else 0.0

            # Tally detected pattern categories and keep the 3 most common.
            category_counts = Counter(
                pattern["category"]
                for msg in annotated
                for pattern in msg["pua_detection"]["detected_patterns"]
            )
            common_patterns = [
                {"category": category, "count": count}
                for category, count in category_counts.most_common(3)
            ]

            # Map the PUA ratio onto a coarse risk level.
            if pua_ratio == 0:
                risk_level = "none"
            elif pua_ratio < 0.1:
                risk_level = "low"
            elif pua_ratio < 0.3:
                risk_level = "medium"
            else:
                risk_level = "high"

            # Placeholder trend; a real implementation would analyze the
            # time series of detections.
            trend = "stable"

            recommendation = self._generate_recommendation(risk_level, common_patterns)

            return {
                "total_messages": total_messages,
                "pua_messages": pua_messages,
                "pua_ratio": pua_ratio,
                "risk_level": risk_level,
                "common_patterns": common_patterns,
                "trend": trend,
                "recommendation": recommendation
            }

        except Exception as e:
            logger.error(f"分析PUA历史失败: {str(e)}")
            return {
                "total_messages": len(messages),
                "pua_messages": 0,
                "pua_ratio": 0.0,
                "risk_level": "unknown",
                "common_patterns": [],
                "trend": "unknown",
                "recommendation": "分析过程发生错误"
            }

    def _generate_recommendation(
        self, 
        risk_level: str, 
        common_patterns: List[Dict[str, Any]]
    ) -> str:
        """Compose a human-readable recommendation.

        Args:
            risk_level: Risk level ("none"/"low"/"medium"/"high").
            common_patterns: Most common PUA patterns (dicts with "category").

        Returns:
            str: Recommendation text.
        """
        if risk_level == "none":
            return "当前对话未检测到明显的PUA话术，可以正常交流。"

        recommendation = f"该对话的PUA风险等级为{risk_level}。"

        if common_patterns:
            recommendation += " 主要表现为:"
            for pattern in common_patterns:
                category = pattern["category"]
                # Unknown categories are reported verbatim.
                recommendation += f" {self._PATTERN_DESCRIPTIONS.get(category, category)},"
            recommendation = recommendation.rstrip(",") + "。"

        recommendation += self._RISK_ADVICE.get(risk_level, "")

        return recommendation

    async def detect_pua_with_context(
        self, 
        text: str,
        context_messages: Optional[List[Dict[str, str]]] = None,
        relationship_context: Optional[str] = None,
        severity_threshold: float = 0.3
    ) -> Dict[str, Any]:
        """Context-aware PUA detection with a more detailed analysis.

        Args:
            text: Text to analyze.
            context_messages: Prior messages, each
                ``{"role": "user|sender", "content": "..."}``; only the
                5 most recent are used.
            relationship_context: Relationship type
                (dating, friendship, professional, family).
            severity_threshold: Detected patterns below this severity are
                filtered out of the result.

        Returns:
            Dict[str, Any]: Enhanced detection result; falls back to plain
            detection (plus context enrichment) on any LLM/parsing failure.
        """
        try:
            if not self.use_llm:
                # LLM disabled: degrade to plain detection plus context info.
                base_result = await self._detect_with_virtual_model(text)
                return self._enhance_result_with_context(base_result, relationship_context)

            # Render the conversational context for the prompt.
            context_str = ""
            if context_messages:
                context_str = "对话上下文:\n"
                for msg in context_messages[-5:]:  # only the 5 most recent
                    role = "对方" if msg.get("role") == "user" else "用户"
                    context_str += f"{role}: {msg.get('content', '')}\n"

            # Render the relationship background for the prompt.
            relationship_str = ""
            if relationship_context:
                relationship_str = (
                    f"关系背景: {self._RELATION_TYPES.get(relationship_context, relationship_context)}\n"
                )

            user_prompt = f"{relationship_str}{context_str}\n待分析文本:\n{text}"

            messages = [
                {"role": "system", "content": self._CONTEXT_SYSTEM_PROMPT},
                {"role": "user", "content": user_prompt}
            ]

            try:
                response = await model_mgt_client.chat_completion(
                    messages=messages,
                    temperature=0.1,  # low temperature for consistent analysis
                    max_tokens=2000  # allow a longer analysis
                )

                if response.get("error"):
                    logger.error(f"上下文PUA检测失败: {response.get('message')}")
                    return await self._fallback_detection(text, relationship_context)

                assistant_message = response.get("choices", [{}])[0].get("message", {}).get("content", "")

                # Extract the JSON object embedded in the model's reply.
                json_start = assistant_message.find("{")
                json_end = assistant_message.rfind("}")
                if json_start < 0 or json_end <= json_start:
                    logger.warning(f"无法从增强响应中提取JSON: {assistant_message}")
                    return await self._fallback_detection(text, relationship_context)

                try:
                    result = json.loads(assistant_message[json_start:json_end + 1])
                except json.JSONDecodeError:
                    logger.error(f"无法解析增强JSON响应: {assistant_message}")
                    return await self._fallback_detection(text, relationship_context)

                # Drop patterns below the severity threshold.
                if "detected_patterns" in result:
                    result["detected_patterns"] = [
                        pattern for pattern in result["detected_patterns"]
                        if pattern.get("severity", 0) >= severity_threshold
                    ]

                return result

            except Exception as e:
                logger.error(f"增强PUA检测异常: {str(e)}")
                return await self._fallback_detection(text, relationship_context)

        except Exception as e:
            logger.error(f"上下文PUA检测失败: {str(e)}")
            try:
                return await self._fallback_detection(text, relationship_context)
            except Exception as fallback_error:
                # Last resort: log (instead of silently swallowing) and
                # return a neutral payload.
                logger.error(f"PUA检测后备方案失败: {str(fallback_error)}")
                return self._error_payload("检测过程发生错误")

    async def _fallback_detection(
        self,
        text: str,
        relationship_context: Optional[str]
    ) -> Dict[str, Any]:
        """Run plain detection and enrich the result with context info."""
        base_result = await self.detect_pua(text)
        return self._enhance_result_with_context(base_result, relationship_context)

    def _enhance_result_with_context(
        self, 
        base_result: PUADetectionResult,
        relationship_context: Optional[str] = None
    ) -> Dict[str, Any]:
        """Enrich a basic detection result with relationship-specific advice.

        Args:
            base_result: Basic detection result.
            relationship_context: Relationship type, if known.

        Returns:
            Dict[str, Any]: Result dict, extended with ``long_term_advice``
            and ``emotional_impact`` when PUA was detected and a context
            was provided.
        """
        result_dict = self._result_to_dict(base_result)

        if relationship_context and base_result.is_pua:
            result_dict["long_term_advice"] = self._LONG_TERM_ADVICE.get(
                relationship_context, self._DEFAULT_LONG_TERM_ADVICE
            )
            # Note on the likely emotional impact of such language.
            result_dict["emotional_impact"] = "这类言论可能导致自我怀疑、情绪低落和自尊心降低，长期影响可能包括焦虑和依赖感增强。"

        return result_dict


# Module-level singleton instance shared by importers of this module.
pua_detector = PUADetector()