#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
心理陪伴智能体 (含人设动态提示词系统)
"""
import json
import logging
import os
import sys
import uuid
from datetime import datetime, timedelta
from typing import Any, Dict, Iterator, List, Optional, Union

from config.settings import SETTINGS
from core.models import BasePersonaConfig, ExtendedPersonaConfig, PersonaConfig

from .base_agent import BaseAgent


try:
    from utils.llm_service import LLMService
    from utils.memory_system import MemorySystem
except ImportError as e:
    logging.error(f"导入依赖失败: {e}")
    LLMService = None
    MemorySystem = None

logger = logging.getLogger("AI-MindCare-System-Companion")
# 清除原有可能使用错误编码的handler
for handler in logger.handlers[:]:
    logger.removeHandler(handler)

# 添加UTF-8编码的handler
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(
    logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    ))
handler.encoding = "utf-8"
logger.addHandler(handler)


class CompanionAgent(BaseAgent):
    """心理陪伴智能体（含人设动态管理系统）"""

    def __init__(
            self,
            name: str = "心理陪伴智能体",
            persona: str = "professional",  # 默认人设
            memory_system: Optional[MemorySystem] = None,  # 记忆系统
            llm_service: Optional[LLMService] = None,  # LLM服务
    ):
        super().__init__(name=name,
                         role="companion",
                         memory_system=memory_system)

        # 初始化核心服务
        self.llm_service = llm_service
        self.persona_tag = persona
        
        # 修复人设初始化逻辑
        logger.info(f"正在初始化人设: {persona}")
        self.persona = self.llm_service.person_manager.switch_persona(persona)
        
        # 如果人设切换失败，使用默认人设
        if self.persona is None:
            logger.warning(f"人设 {persona} 切换失败，尝试使用默认人设 professional")
            self.persona = self.llm_service.person_manager.switch_persona("professional")
            if self.persona is None:
                logger.error(f"默认人设 professional 也切换失败，使用安心博士")
                self.persona = self.llm_service.person_manager.switch_persona("安心博士")
                self.persona_tag = "安心博士"
            else:
                self.persona_tag = "professional"
        else:
            logger.info(f"成功加载人设: {persona} -> {self.persona.name}")
        
        # 日志记录
        if not self.llm_service:
            logger.error(f"心理陪伴智能体 {name}: LLM 服务未提供。")
        if not self.memory_system:
            logger.error(f"心理陪伴智能体 {name}: MemorySystem 未提供。")
        logger.info(f"心理陪伴智能体 {name} ({self.persona_tag}) 初始化完成，当前人设: {self.persona.name}")

    def _validate_persona(self, persona_name: str) -> None:
        """验证人设完整性并初始化"""
        # 从配置获取人设并设置默认值
        self.persona = SETTINGS.AGENT_PERSONAS.get(
            persona_name, SETTINGS.AGENT_PERSONAS["professional"])
        # 人设必要字段验证
        required_fields = [
            "name", "existing_form", "character", "talking_style"
        ]
        for field in required_fields:
            if not self.persona.get(field):
                self.persona[
                    field] = "未定义" if field != "talking_style" else "中性"
        # 回退信息默认值
        if not self.persona.get("fallback"):
            self.persona.fallback = "很抱歉，我需要更多信息才能更好地帮助你"

    def get_greetings(self) -> str:
        """获取人设问候语"""
        return self.persona.greeting

    def get_fallback(self) -> str:
        """获取人设回退信息"""
        return self.persona.fallback

    def get_persona_info(self) -> Dict[str, str]:
        """获取当前人设核心信息"""
        return {
            "name": self.persona.name,
            "type": self.persona.existing_form,
            "character": self.persona.character,
            "style": self.persona.talking_style
        }

    def switch_persona(self, persona_name: str) -> bool:
        """动态切换人设"""

        ret = self.llm_service.person_manager.switch_persona(persona_name)
        if ret is None:
            logger.warning(f"人设 {persona_name} 不存在，使用默认专业型")
            return False
        self.persona = ret
        self.persona_tag = persona_name
        return True
        
    def _build_persona_prompt(self) -> str:
        """结构化生成人设提示词（含角色档案与交互规则）"""
        p = self.persona
        return f"""
===== AI心理伙伴人设档案 =====
■ 角色名称：{p["name"]}
■ 专业身份：{p["existing_form"]}
■ 性格特质：{p["character"]}
■ 沟通风格：{p["talking_style"]}
■ 专业背景：{p["knowledge"]}
■ 交互原则：
  - 标准问候：{p["greeting"]}
  - 回退策略：{p["fallback"]}
================================="""

    def _get_scene_guide(self, last_message,
                         long_term_memory) -> Dict[str, str]:
        """根据对话内容动态识别场景并生成规则（可扩展为AI场景分类）"""
        # messages = context.get('user_buffer', [])
        # last_message = messages[-1].get('content', '') if messages else ""
        # long_term_memory = context.get('long_term_memory', {})

        # 场景识别逻辑（基于关键词匹配，可升级为ML模型）
        if "失眠" in last_message or "睡眠" in last_message:
            return {
                "scene":
                "睡眠改善咨询",
                "rules":
                """
1. 每句话包含1个自然声音意象（如海浪、雨声）
2. 用"身体时钟"等生活化比喻解释原理
3. 提供3个可立即执行的微行动建议（≤5步）
4. 避免使用"褪黑素""睡眠周期"等专业术语
"""
            }
        elif ("工作" in last_message
              or "学习" in last_message) and ("压力" in last_message
                                            or "焦虑" in last_message):
            return {
                "scene":
                "职场/学业压力疏导",
                "rules":
                """
1. 使用"任务拆解四步法"框架组织建议
2. 每轮对话包含1个数据支撑点（如"68%的职场人经历过类似压力"）
3. 用"拼图游戏"隐喻解释压力管理（每块拼图代表一个任务）
4. 优先提供5分钟内可完成的快速缓解技巧
"""
            }
        elif "人际关系" in last_message or ("朋友" in last_message
                                        and "矛盾" in last_message):
            return {
                "scene":
                "人际关系咨询",
                "rules":
                """
1. 采用"观察-感受-需求-请求"非暴力沟通框架
2. 每轮对话包含1个类似情境案例（如"我之前遇到过类似情况，当时..."）
3. 用"镜子效应"比喻解释人际互动模式
4. 提供3个可替换的沟通话术模板
"""
            }
        elif any(key in long_term_memory for key in ["抑郁", "情绪低落", "绝望"]):
            return {
                "scene":
                "情绪低落支持",
                "rules":
                """
1. 每轮对话包含2个情绪确认语句（如"能感受到你现在很疲惫"）
2. 避免使用"想开点""加油"等无效安慰
3. 提供具体可操作的"情绪急救包"清单
4. 每3轮对话温和提及专业求助渠道
"""
            }
        else:
            return {
                "scene":
                "日常心理陪伴",
                "rules":
                """
1. 保持自然对话节奏，每2-3轮插入1个共情反馈
2. 引用用户记忆中的积极事件（如"你之前提到的宠物带来的快乐"）
3. 遵循"1提问+2支持点"的回应结构
4. 使用emoji增强情感表达（根据人设风格调整数量）
"""
            }

    def _generate_temperature(self) -> float:
        """根据人设性格动态调整生成温度（创造力/随机性）"""
        temp_map = {
            "沉稳、理性": 0.3,
            "开朗、积极": 0.7,
            "睿智、慈祥": 0.4,
            "敏感、富有创造力": 0.8,
            "幽默、乐观": 0.9,
            "严谨、客观": 0.2
        }
        # 支持多性格关键词匹配（取最高相关度）
        persona_chars = self.persona.character.split("、")
        max_temp = 0.5  # 默认温度
        for char in persona_chars:
            if char in temp_map and temp_map[char] > max_temp:
                max_temp = temp_map[char]
        return max_temp

    def _handle_chat_counseling(self, long_term_memory, last_message,

                                user_buffer, model_key, last_picture="") -> str:

        """核心提示词生成与LLM响应处理"""
        # 1. 提取记忆数据并格式化 (字典类型)

        long_term_memory_str = json.dumps(
            long_term_memory,
            ensure_ascii=False, indent=2, default=str) if isinstance(
                long_term_memory, dict) else str(long_term_memory)
        # 2. 构建人设提示词
        # persona_prompt = self._build_persona_prompt()

        # 3. 获取场景化交互规则
        scene_guide = self._get_scene_guide(last_message, long_term_memory)

        # 4. 构建完整提示词（含人设、场景、记忆、对话历史）
        full_prompt = f"""
### 交互场景指南：{scene_guide["scene"]}
{scene_guide["rules"]}

### 用户输入
{last_message}

### 请根据以上场景指南和用户输入，生成符合当前人设的回应
"""


        # 6. 生成LLM调用参数（含动态温度）
        system_message = f"AI心理陪伴服务启动，当前角色：{self.persona.name}"
        temperature = self._generate_temperature()

        try:
            # 7. 调用LLM生成响应
            
            logger.info(f"提示词为：{full_prompt}")
            response = self.llm_service.generate_response_with_rag(
                prompt=full_prompt,
                system_message=system_message,
                temperature=temperature,
                model_key=model_key,
                max_tokens=500,
                persona=self.persona.tag,
                user_buffer=user_buffer,
                long_term_memory_str=long_term_memory_str,
                picture=last_picture,

            )
            logger.info(f'LLM响应原始内容: {response}')
            return response 
        except Exception as e:
            logger.error(f"生成回复时出错: {str(e)}")
            return self.get_fallback()
        
    def _handle_chat_counseling_stream(self, long_term_memory, last_message,

                                user_buffer, model_key, last_picture="") -> str:

        """核心提示词生成与LLM响应处理"""
        # 1. 提取记忆数据并格式化 (字典类型)

        long_term_memory_str = json.dumps(
            long_term_memory,
            ensure_ascii=False, indent=2, default=str) if isinstance(
                long_term_memory, dict) else str(long_term_memory)
        # 2. 构建人设提示词
        # persona_prompt = self._build_persona_prompt()

        # 3. 获取场景化交互规则
        scene_guide = self._get_scene_guide(last_message, long_term_memory)

        # 4. 构建完整提示词（含人设、场景、记忆、对话历史）
        full_prompt = f"""
### 交互场景指南：{scene_guide["scene"]}
{scene_guide["rules"]}

### 用户输入
{last_message}

### 请根据以上场景指南和用户输入，生成符合当前人设的回应
"""


        # 6. 生成LLM调用参数（含动态温度）
        system_message = f"AI心理陪伴服务启动，当前角色：{self.persona.name}"
        temperature = self._generate_temperature()

        try:
            # 7. 调用LLM生成响应
            
            logger.info(f"提示词为：{full_prompt}")
            for response in self.llm_service.generate_response_with_rag_stream(
                prompt=full_prompt,
                system_message=system_message,
                temperature=temperature,
                model_key=model_key,
                max_tokens=500,
                persona=self.persona.tag,
                user_buffer=user_buffer,
                long_term_memory_str=long_term_memory_str,
                picture=last_picture,

            ):
                logger.info(f'LLM响应原始内容: {response}')
                yield response 
        except Exception as e:
            logger.error(f"生成回复时出错: {str(e)}")
            yield self.get_fallback()

    def _process_message(self, message: Dict[str, Any]) -> Dict[str, Any]:
        """处理接收到的消息（对话初始化与多轮交互）"""
        return self._handle_user_response(message)
    
    def _process_message_stream(self, message: Dict[str, Any]) -> Dict[str, Any]:
        """处理接收到的消息（对话初始化与多轮交互）"""
        for res in self._handle_user_response_stream(message):
            yield res

    def _handle_user_response(self, message: Dict[str, Any]) -> Dict[str, Any]:
        conversation_id = message.get("conversation_id")
        user_message = message.get("user_message")
        user_buffer = message.get("user_buffer", [])
        long_term_memory = message.get("long_term_memory", {})
        model_key = message.get("model_key", "deepseek-ai/DeepSeek-V3")
        persona = message.get("persona")
        image_base64 = message.get("image_base64")  # 图像数据（如果有）
        stream = message.get("stream", True)  # 是否使用流式响应

        logger.info(f"**********************************************")
        logger.info(f"conversation_id: {conversation_id}")
        logger.info(f"user_message: {user_message}\n persona: {persona}")
        logger.info(f"model_key: {model_key}\n image_base64: {image_base64}")
        logger.info(f"user_buffer: {user_buffer}")
        logger.info(f"long_term_memory: {long_term_memory}")
        logger.info(f"stream: {message.get('stream', True)}")
        logger.info(f"**********************************************")


        try:
            next_response = self._handle_chat_counseling(
                long_term_memory=long_term_memory,
                last_message=user_message,
                user_buffer=user_buffer,
                model_key=model_key,
                last_picture=image_base64,
                stream=stream,
            )


            return {
                "status": "in_conversation",
                "requires_user_input": True,
                "response": next_response,
                "temperature": self._generate_temperature(),
                "persona_info": self.get_persona_info()
            }
            # return self.send_message(sender_id, response_content)

        except Exception as e:
            logger.error(
                f"生成回复时出错 (Conversation: {conversation_id}): {e}",
                exc_info=True,
            )
            return {
                "status": "error",
                "requires_user_input": True,
                "response": self.get_fallback(),
                "temperature": self._generate_temperature(),
                "persona_info": self.get_persona_info()
            }
        
    def _handle_user_response_stream(self, message: Dict[str, Any]) -> Dict[str, Any]:
        conversation_id = message.get("conversation_id")
        user_message = message.get("user_message")
        user_buffer = message.get("user_buffer", [])
        long_term_memory = message.get("long_term_memory", {})
        model_key = message.get("model_key", "deepseek-ai/DeepSeek-V3")
        persona = message.get("persona")
        image_base64 = message.get("image_base64")  # 图像数据（如果有）
        stream = message.get("stream", True)  # 是否使用流式响应

        logger.info(f"**********************************************")
        logger.info(f"conversation_id: {conversation_id}")
        logger.info(f"user_message: {user_message}\n persona: {persona}")
        logger.info(f"model_key: {model_key}\n image_base64: {image_base64}")
        logger.info(f"user_buffer: {user_buffer}")
        logger.info(f"long_term_memory: {long_term_memory}")
        logger.info(f"stream: {message.get('stream', True)}")
        logger.info(f"**********************************************")


        try:
            for next_response in self._handle_chat_counseling_stream(
                long_term_memory=long_term_memory,
                last_message=user_message,
                user_buffer=user_buffer,
                model_key=model_key,
                last_picture=image_base64,
            ):


                yield {
                    "status": "in_conversation",
                    "requires_user_input": True,
                    "response": next_response,
                    "temperature": self._generate_temperature(),
                    "persona_info": self.get_persona_info()
                }
                # return self.send_message(sender_id, response_content)

        except Exception as e:
            logger.error(
                f"生成回复时出错 (Conversation: {conversation_id}): {e}",
                exc_info=True,
            )
            yield {
                "status": "error",
                "requires_user_input": True,
                "response": self.get_fallback(),
                "temperature": self._generate_temperature(),
                "persona_info": self.get_persona_info()
            }
