"""
智能客服服务
处理智能客服的核心逻辑
"""

from typing import Dict, List, Optional, Any, AsyncGenerator
from pydantic import BaseModel
from datetime import datetime
import uuid
from src.models.chat import ChatMessage, ChatResponse, ChatSession

from src.utils.logging import get_logger
from src.containers import get_knowledge_base_service
from src.services.knowledge_base_service import KnowledgeBaseQuery
from src.research_core.enhanced_multi_agent_workflow import create_enhanced_multi_agent_workflow as create_multi_agent_workflow
from src.research_core.state import AgentState
from src.research_core.intelligent_model_selector import IntelligentModelSelector
from src.research_core.model_evaluator import ModelEvaluator
from src.research_core.model_manager import ModelManager
from src.services.user_preference_service import UserPreferenceService

logger = get_logger(__name__)

# Simple in-memory store; a real deployment should use a database.
chat_sessions: Dict[str, ChatSession] = {}

def create_chat_session(user_id: str, context_info: Optional[Dict[str, Any]] = None) -> ChatSession:
    """Create a new chat session for the given user and register it in memory.

    The user's stored preferences are loaded when ``user_id`` is purely
    numeric; otherwise the session starts with empty preferences.
    """
    from src.services.user_preference_service import get_user_preference_service

    preference_service = get_user_preference_service()
    if user_id.isdigit():
        preferences = preference_service.get_user_preferences(int(user_id))
    else:
        preferences = {}

    new_session = ChatSession(
        session_id=str(uuid.uuid4()),
        user_id=user_id,
        messages=[],
        created_at=datetime.now(),
        updated_at=datetime.now(),
        context_info=context_info,
        user_preferences=preferences
    )
    chat_sessions[new_session.session_id] = new_session
    return new_session

def get_chat_session(session_id: str) -> Optional[ChatSession]:
    """Look up a chat session by id; returns None when the id is unknown."""
    session = chat_sessions.get(session_id)
    return session

def add_message_to_session(session_id: str, message: ChatMessage):
    """Append a message to an existing session and refresh its timestamp.

    Unknown session ids are silently ignored. Messages with role "user"
    additionally update the session's rolling context.
    """
    session = chat_sessions.get(session_id)
    if not session:
        return

    session.messages.append(message)
    session.updated_at = datetime.now()

    if message.role == "user":
        _update_context_from_message(session, message)

def _update_context_from_message(session: ChatSession, message: ChatMessage):
    """Extract lightweight context from a user message and store it on the session.

    Records the latest user message and its timestamp, plus any entities the
    NLP helper finds. Entity extraction is best-effort: a failure there must
    not prevent the message itself from being recorded (previously an NLP
    error would propagate and break message handling).
    """
    # More sophisticated extraction (intents, etc.) could be plugged in here.
    if not session.context_info:
        session.context_info = {}

    # Keep the most recent user utterance available to downstream components.
    session.context_info["last_user_message"] = message.content
    session.context_info["last_user_message_time"] = (
        message.timestamp.isoformat() if message.timestamp else datetime.now().isoformat()
    )

    # Entity extraction is an enhancement, not a requirement.
    try:
        from src.utils.nlp_enhancement import get_nlp_support
        nlp_support = get_nlp_support()
        entities = nlp_support.extract_entities(message.content)
        if entities:
            session.context_info["last_entities"] = entities
    except Exception as e:
        logger.warning(f"实体提取失败: {e}")

class ChatbotService:
    def __init__(self):
        """Wire up the chatbot's model-selection, knowledge and workflow stack."""
        # Manages the lifecycle of the underlying models.
        self.model_manager = ModelManager()
        
        # Picks the best model per task type at runtime.
        self.model_selector = IntelligentModelSelector()
        
        # Model performance evaluator.
        self.model_evaluator = ModelEvaluator()
        
        # Default language model for generic Q&A, chosen by the selector.
        self.llm = self.model_selector.select_model_for_task(
            task_type="general_qa",
            task_description="通用问答任务",
            requirements={}
        )
        
        # Knowledge-base service used for first-pass answer lookup.
        self.kb_service = get_knowledge_base_service()
        
        # Multi-agent workflow for questions the knowledge base can't answer.
        self.workflow = create_multi_agent_workflow()
        
        # Optional multimedia models (image/speech/video); each may end up None.
        self._init_multimedia_models()
        
        # Per-model performance samples, keyed by model class name.
        self.performance_records = {}
        
        # Federated-learning client (no-op placeholder when unavailable).
        self._init_federated_learning()
        
    def _init_federated_learning(self):
        """Set up the federated-learning client, falling back to a no-op placeholder."""
        class PlaceholderFederatedLearningClient:
            """Stand-in client used when the real one cannot be constructed."""
            async def send_update(self, data):
                # Intentionally does nothing.
                pass

        # Dynamic import keeps static analysis happy when the optional
        # federated-learning module is absent from the deployment.
        try:
            import importlib
            federated_module = importlib.import_module('src.research_core.federated_learning')
            client_cls = getattr(federated_module, 'FederatedLearningClient', None)
            if client_cls is None:
                # Module present but the expected class is missing.
                self.federated_client = PlaceholderFederatedLearningClient()
                logger.warning("联邦学习客户端类不存在，使用占位符客户端")
            else:
                self.federated_client = client_cls()
                logger.info("联邦学习客户端初始化成功")
        except ImportError:
            # Module not installed at all.
            self.federated_client = PlaceholderFederatedLearningClient()
            logger.warning("联邦学习模块不可用，使用占位符客户端")
        except Exception as e:
            # Any other failure (e.g. the client's constructor raising).
            self.federated_client = PlaceholderFederatedLearningClient()
            logger.warning(f"联邦学习客户端初始化失败: {e}")
    
    def _init_multimedia_models(self):
        """Initialize the optional multimedia model handles via the selector.

        Each model is optional: a selection failure leaves the corresponding
        attribute set to None and logs a warning, so the service can still
        operate text-only. Replaces six copy-pasted try blocks with one
        data-driven loop; the log messages are unchanged.
        """
        # (attribute name, task type, task description, log label)
        model_specs = [
            ("image_model", "image_recognition", "图像识别任务", "图像模型"),
            ("speech_recognition_model", "speech_recognition", "语音识别任务", "语音识别模型"),
            ("speech_synthesis_model", "speech_synthesis", "语音合成任务", "语音合成模型"),
            ("video_model", "video_analysis", "视频分析任务", "视频模型"),
            # Enhanced speech-processing models.
            ("speaker_diarization_model", "speaker_diarization", "说话人分离任务", "说话人分离模型"),
            ("speech_translation_model", "speech_translation", "语音翻译任务", "语音翻译模型"),
        ]
        for attr, task_type, description, label in model_specs:
            try:
                model = self.model_selector.select_model_for_task(
                    task_type=task_type,
                    task_description=description,
                    requirements={}
                )
                setattr(self, attr, model)
                logger.info(f"{label}初始化成功")
            except Exception as e:
                setattr(self, attr, None)
                logger.warning(f"{label}初始化失败: {e}")
        
    def process_message(self, message: str, session_id: str, context: Dict[str, Any], user_id: str) -> ChatResponse:
        """Handle one user message and produce a ChatResponse.

        Resolution order:
          1. knowledge base (with knowledge-graph integration),
          2. multi-agent workflow for complex questions,
          3. a default model-generated reply.
        Every path records the selected model's performance; failures return
        an apology response with quality_score 0.0 instead of raising.
        """
        import time
        start_time = time.time()
        selected_model = None
        
        try:
            # Classify the request so the selector can pick a fitting model.
            task_type = self._determine_task_type(message, context)
            
            selected_model = self.model_selector.select_model_for_task(
                task_type=task_type,
                task_description="",
                requirements=context
            )
            
            # Record the call once, under the actual task type. (Previously
            # this was hard-coded to "video_analysis" and the KB path below
            # recorded the same call a second time, double-counting it.)
            from src.research_core.model_monitoring import model_monitoring
            if selected_model:
                model_monitoring.record_model_call(selected_model.__class__.__name__, task_type)
            
            # Feedback context from earlier interactions, if any.
            feedback_context = context.get('feedback_context', {})
            
            # 1) Try the knowledge base first (includes knowledge graph).
            knowledge_response = self._query_knowledge_base(
                message,
                context=feedback_context
            )
            
            if knowledge_response:
                # Record performance for the model serving this turn.
                response_time = time.time() - start_time
                self._evaluate_and_update_model(
                    model=selected_model,
                    task_type=task_type,
                    input_data=message,
                    output_data=knowledge_response["answer"],
                    response_time=response_time,
                    success=True
                )
                
                # Assess how well the KB answer fits the question.
                quality_score = self._evaluate_knowledge_quality(
                    message, 
                    knowledge_response, 
                    feedback_context
                )
                
                return ChatResponse(
                    response=knowledge_response["answer"],
                    session_id=session_id,
                    related_knowledge=knowledge_response.get("related_items", []),
                    suggestions=knowledge_response.get("suggestions", []),
                    quality_score=quality_score,
                    source_type=knowledge_response.get("source_type", "knowledge_base")
                )
            
            # 2) No KB answer: fall back to the multi-agent workflow.
            workflow_response = self._process_with_workflow(message, context)
            if workflow_response:
                response_time = time.time() - start_time
                self._evaluate_and_update_model(
                    model=selected_model,
                    task_type=task_type,
                    input_data=message,
                    output_data=workflow_response,
                    response_time=response_time,
                    success=True
                )
                
                quality_score = 0.8  # heuristic: workflow answers rate high
                
                return ChatResponse(
                    response=workflow_response,
                    session_id=session_id,
                    quality_score=quality_score,
                    source_type="workflow"
                )
            
            # 3) Last resort: default model-generated reply.
            default_response = self._generate_default_response(message, selected_model)
            
            response_time = time.time() - start_time
            self._evaluate_and_update_model(
                model=selected_model,
                task_type=task_type,
                input_data=message,
                output_data=default_response,
                response_time=response_time,
                success=True
            )
            
            quality_score = 0.6  # heuristic: default replies rate medium
            
            return ChatResponse(
                response=default_response,
                session_id=session_id,
                quality_score=quality_score,
                source_type="default"
            )
        except Exception as e:
            logger.error(f"处理消息时出错: {e}")
            
            # Still record the failure against the selected model, if any.
            if selected_model:
                response_time = time.time() - start_time
                self._evaluate_and_update_model(
                    model=selected_model,
                    task_type="general_qa",  # fall back to the default task type
                    input_data=message,
                    output_data="处理错误",
                    response_time=response_time,
                    success=False,
                    error=str(e)
                )
            
            # Apology response with lowest quality score and error source.
            return ChatResponse(
                response="抱歉，处理您的请求时出现了问题。请稍后重试或联系人工客服。",
                session_id=session_id,
                quality_score=0.0,
                source_type="error"
            )
    
    def _determine_task_type(self, message: str, context: Dict[str, Any]) -> str:
        """确定任务类型"""
        # 简单的任务类型分类逻辑，可以根据实际需求扩展
        if context.get('content_type') == 'image':
            return 'image_recognition'
        elif context.get('content_type') == 'audio':
            return 'speech_recognition'
        elif context.get('content_type') == 'video':
            return 'video_analysis'
        elif any(keyword in message.lower() for keyword in ['订单', '物流', '购买', '支付']):
            return 'ecommerce_support'
        elif any(keyword in message.lower() for keyword in ['技术', '设置', '错误', '故障']):
            return 'technical_support'
        elif any(keyword in message.lower() for keyword in ['账户', '登录', '注册', '个人信息']):
            return 'account_management'
        else:
            return 'general_qa'
            
    def _evaluate_and_update_model(self, model, task_type: str, input_data: str, 
                                  output_data: str, response_time: float, 
                                  success: bool, error: Optional[str] = None):
        """评估模型性能并更新模型选择器"""
        try:
            # 计算性能指标
            performance_metrics = {
                'response_time': response_time,
                'success_rate': 1.0 if success else 0.0,
                'error_rate': 0.0 if success else 1.0,
                'input_length': len(input_data),
                'output_length': len(output_data)
            }
            
            # 记录性能
            model_name = model.__class__.__name__
            if model_name not in self.performance_records:
                self.performance_records[model_name] = []
            self.performance_records[model_name].append({
                'task_type': task_type,
                'metrics': performance_metrics,
                'timestamp': datetime.now()
            })
            
            # 更新模型评估器（使用现有方法）
            # self.model_evaluator.update_model_performance(
            #     model_name=model_name,
            #     task_type=task_type,
            #     metrics=performance_metrics
            # )
            
            # 更新智能模型选择器（注释掉不存在的方法）
            # self.model_selector.update_model_history(
            #     model_name=model_name,
            #     task_type=task_type,
            #     metrics=performance_metrics
            # )
            
            # 如果启用了联邦学习，发送本地更新
            if self.federated_client and success:
                try:
                    # 准备用于联邦学习的本地更新数据
                    local_update = {
                        'model_name': model_name,
                        'task_type': task_type,
                        'performance_metrics': performance_metrics,
                        'sample_data': {
                            'input': input_data[:200],  # 限制数据大小
                            'output': output_data[:200]
                        }
                    }
                    # 异步发送联邦学习更新
                    import asyncio
                    loop = asyncio.get_event_loop()
                    if loop.is_running():
                        loop.create_task(self.federated_client.send_update(local_update))
                    else:
                        loop.run_until_complete(self.federated_client.send_update(local_update))
                except Exception as fl_error:
                    logger.warning(f"联邦学习更新发送失败: {fl_error}")
        except Exception as e:
            logger.error(f"评估和更新模型时出错: {e}")
            
    async def process_message_stream(self, message: str, session_id: str, context: Dict[str, Any], user_id: str) -> AsyncGenerator[str, None]:
        """Stream a reply for a user message.

        Mirrors process_message but yields text chunks: a knowledge-base hit
        is yielded as a single chunk, otherwise the multi-agent workflow's
        stream is forwarded chunk by chunk. Errors yield a canned apology
        instead of raising.
        """
        import time
        start_time = time.time()
        selected_model = None
        
        try:
            # Classify the request so the selector can pick a fitting model.
            task_type = self._determine_task_type(message, context)
            
            selected_model = self.model_selector.select_model_for_task(
                task_type=task_type,
                task_description=message,
                requirements={}
            )
            
            # Record the call in the monitoring module.
            from src.research_core.model_monitoring import model_monitoring
            if selected_model:
                model_monitoring.record_model_call(selected_model.__class__.__name__, task_type)
            
            # Feedback context from earlier interactions, if any.
            feedback_context = context.get('feedback_context', {})
            
            # First try the knowledge base (includes knowledge-graph lookup).
            knowledge_response = self._query_knowledge_base(
                message,
                context=feedback_context
            )
            
            if knowledge_response:
                # Record performance for the model serving this turn.
                response_time = time.time() - start_time
                self._evaluate_and_update_model(
                    model=selected_model,
                    task_type=task_type,
                    input_data=message,
                    output_data=knowledge_response["answer"],
                    response_time=response_time,
                    success=True
                )
                
                # NOTE(review): the returned score is unused in this streaming
                # path — presumably called for recording side effects; confirm
                # before removing.
                quality_score = self._evaluate_knowledge_quality(
                    message, 
                    knowledge_response, 
                    feedback_context
                )
                
                yield knowledge_response["answer"]
                return
            
            # No KB answer: stream the multi-agent workflow's output.
            async for chunk in self._process_with_workflow_stream(message, context):
                yield chunk
                
            # Evaluate once the stream has completed.
            response_time = time.time() - start_time
            self._evaluate_and_update_model(
                model=selected_model,
                task_type=task_type,
                input_data=message,
                output_data="流式响应完成",
                response_time=response_time,
                success=True
            )
        except Exception as e:
            logger.error(f"流式处理消息时出错: {e}")
            
            # Still record the failure against the selected model, if any.
            if selected_model:
                response_time = time.time() - start_time
                self._evaluate_and_update_model(
                    model=selected_model,
                    task_type="general_qa",  # fall back to the default task type
                    input_data=message,
                    output_data="流式处理错误",
                    response_time=response_time,
                    success=False,
                    error=str(e)
                )
            
            yield "抱歉，处理您的请求时出现了问题。请稍后重试或联系人工客服。"
    
    def process_image_message(self, image_data: bytes, session_id: str) -> str:
        """Analyze an uploaded image and return a textual description.

        Fix: the model output is now coerced with str() so the declared
        return type holds even when invoke() returns a message object
        (the error path already returned a plain string).
        """
        import time
        start_time = time.time()
        selected_model = None
        
        try:
            # Pick the current best image-recognition model.
            selected_model = self.model_selector.select_model_for_task(
                task_type="image_recognition",
                task_description="",
                requirements={}
            )
            
            # Record the call in the monitoring module.
            from src.research_core.model_monitoring import model_monitoring
            if selected_model:
                model_monitoring.record_model_call(selected_model.__class__.__name__, "image_recognition")
            
            # Analyze the image.
            # NOTE(review): the raw bytes are stringified before invoke() —
            # confirm the selected models actually expect this format.
            if selected_model:
                analysis_result = str(selected_model.invoke(str(image_data)))
            else:
                analysis_result = "无法分析图片内容"
            
            # Record performance.
            response_time = time.time() - start_time
            self._evaluate_and_update_model(
                model=selected_model,
                task_type="image_recognition",
                input_data=f"图像数据 (大小: {len(image_data)} bytes)",
                output_data=analysis_result,
                response_time=response_time,
                success=True
            )
            
            return analysis_result
        except Exception as e:
            logger.error(f"处理图像消息时出错: {e}")
            
            # Still record the failure against the selected model, if any.
            if selected_model:
                response_time = time.time() - start_time
                self._evaluate_and_update_model(
                    model=selected_model,
                    task_type="image_recognition",
                    input_data=f"图像数据 (大小: {len(image_data)} bytes)",
                    output_data="图像分析错误",
                    response_time=response_time,
                    success=False,
                    error=str(e)
                )
            
            return "抱歉，无法分析您提供的图片。请稍后重试或提供其他信息。"
    
    async def process_voice_message(self, audio_data: bytes, session_id: str) -> str:
        """Transcribe a voice message to text.

        Selects the current best speech-recognition model, invokes it on the
        audio payload and records the call's performance. On failure a canned
        apology string is returned instead of raising.
        """
        import time
        start_time = time.time()
        selected_model = None
        
        try:
            # Pick the current best speech-recognition model.
            selected_model = self.model_selector.select_model_for_task(
                task_type="speech_recognition",
                task_description="",
                requirements={}
            )
            
            # Record the call in the monitoring module.
            from src.research_core.model_monitoring import model_monitoring
            if selected_model:
                model_monitoring.record_model_call(selected_model.__class__.__name__, "speech_recognition")
            
            # Speech-to-text.
            if selected_model:
                # Speech-recognition models (e.g. Whisper wrappers) are
                # driven through invoke().
                # NOTE(review): decoding raw audio bytes as UTF-8 (even with
                # errors='ignore') is unlikely to be what an ASR model
                # expects — confirm the models' real input contract.
                try:
                    # First attempt: pass the lossily decoded audio payload.
                    text = selected_model.invoke(audio_data.decode('utf-8', errors='ignore'))
                except Exception:
                    # Fallback: stringify the raw bytes and retry.
                    text = selected_model.invoke(str(audio_data))
            else:
                text = "无法识别语音内容"
            transcribed_text = str(text)  # guarantee a str return value
            
            # Record performance.
            response_time = time.time() - start_time
            self._evaluate_and_update_model(
                model=selected_model,
                task_type="speech_recognition",
                input_data=f"音频数据 (大小: {len(audio_data)} bytes)",
                output_data=transcribed_text,
                response_time=response_time,
                success=True
            )
            
            return transcribed_text
        except Exception as e:
            logger.error(f"处理语音消息时出错: {e}")
            
            # Still record the failure against the selected model, if any.
            if selected_model:
                response_time = time.time() - start_time
                self._evaluate_and_update_model(
                    model=selected_model,
                    task_type="speech_recognition",
                    input_data=f"音频数据 (大小: {len(audio_data)} bytes)",
                    output_data="语音识别错误",
                    response_time=response_time,
                    success=False,
                    error=str(e)
                )
            
            return "抱歉，无法识别您提供的语音信息。请稍后重试或提供其他信息。"
            
    async def process_voice_command(self, audio_data: bytes, session_id: str, context: Optional[Dict[str, Any]] = None) -> str:
        """Transcribe a voice command, parse its intent and execute it.

        Fix: removed the unused timing code (start_time was assigned but
        never read). Failures return an apology string rather than raising.
        """
        try:
            # 1. Speech-to-text.
            transcribed_text = await self.process_voice_message(audio_data, session_id)
            
            # process_voice_message signals failure via these phrases rather
            # than raising, so surface its message directly.
            if "无法识别" in transcribed_text or "抱歉" in transcribed_text:
                return transcribed_text
                
            # 2. Parse the command intent.
            command_intent = self._parse_voice_command(transcribed_text, context)
            
            # 3. Execute the command.
            result = self._execute_voice_command(command_intent, context)
            
            return result
            
        except Exception as e:
            logger.error(f"处理语音命令时出错: {e}")
            return "抱歉，无法处理您的语音命令。请稍后重试。"

    def _parse_voice_command(self, text: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """
        解析语音命令意图
        """
        # 这里应该集成自然语言理解模型来解析命令
        # 目前使用简单的关键词匹配
        
        command_info = {
            "intent": "unknown",
            "parameters": {},
            "original_text": text
        }
        
        # 常见命令关键词
        commands = {
            "search": ["搜索", "查找", "查询"],
            "create": ["创建", "新建", "增加"],
            "delete": ["删除", "移除"],
            "update": ["更新", "修改"],
            "help": ["帮助", "怎么"],
            "status": ["状态", "情况"]
        }
        
        text_lower = text.lower()
        for intent, keywords in commands.items():
            if any(keyword in text_lower for keyword in keywords):
                command_info["intent"] = intent
                break
        
        # 提取参数
        # 这里应该使用更复杂的NLP技术
        command_info["parameters"]["query"] = text
        
        return command_info

    def _execute_voice_command(self, command_info: Dict[str, Any], context: Optional[Dict[str, Any]] = None) -> str:
        """
        执行语音命令
        """
        intent = command_info.get("intent", "unknown")
        parameters = command_info.get("parameters", {})
        
        if intent == "search":
            query = parameters.get("query", "")
            # 执行搜索操作
            return f"正在为您搜索关于'{query}'的信息..."
            
        elif intent == "create":
            # 执行创建操作
            return "正在为您创建新项目..."
            
        elif intent == "help":
            return "我可以帮您执行搜索、创建、删除等操作。请告诉我您需要什么帮助？"
            
        else:
            return "我理解了您的命令，但暂时无法执行该操作。请使用文本方式与我交互。"

    async def process_multilingual_speech(self, audio_data: bytes, session_id: str, target_language: str = "zh") -> Dict[str, Any]:
        """Transcribe multilingual speech and translate it to target_language.

        Returns a dict with keys transcription, translation, language and
        confidence. Fields keep their defaults when a step is unavailable or
        fails; errors are logged, never raised.
        """
        result = {
            "transcription": "",
            "translation": "",
            "language": "unknown",
            "confidence": 0.0
        }
        
        try:
            # 1. Speech recognition (including language identification).
            # NOTE(review): this assumes invoke() returns a dict with
            # text/language/confidence keys, whereas process_voice_message
            # treats the same models' output as plain text — confirm the
            # actual model contract.
            if self.speech_recognition_model:
                recognition_result = self.speech_recognition_model.invoke(audio_data)
                result["transcription"] = recognition_result.get("text", "")
                result["language"] = recognition_result.get("language", "unknown")
                result["confidence"] = recognition_result.get("confidence", 0.0)
                
            # 2. Speech translation (skipped when already in target language).
            if self.speech_translation_model and result["language"] != target_language:
                translation_result = self.speech_translation_model.invoke({
                    "text": result["transcription"],
                    "source_language": result["language"],
                    "target_language": target_language
                })
                result["translation"] = translation_result.get("translated_text", "")
            else:
                result["translation"] = result["transcription"]
                
        except Exception as e:
            logger.error(f"处理多语种语音时出错: {e}")
            
        return result
    
    async def generate_voice_response(self, text: str) -> bytes:
        """Synthesize speech audio for the given text.

        Returns raw audio bytes, or b"" when no synthesis model is available
        or synthesis fails. Never raises.
        """
        import time
        start_time = time.time()
        selected_model = None
        
        try:
            # Pick the current best speech-synthesis model.
            selected_model = self.model_selector.select_model_for_task(
                task_type="speech_synthesis",
                task_description="",
                requirements={}
            )
            
            # Record the call in the monitoring module.
            from src.research_core.model_monitoring import model_monitoring
            if selected_model:
                model_monitoring.record_model_call(selected_model.__class__.__name__, "speech_synthesis")
            
            # Text-to-speech.
            if selected_model:
                # Synthesis models are driven through invoke().
                try:
                    audio_data = selected_model.invoke(text)
                    # Normalize the model output to bytes.
                    if isinstance(audio_data, str):
                        audio_data = audio_data.encode('utf-8')
                    elif not isinstance(audio_data, bytes):
                        # Unknown payload type: treat as no audio.
                        audio_data = b""
                except Exception:
                    # Synthesis failed: return empty audio rather than raise.
                    audio_data = b""
            else:
                audio_data = b""
            
            # Record performance.
            response_time = time.time() - start_time
            if selected_model:
                self._evaluate_and_update_model(
                    model=selected_model,
                    task_type="speech_synthesis",
                    input_data=text[:200],  # cap recorded sample size
                    output_data=f"语音数据 (大小: {len(audio_data)} bytes)",
                    response_time=response_time,
                    success=True
                )
            
            return audio_data
        except Exception as e:
            logger.error(f"生成语音响应时出错: {e}")
            
            # Still record the failure against the selected model, if any.
            if selected_model:
                response_time = time.time() - start_time
                self._evaluate_and_update_model(
                    model=selected_model,
                    task_type="speech_synthesis",
                    input_data=text[:200],  # cap recorded sample size
                    output_data="语音合成错误",
                    response_time=response_time,
                    success=False,
                    error=str(e)
                )
            
            return b""
    
    def process_video_message(self, video_data: bytes, session_id: str) -> str:
        """Analyze an uploaded video and return a textual description.

        Fix: the model output is now coerced with str() so the declared
        return type holds even when invoke() returns a message object
        (the error path already returned a plain string).
        """
        import time
        start_time = time.time()
        selected_model = None
        
        try:
            # Pick the current best video-analysis model.
            selected_model = self.model_selector.select_model_for_task(
                task_type="video_analysis",
                task_description="",
                requirements={}
            )
            
            # Record the call in the monitoring module.
            from src.research_core.model_monitoring import model_monitoring
            if selected_model:
                model_monitoring.record_model_call(selected_model.__class__.__name__, "video_analysis")
            
            # Analyze the video.
            # NOTE(review): the prompt ignores the actual video bytes and
            # sends a fixed description — confirm this is intentional.
            if selected_model:
                analysis_result = str(selected_model.invoke(str({"video_description": "用户上传的视频", "task": "analyze"})))
            else:
                analysis_result = "无法分析视频内容"
            
            # Record performance.
            response_time = time.time() - start_time
            self._evaluate_and_update_model(
                model=selected_model,
                task_type="video_analysis",
                input_data=f"视频数据 (大小: {len(video_data)} bytes)",
                output_data=analysis_result,
                response_time=response_time,
                success=True
            )
            
            return analysis_result
        except Exception as e:
            logger.error(f"处理视频消息时出错: {e}")
            
            # Still record the failure against the selected model, if any.
            if selected_model:
                response_time = time.time() - start_time
                self._evaluate_and_update_model(
                    model=selected_model,
                    task_type="video_analysis",
                    input_data=f"视频数据 (大小: {len(video_data)} bytes)",
                    output_data="视频分析错误",
                    response_time=response_time,
                    success=False,
                    error=str(e)
                )
            
            return "抱歉，无法分析您提供的视频。请稍后重试或提供其他信息。"
    
    def _enhanced_knowledge_query(self, query: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Enhanced knowledge lookup combining the knowledge base and knowledge graph.

        Args:
            query: The user's query text.
            context: Optional context forwarded to the knowledge-base lookup.

        Returns:
            A dict with the answer, related items, suggestions, extracted
            entities/relationships, and a ``source_type`` marker.
        """
        try:
            results: Dict[str, Any] = {
                "answer": "",
                "related_items": [],
                "suggestions": [],
                "entities": [],
                "relationships": [],
                "source_type": "none",
            }
            
            kb_service = get_knowledge_base_service()
            
            # Query both knowledge sources.
            kb_results = self._query_knowledge_base(query, context)
            kg_results = self._query_knowledge_graph(query)
            
            # Merge knowledge-base results first.
            if kb_results:
                results.update(kb_results)
                results["source_type"] = "knowledge_base"
            
            if kg_results:
                # Split graph hits into entity and relationship records.
                results["entities"] = [
                    {
                        "id": item.get("id"),
                        "name": item.get("properties", {}).get("name", ""),
                        "labels": item.get("labels", []),
                        "properties": item.get("properties", {}),
                    }
                    for item in kg_results
                    if item.get("type") == "node"
                ]
                results["relationships"] = [
                    {
                        "id": item.get("id"),
                        "type": item.get("relationship_type", ""),
                        "source": item.get("source", {}),
                        "target": item.get("target", {}),
                    }
                    for item in kg_results
                    if item.get("type") == "relationship"
                ]
                
                if results["source_type"] == "knowledge_base":
                    # Both sources hit: fuse graph facts into the KB answer.
                    results["answer"] = self._fuse_knowledge_results(results["answer"], kg_results)
                    results["source_type"] = "hybrid"
                else:
                    # Graph-only hit: format graph results as the answer.
                    results["answer"] = self._format_kg_results(kg_results)
                    results["source_type"] = "knowledge_graph"
            
            # Attach up to five related search suggestions.
            results["suggestions"] = [
                {"text": suggestion.text, "frequency": suggestion.frequency}
                for suggestion in kb_service.get_search_suggestions(query, 5)
            ]
            
            return results
            
        except Exception as e:
            logger.error(f"增强版知识查询出错: {e}")
            return {"answer": "抱歉，查询过程中出现错误。", "source_type": "error"}

    def _query_knowledge_base(self, query: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Query the knowledge base for entries relevant to *query*.

        Args:
            query: Query text.
            context: Contextual information (currently unused by the lookup).

        Returns:
            ``{"answer": ..., "related_items": [...]}`` on success, or an empty
            dict when nothing was found or an error occurred.
        """
        try:
            kb_service = get_knowledge_base_service()
            
            # Build the structured query (published items, hybrid semantic search).
            kb_query = KnowledgeBaseQuery(
                query_text=query,
                limit=5,
                status='published',
                semantic_search=True,
                hybrid_search=True,
                include_summary=True,
                include_related=True
            )
            
            # Execute the query, handling service-level errors explicitly.
            try:
                items = kb_service.search_knowledge_items(kb_query)
            except AttributeError as ae:
                logger.error(f"知识库服务方法调用错误: {ae}")
                return {}
            except Exception as inner_e:
                logger.error(f"执行知识库查询时发生意外错误: {inner_e}")
                return {}
            
            # Single empty-result check. (The original tested `not items` twice
            # and carried an unreachable "no items" else-branch further below.)
            if not items:
                logger.warning(f"知识库查询返回空结果，查询内容: {query}")
                return {}
            
            # Format the hits, tolerating malformed individual entries.
            formatted_items = []
            for item in items:
                try:
                    content_preview = item.content[:200] + "..." if item.content else ""
                    formatted_items.append({
                        "id": item.id,
                        "title": item.title or "未命名条目",
                        "summary": item.summary or content_preview,
                        "category": item.category or "未分类",
                        "tags": item.tags if item.tags else [],
                        "view_count": item.view_count or 0
                    })
                except Exception as item_error:
                    logger.warning(f"格式化知识条目时出错: {item_error}")
                    continue
            
            # Build the answer from the top hit, with null-safety.
            main_item = items[0]
            try:
                if main_item.summary:
                    answer = f"{main_item.summary}\n\n详细内容：{main_item.content}"
                else:
                    answer = main_item.content or "未找到具体内容"
            except Exception as content_error:
                logger.warning(f"构建回答内容时出错: {content_error}")
                answer = "知识条目内容不可用"
            
            return {
                "answer": answer,
                "related_items": formatted_items
            }
            
        except Exception as e:
            logger.error(f"查询知识库时出错: {e}")
            return {}

    def _query_knowledge_graph(self, query: str, limit: int = 5) -> List[Dict[str, Any]]:
        """Look up entities and relationships in the knowledge graph.

        Args:
            query: Free-text query for semantic search.
            limit: Maximum number of graph hits to return.

        Returns:
            A list of graph result dicts; empty on error or when unsupported.
        """
        try:
            from src.containers import get_knowledge_graph_service
            kg_service = get_knowledge_graph_service()
            
            # The service may not implement semantic_search; degrade gracefully.
            search_fn = getattr(kg_service, 'semantic_search', None)
            if callable(search_fn):
                results = search_fn(query, limit)
            else:
                logger.warning("KnowledgeGraphService缺少semantic_search方法")
                results = []
            
            return results or []
        except AttributeError as e:
            logger.error(f"知识图谱服务缺少必要方法: {e}")
            return []
        except Exception as e:
            logger.error(f"查询知识图谱时出错: {e}")
            return []
    
    def _fuse_knowledge_results(self, kb_answer: str, kg_results: List[Dict[str, Any]]) -> str:
        """融合知识库和知识图谱结果，提供更全面的回答"""
        if not kg_results:
            return kb_answer
            
        # 提取知识图谱中的关键信息
        kg_info = []
        for result in kg_results:
            if result.get('type') == 'node' and 'properties' in result:
                properties = result['properties']
                # 提取实体名称和描述
                entity_name = properties.get('name', properties.get('title', '未命名实体'))
                entity_desc = properties.get('description', properties.get('content', ''))
                
                if entity_name:
                    kg_info.append(f"【相关概念】{entity_name}: {entity_desc}")
            
        # 如果有额外的知识图谱信息，添加到回答中
        if kg_info:
            return f"{kb_answer}\n\n### 相关知识补充\n" + "\n".join(kg_info[:2])
        
        return kb_answer
    
    def _format_kg_results(self, kg_results: List[Dict[str, Any]]) -> str:
        """将知识图谱结果格式化为可读文本"""
        if not kg_results:
            return ""
            
        formatted_results = []
        for result in kg_results:
            if result.get('type') == 'node' and 'properties' in result:
                properties = result['properties']
                entity_name = properties.get('name', properties.get('title', '未命名实体'))
                entity_desc = properties.get('description', properties.get('content', ''))
                entity_type = result.get('labels', [''])[0] if result.get('labels') else ''
                
                if entity_type:
                    formatted_results.append(f"● {entity_name}（{entity_type}）\n  {entity_desc}")
                else:
                    formatted_results.append(f"● {entity_name}\n  {entity_desc}")
            
        return "根据知识图谱，相关信息如下：\n" + "\n\n".join(formatted_results)

    async def process_chat_message_with_knowledge(self, message: str, session_id: str, 
                                                  user_id: Optional[str] = None) -> ChatResponse:
        """Handle a chat message with knowledge-base / knowledge-graph support.

        Args:
            message: The user's message.
            session_id: Requested session ID; if unknown, a new session is created.
            user_id: Optional user identifier.

        Returns:
            A ChatResponse with the answer, suggestions and related knowledge.
        """
        try:
            # Get or create the session. BUG FIX: when the requested session did
            # not exist, the original kept using the stale session_id, so both
            # messages were silently dropped (add_message_to_session no-ops on
            # unknown IDs) and the response referenced a non-existent session.
            # Use the effective session's ID throughout instead.
            session = get_chat_session(session_id)
            if not session:
                session = create_chat_session(user_id or "anonymous")
            effective_session_id = session.session_id
            
            # Record the user's message in the session history.
            user_message = ChatMessage(
                role="user",
                content=message,
                timestamp=datetime.now()
            )
            add_message_to_session(effective_session_id, user_message)
            
            # Query the knowledge base and knowledge graph.
            knowledge_results = self._enhanced_knowledge_query(message)
            
            response_text = knowledge_results.get("answer", "抱歉，我没有找到相关信息。")
            
            # Fall back to the default LLM when knowledge lookup found nothing.
            if not response_text or response_text.strip() == "":
                response_text = await self._process_with_llm(message, session)
            
            chat_response = ChatResponse(
                response=response_text,
                session_id=effective_session_id,
                suggestions=knowledge_results.get("suggestions", []),
                related_knowledge=knowledge_results.get("related_items", []),
                source_type=knowledge_results.get("source_type", "llm")
            )
            
            # Record the assistant's reply in the session history.
            assistant_message = ChatMessage(
                role="assistant",
                content=response_text,
                timestamp=datetime.now()
            )
            add_message_to_session(effective_session_id, assistant_message)
            
            return chat_response
            
        except Exception as e:
            logger.error(f"处理聊天消息时出错: {e}")
            return ChatResponse(
                response="抱歉，处理您的消息时出现错误。",
                session_id=session_id
            )

    async def _process_with_llm(self, message: str, session: ChatSession) -> str:
        """
        使用LLM处理消息的默认方法
        
        Args:
            message: 用户消息
            session: 聊天会话
            
        Returns:
            LLM生成的回复
        """
        try:
            # 构建对话历史
            history = []
            for msg in session.messages[-5:]:  # 只取最近5条消息
                history.append({"role": msg.role, "content": msg.content})
            
            # 添加当前消息
            history.append({"role": "user", "content": message})
            
            # 调用LLM（这里简化处理）
            if self.llm:
                response = self.llm.invoke(history)
                return str(response.content) if hasattr(response, 'content') else str(response)
            else:
                return "您好！我是智能客服助手。请问有什么可以帮助您的吗？"
                
        except Exception as e:
            logger.error(f"使用LLM处理消息时出错: {e}")
            return "抱歉，处理您的消息时出现错误。"

    def _evaluate_knowledge_quality(self, query: str, knowledge_result: Dict[str, Any], feedback_context: Dict[str, Any]) -> float:
        """评估知识质量，返回0-1之间的分数"""
        try:
            # 基础分数
            score = 0.0
            
            # 检查是否有回答内容
            if not knowledge_result or 'answer' not in knowledge_result or not knowledge_result['answer']:
                return 0.0
                
            # 回答相关性评分（基于反馈）
            relevance_score = feedback_context.get('relevance_score', 0.7)  # 默认中等相关
            
            # 回答完整性评分
            answer_length = len(knowledge_result['answer'])
            completeness_score = min(1.0, answer_length / 300)  # 假设300字符以上为完整
            
            # 来源多样性评分
            source_type = knowledge_result.get('source_type', '')
            source_score = 1.0 if source_type == 'hybrid' else 0.7 if source_type else 0.5
            
            # 计算综合评分
            score = (relevance_score * 0.4) + (completeness_score * 0.3) + (source_score * 0.3)
            
            # 记录质量评估结果
            self._record_knowledge_feedback(query, knowledge_result, score)
            
            return round(score, 2)
        except Exception as e:
            logger.error(f"评估知识质量时出错: {e}")
            return 0.5  # 默认中等质量
    
    def _auto_update_knowledge(self, query: str, current_answer: str, feedback_context: Dict[str, Any]) -> bool:
        """Create a draft knowledge item from user feedback to improve future answers.

        Returns:
            True when the draft item was created successfully, False otherwise.
        """
        try:
            from src.services.knowledge_base_service import KnowledgeBaseItem
            
            # Prefer an explicit corrected answer from the feedback; otherwise
            # augment the current answer with the raw feedback text.
            corrected_answer = feedback_context.get('correct_answer', '')
            if corrected_answer:
                enhanced_content = corrected_answer
            else:
                feedback_text = feedback_context.get('feedback_text', '')
                enhanced_content = f"{current_answer}\n\n## 用户反馈\n{feedback_text}"
            
            # Draft item: truncate the title to 100 chars and mark for review.
            title = query[:100] + ("..." if len(query) > 100 else "")
            new_knowledge = KnowledgeBaseItem(
                title=title,
                content=enhanced_content,
                category="auto_updated",
                tags=["auto-generated", "feedback-improved"],
                status="draft",  # pending human review
                metadata={
                    "source": "auto_update",
                    "original_query": query,
                    "feedback_context": feedback_context,
                    "created_at": self._get_current_timestamp()
                }
            )
            
            # Persist the new draft item.
            created_item = self.kb_service.create_knowledge_item(new_knowledge)
            
            logger.info(f"自动创建知识条目成功: {created_item.id}")
            return True
        except Exception as e:
            logger.error(f"自动更新知识时出错: {e}")
            return False
    
    def _record_knowledge_feedback(self, query: str, knowledge_result: Dict[str, Any], quality_score: float) -> None:
        """Log a knowledge-quality data point for continuous-improvement analysis."""
        try:
            # Currently log-only; a later iteration should persist these records
            # to a database or analytics pipeline for trend analysis.
            logger.info(f"知识质量反馈 - 查询: {query}, 分数: {quality_score}, 来源: {knowledge_result.get('source_type')}")
            
            # TODO: persist feedback records to durable storage
        except Exception as e:
            logger.error(f"记录知识反馈时出错: {e}")
    
    def submit_feedback(self, session_id: str, message_id: str, rating: float, comment: Optional[str] = None) -> Dict[str, Any]:
        """Record a user's rating of an assistant reply.

        Args:
            session_id: Chat session identifier.
            message_id: Message identifier (used as the interaction content ID).
            rating: Score between 1 and 5 inclusive.
            comment: Optional free-text comment.

        Returns:
            A dict with ``status`` ("success"/"error") and a user-facing message.
        """
        try:
            # Reject out-of-range ratings up front.
            if rating < 1 or rating > 5:
                return {
                    "status": "error",
                    "message": "评分必须在1-5分之间"
                }
            
            session = get_chat_session(session_id)
            if not session:
                return {
                    "status": "error",
                    "message": "会话不存在"
                }
            
            # NOTE(review): message_id is not used to locate the message here;
            # the most recent assistant reply is assumed to be the rated one.
            if not session.messages:
                return {
                    "status": "error",
                    "message": "会话中没有消息"
                }
            
            # Walk backwards to the latest assistant reply, then further back
            # to the user message that prompted it.
            user_message = None
            assistant_message = None
            for i in range(len(session.messages) - 1, -1, -1):
                if session.messages[i].role != "assistant":
                    continue
                assistant_message = session.messages[i]
                for j in range(i - 1, -1, -1):
                    if session.messages[j].role == "user":
                        user_message = session.messages[j]
                        break
                break
            
            if user_message is None or assistant_message is None:
                return {
                    "status": "error",
                    "message": "找不到对应的消息记录"
                }
            
            # Log the quality feedback against the answered content.
            knowledge_result = {
                "answer": assistant_message.content,
                "source_type": "user_feedback",
                "comment": comment
            }
            self._record_knowledge_feedback(user_message.content, knowledge_result, rating)
            
            # Map the 1-5 rating onto a 0-1 quality score (kept for parity with
            # the knowledge-quality scale; persistence below uses the raw rating).
            quality_score = (rating - 1) / 4
            
            try:
                # Persist the interaction via the user-preference service.
                preference_service = UserPreferenceService()
                preference_service.record_interaction(
                    user_id=int(session.user_id),  # service expects an int user ID
                    content_id=message_id or str(uuid.uuid4()),  # synthesize an ID when absent
                    interaction_type="feedback",
                    content_type="knowledge_response",
                    metadata={
                        "session_id": session_id,
                        "query": user_message.content,
                        "response": assistant_message.content
                    },
                    score=rating
                )
                
                logger.info(f"用户反馈已成功记录 - 用户ID: {session.user_id}, 会话ID: {session_id}")
            except Exception as e:
                logger.error(f"存储用户反馈时出错: {e}")
                # Storage failure is non-fatal: the feedback is already logged above.
            
            return {
                "status": "success",
                "message": "感谢您的反馈！"
            }
        except Exception as e:
            logger.error(f"处理用户反馈时出错: {e}")
            return {
                "status": "error",
                "message": "处理反馈时发生错误"
            }
    
    def _get_current_timestamp(self) -> str:
        """获取当前时间戳"""
        from datetime import datetime
        return datetime.utcnow().isoformat()
    
    def _process_with_workflow(self, query: str, context: Dict[str, Any]) -> str:
        """Run the multi-agent workflow for a complex question and return its answer.

        Returns an empty string when the workflow fails, so callers can fall back.
        """
        try:
            from src.research_core.enhanced_multi_agent_workflow import EnhancedMultiAgentState as MultiAgentState
            
            # Seed the workflow with a blank state built around the question.
            blank_state: Dict[str, Any] = {
                "question": query,
                "search_query": "",
                "search_results": "",
                "research_complete": False,
                "final_answer": "",
                "images": [],
                "tables": [],
                "multimodal_content": {},
                "search_strategy": "",
                "analysis_results": {},
                "agent_assignments": {},
                "iteration_count": 0,
                "previous_states": [],
                "cache_key": "",
                "reflection": "",
                "visualizations": {}
            }
            initial_state = MultiAgentState(**blank_state)
            
            result = self.workflow.invoke(initial_state)
            
            # Prefer an explicit final answer when the workflow produced one.
            final_answer = getattr(result, 'final_answer', None)
            if final_answer:
                return final_answer
            
            # Otherwise surface the analysis content, if any.
            analysis = getattr(result, 'analysis_results', {})
            if analysis and isinstance(analysis, dict):
                return analysis.get('content', '已完成分析，但未生成具体答案。')
            
            return "已处理您的请求，但未找到明确答案。"
        except Exception as e:
            logger.error(f"使用工作流处理时出错: {e}")
            return ""
    
    async def _process_with_workflow_stream(self, query: str, context: Dict[str, Any]) -> AsyncGenerator[str, None]:
        """Stream the multi-agent workflow's answer chunks for a complex question."""
        try:
            from src.research_core.enhanced_multi_agent_workflow import EnhancedMultiAgentState as MultiAgentState
            
            # Seed the workflow with a blank state built around the question.
            blank_state: Dict[str, Any] = {
                "question": query,
                "search_query": "",
                "search_results": "",
                "research_complete": False,
                "final_answer": "",
                "images": [],
                "tables": [],
                "multimodal_content": {},
                "search_strategy": "",
                "analysis_results": {},
                "agent_assignments": {},
                "iteration_count": 0,
                "previous_states": [],
                "cache_key": "",
                "reflection": "",
                "visualizations": {}
            }
            initial_state = MultiAgentState(**blank_state)
            
            # Relay each final_answer chunk as the workflow emits it.
            emitted = ""
            for output in self.workflow.stream(initial_state):
                if isinstance(output, dict) and "final_answer" in output:
                    chunk = output["final_answer"]
                    yield chunk
                    emitted += chunk
            
            if not emitted:
                yield "已完成处理，但未生成具体答案。"
        except Exception as e:
            logger.error(f"流式使用工作流处理时出错: {e}", exc_info=True)
            yield "处理请求时出现错误，请稍后重试。"
    
    def _generate_default_response(self, message: str, model=None) -> str:
        """生成默认响应"""
        # 使用LLM生成响应
        try:
            # 如果提供了特定模型，使用它；否则使用默认llm
            selected_model = model if model else self.llm
            
            prompt = f"""
            你是一个智能客服助手，请以友好和专业的语气回复用户的问题。
            如果你不知道答案，请建议用户联系人工客服或提出更具体的问题。
            
            用户问题: {message}
            
            请提供有用且简洁的回答:
            """
            
            if selected_model:
                response = selected_model.invoke(prompt)
                if hasattr(response, 'content'):
                    return str(response.content)
                return str(response)
            else:
                return "您好！我是智能客服助手。请问有什么可以帮助您的吗？"
        except Exception as e:
            logger.error(f"生成默认响应时出错: {e}")
            return "您好！我是智能客服助手。请问有什么可以帮助您的吗？"