"""
文本问答智能体 - 集成模型框架版本
提供问答功能，支持模型生成和降级方案
"""

import asyncio
from typing import Dict, Any, Optional, List, Tuple
from dataclasses import dataclass
from datetime import datetime

from utils.logger import Logger
from utils.exceptions import AgentError
from apps.text_qa.tools import text_qa_tools
from middleware.model_client import ModelClientFactory, model_client

logger = Logger.get_logger("text_qa_agent")


@dataclass
class QARequest:
    """A question-answering request with retrieval/generation settings."""
    query: str
    use_model: bool = False  # whether to prefer model generation over the rule-based fallback
    similarity_threshold: float = 0.6  # minimum retrieval similarity for a passage to count
    max_results: int = 5  # maximum number of knowledge-base results to retrieve
    timeout: float = 30.0  # timeout budget in seconds
    request_id: Optional[str] = None  # caller-supplied ID; auto-generated from the start timestamp if None


@dataclass
class QAResponse:
    """A question-answering response."""
    answer: Optional[str]  # generated answer text, or None when processing failed
    confidence: float  # confidence score in [0.0, 1.0]
    retrieved_texts: List[Tuple[str, float]]  # (text, similarity score) pairs from retrieval
    processing_time: float  # wall-clock seconds spent processing the request
    model_used: bool  # True only when the model (not the fallback) produced the answer
    request_id: str  # echoes the request's ID (or the auto-generated one)
    timestamp: datetime  # when processing of the request started
    fallback_reason: Optional[str] = None  # why the fallback/error path was taken; None for model answers


class TextQAAgent:
    """Text question-answering agent.

    Pipeline: query preprocessing -> knowledge-base retrieval ->
    answer generation (model-backed with a rule-based fallback) ->
    confidence scoring. Every stage degrades gracefully, so
    process_question never raises.
    """

    def __init__(self):
        # Tools (cleaning, retrieval, generation) and the model client are
        # module-level singletons shared by all agent instances.
        self.tools = text_qa_tools
        self.model_client = model_client
        logger.info("TextQAAgent initialized")

    async def process_question(self, request: QARequest) -> QAResponse:
        """Run the full QA pipeline for one request.

        Args:
            request: The question plus retrieval/generation settings.
                request.timeout bounds the model-generation call.

        Returns:
            A fully populated QAResponse. On failure, answer is None,
            confidence is 0.0 and fallback_reason describes the error.
        """
        start_time = datetime.now()
        request_id = request.request_id or f"qa_{start_time.timestamp():.0f}"

        try:
            logger.info(f"Processing question: {request.query[:50]}... (request_id={request_id})")

            # 1. Normalize the raw query text.
            processed_query = await self._preprocess_query(request.query)

            # 2. Retrieve candidate passages from the knowledge base.
            retrieved_texts = await self._retrieve_relevant_texts(
                processed_query,
                max_results=request.max_results,
            )

            # 3. Generate an answer. The request's timeout bounds the model
            # call; on expiry the rule-based fallback is used instead.
            answer, model_used, fallback_reason = await self._generate_answer(
                request.query,
                retrieved_texts,
                request.use_model,
                request.similarity_threshold,
                timeout=request.timeout,
            )

            # 4. Score the generated answer against the retrieval evidence.
            confidence = self._calculate_confidence(retrieved_texts, answer)

            # 5. Assemble the response.
            processing_time = (datetime.now() - start_time).total_seconds()

            response = QAResponse(
                answer=answer,
                confidence=confidence,
                retrieved_texts=retrieved_texts,
                processing_time=processing_time,
                model_used=model_used,
                request_id=request_id,
                timestamp=start_time,
                fallback_reason=fallback_reason,
            )

            logger.info(f"Question processed successfully: {request_id}, time={processing_time:.2f}s")
            return response

        except Exception as e:
            # Boundary handler: convert any unexpected failure into an
            # explicit error response so callers always get a QAResponse.
            processing_time = (datetime.now() - start_time).total_seconds()
            logger.error(f"Question processing failed: {request_id}, error={str(e)}")

            return QAResponse(
                answer=None,
                confidence=0.0,
                retrieved_texts=[],
                processing_time=processing_time,
                model_used=False,
                request_id=request_id,
                timestamp=start_time,
                fallback_reason=f"处理失败: {str(e)}",
            )

    async def _preprocess_query(self, query: str) -> str:
        """Clean the query text; fall back to the original on any error."""
        try:
            cleaned_query = self.tools.clean_text(query)
            logger.debug(f"Query preprocessed: '{query}' -> '{cleaned_query}'")
            return cleaned_query
        except Exception as e:
            # Best-effort: preprocessing is an enhancement, not a requirement.
            logger.warning(f"Query preprocessing failed, using original: {str(e)}")
            return query

    async def _retrieve_relevant_texts(self, query: str, max_results: int = 5) -> List[Tuple[str, float]]:
        """Retrieve up to max_results (text, similarity) pairs for the query.

        Returns an empty list on failure so the caller can degrade gracefully.
        """
        try:
            # Keywords are extracted for observability (debug logging) only;
            # retrieval itself runs on the full query string.
            keywords = self.tools.extract_keywords(query)
            logger.debug(f"Extracted keywords: {keywords}")

            retrieved_texts = self.tools.search_knowledge_base(query, top_k=max_results)
            logger.info(f"Retrieved {len(retrieved_texts)} relevant texts")

            return retrieved_texts

        except Exception as e:
            logger.error(f"Text retrieval failed: {str(e)}")
            return []

    async def _generate_answer(
        self,
        query: str,
        retrieved_texts: List[Tuple[str, float]],
        use_model: bool,
        similarity_threshold: float,
        timeout: float = 30.0,
    ) -> Tuple[Optional[str], bool, Optional[str]]:
        """Generate an answer from the retrieved texts.

        Args:
            query: The original (uncleaned) user question.
            retrieved_texts: (text, similarity) pairs from retrieval.
            use_model: Prefer model generation when a client is available.
            similarity_threshold: Minimum similarity for a passage to count.
            timeout: Upper bound in seconds for the model call; on expiry
                the rule-based fallback is used instead.

        Returns:
            (answer, model_used, fallback_reason) — fallback_reason is None
            only when the model produced the answer.
        """
        try:
            # Without at least one passage above the threshold there is
            # nothing to ground an answer in.
            if not any(score >= similarity_threshold for _, score in retrieved_texts):
                logger.info("No valid retrieved texts found")
                return "抱歉，没有找到相关信息来回答您的问题。", False, "无有效检索结果"

            if use_model and self.model_client:
                try:
                    logger.info(f"Using model generation: {self.model_client.model_type.value}")
                    # Bound the model call so a hung backend cannot stall the
                    # request past the caller's budget (QARequest.timeout was
                    # previously declared but never consumed).
                    answer = await asyncio.wait_for(
                        self.tools.generate_answer_async(
                            query, retrieved_texts, similarity_threshold, use_model=True
                        ),
                        timeout=timeout,
                    )

                    if answer:
                        logger.info("Model generation successful")
                        return answer, True, None
                    logger.warning("Model generation returned empty, falling back")

                except asyncio.TimeoutError:
                    logger.warning(f"Model generation timed out after {timeout}s, falling back")
                except Exception as e:
                    logger.warning(f"Model generation failed: {str(e)}, falling back")

            # Rule-based fallback (also the path when use_model is False).
            logger.info("Using fallback generation")
            answer = self.tools.generate_answer(
                query, retrieved_texts, similarity_threshold, use_model=False
            )

            return answer, False, "使用降级方案"

        except Exception as e:
            logger.error(f"Answer generation failed: {str(e)}")
            return "抱歉，答案生成过程中出现错误。", False, f"生成失败: {str(e)}"

    def _calculate_confidence(self, retrieved_texts: List[Tuple[str, float]], answer: Optional[str]) -> float:
        """Score the answer in [0.0, 1.0].

        Blend of the best retrieval similarity (70%) and an answer-length
        factor (30%, saturating at 100 characters). Returns 0.0 when there
        is no evidence or no answer; 0.5 if scoring itself fails.
        """
        try:
            if not retrieved_texts or not answer:
                return 0.0

            # Best similarity among retrieved passages.
            max_similarity = max(score for _, score in retrieved_texts)

            # Longer answers score higher, capped at 1.0 (100+ characters).
            answer_length_factor = min(len(answer) / 100, 1.0)

            confidence = max_similarity * 0.7 + answer_length_factor * 0.3
            return min(confidence, 1.0)

        except Exception as e:
            logger.warning(f"Confidence calculation failed: {str(e)}")
            return 0.5  # neutral default when scoring itself fails

    def get_model_status(self) -> Dict[str, Any]:
        """Report the model client's type and a coarse health flag.

        config_name/available_configs are placeholders kept for interface
        compatibility after the config-manager dependency was removed.
        """
        try:
            return {
                "model_type": self.model_client.model_type.value if self.model_client else "unknown",
                "is_healthy": self.model_client is not None,
                "config_name": "unknown",
                "available_configs": [],
            }
        except Exception as e:
            logger.error(f"Failed to get model status: {str(e)}")
            return {
                "model_type": "unknown",
                "is_healthy": False,
                "error": str(e),
            }


# Module-level singleton agent instance shared by importers.
text_qa_agent = TextQAAgent()

# Explicit public API of this module.
__all__ = ['TextQAAgent', 'text_qa_agent', 'QARequest', 'QAResponse']