import re
from collections import Counter
from typing import Dict, List, Optional, Tuple

import jieba
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

from middleware.milvus_client import MilvusClient, SearchResult, VectorDatabaseRouter
from middleware.model_client import ModelClientFactory, ModelType, model_client
from middleware.redis_adapter import get_redis_adapter
from tools.text_processing import get_embedding, text_cleaning
from utils.exceptions import ToolError
from utils.logger import Logger

# Module-scoped logger shared by all text-QA tooling in this file.
logger = Logger.get_logger("text_qa_tools")

# Vector-retrieval configuration.
VECTOR_RETRIEVAL_CONFIG = {
    "default_top_k": 3,
    "similarity_threshold": 0.6,
    "batch_size": 100,  # chunk size for batch operations
    "collection_mapping": {
        "text_qa_kb": "milvus"  # maps collection name -> database backend
    }
}


class TextQATools:
    """Core text-QA toolset: preprocessing, retrieval, similarity scoring,
    and model-backed answer generation."""

    def __init__(self):
        """Initialize the toolset and its middleware clients.

        Raises:
            ToolError: if any middleware client fails to initialize.
        """
        try:
            # Vector DB router transparently switches between Milvus and
            # PostgreSQL+PgVector depending on the target collection.
            self.vector_router = VectorDatabaseRouter()
            self.redis_client = get_redis_adapter()  # Redis adapter; supports cluster mode
            self.model_client = model_client  # shared model client, multiple backends
            self.stop_words = self._load_stop_words()
            
            # Best-effort: ensure the default vector collection exists.
            self._init_vector_collections()
            
            logger.info("TextQATools initialized successfully with vector database router")
        except Exception as e:
            logger.error(f"TextQATools initialization failed: {str(e)}", exc_info=True)
            raise ToolError(f"工具初始化失败: {str(e)}") from e
    
    def _init_vector_collections(self):
        """Create the default vector collection when it does not yet exist.

        Failures are downgraded to warnings: the tool stays usable even if
        the collection cannot be created here.
        """
        try:
            collection = "text_qa_kb"
            backend = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(collection, "milvus")

            # Nothing to do if the collection is already present.
            if self.vector_router.collection_exists(collection, backend):
                return

            # Field layout; the embedding dim (768) must match get_embedding's output.
            fields = [
                {"name": "id", "type": "INT64", "is_primary": True},
                {"name": "text", "type": "VARCHAR", "params": {"max_length": 2048}},
                {"name": "embedding", "type": "FLOAT_VECTOR", "params": {"dim": 768}}
            ]
            self.vector_router.create_collection(
                collection_name=collection,
                schema=fields,
                index_params={"index_type": "IVF_FLAT", "nlist": 1024},
                db_type=backend
            )
            logger.info(f"Created vector collection: {collection} with {backend}")
        except Exception as e:
            logger.warning(f"Failed to initialize vector collections: {str(e)}")

    def _load_stop_words(self) -> set:
        """Return the Chinese stop-word set, preferring the Redis cache.

        On a cache miss, a small built-in default set is written back to
        Redis with a one-day TTL. On any error an empty set is returned so
        downstream processing can continue.
        """
        cache_key = "text_qa:stop_words"
        try:
            cached = self.redis_client.get(cache_key)
            if cached:
                return set(cached.decode("utf-8").split(","))

            # Cache miss: fall back to the built-in defaults.
            defaults = {
                "的", "了", "是", "在", "和", "有", "就", "不", "人", "我", "到", "都",
                "来", "去", "上", "下", "大", "小", "多", "少", "一", "二", "三", "四"
            }
            # Persist for one day (86400 s) so later inits hit the cache.
            self.redis_client.set(cache_key, ",".join(defaults), ex=86400)
            return defaults
        except Exception as e:
            logger.warning(f"加载停用词失败，使用空集合: {str(e)}")
            return set()

    def preprocess_text(self, text: str) -> str:
        """Clean, segment, and de-noise text, returning space-joined tokens.

        Pipeline: character cleanup -> jieba segmentation -> drop stop words
        and single-character tokens.

        Raises:
            ToolError: if any pipeline stage fails.
        """
        try:
            cleaned = text_cleaning(text)
            if not cleaned:
                return ""

            # Segment, then keep only multi-character, non-stop-word tokens.
            kept = (
                token for token in jieba.lcut(cleaned)
                if len(token) > 1 and token not in self.stop_words
            )
            return " ".join(kept)
        except Exception as e:
            logger.error(f"文本预处理失败: text={text}, error={str(e)}")
            raise ToolError(f"文本预处理失败: {str(e)}") from e

    def extract_keywords(self, text: str, top_k: int = 5) -> List[str]:
        """Extract the top-k keywords of *text* by term frequency.

        Args:
            text: raw input text
            top_k: maximum number of keywords to return

        Returns:
            Keywords ordered by descending frequency; ties keep first-seen
            order (Counter.most_common is stable, matching the original
            sorted(..., reverse=True) behavior).

        Raises:
            ToolError: if preprocessing or counting fails.
        """
        try:
            processed_text = self.preprocess_text(text)
            if not processed_text:
                return []

            # Counter replaces the hand-rolled frequency dict + sort.
            counts = Counter(processed_text.split())
            return [word for word, _ in counts.most_common(top_k)]
        except Exception as e:
            logger.error(f"关键词提取失败: text={text}, error={str(e)}")
            raise ToolError(f"关键词提取失败: {str(e)}") from e

    def search_knowledge_base(
            self,
            query: str,
            collection_name: str = "text_qa_kb",
            top_k: int = 3,
            filter_expr: Optional[Dict] = None
    ) -> List[Tuple[str, float]]:
        """Retrieve texts similar to *query* from the vector database.

        Pipeline: embed the query -> COSINE vector search via the database
        router -> batch-fetch the hit entities to recover their raw text.

        Args:
            query: query text
            collection_name: vector collection name
            top_k: number of results to return
            filter_expr: optional filter-condition dict; passed straight to
                the router's ``filter`` kwarg
                (NOTE(review): exact filter schema depends on the router —
                confirm against the router API)

        Returns:
            [(text, similarity score)] sorted by score descending; empty
            list on any failure (fault-tolerant by design, never raises).
        """
        try:
            logger.info(f"[KB Search] 开始搜索知识库，查询文本: {query[:50]}..., 集合: {collection_name}, top_k={top_k}")
            if filter_expr:
                logger.info(f"[KB Search] 应用过滤条件: {filter_expr}")

            # Resolve which database backend serves this collection.
            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(collection_name, "milvus")
            logger.info(f"[KB Search] 确定数据库类型: {db_type}")

            # 1. Embed the query text.
            logger.info(f"[KB Search] 生成查询向量")
            query_embedding = get_embedding(query)
            if not query_embedding or len(query_embedding) == 0:
                logger.warning("[KB Search] 查询向量生成失败，返回空结果")
                return []
            logger.info(f"[KB Search] 查询向量生成成功，维度: {len(query_embedding)}")

            # 2. Vector search (router picks the concrete database).
            logger.info(f"[KB Search] 执行向量搜索")
            results = self.vector_router.search(
                collection_name=collection_name,
                query_vectors=[query_embedding],
                top_k=top_k,
                metric_type="COSINE",
                filter=filter_expr,
                db_type=db_type
            )
            logger.info(f"[KB Search] 向量搜索完成，获取到原始结果数量: {len(results[0]) if results and len(results) > 0 else 0}")

            # 3. Join hits back to (text, score) pairs.
            retrieved_texts = []
            if results and len(results) > 0:
                # Fetch entities in one batch to minimize database round-trips.
                hit_ids = [hit.id for hit in results[0]]
                logger.info(f"[KB Search] 需要获取的实体ID数量: {len(hit_ids)}")
                if hit_ids:
                    entities = self.vector_router.get_entities_by_id(
                        collection_name=collection_name,
                        ids=hit_ids,
                        db_type=db_type
                    )
                    logger.info(f"[KB Search] 获取到实体数量: {len(entities)}")

                    # Map entity id -> entity for O(1) matching below.
                    id_to_entity = {entity.get("id"): entity for entity in entities}

                    # Pair each hit with its entity text; hits whose entity is
                    # missing or lacks a "text" field are silently dropped.
                    for i, hit in enumerate(results[0]):
                        entity = id_to_entity.get(hit.id)
                        if entity and "text" in entity:
                            text = entity["text"]
                            score = hit.score
                            retrieved_texts.append((text, score))
                            logger.info(f"[KB Search] 结果 {i+1}: 相似度分数={score}, 文本长度={len(text)} 字符")

            # Sort by similarity descending (router ordering is not assumed).
            retrieved_texts.sort(key=lambda x: x[1], reverse=True)
            logger.info(f"[KB Search] 搜索完成，总计 {len(retrieved_texts)} 条有效结果")
            return retrieved_texts
        except Exception as e:
            logger.error(f"[KB Search] 知识库检索失败: query={query}, error={str(e)}", exc_info=True)
            # Non-fatal: return empty results instead of raising (fault tolerance).
            return []
    
    def batch_search_knowledge_base(
            self,
            queries: List[str],
            collection_name: str = "text_qa_kb",
            top_k: int = 3,
            batch_size: Optional[int] = None
    ) -> List[List[Tuple[str, float]]]:
        """Search the knowledge base for every query, in order.

        Queries are walked in chunks of ``batch_size`` purely to bound the
        working set; each query is still searched individually.

        Args:
            queries: query texts
            collection_name: vector collection name
            top_k: results per query
            batch_size: chunk size; defaults to the configured value
                (fixed the implicit-Optional annotation: was ``int = None``)

        Returns:
            One result list per query, aligned with *queries*.
        """
        if batch_size is None:
            batch_size = VECTOR_RETRIEVAL_CONFIG["batch_size"]

        results: List[List[Tuple[str, float]]] = []
        for start in range(0, len(queries), batch_size):
            # Per-chunk loop; the intermediate per-batch list was dropped —
            # it only ever got extend()ed into `results` unchanged.
            for query in queries[start:start + batch_size]:
                results.append(self.search_knowledge_base(query, collection_name, top_k))
        return results
    
    def insert_to_knowledge_base(
            self,
            texts: List[str],
            collection_name: str = "text_qa_kb",
            metadata: Optional[List[Dict]] = None
    ) -> bool:
        """Embed *texts* and bulk-insert them into the knowledge base.

        Args:
            texts: texts to insert
            collection_name: target vector collection
            metadata: optional per-text metadata dicts, matched by index

        Returns:
            True if at least one record was inserted, False otherwise
            (including on any error).
        """
        try:
            # Resolve which database backend serves this collection.
            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(collection_name, "milvus")

            # NOTE(review): ids are positional indexes of this call, so
            # repeated inserts reuse ids 0..n-1 — confirm upstream dedup /
            # collision handling.
            rows = []
            for idx, text in enumerate(texts):
                embedding = get_embedding(text)
                if not embedding:
                    continue  # skip texts whose embedding failed
                row = {"id": idx, "text": text, "embedding": embedding}
                if metadata and idx < len(metadata):
                    row.update(metadata[idx])
                rows.append(row)

            if not rows:
                return False

            # Single bulk insert through the router.
            self.vector_router.insert(
                collection_name=collection_name,
                data=rows,
                db_type=db_type
            )
            logger.info(f"Inserted {len(rows)} items into {collection_name}")
            return True
        except Exception as e:
            logger.error(f"Failed to insert into knowledge base: error={str(e)}", exc_info=True)
            return False

    def calculate_similarity(self, text1: str, text2: str) -> float:
        """Return the embedding cosine similarity of two texts, rounded to 4 dp.

        Returns 0.0 when either embedding is unavailable or on any error.
        """
        try:
            vec_a = get_embedding(text1)
            vec_b = get_embedding(text2)
            if not vec_a or not vec_b:
                return 0.0

            # cosine_similarity expects 2-D inputs: wrap each vector as one row.
            score = cosine_similarity(
                np.array(vec_a).reshape(1, -1),
                np.array(vec_b).reshape(1, -1)
            )[0][0]
            return round(score, 4)
        except Exception as e:
            logger.error(f"相似度计算失败: text1={text1}, text2={text2}, error={str(e)}")
            return 0.0

    def generate_answer(
            self,
            query: str,
            retrieved_texts: List[Tuple[str, float]],
            similarity_threshold: float = 0.6,
            use_model: bool = True
    ) -> Optional[str]:
        """Generate an answer from retrieval results (model-backed).

        Fixes over the previous revision: removed the debug ``print``
        scaffolding, and fixed the retry path which referenced ``asyncio``
        before importing it (guaranteed NameError on that path).

        Args:
            query: user question
            retrieved_texts: (text, score) pairs from retrieval
            similarity_threshold: minimum score for a text to serve as context
            use_model: kept for interface compatibility; the model path is
                always taken when the client is available

        Returns:
            The generated answer, an explicit error string on failure, never None.
        """
        try:
            logger.info(f"[Answer Generation] 开始生成答案，查询: {query[:50]}..., 相似度阈值: {similarity_threshold}")

            # Keep only contexts that clear the similarity threshold.
            valid_texts = [text for text, score in retrieved_texts if score >= similarity_threshold]
            logger.info(f"[Answer Generation] 有效检索结果数量: {len(valid_texts)}")

            if not valid_texts:
                # No usable context: instruct the model to answer directly.
                logger.info(f"[Answer Generation] 无有效检索结果，将使用直接回答模式")
                valid_texts = ["请基于您的知识直接回答用户问题"]

            if not self.model_client:
                logger.error("[Answer Generation] 模型客户端未初始化")
                return "错误：AI模型未正确初始化，请检查系统配置"

            try:
                answer = self._generate_with_model(query, valid_texts)
                logger.info(f"[Answer Generation] 模型答案生成成功，长度: {len(answer)} 字符")
                return answer
            except Exception as e:
                # Second attempt: call the model directly, bypassing prompt building.
                logger.error(f"[Answer Generation] 模型生成失败，尝试直接使用模型: {str(e)}")
                import asyncio  # local import, as elsewhere in this class

                prompt = f"问题: {query}\n\n请提供详细的回答。"
                result = self.model_client.generate(prompt)
                # The client may be sync or async depending on the backend.
                if asyncio.iscoroutine(result):
                    result = asyncio.run(result)
                logger.info(f"[Answer Generation] 直接模型调用完成")
                return result

        except Exception as e:
            logger.error(f"[Answer Generation] 答案生成失败: query={query}, error={str(e)}", exc_info=True)
            # No silent fallback here: surface the error to the caller.
            error_msg = f"生成回答时出错: {str(e)}"
            logger.info(f"[Answer Generation] 返回错误信息: {error_msg}")
            return error_msg
    
    def _generate_with_model(self, query: str, contexts: List[str]) -> str:
        """Generate an answer with the model client using a QA prompt.

        Falls back to ``_generate_fallback`` when the model raises or
        returns an empty answer. Fixes over the previous revision: removed
        the debug ``print`` scaffolding and the blind ``asyncio.run`` —
        the result is inspected with ``asyncio.iscoroutine`` so both sync
        and async client backends are handled.

        Args:
            query: user question
            contexts: context passages used to ground the answer

        Returns:
            The model's answer, or the fallback answer.
        """
        import asyncio  # local import, matching the original style

        try:
            prompt = self.model_client.build_qa_prompt(query, contexts)
            result = self.model_client.generate(prompt)
            # generate() may be a coroutine function or plain sync call.
            answer = asyncio.run(result) if asyncio.iscoroutine(result) else result

            if answer and len(answer.strip()) > 0:
                logger.info(f"模型生成成功: query={query}, answer_length={len(answer)}")
                return answer

            logger.warning(f"模型生成结果为空，回退到降级方案: query={query}")
            return self._generate_fallback(query, contexts)
        except Exception as e:
            logger.warning(f"模型生成失败，回退到降级方案: query={query}, error={str(e)}")
            return self._generate_fallback(query, contexts)
    
    def _generate_fallback(self, query: str, contexts: List[str]) -> str:
        """Last-resort answer generation when the model path is unavailable.

        Prefers the client's own fallback ``generate``; otherwise joins the
        contexts into a template answer. Never raises.
        """
        try:
            # NOTE(review): if model_client.generate is async here, this
            # returns a coroutine object rather than text — confirm that
            # only the sync FallbackClient reaches this path.
            if not hasattr(self.model_client, 'generate'):
                # Plain concatenation as the absolute last resort.
                return f"根据相关信息：{'；'.join(contexts)}。总结回答：{query}"
            return self.model_client.generate(query, contexts=contexts)
        except Exception as e:
            logger.error(f"降级生成失败: query={query}, error={str(e)}")
            return f"抱歉，关于'{query}'的信息处理遇到问题，请稍后重试。"

    async def generate_answer_async(
        self, 
        query: str, 
        retrieved_texts: List[Tuple[str, float]], 
        similarity_threshold: float = 0.6,
        use_model: bool = True
    ) -> Optional[str]:
        """异步生成答案（用于高并发场景）"""
        try:
            # 过滤低于阈值的结果
            valid_texts = [text for text, score in retrieved_texts if score >= similarity_threshold]
            if not valid_texts:
                logger.info(f"无有效检索结果，query={query}")
                return None

            if use_model and self.model_client:
                # 异步模型生成
                prompt = self.model_client.build_qa_prompt(query, valid_texts)
                answer = await self.model_client.generate(prompt)
                
                if answer and len(answer.strip()) > 0:
                    logger.info(f"异步模型生成成功: query={query}")
                    return answer
            
            # 回退到降级方案
            return self._generate_fallback(query, valid_texts)
            
        except Exception as e:
            logger.error(f"异步答案生成失败: query={query}, error={str(e)}")
            return self._generate_fallback(query, valid_texts)


# Singleton tool instance (reused globally).
# NOTE(review): constructed at import time — __init__ touches Redis and the
# vector database, so importing this module has side effects; confirm this
# is intended for all import paths.
text_qa_tools = TextQATools()

# Explicit public API of this module.
__all__ = ['TextQATools', 'text_qa_tools']