"""
使用LangChain链式调用的问答系统模块
实现基于LangChain Expression Language (LCEL) 的知识问答功能
支持Redis和MySQL混合存储对话历史
"""
from typing import Dict, List, Any, Optional
import os
import json
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough, RunnableParallel
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import Milvus
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.llms import Tongyi
from elasticsearch import Elasticsearch
from app.core.config import settings
from app.core.cache import cache_manager

class QAChainSystem:
    """Question-answering system built on LangChain Expression Language (LCEL).

    Combines keyword retrieval (Elasticsearch) with semantic retrieval
    (Milvus), answers through the Qwen (Tongyi) LLM, and persists the
    conversation history via ``cache_manager`` (Redis cache synced to MySQL).
    """

    def __init__(self):
        """Initialize the LLM, the retrieval clients and the QA chain."""
        # Qwen LLM accessed through the DashScope API.
        self.llm = Tongyi(
            model_name="qwen-plus",
            dashscope_api_key=settings.QWEN_API_KEY
        )

        # Elasticsearch client (8.x).  The 8.x client renamed the
        # constructor argument ``timeout`` to ``request_timeout``.
        self.es_client = Elasticsearch(
            hosts=[f"http://{settings.ELASTICSEARCH_HOST}:{settings.ELASTICSEARCH_PORT}"],
            verify_certs=False,
            request_timeout=30
        )

        # Milvus connection parameters; a vector-store handle is created
        # per query in _milvus_retrieval.
        self.milvus_host = settings.MILVUS_HOST
        self.milvus_port = settings.MILVUS_PORT

        # Embedding model BAAI/bge-large-zh-v1.5 (optimized for Chinese).
        model_path = "./models/bge-large-zh"
        if os.path.exists(model_path):
            # A known packaging issue ships the pooling config as
            # "config .json" (with a stray space); rename it so
            # sentence-transformers can locate it.
            pooling_config_path = os.path.join(model_path, "1_Pooling", "config .json")
            fixed_pooling_config_path = os.path.join(model_path, "1_Pooling", "config.json")
            if os.path.exists(pooling_config_path) and not os.path.exists(fixed_pooling_config_path):
                os.rename(pooling_config_path, fixed_pooling_config_path)
                print("修复了模型配置文件名中的空格问题")

            print(f"使用本地模型: {model_path}")
            self.embeddings = HuggingFaceEmbeddings(
                model_name=model_path
            )
        else:
            # Fall back to downloading the model from the Hugging Face hub.
            print("使用在线模型: BAAI/bge-large-zh-v1.5")
            self.embeddings = HuggingFaceEmbeddings(
                model_name="BAAI/bge-large-zh-v1.5"
            )

        # Pre-build the LCEL question-answering chain.
        self.qa_chain = self._create_qa_chain()

    def _create_qa_chain(self):
        """Build the LCEL QA chain.

        The chain takes a dict with ``question`` and ``user_id`` keys; an
        optional pre-computed ``context`` key avoids a second retrieval pass
        (see answer_question).

        Returns:
            A runnable mapping the input dict to an answer string.
        """
        template = """
        你是一个智能教育助手。请根据以下背景知识和对话历史回答问题。
        如果背景知识中没有相关信息，请说明无法根据提供的资料回答该问题。
        
        背景知识:
        {context}
        
        对话历史:
        {history}
        
        问题: {question}
        
        请提供准确、简洁的回答:
        """

        prompt = ChatPromptTemplate.from_template(template)

        # BUG FIX: every branch of RunnableParallel receives the WHOLE input
        # dict.  The original wiring therefore passed the dict itself to
        # _retrieve_context / _get_conversation_history and rendered the raw
        # dict as {question}.  Extract each field explicitly instead.
        chain = (
            RunnableParallel({
                # Reuse a caller-supplied context when present, otherwise
                # run the hybrid retrieval on the question text.
                "context": lambda x: x.get("context") or self._retrieve_context(x["question"]),
                "question": lambda x: x["question"],
                "history": lambda x: self._get_conversation_history(x["user_id"])
            })
            | prompt
            | self.llm
            | StrOutputParser()
        )

        return chain

    def _retrieve_context(self, question: str) -> str:
        """Retrieve supporting context for *question*.

        Runs keyword search (Elasticsearch) and semantic search (Milvus),
        boosts keyword hits, merges and sorts by score, de-duplicates by
        content similarity and keeps at most three results.

        Args:
            question: The user question.

        Returns:
            Context formatted as "来源: ...\\n内容: ..." blocks joined by
            newlines; empty string when nothing was retrieved.
        """
        es_results = self._elasticsearch_retrieval(question)
        milvus_results = self._milvus_retrieval(question)

        # Boost keyword matches — exact-term hits are usually more relevant.
        # NOTE(review): ES scores and Milvus distances are on different,
        # possibly inverted scales — confirm the combined ranking is intended.
        for result in es_results:
            result["score"] = result["score"] * 1.2

        combined_results = es_results + milvus_results
        combined_results.sort(key=lambda x: x["score"], reverse=True)

        # De-duplicate by character-level similarity, keep at most 3.
        unique_results = []
        for result in combined_results:
            if len(unique_results) >= 3:
                break  # enough results collected; no need to scan the rest
            if not any(
                self._is_similar_content(result["content"], kept["content"])
                for kept in unique_results
            ):
                unique_results.append(result)

        return "\n".join(
            f"来源: {r['source']}\n内容: {r['content']}"
            for r in unique_results
        )

    def _get_conversation_history(self, user_id: str) -> str:
        """Return the user's recent conversation history formatted for the prompt.

        History is loaded from the cache; any failure degrades to an empty
        history instead of breaking the QA flow.

        Args:
            user_id: The user identifier.

        Returns:
            Up to the last 3 exchanges as "用户: ... / 助手: ..." lines,
            or "" when no history is available.
        """
        try:
            history = cache_manager.load_conversation_memory(user_id)

            if not history:
                # Nothing cached — start with an empty history.
                return ""

            formatted_history = []
            for item in history[-3:]:  # only the 3 most recent exchanges
                formatted_history.append(f"用户: {item['question']}")
                formatted_history.append(f"助手: {item['answer']}")

            return "\n".join(formatted_history)
        except Exception as e:
            # Best-effort: history is optional context, never fatal.
            print(f"获取对话历史时出错: {e}")
            return ""

    def _elasticsearch_retrieval(self, query: str, index_name: str = "knowledge_base") -> List[Dict[str, Any]]:
        """Keyword retrieval via Elasticsearch.

        Args:
            query: The query text.
            index_name: Elasticsearch index to search.

        Returns:
            Up to 3 hits as dicts with content/source/score/id; empty list
            on any failure.
        """
        try:
            # The 8.x client deprecates the ``body`` parameter in favor of
            # top-level ``query`` / ``size`` keyword arguments.
            response = self.es_client.search(
                index=index_name,
                query={
                    "multi_match": {
                        "query": query,
                        "fields": ["content", "title"],
                        "type": "best_fields"
                    }
                },
                size=3
            )

            results = []
            for hit in response['hits']['hits']:
                results.append({
                    "content": hit['_source'].get('content', ''),
                    "source": hit['_source'].get('source', ''),
                    "score": hit['_score'],
                    "id": hit['_id']
                })

            return results
        except Exception as e:
            # Retrieval is best-effort; log and fall back to no results.
            print(f"Elasticsearch检索出错: {e}")
            return []

    def _milvus_retrieval(self, query: str, collection_name: str = "knowledge_base") -> List[Dict[str, Any]]:
        """Semantic retrieval via Milvus.

        Args:
            query: The query text.
            collection_name: Milvus collection to search.

        Returns:
            Up to 3 hits as dicts with content/source/score; empty list on
            any failure.
        """
        try:
            # BUG FIX: langchain's Milvus wrapper takes connection details
            # through ``connection_args`` — it has no host/port kwargs.
            vector_store = Milvus(
                embedding_function=self.embeddings,
                collection_name=collection_name,
                connection_args={"host": self.milvus_host, "port": self.milvus_port}
            )

            docs = vector_store.similarity_search_with_score(query, k=3)

            results = []
            for doc, score in docs:
                results.append({
                    "content": doc.page_content,
                    "source": ", ".join([f"{k}:{v}" for k, v in doc.metadata.items()]) if doc.metadata else "",
                    "score": float(score)
                })

            return results
        except Exception as e:
            # Retrieval is best-effort; log and fall back to no results.
            print(f"Milvus检索出错: {e}")
            return []

    def _is_similar_content(self, content1: str, content2: str, threshold: float = 0.8) -> bool:
        """Decide whether two texts are near-duplicates.

        Uses the Jaccard similarity of the two character sets — a cheap
        heuristic, not a semantic comparison.

        Args:
            content1: First text.
            content2: Second text.
            threshold: Similarity above which the texts count as duplicates.

        Returns:
            True when the character-set Jaccard similarity exceeds *threshold*.
        """
        set1 = set(content1)
        set2 = set(content2)
        intersection = set1.intersection(set2)
        union = set1.union(set2)

        if len(union) == 0:
            # Two empty strings: treat as not similar (keeps both).
            return False

        similarity = len(intersection) / len(union)
        return similarity > threshold

    def dynamic_chunking(self, document: str) -> List[str]:
        """Split *document* into chunks of at most ~500 characters.

        Splits on blank lines first, packs consecutive paragraphs into
        chunks, and — BUG FIX — slices any single paragraph that is itself
        longer than the chunk size (the original kept it as one oversized
        chunk despite claiming secondary chunking).

        Args:
            document: Raw document text.

        Returns:
            List of chunk strings.
        """
        paragraphs = document.split('\n\n')

        chunks = []
        chunk_size = 500  # maximum characters per chunk
        current_chunk = ""

        for paragraph in paragraphs:
            if len(paragraph) >= chunk_size:
                # Oversized paragraph: flush the pending chunk, then slice
                # the paragraph itself into chunk_size pieces.
                if current_chunk:
                    chunks.append(current_chunk)
                    current_chunk = ""
                for start in range(0, len(paragraph), chunk_size):
                    chunks.append(paragraph[start:start + chunk_size])
            elif len(current_chunk) + len(paragraph) < chunk_size:
                # Paragraph still fits into the current chunk.
                current_chunk += paragraph + "\n\n"
            else:
                # Close the current chunk and start a new one.
                if current_chunk:  # never emit an empty chunk
                    chunks.append(current_chunk)
                current_chunk = paragraph + "\n\n"

        # Flush the trailing chunk.
        if current_chunk:
            chunks.append(current_chunk)

        return chunks

    def answer_question(self, question: str, user_id: str) -> Dict[str, Any]:
        """Answer *question* for *user_id* via the LCEL chain.

        Persists the exchange to the conversation history (cache, which in
        turn syncs to the database).

        Args:
            question: The user question.
            user_id: The user identifier.

        Returns:
            Dict with question, answer, sources and a placeholder confidence;
            on failure a canned apology with confidence 0.0.
        """
        try:
            # Retrieve the context ONCE and share it with the chain — the
            # original invoked the chain (which retrieves internally) and
            # then ran the full ES + Milvus retrieval a second time just to
            # list sources.
            context = self._retrieve_context(question)

            answer = self.qa_chain.invoke({
                "question": question,
                "user_id": user_id,
                "context": context
            })

            sources = self._extract_sources(context)
            conversation_record = {
                "question": question,
                "answer": answer,
                "sources": sources
            }

            # Append the new exchange to the existing history.
            conversation_history = cache_manager.load_conversation_memory(user_id) or []
            conversation_history.append(conversation_record)
            # Saving through the cache also syncs to the database.
            cache_manager.save_conversation_memory(user_id, conversation_history)

            return {
                "question": question,
                "answer": answer,
                "sources": sources,
                "confidence": 0.9  # placeholder confidence, not computed
            }
        except Exception as e:
            print(f"调用问答链时出错: {e}")
            return {
                "question": question,
                "answer": "抱歉，暂时无法回答您的问题。",
                "sources": [],
                "confidence": 0.0
            }

    def _extract_sources(self, context: str) -> List[str]:
        """Extract the distinct source names from a formatted context string.

        Args:
            context: Context produced by _retrieve_context ("来源: ..." lines).

        Returns:
            Unique source names in first-seen order.
        """
        sources = []
        for line in context.split('\n'):
            if line.startswith('来源:'):
                source = line.replace('来源:', '').strip()
                if source and source not in sources:
                    sources.append(source)
        return sources

# Global QA-chain system singleton.
# NOTE(review): constructing this at import time triggers heavy side effects
# (LLM/ES clients, embedding-model load, possible file rename) — consider
# lazy initialization; confirm callers rely on import-time construction.
qa_chain_system = QAChainSystem()