import os
from typing import List, Dict, Any, Optional
import numpy as np
from sentence_transformers import SentenceTransformer
import faiss
import json
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

class HybridQASystem:
    """Hybrid question answering: retrieve from a FAISS vector store first and
    answer from the retrieved context when similarity is high enough; otherwise
    fall back to direct LLM generation."""

    def __init__(
        self,
        vector_db_path: str,
        embedding_model_name: str = "shibing624/text2vec-base-chinese",
        llm_model_name: str = "deepseek-ai/deepseek-llm-32b-chat",
        similarity_threshold: float = 0.75,
    ):
        """
        Initialize the hybrid QA system.

        Args:
            vector_db_path: Directory containing the FAISS index and metadata.
            embedding_model_name: SentenceTransformer model used to embed queries.
            llm_model_name: Causal LM used for answer generation.
            similarity_threshold: Minimum similarity (1 - FAISS distance) for a
                retrieved document to be treated as a match.
        """
        self.similarity_threshold = similarity_threshold

        # Embedding model used to vectorize incoming queries.
        self.embedding_model = SentenceTransformer(embedding_model_name)

        # Loads self.index (FAISS) and self.metadata (parallel document list).
        self._load_vector_database(vector_db_path)

        # Chat LLM used for both context-grounded and direct generation.
        self.tokenizer = AutoTokenizer.from_pretrained(llm_model_name)
        self.llm = AutoModelForCausalLM.from_pretrained(
            llm_model_name,
            torch_dtype=torch.bfloat16,
            device_map="auto",
        )

    def _load_vector_database(self, vector_db_path: str) -> None:
        """Load the FAISS index and its parallel metadata list from disk."""
        self.index = faiss.read_index(os.path.join(vector_db_path, "faiss_index.bin"))
        with open(os.path.join(vector_db_path, "metadata.json"), "r", encoding="utf-8") as f:
            self.metadata = json.load(f)

    def _query_vector_db(self, query: str, top_k: int = 3) -> List[Dict[str, Any]]:
        """Return up to ``top_k`` documents whose similarity exceeds the threshold.

        NOTE(review): similarity is computed as ``1 - distance``, which is only
        meaningful when the index stores normalized vectors so distances fall in
        a cosine-like range — confirm against the index-building code.
        """
        # Embed the query and shape it as the (1, dim) float32 batch FAISS expects.
        query_vector = self.embedding_model.encode([query])[0]
        query_vector = np.asarray([query_vector], dtype="float32")

        distances, indices = self.index.search(query_vector, top_k)

        results = []
        for distance, idx in zip(distances[0], indices[0]):
            # FAISS pads with index -1 when fewer than top_k hits exist.
            # distance < 1 - threshold  <=>  (1 - distance) > threshold.
            if idx != -1 and distance < 1 - self.similarity_threshold:
                results.append({
                    "content": self.metadata[idx]["content"],
                    "similarity": 1 - distance,  # convert distance back to similarity
                    "metadata": self.metadata[idx].get("metadata", {}),
                })

        return results

    def _generate(self, prompt: str, temperature: float) -> str:
        """Run the LLM on a fully formatted chat prompt; return the decoded completion."""
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.llm.device)

        with torch.no_grad():
            outputs = self.llm.generate(
                **inputs,
                max_new_tokens=1024,
                # Without do_sample=True, generate() defaults to greedy decoding
                # and silently ignores temperature/top_p.
                do_sample=True,
                temperature=temperature,
                top_p=0.9,
                repetition_penalty=1.1,
            )

        # Decode only the newly generated tokens, not the echoed prompt.
        new_tokens = outputs[0][inputs.input_ids.shape[1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

    def _generate_from_llm(self, query: str) -> str:
        """Answer directly with the LLM, without retrieval context."""
        prompt = f"<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"
        return self._generate(prompt, temperature=0.7)

    def _get_response_from_vector_db(self, query: str, results: List[Dict[str, Any]]) -> Optional[str]:
        """Generate an answer grounded in the retrieved documents.

        Returns:
            The model's answer, or None when there are no results or the model
            reports the context is insufficient — signalling the caller to fall
            back to direct LLM generation.
        """
        if not results:
            return None

        # Assemble the retrieved snippets into a numbered context section.
        contents = [f"相关信息 {i+1}：{result['content']}" for i, result in enumerate(results)]
        context = "\n".join(contents)

        prompt = f"""<|im_start|>user
基于以下信息回答问题，如果信息不足以回答问题，请直接回复"我没有足够的信息回答这个问题"。

问题：{query}

{context}
<|im_end|>

<|im_start|>assistant
"""

        # Lower temperature for more deterministic, context-faithful answers.
        response = self._generate(prompt, temperature=0.3)

        # The prompt instructs the model to emit this exact sentence when the
        # retrieved context does not cover the question.
        if "我没有足够的信息回答这个问题" in response:
            return None

        return response

    def answer(self, query: str) -> Dict[str, Any]:
        """
        Answer a user question.

        Args:
            query: The user's question.

        Returns:
            Dict with "answer", "source" ("vector_db" or "llm"), and
            "references" (retrieved documents; empty for LLM-only answers).
        """
        # 1. Retrieve candidates. _query_vector_db already filters to
        #    similarity > threshold, so any non-empty result set is
        #    high-confidence — no second threshold check is needed.
        vector_results = self._query_vector_db(query)

        # 2. Try a context-grounded answer first.
        if vector_results:
            vector_response = self._get_response_from_vector_db(query, vector_results)
            if vector_response:
                return {
                    "answer": vector_response,
                    "source": "vector_db",
                    "references": [
                        {"content": r["content"], "similarity": r["similarity"]}
                        for r in vector_results
                    ],
                }

        # 3. Fall back to direct LLM generation when retrieval cannot answer.
        llm_response = self._generate_from_llm(query)
        return {
            "answer": llm_response,
            "source": "llm",
            "references": [],
        }


# Usage example
if __name__ == "__main__":
    # Build the QA system against the local vector store.
    qa_system = HybridQASystem(
        vector_db_path="./vector_db",
        similarity_threshold=0.75,
    )

    # Sample questions covering in-domain and out-of-domain queries.
    sample_questions = (
        "什么是向量数据库?",
        "如何提高深度学习模型的性能?",
        "今天天气怎么样?",
    )

    for q in sample_questions:
        print(f"\n问题: {q}")
        outcome = qa_system.answer(q)
        print(f"回答来源: {outcome['source']}")
        print(f"回答: {outcome['answer']}")
        if outcome["references"]:
            print("\n参考信息:")
            for ref in outcome["references"]:
                print(f"- 相似度: {ref['similarity']:.2f}")
                print(f"  内容: {ref['content'][:100]}...")