import logging
from typing import List
from sentence_transformers import SentenceTransformer
import chromadb
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM  # switched to a Seq2Seq (encoder-decoder) model class
import torch

class SimpleRAG:
    def __init__(self, embedding_model_path: str = None):
        """
        Initialize the RAG system.

        :param embedding_model_path: optional path to a local embedding model;
            when omitted, the hub model "sentence-transformers/all-MiniLM-L6-v2"
            is downloaded/used instead.
        """
        # Embedding model: a local path takes precedence over the hub default.
        if embedding_model_path:
            self.embedding_model = SentenceTransformer(embedding_model_path)
        else:
            self.embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

        # Persistent ChromaDB vector store backing the knowledge base.
        self.client = chromadb.PersistentClient(path="./chroma_db")
        self.collection = self.client.get_or_create_collection("knowledge_base")

        # ChatLM-mini-Chinese is a T5 (encoder-decoder) architecture, hence
        # AutoModelForSeq2SeqLM rather than a causal-LM class.
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        model_path = "D:/ideaSpace/MyPython/models/ChatLM-mini-Chinese"
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.llm_model = AutoModelForSeq2SeqLM.from_pretrained(model_path).to(self.device)

    def add_documents(self, documents: List[str]):
        """Embed *documents* and add them to the knowledge base.

        :param documents: list of document strings to embed and store.
        """
        if not documents:
            # Nothing to store; avoid a pointless encode()/add() round trip.
            return
        # BUG FIX: ids previously restarted at "doc_0" on every call, so a
        # second add_documents() call collided with the ids already stored in
        # the persistent collection. Offset by the current collection size to
        # keep ids unique across calls.
        start = self.collection.count()
        doc_ids = [f"doc_{start + i}" for i in range(len(documents))]
        embeddings = self.embedding_model.encode(documents).tolist()
        self.collection.add(ids=doc_ids, embeddings=embeddings, documents=documents)
        # Lazy %-style args so the message is only formatted when emitted.
        logging.info("成功添加 %d 个文档到知识库", len(documents))

    def query(self, question: str, top_k: int = 3) -> str:
        """Answer *question* via retrieval-augmented generation.

        :param question: the user question.
        :param top_k: number of most-similar documents to retrieve as context.
        :return: the generated answer string (special tokens stripped).
        """
        # 1. Retrieve the top-k most similar documents as context.
        query_embedding = self.embedding_model.encode([question]).tolist()
        results = self.collection.query(query_embeddings=query_embedding, n_results=top_k)
        context = "\n\n".join(results["documents"][0])

        # 2. Build the T5-style prompt (context followed by the question).
        input_text = f"根据上下文回答问题：{context}\n问题：{question}"
        input_ids = self.tokenizer.encode(input_text, return_tensors="pt").to(self.device)

        # 3. Generate the answer (seq2seq models use generate()).
        outputs = self.llm_model.generate(
            input_ids,
            max_new_tokens=256,
            eos_token_id=self.tokenizer.eos_token_id,
            pad_token_id=self.tokenizer.pad_token_id,
        )
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)

# Usage example
def _demo() -> None:
    """Build a tiny knowledge base and answer a single question."""
    rag = SimpleRAG(embedding_model_path="D:/ideaSpace/MyPython/models/all-MiniLM-L6-v2")

    rag.add_documents([
        "人工智能(AI)是计算机科学的一个分支，致力于开发能模拟人类智能的系统。",
        "机器学习是AI的核心技术之一，它使计算机能够从数据中学习和改进。",
        "深度学习是机器学习的一个子领域，使用多层神经网络处理复杂问题。"
    ])

    user_question = "机器学习和深度学习有什么区别？"
    print(f"问题: {user_question}")
    print(f"回答: {rag.query(user_question)}")


if __name__ == "__main__":
    _demo()