from typing import Any, List, Optional

import torch
from langchain.chains import RetrievalQA
from langchain.llms.base import LLM
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from pydantic import Field
from transformers import AutoTokenizer, AutoModel

# Select the compute device once at import time: prefer the GPU when CUDA
# is available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

class ChatGLMWrapper(LLM):
    """LangChain LLM wrapper around a locally hosted ChatGLM3-6B model.

    Loads the tokenizer and weights from a local path with
    ``trust_remote_code=True`` (required by the ChatGLM repository code)
    and serves completions through the model's ``chat`` interface.
    """

    # Heavy HuggingFace objects; excluded from pydantic serialization.
    tokenizer: Any = Field(default=None, exclude=True)
    model: Any = Field(default=None, exclude=True)

    def __init__(self, model_path: str = r"E:\projects\PycharmProjects\cnsoft\Intelligent Education Agent\Tools\retrieval_qa_chatglm\models\chatglm3-6b"):
        """Load tokenizer and model from *model_path* and prepare for inference.

        The model is cast to fp16 and moved to the module-level ``device``.
        NOTE(review): fp16 on CPU is slow and some ops are unsupported —
        confirm the CPU fallback path is actually usable.
        """
        super().__init__()
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        self.model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().to(device)
        self.model.eval()  # inference only; disables dropout etc.

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for logging/serialization."""
        return "chatglm"

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        """Generate a completion for *prompt* via ChatGLM's chat API.

        Fixes two defects in the original: the *stop* parameter was
        silently ignored (LangChain's contract expects the completion to
        be truncated at the first stop sequence), and a leftover debug
        ``print`` polluted stdout on every call.
        """
        response, _ = self.model.chat(self.tokenizer, prompt, history=[])
        if stop:
            # Truncate at the earliest occurrence of any stop sequence.
            for seq in stop:
                idx = response.find(seq)
                if idx != -1:
                    response = response[:idx]
        return response

# Initialize the embedding model and load the local FAISS vector store.
_EMBEDDING_MODEL_PATH = r"E:\projects\PycharmProjects\cnsoft\Intelligent Education Agent\Tools\retrieval_qa_chatglm\Knowledge_Base_Constructing\local_model\BAAI\bge-small-zh-v1.5"
_VECTOR_STORE_PATH = r"E:\projects\PycharmProjects\cnsoft\Intelligent Education Agent\Tools\retrieval_qa_chatglm\Knowledge_Base_Constructing\output\tfjs_knowledge_faiss"

embedding_model = HuggingFaceEmbeddings(model_name=_EMBEDDING_MODEL_PATH)

# FAISS indexes are stored pickled; deserialization is opted into here
# because the index is produced locally by this project, not by an
# untrusted source.
vector_store = FAISS.load_local(
    _VECTOR_STORE_PATH,
    embeddings=embedding_model,
    allow_dangerous_deserialization=True,
)

# Build the retrieval-augmented QA chain.
llm = ChatGLMWrapper()
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    # Bug fix: retriever options such as ``k`` must be passed through
    # ``search_kwargs``. A bare ``k=3`` is not a VectorStoreRetriever
    # field, so pydantic either rejects it or drops it and the default
    # k=4 is silently used instead.
    retriever=vector_store.as_retriever(
        search_type="similarity",
        search_kwargs={"k": 3},
    ),
    return_source_documents=True,
)
