from transformers import AutoTokenizer, AutoModel
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
import torch

class QASystem:
    """Retrieval-augmented question answering.

    Retrieves the most similar documents from a vector store and asks a
    locally loaded chat model to answer based on that context.
    """

    def __init__(self, model_path, vector_store):
        """Load tokenizer and model from *model_path* (local files only).

        The model is loaded in fp16 with automatic device placement;
        *vector_store* is any store exposing ``similarity_search``.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_path,
            trust_remote_code=True,
            local_files_only=True,
        )
        self.model = AutoModel.from_pretrained(
            model_path,
            trust_remote_code=True,
            local_files_only=True,
            torch_dtype=torch.float16,
            device_map="auto",
            low_cpu_mem_usage=True,
        )
        self.vector_store = vector_store

    def answer_question(self, question: str):
        """Answer *question* using the top-3 retrieved documents as context."""
        # Retrieve the most relevant documents for the question.
        docs = self.vector_store.similarity_search(question, k=3)
        context = "\n".join(doc.page_content for doc in docs)

        # Generate the answer.
        # NOTE(review): ``.chat()`` assumes a ChatGLM-style model API — confirm
        # the model at *model_path* provides it.
        prompt = f"已知信息：{context}\n\n问题：{question}\n\n回答："
        answer, _ = self.model.chat(self.tokenizer, prompt, history=[])
        return answer

# Initialize the vector store
def create_vector_store(
    texts=None,
    *,
    model_name="shibing624/text2vec-base-chinese",
    device="cuda",
    batch_size=8,
    cache_folder="D:/demo/gitee/python/models/models/text2vec-base-chinese",
):
    """Build an in-memory FAISS vector store over *texts*.

    Args:
        texts: Strings to index. Defaults to the original demo sentences
            when omitted, keeping the no-argument call backward compatible.
        model_name: Sentence-embedding model to use.
        device: Torch device string for both model and encoding
            (the original code inconsistently mixed 'cuda' and 'cuda:0';
            they resolve to the same device, now unified here).
        batch_size: Encoding batch size.
        cache_folder: Local cache directory for the embedding model.

    Returns:
        A FAISS vector store containing the embedded *texts*.
    """
    if texts is None:
        # Original hard-coded demo data, kept as the default.
        texts = ["示例文本1", "示例文本2", "示例文本3"]

    embeddings = HuggingFaceEmbeddings(
        model_name=model_name,
        model_kwargs={'device': device},
        encode_kwargs={'device': device, 'batch_size': batch_size},
        cache_folder=cache_folder,
    )
    return FAISS.from_texts(texts, embedding=embeddings)

# Usage example
# model_path = "D:/demo/gitee/python/models/chatglm2-6b-int4"
# vector_store = create_vector_store()
# qa_system = QASystem(model_path, vector_store)

# question = "你好，你叫什么名字？"
# answer = qa_system.answer_question(question)
# print(answer)