from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel

class QASystem:
    """Retrieval-augmented QA system.

    Loads a local ChatGLM model (fully offline) as the LLM and answers
    questions against documents retrieved from a langchain vector store.
    """

    def __init__(self, model_path, vector_store):
        """
        Args:
            model_path: local directory containing the ChatGLM model files.
            vector_store: langchain vector store exposing ``as_retriever()``.
        """
        self.llm = self._init_llm(model_path)
        self.vector_store = vector_store
        self.qa_chain = self._create_qa_chain()

    def _init_llm(self, model_path):
        """Load the local ChatGLM model and wrap it as a langchain LLM.

        Forces fully offline operation (no HuggingFace Hub access) and
        validates that the essential model files exist and are non-empty
        before attempting the slow model load, so failures surface with a
        clear message instead of a cryptic mid-load error.

        Args:
            model_path: local directory containing the model files.

        Returns:
            A ``HuggingFacePipeline`` langchain LLM.

        Raises:
            ValueError: if a required model file is missing or empty.
            Exception: re-raised from transformers if loading fails.
        """
        from transformers import AutoTokenizer, AutoModel
        from langchain_community.llms import HuggingFacePipeline
        import torch
        import os

        # Disable all network access so transformers never tries the Hub.
        os.environ['HF_DATASETS_OFFLINE'] = '1'
        os.environ['TRANSFORMERS_OFFLINE'] = '1'
        os.environ['HF_HUB_OFFLINE'] = '1'
        os.environ['CURL_CA_BUNDLE'] = ''

        try:
            # Essential ChatGLM files; fail fast if any is missing or empty.
            # (For chatglm3-6b, additionally verify the 7 safetensors shards
            # model-0000{1..7}-of-00007.safetensors exist and are non-empty.)
            model_files = ["config.json", "configuration_chatglm.py", "modeling_chatglm.py", "tokenizer_config.json", "tokenizer.model"]
            for file in model_files:
                file_path = os.path.join(model_path, file)
                if not os.path.exists(file_path):
                    raise ValueError(f"缺少模型文件: {file}")
                # BUG FIX: this zero-size check was previously outside the
                # loop and only validated the LAST file in the list; it now
                # runs for every required file.
                if os.path.getsize(file_path) == 0:
                    raise ValueError(f"模型文件损坏或不完整: {file}")

            load_config = {
                "trust_remote_code": True,   # ChatGLM ships custom modeling code
                "local_files_only": True,    # belt-and-braces with the env vars above
            }

            tokenizer = AutoTokenizer.from_pretrained(
                model_path,
                **load_config
            )

            # ChatGLM2 uses a custom architecture, so AutoModel (resolved via
            # trust_remote_code) is required rather than AutoModelForCausalLM.
            # (chatglm3-6b on ~6GB VRAM loads fine via AutoModelForCausalLM
            # with the same kwargs.)
            model = AutoModel.from_pretrained(
                model_path,
                **load_config,
                torch_dtype=torch.float16,   # halves VRAM use vs float32
                device_map="auto",           # let accelerate place layers
                low_cpu_mem_usage=True,
            )

            from transformers import pipeline
            pipe = pipeline(
                "text-generation",
                model=model,
                tokenizer=tokenizer,
                max_length=2048,
                temperature=0.7,
                trust_remote_code=True,
                device_map="auto",
            )

            return HuggingFacePipeline(pipeline=pipe)
        except Exception as e:
            print(f"模型加载失败: {e}")
            raise

    def _create_qa_chain(self):
        """Build the RetrievalQA chain: top-3 retrieved chunks are stuffed
        into a fixed Chinese prompt template and sent to the LLM.

        Returns:
            A ``RetrievalQA`` chain that also returns its source documents.
        """
        prompt_template = """基于以下已知信息，简洁和专业的来回答问题。如果无法从中得到答案，请说 "抱歉，我无法从知识库中找到相关信息。"

已知信息：{context}

问题：{question}

回答："""

        PROMPT = PromptTemplate(
            template=prompt_template,
            input_variables=["context", "question"]
        )

        # "stuff" = concatenate all retrieved docs into a single prompt;
        # k=3 keeps the stuffed context within the model's 2048-token limit.
        chain = RetrievalQA.from_chain_type(
            llm=self.llm,
            chain_type="stuff",
            retriever=self.vector_store.as_retriever(search_kwargs={"k": 3}),
            return_source_documents=True,
            chain_type_kwargs={"prompt": PROMPT}
        )
        return chain

    def answer_question(self, question: str):
        """Answer *question* via the retrieval QA chain.

        Args:
            question: the user's question.

        Returns:
            ``{"answer": str, "sources": list[dict]}`` on success (sources
            are the retrieved documents' metadata), or ``{"error": str}``
            on any failure — deliberately best-effort, never raises.
        """
        try:
            result = self.qa_chain({"query": question})
            return {
                "answer": result["result"],
                "sources": [doc.metadata for doc in result["source_documents"]]
            }
        except Exception as e:
            return {"error": str(e)}