from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel
from utils.logger import setup_logger

# Module-level logger shared by the QASystem class below.
log = setup_logger()

class QASystem:
    """Retrieval-augmented QA system.

    Loads a local HuggingFace causal LM (ChatGLM-style, with remote code)
    fully offline, wraps it in a LangChain ``HuggingFacePipeline``, and wires
    it to a vector store through a ``RetrievalQA`` chain.
    """

    def __init__(self, model_path, vector_store):
        """Initialize the LLM and the QA chain.

        Args:
            model_path: Local directory containing the model files
                (config.json, modeling_chatglm.py, tokenizer files, ...).
            vector_store: LangChain-compatible vector store exposing
                ``as_retriever()``.

        Raises:
            Exception: Any failure during model loading or chain creation is
                logged with a traceback and re-raised to the caller.
        """
        log.info(f"开始加载模型: {model_path}")
        try:
            log.info("开始初始化LLM")
            self.llm = self._init_llm(model_path)
            log.info("LLM初始化完成")

            self.vector_store = vector_store
            log.info("开始创建QA链")
            self.qa_chain = self._create_qa_chain()
            log.info("QA链创建完成")
        except Exception as e:
            log.error(f"模型初始化失败: {e}", exc_info=True)
            raise

    def _init_llm(self, model_path):
        """Load the tokenizer and model from *model_path* (offline only) and
        return a LangChain ``HuggingFacePipeline`` running on CPU.

        Args:
            model_path: Local directory with the pretrained model files.

        Returns:
            HuggingFacePipeline: LLM wrapper usable by LangChain chains.

        Raises:
            ValueError: If a required model file is missing or empty.
        """
        try:
            from langchain_community.llms import HuggingFacePipeline
            import torch
            import os

            # Fully disable network access so from_pretrained never tries to
            # reach the HuggingFace Hub and fails fast on missing local files.
            log.info("设置环境变量")
            os.environ['HF_DATASETS_OFFLINE'] = '1'
            os.environ['TRANSFORMERS_OFFLINE'] = '1'
            os.environ['HF_HUB_OFFLINE'] = '1'
            os.environ['CURL_CA_BUNDLE'] = ''

            # Validate that every required file exists and is non-empty.
            log.info("验证模型文件")
            model_files = ["config.json", "configuration_chatglm.py", "modeling_chatglm.py", "tokenizer_config.json", "tokenizer.model"]
            for file in model_files:
                file_path = os.path.join(model_path, file)
                if not os.path.exists(file_path):
                    raise ValueError(f"缺少模型文件: {file}")
                # BUGFIX: this check was previously outside the loop and only
                # validated the last file in the list.
                if os.path.getsize(file_path) == 0:
                    raise ValueError(f"模型文件损坏或不完整: {file}")

            log.info("加载模型配置")
            # Options shared by tokenizer and model loading.
            common_config = {
                "trust_remote_code": True,   # ChatGLM ships custom modeling code
                "local_files_only": True,    # enforce offline loading
            }
            # Model-only options. NOTE: "device_map" was removed — it conflicts
            # with the explicit .to('cpu') below (accelerate-dispatched models
            # cannot be moved with .to()), and this class targets CPU only.
            model_config = {
                **common_config,
                "low_cpu_mem_usage": True,
                "torch_dtype": torch.float32,
            }

            log.info("加载tokenizer")
            # BUGFIX: pass only tokenizer-relevant options; torch_dtype /
            # low_cpu_mem_usage / device_map are not valid tokenizer kwargs.
            tokenizer = AutoTokenizer.from_pretrained(
                model_path,
                **common_config
            )
            log.info("tokenizer加载完成")

            log.info("加载模型")
            try:
                model = AutoModel.from_pretrained(
                    model_path,
                    **model_config,
                ).to('cpu')  # explicitly run on CPU
                log.info("模型加载完成")
            except Exception as e:
                log.error(f"模型加载失败: {e}", exc_info=True)
                raise

            log.info("创建pipeline")
            from transformers import pipeline
            pipe = pipeline(
                "text-generation",
                model=model,
                tokenizer=tokenizer,
                max_length=2048,
                temperature=0.7,
                trust_remote_code=True,
                device='cpu'  # keep the pipeline on CPU
            )
            log.info("pipeline创建完成")

            return HuggingFacePipeline(pipeline=pipe)
        except Exception as e:
            # BUGFIX: use the module logger (was print), consistent with the
            # rest of the class, so the failure lands in the log output.
            log.error(f"模型LLM加载失败: {e}", exc_info=True)
            raise

    def _create_qa_chain(self):
        """Build the RetrievalQA chain over the vector store.

        Returns:
            RetrievalQA: "stuff"-type chain retrieving the top-3 documents
            and answering with the Chinese prompt template below.
        """
        prompt_template = """基于以下已知信息，简洁和专业的来回答问题。如果无法从中得到答案，请说 "抱歉，我无法从知识库中找到相关信息。"

已知信息：{context}

问题：{question}

回答："""

        PROMPT = PromptTemplate(
            template=prompt_template,
            input_variables=["context", "question"]
        )

        chain = RetrievalQA.from_chain_type(
            llm=self.llm,
            chain_type="stuff",
            # k=3: stuff the three most similar chunks into the prompt
            retriever=self.vector_store.as_retriever(search_kwargs={"k": 3}),
            return_source_documents=True,
            chain_type_kwargs={"prompt": PROMPT}
        )
        return chain

    def answer_question(self, question: str):
        """Answer *question* against the knowledge base.

        Args:
            question: The user's natural-language question.

        Returns:
            dict: ``{"answer": str, "sources": list[dict]}`` on success, or
            ``{"error": str}`` if the chain raised (kept as a soft failure so
            callers get a uniform dict either way).
        """
        try:
            result = self.qa_chain({"query": question})
            return {
                "answer": result["result"],
                "sources": [doc.metadata for doc in result["source_documents"]]
            }
        except Exception as e:
            return {"error": str(e)}