from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

class QASystem:
    """Question-answering wrapper around a chat-capable causal LM.

    Eagerly loads a tokenizer, model, and generation config from
    ``model_path`` via ``transformers`` (with ``trust_remote_code``) and
    exposes a single-turn ``answer_question`` interface built on the
    model's ``chat`` API.
    """

    def __init__(self, model_path):
        """Store ``model_path`` and load the model immediately.

        Args:
            model_path: Local path or hub identifier of the model;
                also used to derive the weight-offload folder.
        """
        self.model_path = model_path
        self.tokenizer = None  # populated by _initialize_model
        self.model = None      # populated by _initialize_model
        self._initialize_model()

    def _initialize_model(self):
        """Load tokenizer, model, and generation config from ``model_path``.

        Raises:
            RuntimeError: if any loading step fails; the original
                exception is chained as ``__cause__`` so the full
                traceback is preserved.
        """
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True,
            )
            # NOTE(review): ``bf16=True`` is a kwarg understood by some
            # models' remote code (e.g. Qwen), not a standard
            # ``from_pretrained`` argument — confirm the target model
            # accepts it; the portable spelling is
            # ``torch_dtype=torch.bfloat16``.
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_path,
                device_map="auto",
                trust_remote_code=True,
                bf16=True,
                offload_folder=f"{self.model_path}/offload",
            ).eval()

            self.model.generation_config = GenerationConfig.from_pretrained(
                self.model_path,
                trust_remote_code=True,
            )
        except Exception as e:
            # Chain the cause instead of discarding it (PEP 3134);
            # RuntimeError is still caught by callers expecting Exception.
            raise RuntimeError(f"模型初始化失败: {str(e)}") from e

    def answer_question(self, query):
        """Answer a single question with no prior conversation history.

        Args:
            query: The user's question as a string.

        Returns:
            A ``(response, new_history)`` tuple as produced by the
            model's ``chat`` method.

        Raises:
            RuntimeError: if generation fails; the original exception
                is chained as ``__cause__``.
        """
        try:
            response, new_history = self.model.chat(
                query=query,
                tokenizer=self.tokenizer,
                history=None,
            )
            return response, new_history
        except Exception as e:
            raise RuntimeError(f"生成回答失败: {str(e)}") from e