import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer
import gc
import signal
import os

class T5Model:
    """CPU-only question-answering wrapper around a T5 seq2seq model.

    Loads the model and tokenizer on construction, installs SIGINT/SIGTERM
    handlers that release resources before exiting, and exposes
    ``generate_answer`` for context-grounded QA.
    """

    def __init__(self, model_name="t5-small", max_input_length=512,
                 max_output_length=64):
        """Load the model and tokenizer onto the CPU.

        Args:
            model_name: HuggingFace model identifier (default ``"t5-small"``,
                matching the previous hard-coded value).
            max_input_length: token cap applied when encoding the prompt.
            max_output_length: token cap applied to the generated answer.

        Raises:
            Exception: any loading failure is re-raised after cleanup.
        """
        # Install signal handlers so Ctrl-C / SIGTERM release the model
        # before the process dies. NOTE: this replaces any previously
        # registered global handlers for these signals.
        signal.signal(signal.SIGINT, self._signal_handler)
        signal.signal(signal.SIGTERM, self._signal_handler)

        try:
            # Force CPU execution.
            self.device = torch.device('cpu')
            print(f"使用设备: {self.device}")

            self.model = T5ForConditionalGeneration.from_pretrained(
                model_name,
                torch_dtype=torch.float32,
                device_map=None,  # no automatic device mapping
                low_cpu_mem_usage=True
            ).to(self.device)

            self.tokenizer = T5Tokenizer.from_pretrained(
                model_name,
                model_max_length=max_input_length
            )
            print("T5模型加载成功")

            # Length limits used by generate_answer().
            self.max_input_length = max_input_length
            self.max_output_length = max_output_length

        except Exception as e:
            print(f"模型加载失败: {str(e)}")
            self._cleanup()
            raise

    def _signal_handler(self, signum, frame):
        """Release resources and hard-exit on SIGINT/SIGTERM."""
        print("\n收到终止信号，正在清理资源...")
        self._cleanup()
        # os._exit skips normal interpreter teardown; cleanup already ran.
        os._exit(0)

    def _cleanup(self):
        """Best-effort release of model/tokenizer memory; never raises."""
        try:
            if hasattr(self, 'model'):
                self.model.cpu()
                del self.model
            if hasattr(self, 'tokenizer'):
                del self.tokenizer
            # Only touch the CUDA allocator when CUDA actually exists;
            # an unconditional empty_cache() can needlessly initialize CUDA
            # in this CPU-only class.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            gc.collect()
        except Exception as e:
            print(f"清理资源时出错: {str(e)}")

    def generate_answer(self, query, context):
        """Answer *query* grounded in *context* via T5 generation.

        Args:
            query: the question text.
            context: supporting passage the answer should be drawn from.

        Returns:
            The decoded answer string, or a fallback message when generation
            yields nothing or an error occurs.
        """
        try:
            # no_grad: inference only, skip autograd bookkeeping.
            with torch.no_grad():
                # T5's text-to-text QA prompt format.
                input_text = f"question: {query} context: {context}"
                inputs = self.tokenizer(
                    input_text,
                    max_length=self.max_input_length,
                    truncation=True,
                    return_tensors="pt"
                )

                input_ids = inputs.input_ids.to(self.device)
                # Pass the attention mask explicitly: letting the model infer
                # it from pad tokens is fragile and triggers warnings.
                attention_mask = inputs.attention_mask.to(self.device)

                outputs = self.model.generate(
                    input_ids,
                    attention_mask=attention_mask,
                    max_length=self.max_output_length,
                    num_beams=2,
                    length_penalty=1.0,
                    early_stopping=True
                )

                # Move the result to CPU before decoding and drop temporaries
                # to keep peak memory low.
                outputs = outputs.cpu()
                answer = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

                del outputs
                del input_ids
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

                if not answer or answer.strip() == "":
                    return "抱歉，我无法根据提供的上下文回答这个问题。"

                return answer.strip()

        except Exception as e:
            print(f"生成答案时出错: {str(e)}")
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            return "生成答案时发生错误，请重试。"

    def __del__(self):
        # During interpreter shutdown module globals (torch, gc, ...) may
        # already be torn down; never let the finalizer raise.
        try:
            self._cleanup()
        except Exception:
            pass
