from typing import Literal
from pathlib import Path
from threading import Thread
from typing import Optional, Dict, Any
import random
import importlib

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from lightning import LightningModule
from torch.optim.lr_scheduler import CosineAnnealingLR, SequentialLR, LinearLR, ReduceLROnPlateau, _LRScheduler

from .losses import ComprehensiveLoss
from .metrics import tensor_accessment
from utils.plots import plot_images


class MInterface(LightningModule):
    """
    鲁棒的PA-US融合模型接口，具有增强的稳定性，支持分割任务
    Robust Model Interface for PA-US Fusion with enhanced stability and segmentation support
    """
    
    def __init__(
        self,
        model_type: Literal['aynet', 'model_aynet', 'myunet', 'model_ynet', 'model_ynet_conv', 'model_ynet_dascat', 'model_ynet_rdcat'] = 'aynet',
        data_range: float = 2.0,     # data value range, used for PSNR computation
        learning_rate: float = 1e-3,  # initial learning rate
        weight_decay: float = 1e-4,   # optimizer weight decay
        # loss function configuration
        loss_l2: float = 1.0,        # L2 loss weight
        loss_l1: float = 0.0,        # L1 loss weight
        loss_perc: float = 0.0,      # perceptual loss weight
        loss_msssim: float = 0.0,    # MS-SSIM loss weight
        loss_focal: float = 0.0,     # focal-loss weight, for small-target detection
        # segmentation-related parameters
        enable_segmentation: bool = True,    # whether to enable the segmentation task
        seg_loss_weight: float = 0.01,         # segmentation loss weight
        seg_use_multiscale: bool = True,     # whether to use a multi-scale segmentation head
        seg_use_dropout: bool = False,        # whether to use dropout in the segmentation head
        seg_dropout_rate: float = 0.1,        # segmentation-head dropout rate
        # model-specific parameters
        in_channels: int = 1,        # number of input channels
        up_mode: str = 'upsample',   # upsampling mode
        merge_mode: str = 'concat',  # feature-fusion mode
        # training parameters
        scheduler_type: str = 'cosine',  # learning-rate scheduler type
        optimizer_type: str = 'adamw',    # optimizer type
        grad_clip_norm: float = 1.0,      # gradient-clipping threshold (<= 0 disables clipping)
        save_plots_dir: str = '',         # directory for saving diagnostic plots ('' disables)

        # robustness parameters for medical image reconstruction
        dropout: float = 0.1,           # dropout probability - guards against overfitting
        ema_decay: float = 0.999,       # exponential-moving-average decay - improves stability (<= 0 disables EMA)
        input_noise_std: float = 0.01,  # input-noise std - robustness against acquisition noise
        
        warmup_epochs: int = 0,         # Number of epochs for learning rate warmup
        scheduler_patience: int = 10,   # Patience for ReduceLROnPlateau
        scheduler_factor: float = 0.5,  # Factor for ReduceLROnPlateau
    ):
        """Set up losses, the backbone network and the optional EMA copy.

        All constructor arguments are captured by ``save_hyperparameters`` and
        are therefore available later through ``self.hparams`` (relied upon by
        ``_create_ema_model``).  Segmentation is only active when both
        ``enable_segmentation`` is True and ``seg_loss_weight`` is positive.
        """
        super().__init__()
        self.save_hyperparameters()
        
        # Core parameter setup
        self.model_type = model_type.lower()
        self.data_range = data_range
        self.learning_rate = learning_rate
        self.weight_decay = weight_decay
        self.grad_clip_norm = grad_clip_norm
        self.scheduler_type = scheduler_type
        self.optimizer_type = optimizer_type
        
        # Segmentation settings; a non-positive loss weight disables the task entirely.
        self.enable_segmentation = enable_segmentation and seg_loss_weight > 0
        self.seg_loss_weight = seg_loss_weight
                
        # Robustness settings for medical image reconstruction
        self.dropout = dropout
        self.ema_decay = ema_decay
        self.input_noise_std = input_noise_std

        self.warmup_epochs = warmup_epochs # Store warmup_epochs
        self.scheduler_patience = scheduler_patience
        self.scheduler_factor = scheduler_factor

        # Composite reconstruction loss
        self.loss_function = ComprehensiveLoss(
            data_range,
            lambda_l2=loss_l2,
            lambda_l1=loss_l1, 
            lambda_perc=loss_perc,
            lambda_msssim=loss_msssim,
            lambda_focal=loss_focal,
        )
        
        # Segmentation loss - BCEWithLogitsLoss for binary masks (AMP-safe, takes raw logits)
        if self.enable_segmentation:
            self.seg_loss_function = nn.BCEWithLogitsLoss()
        
        # Build the main model
        self.model = self._create_model(
            in_channels, up_mode, merge_mode, 
            self.enable_segmentation, seg_use_multiscale, seg_use_dropout, seg_dropout_rate
        )
        
        # EMA copy for stable inference; must be created after self.model exists
        self.ema_model = None
        if self.ema_decay > 0:
            self.ema_model = self._create_ema_model()
        
        # torch.autograd.set_detect_anomaly(True)
        
        # Where to save plots; None disables plotting in the step hooks
        self.save_plots_dir = Path(save_plots_dir) if save_plots_dir else None

    def _create_model(self, in_channels: int, up_mode: str, merge_mode: str, 
                     enable_segmentation: bool = False, seg_use_multiscale: bool = False, 
                     seg_use_dropout: bool = False, seg_dropout_rate: float = 0.1):
        """
        根据类型创建模型并添加Dropout
        Create model based on type with dropout
        """
        if self.model_type == 'myunet':
            from .myunet import HighResolutionModel
            model = HighResolutionModel()
        elif self.model_type in ('aynet', 'model_aynet', 'model_ynet', 'model_ynet_conv', 'model_ynet_dascat', 'model_ynet_rdcat'):
            module_name = f".{self.model_type}"
            model_module = importlib.import_module(module_name, package=__package__)
            
            if 'aynet' in self.model_type:
                ModelClass = getattr(model_module, 'AYNet')
            else: # 'ynet'
                ModelClass = getattr(model_module, 'YNet')
            model = ModelClass(
                in_channels=in_channels, 
                up_mode=up_mode, 
                merge_mode=merge_mode,
                enable_segmentation=enable_segmentation,
                seg_use_multiscale=seg_use_multiscale,
                seg_use_dropout=seg_use_dropout,
                seg_dropout_rate=seg_dropout_rate
            )
        else:
            raise ValueError(f"不支持的模型类型: {self.model_type}")
        
        return model

    def _create_ema_model(self):
        """Build a frozen copy of the main model for EMA-based inference.

        The copy starts from the current weights of ``self.model`` and has
        gradient tracking disabled; it is kept up to date by
        ``_update_ema_model``.
        """
        hp = self.hparams
        ema_model = self._create_model(
            hp.in_channels,
            hp.up_mode,
            hp.merge_mode,
            hp.enable_segmentation,
            hp.seg_use_multiscale,
            hp.seg_use_dropout,
            hp.seg_dropout_rate,
        )
        # Seed the copy with the current weights, then freeze it.
        ema_model.load_state_dict(self.model.state_dict())
        ema_model.requires_grad_(False)
        return ema_model

    def _update_ema_model(self):
        """
        更新EMA模型权重
        Update EMA model weights
        """
        if self.ema_model is None or self.ema_decay <= 0:
            return
        
        with torch.no_grad():
            # 指数移动平均更新：ema = decay * ema + (1-decay) * current
            for ema_param, param in zip(self.ema_model.parameters(), self.model.parameters()):
                ema_param.data.mul_(self.ema_decay).add_(param.data, alpha=1 - self.ema_decay)

    def _add_input_noise(self, tensor):
        """
        为输入添加小量噪声提高鲁棒性
        Add small noise to input for robustness
        """
        if self.training and self.input_noise_std > 0:
            if self.current_epoch < self.warmup_epochs:
                # 在warmup阶段逐渐增加噪声
                noise_scale = self.input_noise_std * self.current_epoch / self.warmup_epochs
            else:
                noise_scale = self.input_noise_std
            noise = torch.randn_like(tensor) * noise_scale
            return tensor + noise
        return tensor

    def _normalize_input(self, tensor):
        """
        标准化输入张量以防止极值
        Normalize input tensor to prevent extreme values
        """
        # 限制极值范围
        tensor = torch.clamp(tensor, -1, 1)
        
        # 检查并处理NaN/Inf值
        if torch.isnan(tensor).any() or torch.isinf(tensor).any():
            raise ValueError("Input tensor contains NaN or Inf values.")
        
        return tensor
    
    def _create_segmentation_mask(self, target):
        """
        根据目标图像创建分割掩码
        将>0的像素标记为前景(1)，<=0的像素标记为背景(0)
        """
        seg_mask = (target > 0).float()
        return seg_mask

    def _calculate_segmentation_metrics(self, pred_seg_logits, true_seg):
        """计算分割指标：IoU, Dice, Precision, Recall"""
        # 将预测logits转为概率，再转为二值掩码 (阈值0.5)
        pred_probs = torch.sigmoid(pred_seg_logits)
        pred_binary = (pred_probs > 0.5).float()
        
        # 计算各种指标
        intersection = (pred_binary * true_seg).sum()
        union = pred_binary.sum() + true_seg.sum() - intersection
        
        # IoU (Intersection over Union)
        iou = intersection / (union + 1e-8)
        
        # Dice coefficient
        dice = 2 * intersection / (pred_binary.sum() + true_seg.sum() + 1e-8)
        
        # Precision and Recall
        precision = intersection / (pred_binary.sum() + 1e-8)
        recall = intersection / (true_seg.sum() + 1e-8)
        
        return {
            'iou': iou.item(),
            'dice': dice.item(),
            'precision': precision.item(),
            'recall': recall.item()
        }

    def _save_plots_with_segmentation(self, prediction, target, seg_prediction, seg_mask, batch_idx: int, stage: str):
        """Asynchronously save reconstruction and segmentation images for one batch."""
        prefix = f'{stage}_batch{batch_idx}'
        jobs = (
            (prediction.detach(), f'{prefix}_pred.png'),
            (target, f'{prefix}_target.png'),
            (seg_prediction.detach(), f'{prefix}_seg_pred.png'),
            (seg_mask, f'{prefix}_seg_mask.png'),
        )
        # Plot on daemon threads so training is not blocked by image I/O.
        for image, filename in jobs:
            Thread(target=plot_images, args=(image, None, self.save_plots_dir / filename), daemon=True).start()
    
    def configure_model(self):
        # if hasattr(torch, 'compile'):
        #     self.model = torch.compile(self.model, mode="reduce-overhead")
        return self.model

    def configure_optimizers(self):
        """Configure optimizer and scheduler with warmup support"""
        # Optimizer
        if self.optimizer_type == 'adamw':
            optimizer = torch.optim.AdamW(
                self.parameters(), 
                lr=self.learning_rate, 
                weight_decay=self.weight_decay
            )
        else:
            optimizer = torch.optim.Adam(
                self.parameters(), 
                lr=self.learning_rate, 
                weight_decay=self.weight_decay
            )
        
        # Handle warmup + plateau scheduler separately since SequentialLR doesn't support ReduceLROnPlateau
        if self.scheduler_type == 'plateau':
            main_scheduler = ReduceLROnPlateau(
                optimizer, 
                mode='min', 
                factor=self.hparams.scheduler_factor,
                patience=self.hparams.scheduler_patience,
            )
            
            # For plateau scheduler, if warmup is requested, we'll only use warmup
            # since SequentialLR doesn't support ReduceLROnPlateau
            if self.warmup_epochs > 0:
                warmup_scheduler = LinearLR(optimizer, start_factor=1e-3, total_iters=self.warmup_epochs)
                return (
                    [optimizer],
                    [
                        {"scheduler": warmup_scheduler, "interval": "epoch", "frequency": 1},
                        {
                            "scheduler": main_scheduler,
                            "monitor": "val_loss",
                            "interval": "epoch",
                            "frequency": 1,
                        },
                    ],
                )
            else:
                scheduler_config = {
                    "scheduler": main_scheduler,
                    "monitor": "val_loss",
                    "interval": "epoch",
                    "frequency": 1,
                }
                return {"optimizer": optimizer, "lr_scheduler": scheduler_config}
        
        elif self.scheduler_type == 'cosine':
            total_epochs = self.trainer.max_epochs

            # Adjust T_max for cosine scheduler if warmup is used
            t_max_cosine = total_epochs - self.warmup_epochs
            if t_max_cosine <= 0:
                t_max_cosine = 1 # Ensure T_max is at least 1
            
            main_scheduler = CosineAnnealingLR(optimizer, T_max=t_max_cosine)
            
            # Combine with Warmup Scheduler if warmup_epochs > 0
            if self.warmup_epochs > 0:
                warmup_scheduler = LinearLR(optimizer, start_factor=1e-3, total_iters=self.warmup_epochs)
                combined_scheduler = SequentialLR(optimizer, schedulers=[warmup_scheduler, main_scheduler], milestones=[self.warmup_epochs])
                return {
                    "optimizer": optimizer, 
                    "lr_scheduler": {
                        "scheduler": combined_scheduler,
                        "interval": "epoch",
                        "frequency": 1,
                    }
                }
            else:
                return {
                    "optimizer": optimizer, 
                    "lr_scheduler": {
                        "scheduler": main_scheduler,
                        "interval": "epoch",
                        "frequency": 1,
                    }
                }
        else:
            # No main scheduler or unsupported type
            if self.warmup_epochs > 0:
                warmup_scheduler = LinearLR(optimizer, start_factor=1e-3, total_iters=self.warmup_epochs)
                return {"optimizer": optimizer, "lr_scheduler": {"scheduler": warmup_scheduler, "interval": "epoch"}}
            return optimizer

    def lr_scheduler_step(self, scheduler, metric):
        """Custom LR scheduler step for handling warmup with ReduceLROnPlateau"""
        # This method is called by PyTorch Lightning.
        # If using SequentialLR, scheduler.step() will internally call step on the current active scheduler.
        # If the active scheduler is ReduceLROnPlateau, it needs the metric.
        # If the active scheduler is LinearLR or CosineAnnealingLR, they don't use the metric.
        current_scheduler = scheduler
        if isinstance(scheduler, SequentialLR):
            # Get the current actual scheduler from SequentialLR
            # _schedulers is an internal list, current_scheduler_idx might be needed if accessible
            # For simplicity, we assume step can be called; metric is only used by Plateau
            pass # Let Lightning pass the metric if configured in scheduler_config

        if metric is None or not hasattr(current_scheduler, 'optimizer') or not any(isinstance(s, ReduceLROnPlateau) for s in getattr(current_scheduler, '_schedulers', [current_scheduler])) :
             scheduler.step() # For schedulers like LinearLR, CosineAnnealingLR, or if metric is not for Plateau
        else:
             scheduler.step(metric) # For ReduceLROnPlateau when it's active and metric is available


    def _shared_step(self, batch, batch_idx: int, stage: str):
        """Run one forward/loss/metrics pass shared by train and validation.

        Args:
            batch: tuple ``(rawdata, target, bfimg)`` of tensors — assumes
                values roughly in [-1, 1]; inputs are clamped to that range.
                TODO confirm exact batch layout against the DataModule.
            batch_idx: index of the batch within the current epoch.
            stage: metric-name prefix, 'train' or 'val'.

        Returns:
            The total loss (reconstruction + weighted segmentation), or a
            zero dummy loss if the batch triggered a CUDA out-of-memory error.

        Raises:
            ValueError: from ``_normalize_input`` on NaN inputs.
            RuntimeError: on non-finite model predictions, or any runtime
                error other than CUDA OOM (which is swallowed).
        """
        
        # Forward hooks can be registered here to find the first NaN-producing module.
        # hooks = self._check_nan_in_forward()

        MInterface.print_model_parameters_status(self.model, f"{self.model_type} - Before _shared_step")

        rawdata, target, bfimg = batch
        
        # Input validation and normalization (clamp to [-1, 1], reject NaN).
        rawdata = self._normalize_input(rawdata)
        bfimg = self._normalize_input(bfimg)
        target = self._normalize_input(target)
        
        # Build the binary foreground mask from the target (if segmentation is on).
        if self.enable_segmentation:
            seg_mask = self._create_segmentation_mask(target)
        
        # Inject input noise during training for robustness.
        if stage == 'train':
            rawdata = self._add_input_noise(rawdata)
            bfimg = self._add_input_noise(bfimg)
        
        # Forward pass with error handling.
        try:
            if self.enable_segmentation:
                prediction, seg_prediction_logits = self.model(rawdata, bfimg, return_segmentation=True)
            else:
                prediction = self.model(rawdata, bfimg)
            
            # Fail fast on non-finite predictions.
            if torch.isnan(prediction).any() or torch.isinf(prediction).any():
                raise RuntimeError(f"Model prediction contains NaN or Inf values at epoch {self.current_epoch}, batch {batch_idx}. "
                                 f"NaN count: {torch.isnan(prediction).sum().item()}, "
                                 f"Inf count: {torch.isinf(prediction).sum().item()}")
            
            # Reconstruction loss.
            recon_loss_dict = self.loss_function(prediction, target)
            total_loss = recon_loss_dict['loss']
            
            # Segmentation loss (if enabled).
            if self.enable_segmentation and seg_prediction_logits is not None:
                # Resize the mask when the seg head outputs a different resolution.
                if seg_prediction_logits.shape[-2:] != seg_mask.shape[-2:]:
                    seg_mask = F.interpolate(seg_mask, size=seg_prediction_logits.shape[-2:], mode='bilinear', align_corners=False)
                # BCEWithLogitsLoss consumes raw logits and the binary mask (AMP-safe).
                seg_loss = self.seg_loss_function(seg_prediction_logits, seg_mask)
                total_loss = total_loss + self.seg_loss_weight * seg_loss
                
                # Log the segmentation loss.
                self.log(f'{stage}_seg_loss', seg_loss, on_step=True, on_epoch=True, prog_bar=True)
                
                # Compute segmentation metrics (no gradients needed).
                with torch.no_grad():
                    seg_metrics = self._calculate_segmentation_metrics(seg_prediction_logits, seg_mask)
                    for metric_name, metric_value in seg_metrics.items():
                        if np.isfinite(metric_value):
                            self.log(f'{stage}_seg_{metric_name}', metric_value, on_step=True, on_epoch=True, prog_bar=True)
            
        except RuntimeError as e:
            # Handle CUDA out-of-memory by skipping the batch with a dummy loss.
            if "out of memory" in str(e):
                torch.cuda.empty_cache()
                return torch.tensor(0.0, requires_grad=True, device=self.device)
            raise e
        
        # Log the individual reconstruction loss terms.
        for key, value in recon_loss_dict.items():
            if torch.isfinite(value):
                self.log(f'{stage}_{key}', value, on_step=True, on_epoch=True, prog_bar=True)
        
        # Log the combined loss.
        self.log(f'{stage}_total_loss', total_loss, on_step=True, on_epoch=True, prog_bar=True)
        
        # Reconstruction quality metrics (PSNR/SSIM), computed on CPU numpy arrays.
        with torch.no_grad():
            try:
                pred_np = prediction.detach().float().cpu().numpy()
                target_np = target.cpu().numpy()
                
                # Only evaluate on fully finite data.
                if np.isfinite(pred_np).all() and np.isfinite(target_np).all():
                    mpsnr, mssim, _, _ = tensor_accessment(
                        x_pred=pred_np,
                        x_true=target_np,
                        data_range=self.data_range,
                        multi_dimension=False
                    )
                    
                    # Log only finite metric values.
                    if np.isfinite(mpsnr) and np.isfinite(mssim):
                        self.log(f'{stage}_mpsnr', mpsnr, on_step=True, on_epoch=True, prog_bar=True)
                        self.log(f'{stage}_mssim', mssim, on_step=True, on_epoch=True, prog_bar=True)
            except Exception:
                # Best-effort metrics: skip silently if the computation fails.
                pass
        
        # Save diagnostic images for the first few batches (best-effort).
        if self.save_plots_dir and batch_idx < 3:
            try:
                if self.enable_segmentation:
                    # Convert logits to probabilities for visualization.
                    seg_prediction_probs = torch.sigmoid(seg_prediction_logits)
                    self._save_plots_with_segmentation(prediction, target, seg_prediction_probs, seg_mask, batch_idx, stage)
                else:
                    self._save_plots(prediction, target, batch_idx, stage)
            except Exception:
                # Plotting is non-critical; ignore failures.
                pass
        
        MInterface.print_model_parameters_status(self.model, f"{self.model_type} - end _shared_step")

        # Always remove debug hooks at the end of the step (if registered above).
        # self._remove_hooks(hooks)
        return total_loss

    def _save_plots(self, prediction, target, batch_idx: int, stage: str):
        """Save prediction/target images for one batch on background threads."""
        prefix = f'{stage}_batch{batch_idx}'
        for image, suffix in ((prediction.detach(), 'pred'), (target, 'target')):
            out_path = self.save_plots_dir / f'{prefix}_{suffix}.png'
            Thread(target=plot_images, args=(image, None, out_path), daemon=True).start()

    def training_step(self, batch, batch_idx):
        """Delegate one training batch to the shared step with the 'train' prefix."""
        return self._shared_step(batch, batch_idx, 'train')

    def validation_step(self, batch, batch_idx):
        """Delegate one validation batch to the shared step with the 'val' prefix."""
        return self._shared_step(batch, batch_idx, 'val')

    def on_before_optimizer_step(self, optimizer):
        """
        增强的梯度裁剪与范数监控
        Enhanced gradient clipping with norm monitoring
        """
        MInterface.print_model_parameters_status(self.model, f"{self.model_type} - Before Grad Clip")

        if self.grad_clip_norm > 0:
            # 执行梯度裁剪并获取梯度范数
            grad_norm = torch.nn.utils.clip_grad_norm_(self.parameters(), self.grad_clip_norm)
            
            # 记录梯度范数用于监控训练稳定性
            if torch.isfinite(grad_norm):
                self.log('grad_norm', grad_norm, on_step=True, prog_bar=False)

        MInterface.print_model_parameters_status(self.model, f"{self.model_type} - Before Optimizer Step")

    def on_train_batch_end(self, outputs, batch, batch_idx):
        """Refresh the EMA weights and log the current learning rate.

        The LR is read from the first optimizer's first param group so warmup
        progress is visible in the logs; lookup failures are ignored.
        """
        self._update_ema_model()

        try:
            opts = self.trainer.optimizers
            if opts:
                first = opts[0] if isinstance(opts, list) else opts
                self.log('learning_rate', first.param_groups[0]['lr'], on_step=True, prog_bar=False)
        except (IndexError, AttributeError, KeyError):
            # Skip LR logging when it cannot be determined.
            pass

    def on_train_epoch_end(self):
        """Print a one-line summary of the epoch's aggregated training metrics."""
        metrics = self.trainer.logged_metrics
        loss = metrics.get('train_total_loss_epoch', metrics.get('train_loss_epoch', 0))
        psnr = metrics.get('train_mpsnr_epoch', 0)
        ssim = metrics.get('train_mssim_epoch', 0)

        parts = [f"Epoch {self.current_epoch} [训练] - 损失: {loss:.5f} | PSNR: {psnr:.3f} | SSIM: {ssim:.4f}"]

        # Append segmentation metrics when that task is active.
        if self.enable_segmentation:
            seg_loss = metrics.get('train_seg_loss_epoch', 0)
            dice = metrics.get('train_seg_dice_epoch', 0)
            iou = metrics.get('train_seg_iou_epoch', 0)
            parts.append(f" | 分割损失: {seg_loss:.5f} | Dice: {dice:.4f} | IoU: {iou:.4f}")

        self.print("".join(parts))

    def on_validation_epoch_start(self):
        """
        验证开始时切换到EMA模型（如果可用）
        Switch to EMA model for validation if available
        """
        if self.ema_model is not None and self.ema_decay > 0:
            # 临时交换模型进行验证
            self._temp_model = self.model
            self.model = self.ema_model

    def on_validation_epoch_end(self):
        """Restore the training weights after validation and print a summary."""
        if self.ema_model is not None and self.ema_decay > 0:
            # Undo the swap performed in on_validation_epoch_start.
            self.model = self._temp_model
            delattr(self, '_temp_model')

        metrics = self.trainer.logged_metrics
        loss = metrics.get('val_total_loss_epoch', metrics.get('val_loss_epoch', 0))
        psnr = metrics.get('val_mpsnr_epoch', 0)
        ssim = metrics.get('val_mssim_epoch', 0)

        parts = [f"Epoch {self.current_epoch} [验证] - 损失: {loss:.5f} | PSNR: {psnr:.3f} | SSIM: {ssim:.4f}"]

        # Append segmentation metrics when that task is active.
        if self.enable_segmentation:
            seg_loss = metrics.get('val_seg_loss_epoch', 0)
            dice = metrics.get('val_seg_dice_epoch', 0)
            iou = metrics.get('val_seg_iou_epoch', 0)
            parts.append(f" | 分割损失: {seg_loss:.5f} | Dice: {dice:.4f} | IoU: {iou:.4f}")

        self.print("".join(parts))

    def _check_nan_in_forward(self):
        """
        注册前向钩子以查找哪个模块产生NaN。
        返回一个钩子列表，以便稍后移除。
        """
        hooks = []
        # 使用一个属性来确保只报告第一个产生NaN的模块
        if hasattr(self, '_nan_detected_module'):
            delattr(self, '_nan_detected_module')

        def hook(module, input, output):
            # 检查一次以找到第一个层
            if not hasattr(self, '_nan_detected_module'):
                # 检查元组或列表形式的输出
                if isinstance(output, (list, tuple)):
                    for i, out_tensor in enumerate(output):
                        if isinstance(out_tensor, torch.Tensor) and torch.isnan(out_tensor).any():
                            self._nan_detected_module = module.__class__.__name__
                            print(f"\n!!! NaN DETECTED in output tensor {i} of module: {module.__class__.__name__} !!!")
                            print(f"    Module details: {module}\n")
                            break
                # 检查单个张量输出
                elif isinstance(output, torch.Tensor) and torch.isnan(output).any():
                    self._nan_detected_module = module.__class__.__name__
                    print(f"\n!!! NaN DETECTED in output of module: {module.__class__.__name__} !!!")
                    print(f"    Module details: {module}\n")

        for name, module in self.model.named_modules():
            hooks.append(module.register_forward_hook(hook))
        return hooks

    def _remove_hooks(self, hooks):
        """移除所有已注册的钩子。"""
        for hook in hooks:
            hook.remove()

    @staticmethod
    def print_model_parameters_status(model, model_name="Model", check=False):
        """
        Walk all trainable parameters of *model* and print their status,
        including whether any contain NaN or Inf values.

        Args:
            model: module whose parameters are inspected.
            model_name: label used in the printed report.
            check: debug switch; when False (the default) the function
                returns immediately, so the call sites are no-ops in
                normal runs.
        """
        if not check:
            return
        print(f"\n--- Parameters Status for {model_name} ---")
        found_nan_overall = False
        found_inf_overall = False
        for name, param in model.named_parameters():
            if param.requires_grad:
                has_nan = torch.isnan(param.data).any().item()
                has_inf = torch.isinf(param.data).any().item()
                
                status_msgs = []
                if has_nan:
                    status_msgs.append("HAS NaN")
                    found_nan_overall = True
                if has_inf:
                    status_msgs.append("HAS Inf")
                    found_inf_overall = True
                
                status_str = ", ".join(status_msgs) if status_msgs else "OK"
                
                print(f"Parameter: {name:<50} Shape: {str(param.data.shape):<25} Grad: {param.requires_grad} Status: {status_str}")
                if has_nan or has_inf: # print extra detail for problematic parameters
                    # NOTE(review): the next condition is always False here (we only
                    # enter this branch when has_nan or has_inf is True), so the
                    # min/max line is dead code — presumably the intent was to guard
                    # against all-NaN/Inf tensors. Confirm intent before changing.
                    if not has_inf and not has_nan: # avoid min/max when everything is nan/inf
                        print(f"    Values (min/max): {param.data.min().item():.4e} / {param.data.max().item():.4e}")
                    if param.grad is not None:
                        grad_has_nan = torch.isnan(param.grad).any().item()
                        grad_has_inf = torch.isinf(param.grad).any().item()
                        grad_status_msgs = []
                        if grad_has_nan: grad_status_msgs.append("GRAD HAS NaN")
                        if grad_has_inf: grad_status_msgs.append("GRAD HAS Inf")
                        grad_status_str = ", ".join(grad_status_msgs) if grad_status_msgs else "GRAD OK"
                        print(f"    Gradient Status: {grad_status_str}")
                        if grad_has_nan or grad_has_inf:
                            # NOTE(review): same always-False pattern as above — dead code.
                            if not grad_has_inf and not grad_has_nan:
                                print(f"        Grad Values (min/max): {param.grad.min().item():.4e} / {param.grad.max().item():.4e}")
                    else:
                        print(f"    Gradient: None")
                
        if found_nan_overall:
            print(f"WARNING: NaN found in one or more parameters of {model_name}!")
        if found_inf_overall:
            print(f"WARNING: Inf found in one or more parameters of {model_name}!")
        if not found_nan_overall and not found_inf_overall:
            print(f"All parameters in {model_name} appear OK (no NaN/Inf).")
        print(f"--- End of Parameters Status for {model_name} ---\n")
