import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from datetime import datetime
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler, random_split
from torchvision.transforms import Compose, RandomAffine, RandomRotation, RandomResizedCrop
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
# Albumentations provides segmentation-aware augmentation (image and mask transformed in sync)
import albumentations as A
from albumentations.pytorch import ToTensorV2

# Configuration parameters
class ModelConfig:
    # Data paths
    TRAIN_IMAGE_DIR = "trainImage"
    TRAIN_MASK_DIR = "trainMask"
    
    # Training hyperparameters
    NUM_EPOCHS = 100
    BATCH_SIZE = 8
    LEARNING_RATE = 1e-4
    PATCH_SIZE = (448, 448)
    NUM_WORKERS = 4
    
    # Model parameters
    IN_CHANNELS = 3
    OUT_CHANNELS = 3  # three output classes (background, liver, tumor)
    
    # Output paths
    OUTPUT_DIR = "training_output"
    MODEL_SAVE_PATH = os.path.join(OUTPUT_DIR, "best_model.pth")
    LOG_FILE = os.path.join(OUTPUT_DIR, "training_log.csv")
    # Checkpoint path (used to resume interrupted training)
    CHECKPOINT_PATH = os.path.join(OUTPUT_DIR, "checkpoint.pth")
    
    def __init__(self):
        """Create the output directory, select the compute device, and ensure
        the CSV training log exists with a header row.

        Bug fix: the log file used to be truncated (opened with "w") on every
        instantiation, which wiped the accumulated history whenever training
        was resumed from a checkpoint. The header is now written only when the
        file does not exist yet, so resumed runs append to the same log.
        """
        os.makedirs(self.OUTPUT_DIR, exist_ok=True)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"使用设备: {self.device}")
        
        # Only create the log (with its header) when missing.
        if not os.path.exists(self.LOG_FILE):
            with open(self.LOG_FILE, "w") as f:
                f.write("epoch,train_loss,val_loss,liver_dice,tumor_dice,mean_dice,lr\n")

# Custom dataset class
class LiverDataset(Dataset):
    def __init__(self, image_dir, mask_dir, transform=None):
        """
        Dataset layout:
        - image: (H, W, 3), three channels stacked from [slice_n, slice_n+1, slice_n+2]
        - mask:  (H, W), single channel, the label for slice_n+1
        """
        self.image_dir = image_dir
        self.mask_dir = mask_dir
        self.transform = transform
        
        # Keep only slices whose mask contains liver (1) or tumor (2);
        # image and mask files are paired by sorted filename order.
        self.samples = []
        image_names = sorted(n for n in os.listdir(image_dir) if n.endswith('.npy'))
        mask_names = sorted(n for n in os.listdir(mask_dir) if n.endswith('.npy'))
        
        for image_name, mask_name in zip(image_names, mask_names):
            mask_path = os.path.join(mask_dir, mask_name)
            labels = np.load(mask_path)
            if ((labels == 1) | (labels == 2)).any():  # liver or tumor present
                self.samples.append({
                    'image': os.path.join(image_dir, image_name),
                    'mask': mask_path,
                })
    
    def __len__(self):
        return len(self.samples)
    
    def __getitem__(self, idx):
        entry = self.samples[idx]
        
        # Load raw arrays from disk.
        image = np.load(entry['image']).astype(np.float32)  # (H, W, C)
        mask = np.load(entry['mask']).astype(np.int64)      # (H, W)
        
        if self.transform:
            # Albumentations applies the same geometric transform to both,
            # keeping image and mask aligned.
            augmented = self.transform(image=image, mask=mask)
            return augmented['image'], augmented['mask']
        
        # No transform: convert to tensors manually, channels-first image.
        return (
            torch.from_numpy(image).permute(2, 0, 1),  # (C, H, W)
            torch.from_numpy(mask),                    # (H, W)
        )

# Data augmentation pipelines
def get_transforms(mode="train"):
    """Build the Albumentations pipeline for the given mode.

    "train" gets geometric + photometric augmentation; anything else
    (validation/test) only gets tensor conversion.
    """
    if mode != "train":
        # Validation / test: no augmentation, just tensor conversion.
        return A.Compose([ToTensorV2()])
    
    return A.Compose([
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Rotate(limit=15, p=0.5),
        A.RandomBrightnessContrast(p=0.2),
        A.GaussianBlur(p=0.1),
        ToTensorV2(),
    ])

# 2D UNet building blocks
# ResNet bottleneck
class ResNetBlock(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 (dilated) -> 1x1 expand, with a
    projection shortcut whenever spatial size or channel count changes."""
    expansion = 4  # output channels = out_channels * expansion
    
    def __init__(self, in_channels, out_channels, stride=1, dilation=1):
        super(ResNetBlock, self).__init__()
        expanded = out_channels * self.expansion
        
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        
        # Dilated 3x3 convolution: larger receptive field, same parameter count.
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                              stride=stride, padding=dilation, dilation=dilation,
                              bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        
        self.conv3 = nn.Conv2d(out_channels, expanded, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(expanded)
        
        self.relu = nn.ReLU(inplace=True)
        
        # Identity branch: project only when shapes differ.
        if stride != 1 or in_channels != expanded:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, expanded,
                         kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(expanded),
            )
        else:
            self.shortcut = nn.Sequential()
    
    def forward(self, x):
        residual = self.shortcut(x)
        
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        
        # Residual addition, then the final activation.
        y = y + residual
        return self.relu(y)

# SENet channel-attention module
class SEBlock(nn.Module):
    """Squeeze-and-Excitation: reweight channels by globally pooled statistics."""
    def __init__(self, in_channels, reduction=16):
        super(SEBlock, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_channels, in_channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(in_channels // reduction, in_channels, bias=False),
            nn.Sigmoid()
        )
    
    def forward(self, x):
        batch, channels = x.shape[:2]
        # Squeeze: global average pooling -> (B, C).
        squeezed = self.avg_pool(x).view(batch, channels)
        # Excite: per-channel gate in (0, 1), broadcast over H and W.
        gates = self.fc(squeezed).view(batch, channels, 1, 1)
        # Out-of-place multiply (not mul_) so autograd stays valid under
        # mixed-precision training.
        return x * gates

# Improved spatial feature fusion attention
class EnhancedAttention(nn.Module):
    """Sequential channel attention followed by spatial attention."""
    def __init__(self, in_channels):
        super(EnhancedAttention, self).__init__()
        # Channel attention: global pool, then a bottleneck MLP as 1x1 convs.
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, in_channels//8, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(in_channels//8, in_channels, kernel_size=1),
            nn.Sigmoid()
        )
        
        # Spatial attention over the [avg, max] channel statistics.
        self.spatial_attention = nn.Sequential(
            nn.Conv2d(2, 1, kernel_size=7, padding=3),  # 7x7 receptive field
            nn.Sigmoid()
        )
        
    def forward(self, x):
        # Gate each channel first.
        x = x * self.channel_attention(x)
        
        # Build a 2-channel spatial descriptor, then gate each location.
        descriptor = torch.cat(
            [x.mean(dim=1, keepdim=True), x.max(dim=1, keepdim=True).values],
            dim=1,
        )
        return x * self.spatial_attention(descriptor)

# SFF block: ResNet bottleneck + enhanced attention + feature pyramid
class SFFBlock(nn.Module):
    """Spatial feature fusion block: ResNet bottleneck, channel+spatial
    attention, then a small three-level feature pyramid fused along channels.

    Output channels = 3 * out_channels * ResNetBlock.expansion (three pyramid
    levels concatenated on dim 1).
    """
    def __init__(self, in_channels, out_channels, dilation=1):
        super(SFFBlock, self).__init__()
        # ResNet bottleneck in place of a plain conv stack.
        self.resnet_block = ResNetBlock(in_channels, out_channels, 
                                        dilation=dilation)
        
        # Channel + spatial attention on the expanded features.
        self.attention = EnhancedAttention(out_channels * ResNetBlock.expansion)
        
        # One 1x1 conv per pyramid level.
        self.feature_pyramid = nn.ModuleList([
            nn.Conv2d(out_channels * ResNetBlock.expansion, 
                     out_channels * ResNetBlock.expansion, 
                     kernel_size=1) for _ in range(3)
        ])
    
    def forward(self, x):
        x = self.resnet_block(x)
        x = self.attention(x)
        
        size = x.shape[2:]
        
        # Build the pyramid: level 0 at native resolution, level 1 upsampled
        # 2x from level 0, level 2 downsampled 0.5x from level 1.
        pyramid_features = []
        for i, conv in enumerate(self.feature_pyramid):
            if i == 0:
                pyramid_features.append(conv(x))
            else:
                scaled = F.interpolate(pyramid_features[-1], 
                                      scale_factor=2 if i < 2 else 0.5,
                                      mode='bilinear', align_corners=True)
                pyramid_features.append(conv(scaled))
        
        # Bug fix: the levels have different spatial sizes (1x, 2x, 1x), and
        # torch.cat along the channel axis requires identical H and W, so the
        # original concatenation raised at runtime. Resample every level back
        # to the native resolution before fusing.
        pyramid_features = [
            feat if feat.shape[2:] == size
            else F.interpolate(feat, size=size, mode='bilinear', align_corners=True)
            for feat in pyramid_features
        ]
        
        return torch.cat(pyramid_features, dim=1)

# 2D UNet model implementation
class UNetPlusPlus(nn.Module):
    """Encoder/decoder segmentation network built from ResNet-bottleneck +
    attention blocks, with SE-gated decoder stages and deep supervision.

    Channel bookkeeping: each encoder stage (see _block) ends in a ResNetBlock,
    so a stage configured with `features` channels actually emits
    `features * ResNetBlock.expansion` (= features * 4) channels; the literal
    input widths below follow from that.

    NOTE(review): self.sff1..4 and self.conv_out are constructed but never used
    in forward() (deep_supervision is hard-coded True, so forward returns the
    fused supervision heads) — presumably leftovers from an earlier variant;
    confirm before removing.
    """
    def __init__(self, in_channels, out_channels):
        super(UNetPlusPlus, self).__init__()
        
        # Encoder; the first two stages use dilated convolutions.
        self.enc1 = self._block(in_channels, 64, dilation=1)
        # Input widths account for the 4x expansion of the previous stage.
        self.enc2 = self._block(256, 128, dilation=2)  # input 256 = 64 * 4
        self.enc3 = self._block(512, 256)  # input 512 = 128 * 4
        self.enc4 = self._block(1024, 512)  # input 1024 = 256 * 4
        
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        
        # Bottleneck; gradient checkpointing is applied to it in forward().
        self.bottleneck = self._block(2048, 1024)  # input 2048 = 512 * 4
        
        # Decoder (UNet++-style dense conv blocks).
        # Channel math: ResNetBlock output = features * expansion (4).
        self.upconv4 = nn.ConvTranspose2d(4096, 512, kernel_size=2, stride=2)
        self.dec4 = self._dense_block(2560, 512)  # 512 (upsampled) + 2048 (enc4) = 2560
        
        self.upconv3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        # 256 (upsampled) + 1024 (enc3) = 1280
        self.dec3 = self._dense_block(1280, 256)
        
        self.upconv2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        # 128 (upsampled) + 512 (enc2) = 640
        self.dec2 = self._dense_block(640, 128)
        
        self.upconv1 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        # 64 (upsampled) + 256 (enc1) = 320
        self.dec1 = self._dense_block(320, 64)
        
        # SE channel gates applied after each decoder stage.
        self.se4 = SEBlock(512)
        self.se3 = SEBlock(256)
        self.se2 = SEBlock(128)
        self.se1 = SEBlock(64)
        
        # Spatial feature fusion modules (currently unused in forward()).
        self.sff4 = SFFBlock(512, 512)
        self.sff3 = SFFBlock(256, 256)
        self.sff2 = SFFBlock(128, 128)
        self.sff1 = SFFBlock(64, 64)
        
        # Final 1x1 projection (only reached when deep supervision is off).
        self.conv_out = nn.Conv2d(64, out_channels, kernel_size=1)
        
        # Deep supervision: one prediction head per decoder stage.
        self.deep_supervision = True
        if self.deep_supervision:
            self.out4 = nn.Conv2d(512, out_channels, kernel_size=1)
            self.out3 = nn.Conv2d(256, out_channels, kernel_size=1)
            self.out2 = nn.Conv2d(128, out_channels, kernel_size=1)
            self.out1 = nn.Conv2d(64, out_channels, kernel_size=1)
    
    def forward(self, x):
        """Return (B, out_channels, H, W) logits; with deep supervision the
        result is the average of the four upsampled stage heads."""
        # Encoder path.
        enc1 = self.enc1(x)
        enc2 = self.enc2(self.pool(enc1))
        enc3 = self.enc3(self.pool(enc2))
        enc4 = self.enc4(self.pool(enc3))
        
        # Bottleneck; gradient checkpointing in training only (trades
        # recomputation for activation memory).
        if self.training:
            bottleneck = torch.utils.checkpoint.checkpoint(
                self.bottleneck,  # pass the module itself as the function
                self.pool(enc4),
                use_reentrant=False
            )
        else:
            # No checkpointing during validation/inference.
            bottleneck = self.bottleneck(self.pool(enc4))
        
        # Decoder path: upsample, concat the matching encoder skip, conv, SE gate.
        dec4 = self.upconv4(bottleneck)
        dec4 = torch.cat([dec4, enc4], dim=1)
        dec4 = self.dec4(dec4)
        dec4 = self.se4(dec4)  # SE channel gate
        
        dec3 = self.upconv3(dec4)
        # Only the same-scale encoder skip is concatenated (no dense UNet++ skips).
        dec3 = torch.cat([dec3, enc3], dim=1)
        dec3 = self.dec3(dec3)
        dec3 = self.se3(dec3)  # SE channel gate
        
        dec2 = self.upconv2(dec3)
        dec2 = torch.cat([dec2, enc2], dim=1)
        dec2 = self.dec2(dec2)
        dec2 = self.se2(dec2)  # SE channel gate
        
        dec1 = self.upconv1(dec2)
        dec1 = torch.cat([dec1, enc1], dim=1)
        dec1 = self.dec1(dec1)
        dec1 = self.se1(dec1)  # SE channel gate
        
        if self.deep_supervision:
            out4 = self.out4(dec4)
            out3 = self.out3(dec3)
            out2 = self.out2(dec2)
            out1 = self.out1(dec1)
            
            # Upsample every head to the input resolution before averaging.
            out4 = F.interpolate(out4, size=x.shape[2:], mode='bilinear', align_corners=True)
            out3 = F.interpolate(out3, size=x.shape[2:], mode='bilinear', align_corners=True)
            out2 = F.interpolate(out2, size=x.shape[2:], mode='bilinear', align_corners=True)
            out1 = F.interpolate(out1, size=x.shape[2:], mode='bilinear', align_corners=True)
            
            return (out4 + out3 + out2 + out1) / 4  # fuse all supervision heads
            
        return self.conv_out(dec1)
    
    def _block(self, in_channels, features, dilation=1):
        """Encoder stage: ResNet bottleneck + channel/spatial attention.
        Output width is features * ResNetBlock.expansion."""
        return nn.Sequential(
            ResNetBlock(in_channels, features, dilation=dilation),  # bottleneck conv
            EnhancedAttention(features * ResNetBlock.expansion)  # attention
        )
    
    def _dense_block(self, in_channels, features):
        """Decoder stage used after skip concatenation: conv-BN-ReLU."""
        return nn.Sequential(
            nn.Conv2d(in_channels, features, kernel_size=3, padding=1),
            nn.BatchNorm2d(features),
            nn.ReLU(inplace=True)
        )

# Dice loss handling multi-class segmentation correctly
class DiceLoss(nn.Module):
    """Soft Dice loss over all classes, with optional per-class weights."""
    def __init__(self, weight=None, size_average=True):
        super(DiceLoss, self).__init__()
        self.smooth = 1.0
        self.weight = weight  # optional per-class weight tensor

    def forward(self, inputs, targets):
        # Class probabilities from the raw logits.
        probs = F.softmax(inputs, dim=1)
        
        # One-hot encode the integer labels to match (B, C, H, W).
        onehot = F.one_hot(targets.long(), num_classes=3).permute(0, 3, 1, 2).float()
        
        # Per-class overlap statistics, reduced over batch and spatial dims.
        overlap = (probs * onehot).sum(dim=(0, 2, 3))
        totals = probs.sum(dim=(0, 2, 3)) + onehot.sum(dim=(0, 2, 3))
        
        # Smoothed per-class Dice coefficient.
        dice = (2. * overlap + self.smooth) / (totals + self.smooth)
        
        # Optional class weighting (e.g. to emphasize the tumor class).
        if self.weight is not None:
            dice = dice * self.weight
        
        return 1 - dice.mean()

# Dice coefficient metrics (consistent with the loss computation)
def dice_coeff(pred, target, smooth=1.0, small_target_threshold=5):
    """Compute per-class Dice scores from logits, with a dedicated score for
    small tumors.

    Args:
        pred: (B, 3, H, W) raw logits.
        target: (B, H, W) integer labels {0: background, 1: liver, 2: tumor}.
        smooth: Laplace smoothing term.
        small_target_threshold: tumors with fewer pixels than this count as
            "small" and contribute to the dedicated small-tumor score.

    Returns:
        Tuple of floats:
        (background_dice, liver_dice, tumor_dice, mean_dice, small_tumor_dice);
        small_tumor_dice is 0.0 when the batch contains no small tumor.
    """
    pred = F.softmax(pred, dim=1)
    pred = pred.argmax(dim=1)
    
    # One-hot encode both prediction and target.
    target = target.long()
    pred = F.one_hot(pred, num_classes=3).permute(0, 3, 1, 2).float()
    target = F.one_hot(target, num_classes=3).permute(0, 3, 1, 2).float()
    
    # Per-class Dice over the whole batch.
    intersection = (pred * target).sum(dim=(0, 2, 3))
    union = pred.sum(dim=(0, 2, 3)) + target.sum(dim=(0, 2, 3))
    dice_per_channel = (2. * intersection + smooth) / (union + smooth)
    
    # Dedicated Dice for samples with a small (but non-empty) tumor.
    # Bug fix: small_dice was initialized to the float 0.0 but then returned
    # via .item(), which crashed with AttributeError whenever no small tumor
    # was present. Also use sum() instead of any(dim=tuple) for portability
    # across torch versions.
    small_dice = 0.0
    tumor_area = target[:, 2].sum(dim=(1, 2))
    small_target_mask = (tumor_area > 0) & (tumor_area < small_target_threshold)
    if small_target_mask.any():
        small_pred = pred[small_target_mask]
        small_target = target[small_target_mask]
        small_intersection = (small_pred * small_target).sum()
        small_union = small_pred.sum() + small_target.sum()
        small_dice = ((2. * small_intersection + smooth) / (small_union + smooth)).item()
    
    return (
        dice_per_channel[0].item(),  # background Dice
        dice_per_channel[1].item(),  # liver Dice
        dice_per_channel[2].item(),  # tumor Dice
        dice_per_channel.mean().item(),  # mean Dice
        small_dice,  # small-tumor Dice (0.0 if none present)
    )

# Class-weight computation for imbalanced segmentation labels
def calculate_class_weights(dataset):
    """Return normalized inverse-frequency weights for classes 0/1/2.

    Iterates the dataset once, counting pixels per class (background, liver,
    tumor), then returns 1/count normalized to sum to 1 as a float32 tensor.
    """
    pixel_counts = np.zeros(3)  # background, liver, tumor
    
    for _, mask in dataset:
        labels, counts = np.unique(mask.numpy(), return_counts=True)
        for label, count in zip(labels, counts):
            if label < 3:  # ignore anything outside {0, 1, 2}
                pixel_counts[label] += count
    
    # Guard against empty classes before taking the reciprocal.
    pixel_counts = np.maximum(pixel_counts, 1e-7)
    inverse = 1.0 / pixel_counts
    inverse /= inverse.sum()  # normalize to sum to 1
    
    return torch.tensor(inverse, dtype=torch.float32)

# Focal loss implementation
class FocalLoss(nn.Module):
    """Focal loss: cross-entropy reweighted to concentrate on hard examples."""
    def __init__(self, alpha=0.25, gamma=2.0, reduction='mean'):
        super(FocalLoss, self).__init__()
        self.alpha = alpha          # global scaling factor
        self.gamma = gamma          # hard-example focusing exponent
        self.reduction = reduction  # 'mean', 'sum', or anything else for per-element

    def forward(self, inputs, targets):
        # Per-element cross entropy; targets must be integer class indices.
        ce_loss = F.cross_entropy(inputs, targets.long(), reduction='none')
        # pt: the model's probability for the true class.
        pt = torch.exp(-ce_loss)
        
        # Down-weight easy examples (pt close to 1).
        focal_loss = self.alpha * (1 - pt) ** self.gamma * ce_loss
        
        if self.reduction == 'mean':
            return focal_loss.mean()
        if self.reduction == 'sum':
            return focal_loss.sum()
        return focal_loss

# Model training
def train_model(config):
    """Full training loop: data loading with weighted sampling, AMP training,
    validation with Dice metrics, checkpointing, and periodic progress plots.

    Returns the model carrying the last epoch's weights; the best-mean-Dice
    weights are saved to config.MODEL_SAVE_PATH.
    """
    # Full dataset with training-time augmentation.
    # NOTE(review): the validation split below shares this augmented dataset,
    # so validation batches are also randomly augmented — consider a separate
    # un-augmented dataset for the validation indices. TODO confirm intent.
    full_dataset = LiverDataset(
        config.TRAIN_IMAGE_DIR, 
        config.TRAIN_MASK_DIR,
        transform=get_transforms("train")
    )
    
    # 80/20 train/validation split, seeded for reproducibility.
    train_size = int(0.8 * len(full_dataset))
    val_size = len(full_dataset) - train_size
    train_dataset, val_dataset = random_split(
        full_dataset, [train_size, val_size],
        generator=torch.Generator().manual_seed(42)
    )
    
    # Class weights computed on the training split only (no leakage).
    class_weights = calculate_class_weights(train_dataset)
    print(f"类别权重: 背景={class_weights[0]:.4f}, 肝脏={class_weights[1]:.4f}, 肿瘤={class_weights[2]:.4f}")
    
    # Weighted sampler: each sample's weight is the sum of the weights of the
    # classes it contains, so tumor-bearing slices are drawn more often.
    sample_weights = []
    for _, mask in train_dataset:
        class_indices = mask.flatten().unique()
        weight = sum(class_weights[cls.item()] for cls in class_indices)
        sample_weights.append(weight)
    
    sampler = WeightedRandomSampler(
        weights=sample_weights,
        num_samples=len(sample_weights),
        replacement=True
    )
    
    # Data loaders.
    train_loader = DataLoader(
        train_dataset,
        batch_size=config.BATCH_SIZE,
        sampler=sampler,  # weighted sampling replaces shuffle
        num_workers=config.NUM_WORKERS,
        pin_memory=True
    )
    
    val_loader = DataLoader(
        val_dataset,
        batch_size=config.BATCH_SIZE,
        shuffle=False,
        num_workers=config.NUM_WORKERS,
        pin_memory=True
    )
    
    # Model.
    model = UNetPlusPlus(
        in_channels=config.IN_CHANNELS,
        out_channels=config.OUT_CHANNELS
    ).to(config.device)
    
    # Losses and optimizer: class-weighted Dice plus focal loss.
    criterion_dice = DiceLoss(weight=class_weights.to(config.device))
    criterion_focal = FocalLoss(alpha=0.25, gamma=2.0)
    optimizer = optim.Adam(model.parameters(), lr=config.LEARNING_RATE)
    
    # Mixed precision only makes sense on CUDA; on CPU both the scaler and
    # autocast are disabled so training still runs.
    # Bug fix: autocast used to hard-code device_type='cuda' and the scaler
    # was always enabled, which breaks on CPU-only machines.
    use_amp = config.device.type == 'cuda'
    scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
    
    # Training state.
    best_dice = 0.0
    train_losses, val_losses = [], []
    liver_dices, tumor_dices, small_tumor_dices = [], [], []
    mean_dices = []   # per-epoch mean Dice history
    dice_scores = []  # same values, kept for checkpoint compatibility
    
    # Resume from checkpoint if one exists.
    start_epoch = 0
    if os.path.exists(config.CHECKPOINT_PATH):
        print(f"发现检查点文件: {config.CHECKPOINT_PATH}, 尝试恢复训练...")
        # map_location makes resuming portable across CPU/GPU machines.
        checkpoint = torch.load(config.CHECKPOINT_PATH, map_location=config.device)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        scaler.load_state_dict(checkpoint['scaler_state_dict'])
        start_epoch = checkpoint['epoch'] + 1
        best_dice = checkpoint['best_dice']
        train_losses = checkpoint['train_losses']
        val_losses = checkpoint['val_losses']
        liver_dices = checkpoint['liver_dices']
        tumor_dices = checkpoint['tumor_dices']
        small_tumor_dices = checkpoint['small_tumor_dices']
        mean_dices = checkpoint['mean_dices']
        dice_scores = checkpoint['dice_scores']
        print(f"成功恢复训练状态, 将从epoch {start_epoch}继续训练")
    
    # Training loop.
    for epoch in range(start_epoch, config.NUM_EPOCHS):
        print(f"\nEpoch {epoch+1}/{config.NUM_EPOCHS}")
        
        # GPU memory diagnostics.
        if torch.cuda.is_available():
            print(f"当前显存使用: {torch.cuda.memory_allocated(config.device)/1e9:.2f} GB / "
                  f"{torch.cuda.get_device_properties(config.device).total_memory/1e9:.2f} GB")
            print(f"最大显存占用: {torch.cuda.max_memory_allocated(config.device)/1e9:.2f} GB")
            torch.cuda.reset_peak_memory_stats()
        
        # --- Training phase ---
        model.train()
        epoch_train_loss = 0.0
        
        # (Removed per-batch torch.cuda.empty_cache(): it synchronizes the GPU
        # every iteration and slows training without lowering peak usage.)
        for images, masks in tqdm(train_loader, desc="训练"):
            images = images.to(config.device)
            masks = masks.to(config.device)
            
            optimizer.zero_grad()
            
            # Forward under autocast (no-op when AMP is disabled).
            with torch.amp.autocast(device_type=config.device.type,
                                    dtype=torch.float16, enabled=use_amp):
                outputs = model(images)
                
                # Combined loss.
                loss_dice = criterion_dice(outputs, masks)
                loss_focal = criterion_focal(outputs, masks)
                total_loss = loss_dice + loss_focal
            
            # Scaled backward + optimizer step.
            scaler.scale(total_loss).backward()
            scaler.step(optimizer)
            scaler.update()
            
            epoch_train_loss += total_loss.item()
        
        epoch_train_loss /= len(train_loader)
        train_losses.append(epoch_train_loss)
        
        # --- Validation phase ---
        model.eval()
        epoch_val_loss = 0.0
        liver_dice_list, tumor_dice_list, small_tumor_dice_list = [], [], []
        mean_dice_list = []  # per-batch mean Dice
        
        with torch.no_grad():
            for images, masks in tqdm(val_loader, desc="验证"):
                images = images.to(config.device)
                masks = masks.to(config.device)
                
                outputs = model(images)
                loss = criterion_dice(outputs, masks)
                epoch_val_loss += loss.item()
                
                # dice_coeff returns (background, liver, tumor, mean, small).
                # Bug fix: the old unpacking shifted everything by one slot, so
                # the logged "liver" Dice was actually the background score and
                # the true tumor score was discarded.
                _, liver_dice, tumor_dice, mean_dice, small_tumor_dice = dice_coeff(outputs, masks)
                liver_dice_list.append(liver_dice)
                tumor_dice_list.append(tumor_dice)
                small_tumor_dice_list.append(small_tumor_dice)
                mean_dice_list.append(mean_dice)
        
        epoch_val_loss /= len(val_loader)
        val_losses.append(epoch_val_loss)
        
        # Epoch-level averages.
        avg_liver_dice = np.mean(liver_dice_list)
        avg_tumor_dice = np.mean(tumor_dice_list)
        avg_small_tumor_dice = np.mean(small_tumor_dice_list)
        avg_mean_dice = np.mean(mean_dice_list)
        # Bug fix: the per-epoch Dice histories were never appended to, so the
        # progress plots and resumed checkpoints always carried empty lists.
        liver_dices.append(avg_liver_dice)
        tumor_dices.append(avg_tumor_dice)
        small_tumor_dices.append(avg_small_tumor_dice)
        mean_dices.append(avg_mean_dice)
        dice_scores.append(avg_mean_dice)
        
        print(f"训练损失: {epoch_train_loss:.4f}, 验证损失: {epoch_val_loss:.4f}")
        print(f"肝脏Dice: {avg_liver_dice:.4f}, 肿瘤Dice: {avg_tumor_dice:.4f}, 平均Dice: {avg_mean_dice:.4f}")
        print(f"当前学习率: {optimizer.param_groups[0]['lr']:.2e}")
        
        # Append to the CSV log.
        with open(config.LOG_FILE, "a") as f:
            f.write(f"{epoch+1},{epoch_train_loss:.4f},{epoch_val_loss:.4f},{avg_liver_dice:.4f},{avg_tumor_dice:.4f},{avg_mean_dice:.4f},{optimizer.param_groups[0]['lr']:.6f}\n")
        
        # Save the best model by mean Dice.
        if avg_mean_dice > best_dice:
            best_dice = avg_mean_dice
            torch.save(model.state_dict(), config.MODEL_SAVE_PATH)
            print(f"保存最佳模型，Dice: {best_dice:.4f}")
        
        # Save the resume checkpoint every epoch.
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scaler_state_dict': scaler.state_dict(),
            'best_dice': best_dice,
            'train_losses': train_losses,
            'val_losses': val_losses,
            'liver_dices': liver_dices,
            'tumor_dices': tumor_dices,
            'small_tumor_dices': small_tumor_dices,
            'mean_dices': mean_dices,
            'dice_scores': dice_scores
        }
        torch.save(checkpoint, config.CHECKPOINT_PATH)
        print(f"训练检查点已保存 (epoch {epoch+1})")
        
        # Refresh the progress plots every 5 epochs.
        if (epoch + 1) % 5 == 0:
            plot_training_progress(
                train_losses, val_losses, 
                liver_dices, tumor_dices, small_tumor_dices,
                config
            )

    print(f"训练完成! 最佳Dice分数: {best_dice:.4f}")
    return model

# Plot training progress curves
def plot_training_progress(train_losses, val_losses, liver_dices, tumor_dices, small_tumor_dices, config):
    """Render loss and Dice-score curves into OUTPUT_DIR/training_progress.png."""
    plt.figure(figsize=(15, 15))

    def finish_panel(title, ylabel, bounded):
        # Shared decoration for every subplot.
        plt.title(title)
        plt.xlabel("Epoch")
        plt.ylabel(ylabel)
        if bounded:
            plt.ylim(0, 1)
        plt.legend()
        plt.grid(True)

    # Panel 1: training vs validation loss.
    plt.subplot(3, 1, 1)
    plt.plot(train_losses, label="Training Loss")
    plt.plot(val_losses, label="Validation Loss")
    finish_panel("Training and Validation Loss", "Loss", False)

    # Panel 2: liver and tumor Dice.
    plt.subplot(3, 1, 2)
    plt.plot(liver_dices, label="Liver Dice", color="blue")
    plt.plot(tumor_dices, label="Tumor Dice", color="red")
    finish_panel("Organ Dice Scores", "Dice Score", True)

    # Panel 3: small-tumor Dice.
    plt.subplot(3, 1, 3)
    plt.plot(small_tumor_dices, label="Small Tumor Dice", color="green")
    finish_panel("Small Tumor Detection Score", "Dice Score", True)

    plt.tight_layout()
    plt.savefig(os.path.join(config.OUTPUT_DIR, "training_progress.png"))
    plt.close()

# Entry point
def main():
    """Run the full training pipeline and report wall-clock timing."""
    started = datetime.now()
    print(f"训练开始时间: {started.strftime('%Y-%m-%d %H:%M:%S')}")

    # Build the configuration (creates output dirs and the log file).
    config = ModelConfig()

    # Train.
    model = train_model(config)

    finished = datetime.now()
    duration = finished - started
    print(f"训练完成! 总耗时: {duration}")
    print(f"最佳模型保存路径: {config.MODEL_SAVE_PATH}")

if __name__ == "__main__":
    main()