import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torch.cuda.amp import GradScaler, autocast
from sklearn.metrics import precision_score, recall_score, f1_score
import matplotlib.pyplot as plt
import nibabel as nib
import warnings
from tqdm import tqdm

# Suppress warning messages to keep training logs readable
warnings.filterwarnings('ignore')

# Seed every RNG source so experiments are reproducible
def set_seed(seed=42):
    """Seed Python, NumPy, and PyTorch RNGs and force deterministic cuDNN."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels trade some speed for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

set_seed(42)

# Select the compute device (GPU when available, otherwise CPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")
if torch.cuda.is_available():
    print(f"GPU型号: {torch.cuda.get_device_name(0)}")


# Dataset definition
class BraTSDataset(Dataset):
    """BraTS multimodal MRI dataset.

    Loads the four MRI modalities plus the segmentation mask for one
    patient per sample, restricted to a fixed window of axial slices.
    """

    def __init__(self, patient_list, data_dir, slice_range=(60, 100)):
        """
        :param patient_list: list of patient IDs
        :param data_dir: root directory holding one folder per patient
        :param slice_range: (start, stop) z-slice window, used to cut compute
        """
        self.data_dir = data_dir
        self.patients = patient_list
        # The four BraTS modalities, in channel order.
        self.modalities = ["flair", "t1", "t1ce", "t2"]
        self.slice_range = slice_range

    def load_nifti(self, path):
        """Load a NIfTI file and return it as a float32 numpy array."""
        return np.array(nib.load(path).get_fdata(), dtype=np.float32)

    def __len__(self):
        """Number of patients in the dataset."""
        return len(self.patients)

    def __getitem__(self, idx):
        """Return (image tensor [C, H, W, D], label tensor [H, W, D])."""
        pid = self.patients[idx]
        p_path = os.path.join(self.data_dir, pid)
        lo, hi = self.slice_range

        # Load each modality, crop to the slice window, z-score normalize.
        channels = []
        for mod in self.modalities:
            volume = self.load_nifti(os.path.join(p_path, f"{pid}_{mod}.nii.gz"))
            volume = volume[:, :, lo:hi]
            volume = (volume - np.mean(volume)) / (np.std(volume) + 1e-6)
            channels.append(np.nan_to_num(volume))  # guard against NaNs

        imgs = torch.tensor(np.stack(channels), dtype=torch.float32)

        # Segmentation mask: BraTS label 4 is remapped to 3 so classes are 0..3.
        seg = self.load_nifti(os.path.join(p_path, f"{pid}_seg.nii.gz"))
        seg = seg[:, :, lo:hi]
        seg[seg == 4] = 3
        mask = torch.tensor(seg.astype(np.uint8), dtype=torch.long)

        return imgs, mask


# Residual block
class ResidualBlock(nn.Module):
    """Two 3x3x3 conv + batch-norm layers with a residual skip connection."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Main path: conv -> BN -> ReLU -> conv -> BN.
        self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm3d(out_channels)

        # Skip path: a 1x1x1 conv aligns channel counts when they differ.
        if in_channels == out_channels:
            self.residual = nn.Identity()
        else:
            self.residual = nn.Conv3d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        shortcut = self.residual(x)
        h = self.conv1(x)
        h = self.relu(self.bn1(h))
        h = self.bn2(self.conv2(h))
        # Add the skip connection before the final activation.
        return self.relu(h + shortcut)


# 3D U-Net model
class Improved3DUNet(nn.Module):
    """Two-level 3D U-Net built from residual blocks with skip connections."""

    def __init__(self, in_channels=4, out_channels=4, base_filters=32):
        super().__init__()
        f = base_filters
        # Encoder: two residual stages, each followed by 2x downsampling.
        self.enc1 = ResidualBlock(in_channels, f)
        self.pool1 = nn.MaxPool3d(2)
        self.enc2 = ResidualBlock(f, f * 2)
        self.pool2 = nn.MaxPool3d(2)

        # Bottleneck at the lowest resolution.
        self.bottleneck = ResidualBlock(f * 2, f * 4)

        # Decoder: transpose convs upsample; concatenated skips double channels.
        self.up2 = nn.ConvTranspose3d(f * 4, f * 2, kernel_size=2, stride=2)
        self.dec2 = ResidualBlock(f * 4, f * 2)
        self.up1 = nn.ConvTranspose3d(f * 2, f, kernel_size=2, stride=2)
        self.dec1 = ResidualBlock(f * 2, f)

        # Final 1x1x1 conv maps features to per-class logits.
        self.out_conv = nn.Conv3d(f, out_channels, kernel_size=1)

    def forward(self, x):
        # Encoder path, keeping feature maps for the skip connections.
        skip1 = self.enc1(x)
        skip2 = self.enc2(self.pool1(skip1))

        bottom = self.bottleneck(self.pool2(skip2))

        # Decoder path: upsample and fuse with matching encoder features.
        up = self.dec2(torch.cat([self.up2(bottom), skip2], dim=1))
        up = self.dec1(torch.cat([self.up1(up), skip1], dim=1))

        return self.out_conv(up)


# Loss functions
class DiceLoss(nn.Module):
    """Soft multi-class Dice loss computed on softmax probabilities."""

    def __init__(self, smooth=1e-6):
        super().__init__()
        # Small constant keeping the ratio finite when a class is absent.
        self.smooth = smooth

    def forward(self, y_pred, y_true):
        n_cls = y_pred.shape[1]
        probs = F.softmax(y_pred, dim=1)
        # One-hot encode targets and move the class axis next to batch.
        target = F.one_hot(y_true, num_classes=n_cls).permute(0, 4, 1, 2, 3).float()

        # Flatten spatial dims: [B, C, H*W*D].
        probs = probs.view(probs.size(0), n_cls, -1)
        target = target.view(target.size(0), n_cls, -1)

        overlap = (probs * target).sum(dim=2)
        denom = probs.sum(dim=2) + target.sum(dim=2)
        dice = (2. * overlap + self.smooth) / (denom + self.smooth)
        return 1 - dice.mean()


class CombinedLoss(nn.Module):
    """Weighted sum of Dice loss and cross-entropy loss."""

    def __init__(self, weight_dice=0.7, weight_ce=0.3):
        super().__init__()
        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.ce_loss = nn.CrossEntropyLoss()
        self.dice_loss = DiceLoss()

    def forward(self, inputs, targets):
        # Dice rewards region overlap; CE sharpens per-voxel classification.
        return (self.weight_ce * self.ce_loss(inputs, targets)
                + self.weight_dice * self.dice_loss(inputs, targets))


# Evaluation metric helpers
def dice_coefficient(y_pred, y_true, smooth=1e-6):
    """Mean soft Dice coefficient over all classes, as a Python float."""
    n_cls = y_pred.shape[1]
    probs = F.softmax(y_pred.float(), dim=1)
    onehot = F.one_hot(y_true, num_classes=n_cls).permute(0, 4, 1, 2, 3).float()

    # Flatten spatial dims so the per-class sums are simple reductions.
    probs = probs.view(probs.size(0), n_cls, -1)
    onehot = onehot.view(onehot.size(0), n_cls, -1)

    overlap = (probs * onehot).sum(dim=2)
    total = probs.sum(dim=2) + onehot.sum(dim=2)

    dice = (2. * overlap + smooth) / (total + smooth)
    return dice.mean().item()


def iou_score(y_pred, y_true, smooth=1e-6):
    """Mean IoU over foreground classes present in prediction or ground truth.

    :param y_pred: raw logits, shape [B, C, H, W, D]
    :param y_true: integer labels, shape [B, H, W, D]
    :param smooth: stabilizer added to numerator and denominator
    :return: mean IoU as a Python float; 0.0 when no foreground voxels exist
    """
    num_classes = y_pred.shape[1]
    y_pred_labels = torch.argmax(y_pred, dim=1)

    iou_sum = 0.0
    count = 0

    # Background (class 0) is excluded from the average.
    for cls in range(1, num_classes):
        pred_mask = (y_pred_labels == cls)
        true_mask = (y_true == cls)

        intersection = (pred_mask & true_mask).sum().float()
        union = (pred_mask | true_mask).sum().float()

        # Classes absent from both prediction and ground truth are skipped
        # so they do not inflate the average.
        if union > 0:
            # .item() keeps the running sum a plain float. The original code
            # returned a 0-dim torch.Tensor when any class was present but a
            # Python float (0.0) otherwise — an inconsistent return type.
            iou_sum += ((intersection + smooth) / (union + smooth)).item()
            count += 1

    return iou_sum / count if count > 0 else 0.0


# Training / validation loops
def train_one_epoch(model, dataloader, optimizer, criterion, device, scaler):
    """Run one training epoch; returns (avg_loss, avg_dice, avg_iou)."""
    model.train()
    running = {"loss": 0.0, "dice": 0.0, "iou": 0.0}

    for inputs, targets in tqdm(dataloader, desc="训练中"):
        inputs = inputs.to(device)
        targets = targets.to(device)
        optimizer.zero_grad()

        # Forward pass under automatic mixed precision.
        with autocast():
            logits = model(inputs)
            loss = criterion(logits, targets)

        # Scaled backward pass; unscale before clipping so the threshold
        # applies to the true gradient magnitudes.
        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        scaler.step(optimizer)
        scaler.update()

        # Accumulate batch metrics.
        running["loss"] += loss.item()
        running["dice"] += dice_coefficient(logits, targets)
        running["iou"] += iou_score(logits, targets)

        # Release cached GPU memory between batches.
        torch.cuda.empty_cache()

    n_batches = len(dataloader)
    return (running["loss"] / n_batches,
            running["dice"] / n_batches,
            running["iou"] / n_batches)


def validate_one_epoch(model, dataloader, criterion, device):
    """Run one validation epoch; returns (avg_loss, avg_dice, avg_iou)."""
    model.eval()
    running = {"loss": 0.0, "dice": 0.0, "iou": 0.0}

    # No gradients are needed for evaluation.
    with torch.no_grad():
        for inputs, targets in tqdm(dataloader, desc="验证中"):
            inputs = inputs.to(device)
            targets = targets.to(device)

            with autocast():
                logits = model(inputs)
                loss = criterion(logits, targets)

            # Accumulate batch metrics.
            running["loss"] += loss.item()
            running["dice"] += dice_coefficient(logits, targets)
            running["iou"] += iou_score(logits, targets)

            # Release cached GPU memory between batches.
            torch.cuda.empty_cache()

    n_batches = len(dataloader)
    return (running["loss"] / n_batches,
            running["dice"] / n_batches,
            running["iou"] / n_batches)


# Early stopping
class EarlyStopping:
    """Stop training when the validation score stops improving.

    Tracks the best (highest) score seen so far. The counter is incremented
    whenever a new score fails to beat the best by more than ``min_delta``;
    once it reaches ``patience``, ``early_stop`` is set.

    Fix: the original implementation only incremented the counter when the
    score *dropped* by more than ``min_delta``, and its else-branch overwrote
    ``best_score`` with non-improving (possibly lower) values — so a plateau
    never triggered a stop and the best-score bar drifted downward. This
    version implements standard early stopping on a maximized metric.
    """

    def __init__(self, patience=5, min_delta=0.001):
        self.patience = patience    # epochs to wait without improvement
        self.min_delta = min_delta  # minimum change that counts as improvement
        self.counter = 0
        self.best_score = None
        self.early_stop = False

    def __call__(self, val_score):
        if self.best_score is None or val_score > self.best_score + self.min_delta:
            # Genuine improvement: record it and reset the wait counter.
            self.best_score = val_score
            self.counter = 0
        else:
            # No meaningful improvement this epoch.
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True


# Read the patient ID list
def read_patients(filename):
    """Return the list of patient IDs stored one per line in *filename*."""
    with open(filename, "r") as handle:
        contents = handle.read()
    return contents.splitlines()


# Plot training curves
def plot_training_curves(train_losses, val_losses, train_dices, val_dices, train_ious, val_ious, save_path):
    """Plot loss/Dice/IoU train-vs-validation curves and save to *save_path*."""
    # (title, y-label, train series, train label, val series, val label)
    panels = [
        ('损失曲线', '损失值', train_losses, '训练损失', val_losses, '验证损失'),
        ('Dice系数曲线', 'Dice系数', train_dices, '训练Dice', val_dices, '验证Dice'),
        ('IoU曲线', 'IoU值', train_ious, '训练IoU', val_ious, '验证IoU'),
    ]

    plt.figure(figsize=(15, 10))
    for pos, (title, ylabel, tr, tr_label, va, va_label) in enumerate(panels, start=1):
        plt.subplot(2, 2, pos)
        plt.plot(tr, label=tr_label)
        plt.plot(va, label=va_label)
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.legend()
        plt.grid(True)

    plt.tight_layout()
    plt.savefig(save_path)
    plt.close()


# Main training routine
def main():
    """End-to-end training: data loading, model/optimizer setup, the
    train/validate loop, checkpointing, early stopping, and curve plotting."""
    # Configuration parameters
    config = {
        "data_dir": "/kaggle/working/brats20_nii_gz/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData",
        "train_list": "train.txt",
        "val_list": "val.txt",
        "slice_range": (60, 100),  # axial slice window passed to the dataset
        "batch_size": 1,
        "num_workers": 2,
        "base_filters": 16,
        "lr": 2e-4,
        "weight_decay": 1e-5,
        "num_epochs": 100,
        "patience": 10,  # early-stopping patience, in epochs
        "output_dir": "models",
        "log_freq": 1  # plot curves every N epochs
    }

    # Create the output directory
    os.makedirs(config["output_dir"], exist_ok=True)

    # Load the patient ID lists
    train_patients = read_patients(config["train_list"])
    val_patients = read_patients(config["val_list"])
    print(f"训练集大小: {len(train_patients)}, 验证集大小: {len(val_patients)}")

    # Build datasets and data loaders
    train_dataset = BraTSDataset(train_patients, config["data_dir"], config["slice_range"])
    val_dataset = BraTSDataset(val_patients, config["data_dir"], config["slice_range"])

    train_loader = DataLoader(
        train_dataset,
        batch_size=config["batch_size"],
        shuffle=True,
        num_workers=config["num_workers"],
        pin_memory=True
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=config["batch_size"],
        shuffle=False,
        num_workers=config["num_workers"],
        pin_memory=True
    )

    # Initialize the model, loss function, optimizer, and scheduler
    model = Improved3DUNet(in_channels=4, out_channels=4, base_filters=config["base_filters"]).to(device)
    criterion = CombinedLoss(weight_dice=0.7, weight_ce=0.3)
    optimizer = optim.AdamW(model.parameters(), lr=config["lr"], weight_decay=config["weight_decay"])
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=config["num_epochs"], eta_min=1e-6)
    scaler = GradScaler()  # gradient scaler for mixed-precision training
    early_stopping = EarlyStopping(patience=config["patience"])

    # Metric history collected across epochs
    train_losses, val_losses = [], []
    train_dices, val_dices = [], []
    train_ious, val_ious = [], []
    best_dice = 0.0

    # Start training
    print("开始训练...")
    for epoch in range(config["num_epochs"]):
        print(f"\nEpoch [{epoch+1}/{config['num_epochs']}]")

        # Train
        train_loss, train_dice, train_iou = train_one_epoch(
            model, train_loader, optimizer, criterion, device, scaler
        )

        # Validate
        val_loss, val_dice, val_iou = validate_one_epoch(
            model, val_loader, criterion, device
        )

        # Step the learning-rate schedule
        scheduler.step()

        # Record metrics
        train_losses.append(train_loss)
        val_losses.append(val_loss)
        train_dices.append(train_dice)
        val_dices.append(val_dice)
        train_ious.append(train_iou)
        val_ious.append(val_iou)

        # Print metrics
        print(f"训练: 损失={train_loss:.4f}, Dice={train_dice:.4f}, IoU={train_iou:.4f}")
        print(f"验证: 损失={val_loss:.4f}, Dice={val_dice:.4f}, IoU={val_iou:.4f}")
        print(f"当前学习率: {optimizer.param_groups[0]['lr']:.6f}")

        # Save the best model (by validation Dice)
        if val_dice > best_dice:
            best_dice = val_dice
            model_path = os.path.join(config["output_dir"], f"best_model_dice_{best_dice:.4f}.pth")
            torch.save(model.state_dict(), model_path)
            print(f"已保存最佳模型到 {model_path}")

        # Early-stopping check
        early_stopping(val_dice)
        if early_stopping.early_stop:
            print(f"早停触发，在第 {epoch+1} 轮停止训练")
            break

        # Periodically refresh the training-curve plot
        if (epoch + 1) % config["log_freq"] == 0:
            plot_training_curves(
                train_losses, val_losses,
                train_dices, val_dices,
                train_ious, val_ious,
                os.path.join(config["output_dir"], "training_curves.png")
            )

    # Training finished: save the final model and curves
    final_model_path = os.path.join(config["output_dir"], "final_model.pth")
    torch.save(model.state_dict(), final_model_path)
    print(f"已保存最终模型到 {final_model_path}")

    plot_training_curves(
        train_losses, val_losses,
        train_dices, val_dices,
        train_ious, val_ious,
        os.path.join(config["output_dir"], "final_training_curves.png")
    )

    print(f"训练完成！最佳验证Dice系数: {best_dice:.4f}")


if __name__ == "__main__":
    main()
