# %%
# Convert every uncompressed .nii volume in the BraTS 2020 folders to
# compressed .nii.gz, mirroring the original case-folder layout.
import os
import nibabel as nib  # NIfTI medical-image I/O
from glob import glob  # shell-style path matching
from tqdm import tqdm  # progress bar

# Source directories holding the raw .nii files (Kaggle layout).
directories = [
    "brats2020/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData",
    "brats2020/BraTS2020_ValidationData/MICCAI_BraTS2020_ValidationData"
]
# Root under which the compressed copies are written.
output_base = "brats2020"
os.makedirs(output_base, exist_ok=True)

for base_dir in directories:
    # One sub-folder per patient case; each holds the modality volumes.
    nii_files = sorted(glob(os.path.join(base_dir, "*", "*.nii")))
    print(f"🔄 正在转换: {base_dir}")
    for nii_path in tqdm(nii_files):
        volume = nib.load(nii_path)
        # Keep the path relative to the dataset root so the case-folder
        # structure is reproduced under output_base.
        rel_path = os.path.relpath(nii_path, "brats2020")
        dest_path = os.path.join(output_base, rel_path + ".gz")
        os.makedirs(os.path.dirname(dest_path), exist_ok=True)
        # nibabel infers gzip compression from the .nii.gz extension.
        nib.save(volume, dest_path)
print("✅ 所有.nii文件已成功转换为.nii.gz格式")

# %%
# Normalize segmentation-mask filenames: rename every compressed mask whose
# name contains "Segm" to the uniform pattern "<case_id>_seg.nii.gz".
import os
from glob import glob

# Training data root (one sub-folder per patient case).
train_dir = "brats2020/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData"

for case_folder in sorted(os.listdir(train_dir)):
    case_path = os.path.join(train_dir, case_folder)
    if not os.path.isdir(case_path):
        continue  # skip stray non-directory entries

    for file in os.listdir(case_path):
        # Only compressed NIfTI files whose name marks them as segmentations.
        if not (file.endswith(".nii.gz") and "Segm" in file):
            continue
        old_path = os.path.join(case_path, file)
        new_path = os.path.join(case_path, f"{case_folder}_seg.nii.gz")
        os.rename(old_path, new_path)
        print(f"已重命名: {old_path} -> {new_path}")

# %%
# 导入必要的库
import os
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchinfo import summary  # 用于显示模型结构
from torchsummary import summary
from torch.utils.data import DataLoader, Dataset  # 用于构建数据加载器
from torch.cuda.amp import GradScaler  # 用于混合精度训练
from sklearn.model_selection import train_test_split  # 用于数据集划分
from sklearn.metrics import classification_report, confusion_matrix, precision_score, recall_score, f1_score
import seaborn as sns
import matplotlib.pyplot as plt
import cv2
import nibabel as nib
import warnings
# 忽略警告信息
warnings.filterwarnings('ignore')

# %%
!pip install torchinfo
!pip install nibabel

# %%
# Paths to the BraTS 2020 training and validation splits.
train_dir = "brats2020/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData"
val_dir = "brats2020/BraTS2020_ValidationData/MICCAI_BraTS2020_ValidationData"

# %%
# Directory containing the training cases used for the visualization below.
data_dir = "brats2020/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData"

# Collect all case folders and pick one at random for a visual sanity check.
all_cases = sorted([d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))])
case_id = random.choice(all_cases)
print("随机选择的病例:", case_id)

# Path to the selected case.
case_path = os.path.join(data_dir, case_id)
# The four MRI modalities shipped with BraTS.
modalities = ["flair", "t1", "t1ce", "t2"]
images = []

# Load each modality volume as a numpy array.
for mod in modalities:
    img_path = os.path.join(case_path, f"{case_id}_{mod}.nii.gz")
    img = nib.load(img_path).get_fdata()
    images.append(img)

# Load the ground-truth segmentation mask.
seg_path = os.path.join(case_path, f"{case_id}_seg.nii.gz")
seg = nib.load(seg_path).get_fdata()

# Pick a random axial (Z) slice to display.
max_slice = images[0].shape[2]
slice_id = random.randint(0, max_slice - 1)

# Visualize the modalities and the segmentation for that slice.
plt.figure(figsize=(15, 6))
# Row 1: the four modalities in grayscale.
for i in range(4):
    plt.subplot(3, 5, i + 1)
    plt.imshow(images[i][:, :, slice_id], cmap="gray")
    plt.title(f"{modalities[i].upper()} (gray)")
    plt.axis("off")

# Row 1, last panel: segmentation in grayscale.
plt.subplot(3, 5, 5)
plt.imshow(seg[:, :, slice_id], cmap="gray")
plt.title("SEG (gray)")
plt.axis("off")

# Row 2: the four modalities as heatmaps.
for i in range(4):
    plt.subplot(3, 5, i + 6)
    plt.imshow(images[i][:, :, slice_id], cmap="plasma")
    plt.title(f"{modalities[i].upper()} (heatmap)")
    plt.axis("off")

# Row 2, last panel: segmentation as a heatmap.
plt.subplot(3, 5, 10)
plt.imshow(seg[:, :, slice_id], cmap="plasma")
plt.title("SEG (heatmap)")
plt.axis("off")

# NOTE(review): the suptitle mixes languages ("зріз" = "slice"); it is runtime
# output, so it is left byte-for-byte unchanged here.
plt.suptitle(f"Modalities + Markup | {case_id}, зріз {slice_id}", fontsize=14)
plt.tight_layout()
plt.show()

# %%
# Build a GIF-like animation to scroll through the 3D volume in the notebook.
from matplotlib import animation
from IPython.display import HTML

# One row: four modalities plus the segmentation mask.
fig, axs = plt.subplots(1, 5, figsize=(15, 3))
plt.tight_layout()

# Frame callback: redraw every panel for axial slice z.
def animate(z):
    # Draw the four modalities in grayscale.
    for i in range(4):
        axs[i].clear()
        axs[i].imshow(images[i][:, :, z], cmap="gray")
        axs[i].set_title(f"{modalities[i].upper()}")
        axs[i].axis("off")
    axs[4].clear()
    axs[4].imshow(seg[:, :, z], cmap="nipy_spectral")
    axs[4].set_title("SEG")
    axs[4].axis("off")
    fig.suptitle(f"Case: {case_id} | Slice: {z}", fontsize=14)

# One frame per axial slice, 100 ms apart, looping.
ani = animation.FuncAnimation(fig, animate, frames=max_slice, interval=100, repeat=True)
plt.close(fig)  # suppress the static figure; only the animation is shown

# Render the animation as interactive JavaScript/HTML in the notebook.
HTML(ani.to_jshtml())

# %%
# Select the compute device, preferring CUDA when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    num_gpus = torch.cuda.device_count()
    torch.cuda.empty_cache()  # Clear GPU cache before starting
    if num_gpus > 1:
        print(f"Using {num_gpus} GPUs for training.")
    else:
        print(f"Using {torch.cuda.get_device_name(0)} for training.")
else:
    print("Using CPU for training.")

# %%
# === Settings for Kaggle ===
# Deterministic 70/15/15 patient-level split, persisted to text files so
# later cells (and restarted kernels) reuse the exact same partition.
import os
import random

data_dir = "brats2020/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData"
all_patients = sorted(
    p for p in os.listdir(data_dir)
    if os.path.isdir(os.path.join(data_dir, p))
)

# === Separation: fixed seed keeps the shuffle reproducible.
random.seed(42)
random.shuffle(all_patients)

n_total = len(all_patients)
n_train = int(0.7 * n_total)
n_val   = int(0.15 * n_total)
n_test  = n_total - n_train - n_val  # remainder goes to the test split

splits = {
    "train": all_patients[:n_train],
    "val": all_patients[n_train:n_train + n_val],
    "test": all_patients[n_train + n_val:],
}

# === Saving lists in the Kaggle working directory, one patient ID per line.
for split_name, split_patients in splits.items():
    with open(f"brats2020/{split_name}.txt", "w") as f:
        f.write("\n".join(split_patients))

train_patients = splits["train"]
val_patients   = splits["val"]
test_patients  = splits["test"]

print(f"✅ Recorded: {len(train_patients)} train, {len(val_patients)} val, {len(test_patients)} test.")

# %%
# === 2. Dataset ===
class BraTSDataset(Dataset):
    """Per-patient 3D BraTS samples: 4 stacked MRI modalities plus a mask.

    Each item is a (image, mask) pair: image is a float32 tensor shaped
    [4, H, W, slices] (z-score normalised per modality volume) and mask is
    a long tensor of the same spatial shape with labels {0, 1, 2, 3}.
    """

    def __init__(self, patient_list, data_dir, slice_range=(60, 100)):
        self.data_dir = data_dir
        self.patients = patient_list
        self.modalities = ["flair", "t1", "t1ce", "t2"]
        # Axial slice window loaded from each volume (memory optimisation).
        self.slice_range = slice_range

    def load_nifti(self, path):
        # Read a NIfTI volume as a float32 numpy array.
        return np.array(nib.load(path).get_fdata(), dtype=np.float32)

    def __getitem__(self, idx):
        pid = self.patients[idx]
        p_path = os.path.join(self.data_dir, pid)
        lo, hi = self.slice_range

        channels = []
        for mod in self.modalities:
            vol = self.load_nifti(os.path.join(p_path, f"{pid}_{mod}.nii.gz"))
            vol = vol[:, :, lo:hi]
            # Per-volume z-score normalisation; epsilon guards constant volumes.
            vol = (vol - np.mean(vol)) / (np.std(vol) + 1e-6)
            channels.append(np.nan_to_num(vol))
        imgs = torch.tensor(np.stack(channels), dtype=torch.float32)

        seg = self.load_nifti(os.path.join(p_path, f"{pid}_seg.nii.gz"))
        seg = seg[:, :, lo:hi]
        seg[seg == 4] = 3  # remap BraTS label 4 (enhancing tumour) to 3
        mask = torch.tensor(seg.astype(np.uint8), dtype=torch.long)
        return imgs, mask

    def __len__(self):
        return len(self.patients)

def read_patients(filename):
    """Return the patient IDs stored one per line in *filename*."""
    with open(filename, "r") as handle:
        return handle.read().splitlines()

# %%
# First DataLoader construction. NOTE(review): superseded by the next cell,
# which rebuilds the same loaders with named datasets and worker processes.
train_loader = DataLoader(
    BraTSDataset(read_patients("brats2020/train.txt"), data_dir),
    batch_size=1, shuffle=True
)

# Validation loader: fixed order for reproducible metrics.
val_loader = DataLoader(
    BraTSDataset(read_patients("brats2020/val.txt"), data_dir),
    batch_size=1, shuffle=False
)

# Test loader: fixed order as well.
test_loader = DataLoader(
    BraTSDataset(read_patients("brats2020/test.txt"), data_dir),
    batch_size=1, shuffle=False
)

# %%
# === Patient list reading function ===
# NOTE(review): duplicate of read_patients defined in the previous cell;
# kept so this cell can run standalone after a kernel restart.
def read_patients(filename):
    """Return the patient IDs stored one per line in *filename*."""
    with open(filename, "r") as f:
        return f.read().splitlines()

# === Paths ===
data_dir = "brats2020/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData"

# === Read the patient splits written earlier ===
train_patients = read_patients("brats2020/train.txt")
val_patients   = read_patients("brats2020/val.txt")
test_patients  = read_patients("brats2020/test.txt")

# === Datasets restricted to axial slices 60-100 (memory optimisation) ===
train_dataset = BraTSDataset(train_patients, data_dir, slice_range=(60, 100))
val_dataset   = BraTSDataset(val_patients, data_dir, slice_range=(60, 100))
test_dataset  = BraTSDataset(test_patients, data_dir,slice_range=(60, 100))

# === DataLoaders: one whole 3D volume per batch ===
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=2)
val_loader   = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=2)
test_loader  = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=2)

# === Sanity-check one sample from each loader ===
sample_img, sample_mask = next(iter(train_loader))
print(f"Train image shape: {sample_img.shape}")
print(f"Train segmentation shape: {sample_mask.shape}")

sample_img, sample_mask = next(iter(val_loader))
print(f"Validation image shape: {sample_img.shape}")
print(f"Validation segmentation shape: {sample_mask.shape}")

sample_img, sample_mask = next(iter(test_loader))
print(f"Test image shape: {sample_img.shape}")

# %%
# === 4. 3D U-Net model built from residual blocks ===
class ResidualBlock(nn.Module):
    """Two 3x3x3 Conv3d+BatchNorm3d layers with a ReLU-activated skip path."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm3d(out_channels)
        # Project the input with a 1x1x1 conv only when channel counts differ.
        if in_channels != out_channels:
            self.residual = nn.Conv3d(in_channels, out_channels, kernel_size=1)
        else:
            self.residual = nn.Identity()

    def forward(self, x):
        skip = self.residual(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return self.relu(h + skip)


class Improved3DUNet(nn.Module):
    """Compact 3D U-Net (two down / two up levels) of residual blocks.

    Input [B, in_channels, H, W, D] -> logits [B, out_channels, H, W, D];
    spatial dimensions must be divisible by 4 (two 2x poolings).
    """

    def __init__(self, in_channels=4, out_channels=4, base_filters=32):
        super().__init__()
        f1, f2, f4 = base_filters, base_filters * 2, base_filters * 4
        self.enc1 = ResidualBlock(in_channels, f1)
        self.pool1 = nn.MaxPool3d(2)
        self.enc2 = ResidualBlock(f1, f2)
        self.pool2 = nn.MaxPool3d(2)

        self.bottleneck = ResidualBlock(f2, f4)

        self.up2 = nn.ConvTranspose3d(f4, f2, kernel_size=2, stride=2)
        self.dec2 = ResidualBlock(f4, f2)  # f4 = upsampled f2 + skip f2
        self.up1 = nn.ConvTranspose3d(f2, f1, kernel_size=2, stride=2)
        self.dec1 = ResidualBlock(f2, f1)

        self.out_conv = nn.Conv3d(f1, out_channels, kernel_size=1)

    def forward(self, x):
        # Encoder path, keeping feature maps for the skip connections.
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool1(e1))
        b = self.bottleneck(self.pool2(e2))
        # Decoder path: upsample and concatenate the matching encoder map.
        d2 = self.dec2(torch.cat([self.up2(b), e2], dim=1))
        d1 = self.dec1(torch.cat([self.up1(d2), e1], dim=1))
        return self.out_conv(d1)

# === Model initialization ===
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Improved3DUNet().to(device)

# === Print a layer-by-layer summary ===
# NOTE(review): `summary` was imported from both torchinfo and torchsummary at
# the top of the file; the later torchsummary import shadows torchinfo, and
# this call uses the torchsummary signature (input_size WITHOUT batch dim).
summary(model, input_size=(4, 128, 128, 64))

# === Save the (untrained) weights for the cells below ===
torch.save(model.state_dict(), "improved3dunet.pth")
print("Model saved as 'improved3dunet.pth'")

# %%
import torch
import torch.nn as nn
from torchinfo import summary

# === 1. 3D U-Net model with residual blocks ===
# NOTE(review): duplicate of the ResidualBlock defined earlier in the file;
# redefined so this cell can run standalone after a kernel restart.
class ResidualBlock(nn.Module):
    """Two 3x3x3 Conv3d+BatchNorm3d layers with a ReLU residual connection."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm3d(out_channels)

        # 1x1x1 projection only when the channel count changes.
        self.residual = nn.Conv3d(in_channels, out_channels, kernel_size=1) \
            if in_channels != out_channels else nn.Identity()

    def forward(self, x):
        identity = self.residual(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)

class Improved3DUNet(nn.Module):
    """Compact 3D U-Net (two down / two up levels) of residual blocks.

    NOTE(review): duplicate of the Improved3DUNet defined earlier in the
    file; redefined so this cell can run standalone.
    """

    def __init__(self, in_channels=4, out_channels=4, base_filters=32):
        super().__init__()
        self.enc1 = ResidualBlock(in_channels, base_filters)
        self.pool1 = nn.MaxPool3d(2)
        self.enc2 = ResidualBlock(base_filters, base_filters * 2)
        self.pool2 = nn.MaxPool3d(2)

        self.bottleneck = ResidualBlock(base_filters * 2, base_filters * 4)

        # Decoder: transposed convs upsample; dec blocks take upsample + skip.
        self.up2 = nn.ConvTranspose3d(base_filters * 4, base_filters * 2, kernel_size=2, stride=2)
        self.dec2 = ResidualBlock(base_filters * 4, base_filters * 2)
        self.up1 = nn.ConvTranspose3d(base_filters * 2, base_filters, kernel_size=2, stride=2)
        self.dec1 = ResidualBlock(base_filters * 2, base_filters)

        self.out_conv = nn.Conv3d(base_filters, out_channels, kernel_size=1)

    def forward(self, x):
        # Encoder with skip connections.
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool1(e1))
        b = self.bottleneck(self.pool2(e2))
        # Decoder: upsample, concatenate the matching encoder feature map.
        d2 = self.dec2(torch.cat([self.up2(b), e2], dim=1))
        d1 = self.dec1(torch.cat([self.up1(d2), e1], dim=1))
        return self.out_conv(d1)

# === 2. Loading the model ===
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Improved3DUNet().to(device)

# === 3. Loading saved weights ===
# map_location lets CPU-only sessions load a GPU-saved checkpoint.
model.load_state_dict(torch.load("improved3dunet.pth", map_location=device))
model.eval()

# === 4. Model architecture output ===
# torchinfo signature: input_size INCLUDES the batch dimension.
summary(model, input_size=(1, 4, 128, 128, 64))  # [B, C, H, W, D]

# %%
# Export the network to ONNX for framework-independent inference.
import torch

# === Build the model and load the saved weights ===
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Improved3DUNet().to(device)
model.load_state_dict(torch.load("improved3dunet.pth", map_location=device))
model.eval()

# === Dummy input fixing the traced spatial shape ===
dummy_input = torch.randn(1, 4, 128, 128, 64).to(device)  # [B, C, H, W, D]

# === Export to ONNX; only the batch dimension is left dynamic ===
torch.onnx.export(
    model,
    dummy_input,
    "improved3dunet.onnx",
    export_params=True,
    opset_version=11,
    do_constant_folding=True,
    input_names=['input'],
    output_names=['output'],
    dynamic_axes={
        'input': {0: 'batch_size'},
        'output': {0: 'batch_size'}
    }
)

print("✅ Model saved as 'improved3dunet.onnx'")

# %%
class DiceLoss(nn.Module):
    """Soft multi-class Dice loss, averaged over classes and batch.

    Expects raw logits of shape [B, C, H, W, D] and integer targets of
    shape [B, H, W, D]; returns 1 - mean Dice.
    """

    def __init__(self, smooth=1e-6):
        super(DiceLoss, self).__init__()
        self.smooth = smooth  # stabilises the ratio for empty classes

    def forward(self, y_pred, y_true):
        n_classes = y_pred.shape[1]
        probs = F.softmax(y_pred, dim=1)
        # One-hot the targets and move the class axis next to batch.
        target = F.one_hot(y_true, num_classes=n_classes).permute(0, 4, 1, 2, 3).float()

        # Collapse all spatial dimensions -> [B, C, voxels].
        probs = probs.reshape(probs.size(0), n_classes, -1)
        target = target.reshape(target.size(0), n_classes, -1)

        overlap = (probs * target).sum(dim=2)
        denom = probs.sum(dim=2) + target.sum(dim=2)
        dice = (2. * overlap + self.smooth) / (denom + self.smooth)
        return 1 - dice.mean()

class CombinedLoss(nn.Module):
    """Weighted sum of CrossEntropy and soft Dice losses.

    Args:
        weight_dice: multiplier for the Dice term.
        weight_ce: multiplier for the CrossEntropy term.
        class_weights: optional tensor of size [num_classes] with per-class
            weights for the CrossEntropy term. (The original docstring
            advertised this parameter but it was never implemented; the
            default None preserves the previous unweighted behaviour.)
    """

    def __init__(self, weight_dice=0.7, weight_ce=0.3, class_weights=None):
        super().__init__()
        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        # weight=None is exactly the previous behaviour.
        self.ce_loss = nn.CrossEntropyLoss(weight=class_weights)
        self.dice_loss = DiceLoss()

    def forward(self, inputs, targets):
        """Return weight_ce * CE(inputs, targets) + weight_dice * Dice(inputs, targets)."""
        ce = self.ce_loss(inputs, targets)
        dice = self.dice_loss(inputs, targets)
        return self.weight_ce * ce + self.weight_dice * dice

# %%
# Initialize model with improved architecture.
# FIX: the original assigned `.to(device)` twice in a row (a redundant
# `model = model.to(device)` immediately after construction); moving the
# model once is sufficient.
model = Improved3DUNet(in_channels=4, out_channels=4, base_filters=16).to(device)

# Use mixed precision for memory efficiency.
use_amp = True
scaler = GradScaler(enabled=use_amp)

# Combined CE + Dice loss for better segmentation results.
criterion = CombinedLoss(weight_dice=0.7, weight_ce=0.3)

# AdamW with cosine-annealed learning rate for better convergence.
optimizer = optim.AdamW(model.parameters(), lr=2e-4, weight_decay=1e-5)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=30, eta_min=1e-6)

# Updated Dice coefficient for evaluation
def dice_coefficient(y_pred, y_true, smooth=1e-6):
    """Mean soft Dice over all classes and the batch.

    y_pred: raw logits [B, C, H, W, D]; y_true: integer mask [B, H, W, D].
    Returns a Python float.
    """
    n_classes = y_pred.shape[1]
    probs = F.softmax(y_pred.float(), dim=1)  # logits -> probabilities
    onehot = F.one_hot(y_true, num_classes=n_classes).permute(0, 4, 1, 2, 3).float()

    # Collapse spatial dimensions -> [B, C, voxels].
    probs = probs.reshape(probs.size(0), n_classes, -1)
    onehot = onehot.reshape(onehot.size(0), n_classes, -1)

    overlap = (probs * onehot).sum(dim=2)
    denom = probs.sum(dim=2) + onehot.sum(dim=2)

    return ((2. * overlap + smooth) / (denom + smooth)).mean().item()

# %%
def iou_and_accuracy(y_pred, y_true, num_classes=4, ignore_background=True, smooth=1e-6):
    """Average per-class voxel-wise IoU and binary accuracy.

    y_pred: logits from the model [B, C, H, W, D];
    y_true: ground-truth labels [B, H, W, D].
    Classes whose predicted+true union is empty are skipped; class 0
    (background) is skipped when ignore_background is True.
    Returns (mean_iou, mean_accuracy) as Python floats (0.0 if no class
    contributed).
    """
    # Hard labels from the logits.
    labels = torch.argmax(y_pred, dim=1)  # [B, H, W, D]
    ious, accs = [], []
    first_class = 1 if ignore_background else 0

    for cls in range(first_class, num_classes):
        pred_mask = (labels == cls)
        true_mask = (y_true == cls)
        inter = (pred_mask & true_mask).sum().float()
        union = (pred_mask | true_mask).sum().float()
        if union > 0:
            # Binary accuracy over ALL voxels (true negatives included).
            correct = (pred_mask == true_mask).sum().float()
            ious.append((inter + smooth) / (union + smooth))
            accs.append(correct / torch.numel(true_mask))

    mean_iou = torch.mean(torch.stack(ious)) if ious else torch.tensor(0.0)
    mean_acc = torch.mean(torch.stack(accs)) if accs else torch.tensor(0.0)
    return mean_iou.item(), mean_acc.item()

# %%
# === 1. Train for one epoch ===
def train_one_epoch(model, dataloader, optimizer, criterion, device, scaler=None, use_amp=True):
    """Run one training pass over *dataloader*.

    Forward runs under CUDA autocast; when *scaler* is given, gradients are
    scaled, unscaled, clipped to norm 1.0, then stepped (the required AMP
    order). Returns mean (loss, dice, iou, accuracy) over all batches.
    """
    model.train()
    total_loss, total_dice, total_iou, total_acc = 0.0, 0.0, 0.0, 0.0

    for i, (x, y) in enumerate(dataloader):
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()

        # Forward pass in (optionally) reduced precision.
        with torch.cuda.amp.autocast(enabled=use_amp):
          out = model(x)
          loss = criterion(out, y)

        if scaler:
          # AMP path: scale -> backward -> unscale -> clip -> step -> update.
          scaler.scale(loss).backward()
          scaler.unscale_(optimizer)
          torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
          scaler.step(optimizer)
          scaler.update()
        else:
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()

        # Metrics computed from the raw logits.
        total_loss += loss.item()
        total_dice += dice_coefficient(out, y)
        iou, acc = iou_and_accuracy(out, y)
        total_iou += iou
        total_acc += acc

        # Release cached GPU memory between large 3D batches.
        torch.cuda.empty_cache()

    n = len(dataloader)
    return total_loss / n, total_dice / n, total_iou / n, total_acc / n

# === 2. Validation for one epoch ===
def validate_one_epoch(model, dataloader, criterion, device, use_amp=True):
    """Evaluate *model* on *dataloader*; returns mean loss/Dice/IoU/accuracy."""
    model.eval()
    loss_sum = dice_sum = iou_sum = acc_sum = 0.0

    with torch.no_grad():
        for inputs, targets in dataloader:
            inputs, targets = inputs.to(device), targets.to(device)
            # Autocast mirrors the training configuration.
            with torch.cuda.amp.autocast(enabled=use_amp):
                logits = model(inputs)
                loss = criterion(logits, targets)

            loss_sum += loss.item()
            dice_sum += dice_coefficient(logits, targets)
            batch_iou, batch_acc = iou_and_accuracy(logits, targets)
            iou_sum += batch_iou
            acc_sum += batch_acc

            # Release cached GPU memory between large 3D batches.
            torch.cuda.empty_cache()

    n_batches = len(dataloader)
    return (loss_sum / n_batches, dice_sum / n_batches,
            iou_sum / n_batches, acc_sum / n_batches)


# === 3. Early stop class ===
class EarlyStopping:
    """Flag training to stop once validation loss has not improved by at
    least *min_delta* for *patience* consecutive calls to step()."""

    def __init__(self, patience=5, min_delta=0.001):
        self.patience = patience
        self.min_delta = min_delta
        self.counter = 0                 # epochs since the last improvement
        self.best_loss = float('inf')
        self.early_stop = False

    def step(self, val_loss):
        """Record one epoch's validation loss and update the stop flag."""
        improved = val_loss < self.best_loss - self.min_delta
        if improved:
            self.best_loss = val_loss
            self.counter = 0
            return
        self.counter += 1
        if self.counter >= self.patience:
            self.early_stop = True

# %%
# === Training hyper-parameters ===
num_epochs = 50
best_val_dice = 0.0
early_stopping = EarlyStopping(patience=10)

# === Per-epoch metric logs ===
train_losses, val_losses = [], []
train_dice_scores, val_dice_scores = [], []
train_accuracies, val_accuracies = [], []
train_ious, val_ious = [], []
learning_rates = []


try:
    for epoch in range(num_epochs):
        print(f"\nEpoch [{epoch+1}/{num_epochs}]")

        # === Training ===
        train_loss, train_dice, train_iou, train_acc = train_one_epoch(model, train_loader, optimizer, criterion, device, scaler)

        # === Validation ===
        val_loss, val_dice, val_iou, val_acc = validate_one_epoch(model, val_loader, criterion, device)

        # === Record metrics ===
        train_losses.append(train_loss)
        train_dice_scores.append(train_dice)
        train_accuracies.append(train_acc)
        train_ious.append(train_iou)

        val_losses.append(val_loss)
        val_dice_scores.append(val_dice)
        val_accuracies.append(val_acc)
        val_ious.append(val_iou)

        # === Track the learning rate, then advance the cosine schedule ===
        current_lr = optimizer.param_groups[0]['lr']
        learning_rates.append(current_lr)
        scheduler.step()

        # === Report epoch metrics ===
        print(f"Train | Loss: {train_loss:.4f} | Dice: {train_dice:.4f} | Iou: {train_iou:.4f} | Acc: {train_acc:.4f}")
        print(f"Val | Loss: {val_loss:.4f} | Dice: {val_dice:.4f} | Iou: {val_iou:.4f} | Acc: {val_acc:.4f} | LR: {current_lr:.6f}")

        # === Checkpoint on best validation Dice ===
        if val_dice > best_val_dice:
          best_val_dice = val_dice
          torch.save(model.state_dict(), 'best_brats_model_dice.pth')
          print(f"\n The best model has been saved. (Dice: {best_val_dice:.4f})")

        # === Early-stopping check (monitors validation loss) ===
        early_stopping.step(val_loss)
        if early_stopping.early_stop:
          print("\n Early stop - training completed.")
          break
        torch.cuda.empty_cache()


# NOTE(review): deliberately broad catch-all so any crash (e.g. CUDA OOM)
# still saves the current weights before the cell exits.
except Exception as e:
    print(f"\n Error during training: {e}")
    torch.save(model.state_dict(), 'interrupted_model.pth')
    print("The model is saved as 'interrupted_model.pth'")

# === Summary plots: loss, Dice, and learning-rate curves ===
epochs = range(1, len(train_losses) + 1)
plt.figure(figsize=(16, 5))

plt.subplot(1, 3, 1)
plt.plot(epochs, train_losses, label='Train Loss')
plt.plot(epochs, val_losses, label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss per Epoch')
plt.legend()
plt.grid(True)

plt.subplot(1, 3, 2)
plt.plot(epochs, train_dice_scores, label='Train Dice')
plt.plot(epochs, val_dice_scores, label='Validation Dice')
plt.xlabel('Epoch')
plt.ylabel('Dice')
plt.title('Dice per Epoch')
plt.legend()
plt.grid(True)

plt.subplot(1, 3, 3)
plt.plot(epochs, learning_rates)
plt.xlabel('Epoch')
plt.ylabel('LR')
plt.title('Learning Rate')
plt.grid(True)

plt.tight_layout()
plt.savefig('training_metrics_summary.png')
plt.show()

print(f"\n Training complete! Best Dice on validation: {best_val_dice:.4f}")

# %%
# Plot the 2x2 training-metric grid only if at least one epoch completed.
if len(train_losses) > 0:
  try:
    epochs = list(range(1, len(train_losses) +1))
    plt.figure(figsize=(16, 10))  # taller figure to fit the 2x2 grid

    # Loss
    plt.subplot(2, 2, 1)
    plt.plot(epochs, train_losses, label='Train Loss')
    plt.plot(epochs, val_losses, label='Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Loss per Epoch')
    plt.legend()
    plt.grid(True)

    # Dice
    plt.subplot(2, 2, 2)
    plt.plot(epochs, train_dice_scores, label='Train Dice')
    plt.plot(epochs, val_dice_scores, label='Validation Dice')
    plt.xlabel('Epoch')
    plt.ylabel('Dice Score')
    plt.title('Dice per Epoch')
    plt.legend()
    plt.grid(True)

    # Accuracy
    plt.subplot(2, 2, 3)
    plt.plot(epochs, train_accuracies, label='Train Accuracy')
    plt.plot(epochs, val_accuracies, label='Validation Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.title('Accuracy per Epoch')
    plt.legend()
    plt.grid(True)

    # IoU
    plt.subplot(2, 2, 4)
    plt.plot(epochs, train_ious, label='Train IoU')
    plt.plot(epochs, val_ious, label='Validation IoU')
    plt.xlabel('Epoch')
    plt.ylabel('IoU')
    plt.title('IoU per Epoch')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig('training_metrics.png')
    print("Metrics graph saved  'training_metrics.png'")
  except Exception as e:
    print(f"Error while creating graphs: {e}")

# %%
def plot_training_results(train_losses, val_losses,
                          train_dice_scores, val_dice_scores,
                          train_accuracies, val_accuracies,
                          train_ious, val_ious,
                          save_path='training_metrics.png'):
    """Plot train/validation Loss, Dice, Accuracy and IoU curves on a 3x2
    grid, save the figure to *save_path*, and show it."""
    try:
        epochs = range(1, len(train_losses) + 1)
        plt.figure(figsize=(16, 10))

        # (title, y-label, train series, validation series, legend label)
        panels = [
            ('Loss per Epoch', 'Loss', train_losses, val_losses, 'Loss'),
            ('Dice per Epoch', 'Dice Score', train_dice_scores, val_dice_scores, 'Dice'),
            ('Accuracy per Epoch', 'Accuracy', train_accuracies, val_accuracies, 'Accuracy'),
            ('IoU per Epoch', 'IoU', train_ious, val_ious, 'IoU'),
        ]
        for idx, (title, ylabel, train_series, val_series, label) in enumerate(panels, start=1):
            plt.subplot(3, 2, idx)
            plt.plot(epochs, train_series, label=f'Train {label}')
            plt.plot(epochs, val_series, label=f'Validation {label}')
            plt.xlabel('Epoch')
            plt.ylabel(ylabel)
            plt.title(title)
            plt.legend()
            plt.grid(True)

        # Keep the bottom row empty so the 3x2 grid stays symmetric.
        plt.subplot(3, 2, 5)
        plt.axis('off')
        plt.subplot(3, 2, 6)
        plt.axis('off')

        plt.tight_layout()
        plt.savefig(save_path)
        print(f"Graphics saved  '{save_path}'")
        plt.show()
    except Exception as e:
        print(f"Error while creating graphs: {e}")

# %%
# === Load the best checkpoint for test-set evaluation ===
model = Improved3DUNet(in_channels=4, out_channels=4, base_filters=16).to(device)  # must match the trained architecture
model.load_state_dict(torch.load("best_brats_model_dice.pth"))
model.eval()

all_preds = []
all_targets = []

# === Accumulate voxel-wise predictions over the whole test set ===
with torch.no_grad():
    for x, y in test_loader:
        x, y = x.to(device), y.to(device)

        y_pred = model(x)
        preds = torch.argmax(y_pred, dim=1)

        # Flatten to 1-D voxel label lists for the sklearn metrics below.
        all_preds.extend(preds.cpu().numpy().flatten())
        all_targets.extend(y.cpu().numpy().flatten())

# === Class names for BraTS labels 0-3 (after remapping label 4 -> 3) ===
class_names = ["Background", "Necrosis", "Swelling", "Active tumor"]

# === Classification report (voxel-wise precision/recall/F1) ===
print("=== Classification Report ===")
print(classification_report(all_targets, all_preds, target_names=class_names, digits=4))

# === Confusion matrix over voxels ===
print("=== Confusion Matrix (by voxels)===")
cm = confusion_matrix(all_targets, all_preds, labels=[0, 1, 2, 3])
print(cm)

# === Dice and IoU for a single class ===
def dice_iou_per_class(y_true, y_pred, class_id):
    """Binary Dice and IoU for one class over flattened voxel label lists."""
    true_bin = (np.array(y_true) == class_id).astype(np.uint8)
    pred_bin = (np.array(y_pred) == class_id).astype(np.uint8)

    overlap = np.sum(true_bin * pred_bin)
    size_sum = np.sum(true_bin) + np.sum(pred_bin)
    # IoU denominator: voxels positive in either mask.
    either = np.sum(true_bin + pred_bin > 0)

    dice = (2. * overlap + 1e-6) / (size_sum + 1e-6)
    iou = (overlap + 1e-6) / (either + 1e-6)
    return dice, iou


# Per-class segmentation metrics table (Dice/IoU plus sklearn P/R/F1).
print("\n=== Segmentation metrics for each tissue type ===")
print(f"{'Tumor Region':<20} {'Dice':<10} {'IoU':<10} {'Precision':<10} {'Recall':<10} {'F1-score':<10}")
for i, name in enumerate(class_names):
    dice, iou = dice_iou_per_class(all_targets, all_preds, i)

    # One-vs-rest binarisation for the sklearn metrics.
    y_true_bin = (np.array(all_targets) == i).astype(np.uint8)
    y_pred_bin = (np.array(all_preds) == i).astype(np.uint8)
    precision = precision_score(y_true_bin, y_pred_bin, zero_division=0)
    recall = recall_score(y_true_bin, y_pred_bin, zero_division=0)
    f1 = f1_score(y_true_bin, y_pred_bin, zero_division=0)
    # FIX: `{recall:<10.4}` was missing the `f` presentation type and
    # `{precision:10.4f}` was missing `<` alignment, misaligning the table.
    print(f"{name:<20} {dice:<10.4f} {iou:<10.4f} {precision:<10.4f} {recall:<10.4f} {f1:<10.4f}")

# %%
# Render the per-epoch curves collected during the training loop above.
plot_training_results(train_losses, val_losses, train_dice_scores, val_dice_scores, train_accuracies, val_accuracies, train_ious, val_ious)

# %%
# === Reload the best checkpoint (architecture must match training) ===
model = Improved3DUNet(in_channels=4, out_channels=4, base_filters=16).to(device)
model.load_state_dict(torch.load("best_brats_model_dice.pth"))
model.eval()

all_preds = []
all_targets = []

# === Collect flattened voxel-wise predictions over the test set ===
with torch.no_grad():
    for x, y in test_loader:
        x, y = x.to(device), y.to(device)
        y_pred = model(x)
        preds = torch.argmax(y_pred, dim=1)
        all_preds.extend(preds.cpu().numpy().flatten())
        all_targets.extend(y.cpu().numpy().flatten())

# === Dice and IoU per class ===
# NOTE(review): duplicate of dice_iou_per_class defined in the previous cell;
# kept so this cell can run standalone.
def dice_iou_per_class(y_true, y_pred, class_id):
    """Binary Dice and IoU for one class over flattened voxel label lists."""
    y_true_bin = (np.array(y_true) == class_id).astype(np.uint8)
    y_pred_bin = (np.array(y_pred) == class_id).astype(np.uint8)
    intersection = np.sum(y_true_bin * y_pred_bin)
    union = np.sum(y_true_bin) + np.sum(y_pred_bin)
    # IoU denominator: voxels positive in either mask.
    iou_denom = np.sum(y_true_bin + y_pred_bin > 0)
    dice = (2. * intersection + 1e-6) / (union + 1e-6)
    iou = (intersection + 1e-6) / (iou_denom + 1e-6)
    return dice, iou

# === Calculating metrics for each class ===
class_names = ["Background", "Necrosis", "Swelling", "Active tumor"]
metrics_data = []

# Convert the (large) flattened voxel lists to arrays once; the original
# rebuilt both arrays on every loop iteration.
targets_arr = np.array(all_targets)
preds_arr = np.array(all_preds)

for i, name in enumerate(class_names):
    dice, iou = dice_iou_per_class(all_targets, all_preds, i)
    # One-vs-rest binarization for sklearn's binary metrics.
    y_true_bin = (targets_arr == i).astype(np.uint8)
    y_pred_bin = (preds_arr == i).astype(np.uint8)
    precision = precision_score(y_true_bin, y_pred_bin, zero_division=0)
    recall = recall_score(y_true_bin, y_pred_bin, zero_division=0)
    f1 = f1_score(y_true_bin, y_pred_bin, zero_division=0)
    metrics_data.append({
        "Tumor Region": name,
        "Dice": dice,
        "IoU": iou,
        "Precision": precision,
        "Recall": recall,
        "F1-score": f1
    })

# === Saving in CSV ===
df_metrics = pd.DataFrame(metrics_data)
df_metrics.to_csv("segmentation_metrics.csv", index=False)
print("The metrics table is saved in 'segmentation_metrics.csv'")

# === Construction of a schedule ===
# Grouped bar chart: one cluster per tissue type, one bar per metric.
plt.figure(figsize=(10, 6))
bar_width = 0.12
x = np.arange(len(class_names))

metric_keys = ["Dice", "IoU", "Precision", "Recall", "F1-score"]
for offset, metric in enumerate(metric_keys):
    heights = [row[metric] for row in metrics_data]
    plt.bar(x + offset * bar_width, heights, width=bar_width, label=metric)

# Center the tick labels under the middle (third) bar of each cluster.
plt.xticks(x + bar_width * 2, class_names)
plt.ylabel("Value")
plt.title("Segmentation metrics by tissue type (test sample)")
plt.ylim(0, 1.05)
plt.legend()
plt.grid(True, linestyle='--', alpha=0.5)
plt.tight_layout()
plt.savefig("segmentation_metrics_plot.png")
print("Metrics graph saved in 'segmentation_metrics_plot.png'")

# %%
# === Calculating metrics on the test sample ===
# Record loss, Dice, accuracy and IoU for every test batch.
model.eval()
test_losses = []
test_dices = []
test_accuracies = []
test_ious = []

with torch.no_grad():
    for vol, mask in test_loader:
        vol = vol.to(device)
        mask = mask.to(device)
        logits = model(vol)

        test_losses.append(criterion(logits, mask).item())

        batch_dice = dice_coefficient(logits, mask)
        batch_iou, batch_acc = iou_and_accuracy(logits, mask)

        test_dices.append(batch_dice)
        test_accuracies.append(batch_acc)
        test_ious.append(batch_iou)

# === Graphing the test ===
# NOTE: one point per test batch, so the x-axis is labeled "Batch".
epochs = list(range(1, len(test_losses) + 1))
plt.figure(figsize=(16, 10))

# (subplot position, series, legend label, color, y-axis label, title)
panels = [
    (1, test_losses, 'Test Loss', 'red', "Loss", "Test Loss"),
    (2, test_dices, 'Test Dice', 'green', "Dice Score", "Test Dice"),
    (3, test_accuracies, 'Test Accuracy', 'blue', "Accuracy", "Test Accuracy"),
    (4, test_ious, 'Test IoU', 'purple', "IoU", "Test IoU"),
]
for pos, series, label, color, ylabel, title in panels:
    plt.subplot(2, 2, pos)
    plt.plot(epochs, series, label=label, color=color)
    plt.xlabel("Batch")
    plt.ylabel(ylabel)
    plt.title(title)
    plt.grid(True)

plt.tight_layout()
plt.savefig("test_metrics_plot.png")
plt.show()

# %%
# RGB palette (0-255) used to render segmentation labels as color images.
SEG_COLORS = {
    0: (0, 0, 0),        # Background - black
    1: (255, 0, 0),      # Necrosis - red
    2: (0, 255, 0),      # Edema - green
    3: (255, 255, 0)     # Active tumor - yellow
}


def label_to_color(seg_slice):
    """Convert a 2-D label slice into an (H, W, 3) uint8 RGB image.

    Each label value in SEG_COLORS is painted with its palette color;
    unknown labels remain black (the zero-initialized background).
    """
    h, w = seg_slice.shape
    rgb = np.zeros((h, w, 3), dtype=np.uint8)
    for label_value, rgb_triplet in SEG_COLORS.items():
        rgb[seg_slice == label_value] = rgb_triplet
    return rgb

def show_random_visuals(patients, data_dir, title="Train", num_cases=3, slice_range=(60, 100)):
    """Display a random axial slice for up to `num_cases` random patients.

    Each row shows the four MRI modalities (FLAIR, T1, T1ce, T2) in gray
    plus the color-coded segmentation mask in the fifth column.

    Args:
        patients: list of case IDs (directory names under `data_dir`).
        data_dir: directory containing one sub-folder per case.
        title: text appended to the figure's suptitle.
        num_cases: maximum number of rows (patients) to draw.
        slice_range: inclusive (low, high) range for the random slice index.
    """
    modalities = ["flair", "t1", "t1ce", "t2"]
    selected = random.sample(patients, min(num_cases, len(patients)))

    n_rows = len(selected)
    plt.figure(figsize=(18, 6 * n_rows))

    for row, case_id in enumerate(selected):
        case_path = os.path.join(data_dir, case_id)

        # Load all four modality volumes for this case.
        volumes = [
            nib.load(os.path.join(case_path, f"{case_id}_{mod}.nii.gz")).get_fdata()
            for mod in modalities
        ]
        seg = nib.load(os.path.join(case_path, f"{case_id}_seg.nii.gz")).get_fdata().astype(np.uint8)

        # One random axial slice within the requested range for the whole row.
        slice_idx = random.randint(*slice_range)

        for col, volume in enumerate(volumes):
            plt.subplot(n_rows, 5, row * 5 + col + 1)
            plt.imshow(volume[:, :, slice_idx], cmap='gray')
            plt.title(f"{case_id} - {modalities[col].upper()}")
            plt.axis("off")

        plt.subplot(n_rows, 5, row * 5 + 5)
        plt.imshow(label_to_color(seg[:, :, slice_idx]))
        plt.title(f"{case_id} - SEG (color)")
        plt.axis("off")

    plt.suptitle(f"Random examples | {title}", fontsize=16)
    plt.tight_layout()
    plt.show()

# Load the patient splits and show random examples from each one.
train_patients = read_patients("brats2020/train.txt")
val_patients   = read_patients("brats2020/val.txt")
test_patients  = read_patients("brats2020/test.txt")
data_dir = "brats2020//BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData"

for split_patients, split_title in [(train_patients, "Train"),
                                    (val_patients, "Validation"),
                                    (test_patients, "Test")]:
    show_random_visuals(split_patients, data_dir, title=split_title)

# %%
# Set sizes: patients per split, re-read from the split files.
n_train = len(read_patients("brats2020/train.txt"))
n_val = len(read_patients("brats2020/val.txt"))
n_test = len(read_patients("brats2020/test.txt"))
total = n_train + n_val + n_test

# Data for the bar chart.
labels = ['Train', 'Validation', 'Test']
counts = [n_train, n_val, n_test]
percents = [c / total * 100 for c in counts]

# Building a histogram
plt.figure(figsize=(8, 6))
bars = plt.bar(labels, counts, color=['skyblue', 'orange', 'green'])

# Annotate each bar with its absolute count and percentage share.
for bar, count, pct in zip(bars, counts, percents):
    x_center = bar.get_x() + bar.get_width() / 2
    plt.text(x_center, bar.get_height() + 1,
             f"{count} ({pct:.1f}%)", ha='center', va='bottom', fontsize=11)

plt.title("Distribution of patients between sets", fontsize=14)
plt.ylabel("Number of patients")
plt.grid(axis='y', linestyle='--', alpha=0.5)

# Save and display the figure.
plt.tight_layout()
plt.savefig("brats2020/dataset_split_histogram_percent.png")
plt.show()


