
from nnunet_dataloader.ds import get_loader
import numpy as np
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from utils.utils import SoftmaxLoss
from light_training.evaluation.metric import dice
from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from medpy import metric
import glob 
import random 
# Reproducibility: fix every RNG seed and force deterministic cuDNN kernels
# so repeated runs of a fold produce identical results.
torch.backends.cudnn.deterministic = True
random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
np.random.seed(1)
import os
from tqdm import tqdm

# Restrict the process to physical GPU 1; inside the process it is "cuda:0".
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

device = "cuda:0"
save_dir = "./logs_nnunet/"  # output directory for per-fold checkpoints

os.makedirs(save_dir, exist_ok=True)

# IBSR dataset: skull-stripped T1 volumes paired with tissue segmentations.
# sorted() is meant to keep the two lists index-aligned — this assumes the
# image and label filename patterns sort in the same order; TODO confirm.
images_paths = sorted(glob.glob("/home/xingzhaohu/sharefs/datasets/huanhu/IBSR_3/ii_IBSR_*_ana_strip.nii.gz"))
seg_paths = sorted(glob.glob("/home/xingzhaohu/sharefs/datasets/huanhu/IBSR_3/IBSR_*_segTRI_ana.nii.gz"))
print(images_paths)
print(seg_paths)


if __name__ == "__main__":

    from sklearn.model_selection import KFold  # k-fold cross-validation

    # 18 subjects split into 6 folds (15 train / 3 validation per fold);
    # shuffle=False keeps the split deterministic across runs.
    X = np.arange(18)
    kfold = KFold(n_splits=6, shuffle=False)
    fold = 0
    for a, b in kfold.split(X):  # split() yields (train_indices, val_indices)
        fold += 1

        print('Train_index: ', a, 'Validation_index:', b)
        # Build this fold's image/label path lists from the split indices.
        img_paths_train = [images_paths[i] for i in a]
        seg_paths_train = [seg_paths[i] for i in a]
        img_paths_val = [images_paths[j] for j in b]
        seg_paths_val = [seg_paths[j] for j in b]

        train_paths = {"image": img_paths_train, "label": seg_paths_train}
        val_paths = {"image": img_paths_val, "label": seg_paths_val}

        # Per-fold checkpoint paths: last-epoch weights and best-validation weights.
        save_name = f"nnunet_model_e300_{fold}.pt"
        save_best_name = f"best_nnunet_model_e300_{fold}.pt"
        save_path = os.path.join(save_dir, save_name)
        save_best_path = os.path.join(save_dir, save_best_name)

        # NOTE(review): train_loader is consumed with next() below (presumably an
        # endless batch iterator yielding {"data", "seg"} dicts), while val_loader
        # is iterated as (image, label) pairs — two different protocols; confirm
        # against nnunet_dataloader.ds.get_loader.
        train_loader, val_loader = get_loader(train_paths, val_paths)
        # loss_func = nn.CrossEntropyLoss()
        loss_func = SoftmaxLoss()
        # 3D U-Net: 1 input channel (T1 volume), 4 output classes
        # (background / CSF / grey matter / white matter).
        model = BasicUNet(spatial_dims=3,
                          in_channels=1,
                          out_channels=4,
                          features=[16, 16, 32, 64, 128, 16])

        model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-3)

        report_loss = 0.0  # loss accumulated since the last report
        index = 0          # global step counter across all epochs of this fold
        best_dice = 0      # best mean validation dice seen so far (this fold)

        for e in range(80):
            for i in tqdm(range(20), total=20):
                index += 1
                model.train()
                data = next(train_loader)
                image = data["data"].to(device)
                label = data["seg"].to(device)
                optimizer.zero_grad()

                # Resample every volume to a fixed (64, 128, 256) grid.
                # Nearest-neighbour for the label map so class ids are never
                # blended by interpolation.
                image = nn.functional.interpolate(image, size=(64, 128, 256), mode="trilinear", align_corners=False)
                label = torch.unsqueeze(label, dim=1)
                label = nn.functional.interpolate(label, size=(64, 128, 256), mode="nearest")
                label = torch.squeeze(label, dim=1).long()

                pred = model(image)
                loss = loss_func(pred, label)
                report_loss += loss.item()

                loss.backward()
                optimizer.step()

                if index % 150 == 0:
                    # report_loss is the *sum* of the last 150 step losses.
                    print(f"epoch is {e}, loss is {report_loss}")
                    report_loss = 0.0

                    # ---- validation ----
                    dices_csf = []
                    dices_gm = []
                    dices_wm = []
                    model.eval()
                    # FIX: run validation under no_grad — the original built and
                    # kept autograd graphs for every validation forward pass,
                    # wasting GPU memory for gradients that are never used.
                    with torch.no_grad():
                        for image, label in val_loader:
                            image = image.to(device)
                            label = label.to(device)

                            # Same fixed-size resampling as in training.
                            image = nn.functional.interpolate(image, size=(64, 128, 256), mode="trilinear", align_corners=False)
                            label = torch.unsqueeze(label, dim=1)
                            label = nn.functional.interpolate(label, size=(64, 128, 256), mode="nearest")
                            label = torch.squeeze(label, dim=1).long()

                            pred = model(image).argmax(dim=1).cpu().numpy()
                            label = label.cpu().numpy()

                            # Per-class dice; class ids: 1 = CSF, 2 = GM, 3 = WM.
                            dices_csf.append(metric.dc(pred == 1, label == 1))
                            dices_gm.append(metric.dc(pred == 2, label == 2))
                            dices_wm.append(metric.dc(pred == 3, label == 3))

                    mean_dice_csf = sum(dices_csf) / len(dices_csf)
                    mean_dice_gm = sum(dices_gm) / len(dices_gm)
                    mean_dice_wm = sum(dices_wm) / len(dices_wm)

                    mean_dice = (mean_dice_csf + mean_dice_gm + mean_dice_wm) / 3

                    # Keep the checkpoint with the best mean validation dice.
                    if best_dice < mean_dice:
                        best_dice = mean_dice

                        torch.save(model.state_dict(), save_best_path)

                    # print(f"mean dice is {mean_dice}, best dice is {best_dice}")
                    print(f"csf: {mean_dice_csf}, gm: {mean_dice_gm}, wm: {mean_dice_wm}, mean: {mean_dice}")

            # Save the latest weights at the end of every epoch (overwritten).
            torch.save(model.state_dict(), save_path)

