import numpy as np
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from light_training.utils.files_helper import save_new_model_and_delete_last
from light_training.evaluation.metric import dice, hausdorff_distance_95, jaccard, recall, fscore
from medpy import metric
import json 
from models.unet_soft_loss.uent2d import UNet2D
from models.unet_soft_loss.uent3d import UNet3D
from models.unet_soft_loss.uent25d import UNet25D
from models.att_unet_ml.att_unet25d import AttUNet25D, AttUNet25D_32
from models.att_unet_ml.att_unet2d import AttUNet2D, AttUNet2D_32
from models.att_unet_ml.att_unet3d import AttUNet3D, AttUNet3D_32

from monai.networks.nets.segresnet import SegResNet
from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal
import argparse
import yaml 
from models.hdensenet_huanhu.net import dense_rnn_net

from monai.networks.nets.vnet import VNet
from models.modelgenesis.unet3d import UNet3DModelGen
from models.transvw.models.ynet3d import UNet3DTransVW
from monai.networks.nets.attentionunet import AttentionUnet
from nnunet.network_architecture.generic_UNet import Generic_UNet
import random
import glob 
from torch.utils.data import Dataset, DataLoader
import h5py
from einops import rearrange
from models.swinunet2d.config import get_config
from models.swinunet2d.swinunet import SwinUnet
from models.unet_nested.unet_nested_3d import UNet_Nested3D
import SimpleITK as sitk 
set_determinism(123)
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Run configuration used by the evaluation loop below.
max_epoch = 300
batch_size = 2
val_every = 10
num_gpus = 2
device = "cuda:0"
thres = 0.7

# Pin every RNG the pipeline touches for reproducible evaluation.
torch.backends.cudnn.deterministic = True
random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
np.random.seed(1)

import os

# NOTE(review): CUDA_VISIBLE_DEVICES is assigned a second time here. If the
# CUDA context was already initialised above (torch.cuda.manual_seed), this
# reassignment has no effect on device visibility — confirm which of the two
# settings is actually intended.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
device = "cuda:1"  # overrides the "cuda:0" set above

# IBSR image volumes and their tissue segmentations, paired by sorted order.
images_paths = sorted(glob.glob("/home/xingzhaohu/sharefs/datasets/huanhu/IBSR_3/ii_IBSR_*_ana_strip.nii.gz"))
seg_paths = sorted(glob.glob("/home/xingzhaohu/sharefs/datasets/huanhu/IBSR_3/IBSR_*_segTRI_ana.nii.gz"))
print(images_paths)
print(seg_paths)


def compute_metric(pred, gt, thres_mid=0.7, thres_high=0.9):
    """Compute Dice, HD95 and recall for one binary mask pair.

    Args:
        pred: binary prediction mask (numpy array).
        gt: binary ground-truth mask (numpy array).
        thres_mid: Dice threshold for the first 0/1 "volumed" indicator
            (previously hard-coded to 0.7).
        thres_high: Dice threshold for the second 0/1 "volumed" indicator
            (previously hard-coded to 0.9).

    Returns:
        Tuple ``(dice, hd95, recall, volumed_mid, volumed_high)`` where the
        last two are 0/1 indicators of the Dice score exceeding the
        corresponding threshold.
    """
    if pred.sum() > 0 and gt.sum() > 0:
        # Both masks non-empty: medpy distance metrics are well defined.
        di = metric.binary.dc(pred, gt)
        hd = metric.binary.hd95(pred, gt)
        recal = metric.binary.recall(pred, gt)
    elif pred.sum() > 0 and gt.sum() == 0:
        # NOTE(review): a non-empty prediction on an empty ground truth is
        # scored as perfect (dice=1) here; the more common convention is 0.
        # Kept as-is to preserve existing results — confirm intent.
        di = 1
        hd = 0
        recal = 1
    else:
        # Empty prediction (regardless of gt): scored as a complete miss,
        # including the case where both masks are empty.
        di = 0
        hd = 0
        recal = 0

    volumed_7 = 1 if di > thres_mid else 0
    volumed_9 = 1 if di > thres_high else 0

    return di, hd, recal, volumed_7, volumed_9

class Dataset3d(Dataset):
    """Volume dataset reading NIfTI image/label pairs with SimpleITK.

    Each item is cropped along the first (slice) axis to the sub-volume that
    actually contains labels, z-score normalised, and returned as a pair
    ``(image, label)`` with a leading channel dimension on the image.
    """

    def __init__(
        self,
        image_paths,
        label_paths,
        has_tqdm=True
    ) -> None:
        """
        Args:
            image_paths: paths of the image volumes, index-aligned with labels.
            label_paths: paths of the matching segmentation volumes.
            has_tqdm: unused; kept for interface compatibility.
        """
        super(Dataset3d, self).__init__()
        self.image_paths = image_paths
        self.label_paths = label_paths

    def _load_cache_item(self, image_path, label_path):
        """Load one image/label pair and crop empty leading/trailing slices."""
        img = sitk.GetArrayFromImage(sitk.ReadImage(image_path))
        label = sitk.GetArrayFromImage(sitk.ReadImage(label_path))

        # Indices of slices containing at least one labelled voxel.
        nonempty = [i for i in range(label.shape[0]) if label[i].sum() != 0]
        if nonempty:
            # Fix: the previous backward scan used range(..., 0, -1) and
            # never examined slice 0, producing an empty crop when only the
            # first slice was labelled.
            start, end = nonempty[0], nonempty[-1]
            img = img[start:end + 1]
            label = label[start:end + 1]
        # If no slice is labelled at all, keep the full volume instead of
        # returning an empty crop.
        return img, label

    def __getitem__(self, index):
        image, label = self._load_cache_item(self.image_paths[index], self.label_paths[index])

        # Add a channel dimension and z-score normalise over the whole volume.
        image = np.expand_dims(image, axis=0)
        image = (image - np.mean(image)) / np.std(image)

        return image.astype(np.float32), label.astype(np.uint8)

    def __len__(self):
        return len(self.image_paths)

class FuseUNet(nn.Module):
    """Ensemble wrapper summing the logits of a 3D, a 2.5D and a 2D UNet."""

    def __init__(self, unet3d, unet25d, unet2d) -> None:
        super().__init__()
        # Kept as named attributes: callers load per-branch checkpoints
        # via model.unet3d / model.unet25d / model.unet2d.
        self.unet3d = unet3d
        self.unet25d = unet25d
        self.unet2d = unet2d

    def forward(self, x):
        # Unweighted logit addition across the three branches.
        fused = self.unet3d(x)
        fused = fused + self.unet25d(x)
        fused = fused + self.unet2d(x)
        return fused

class BraTSTrainer(Trainer):
    """Evaluation trainer for IBSR brain-tissue segmentation (CSF/GM/WM)."""

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
        self.best_mean_dice = 0.0

    def get_input(self, batch):
        """Resize an (image, label) batch to the model's expected input size."""
        image, label = batch
        # swinunet2d expects 224x224 in-plane inputs; every other model 128x256.
        size = (64, 224, 224) if self.model_name == "swinunet2d" else (64, 128, 256)
        image = nn.functional.interpolate(image, size=size, mode="trilinear", align_corners=False)
        # Nearest-neighbour resampling keeps labels integral; it needs a
        # temporary channel dimension.
        label = torch.unsqueeze(label, dim=1)
        label = nn.functional.interpolate(label, size=size, mode="nearest")
        label = torch.squeeze(label, dim=1).long()
        return image, label

    def validation_step(self, batch, pred_type=None):
        """Run one inference step and return per-tissue metrics.

        Returns the concatenation of ``compute_metric`` tuples for CSF, GM
        and WM (15 values in total).
        """
        image, label = self.get_input(batch)

        if pred_type is None:
            logits = self.model(image)
        else:
            logits = self.model(image, pred_type)
        pred = logits.argmax(dim=1).cpu().numpy()
        gt = label.cpu().numpy()

        # Tissue label ids: 1=CSF, 2=GM, 3=WM.
        all_metric = ()
        for cls_id in (1, 2, 3):
            all_metric = all_metric + compute_metric(pred == cls_id, gt == cls_id)

        return all_metric

if __name__ == "__main__":
    
    def get_metrics(v_mean, v_out, model_name, fold):
        """Summarise per-case validation metrics for one fold.

        Each row of ``v_out`` is a 15-tuple laid out as
        [csf_dice, csf_hd, csf_recall, csf_7, csf_9,
         gm_dice,  gm_hd,  gm_recall,  gm_7,  gm_9,
         wm_dice,  wm_hd,  wm_recall,  wm_7,  wm_9].

        Returns two dicts: per-tissue summary (mean dice/hd/recall plus the
        fraction of cases with dice above 0.7 / 0.9) and per-case means
        averaged over the three tissues.
        """
        print(f"model name is {model_name}, fold is {fold}")
        print(f"v_mean is {v_mean}")

        # Column offset of each tissue class inside a v_out row.
        offsets = {"csf": 0, "gm": 5, "wm": 10}

        per_class = {}
        for cls, off in offsets.items():
            flag_7 = np.array([row[off + 3] for row in v_out])
            flag_9 = np.array([row[off + 4] for row in v_out])
            ones = np.ones_like(flag_7)
            # recall against an all-ones label == fraction of cases whose
            # dice exceeded the threshold.
            per_class[cls] = [
                v_mean[off].item(),
                v_mean[off + 1].item(),
                v_mean[off + 2].item(),
                recall(flag_7, ones),
                recall(flag_9, ones),
            ]

        # Per-case means over the three tissues (dice / hd / recall columns).
        dice_rows = [[row[off].item() for row in v_out] for off in offsets.values()]
        hd_rows = [[row[off + 1].item() for row in v_out] for off in offsets.values()]
        recall_rows = [[row[off + 2].item() for row in v_out] for off in offsets.values()]
        mean_dices = torch.tensor(dice_rows).mean(dim=0).numpy().tolist()
        mean_hds = torch.tensor(hd_rows).mean(dim=0).numpy().tolist()
        mean_recalls = torch.tensor(recall_rows).mean(dim=0).numpy().tolist()

        print("*" * 100)

        return per_class, {
            "mean_dices": mean_dices,
            "mean_hds": mean_hds,
            "mean_recalls": mean_recalls,
        }

    # Single-GPU evaluation harness; logdir is unused ("None") because this
    # script only loads checkpoints and runs validation.
    trainer = BraTSTrainer(env_type="pytorch",
                            max_epochs=max_epoch,
                            batch_size=batch_size,
                            device=device,
                            logdir="None",
                            val_every=val_every,
                            num_gpus=num_gpus,
                            master_port=17751,
                            training_script=__file__,
                            )

    # 4 output classes: background + CSF/GM/WM.
    in_ch = 1  # NOTE(review): unused below — the models hard-code their input channels.
    out_ch = 4
    segresnet = SegResNet(3, 16, 1, out_ch)
    vnet = VNet(3, 1, out_ch, bias=True)

    config = get_config("/home/xingzhaohu/jiuding_code/mutual_learning_huanhu/models/swinunet2d/swin_tiny_patch4_window7_224_lite.yaml")
    swinunet = SwinUnet(config, img_size=224, num_classes=out_ch, in_channels=3)

    _, transbts = TransBTS(dataset='ibsr', _conv_repr=True, _pe_type="learned")

    densehnet = dense_rnn_net(64, out_ch, drop_rate=0.0)

    unet_plus = UNet_Nested3D(in_channels=1, n_classes=out_ch, is_deconv=False)

    # Attention-UNet variants trained without mutual learning.
    attunet3d = AttUNet3D_32(1, out_ch)
    attunet25d = AttUNet25D_32(3, out_ch)
    attunet2d = AttUNet2D_32(1, out_ch)

    # Attention-UNet variants trained with mutual learning.
    attunet3d_ml = AttUNet3D(1, out_ch)
    attunet25d_ml = AttUNet25D(3, out_ch)
    attunet2d_ml = AttUNet2D(1, out_ch)

    # Plain UNet variants without mutual learning.
    unet3d = UNet3D(1, out_ch)
    unet25d = UNet25D(3, out_ch)
    unet2d = UNet2D(1, out_ch)

    # Same architectures, separate instances for the mutual-learning weights.
    unet3d_ml = UNet3D(1, out_ch)
    unet25d_ml = UNet25D(3, out_ch)
    unet2d_ml = UNet2D(1, out_ch)
    
    # NOTE(review): named "nnunet" but is a MONAI BasicUNet — confirm naming.
    nnunet = BasicUNet(spatial_dims=3, 
                    in_channels=1, 
                    out_channels=out_ch,
                    features=[16, 16, 32, 64, 128, 16])
    

    # The fuse model sums the logits of the three plain (non-ML) UNets.
    fuse_unet = FuseUNet(unet3d, unet25d, unet2d)

    # Index-aligned with model_logs and models below.
    model_names = ["segresnet", "vnet", "swinunet2d", "transbts", 
                    "densehnet", "unet_plus", "att_unet3d", "att_unet25d", 
                    "att_unet2d", "att_unet3d_ml", "att_unet25d_ml", "att_unet2d_ml",
                    "unet3d", "unet25d", "unet2d", "unet3d_ml", "unet25d_ml", "unet2d_ml",
                    "nnunet", "fuse_unet"
                    ]
    
    model_logs = [
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/segresnet_e80/model/best_model_0.8887_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/segresnet_e80/model/best_model_0.8854_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/segresnet_e50/model/best_model_0.8866_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/segresnet_e50/model/best_model_0.8933_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/segresnet_e50/model/best_model_0.8745_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/segresnet_e80/model/best_model_0.8992_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/vnet/model/best_model_0.7845_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/vnet/model/best_model_0.8523_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/vnet/model/best_model_0.8770_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/vnet/model/best_model_0.8741_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/vnet/model/best_model_0.8423_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/vnet/model/best_model_0.7944_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/swinunet2d/model/best_model_0.8672_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/swinunet2d/model/final_model_0.8611_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/swinunet2d/model/best_model_0.8829_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/swinunet2d/model/final_model_0.8754_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/swinunet2d/model/final_model_0.8736_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/swinunet2d/model/final_model_0.8727_6.pt",
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/tranbts/model/best_model_0.8568_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/tranbts/model/best_model_0.8688_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/tranbts/model/best_model_0.8972_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/tranbts/model/final_model_0.8910_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/tranbts/model/final_model_0.8731_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/tranbts/model/final_model_0.8517_6.pt",
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/hdenseunet/model/final_model_0.6672_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/hdenseunet/model/best_model_0.8542_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/hdenseunet/model/best_model_0.8878_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/hdenseunet/model/best_model_0.8816_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/hdenseunet/model/best_model_0.8698_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/hdenseunet/model/best_model_0.8784_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_plus/model/best_model_0.8805_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_plus/model/best_model_0.8626_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_plus/model/best_model_0.8471_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_plus/model/best_model_0.8721_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_plus/model/best_model_0.8905_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_plus/model/best_model_0.8548_6.pt",
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model3d_0.8785_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model3d_0.8621_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model3d_0.8931_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model3d_0.8846_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model3d_0.8721_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model3d_0.8716_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model25d_0.8644_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model25d_0.8688_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model25d_0.8662_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model25d_0.8485_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model25d_0.8654_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model25d_0.8573_6.pt",
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model2d_0.8444_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model2d_0.8447_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model2d_0.8495_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model2d_0.8344_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model2d_0.8424_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_no_ml/model/best_model2d_0.8325_6.pt",
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model3d_0.8995_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e200/model/final_model3d_0.8725_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e200/model/final_model3d_0.8989_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/final_model3d_0.8933_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/final_model3d_0.8821_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/final_model3d_0.8585_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model25d_0.8883_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model25d_0.8804_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model25d_0.8821_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model25d_0.8803_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model25d_0.8814_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model25d_0.8614_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model2d_0.8594_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model2d_0.8583_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model2d_0.8702_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model2d_0.8609_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model2d_0.8637_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/attunet_ml_e300/model/best_model2d_0.8524_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model3d_0.8743_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model3d_0.8612_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model3d_0.8841_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model3d_0.8874_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model3d_0.8770_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model3d_0.8761_6.pt",
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model25d_0.8425_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model25d_0.8303_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model25d_0.8434_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model25d_0.8416_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model25d_0.8484_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model25d_0.8364_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model2d_0.8309_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model2d_0.8102_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model2d_0.8526_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model2d_0.8380_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model2d_0.8310_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_no_ml/model/final_model2d_0.7951_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8906_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8804_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8926_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8988_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8927_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8826_6.pt",
        ],
      
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8707_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8546_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8832_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8737_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8691_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8535_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8432_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8303_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8558_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8486_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8395_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8254_6.pt"
        ],
        
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs_nnunet/nnunet_model_e300_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs_nnunet/nnunet_model_e300_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs_nnunet/nnunet_model_e300_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs_nnunet/nnunet_model_e300_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs_nnunet/nnunet_model_e300_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs_nnunet/nnunet_model_e300_6.pt"
        ],

        [
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8906_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8804_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8926_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8988_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8927_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model3d_0.8826_6.pt",
        ],
      
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8707_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8546_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8832_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8737_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8691_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model25d_0.8535_6.pt"
        ],
        [
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8432_1.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8303_2.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8558_3.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8486_4.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8395_5.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning_ibsr/logs/unet_ml_e250/model/best_model2d_0.8254_6.pt"
        ],
        ]

        
    ]
    # Model instances, index-aligned with model_names and model_logs.
    models = [segresnet, vnet, swinunet, transbts, densehnet, 
              unet_plus, attunet3d, attunet25d, attunet2d,
              attunet3d_ml, attunet25d_ml, attunet2d_ml, unet3d, 
              unet25d, unet2d, unet3d_ml, unet25d_ml, unet2d_ml, 
              nnunet, fuse_unet]
    
    all_res = {}
    # Hoisted out of the per-model loop (was re-imported every iteration).
    from sklearn.model_selection import KFold  # 6-fold CV over the 18 IBSR cases

    # Fix: the original reused `i`/`j` both as the model index and as the
    # fold-split indices, shadowing the outer loop variable.
    for model_idx, model_name in enumerate(model_names):
        model = models[model_idx]

        # Only segresnet is evaluated in this run; remove this guard to
        # evaluate every model in model_names.
        if model_name != "segresnet":
            continue

        model_log = model_logs[model_idx]
        fold_res = []

        # Per-case means accumulated across all folds.
        dices = []
        hds = []
        recalls = []

        X = np.arange(18)
        kfold = KFold(n_splits=6, shuffle=False)
        fold = 0
        for train_index, val_index in kfold.split(X):
            fold += 1

            print('Train_index: ', train_index, 'Validation_index:', val_index)
            # Build only the validation split: this script runs inference with
            # pre-trained checkpoints, so the training split is never used.
            img_paths_val = [images_paths[k] for k in val_index]
            seg_paths_val = [seg_paths[k] for k in val_index]

            val_ds = Dataset3d(img_paths_val, seg_paths_val)

            trainer.model_name = model_name
            trainer.model = model
            trainer.model.eval()
            if model_name == "fuse_unet":
                # fuse_unet loads one checkpoint per sub-network
                # (model_log is a list of three per-fold checkpoint lists).
                trainer.model.unet3d.load_state_dict(torch.load(model_log[0][fold - 1]))
                trainer.model.unet25d.load_state_dict(torch.load(model_log[1][fold - 1]))
                trainer.model.unet2d.load_state_dict(torch.load(model_log[2][fold - 1]))
            else:
                trainer.load_state_dict(model_log[fold - 1])

            v_mean, v_out = trainer.validation_single_gpu(val_dataset=val_ds, pred_type=None)
            m1, m2 = get_metrics(v_mean, v_out, model_name, fold)
            fold_res.append(m1)
            dices.extend(m2["mean_dices"])
            hds.extend(m2["mean_hds"])
            recalls.extend(m2["mean_recalls"])

        data = {
            f"{model_name}": fold_res,
            "dices": dices,
            "hds": hds,
            "recalls": recalls,
        }
        # Append one JSON line per model so repeated runs accumulate results.
        with open("./ibsr_res.txt", "a+") as f:
            f.write(json.dumps(data) + "\n")
        print(f"{model_name}:数据保存成功......")


    # logdir = "./logs_brats/swinunetr/model/final_model_0.8455.pt"
    # logdir = "./logs_brats/unet2d/model/final_model_0.8090.pt"
    # logdir = "./logs_brats/unet2d/model/best_model_0.8276.pt"
    # logdir = "./logs_brats/unet_mutual_learning/model/best_model2d_0.8518.pt"
    # logdir = "./logs_brats/unet_mutual_learning_2_nosoft_w001/model/final_model_0.8528.pt"
    
    # logdir = "./logs_brats/unet3d/model/final_model_0.8237.pt"
    # logdir = "./logs_brats/unet25d/model/best_model_0.8389.pt"

    # logdir = "./logs_brats/unetr/model/best_model_0.8545.pt"
    # logdir = "./logs_brats/tranbts/model/best_model_0.8394.pt"
    # logdir = "./logs_brats/unet_mutual_learning_2/model/best_model_0.8571.pt"
    # logdir = "./logs_brats/segresnet/model/final_model_0.8434.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats_nnunet_new/nnunet_model_e1000.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/swinunet2d/model/best_model_0.8530.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/vnet/model/best_model_0.7739.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/modelsgenesis/model/best_model_0.8675.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/transvw/model/best_model_0.8721.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/transvw/model/final_model_0.8671.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats_nnunet/nnunet_model.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/attentionUNet/model/best_model_0.8346.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/attentionUNet/model/final_model_0.8157.pt"
    # logdir = "./logs_brats/diffusion_seg/model/best_model_0.8236.pt"
    # logdir = "./logs_brats/diffusion_seg_bce_more_params/model/final_model_0.8515.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/unet_mutual_learning_2_nosoft_w001_e1000/model/best_model_0.8678.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/unet_mutual_learning_2_nosoft_w001_e1000/model/final_model_0.8673.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/unet_mutual_learning_2_no_softloss/model/best_model_0.8536.pt"
    # logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/cross_unet_mutual_learning_2/model/final_model_0.8658.pt"
    # trainer.load_state_dict(logdir)
    
    # v_mean, v_out = trainer.validation_single_gpu(val_dataset=test_ds)

    # print(f"v_mean is {v_mean}")
    # # print(f"v_out is {v_out}")

    # wt_f1 = np.array([d[-6] for d in v_out])
    # tc_f1 = np.array([d[-5] for d in v_out])
    # et_f1 = np.array([d[-4] for d in v_out])

    # wt_f1_9 = np.array([d[-3] for d in v_out])
    # tc_f1_9 = np.array([d[-2] for d in v_out])
    # et_f1_9 = np.array([d[-1] for d in v_out])

    # label = np.ones_like(wt_f1)

    # wt_f1 = recall(wt_f1, label)
    # tc_f1 = recall(tc_f1, label)
    # et_f1 = recall(et_f1, label)

    # wt_f1_9 = recall(wt_f1_9, label)
    # tc_f1_9 = recall(tc_f1_9, label)
    # et_f1_9 = recall(et_f1_9, label)

    # print(f"wt_f1 is {wt_f1}, tc_f1 is {tc_f1}, et_f1 is {et_f1}, wt_f1_9 is {wt_f1_9}, tc_f1_9 is {tc_f1_9}, et_f1_9 is {et_f1_9}")