import numpy as np
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from light_training.utils.files_helper import save_new_model_and_delete_last
from models.unet_soft_loss.uent2d import UNet2D
from models.unet_soft_loss.uent3d import UNet3D
from models.unet_soft_loss.uent25d import UNet25D
from monai.networks.nets.segresnet import SegResNet
from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
from models.nestedformer.nested_former import NestedFormer
from models.swinunet2d.swinunet import SwinUnet
from einops import rearrange
from monai.networks.nets.vnet import VNet
from models.modelgenesis.unet3d import UNet3DModelGen
from models.transvw.models.ynet3d import UNet3DTransVW
from monai.networks.nets.attentionunet import AttentionUnet
import random
import h5py
from torch.utils.data import Dataset
from models.hdensenet_huanhu.net import dense_rnn_net
from models.unet_nested.unet_nested_3d import UNet_Nested3D
import glob 

# --- Reproducibility: fix all RNG seeds and force deterministic cuDNN kernels. ---
torch.backends.cudnn.deterministic = True
random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
np.random.seed(1)

import os

# Expose four GPUs to this process; how many are actually used is governed
# by num_gpus below (currently 1).
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"

# Output directory for logs/checkpoints of this experiment
# (2D+3D mutual-learning UNet, 200 epochs).
logdir = "./logs_huanhu/unet_ml_2d3d_e200"
# logdir = "./logs_huanhu/unet_no_ml_new"

env = "pytorch"
model_save_path = os.path.join(logdir, "model")
max_epoch = 200
# max_epoch = 1000
batch_size = 1
val_every = 5  # run validation every 5 epochs
num_gpus = 1
device = "cuda:0"

# Entropy threshold: voxels with predictive entropy below this are treated
# as "confident" when gating the soft (mutual-learning) losses.
uncer_threshold = 0.5
# NOTE(review): glob order is filesystem-dependent; sorting data_paths would
# make the 78/rest fold split reproducible across machines — confirm intent.
data_paths = glob.glob("/home/xingzhaohu/sharefs/datasets/huanhu/resize_data/*.h5")
print("数据共：{} 例".format(len(data_paths)))

# Build a fixed two-fold split at a cut point of 78 cases:
# fold 0 trains on the first 78 and tests on the remainder,
# fold 1 swaps the two halves.
train_two_fold = []
test_two_fold = []

# Fold 1: first 78 cases train, the rest test.
train_paths = data_paths[:78]
test_paths = data_paths[78:]
train_two_fold.append(train_paths)
test_two_fold.append(test_paths)

# Fold 2: swap the halves.
train_paths = data_paths[78:]
test_paths = data_paths[:78]
train_two_fold.append(train_paths)
test_two_fold.append(test_paths)

def compute_uncer(pred_out):
    """Voxel-wise predictive entropy (uncertainty) of raw logits.

    Args:
        pred_out: logit tensor of shape (B, C, ...); softmax is taken over
            dim 1 (the class channel).

    Returns:
        Entropy map of shape (B, 1, ...); larger values mean the prediction
        is more uncertain.
    """
    probs = torch.softmax(pred_out, dim=1)
    # torch.special.entr computes -p*log(p) elementwise and returns 0 at
    # p == 0 (the correct limit), avoiding the NaN that the naive
    # -p*log(p) produces when the softmax saturates on extreme logits.
    return torch.sum(torch.special.entr(probs), dim=1, keepdim=True)

class Dataset3d(Dataset):
    """Dataset that eagerly caches every H5 volume (image + mask) in memory.

    Each path must point to an HDF5 file containing an "image" dataset and a
    "label" dataset; the first axis of both is indexed below, so both are
    assumed to be channel-first — TODO confirm against the data pipeline.
    """

    def __init__(
        self,
        paths
    ) -> None:

        super(Dataset3d, self).__init__()
        self.cache_num = len(paths)
        # Always create the cache lists, even for an empty path list, so
        # __len__/__getitem__ never raise AttributeError on an empty dataset
        # (the previous code only created them when cache_num > 0).
        self._cache_image = [None] * self.cache_num
        self._cache_label = [None] * self.cache_num

        for i, path in enumerate(paths):
            self._cache_image[i], self._cache_label[i] = \
                    self._load_cache_item(path)

    def get_labels(self, label):
        """Collapse the stacked label volume into a single binary mask."""
        labels = np.zeros(label.shape[1:])
        labels[label[0] == 1] = 1
        # labels[label[1] == 1] = 2  # second class intentionally disabled
        return labels

    def _load_cache_item(self, d_path):
        """Read one H5 file and return (image, mask) numpy arrays."""
        # Context manager guarantees the file handle is closed even if a
        # dataset read raises.
        with h5py.File(d_path, "r") as h5_image:
            image = h5_image["image"][()]
            label = h5_image["label"][()]

        image = image[0:1]  # keep only the first modality (single-modality input)
        labels = self.get_labels(label)

        return image, labels

    def __getitem__(self, index):
        image = self._cache_image[index]
        # Per-volume z-score normalization.
        # NOTE(review): a constant volume (std == 0) would divide by zero;
        # confirm inputs always have non-zero variance.
        image_mean = image.mean()
        image_std = image.std()
        image = (image - image_mean) / image_std
        # image = (image - image.min()) / (image.max() - image.min())

        label = self._cache_label[index]

        return image.astype(np.float32), label.astype(np.float32)

    def __len__(self):
        return len(self._cache_image)

class FuseUNet(nn.Module):
    """Bundle of a 3D, a 2.5D and a 2D UNet applied to the same input.

    Args:
        in_ch: input channel count passed to every sub-network.
        out_ch: output channel (class) count of every sub-network.
    """

    def __init__(self, in_ch, out_ch) -> None:
        super().__init__()
        self.model3d = UNet3D(in_ch, out_ch)
        self.model25d = UNet25D(in_ch, out_ch)
        self.model2d = UNet2D(in_ch, out_ch)

    def forward(self, x, pred_type="all"):
        """Run one or more branches on x.

        pred_type:
            "mean": element-wise sum of the three branches' logits
                (despite the name, no division is applied).
            "3d" / "25d" / "2d": logits of that single branch.
            "all" (default): tuple (pred_3d, pred_25d, pred_2d).

        Raises:
            ValueError: for an unrecognized pred_type. (The previous code
            silently returned None, which only surfaced later as an opaque
            error at the call site.)
        """
        if pred_type == "mean":
            pred_3d = self.model3d(x)
            pred_25d = self.model25d(x)
            pred_2d = self.model2d(x)
            return pred_3d + pred_25d + pred_2d

        if pred_type == "3d":
            return self.model3d(x)

        if pred_type == "25d":
            return self.model25d(x)

        if pred_type == "2d":
            return self.model2d(x)

        if pred_type == "all":
            pred_3d = self.model3d(x)
            pred_25d = self.model25d(x)
            pred_2d = self.model2d(x)
            return pred_3d, pred_25d, pred_2d

        raise ValueError(f"unknown pred_type: {pred_type!r}")

class SwinUNETR2D(nn.Module):
    """Apply a 2D SwinUNETR slice-by-slice over the depth axis of a 5D volume.

    Every depth slice is treated as an independent 2D image: the depth axis
    is folded into the batch axis before the 2D model runs, and unfolded
    again afterwards.
    """

    def __init__(self) -> None:
        super().__init__()
        # 96x96 2D windows, 4 input channels, 4 output channels.
        self.model = SwinUNETR([96, 96], 4, 4, spatial_dims=2)

    def forward(self, x):
        batch, _, depth, width, height = x.shape
        # (b, c, d, w, h) -> (b*d, c, w, h): fold depth into the batch axis.
        slices = x.permute(0, 2, 1, 3, 4).reshape(batch * depth, -1, width, height)
        out = self.model(slices)
        # (b*d, c', w', h') -> (b, c', d, w', h'): restore the depth axis,
        # reading the output channel/spatial sizes from the model's output.
        _, out_c, out_w, out_h = out.shape
        out = out.reshape(batch, depth, out_c, out_w, out_h).permute(0, 2, 1, 3, 4)
        return out

class BraTSTrainer(Trainer):
    """Two-branch mutual-learning trainer: a 3D UNet and a 2D UNet teach each other.

    Each branch is optimized with its own weighted cross-entropy ("hard") loss
    plus a small (x0.001) uncertainty-gated MSE ("soft") loss that pulls its
    softmax map towards the *other* branch's detached softmax map, restricted
    to voxels where the other branch is confident (predictive entropy below
    the module-level uncer_threshold). The 2.5D branch is currently disabled
    (commented out throughout). Optimization is manual: auto_optim is False
    and training_step performs backward/step/zero_grad itself.
    """
    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py",fold=0):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
    
        # Fold index, used only to tag checkpoint file names.
        self.fold=fold
        # self.model = FuseUNet(in_ch=1, out_ch=2)
        # Best validation Dice seen so far per branch (drives best-checkpointing).
        self.best_mean_dice3d = 0.0
        # self.best_mean_dice25d = 0.0
        self.best_mean_dice2d = 0.0
        # Disable the base Trainer's automatic optimization; this class steps
        # two optimizers by hand in training_step.
        self.auto_optim = False

        # Single-modality input (1 channel), binary segmentation (2 classes).
        self.model3d = UNet3D(in_ch=1, out_ch=2)
        # self.model25d = UNet25D(in_ch=3, out_ch=2)
        self.model2d = UNet2D(in_ch=1, out_ch=2)
        self.optimizer3d = torch.optim.AdamW(self.model3d.parameters(), lr=1e-4, weight_decay=1e-3)
        # self.optimizer25d = torch.optim.AdamW(self.model25d.parameters(), lr=1e-4, weight_decay=1e-3)
        self.optimizer2d = torch.optim.AdamW(self.model2d.parameters(), lr=1e-4, weight_decay=1e-3)
        
        # One warmup+cosine schedule per branch, stepped once per epoch in
        # train_epoch_end.
        self.scheduler3d = LinearWarmupCosineAnnealingLR(self.optimizer3d,
                                                  warmup_epochs=20,
                                                  max_epochs=max_epochs,
                                                  warmup_start_lr=1e-6)
        
        # self.scheduler25d = LinearWarmupCosineAnnealingLR(self.optimizer25d,
        #                                           warmup_epochs=20,
        #                                           max_epochs=max_epochs,
        #                                           warmup_start_lr=1e-6)
        
        self.scheduler2d = LinearWarmupCosineAnnealingLR(self.optimizer2d,
                                                  warmup_epochs=20,
                                                  max_epochs=max_epochs,
                                                  warmup_start_lr=1e-6)
        
        # Class weights [1, 5] upweight the foreground class in the CE loss.
        self.loss_func = nn.CrossEntropyLoss(weight=torch.tensor([1, 5], device=self.device, dtype=torch.float32))
        self.loss_mse = nn.MSELoss()

    def training_step(self, batch):
        """One manual optimization step for both branches on one batch."""
        # NOTE(review): `time` is imported but never used here.
        import time
        self.model3d.train()
        # self.model25d.train()
        self.model2d.train()
        image, label = self.get_input(batch)
        # Moving the models every step is redundant after the first call but
        # harmless; .to() is a no-op when already on the device.
        self.model3d.to(self.device)
        # self.model25d.to(self.device)
        self.model2d.to(self.device)
        # pred3d, pred25d, pred2d = self.model(image)
        pred3d = self.model3d(image)
        # pred25d = self.model25d(image)
        pred2d = self.model2d(image)

        pred_3d_s = torch.softmax(pred3d, dim=1) 
        # pred_25d_s = torch.softmax(pred25d, dim=1)
        pred_2d_s = torch.softmax(pred2d, dim=1)

        # "Hard" supervised losses against the ground-truth labels.
        hard_loss_3d = self.loss_func(pred3d, label)
        # hard_loss_25d = self.loss_func(pred25d, label)
        hard_loss_2d = self.loss_func(pred2d, label)

        # Per-voxel predictive entropy of each branch (see compute_uncer).
        # pred_25d_uncer_map = compute_uncer(pred25d)
        pred_2d_uncer_map = compute_uncer(pred2d)
        pred_3d_uncer_map = compute_uncer(pred3d)
        
        # Boolean masks: True where that branch is confident (low entropy).
        # pred_25d_uncer_map_bool = pred_25d_uncer_map < uncer_threshold
        pred_2d_uncer_map_bool = pred_2d_uncer_map < uncer_threshold
        pred_3d_uncer_map_bool = pred_3d_uncer_map < uncer_threshold

        # 3D branch learns from the 2D branch's detached output, only where
        # the 2D branch is confident. The .detach() keeps the teacher fixed.
        # soft_loss_25d = (self.loss_mse(pred_3d_s * pred_25d_uncer_map_bool, pred_25d_s.detach() * pred_25d_uncer_map_bool)).mean()
        soft_loss_2d = (self.loss_mse(pred_3d_s * pred_2d_uncer_map_bool, pred_2d_s.detach() * pred_2d_uncer_map_bool)).mean()
        loss3d = hard_loss_3d + 0.001 * (soft_loss_2d)
        # loss3d = hard_loss_3d

        # soft_loss_2d = (self.loss_mse(pred_25d_s * pred_2d_uncer_map_bool, pred_2d_s.detach() * pred_2d_uncer_map_bool) ).mean()
        # soft_loss_3d = (self.loss_mse(pred_25d_s * pred_3d_uncer_map_bool, pred_3d_s.detach() * pred_3d_uncer_map_bool) ).mean()
        # loss25d = hard_loss_25d + 0.001 * (soft_loss_2d + soft_loss_3d)
        # loss25d = hard_loss_25d

        # Symmetrically, the 2D branch learns from the detached 3D output
        # where the 3D branch is confident.
        # soft_loss_25d = (self.loss_mse(pred_2d_s * pred_25d_uncer_map_bool, pred_25d_s.detach() * pred_25d_uncer_map_bool) ).mean()
        soft_loss_3d = (self.loss_mse(pred_2d_s * pred_3d_uncer_map_bool, pred_3d_s.detach() * pred_3d_uncer_map_bool) ).mean()
        loss2d = hard_loss_2d + 0.001 * (soft_loss_3d)
        # loss2d = hard_loss_2d

        # The two backward passes are independent because each loss only sees
        # the other branch through .detach().
        loss3d.backward()
        # loss25d.backward()
        loss2d.backward()

        self.optimizer3d.step()
        # self.optimizer25d.step()
        self.optimizer2d.step()

        self.optimizer3d.zero_grad()
        # self.optimizer25d.zero_grad()
        self.optimizer2d.zero_grad()


    def train_epoch_end(self):
        """Advance both learning-rate schedules once per epoch."""
        self.scheduler3d.step()
        # self.scheduler25d.step()
        self.scheduler2d.step()

    # for image, label in data_loader:
    def get_input(self, batch):
        """Resize a (image, label) batch to a fixed (32, 256, 256) volume.

        Images are resampled with trilinear interpolation; labels with
        nearest-neighbour so class ids stay integral, then cast to long for
        CrossEntropyLoss.
        """
        image, label = batch 
        
        image = nn.functional.interpolate(image, size=(32, 256, 256), mode="trilinear", align_corners=False)
        # interpolate needs a channel dim; add it, resample, then remove it.
        label = torch.unsqueeze(label, dim=1)
        label = nn.functional.interpolate(label, size=(32, 256, 256), mode="nearest")
        label = torch.squeeze(label, dim=1).long()
        # NOTE(review): this second .long() is redundant (already cast above).
        label = label.long()
        return image, label 

    def validation_step(self, batch):
        """Compute the Dice score of each branch on one validation batch."""
        # NOTE(review): no torch.no_grad() here — presumably the Trainer base
        # class wraps validation; confirm, otherwise gradients are tracked.
        self.model3d.eval()
        # self.model25d.eval()
        self.model2d.eval()

        image, label = self.get_input(batch)
        self.model3d.to(self.device)
        # self.model25d.to(self.device)
        self.model2d.to(self.device)

        output3d = self.model3d(image)
        # output25d = self.model25d(image)
        output2d = self.model2d(image)
        # output3d, output25d, output2d = self.model(image)
        output3d = output3d.argmax(dim=1).cpu().numpy()
        # output25d = output25d.argmax(dim=1).cpu().numpy()
        output2d = output2d.argmax(dim=1).cpu().numpy()

        target = label.cpu().numpy()
        t = target
        dice_score3d = dice(output3d, t)
        # dice_score25d = dice(output25d, t)
        dice_score2d = dice(output2d, t)
        
        return [dice_score3d, dice_score2d]

    def validation_end(self, mean_val_outputs):
        """Log mean Dice per branch and checkpoint best/final models.

        For each branch: the "best" checkpoint is (re)written only when the
        mean Dice improves; the "final" checkpoint is rewritten on every
        validation, so it always holds the latest weights.
        """
        dice_score3d, dice_score2d = mean_val_outputs

        self.log("dice_score_3d", dice_score3d, step=self.epoch)
        # self.log("dice_score_25d", dice_score25d, step=self.epoch)
        self.log("dice_score_2d", dice_score2d, step=self.epoch)
        if self.best_mean_dice3d < dice_score3d:
            self.best_mean_dice3d = dice_score3d
            save_new_model_and_delete_last(self.model3d, 
                                        os.path.join(model_save_path, 
                                        f"best_model3d_{dice_score3d:.4f}_{self.fold}.pt"), 
                                        delete_symbol=f"best_model3d_*_{self.fold}")
        save_new_model_and_delete_last(self.model3d, 
                                        os.path.join(model_save_path, 
                                        f"final_model3d_{dice_score3d:.4f}_{self.fold}.pt"), 
                                        delete_symbol=f"final_model3d_*_{self.fold}")
        
        # if self.best_mean_dice25d < dice_score25d:
        #     self.best_mean_dice25d = dice_score25d

        #     save_new_model_and_delete_last(self.model25d, 
        #                                 os.path.join(model_save_path, 
        #                                 f"best_model25d_{dice_score25d:.4f}_{self.fold}.pt"), 
        #                                 delete_symbol=f"best_model25d_*_{self.fold}")

        # save_new_model_and_delete_last(self.model25d, 
        #                                 os.path.join(model_save_path, 
        #                                 f"final_model25d_{dice_score25d:.4f}_{self.fold}.pt"), 
        #                                 delete_symbol=f"final_model25d_*_{self.fold}")

        if self.best_mean_dice2d < dice_score2d:
            self.best_mean_dice2d = dice_score2d

            save_new_model_and_delete_last(self.model2d, 
                                        os.path.join(model_save_path, 
                                        f"best_model2d_{dice_score2d:.4f}_{self.fold}.pt"), 
                                        delete_symbol=f"best_model2d_*_{self.fold}")
        save_new_model_and_delete_last(self.model2d, 
                                        os.path.join(model_save_path, 
                                        f"final_model2d_{dice_score2d:.4f}_{self.fold}.pt"), 
                                        delete_symbol=f"final_model2d_*_{self.fold}")
        
        print(f"dice_score3d is {dice_score3d}, dice_score2d is {dice_score2d}")

if __name__ == "__main__":

    # Train only the first fold (index 0) of the two-fold split built above.
    fold = 0
    trainer = BraTSTrainer(env_type=env,
                            max_epochs=max_epoch,
                            batch_size=batch_size,
                            device=device,
                            logdir=logdir,
                            val_every=val_every,
                            num_gpus=num_gpus,
                            master_port=17751,
                            training_script=__file__,
                            fold=fold,
                            )

    # Select this fold's file lists and build in-memory datasets from them.
    train_paths = train_two_fold[fold]
    test_paths = test_two_fold[fold]
    train_ds = Dataset3d(train_paths)
    val_ds = Dataset3d(test_paths)

    trainer.train(train_dataset=train_ds, val_dataset=val_ds)

    print(f"第{fold}折训练完成。。。。")
