import numpy as np
from light_training.dataloading.dataset import get_loader, get_all_training_loader, get_multi_dir_training_loader
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.files_helper import save_new_model_and_delete_last
from models.uent3d import UNet3D
from monai.networks.nets.segresnet import SegResNet
from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
from einops import rearrange
from models.modelgenesis.unet3d import UNet3DModelGen
from models.transvw.models.ynet3d import UNet3DTransVW
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.attentionunet import AttentionUnet
from light_training.loss.compound_losses import DC_and_CE_loss
from light_training.loss.dice import MemoryEfficientSoftDiceLoss
from light_training.evaluation.metric import dice
set_determinism(123)
from light_training.loss.compound_losses import DC_and_CE_loss
import os
from medpy import metric
from monai.losses.dice import DiceLoss
from classes import convert_labels
from torch import nn
from light_training.loss.deepsupervision import AutoDeepSupervision
from classes import segrap_subset
from models.bit_diffusion import decimal_to_bits, bits_to_decimal

# Make only GPUs 1 and 2 visible to this process (num_gpus below must match).
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2"

# Preprocessed dataset roots.
# data_dir0 = "./data/fullres_norm-1to1/train"   # alternative normalization
data_dir0 = "./data/fullres/train"
# Extra pseudo-labelled data; only used by the multi-dir loader option in __main__.
data_dir1 = "./data/fullres/val_semi_postprocess"
fold = 1  # cross-validation fold; only used by the (disabled) get_loader option

# Experiment log / checkpoint directory.
# NOTE: plain string — the previous f-string had no placeholders (ruff F541).
logdir = "./logs/task1_unet3d_alldata_addaug_bs2_ep2000_ds_gpu2_norm01_lowps"

# Sliding-window training patch size, presumably (depth, height, width) — TODO confirm.
# patch_size = [96, 128, 128]
patch_size = [64, 192, 192]


env = "DDP"  # distributed data-parallel training
model_save_path = os.path.join(logdir, "model")
max_epoch = 2000
batch_size = 2
val_every = 10  # run validation every N epochs
num_gpus = 2    # must agree with CUDA_VISIBLE_DEVICES above
device = "cuda:0"

class CEAndDiceLoss(nn.Module):
    """Compound segmentation loss: BCE-with-logits plus soft Dice.

    Both terms consume raw (pre-sigmoid) logits: BCEWithLogitsLoss applies
    the sigmoid internally, and DiceLoss is configured with sigmoid=True
    (batch=True pools the batch dimension into the Dice statistics).
    """

    def __init__(self) -> None:
        super().__init__()
        self.loss_func = nn.BCEWithLogitsLoss()
        self.dice_loss = DiceLoss(sigmoid=True, batch=True)

    def forward(self, pred, label):
        """Return the sum of the BCE and Dice terms for logits vs. binary targets."""
        bce_term = self.loss_func(pred, label)
        dice_term = self.dice_loss(pred, label)
        return bce_term + dice_term

class BraTSTrainer(Trainer):
    """Deep-supervised 3D segmentation trainer for a 45-class OAR task.

    The model emits one binary (sigmoid) channel per class. Training applies
    a BCE+Dice loss at the full-resolution output and five downsampled
    deep-supervision scales; validation thresholds sigmoid outputs at 0.5
    and reports per-class Dice.
    """

    # Number of model output channels; must match what convert_labels()
    # produces. TODO(review): confirm against classes.py.
    NUM_CLASSES = 45

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
        self.patch_size = patch_size

        # Relative spatial scales of the deep-supervision targets, ordered
        # from full resolution downward; the first axis (depth) is
        # downsampled more slowly than height/width.
        label_scale = [[1, 1, 1], [1, 1/2, 1/2], [1, 1/4, 1/4], [1/2, 1/8, 1/8],
                       [1/4, 1/16, 1/16], [1/8, 1/32, 1/32]]

        loss = CEAndDiceLoss()

        # presumably the number of data-loading worker processes — TODO confirm
        self.train_process = 32
        # Wrap the base loss so it is applied at every supervision scale.
        self.loss = AutoDeepSupervision(loss, label_scale)

        from models.nnunet3d import get_nnunet3d

        # 2 input channels -> NUM_CLASSES binary output channels.
        self.model = get_nnunet3d(2, self.NUM_CLASSES)

        self.augmentation = True
        self.best_mean_dice = 0.0
        # nnU-Net-style optimizer: SGD with Nesterov momentum + poly LR decay.
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5,
                                         momentum=0.99, nesterov=True)

        self.scheduler_type = "poly"

    def training_step(self, batch):
        """Run one training step and return the deep-supervised loss."""
        image, label = self.get_input(batch)

        # Map the raw label map onto the NUM_CLASSES-channel binary layout.
        label = convert_labels(label)

        preds = self.model(image)
        loss = self.loss(preds, label)

        self.log("train_loss", loss, step=self.global_step)
        return loss

    def get_input(self, batch):
        """Pull (image, float label) tensors out of a dataloader batch dict."""
        image = batch["data"]
        label = batch["seg"].float()
        return image, label

    def cal_metric(self, gt, pred, voxel_spacing=(1.0, 1.0, 1.0)):
        """Return np.array([dice, hd95]) for one binary class.

        HD95 is stubbed to the constant 50 (the medpy computation is
        disabled for speed); voxel_spacing is retained for that disabled
        call. Convention: both masks empty -> perfect 1.0 Dice; exactly
        one empty -> 0.0.
        """
        hd95_placeholder = 50
        if pred.sum() > 0 and gt.sum() > 0:
            return np.array([dice(pred, gt), hd95_placeholder])

        if gt.sum() == 0 and pred.sum() == 0:
            return np.array([1.0, hd95_placeholder])

        return np.array([0.0, hd95_placeholder])

    def validation_step(self, batch):
        """Compute the list of per-class Dice scores for one validation batch."""
        image, label = self.get_input(batch)

        # The model returns a list of deep-supervision outputs; index 0 is
        # the full-resolution logits.
        output = self.model(image)[0]
        output = (torch.sigmoid(output) > 0.5).cpu().numpy()
        label = convert_labels(label).cpu().numpy()

        dices = []
        for i in range(self.NUM_CLASSES):
            d, _ = self.cal_metric(label[:, i], output[:, i])
            dices.append(d)

        return dices

    def validation_end(self, mean_val_outputs, val_outputs):
        """Log the mean Dice and save best/latest checkpoints."""
        dices = mean_val_outputs
        print(f"dices is {dices}")

        mean_dice = sum(dices) / len(dices)
        self.log("mean_dice", mean_dice, step=self.epoch)

        # Keep exactly one "best" checkpoint (previous best is deleted) ...
        if mean_dice > self.best_mean_dice:
            self.best_mean_dice = mean_dice
            save_new_model_and_delete_last(self.model,
                                           os.path.join(model_save_path,
                                                        f"best_model_{mean_dice:.4f}.pt"),
                                           delete_symbol="best_model")

        # ... and one rolling "final" checkpoint with the latest weights.
        save_new_model_and_delete_last(self.model,
                                       os.path.join(model_save_path,
                                                    f"final_model_{mean_dice:.4f}.pt"),
                                       delete_symbol="final_model")

        print(f"mean_dice is {mean_dice}")

if __name__ == "__main__":

    # Build the trainer from the module-level experiment config.
    # NOTE: master_port must be unique per concurrent training job on this host.
    trainer = BraTSTrainer(env_type=env,
                            max_epochs=max_epoch,
                            batch_size=batch_size,
                            device=device,
                            logdir=logdir,
                            val_every=val_every,
                            num_gpus=num_gpus,
                            master_port=17751,
                            training_script=__file__)
    
    # Train on ALL training cases (no held-out fold). Alternatives:
    # train_ds, val_ds, test_ds = get_loader(data_dir=data_dir0, fold=fold)
    train_ds, val_ds, test_ds = get_all_training_loader(data_dir=data_dir0)
    # train_ds, val_ds, test_ds = get_multi_dir_training_loader(data_dir=[data_dir0, data_dir1])

    # test_ds is loaded but unused here; only train/val are passed to train().
    trainer.train(train_dataset=train_ds, val_dataset=val_ds)
