import numpy as np
from utils.data_utils_video import get_train_val_dataset
import torch 
import torch.nn as nn 
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.files_helper import save_new_model_and_delete_last
from einops import rearrange
set_determinism(123)
from utils.misc import cal_Jaccard
import os

# ---- experiment configuration -------------------------------------------
# Restrict this process to physical GPUs 2 and 3 (remapped to cuda:0/cuda:1).
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"
data_dir = "./data/fullres/train"
fold = 0

# Log/checkpoint directory for this run (plain string: no interpolation needed).
logdir = "./logs/segformer_video_prompt_extra_adapter_ep15_lr1ef3_addaugflip_adam_test"

env = "DDP"                                      # distributed data-parallel training
model_save_path = os.path.join(logdir, "model")  # checkpoint directory
max_epoch = 15
batch_size = 8
val_every = 1                                    # run validation every epoch
num_gpus = 2
device = "cuda:0"
image_size = 512                                 # square input resolution for the dataset

class MirrorTrainer(Trainer):
    """Trainer for binary mirror segmentation with a SegFormer adapter model.

    Optimizes ``SegFormerAdapterExtra`` with BCE-with-logits loss and a
    poly learning-rate schedule.  Validation reports the Jaccard index
    (logged under the historical name "dices"); the best- and most-recent
    checkpoints are saved after every validation round.
    """

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)

        # Imported lazily so the module can be imported without pulling in
        # the heavy network definition.
        from networks.segformer_modify.segformer_adapter_extra_embedding import SegFormerAdapterExtra
        self.model = SegFormerAdapterExtra()

        self.best_mean_dice = 0.0

        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3, weight_decay=3e-5, eps=1e-8)
        self.scheduler_type = "poly"

        # The model outputs raw logits, so use the numerically stable
        # BCE-with-logits loss (sigmoid is folded into the loss).
        self.loss_func = nn.BCEWithLogitsLoss()

    def training_step(self, batch):
        """Run one forward pass and return the BCE loss for this batch."""
        image, label = self.get_input(batch)

        pred = self.model(image)
        loss = self.loss_func(pred, label)

        self.log("train_loss", loss, step=self.global_step)
        return loss

    def get_input(self, batch):
        """Extract the (image, mask) pair from a dataloader batch dict."""
        image = batch["image"]
        label = batch["mask"]

        return image, label

    def cal_metric(self, pred, gt):
        """Jaccard index between a binary prediction and ground truth.

        Edge-case convention: 1.0 when both masks are empty (perfect
        agreement), 0.0 when exactly one is empty (no overlap possible).
        """
        if pred.sum() > 0 and gt.sum() > 0:
            return cal_Jaccard(pred, gt)

        elif gt.sum() == 0 and pred.sum() == 0:
            return 1.0

        else:
            return 0.0

    def validation_step(self, batch):
        """Evaluate one validation batch; returns its Jaccard score."""
        image, label = self.get_input(batch)

        output = self.model(image)
        # Logit > 0 is equivalent to sigmoid(logit) > 0.5.
        output = output > 0
        output = output.cpu().numpy()

        target = label.cpu().numpy()

        # Renamed from `dice`: the old local shadowed the `dice` function
        # imported from light_training.evaluation.metric.
        score = self.cal_metric(output, target)

        return score

    def validation_end(self, mean_val_outputs, val_outputs):
        """Log the mean validation metric and save best/final checkpoints."""
        dices = mean_val_outputs

        print(f"dices is {dices}")

        self.log("dices", dices, step=self.epoch)

        # Keep the single best-scoring checkpoint...
        if dices > self.best_mean_dice:
            self.best_mean_dice = dices
            save_new_model_and_delete_last(self.model, 
                                            os.path.join(model_save_path, 
                                            f"best_model_{dices:.4f}.pt"), 
                                            delete_symbol="best_model")

        # ...and always the most recent one.
        save_new_model_and_delete_last(self.model, 
                                        os.path.join(model_save_path, 
                                        f"final_model_{dices:.4f}.pt"), 
                                        delete_symbol="final_model")

if __name__ == "__main__":
    # Build the DDP trainer, load the train/val split at the configured
    # resolution, and start training.  (A large block of commented-out
    # matplotlib dataset-visualization code was removed here.)
    trainer = MirrorTrainer(env_type=env,
                            max_epochs=max_epoch,
                            batch_size=batch_size,
                            device=device,
                            logdir=logdir,
                            val_every=val_every,
                            num_gpus=num_gpus,
                            master_port=17752,
                            training_script=__file__)

    train_ds, val_ds = get_train_val_dataset(image_size=image_size)

    trainer.train(train_dataset=train_ds, val_dataset=val_ds)