#!/usr/bin/env python3
# --- Experiment configuration -------------------------------------------------
# Module-level constants selecting the GPU, cross-validation fold, model
# architecture and I/O paths for a single training run.

GPU_ID = 0  # CUDA device index handed to the Lightning Trainer.
_FOLD = 0  # Cross-validation fold; selects the split file and the output folder.
MODEL = 'unetr'
# Valid MODEL keys: 'myunetr', 'swinunetr', 'unetr' (must match `modellist` below).
DATASET_YML_PATH = f'/home/yusongli/Templates/yunet/nnUNet_preprocessed/Dataset002_C_intensity1500_roi2.0/splits_final_f{_FOLD}.yml'
SAVE_PATH = f'/home/yusongli/Templates/yunet/nnUNet_results/Dataset002_C_intensity1500_roi2.0/{MODEL}/f{_FOLD}'
BATCH_SIZE = 2  # NOTE(review): defined but unused — the dataloaders hard-code batch_size=1; confirm intent.

# ! <<<
import time
from typing import Tuple, Optional, Dict, List, Union
from numpy.typing import ArrayLike
import pathlib

import nibabel as nib
from dataloader import load_decathlon_datalist, LoadImaged

import sys

sys.path.insert(0, '../..')
from data import operator as op

# ! >>>
import os

from monai.losses import DiceCELoss
from loss import MyDiceCELoss
from monai.inferers import sliding_window_inference
from monai import transforms

from monai.metrics import DiceMetric

from monai.networks.nets import UNETR
from model.swin_unetr import SwinUNETR
from model.myunetr import MyUNETR


from monai.data import (
    CacheDataset,
    decollate_batch,
    list_data_collate,
    write_nifti,
)

import torch
import pytorch_lightning
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from ptflops import get_model_complexity_info


# ## Define the LightningModule (transform, network)
# The LightningModule contains a refactoring of your training code. The following module is a refactoring of the code
# in spleen_segmentation_3d.ipynb:
class Net(pytorch_lightning.LightningModule):
    """LightningModule wrapping a UNETR-family 3D segmentation network.

    Handles data preparation (MONAI transforms + CacheDataset), the
    train/val/test loops, sliding-window inference, Dice metric tracking,
    and saving inverted predictions back to NIfTI files.

    ``model_train_branch == 2`` marks a two-output model (MyUNETR): its
    forward pass takes a ``training`` flag, returns two logit tensors while
    training, and is optimized with ``MyDiceCELoss``. Any other value means
    a single-output model trained with the plain DiceCE loss.
    """

    def __init__(
        self, model: object, model_train_branch: int, resample_size: Tuple[int, int, int], modelstr: str
    ) -> None:
        """Store the network and configure losses, metrics and bookkeeping.

        Args:
            model: instantiated segmentation network.
            model_train_branch: 2 for the two-branch training path, anything
                else for the single-output path.
            resample_size: spatial size (D, H, W) all volumes are resized to;
                also used as the sliding-window ROI during validation.
            modelstr: model key; used only to derive the output directory.
        """
        super().__init__()
        self.rootdir = self.__class__._get_rootdir(modelstr)
        self.resample_size = resample_size

        self.model = model
        self.model_train_branch = model_train_branch
        # self.modelstr = modelstr

        # Validation always scores with the standard DiceCE loss; training
        # uses the two-output variant only for branch-2 models.
        self.loss_function_val = DiceCELoss(to_onehot_y=True, softmax=True)
        if model_train_branch == 2:
            self.loss_function_train = MyDiceCELoss(to_onehot_y=True, softmax=True)
        else:
            self.loss_function_train = self.loss_function_val
        self.post_pred = transforms.AsDiscrete(argmax=True, to_onehot=2, num_classes=2)
        self.post_label = transforms.AsDiscrete(to_onehot=2, num_classes=2)
        self.dice_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
        self.best_val_dice = 0
        self.best_val_epoch = 0

        # ! EPOCH
        # self.max_epochs = 1000
        self.max_epochs = 1
        self.check_val = 1
        # self.warmup_epochs = 20
        self.metric_values: List[float] = []  # mean val dice per validation epoch
        self.epoch_loss_values: List = []  # mean train loss per training epoch

        # Wall-clock markers used for one-shot epoch timing (train/val/test);
        # each pair is only ever set once (first step / first epoch end).
        self.ysl_start_time: Optional[float] = None
        self.ysl_start_time1: Optional[float] = None
        self.ysl_start_time2: Optional[float] = None
        self.ysl_end_time: Optional[float] = None
        self.ysl_end_time1: Optional[float] = None
        self.ysl_end_time2: Optional[float] = None

    def forward(
        self, x: torch.Tensor, training: Optional[bool] = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
        """Run the wrapped network.

        Branch-2 models receive the ``training`` flag and may return a pair
        of logit tensors; single-branch models return one logits tensor.
        """
        if self.model_train_branch == 2:
            return self.model(x, training=training)
        # BUGFIX: a leftover ptflops complexity probe followed by sys.exit()
        # previously sat here, which made the return below unreachable and
        # killed the process on the first single-branch forward pass.
        return self.model(x)

    def prepare_data(self) -> None:
        """Build the MONAI transform pipelines and cached datasets from the split YAML."""
        # ! <<< open debug yusongli
        # datasets = '/home/yusongli/Documents/shidaoai_new_project/data/meta_data2.yaml'
        # ! ===
        # datasets = '/home/yusongli/Templates/yunet/nnUNet_preprocessed/Dataset002_C_intensity1500_roi2.0/splits_final_f2.yml'
        datasets = DATASET_YML_PATH
        # ! >>> clos debug
        train_files = load_decathlon_datalist(datasets, True, "training")
        val_files = load_decathlon_datalist(datasets, True, "validation")
        # ! <<< open debug yusongli
        # test_files = load_decathlon_datalist(datasets, True, "test")
        # ! >>> clos debug

        # 'area' interpolation for the image, 'nearest' for the integer label map.
        mode = ['area', 'nearest']

        self.train_transforms = transforms.Compose(
            [
                LoadImaged(keys=["image", "label"]),
                transforms.AddChanneld(keys=["image", "label"]),
                transforms.Resized(keys=['image', 'label'], spatial_size=list(self.resample_size), mode=mode),
                transforms.ScaleIntensityRanged(keys=["image"], a_min=0, a_max=1500, b_min=0.0, b_max=1.0, clip=True),
                # Augmentations below are deliberately disabled; kept as toggles.
                # # ? Shape
                # transforms.RandFlipd(keys=['image', 'label'], spatial_axis=0),
                # transforms.RandFlipd(keys=['image', 'label'], spatial_axis=1),
                # transforms.RandFlipd(keys=['image', 'label'], spatial_axis=2),
                # transforms.RandZoomd(keys=['image', 'label']),
                # transforms.RandRotated(keys=['image', 'label']),
                # # transforms.RandRotate90d(keys=['image', 'label']),
                # transforms.RandAffined(keys=['image', 'label']),
                # transforms.Rand3DElasticd(keys=['image', 'label'], sigma_range=(5, 7), magnitude_range=(50, 150)),
                # transforms.RandGridDistortiond(keys=['image', 'label']),
                # # ? Noise
                # transforms.RandGibbsNoised(keys=['image']),
                # transforms.RandGaussianNoised(keys=['image']),
                # # transforms.RandKSpaceSpikeNoised(keys=['image']),
                # transforms.RandAdjustContrastd(keys=['image']),
                # transforms.RandGaussianSmoothd(keys=['image']),
                # transforms.RandGaussianSharpend(keys=['image']),
                # transforms.RandHistogramShiftd(keys=['image']),
                transforms.ToTensord(keys=["image", "label"], dtype=torch.float32),
            ]
        )
        self.val_transforms = transforms.Compose(
            [
                LoadImaged(keys=["image", "label"]),
                transforms.AddChanneld(keys=["image", "label"]),
                transforms.Resized(keys=['image', 'label'], spatial_size=list(self.resample_size), mode=mode),
                transforms.ScaleIntensityRanged(keys=["image"], a_min=0, a_max=1500, b_min=0, b_max=1, clip=True),
                transforms.ToTensord(keys=["image", "label"], dtype=torch.float32),
            ]
        )
        self.test_transforms = self.val_transforms
        # Maps predictions back to the original image geometry before saving.
        # Ref: https://github.com/Project-MONAI/tutorials/blob/0.6.0/3d_segmentation/torch/unet_inference_dict.py
        self.post_transforms = transforms.Compose(
            [
                transforms.EnsureTyped(keys=['label', 'pred']),
                transforms.Activationsd(keys=['label', 'pred'], sigmoid=True),
                transforms.Invertd(
                    keys=['label', 'pred'],  # invert the `pred` data field, also support multiple fields
                    transform=self.val_transforms,
                    orig_keys='image',  # get the previously applied pre_transforms information on the `img` data field,
                    # then invert `pred` based on this information. we can use same info
                    # for multiple fields, also support different orig_keys for different fields
                    orig_meta_keys='image_meta_dict',  # get the meta data from `img_meta_dict` field when inverting,
                    meta_keys=[
                        'label_meta_dict',
                        'pred_meta_dict',
                    ],  # key field to save inverted meta data, every item maps to `keys`
                    # for example, may need the `affine` to invert `Spacingd` transform,
                    # multiple fields can use the same meta data to invert
                    # if `meta_keys=None`, use "{keys}_{meta_key_postfix}" as the meta key,
                    meta_key_postfix='meta_dict',
                    # if `orig_meta_keys=None`, use "{orig_keys}_{meta_key_postfix}",
                    # otherwise, no need this arg during inverting
                    nearest_interp=False,  # don't change the interpolation mode to "nearest" when inverting transforms
                    # to ensure a smooth output, then execute `AsDiscreted` transform
                    to_tensor=True,  # convert to PyTorch Tensor after inverting
                ),
            ]
        )
        self.train_ds = CacheDataset(
            data=train_files, transform=self.train_transforms, cache_num=26, cache_rate=1.0, num_workers=8,
        )
        self.val_ds = CacheDataset(
            data=val_files, transform=self.val_transforms, cache_num=6, cache_rate=1.0, num_workers=8,
        )
        # ! <<< open debug yusongli
        # self.test_ds = CacheDataset(
        #     data=test_files, transform=self.test_transforms, cache_num=5, cache_rate=1.0, num_workers=8,
        # )
        # ! >>> clos debug

    def train_dataloader(self) -> torch.utils.data.DataLoader:
        """Training loader.

        NOTE(review): batch_size is hard-coded to 1 although a module-level
        BATCH_SIZE constant exists — confirm which is intended.
        """
        return torch.utils.data.DataLoader(
            self.train_ds, batch_size=1, shuffle=True, num_workers=8, pin_memory=True, collate_fn=list_data_collate,
        )

    def val_dataloader(self) -> torch.utils.data.DataLoader:
        """Validation loader (one volume per step for sliding-window inference)."""
        return torch.utils.data.DataLoader(self.val_ds, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)

    def test_dataloader(self) -> torch.utils.data.DataLoader:
        """Test loader.

        NOTE(review): ``self.test_ds`` is only built in the commented-out
        debug branch of prepare_data; as shipped this raises AttributeError.
        """
        return torch.utils.data.DataLoader(self.test_ds, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)

    def configure_optimizers(self) -> torch.optim.Optimizer:
        """AdamW over all model parameters."""
        return torch.optim.AdamW(self.model.parameters(), lr=1e-4, weight_decay=1e-5)

    def training_step(self, batch: Dict, batch_idx: int) -> Dict:
        """One optimization step; dispatches on the model's branch count."""
        if not self.ysl_start_time:
            self.ysl_start_time = time.time()
        images, labels = batch["image"], batch["label"]
        if self.model_train_branch == 2:
            output1, output2 = self.forward(images, training=True)
            loss = self.loss_function_train(output1, output2, labels)
        else:
            output = self.forward(images)
            loss = self.loss_function_val(output, labels)
        tensorboard_logs = {"train_loss": loss.item()}
        return {"loss": loss, "log": tensorboard_logs}

    def training_epoch_end(self, outputs: List[Dict]) -> None:
        """Record the mean epoch loss and dump the first-epoch wall time."""
        if not self.ysl_end_time:
            self.ysl_end_time = time.time()
        # 'w' intentionally overwrites: only the first epoch's duration matters.
        with open('/home/yusongli/unetr_train.log', 'w') as file:
            file.write(f'single epoch train time: {self.ysl_end_time - self.ysl_start_time}')
        avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
        self.epoch_loss_values.append(avg_loss.detach().cpu().numpy())

    def validation_step(self, batch: Dict, batch_idx: int) -> Dict:
        """Sliding-window inference, Dice accumulation, and saving of the
        inverted prediction as a NIfTI volume.
        """
        if not self.ysl_start_time1:
            self.ysl_start_time1 = time.time()
        images, labels = batch['image'], batch['label']
        label_path = batch['label_meta_dict']['filename_or_obj'][0]
        yaml_meta_data = batch['label_meta_dict']['yaml_meta_data']

        roi_size = self.resample_size
        sw_batch_size = 4
        outputs = sliding_window_inference(images, roi_size, sw_batch_size, self.forward)
        loss = self.loss_function_val(outputs, labels)

        batch['pred'] = outputs  # ? Note: They're the same location.

        outputs = [self.post_pred(i) for i in decollate_batch(outputs)]
        labels = [self.post_label(i) for i in decollate_batch(labels)]

        self.dice_metric(y_pred=outputs, y=labels)

        # Ref: https://github.com/Project-MONAI/tutorials/blob/master/3d_segmentation/torch/unet_inference_dict.py
        batch = [self.post_transforms(i) for i in decollate_batch(batch)]

        # argmax over the channel axis -> integer label volume in original space
        pred_array = batch[0]['pred'].detach().cpu().max(axis=0, keepdim=False)[1].numpy()

        pred_savepath, mydice = self._get_pred_savepath_and_dice_offline(yaml_meta_data, label_path, pred_array)

        pathlib.Path(pred_savepath).parent.mkdir(parents=True, exist_ok=True)
        write_nifti(pred_array, pred_savepath)

        return {"val_loss": loss, "val_number": len(batch), "dice_offline": mydice}

    def validation_epoch_end(self, outputs: List[Dict]) -> Dict:
        """Aggregate the Dice metric, track the best epoch, and log timing."""
        if not self.ysl_end_time1:
            self.ysl_end_time1 = time.time()
        with open('/home/yusongli/unetr_train.log', 'w') as file:
            file.write(f'single epoch train time: {self.ysl_end_time1 - self.ysl_start_time1}')
        val_loss, num_items = 0, 0
        for output in outputs:
            val_loss += output["val_loss"].sum().item()
            num_items += output["val_number"]

        mean_val_dice = self.dice_metric.aggregate().item()
        self.dice_metric.reset()
        mean_val_loss = torch.tensor(val_loss / num_items)

        tensorboard_logs = {
            "val_dice": mean_val_dice,
            "val_loss": mean_val_loss,
        }
        if mean_val_dice > self.best_val_dice:
            self.best_val_dice = mean_val_dice
            self.best_val_epoch = self.current_epoch
        # Logged so ModelCheckpoint(monitor='best_val_dice') can track it.
        self.log('best_val_dice', self.best_val_dice)

        print(
            f"current epoch: {self.current_epoch} "
            f"current mean dice: {mean_val_dice:.4f}"
            f"\nbest mean dice: {self.best_val_dice:.4f} "
            f"at epoch: {self.best_val_epoch}"
        )
        self.metric_values.append(mean_val_dice)
        return {"log": tensorboard_logs}

    def test_step(self, batch: Dict, batch_idx: int) -> Dict:
        """Reuse the validation logic for testing.

        BUGFIX: the validation_step result is now returned; previously it was
        dropped, so test_epoch_end received a list of None values and crashed
        when validation_epoch_end indexed output["val_loss"].
        """
        if not self.ysl_start_time2:
            self.ysl_start_time2 = time.time()
        return self.validation_step(batch, batch_idx)

    def test_epoch_end(self, outputs: List[Dict]) -> None:
        """Aggregate test results via the validation aggregation path."""
        if not self.ysl_end_time2:
            self.ysl_end_time2 = time.time()
        with open('/home/yusongli/unetr_train.log', 'w') as file:
            file.write(f'single epoch train time: {self.ysl_end_time2 - self.ysl_start_time2}')
        self.validation_epoch_end(outputs)

    @staticmethod
    def _get_rootdir(modelstr: str) -> str:
        """Return the run output directory (currently the SAVE_PATH constant)."""
        # ! <<< open debug yusongli
        # return f'{op.COMMON}/' f'_out/wangqifeng-spacial-dilated_k9_i2-net_{modelstr}_val/' f'{op.TIMESTAMP}'
        # ! ===
        # return f'/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_cropped_data/nnUNet/3d_fullres/Task607_CZ2/unetr/{op.TIMESTAMP}'
        # ! ===
        # return f'/home/yusongli/Templates/yunet/nnUNet_results/Dataset002_C_intensity1500_roi2.0/swinunetr/f4'
        # return '/home/yusongli/Templates/yunet/nnUNet_results/Dataset002_C_intensity1500_roi2.0/unetr/f2'
        return SAVE_PATH
        # ! >>> clos debug

    def _get_pred_savepath_and_dice_offline(
        self, yamlmetadata: List, label_path: str, pred_array: ArrayLike
    ) -> Tuple[str, float]:
        """Compute an offline Dice against the on-disk label and build the
        per-epoch save path for the predicted volume.

        Args:
            yamlmetadata: per-case metadata list from the label meta dict;
                element [1][0] is used as the output file stem.
            label_path: path of the ground-truth label NIfTI file.
            pred_array: predicted integer label volume in original space.

        Returns:
            (save path for the prediction, offline Dice score).
        """
        # for i in range(len(yamlmetadata)):
        #     if isinstance(yamlmetadata[i], (tuple, list)) and len(yamlmetadata[i]) == 1:
        #         yamlmetadata[i] = yamlmetadata[i][0]
        # _where, _who, _number = yamlmetadata[2:5]
        label = nib.load(label_path)
        label_array = label.get_fdata().copy()
        mydice = op._dice(label_array, pred_array)

        # ! <<< open debug yusongli
        # pred_savepath = f'{self.rootdir}/{self.current_epoch:02d}/{_where}/{_who}/{_number}/{mydice:.4f}.nii.gz'
        # ! ===
        # _nn = yamlmetadata[6]
        # pred_savepath = f'{self.rootdir}/{self.current_epoch:02d}/Z2_{_nn}.nii.gz'
        # ! ===
        pred_savepath = f'{self.rootdir}/{self.current_epoch:02d}/{yamlmetadata[1][0]}.nii.gz'
        # ! >>> clos debug

        return pred_savepath, mydice


# ! =========================================
gpu_id = GPU_ID
resample_size = 96
modelstr = MODEL
# ! =========================================

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
torch.backends.cudnn.benchmark = True  # fixed-size inputs -> let cudnn autotune kernels

# Registry mapping each model key to [constructor, training-branch flag].
modellist = {'myunetr': [MyUNETR, 2], 'swinunetr': [SwinUNETR, 1], 'unetr': [UNETR, 1]}
resample_size = (resample_size,) * 3  # cubic volume (D, H, W)

model_cls, model_train_branch = modellist[modelstr]
model = model_cls(in_channels=1, out_channels=2, img_size=resample_size)

net = Net(model, model_train_branch, resample_size, modelstr)

# Keep the checkpoint with the highest 'best_val_dice' (logged in validation_epoch_end).
checkpoint_callback = ModelCheckpoint(
    monitor='best_val_dice', mode='max', dirpath=net.rootdir, filename="{epoch:02d}-{best_val_dice:.4f}"
)

trainer = pytorch_lightning.Trainer(
    gpus=[gpu_id],
    max_epochs=net.max_epochs,
    check_val_every_n_epoch=net.check_val,
    callbacks=checkpoint_callback,
    default_root_dir=net.rootdir,
)


def myfit() -> None:
    """Fit `net` with the module-level trainer, then echo the input size used."""
    trainer.fit(net)
    size_report = f'resample_size: {resample_size}'
    print(size_report)


# def mytest() -> None:
#     trainer.test(ckpt_path=)


if __name__ == '__main__':
    myfit()