# Copyright 2020 - 2022 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
from functools import partial

import numpy as np
import torch
import matplotlib.pyplot as plt
import SimpleITK as sitk

from BraTS21.dataset.dataloader import get_loader as get_brats_loader
from Amos22.dataset.dataloader import get_loader as get_amos_loader
from MSD.dataset.dataloader import get_loader as get_msd_loader
from Pretrain.models.ssl_head import SSLHead as SSLHead_gmim
from Sota.Deep.models.ssl_head import SSLHead as SSLHead_deep
from Sota.Mocov3.models.ssl_head import SSLHead as SSLHead_moco
from Sota.Longseq.models.ssl_head import SSLHead as SSLHead_long
from Sota.Multiscale.models.ssl_head import SSLHead as SSLHead_multi
from Sota.Swinuneter.models.ssl_head import SSLHead as SSLHead_swin
from Sota.MAE.models.ssl_head import SSLHead as SSLHead_mae

from monai.inferers import sliding_window_inference

from BraTS21.models.swin_uneter import SwinUNETR
from monai.transforms import Activations, AsDiscrete, Compose

# Global matplotlib style applied to every figure this script saves.
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.weight'] = 'bold'
plt.rcParams['mathtext.fontset'] = 'stix'

# Command-line interface; defaults target the BraTS 2021 configuration.
parser = argparse.ArgumentParser(description="Swin UNETR segmentation pipeline for BRATS Challenge")

# Model
parser.add_argument("--feature_size", default=48, type=int, help="feature size")
parser.add_argument("--in_channels", default=4, type=int, help="number of input channels")
parser.add_argument("--out_channels", default=3, type=int, help="number of output channels")
parser.add_argument("--norm_name", default="instance", type=str, help="normalization name")
parser.add_argument("--dropout_rate", default=0.0, type=float, help="dropout rate")
parser.add_argument("--dropout_path_rate", default=0.0, type=float, help="drop path rate")

# DataAug
parser.add_argument("--a_min", default=-175.0, type=float, help="a_min in ScaleIntensityRanged")
parser.add_argument("--a_max", default=250.0, type=float, help="a_max in ScaleIntensityRanged")
parser.add_argument("--b_min", default=0.0, type=float, help="b_min in ScaleIntensityRanged")
parser.add_argument("--b_max", default=1.0, type=float, help="b_max in ScaleIntensityRanged")
parser.add_argument("--space_x", default=1.5, type=float, help="spacing in x direction")
parser.add_argument("--space_y", default=1.5, type=float, help="spacing in y direction")
parser.add_argument("--space_z", default=2.0, type=float, help="spacing in z direction")
parser.add_argument("--roi_x", default=128, type=int, help="roi size in x direction")
parser.add_argument("--roi_y", default=128, type=int, help="roi size in y direction")
parser.add_argument("--roi_z", default=128, type=int, help="roi size in z direction")
parser.add_argument("--RandFlipd_prob", default=0., type=float, help="RandFlipd aug probability")
parser.add_argument("--RandRotate90d_prob", default=0., type=float, help="RandRotate90d aug probability")
parser.add_argument("--RandScaleIntensityd_prob", default=0, type=float, help="RandScaleIntensityd aug probability")
parser.add_argument("--RandShiftIntensityd_prob", default=0, type=float, help="RandShiftIntensityd aug probability")
parser.add_argument("--infer_overlap", default=0.5, type=float, help="sliding window inference overlap")

# Data
parser.add_argument("--datasets", default='BraTsDataset')
parser.add_argument("--data_dir", default="/dataset/brats2021/", type=str, help="dataset directory")
parser.add_argument("--json_list", default="./jsons/brats21_folds.json", type=str, help="dataset json file")
parser.add_argument("--cache_dataset", action="store_true", help="use monai Dataset class")
parser.add_argument("--spatial_dims", default=3, type=int, help="spatial dimension of input data")
parser.add_argument("--workers", default=8, type=int, help="number of workers")
parser.add_argument("--batch_size", default=1, type=int, help="number of batch size")
parser.add_argument("--sw_batch_size", default=2, type=int, help="number of sliding window batch size")
parser.add_argument("--fold", default=0, type=int, help="data fold")
parser.add_argument("--frac", default=1, type=float)
parser.add_argument("--pos", default=1, type=int, help="parameter of RandCropByPosNegLabeld")
parser.add_argument("--neg", default=1, type=int, help="parameter of RandCropByPosNegLabeld")

# Checkpoint
parser.add_argument("--checkpoint", default=None)

parser.add_argument("--output_dir")
parser.add_argument("--device", default='cpu', type=str)


def custom_color_map():
    """Build a viridis-based colormap whose lowest entry is solid black.

    Returns:
        matplotlib.colors.LinearSegmentedColormap: viridis with the
        first (background) color replaced by black.
    """
    import matplotlib.colors

    cmap = plt.cm.viridis
    cmaplist = [cmap(i) for i in range(cmap.N)]
    # Paint the background class (index 0) black.
    cmaplist[0] = (0.0, 0.0, 0.0)
    # To make the background transparent instead, add an alpha channel:
    # cmaplist[0] = (0.0, 0.0, 0.0, 0.0)
    # Bug fix: the original built the colormap and discarded it (implicit
    # None return made the function useless); return it to the caller.
    return matplotlib.colors.LinearSegmentedColormap.from_list('mcm', cmaplist, cmap.N)
    
    
def set_brats_color(image):
    """Map a 3-D BraTS label volume to an RGBA volume.

    BraTS class ids 0, 2, 1 and 4 receive consecutive colors from the
    'Paired' colormap; voxels with any other id stay all-zero. To get a
    transparent background, set the last component of a color to 0.
    """
    palette = plt.cm.Paired

    rgba = np.zeros((image.shape[0],
                     image.shape[1],
                     image.shape[2],
                     4))

    for color_idx, class_id in enumerate((0, 2, 1, 4)):
        rgba[image == class_id] = palette(color_idx)

    return rgba


def set_amos_color(image):
    """Map a 3-D AMOS label volume to an RGBA volume.

    Classes 0-11 take consecutive colors from the 'Paired' colormap;
    classes 12-15 take selected colors from 'Pastel1'. Voxels with any
    other id stay all-zero. To get a transparent background, set the
    last component of a color to 0.
    """
    image_ = np.zeros((image.shape[0],
                       image.shape[1],
                       image.shape[2],
                       4))

    cmap = plt.cm.Paired
    # Classes 0-11. The i == 0 iteration already covers the background,
    # so the original's separate `image_[image==0] = cmap(0)` line was
    # redundant and has been removed. The unused `d, _, _ = image.shape`
    # unpack was removed as well.
    for i in range(12):
        image_[image == i] = cmap(i)

    cmap = plt.cm.Pastel1
    image_[image == 12] = cmap(2)
    image_[image == 13] = cmap(7)
    image_[image == 14] = cmap(5)
    image_[image == 15] = cmap(6)

    return image_


def visual_amos(image, label, seg, output_dir):
    """Save RGBA slice images of an AMOS ground truth and prediction.

    Every 5th axial slice of ``label`` and ``seg`` is written to
    ``output_dir`` as ``label_<i>.png`` / ``seg_<i>.png``. ``image`` is
    currently unused but kept for interface symmetry with visual_msd.
    """
    label = set_amos_color(label)
    seg = set_amos_color(seg)
    os.makedirs(output_dir, exist_ok=True)
    # Bug fix: after colorization the volumes are 4-D (H, W, D, RGBA), so the
    # original `_, _, d = label.shape` raised ValueError (4 values into 3).
    d = label.shape[2]

    for i in range(0, d, 5):
        seg_name = os.path.join(output_dir, f'seg_{i}.png')
        label_name = os.path.join(output_dir, f'label_{i}.png')

        # The slices are already RGBA, so imsave ignores any cmap argument;
        # the original's cmap='Paired' was a no-op and is dropped.
        plt.imsave(seg_name, seg[:, :, i], dpi=150)
        plt.imsave(label_name, label[:, :, i], dpi=150)


def visual_brats(image, label, seg, output_dir):
    """Save RGBA slice images of a BraTS ground truth and prediction.

    Every 5th axial slice of ``label`` and ``seg`` is written to
    ``output_dir`` as ``label_<i>.png`` / ``seg_<i>.png``. ``image`` is
    currently unused but kept for interface symmetry with visual_msd.
    """
    label = set_brats_color(label)
    seg = set_brats_color(seg)

    os.makedirs(output_dir, exist_ok=True)
    # Bug fix: after colorization the volumes are 4-D (H, W, D, RGBA), so the
    # original `_, _, d = label.shape` raised ValueError (4 values into 3).
    d = label.shape[2]
    for i in range(0, d, 5):
        seg_name = os.path.join(output_dir, f'seg_{i}.png')
        label_name = os.path.join(output_dir, f'label_{i}.png')
        # Bug fix: the original referenced undefined names `seg_`/`label_`
        # (NameError); the colorized arrays are `seg` and `label`.
        plt.imsave(seg_name, seg[:, :, i], dpi=150)
        plt.imsave(label_name, label[:, :, i], dpi=150)


def visual_msd(image, label, seg, output_dir):
    """Save every 5th axial slice of an MSD label and prediction as
    grayscale PNGs named ``label_<i>.png`` / ``seg_<i>.png`` under
    ``output_dir``. ``image`` is currently unused.
    """
    os.makedirs(output_dir, exist_ok=True)
    _, _, depth = label.shape
    for idx in range(0, depth, 5):
        plt.imsave(os.path.join(output_dir, f'seg_{idx}.png'),
                   seg[:, :, idx], cmap='gray', dpi=150)
        plt.imsave(os.path.join(output_dir, f'label_{idx}.png'),
                   label[:, :, idx], cmap='gray', dpi=150)


def get_brats_data(args):
    """Return the BraTS validation dataloader built from ``args``."""
    loaders = get_brats_loader(
        args.datasets,
        args.json_list,
        args.data_dir,
        args.fold,
        args.batch_size,
        args.workers,
        args,
    )
    # get_brats_loader returns (train_loader, val_loader); keep validation only.
    return loaders[1]


def get_amos_data(args):
    """Return the AMOS validation dataloader built from ``args``."""
    loaders = get_amos_loader(
        args.datasets,
        args.json_list,
        args.data_dir,
        args.fold,
        args.batch_size,
        args.workers,
        args,
    )
    # get_amos_loader returns (train_loader, val_loader); keep validation only.
    return loaders[1]


def get_msd_data(args):
    """Return the MSD validation dataloader built from ``args``."""
    loaders = get_msd_loader(
        args.datasets,
        args.json_list,
        args.data_dir,
        args.fold,
        args.batch_size,
        args.workers,
        args,
    )
    # get_msd_loader returns (train_loader, val_loader); keep validation only.
    return loaders[1]


def get_brats_results(model_inferer, data_loader, output_dir, data_index=None):
    """Run inference on BraTS validation cases and save slice visualizations.

    Args:
        model_inferer: callable mapping an image batch to per-class logits.
        data_loader: iterable of batches with 'image', 'label' and 'path' keys.
        output_dir: root directory; one sub-directory is created per case.
        data_index: if given, only cases whose path contains str(data_index)
            are processed.
    """
    post_sigmoid = Activations(sigmoid=True)

    with torch.no_grad():
        for batch in data_loader:
            # NOTE(review): relies on the module-level `args` for the device;
            # consider passing the device in explicitly.
            image = batch["image"].to(args.device)
            label = batch["label"].to(args.device)
            path = batch['path']

            num = path[0].split("/")[-1].split("_")[1]
            img_name = "BraTS2021_" + num

            # Guard clause replaces the original `else: pass` branch.
            if data_index is not None and str(data_index) not in path[0]:
                continue

            print("Inference on case {}".format(img_name))
            logits = model_inferer(image)
            seg_out = logits.detach().cpu()

            # Binarize each output channel at a 0.5 sigmoid threshold.
            seg_out = post_sigmoid(seg_out) > 0.5
            seg_out = seg_out.to(torch.bool)

            image = image[0].detach().cpu().numpy().astype(np.float32)
            seg_out = seg_out[0].detach().cpu().numpy().astype(np.int8)

            # Collapse the 3 overlapping channels back to label ids; the
            # assignment order defines precedence for overlapping voxels.
            seg_origin_cls = np.zeros_like(seg_out[0])
            seg_origin_cls[seg_out[1] == 1] = 2
            seg_origin_cls[seg_out[0] == 1] = 1
            seg_origin_cls[seg_out[2] == 1] = 4

            label = label[0].detach().cpu().numpy().astype(np.int8)

            label_origin_cls = np.zeros_like(label[0])
            label_origin_cls[label[1] == 1] = 2
            label_origin_cls[label[0] == 1] = 1
            label_origin_cls[label[2] == 1] = 4

            # Bug fix: the original reassigned `output_dir` itself, so every
            # subsequent case was nested inside the previous case's directory.
            case_dir = os.path.join(output_dir, img_name)

            visual_brats(image=image, label=label_origin_cls, seg=seg_origin_cls, output_dir=case_dir)
 
def get_amos_results(model_inferer, data_loader, output_dir, data_index=None):
    """Run inference on AMOS validation cases and save slice visualizations.

    Args:
        model_inferer: callable mapping an image batch to per-class logits.
        data_loader: iterable of batches with 'image', 'label' and 'path' keys.
        output_dir: root directory; one sub-directory is created per case.
        data_index: if given, only cases whose path contains str(data_index)
            are processed.
    """
    post_pred = AsDiscrete(argmax=True)

    with torch.no_grad():
        for batch in data_loader:
            # NOTE(review): relies on the module-level `args` for the device;
            # consider passing the device in explicitly.
            image = batch["image"].to(args.device)
            label = batch["label"].to('cpu')
            path = batch['path']

            img_name = path[0].split("/")[-1].split("_")[1]

            # Guard clause replaces the original `else: pass` branch.
            if data_index is not None and str(data_index) not in path[0]:
                continue

            print("Inference on case {}".format(img_name))

            logits = model_inferer(image)
            logits = post_pred(logits[0])

            # NOTE(review): dropping channel 0 here assumes a one-hot layout,
            # but AsDiscrete(argmax=True) yields a single index channel —
            # verify the intended post-processing (to_onehot=?).
            logits = logits[1:].detach().cpu().numpy()
            label = label[0][0].cpu().numpy()

            # Bug fix: np.zeros(logits[0]) passed an array where a shape is
            # expected (TypeError); zeros_like builds the intended buffer.
            seg_out_ = np.zeros_like(logits[0])
            for cls in range(15):
                # Bug fix: the original used '==' (a discarded comparison)
                # instead of '=' — no class was ever written. The loop
                # variable is also renamed so it no longer shadows an index.
                seg_out_[logits[cls] == 1] = cls + 1

            # Bug fix: don't reassign `output_dir` itself, otherwise each
            # case nests inside the previous case's directory.
            case_dir = os.path.join(output_dir, img_name)
            visual_amos(image=image, label=label, seg=seg_out_, output_dir=case_dir)
            
def get_msd_results(model_inferer, data_loader, output_dir):
    """Run inference on MSD validation cases and save slice visualizations.

    Args:
        model_inferer: callable mapping an image batch to per-class logits.
        data_loader: iterable of batches with 'image', 'label' and 'path' keys.
        output_dir: root directory; one sub-directory is created per case.
    """
    post_label = AsDiscrete()
    post_pred = AsDiscrete(argmax=True)

    with torch.no_grad():
        for batch in data_loader:
            # NOTE(review): relies on the module-level `args` for the device;
            # consider passing the device in explicitly.
            image = batch["image"].to(args.device)
            label = batch["label"].to('cpu')
            path = batch['path']

            img_name = path[0].split("/")[-1].split("_")[1]

            print("Inference on case {}".format(img_name))

            logits = model_inferer(image)
            seg_out = post_pred(logits[0])
            label = post_label(label[0])

            # Bug fix: the original reassigned `output_dir` itself, so every
            # subsequent case was nested inside the previous case's directory.
            case_dir = os.path.join(output_dir, img_name)
            visual_msd(image=image, label=label, seg=seg_out, output_dir=case_dir)


def draw_origin_image(image, out_dir):
    """Write every axial slice of a 3-D volume as a grayscale PNG named
    ``<out_dir>_<i>.png`` (``out_dir`` acts as a filename prefix)."""
    _, _, depth = image.shape

    for idx in range(depth):
        plt.imsave(f'{out_dir}_{idx}.png', image[:, :, idx], cmap='gray')


if __name__ == "__main__":
    # NOTE(review): the full inference/visualization pipeline below is
    # commented out; only the tensor-shape sanity check at the very bottom
    # actually runs. Re-enable the relevant section (and parse args) to use
    # this script for real visualization.
    # args = parser.parse_args()
    
    # args.sw_batch_size = args.batch_size
    # output_dir = os.path.join(args.output_dir, str.split(args.checkpoint, sep='/')[-2])
    # print('Save image to:', args.output_dir)

    # # Get dataloader
    # if args.datasets == 'BraTsDataset':
    #     data_loader = get_brats_data(args)
    # elif args.datasets == 'Amos2022Dataset':
    #     data_loader = get_amos_data(args)
    # else:
    #     data_loader = get_msd_data(args)
        
    # for i, data in enumerate(data_loader):
    #     image = data['image']
    #     path = data['path']

    #     if '630' in path[0]:
    #         print(path)
    #         image = image[0][0].detach().cpu().numpy()
    #         draw_origin_image(image, '/Users/qlc/Desktop/24/1')
    #     if '1419' in path[0]:
    #         print(path)
    #         image = image[0][0].detach().cpu().numpy()
    #         draw_origin_image(image, '/Users/qlc/Desktop/36/1')
    #     if '1146' in path[0]:
    #         print(path)
    #         image = image[0][0].detach().cpu().numpy()
    #         draw_origin_image(image, '/Users/qlc/Desktop/3/1')

    # model = SwinUNETR(
    #     img_size=(args.roi_x, args.roi_y, args.roi_z),
    #     in_channels=args.in_channels,
    #     out_channels=args.out_channels,
    #     feature_size=args.feature_size
    # )

    # inf_size = [args.roi_x, args.roi_y, args.roi_z]
    # model_inferer = partial(
    #     sliding_window_inference,
    #     roi_size=inf_size,
    #     sw_batch_size=args.sw_batch_size,
    #     predictor=model,
    #     overlap=args.infer_overlap,
    # )

    # # Load checkpoint:
    # if args.checkpoint is not None:
    #     checkpoint = torch.load(args.checkpoint, map_location="cpu")
    #     from collections import OrderedDict

    #     new_state_dict = OrderedDict()
    #     for k, v in checkpoint["state_dict"].items():
    #         new_state_dict[k.replace("backbone.", "")] = v
    #     model.load_state_dict(new_state_dict, strict=False)
    #     print("=> loaded model checkpoint")
    
    # model.to(args.device)
    
    # if args.datasets == 'BraTsDataset':
    #     get_brats_results(model_inferer=model_inferer, 
    #                     data_loader=data_loader,
    #                     output_dir=output_dir)
    # elif args.datasets == 'Amos2022Dataset':
    #     get_amos_results(model_inferer=model_inferer, 
    #                     data_loader=data_loader,
    #                     output_dir=output_dir)
    # else:
    #     get_msd_results(model_inferer=model_inferer, 
    #                     data_loader=data_loader,
    #                     output_dir=output_dir)

    # path = '/Users/qlc/Desktop/Dataset/Brats2021/TrainingData/BraTS2021_01146/BraTS2021_01146_t1.nii.gz'
    # out_dir = '/Users/qlc/Desktop/1146/'
    
    # image = sitk.ReadImage(path)
    # image =sitk.GetArrayFromImage(image)
    
    # image = np.transpose(image, [2, 1, 0])

    # draw_origin_image(image, out_dir)
    
    
    # Debug leftover: prints the per-channel shape and the flattened
    # (batch, -1) size of a random BraTS-shaped tensor.
    a = torch.randn((1,4, 128, 128, 128))
    
    print(a.shape[1:])
    
    print(a.reshape([a.shape[0], -1]).size())
    