# Copyright 2020 - 2022 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
from time import time

import numpy as np
import torch
import torch.distributed as dist

from BraTS21.dataset.dataloader import get_loader as get_brats_loader
from Amos22.dataset.dataloader import get_loader as get_amos_loader
from Pretrain.models.ssl_head import SSLHead as SSLHead_gmim
from Sota.Deep.models.ssl_head import SSLHead as SSLHead_deep
from Sota.Mocov3.models.ssl_head import SSLHead as SSLHead_moco
from Sota.Longseq.models.ssl_head import SSLHead as SSLHead_long
from Sota.Multiscale.models.ssl_head import SSLHead as SSLHead_multi
from Sota.Swinuneter.models.ssl_head import SSLHead as SSLHead_swin
from Sota.MAE.models.ssl_head import SSLHead as SSLHead_mae

import matplotlib.pyplot as plt
from einops import rearrange

# Figure styling: serif font so plot labels match typeset documents.
plt.rcParams['font.family'] = 'Times New Roman'
# plt.rcParams['font.weight'] = 'bold'
# STIX math fonts pair well with Times New Roman for mathtext labels.
plt.rcParams['mathtext.fontset'] = 'stix'


# Command-line configuration for the similarity-visualization script.
parser = argparse.ArgumentParser(description="PyTorch Training")
parser.add_argument("--logdir", default="/home/qlc/train_log", type=str, help="directory to save the tensorboard logs")
parser.add_argument("--num_steps", default=100000, type=int, help="number of training iterations")
parser.add_argument("--eval_num", default=100, type=int, help="evaluation frequency")
parser.add_argument("--warmup_steps", default=500, type=int, help="warmup steps")
parser.add_argument("--in_channels", default=4, type=int, help="number of input channels")
parser.add_argument("--feature_size", default=48, type=int, help="embedding size")
parser.add_argument("--dropout_path_rate", default=0.0, type=float, help="drop path rate")
parser.add_argument("--use_checkpoint", action="store_true", help="use gradient checkpointing to save memory")
parser.add_argument("--spatial_dims", default=3, type=int, help="spatial dimension of input data")
parser.add_argument("--a_min", default=-1000, type=float, help="a_min in ScaleIntensityRanged")
parser.add_argument("--a_max", default=1000, type=float, help="a_max in ScaleIntensityRanged")
parser.add_argument("--b_min", default=0.0, type=float, help="b_min in ScaleIntensityRanged")
parser.add_argument("--b_max", default=1.0, type=float, help="b_max in ScaleIntensityRanged")
parser.add_argument("--space_x", default=1.5, type=float, help="spacing in x direction")
parser.add_argument("--space_y", default=1.5, type=float, help="spacing in y direction")
parser.add_argument("--space_z", default=2.0, type=float, help="spacing in z direction")
parser.add_argument("--roi_x", default=128, type=int, help="roi size in x direction")
parser.add_argument("--roi_y", default=128, type=int, help="roi size in y direction")
parser.add_argument("--roi_z", default=128, type=int, help="roi size in z direction")
parser.add_argument("--batch_size", default=2, type=int, help="number of batch size")
parser.add_argument("--sw_batch_size", default=2, type=int, help="number of sliding window batch size")
parser.add_argument("--lr", default=4e-4, type=float, help="learning rate")
parser.add_argument("--decay", default=0.1, type=float, help="decay rate")
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument("--lrdecay", action="store_true", help="enable learning rate decay")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="maximum gradient norm")
parser.add_argument("--loss_type", default="SSL", type=str)
parser.add_argument("--opt", default="adamw", type=str, help="optimization algorithm")
parser.add_argument("--lr_schedule", default="warmup_cosine", type=str)
parser.add_argument("--resume", default=None, type=str, help="resume training")
parser.add_argument("--local_rank", type=int, default=0, help="local rank")
parser.add_argument("--grad_clip", action="store_true", help="gradient clip")
parser.add_argument("--noamp", action="store_true", help="do NOT use amp for training")
parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
parser.add_argument("--smartcache_dataset", action="store_true", help="use monai smartcache Dataset")
parser.add_argument("--cache_dataset", action="store_true", help="use monai cache Dataset")

# masklayer
parser.add_argument("--dynamic_masking", action="store_true")
parser.add_argument("--hierarchical_masking", default=0., type=float)
parser.add_argument("--basic_mask_ratio", default=0.5, type=float)
parser.add_argument("--drop_ratio", default=0.3, type=float)
parser.add_argument("--scale", default=0.3, type=float)

parser.add_argument("--alpha1", default=0.005, type=float)
parser.add_argument("--alpha2", default=1, type=float)
# BUG FIX: was type=int with a float default (0.005); CLI-supplied values
# would have been truncated to 0. The default itself implies a float.
parser.add_argument("--lambd", default=0.005, type=float)

parser.add_argument("--datasets", default='BraTsDataset')
parser.add_argument("--json_list", default="./jsons/brats21_folds.json", type=str, help="dataset json file")
parser.add_argument("--data_dir", default="/dataset/brats2021/", type=str, help="dataset directory")
parser.add_argument("--fold", default=0, type=int, help="data fold")
parser.add_argument("--workers", default=8, type=int, help="number of workers")
# Help text was a copy-paste of "number of workers"; presumably a data
# fraction passed through to the loader -- TODO confirm against get_loader.
parser.add_argument("--frac", default=1, type=int, help="fraction of the dataset to use")
parser.add_argument("--pos", default=1, type=int)
parser.add_argument("--neg", default=1, type=int)
parser.add_argument("--RandFlipd_prob", default=0.2, type=float, help="RandFlipd aug probability")
parser.add_argument("--RandRotate90d_prob", default=0.2, type=float, help="RandRotate90d aug probability")
parser.add_argument("--RandScaleIntensityd_prob", default=0.1, type=float, help="RandScaleIntensityd aug probability")
parser.add_argument("--RandShiftIntensityd_prob", default=0.1, type=float, help="RandShiftIntensityd aug probability")

parser.add_argument("--invis_patches", action="store_true", help="calculate loss on masked patches")
parser.add_argument("--device", default='cpu', type=str)
parser.add_argument("--weight_dir", default='/Users/qlc/Desktop/val_best.pt', type=str)
parser.add_argument("--weight_name", default='/Users/qlc/Desktop/val_best.pt', type=str)
# Name of the SSLHead class to instantiate (e.g. "SSLHead_gmim",
# "SSLHead_deep", "SSLHead_moco", ...). Previously had no type/default, so
# leaving it unset crashed later when the name was resolved.
parser.add_argument("--ssl_head", default="SSLHead_gmim", type=str,
                    help="name of the imported SSLHead class to build")
args = parser.parse_args()


def mr_sim():
    """Plot the batch-averaged token-similarity matrix on MR (BraTS) data.

    Builds the SSL model named by ``args.ssl_head``, loads its pretrained
    weights, pushes the first ``n_batches`` training volumes through the
    SwinViT encoder, accumulates a min-max-normalized gram matrix of the
    deepest feature map, and displays the average as a heatmap.
    """
    n_batches = 10  # number of volumes to average over

    # Resolve the model class by name from this module's imported SSLHead
    # classes instead of eval() -- same result for valid names, no code exec.
    model = globals()[args.ssl_head](args)
    model.to(args.device)

    weight_path = os.path.join(args.weight_dir, args.weight_name)
    model_dict = torch.load(weight_path, map_location='cpu')
    model.load_state_dict(model_dict["state_dict"])
    model.eval()

    # Training split only; the validation loader is unused here.
    train_loader, _ = get_brats_loader(datasets=args.datasets,
                                       datalist_json=args.json_list,
                                       data_dir=args.data_dir,
                                       fold=args.fold,
                                       batch_size=1,
                                       num_works=8,
                                       args=args)

    # Accumulator for the 64x64 similarity maps (renamed from `map`, which
    # shadowed the builtin).
    sim_sum = torch.zeros((64, 64))

    with torch.no_grad():  # inference only -- no autograd graph needed
        for i, data in enumerate(train_loader):
            if i >= n_batches:
                break

            print(data['path'])
            img = data["image"]
            # Deepest SwinViT feature map; flatten spatial dims into tokens.
            out = model.swinViT(img)[-1]
            out = rearrange(out, 'b c h w d -> (h w d) (b c)')
            # Token-by-token gram (inner-product) similarity.
            sim = torch.einsum('nc,mc->nm', [out, out])
            # Min-max normalize to [0, 1] before accumulating; epsilon guards
            # against a constant map.
            sim = (sim - sim.min()) / (sim.max() - sim.min() + 1e-12)
            sim_sum += sim

    sim_avg = sim_sum / n_batches
    # BUG FIX: the original plotted the LAST batch's `sim`, discarding the
    # average it had just computed. Plot the averaged map instead.
    plt.imshow(sim_avg.cpu(), cmap='RdBu')
    plt.colorbar()
    plt.show()
        
        
def ct_sim():
    """Plot the batch-averaged token-similarity matrix on CT (Amos) data.

    Same pipeline as ``mr_sim`` but reads from the Amos loader, whose batches
    arrive as a list of crops (the first crop is used).
    """
    n_batches = 13  # original loop processed volumes 0..12

    # Resolve the model class by name from this module's imported SSLHead
    # classes instead of eval() -- same result for valid names, no code exec.
    model = globals()[args.ssl_head](args)
    model.to(args.device)

    weight_path = os.path.join(args.weight_dir, args.weight_name)
    model_dict = torch.load(weight_path, map_location='cpu')
    model.load_state_dict(model_dict["state_dict"])
    model.eval()

    # Training split only; the validation loader is unused here.
    train_loader, _ = get_amos_loader(datasets=args.datasets,
                                      datalist_json=args.json_list,
                                      data_dir=args.data_dir,
                                      fold=args.fold,
                                      batch_size=1,
                                      num_works=8,
                                      args=args)

    # Accumulator for the 64x64 similarity maps (renamed from `map`, which
    # shadowed the builtin).
    sim_sum = torch.zeros((64, 64))
    count = 0  # actual number of accumulated maps

    with torch.no_grad():  # inference only -- no autograd graph needed
        for i, data in enumerate(train_loader):
            if i >= n_batches:
                break

            # Amos loader yields a list of crops per batch; use the first.
            # TODO(review): confirm this indexing against the loader.
            data = data[0]

            print(data['path'])
            img = data["image"]
            # Deepest SwinViT feature map; flatten spatial dims into tokens.
            out = model.swinViT(img)[-1]
            out = rearrange(out, 'b c h w d -> (h w d) (b c)')
            # Token-by-token gram (inner-product) similarity.
            sim = torch.einsum('nc,mc->nm', [out, out])
            # Min-max normalize to [0, 1] before accumulating; epsilon guards
            # against a constant map.
            sim = (sim - sim.min()) / (sim.max() - sim.min() + 1e-12)
            sim_sum += sim
            count += 1

    # BUG FIX: the original divided by a hard-coded 10 while iterating 13
    # batches, and then plotted the LAST batch's `sim` instead of the
    # average. Divide by the actual count and plot the averaged map.
    sim_avg = sim_sum / max(count, 1)
    plt.imshow(sim_avg.cpu(), cmap='RdBu')
    plt.colorbar()
    plt.show()


if __name__ == "__main__":
    # Entry point: visualize the CT (Amos) similarity map by default;
    # switch to mr_sim() for the MR (BraTS) variant.
    ct_sim()
    # mr_sim()
        


        
        
        
    
        
        