import argparse
import os
from time import time

import numpy as np
import torch
import torch.distributed as dist

from BraTS21.dataset.dataloader import get_loader
from Pretrain.models.ssl_head import SSLHead as SSLHead_gmim
from Sota.Deep.models.ssl_head import SSLHead as SSLHead_deep
from Pretrain.losses import Loss as loss

import matplotlib.pyplot as plt
from einops import rearrange

# Global matplotlib styling for the CAM figures.
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.weight'] = 'bold'
plt.rcParams['mathtext.fontset'] = 'stix'

parser = argparse.ArgumentParser(description="PyTorch Training")
parser.add_argument("--logdir", default="/home/qlc/train_log", type=str, help="directory to save the tensorboard logs")
parser.add_argument("--num_steps", default=100000, type=int, help="number of training iterations")
parser.add_argument("--eval_num", default=100, type=int, help="evaluation frequency")
parser.add_argument("--warmup_steps", default=500, type=int, help="warmup steps")
parser.add_argument("--in_channels", default=4, type=int, help="number of input channels")
parser.add_argument("--feature_size", default=48, type=int, help="embedding size")
parser.add_argument("--dropout_path_rate", default=0.0, type=float, help="drop path rate")
parser.add_argument("--use_checkpoint", action="store_true", help="use gradient checkpointing to save memory")
parser.add_argument("--spatial_dims", default=3, type=int, help="spatial dimension of input data")
parser.add_argument("--a_min", default=-1000, type=float, help="a_min in ScaleIntensityRanged")
parser.add_argument("--a_max", default=1000, type=float, help="a_max in ScaleIntensityRanged")
parser.add_argument("--b_min", default=0.0, type=float, help="b_min in ScaleIntensityRanged")
parser.add_argument("--b_max", default=1.0, type=float, help="b_max in ScaleIntensityRanged")
parser.add_argument("--space_x", default=1.5, type=float, help="spacing in x direction")
parser.add_argument("--space_y", default=1.5, type=float, help="spacing in y direction")
parser.add_argument("--space_z", default=2.0, type=float, help="spacing in z direction")
parser.add_argument("--roi_x", default=128, type=int, help="roi size in x direction")
parser.add_argument("--roi_y", default=128, type=int, help="roi size in y direction")
parser.add_argument("--roi_z", default=128, type=int, help="roi size in z direction")
parser.add_argument("--batch_size", default=2, type=int, help="number of batch size")
parser.add_argument("--sw_batch_size", default=2, type=int, help="number of sliding window batch size")
parser.add_argument("--lr", default=4e-4, type=float, help="learning rate")
parser.add_argument("--decay", default=0.1, type=float, help="decay rate")
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument("--lrdecay", action="store_true", help="enable learning rate decay")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="maximum gradient norm")
parser.add_argument("--loss_type", default="SSL", type=str)
parser.add_argument("--opt", default="adamw", type=str, help="optimization algorithm")
parser.add_argument("--lr_schedule", default="warmup_cosine", type=str)
parser.add_argument("--resume", default=None, type=str, help="resume training")
parser.add_argument("--local_rank", type=int, default=0, help="local rank")
parser.add_argument("--grad_clip", action="store_true", help="gradient clip")
parser.add_argument("--noamp", action="store_true", help="do NOT use amp for training")
parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
parser.add_argument("--smartcache_dataset", action="store_true", help="use monai smartcache Dataset")
parser.add_argument("--cache_dataset", action="store_true", help="use monai cache Dataset")

# masklayer
parser.add_argument("--dynamic_masking", action="store_true")
parser.add_argument("--hierarchical_masking", default=0., type=float)
parser.add_argument("--basic_mask_ratio", default=0.5, type=float)
parser.add_argument("--drop_ratio", default=0.3, type=float)
parser.add_argument("--scale", default=0.3, type=float)

# loss weighting
parser.add_argument("--alpha1", default=0.005, type=float)
parser.add_argument("--alpha2", default=1, type=float)
# Fixed: was type=int with a float default of 0.005 — argparse would crash
# on any value passed from the command line (int('0.005') raises ValueError).
parser.add_argument("--lambd", default=0.005, type=float)

# data
parser.add_argument("--datasets", default='BraTsDataset')
parser.add_argument("--json_list", default="./jsons/brats21_folds.json", type=str, help="dataset json file")
parser.add_argument("--data_dir", default="/dataset/brats2021/", type=str, help="dataset directory")
parser.add_argument("--fold", default=0, type=int, help="data fold")
parser.add_argument("--workers", default=8, type=int, help="number of workers")
# Fixed help text: it was a copy-paste of "number of workers".
# NOTE(review): presumably a data-subsampling factor — confirm against get_loader.
parser.add_argument("--frac", default=1, type=int, help="fraction of the dataset to use")

parser.add_argument("--invis_patches", action="store_true", help="calculate loss on masked patches")
parser.add_argument("--device", default='cpu', type=str)
parser.add_argument("--weight_path", default='/Users/qlc/Desktop/val_best.pt', type=str)
args = parser.parse_args()


def backward_hook(module, grad_in, grad_out):
    """Backward hook: capture the gradient w.r.t. the hooked layer's output.

    Appends the detached gradient tensor to the module-level ``grad_block``
    list so the CAM computation in ``__main__`` can read it back.

    Note: PyTorch passes ``grad_out`` as a *tuple* of tensors (one per
    output). The original code called ``grad_out.size()`` on the tuple
    itself, which raises AttributeError; index element 0 first, matching
    the append on the line above.
    """
    grad_block.append(grad_out[0].detach())
    print('grad_out.size:', grad_out[0].size())
    return grad_out
    
def farward_hook(module, input, output):
    """Forward hook: stash the hooked layer's activation for later CAM use.

    The activation tensor is appended to the module-level ``fmap_block``
    list and the (unmodified) output is returned to the forward pass.

    NOTE(review): the 'farward' spelling is kept intentionally — the hook is
    registered under this exact name elsewhere in the file.
    """
    activation_size = output.size()
    print('output.size:', activation_size)
    fmap_block.append(output)
    return output

def get_cam(grads, feature_map):
    """Compute a Grad-CAM heatmap from gradients and activations.

    Args:
        grads: ndarray of shape (C, *spatial) — gradients of the loss
            w.r.t. the hooked layer's output (batch dim already squeezed).
        feature_map: ndarray of the same shape — the layer's activations.

    Returns:
        ndarray of shape (*spatial,) in [0, 1]: ReLU of the channel-weighted
        activation sum, normalized by its maximum.

    Fixes vs. the original: ``cam`` was used (+=) before being initialized
    (NameError); ``np.maximun`` was a typo for ``np.maximum``; ``np.mean``
    without an axis collapsed the gradients to a single scalar instead of
    one weight per channel; and ``cam / cam.max()`` could divide by zero.
    """
    # One weight per channel: average gradient over all spatial locations.
    weights = np.mean(grads, axis=tuple(range(1, grads.ndim)))

    cam = np.zeros(feature_map.shape[1:], dtype=np.float64)
    for w, fmap in zip(weights, feature_map):
        cam += w * fmap

    cam = np.maximum(cam, 0)  # ReLU: keep only positive evidence
    max_val = cam.max()
    if max_val > 0:  # guard against an all-zero map after ReLU
        cam = cam / max_val
    return cam

    
if __name__ == '__main__':
    # Build model (swap in SSLHead_deep below to inspect the Deep baseline).
    # model = SSLHead_deep(args)
    model = SSLHead_gmim(args)
    model.to(args.device)

    # Load the pretrained checkpoint onto CPU first; the model was already
    # moved to args.device above, so load_state_dict fills in-place.
    model_dict = torch.load(args.weight_path, map_location='cpu')
    model.load_state_dict(model_dict["state_dict"])
    model.eval()

    # Fixed: the class is imported as `loss` at the top of the file; the
    # original called `Loss(args)`, which raised NameError.
    loss_function = loss(args)

    # Get dataloader
    train_loader, test_loader = get_loader(datasets=args.datasets,
                                           datalist_json=args.json_list,
                                           data_dir=args.data_dir,
                                           fold=args.fold,
                                           batch_size=1,
                                           num_works=8,
                                           args=args)

    grad_block = list()  # filled by backward_hook
    fmap_block = list()  # filled by farward_hook
    hook_handles = []

    # Register the hooks once, outside the loop. The original re-registered
    # them on every iteration, which would stack duplicate hooks if the loop
    # ever ran more than one step.
    hook_handles.append(model.conv[-1].register_forward_hook(farward_hook))
    hook_handles.append(model.conv[-1].register_full_backward_hook(backward_hook))

    try:
        for data in train_loader:
            # Move the sample to the same device as the model.
            img = data["image"].to(args.device)
            rec = model(img)

            # Backprop the SSL reconstruction loss to populate grad_block.
            # Named loss_value so it does not shadow the imported `loss` class.
            loss_value = loss_function(img, rec)
            loss_value.backward()

            # Reduce the captured activation/gradient to numpy for the CAM.
            # The original called get_cam(img, fmap, grads_val) — wrong arity
            # and undefined names; get_cam takes (grads, feature_map).
            fmap = fmap_block[0].detach().cpu().numpy().squeeze()
            grads_val = grad_block[0].cpu().numpy().squeeze()
            cam = get_cam(grads_val, fmap)

            break  # a single sample is enough for the visualization
    finally:
        # Always detach the hooks, even if the forward/backward pass fails.
        for h in hook_handles:
            h.remove()