import numpy as np
import cv2
import torch
import scipy.ndimage as ndimage
from skimage.transform import resize
from matplotlib import pyplot as plt
import nibabel
import os
from networks.neuron_net import Neuron_WaveSNet_V2, Neuron_WaveSNet_V2not, Neuron_WSUNet, Neuron_WaveSENet

# from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor

# Select the inference device (first CUDA GPU).
# NOTE(review): `device` is never used below — neither the model nor the
# input tensors are moved to it; inference appears to run on CPU. Verify.
device = torch.device("cuda", 0)

# ###---Load model (nnU-Net variant, kept for reference)----###
# model_path = '/home/promising/NAS_DATA/nnunet/nnUNet_results/Dataset114_BigNeuron_enhance++/nnUNetTrainerFUMambaBotTest2__nnUNetPlans__3d_fullres'
# test_model_name = 'test2_xxl_enhance++_1(1_5)ce_1dc_1cl(4)_epoch500'

def load_image_3d(image_root):
    """
    Load a 3-D image from the folder ``image_root``.

    The folder is expected to contain a series of 2-D grayscale images that
    are the slices of one 3-D volume; slices are stacked in sorted-filename
    order along axis 0.

    :param image_root: path to a folder of 2-D slice images
    :return: the 3-D image as a float32 numpy array of shape (depth, H, W)
    :raises ValueError: if the folder contains no readable image files
    """
    # NOTE: sort is lexicographic — slice filenames must be zero-padded
    # (e.g. 001.png ... 120.png) for the stacking order to be correct.
    image_name_list = sorted(os.listdir(image_root))
    image_3d = []
    for image_name in image_name_list:
        # flag 0 = cv2.IMREAD_GRAYSCALE. imread returns None for files it
        # cannot decode (e.g. stray non-image files); skip those so that
        # np.array below does not silently build a ragged object array.
        image = cv2.imread(os.path.join(image_root, image_name), 0)
        if image is None:
            continue
        image_3d.append(image)
    if not image_3d:
        raise ValueError(f'no readable image slices found in {image_root!r}')
    return np.array(image_3d, dtype=np.float32)

# def get_predictor(model_path):
#     # instantiate the nnUNetPredictor
#     predictor = nnUNetPredictor(
#         tile_step_size=0.5,
#         use_gaussian=True,
#         use_mirroring=True,
#         perform_everything_on_device=True,
#         device=torch.device('cuda', 0),
#         verbose=False,
#         verbose_preprocessing=False,
#         allow_tqdm=True
#     )
#     # initializes the network architecture, loads the checkpoint
#     predictor.initialize_from_trained_model_folder(
#         model_path,
#         use_folds=(1,),
#         checkpoint_name='checkpoint_best.pth',
#     )
#     return predictor

# Use register_forward_hook() to capture an intermediate feature map.
class LayerActivations:
    """Capture the output feature map of a layer via a forward hook.

    Attach to any ``nn.Module``; after the next forward pass through that
    module, its output tensor (moved to CPU) is available in ``features``.
    Call :meth:`remove` when done to detach the hook.
    """

    def __init__(self, model):
        # `features` is per-instance state. (Previously it was a shared
        # class attribute, which leaks captured tensors between instances.)
        self.features = None
        self.hook = model.register_forward_hook(self.hook_fn)

    def hook_fn(self, module, inputs, output):
        # Standard forward-hook signature: (module, inputs, output).
        self.features = output.cpu()

    def remove(self):
        """Detach the forward hook (must be *called*: ``obj.remove()``)."""
        self.hook.remove()


# Load model (commented alternatives select the other architectures).
model_path = 'D:\\pycharmproject\\result\\paper best\\BERTv2 data5\\neuron_wavesnet_v2_haar_best.pth'
# model_path = 'D:\\pycharmproject\\result\\Wave\\neuron_wavesnet_v2\\epoch_3.pth'
# model_path = 'D:\\pycharmproject\\MICCAI\\paper\\WSUNet\\Neuron_WSUNet_haar_best.pth'
# model_path = 'D:\\pycharmproject\\result\\paper\\BERT_SE 1 data5 TASloss\\Neuron_WaveSENet_haar_best.pth'

# Wrap in DataParallel so checkpoint keys with the 'module.' prefix match.
model = torch.nn.DataParallel(Neuron_WaveSNet_V2(num_class=2, with_BN=True, channel_width=4, wavename='haar'))
# model = torch.nn.DataParallel(Neuron_WaveSNet_V2not(num_class=2, with_BN=True, channel_width=4, wavename='haar'))
# model = torch.nn.DataParallel(Neuron_WSUNet(num_class=2, with_BN=True, channel_width=4, wavename='haar'))
# model = torch.nn.DataParallel(Neuron_WaveSENet(num_class=2, with_BN=True, channel_width=4, wavename='haar'))

# Load the checkpoint's 'state_dict' into the model. Merging into the
# model's own state dict (rather than load_state_dict(checkpoint) directly)
# tolerates checkpoints that omit some buffers/keys.
# NOTE(review): torch.load on an untrusted .pth file unpickles arbitrary
# objects — only load checkpoints you trust.
checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
model_dict = model.state_dict()
model_dict.update(checkpoint['state_dict'])
model.load_state_dict(model_dict)
model.eval()

import os
import numpy as np
import torch
import matplotlib.pyplot as plt
from scipy import ndimage
import cv2

# Root directory holding one sub-folder per sample, and the output directory.
root_dir = 'E:\\NeCuDa\\DataBase_5_new\\000093'
save_path = 'D:\\pycharmproject\\cam comparison\\cam_93outputs_1_pdf'
# save_path = 'D:\\pycharmproject\\v2\\cam_0outputs_notLLM'

# Create the output directory up front so savefig() cannot fail on it.
os.makedirs(save_path, exist_ok=True)

# Iterate over every sample folder under root_dir.
for folder_name in os.listdir(root_dir):
    folder_path = os.path.join(root_dir, folder_name)
    if not os.path.isdir(folder_path):
        continue

    # Each sample folder is expected to contain an 'image' sub-directory
    # with the 2-D slices of the volume.
    MRI_path = os.path.join(folder_path, 'image')
    MRI = load_image_3d(MRI_path)

    # Normalize intensities to [0, 1]; guard against an all-zero volume.
    max_value = MRI.max()
    MRI_array = MRI / max_value if max_value > 0 else MRI
    # Add batch and channel dims: (D, H, W) -> (1, 1, D, H, W).
    MRI_tensor = torch.FloatTensor(MRI_array).unsqueeze(0).unsqueeze(0)

    # Hook the final convolution to capture its feature map.
    # conv_out = LayerActivations(grad_model.decoder.eam_layers[4].conv_f)  # alternative layer
    conv_out = LayerActivations(model.module.cov_final)

    with torch.no_grad():  # inference only — no gradients needed
        output = model(MRI_tensor)
    cam = conv_out.features  # feature map captured by the hook
    # cam = output  # use the final network output instead
    # BUG FIX: was `conv_out.remove` (bare attribute access) — the hook was
    # never detached, so hooks accumulated across loop iterations.
    conv_out.remove()

    ###---Collapse the captured map to the foreground channel---###
    print('cam.shape1', cam.shape)
    cam = cam.cpu().detach().numpy().squeeze()
    print('cam.shape2', cam.shape)
    cam = cam[1]  # channel 1 = foreground class activation
    print('cam.shape3', cam.shape)

    # Upsample the CAM to the input volume size and normalize to [0, 1];
    # guard against a constant map (zero dynamic range).
    capi = resize(cam, (MRI_tensor.shape[2], MRI_tensor.shape[3], MRI_tensor.shape[4]))
    capi = np.maximum(capi, 0)
    value_range = capi.max() - capi.min()
    if value_range > 0:
        heatmap = (capi - capi.min()) / value_range
    else:
        heatmap = np.zeros_like(capi)

    f, axarr = plt.subplots(3, 3, figsize=(12, 12))
    f.suptitle('CAM_3D_medical_image', fontsize=30)

    # Slice indices to visualize along each axis.
    axial_slice_count = 4
    coronal_slice_count = 4
    sagittal_slice_count = 4

    sagittal_MRI_img = np.squeeze(MRI_array[sagittal_slice_count, :, :])
    sagittal_grad_cmap_img = np.squeeze(heatmap[sagittal_slice_count, :, :])

    # Sagittal view of the raw volume.
    img_plot = axarr[0, 0].imshow(np.rot90(sagittal_MRI_img, 1), cmap='gray')
    axarr[0, 0].axis('off')
    axarr[0, 0].set_title('Sagittal MRI', fontsize=20)

    # Heatmap-only panel, kept for reference:
    # img_plot = axarr[0, 1].imshow(np.rot90(sagittal_grad_cmap_img, 1), cmap='jet')
    # axarr[0, 1].axis('off')
    # axarr[0, 1].set_title('Weight-CAM', fontsize=20)

    # NOTE(review): zoom factor (1, 1) is a no-op; raise it (e.g. (10, 10)
    # on both images) if the overlay should actually be smoothed.
    sagittal_MRI_img = ndimage.zoom(sagittal_MRI_img, (1, 1), order=3)
    # Blend the weight map with the original slice.
    sagittal_overlay = cv2.addWeighted(sagittal_MRI_img, 0.3, sagittal_grad_cmap_img, 0.6, 0)

    img_plot = axarr[0, 2].imshow(np.rot90(sagittal_overlay, 1), cmap='jet')
    axarr[0, 2].axis('off')
    axarr[0, 2].set_title('Overlay', fontsize=20)

    plt.colorbar(img_plot, shrink=0.5)  # color bar if needed
    plt.savefig(os.path.join(save_path, f'map_{folder_name}.pdf'))  # one PDF per sample folder
    plt.close()  # close the figure to release memory