import os
from PIL import Image
import torch
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torchvision
import torch.nn as nn
import torchvision.transforms as transforms

# Preprocessing pipeline: convert the input to a CHW tensor, then map each
# channel from [0, 1] to [-1, 1] via (x - 0.5) / 0.5.
_NORM_MEAN = (0.5, 0.5, 0.5)
_NORM_STD = (0.5, 0.5, 0.5)
transform = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize(_NORM_MEAN, _NORM_STD),
    ]
)


def ResNet18(device, num_class=3, load_pth=True, pth_path="test_model/ResNet18_val0.9916.pt"):
    """Build a ResNet-18 with a ``num_class``-way fc head, optionally loading
    fine-tuned weights from a checkpoint.

    :param device: torch device (or device string) the model is moved to.
    :param num_class: number of output classes for the replacement fc head.
    :param load_pth: when True, load backbone weights from ``pth_path``.
    :param pth_path: path of a checkpoint containing a state_dict.
    :return: the model, on ``device``.
    """
    model = torchvision.models.resnet18(pretrained=True)
    in_features = model.fc.in_features
    model.fc = nn.Linear(in_features, num_class)
    model = model.to(device)

    if load_pth:
        print(f"############# loading model : {pth_path}  #################")
        # map_location keeps the checkpoint loadable on machines that lack
        # the device it was saved from (e.g. a GPU-saved ckpt on a CPU box).
        pretrained_dict = torch.load(pth_path, map_location=device)
        model_dict = model.state_dict()
        # NOTE(review): "fc." keys are skipped, so the classifier head keeps
        # its fresh random init even though the checkpoint presumably holds
        # the fine-tuned fc weights (same shape for num_class=3) — confirm
        # this exclusion is intentional.
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if (not k.startswith("fc")) and k in model_dict}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    print(model)
    return model


class CamExtractor():
    """
        Extracts cam features from the model.

        Runs a forward pass through a torchvision-style ResNet and records
        the activations of layer1..layer4 plus the gradients that flow back
        through them (via tensor hooks), for Grad-CAM.
    """

    # Residual stages whose activations/gradients are tracked, forward order.
    _TARGET_LAYERS = ("layer1", "layer2", "layer3", "layer4")

    def __init__(self, model):
        self.model = model
        # Filled by the backward hooks, in *backward* order: layer4's
        # gradient arrives first, layer1's last.
        self.gradients = []

    def save_gradient(self, grad):
        self.gradients.append(grad)

    def forward_pass_on_convolutions(self, x):
        """
            Does a forward pass on convolutions, hooking the gradient at each
            residual stage.

            :return: (list of stage activations in forward order, final feature map)
        """
        # Reset gradients from any previous pass so position-based indexing
        # (e.g. gradients[-1 - target_layer] in GradCam) stays correct when
        # this extractor is reused for a second image.
        self.gradients = []

        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)

        conv_output = []
        for name in self._TARGET_LAYERS:
            x = getattr(self.model, name)(x)
            x.register_hook(self.save_gradient)
            conv_output.append(x)  # Save the convolution output on that layer
        return conv_output, x

    def forward_pass(self, x):
        """
            Does a full forward pass on the model: conv stages, global
            pooling, then the fc head.
        """
        # Forward pass on the convolutions
        conv_output, x = self.forward_pass_on_convolutions(x)

        x = self.model.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.model.fc(x)
        return conv_output, x

def get_featrue(x, net=None):
    """Return the backbone feature map (layer4 output) for input ``x``.

    :param x: input batch tensor, NCHW.
    :param net: model to run. Defaults to the module-level ``model`` global
        (created under ``__main__``), preserving the original behavior;
        passing ``net`` explicitly makes the function usable standalone.
    :return: the layer4 activation tensor.
    """
    if net is None:
        net = model  # fall back to the global, as the original code did
    x = net.conv1(x)
    x = net.bn1(x)
    x = net.relu(x)
    x = net.maxpool(x)
    x = net.layer1(x)
    x = net.layer2(x)
    x = net.layer3(x)
    x = net.layer4(x)
    return x




def draw_CAM(model, img_path, save_path, transform=None, visual_heatmap=False):
    '''
    Draw a Class Activation Map (Grad-CAM style) for one image and save the
    heatmap/image blend to disk.

    :param model: PyTorch model with loaded weights. NOTE(review): this
        function calls ``model.features`` / ``model.classifier`` (VGG-style
        attributes); the ResNet18 built elsewhere in this file exposes
        neither, so this helper only works for models with that interface.
    :param img_path: path of the test image
    :param save_path: path where the CAM overlay is written
    :param transform: preprocessing applied to the input image
    :param visual_heatmap: whether to display the raw heatmap via matplotlib
    :return: None (writes the blended image to ``save_path``)
    '''
    # Image loading & preprocessing
    # img = Image.open(img_path).convert('RGB')
    img = cv2.imread(img_path)
    # NOTE(review): reshape(3, 416, 416) reinterprets the HWC buffer instead
    # of permuting to CHW, and hard-codes a 416x416 input — confirm intent.
    img = torch.Tensor(img).reshape(3, 416, 416)
    if transform:
        img = transform(img)
    print(img.shape)
    img = img.unsqueeze(0)  # add batch dimension -> (1, C, H, W)

    # Get the model's feature maps / class scores
    model.eval()
    print(model)
    features = model.features(img)

    output = model.classifier(features)

    # Helper hook so the intermediate gradient can be read back afterwards
    def extract(g):
        global features_grad
        features_grad = g

    # Score of the class with the highest predicted probability
    pred = torch.argmax(output).item()
    pred_class = output[:, pred]

    features.register_hook(extract)
    pred_class.backward()  # compute gradients

    grads = features_grad  # fetch the gradient captured by the hook

    # Global-average-pool the gradients: one weight per feature channel
    pooled_grads = torch.nn.functional.adaptive_avg_pool2d(grads, (1, 1))

    # Batch size is assumed to be 1, so dim 0 (the batch dim) is dropped
    pooled_grads = pooled_grads[0]
    features = features[0]
    # 512 is the channel count of the last feature layer
    # NOTE(review): hard-coded; breaks for backbones with != 512 channels.
    for i in range(512):
        features[i, ...] *= pooled_grads[i, ...]

    # The rest mirrors the Keras reference implementation
    heatmap = features.detach().numpy()
    heatmap = np.mean(heatmap, axis=0)

    heatmap = np.maximum(heatmap, 0)  # ReLU: keep positive influence only
    heatmap /= np.max(heatmap)  # normalize to [0, 1]

    # Optionally visualize the raw heatmap
    if visual_heatmap:
        plt.matshow(heatmap)
        plt.show()

    img = cv2.imread(img_path)  # reload the original image with cv2
    heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))  # resize heatmap to the original image size
    heatmap = np.uint8(255 * heatmap)  # scale heatmap to 0-255
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)  # colorize the heatmap
    superimposed_img = heatmap * 0.4 + img  # 0.4 is the heatmap intensity factor
    cv2.imwrite(save_path, superimposed_img)  # write the blended image to disk


class GradCam():
    """
        Produces class activation map
    """

    def __init__(self, model):
        # Keep the model in eval mode and wrap it in the hook-registering
        # extractor defined above.
        self.model = model
        self.model.eval()
        # Define extractor
        self.extractor = CamExtractor(self.model)

    def generate_cam(self, input_image, target_layer, target_class=None):
        # `input_image` is an HWC ndarray (cv2.imread style); keep a copy
        # of the raw pixels for the final overlay.
        img = input_image.copy()
        input_image = transform(input_image)
        input_image = input_image.unsqueeze(0)  # add batch dim -> (1, C, H, W)
        # Full forward pass
        # conv_output is the output of convolutions at specified layer
        # model_output is the final output of the model (1, 1000)
        print(input_image.shape)
        conv_output, model_output = self.extractor.forward_pass(input_image)
        if target_class is None:
            target_class = np.argmax(model_output.data.numpy())
        # Target for backprop
        one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_()
        one_hot_output[0][target_class] = 1

        # Zero grads
        self.model.zero_grad()

        # Backward pass with specified target
        model_output.backward(gradient=one_hot_output, retain_graph=True)

        # Get hooked gradients,gradients
        # layer0:(1,512,7,7), layer1:(1,256,14,14), layer2:(1,128,28,28), layer3:(1,64,56,56),
        # Gradients arrive in backward order — the reverse of conv_output —
        # so index from the end to line the two lists up.
        guided_gradients = self.extractor.gradients[-1 - target_layer].data.numpy()[0]

        # Get convolution outputs
        # layer0.shape:(64,56,56) layer1:(128,28,28) layer2:(256,14,14) layer3:(512,7,7)
        target = conv_output[target_layer].data.numpy()[0]  ###512*13*13
        # Get weights from gradients
        weights = np.mean(guided_gradients, axis=(1, 2))  # Take averages for each gradient 512
        print("target shape", target.shape)
        print("weights shape", weights.shape)
        # Create empty numpy array for cam
        cam = np.ones(target.shape[1:], dtype=np.float32)

        # Have a look at issue #11 to check why the above is np.ones and not np.zeros
        # Multiply each weight with its conv output and then, sum
        for i, w in enumerate(weights):
            cam += w * target[i, :, :]

        print(cam.shape)
        # cam = np.maximum(cam, 0)
        # cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam))  # Normalize between 0-1
        # cam = np.uint8(cam * 255)  # Scale between 0-255 to visualize

        # cam = np.mean(cam, axis=0)
        cam = np.maximum(cam, 0)  # ReLU: keep positively-contributing regions
        cam /= np.max(cam)  # normalize to [0, 1]
        # print(cam)
        #
        # NOTE(review): cv2.resize expects (width, height) but this passes
        # (shape[2], shape[3]) = (H, W) of the NCHW tensor; only correct for
        # square inputs — confirm.
        cam_resize = cv2.resize(cam, (input_image.shape[2],
                                      input_image.shape[3]))
        cam_resize = np.uint8(cam_resize * 255)
        print(cam_resize)

        heatmap = cv2.applyColorMap(cam_resize, cv2.COLORMAP_JET)  # colorize the normalized CAM
        cv2.imshow("heatmap", heatmap)
        superimposed_img = heatmap * 0.4 + img  # 0.4 is the heatmap intensity factor
        cv2.imshow("cam", superimposed_img.astype(np.uint8))
        cv2.waitKey(0)
        return cam


if __name__ == '__main__':
    # Fall back to CPU when CUDA is absent so the script also runs on
    # CPU-only machines (the original hard-coded "cuda" and crashed there).
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = ResNet18(device)
    save_path = "save_path"
    img_path = "hole.jpg"
    img = cv2.imread(img_path)
    # cv2.imread returns None (not an exception) on a missing/unreadable
    # file; fail loudly instead of with a cryptic AttributeError later.
    if img is None:
        raise FileNotFoundError(f"could not read image: {img_path}")
    img = img.astype(np.float32)

    # Grad-CAM below converts tensors to numpy, which requires CPU tensors.
    model = model.to("cpu")
    mygrad_cam = GradCam(model)
    mygrad_cam.generate_cam(img, target_layer=3, target_class=0)