import os
import re

import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader

import torchvision.transforms as transforms
import torchvision.datasets as datasets

import matplotlib.pyplot as plt

from training import models
from training.datasets.transform_v2 import create_val_test_transform
from training.tools.logger import get_logger
from training.tools.train_utils import parse_args

# Module-level store filled by the forward hooks in get_activation();
# maps a level name ('high' / 'mid' / 'low') to that layer's detached output.
activation = {}


def get_activation(name):
    """Build a forward hook that records a layer's output under *name*.

    The returned callable has the (module, inputs, output) signature expected
    by ``nn.Module.register_forward_hook``; each call overwrites
    ``activation[name]`` with a detached copy of the layer's output.
    """
    def _store(module, inputs, output):
        # Detach so the stored tensor does not keep the autograd graph alive.
        activation[name] = output.detach()

    return _store


def _save_attention_map(act, image_folder, image_name, level):
    """Colorize one captured attention tensor and write it next to the input image.

    act: attention tensor captured by a forward hook (values assumed in [0, 1]
         so that *255 yields a valid 8-bit image — TODO confirm against the
         softmax output of the attention module).
    image_folder: directory the source PNG came from; output goes there too.
    image_name: source filename ending in '.png'; output is '<stem>_<level>.png'.
    level: 'high' / 'mid' / 'low' tag used in the output filename.
    """
    amap = act.squeeze()
    # Scale to 8-bit grayscale for OpenCV, then upsample to a viewable size.
    amap = (amap.detach().cpu().numpy() * 255.).astype(np.uint8)
    amap = cv2.resize(amap, (128, 128))
    amap = cv2.applyColorMap(amap, cv2.COLORMAP_VIRIDIS)
    target_file = os.path.join(image_folder, '{}_{}.png'.format(image_name[:-4], level))
    cv2.imwrite(target_file, amap)


def main():
    """Load a checkpoint and dump high/mid/low attention maps for every PNG
    in the attention-map folder, saved alongside the inputs."""
    args = parse_args()
    logger = get_logger()
    logger.info(args)

    if args.resume:
        # NOTE(review): setting CUDA_VISIBLE_DEVICES this late only works if
        # CUDA has not been initialized yet — confirm torch.cuda is untouched
        # before this point.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)

        logger.info("Loading checkpoint '{}'".format(args.resume))
        model = models.__dict__[args.arch](pretrained=False)
        model.cuda()
        checkpoint = torch.load(args.resume, map_location="cpu")
        state_dict = checkpoint.get("state_dict", checkpoint)
        # Strip the 'module.' prefix left by DataParallel checkpoints.
        model.load_state_dict({re.sub("^module.", "", k): v for k, v in state_dict.items()}, strict=False)
        # Inference only: freeze BN/dropout behavior.
        model.eval()

        transform = create_val_test_transform(model.default_cfg)

        # Register the hooks ONCE. The original registered them inside the
        # image loop, accumulating three new hooks per image.
        model.alam1.attn.softmax2_blocks.register_forward_hook(get_activation('high'))
        model.alam2.attn.softmax2_blocks.register_forward_hook(get_activation('mid'))
        model.alam3.attn.softmax2_blocks.register_forward_hook(get_activation('low'))

        root_path = 'E:\\workspace-pycharm\\deepfakes\\sifdnet-main\\plot\\img'
        image_folder = os.path.join(root_path, 'attention-map')
        for image_name in os.listdir(image_folder):
            if not image_name.endswith('.png'):
                continue
            image_path = os.path.join(image_folder, image_name)
            image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            transformed = transform(image=image)
            image = transformed["image"].unsqueeze(0)
            # With CUDA_VISIBLE_DEVICES masked to a single GPU, the visible
            # device is always index 0 — .cuda(args.gpu) would fail for any
            # args.gpu != 0 and mismatch model.cuda() (device 0).
            image = image.cuda()

            # No gradients needed; the hooks capture the activations.
            with torch.no_grad():
                model(image)

            for level in ('high', 'mid', 'low'):
                _save_attention_map(activation[level], image_folder, image_name, level)


# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
