import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from itertools import combinations
from model.ml_vgg import vgg16_bn as attrNet
from mlcam_utils import GradCAM, show_cam_on_image, center_crop_img
import numpy as np
import cv2
import seaborn as sns


class GLCN_Attention(nn.Module):
    """Spatial attention wrapper: scales the input feature map by the
    single-channel attention map produced by a GLCN sub-module."""

    def __init__(self):
        super(GLCN_Attention, self).__init__()
        self.glcn = GLCN()

    def forward(self, x):
        # GLCN emits a sigmoid-activated map that gates x element-wise.
        attention = self.glcn(x)
        return attention * x


class ChannelAttentionModule(nn.Module):
    """Squeeze-and-excitation style channel attention (average-pool branch only).

    Produces a per-channel gate in (0, 1) of shape (N, C, 1, 1).
    """

    def __init__(self, channel, ratio=2):
        super(ChannelAttentionModule, self).__init__()
        # Global average pooling squeezes each channel to one scalar.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Bottleneck MLP realised with 1x1 convolutions (channel -> channel/ratio -> channel).
        self.shared_MLP = nn.Sequential(
            nn.Conv2d(channel, channel // ratio, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(channel // ratio, channel, 1, bias=False),
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        squeezed = self.avg_pool(x)
        excitation = self.shared_MLP(squeezed)
        return self.sigmoid(excitation)


class GLCN(nn.Module):
    def __init__(self, planes=64, n=36):
        """ Constructor
        Args:
            planes: output channel dimensionality (currently unused; kept for
                interface compatibility).
            n: the number of channel pairs, default: C(9, 2) = 9*8/2 = 36
        """
        super(GLCN, self).__init__()
        # The 9 directional shift channels produced by get_glcm_stack.
        self.a = [i for i in range(9)]
        # All unordered pairs of those 9 channels: C(9, 2) = 36 == n.
        self.combination = list(combinations(self.a, 2))
        # Grouped 1x1 conv: each group fuses one 2-channel pair into 1 channel.
        self.group_conv = nn.Conv2d(2 * n, n, kernel_size=1, bias=False, groups=n)
        self.bn_group = nn.BatchNorm2d(n)
        self.relu = nn.ReLU(inplace=True)

        self.conv1 = nn.Conv2d(n, n, kernel_size=1, stride=1, bias=False)
        self.bn1 = nn.BatchNorm2d(n)

        self.channel_attention = ChannelAttentionModule(n)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Collapse the input channels, then expand into the 9 directional shifts.
        avgout = torch.mean(x, dim=1, keepdim=True)
        x = get_glcm_stack(avgout)

        # FIX (perf): collect all pair stacks and concatenate once. The previous
        # loop re-concatenated the accumulator on every iteration, copying the
        # growing tensor each time (quadratic). The resulting tensor is identical.
        pairs = [
            torch.cat((x[:, l1, :, :].unsqueeze(dim=1), x[:, l2, :, :].unsqueeze(dim=1)), dim=1)
            for (l1, l2) in self.combination
        ]
        feas = torch.cat(pairs, dim=1)
        feas = self.relu(self.bn_group(self.group_conv(feas)))

        feas = self.conv1(feas)
        feas = self.bn1(feas)

        # Channel-attention reweighting, then collapse to one spatial map.
        feas = self.channel_attention(feas) * feas
        feas = torch.sum(feas, dim=1).unsqueeze(1)
        return self.sigmoid(feas)


def load_model(model, state_dict, device):
    """Load *state_dict* into *model*, move it to *device*, set eval mode.

    Returns the same model instance for chaining.
    """
    model.load_state_dict(state_dict)
    # .to() and .eval() both return the module itself.
    return model.to(device).eval()


def get_glcm_stack(img, step=1):
    """Stack *img* with its 8 directional shifts along the channel dimension.

    Each shift moves the image by *step* pixels and pads the vacated border by
    repeating the trailing rows/columns, so the spatial size is preserved.
    Input (N, 1, H, W) -> output (N, 9, H, W).
    """
    def shift_left(t):
        return torch.cat((t[:, :, :, step:], t[:, :, :, -step:]), dim=3)

    def shift_right(t):
        return torch.cat((t[:, :, :, :step], t[:, :, :, :-step]), dim=3)

    def shift_up(t):
        return torch.cat((t[:, :, step:, :], t[:, :, -step:, :]), dim=2)

    def shift_down(t):
        return torch.cat((t[:, :, :step, :], t[:, :, :-step, :]), dim=2)

    left = shift_left(img)
    up = shift_up(img)
    down = shift_down(img)
    # Same channel order as the original expression: identity, left, up,
    # up-left, right, down, left-up, left-down, right-down.
    return torch.cat(
        (img,
         left,
         up,
         shift_up(left),
         shift_right(img),
         down,
         shift_left(up),
         shift_left(down),
         shift_right(down)),
        dim=1,
    )


class VGG(nn.Module):
    """VGG classifier whose early feature maps are modulated by Grad-CAM maps
    produced by a separate attribute network (``attrNet``).

    ``forward`` additionally saves colorized CAM visualisations to disk, so it
    requires the source image path as a second argument.
    """

    # Captured at class-creation time so each instance can build its own
    # attribute network. FIX: the previous default ``attrNet=attrNet()`` was
    # evaluated once at import time, so every default-constructed VGG shared
    # a single attrNet instance (weights included).
    _default_attrnet_factory = staticmethod(attrNet)

    def __init__(self, features, num_classes=3, init_weights=True, load_weights=False, attrNet=None):
        """
        Args:
            features: feature-extractor ``nn.Sequential`` (see ``make_layers``).
            num_classes: size of the final classification layer.
            init_weights: apply Kaiming/normal initialization to the layers.
            load_weights: load a pretrained checkpoint into ``attrNet``.
            attrNet: optional pre-built attribute network; a fresh one is
                created when omitted.
        """
        super(VGG, self).__init__()
        self.features = features
        self.attrNet = attrNet if attrNet is not None else self._default_attrnet_factory()
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        # Indices within ``features`` after which CAM modulation is applied.
        self.early_layer_list = [6, 13, 23, 33]
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            # NOTE(review): Dropout(False) means p=0, i.e. dropout disabled —
            # confirm this is intentional rather than a misuse of the p arg.
            nn.Dropout(False),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(False),
            nn.Linear(4096, num_classes),
        )
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        if load_weights:
            # FIX: load_model expects a state dict, not a checkpoint path —
            # previously the raw path string was handed to load_state_dict,
            # which raises. Assumes the .pt file stores a state dict —
            # TODO confirm against how the checkpoint was saved.
            checkpoint = torch.load("./runs/train_ml_in_random/best_loss.pt",
                                    map_location=self.device)
            load_model(self.attrNet, state_dict=checkpoint, device=self.device)
        if init_weights:
            self._initialize_weights()

    def forward(self, x, PATH):
        """Run the backbone, gating selected feature maps with fused CAMs.

        Args:
            x: input image batch.
            PATH: sequence whose first element is the source image path; its
                basename names the saved CAM visualisations.
        Returns:
            Class logits of shape (batch, num_classes).
        """
        input_x = x
        # Iterate layer indices explicitly because CAM gating is applied only
        # after the layers listed in early_layer_list.
        for i in range(len(self.features)):
            if i in self.early_layer_list:
                x = self.features[i](x)
                # CAM from the attribute net, resized to x's spatial size.
                cam_map = self.get_cam(input_x, target_layer=[self.attrNet.features[-1]], target_size=(x.shape[-2], x.shape[-1]))
                NAME = PATH[0].split("/")[-1].split('.')[0]
                SAVE_CAM = np.transpose(cam_map, (1, 2, 0))
                save_cam = np.uint8(SAVE_CAM*255)
                im_color = cv2.applyColorMap(save_cam, 2)
                # NOTE(review): hard-coded output directory — parameterize if reused.
                cv2.imwrite(f"/home/dhu1/gitee/fp2a-master/experiment-results/CAMATTENTION2/{NAME}_{i}.jpg", im_color)
                cam_map = torch.Tensor(cam_map).to(self.device).reshape(cam_map.shape[0], 1, x.shape[-2], x.shape[-1])
                # Gate the features with the CAM attention map.
                x = torch.mul(cam_map, x)
            else:
                x = self.features[i](x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def get_cam(self, x, target_layer, target_size):
        """Return the sum of attrNet's default-category and category-1 Grad-CAMs.

        Both maps are resized to ``target_size`` by GradCAM.
        """
        cam = GradCAM(model=self.attrNet, target_layers=target_layer, use_cuda=False, target_size=target_size)
        grayscale_cam = cam(input_tensor=x)
        w1 = self.sigmoid(cam.target_weights)
        grayscale_cam_2 = cam(input_tensor=x, target_category=[1])
        w2 = self.sigmoid(cam.target_weights)
        bz = grayscale_cam.shape[0]

        # NOTE(review): these normalized weights are computed but never used —
        # a weighted fusion (w_1 * cam1 + w_2 * cam2) may have been intended.
        # Kept as-is to preserve behavior; confirm with the author.
        w_1 = w1/(w1+w2)
        w_1 = w_1.reshape(bz, 1, 1)
        w_2 = w2/(w1+w2)
        w_2 = w_2.reshape(bz, 1, 1)

        grayscale_cam_fused = grayscale_cam + grayscale_cam_2

        return grayscale_cam_fused

    def sigmoid(self, x):
        """NumPy logistic sigmoid (operates on cam.target_weights arrays)."""
        x = 1 / (1 + np.exp(-x))
        return x

    def _initialize_weights(self):
        # Standard VGG init: Kaiming for convs, unit/zero for BN, N(0, 0.01) for linears.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


def make_layers(cfg, batch_norm=False):
    """Build the VGG feature extractor described by *cfg*.

    Args:
        cfg: sequence of conv output-channel counts interleaved with 'M'
            markers (2x2 max-pool).
        batch_norm: insert BatchNorm2d after each conv. FIX: this flag was
            previously ignored and BatchNorm was always inserted; the in-file
            caller (vgg16_bn) passes True, so its output is unchanged.

    Returns:
        nn.Sequential of the assembled layers. GLCN attention modules are
        injected after cfg positions 14-16 (the last conv stage of cfg 'D').
    """
    layers = []
    in_channels = 3
    for i, v in enumerate(cfg):
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        conv = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
        if batch_norm:
            layers += [conv, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
        else:
            layers += [conv, nn.ReLU(inplace=True)]
        if i in (14, 15, 16):
            layers.append(GLCN_Attention())
        in_channels = v
    return nn.Sequential(*layers)


# VGG layer configurations (torchvision convention): integers are conv output
# channel counts, 'M' marks a 2x2 max-pool.
cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],  # VGG-11
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],  # VGG-13
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],  # VGG-16 (used by vgg16_bn)
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],  # VGG-19
}


def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
    """Internal VGG constructor (mirrors torchvision's helper signature).

    ``arch`` and ``progress`` are accepted for signature compatibility but
    are not used here.
    """
    if pretrained:
        # Pretrained weights would make the random init pass redundant.
        kwargs['init_weights'] = False
    features = make_layers(cfgs[cfg], batch_norm=batch_norm)
    return VGG(features, **kwargs)


def vgg16_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 16-layer model (configuration "D") with batch normalization.

    `"Very Deep Convolutional Networks For Large-Scale Image Recognition"
    <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
    return model


if __name__ == '__main__':
    # Smoke test: build the model and run one dummy batch through it.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = vgg16_bn().to(device)
    x = torch.ones((1, 3, 224, 224)).to(device)
    # FIX: VGG.forward requires a PATH argument (used to name the saved CAM
    # images); calling model(x) raised a TypeError. Supply a dummy path.
    print(model(x, ["./smoke_test.jpg"]).shape)
