from __future__ import absolute_import

import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init


class AttentionRecognitionHead(nn.Module):
    """
    Deformable-attention style recognition head for sequence recognition.

    input: [b x 16 x 64 x in_planes]
    output: probability sequence: [b x T x num_classes]
    """

    def __init__(self, num_classes, in_planes, sDim, attDim, max_len_labels):
        """
        Args:
            num_classes: number of output classes (includes <EOS>).
            in_planes: channel width of the incoming feature sequence (e.g. 512);
                must be divisible by the number of heads (8).
            sDim: decoder state dim (kept for interface compatibility; e.g. 512).
            attDim: attention dim (kept for interface compatibility; e.g. 512).
            max_len_labels: maximum decoded sequence length (forward asserts 25).
        """
        super(AttentionRecognitionHead, self).__init__()
        self.num_classes = num_classes  # this is the output classes. So it includes the <EOS>.
        self.in_planes = in_planes  # 512
        self.sDim = sDim  # 512
        self.attDim = attDim  # 512
        self.max_len_labels = max_len_labels

        # One learned embedding per reading-order (output) position.
        self.pos2embedding = nn.Embedding(max_len_labels, in_planes)

        self.heads = 8
        self.k_nums = 1   # sampling points per head per scale (K)
        self.scales = 1   # number of feature-map scales (L)
        self.offset_dims = 2 * self.heads * self.k_nums * self.scales  # the '2' is the (x, y) coordinate offset
        self.offset_proj = nn.Linear(in_planes * 2, self.offset_dims)
        self.atten_dims = self.heads * self.k_nums * self.scales
        self.atten_proj = nn.Linear(in_planes * 2, self.atten_dims)

        self.k_proj_0 = nn.Linear(in_planes, in_planes)

        self.d_k = int(in_planes / self.heads)  # e.g. 64; channels are split across self.heads heads

        self.wm_proj = nn.Linear(in_planes, in_planes)
        self.dropout = True
        self.droplayer = nn.Dropout(0.1)

        self.channel_context_embed = nn.Linear(self.k_nums, 256)

        self.cls_op = nn.Linear(in_planes, self.num_classes)

        # NOTE(review): device is frozen at construction time; tensors created in
        # forward() use it rather than x.device — confirm single-device usage.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, x):
        """
        Args:
            x: tuple (features, targets, lengths); only features [b, 25, in_planes]
               is used here (targets/lengths are unpacked but unused).
        Returns:
            class logits of shape [b, 25, num_classes].
        """
        x, targets, lengths = x  # [b,25,512]
        batch_size = x.size(0)

        assert self.max_len_labels == 25, "max_len = 25"
        reading_order = torch.arange(self.max_len_labels, dtype=torch.long).to(self.device)
        reading_order = reading_order.unsqueeze(0).expand(batch_size, -1)  # b,25
        pos_embed = self.pos2embedding(reading_order)  # b,25,512
        pos_embed = torch.cat([pos_embed, x], dim=-1)  # b,25,1024

        # Predict sampling offsets from pos_embed: 25*L*K in total, where L is the
        # number of multi-scale levels and K the number of points per position.
        offset = self.offset_proj(pos_embed)  # b,25,offset_dim
        offset = offset.view(batch_size, self.max_len_labels, self.heads, -1)  # b,25,8,2LK  (offsets are predicted only for the fixed reading-order positions)
        offset = offset.view(batch_size, self.max_len_labels, self.heads, self.scales, self.k_nums, 2)  # b,25,M,L,K,2
        offset = offset.permute(0, 2, 3, 4, 1, 5).contiguous()  # b,M,L,K,25,2
        offset = offset.view(batch_size * self.heads, self.scales, self.k_nums, self.max_len_labels, 2)  # bM,L,K,25,2

        atten = self.atten_proj(pos_embed)
        atten = atten.view(batch_size, self.max_len_labels, self.heads, -1)  # b,25,M,LK
        atten = F.softmax(atten, dim=-1)  # normalize the attention over the L*K sampling points
        atten = atten.permute(0, 2, 1, 3).contiguous()  # b,M,25,LK
        atten = atten.view(batch_size * self.heads, self.max_len_labels, -1)  # BM,25,LK

        # Reference center point; the feature map is a 1-D sequence, so y is always 0.
        pixel_location = torch.tensor([0.5, 0]).unsqueeze(0).unsqueeze(1).to(self.device)
        pixel_location = pixel_location.repeat(batch_size, self.max_len_labels, 1)  # b,25,2

        feature_map = x.unsqueeze(1)  # b,1,25,512
        _, h, w, _ = feature_map.size()

        key_pixel_location = self.restore_scale(w, h, pixel_location)
        key_pixel_location = key_pixel_location.repeat(self.heads, 1, 1)  # bM,25,2

        scale_feature = self.k_proj_0(feature_map).view(batch_size, h, w, self.heads, self.d_k)  # b,1,25,8,64
        scale_feature = scale_feature.permute(0, 3, 4, 1, 2).contiguous()  # b,M,64,h,w
        scale_feature = scale_feature.view(-1, self.d_k, h, w)  # bM,64,h,w
        k_features = []
        sample_grid_lst = []
        for k in range(self.k_nums):
            points = key_pixel_location + offset[:, 0, k, :, :]
            # Normalize pixel coords to [-1, 1] using the (size - 1) convention,
            # which corresponds to grid_sample's align_corners=True.
            grid_x = 2.0 * points[:, :, 0] / max(w - 1, 1) - 1.0
            grid_y = 2.0 * points[:, :, 1] / max(h - 1, 1) - 1.0
            sample_grid = torch.stack((grid_x, grid_y), dim=2).unsqueeze(1)  # BM,1,25,2
            sample_grid_lst.append(points)
            # F.grid_sample: https://zhuanlan.zhihu.com/p/112030273
            # BUGFIX: align_corners must be True to match the (size - 1) grid
            # normalization above; modern PyTorch defaults to False otherwise.
            features = F.grid_sample(scale_feature, sample_grid, mode='bilinear', padding_mode='zeros', align_corners=True).squeeze(2)  # sample features at the offset points
            k_features.append(features)
        sample_grid_lst = torch.stack(sample_grid_lst, dim=1).squeeze(2).permute(0, 2, 1, 3)
        k_features = torch.stack(k_features, dim=1)  # BM,K,d_k,25
        """ 
        Channel-context modeling via self-attention over the sampled features.
        So far this change has not shown a clear benefit — possibly because it is
        computed per head and each head's dimension is too small; the heads may
        need to be merged before computing.
        """
        k_features_ = k_features.permute(0, 3, 2, 1)  # BM,25,d_k,K   (K is the last dim; computed per head here — alternatively merge into a single head)
        k_features_embed = self.channel_context_embed(k_features_)  # BM,25,d_k,256
        channel_impact_on_channel = F.softmax(torch.matmul(k_features_embed, k_features_embed.permute(0, 1, 3, 2)), dim=-1)  # BM,25,d_k,256 x BM,25,256,d_k  -> BM,25,d_k,d_k
        k_features_gather = torch.matmul(channel_impact_on_channel, k_features_).permute(0, 3, 2, 1)  # BM,25,d_k,K -> BM,K,d_k,25
        k_features = k_features + k_features_gather  # residual connection for faster convergence and training stability

        scale_features = k_features.unsqueeze(1)  # bM,L,K,d_k,25
        scale_features = scale_features.permute(0, 4, 3, 1, 2).contiguous()  # BM,25,d_k,L,K
        scale_features = scale_features.view(batch_size * self.heads, self.max_len_labels, self.d_k, -1)  # bM,25,d_k,LK

        features = torch.einsum('nlds,nls -> nld', scale_features, atten)  # https://blog.csdn.net/beilizhang/article/details/114631973
        # BM,25,d_k,LK   BM,25,LK  -> BM,25,d_k

        sample_grid_lst[:, :, :, 0] = torch.clamp(sample_grid_lst[:, :, :, 0], 0, w - 1)
        sample_grid_lst[:, :, :, 1] = torch.clamp(sample_grid_lst[:, :, :, 1], 0, h - 1)

        # atten_map = self.atten_vis(sample_grid_lst, atten, batch_size, w, h)

        features = features.view(batch_size, self.heads, self.max_len_labels, self.d_k)

        features = features.permute(0, 2, 1, 3).contiguous()  # b,25,8,64

        features = features.view(batch_size, self.max_len_labels, self.d_k * self.heads)  # b,25,512

        pvam_features = self.wm_proj(features)

        if self.dropout:
            pvam_features = self.droplayer(pvam_features)

        final_out = self.cls_op(pvam_features)  # b,25,97
        return final_out

    @staticmethod
    def restore_scale(width, height, ref_point):
        """
        Map normalized reference points in [0, 1] to pixel coordinates.
        """
        new_point = ref_point.clone().detach()
        new_point[..., 0] = new_point[..., 0] * (width - 1)
        new_point[..., 1] = new_point[..., 1] * (height - 1)
        return new_point

    @staticmethod
    def atten_vis(sample_grid, atten, batch_size, width, height):
        """
        Build per-position attention heat maps for visualization.

        sample_grid: BM,seq_len,K,2  — sampled pixel coordinates
        atten: BM,seq_len,K          — attention weights per sampling point
        Returns a numpy array of shape (batch_size, seq_len, height, width).
        """
        bm, seq_len, k = atten.size()
        m = bm // batch_size
        atten = atten.view(batch_size, -1, seq_len, k).detach().cpu().numpy()  # b,m,l,k

        main_vector_idx = np.argsort(atten, axis=-1)

        # print(main_vector_idx.shape)

        # print(atten[0,0,0,:])
        sample_grid = sample_grid.view(batch_size, -1, seq_len, k, 2).detach().cpu().numpy()  # b,m,l,k,2
        atten_map = np.zeros((batch_size, m, seq_len, height, width))
        for b_idx in range(batch_size):
            for m_idx in range(m):
                for s_idx in range(seq_len):

                    # Only plot the strongest sampling point per position.
                    for k_idx in main_vector_idx[b_idx, m_idx, s_idx, -1:]:
                        # for k_idx in range(k):
                        atten_map[b_idx, m_idx, s_idx, int(sample_grid[b_idx, m_idx, s_idx, k_idx, 1]), int(sample_grid[b_idx, m_idx, s_idx, k_idx, 0])] = atten[b_idx, m_idx, s_idx, k_idx]
        # atten_map = atten_map[:,2,:,:,:]
        atten_map = np.mean(atten_map, axis=1)  # b,seq_len,height,width
        return atten_map


class AttentionUnit(nn.Module):
    """Additive (Bahdanau-style) attention scorer.

    Given a feature sequence and the previous decoder state, produces one
    normalized attention weight per time step.
    """

    def __init__(self, sDim, xDim, attDim):
        super(AttentionUnit, self).__init__()

        self.sDim = sDim
        self.xDim = xDim
        self.attDim = attDim

        self.sEmbed = nn.Linear(sDim, attDim)
        self.xEmbed = nn.Linear(xDim, attDim)
        self.wEmbed = nn.Linear(attDim, 1)

        # self.init_weights()

    def init_weights(self):
        # Small-variance Gaussian weights and zero biases for every projection.
        for layer in (self.sEmbed, self.xEmbed, self.wEmbed):
            init.normal_(layer.weight, std=0.01)
            init.constant_(layer.bias, 0)

    def forward(self, x, sPrev):
        """
        x: [b, T, xDim] feature sequence (keys/values).
        sPrev: [1, b, sDim] previous decoder hidden state (query).
        Returns: [b, T] attention weights (softmax over T).
        """
        # nn.Linear operates on the last dimension, so the feature sequence can
        # be projected directly without a flatten/reshape round-trip.
        xProj = self.xEmbed(x)  # [b, T, attDim]

        # Project the state and broadcast it across all T time steps.
        state = sPrev.squeeze(0)  # [b, sDim]
        sProj = self.sEmbed(state).unsqueeze(1)  # [b, 1, attDim]

        scores = self.wEmbed(torch.tanh(sProj + xProj))  # [b, T, 1]
        scores = scores.squeeze(-1)  # [b, T]

        # One weight per time step for each sample in the minibatch.
        alpha = F.softmax(scores, dim=1)
        return alpha


class DecoderUnit(nn.Module):
    """Single step of an attention-conditioned GRU decoder."""

    def __init__(self, sDim, xDim, yDim, attDim):
        super(DecoderUnit, self).__init__()
        self.sDim = sDim
        self.xDim = xDim
        self.yDim = yDim
        self.attDim = attDim
        self.emdDim = attDim

        self.attention_unit = AttentionUnit(sDim, xDim, attDim)
        # yDim + 1 rows: the extra (last) index is reserved for <BOS>.
        self.tgt_embedding = nn.Embedding(yDim + 1, self.emdDim)
        self.gru = nn.GRU(input_size=xDim + self.emdDim, hidden_size=sDim, batch_first=True)
        self.fc = nn.Linear(sDim, yDim)

        # self.init_weights()

    def init_weights(self):
        # Gaussian init for embedding and classifier; zero classifier bias.
        init.normal_(self.tgt_embedding.weight, std=0.01)
        init.normal_(self.fc.weight, std=0.01)
        init.constant_(self.fc.bias, 0)

    def forward(self, x, sPrev, yPrev):
        """
        x: [b, T, xDim] feature sequence from the image decoder (keys/values).
        sPrev: GRU hidden state from the previous step (query).
        yPrev: [b] previously emitted token ids.
        Returns (logits [b, yDim], new hidden state).
        """
        # Score each feature position against the current decoder state.
        alpha = self.attention_unit(x, sPrev)  # [b, T]
        # Attention-weighted sum of features -> context vector.
        context = torch.bmm(alpha.unsqueeze(1), x).squeeze(1)  # [b, xDim]
        # Embed the token emitted at the previous step.
        yProj = self.tgt_embedding(yPrev.long())  # [b, emdDim]
        # self.gru.flatten_parameters()
        gru_in = torch.cat([yProj, context], 1).unsqueeze(1)  # [b, 1, emdDim + xDim]
        output, state = self.gru(gru_in, sPrev)
        step_out = output.squeeze(1)  # [b, sDim]

        logits = self.fc(step_out)  # [b, yDim]
        return logits, state
