from __future__ import absolute_import

import math

import torch
from torch import nn
from torch.nn import functional as F


def generate_pixel_location(width, height):
    """Build a (height, width, 2) grid of normalized (x, y) reference coordinates.

    Each entry holds an (x, y) pair scaled to [0, 1], where x runs along the
    width axis and y along the height axis.  The returned grid carries no
    gradient (it is a fixed set of reference points).

    Args:
        width: number of columns in the feature map (int >= 1).
        height: number of rows in the feature map (int >= 1).

    Returns:
        torch.FloatTensor of shape (height, width, 2) with values in [0, 1].
    """
    grid_y, grid_x = torch.meshgrid(torch.arange(0, height), torch.arange(0, width))
    # Guard against division by zero on a degenerate 1-pixel axis, matching
    # the max(dim - 1, 1) convention used in _get_sample_features.
    grid_y = grid_y / max(height - 1, 1)
    grid_x = grid_x / max(width - 1, 1)
    grid = torch.stack((grid_x, grid_y), 2).float()
    grid.requires_grad = False
    return grid


def restore_scale(width, height, ref_point):
    """Map normalized reference points back to absolute pixel coordinates.

    The x component (index 0 of the last axis) is stretched to [0, width - 1]
    and the y component (index 1) to [0, height - 1].  The input tensor is
    left untouched; a detached copy is returned.

    Args:
        width: feature-map width in pixels.
        height: feature-map height in pixels.
        ref_point: (..., 2) tensor of relative (x, y) coordinates in [0, 1].

    Returns:
        A detached (..., 2) tensor of absolute (x, y) coordinates.
    """
    scaled = ref_point.detach().clone()
    scaled[..., 0] = scaled[..., 0] * (width - 1)
    scaled[..., 1] = scaled[..., 1] * (height - 1)
    return scaled


class Step_Generation_Attention(nn.Module):
    """Step-wise decoder that walks a learned path of sample points over a
    2-D feature map, reading one feature vector per step and emitting one
    character logit vector per step.

    At every decoding step the module:
      1. regresses a 2-D (x, y) offset from the current sampled feature plus
         a learned per-step position embedding,
      2. moves the reference point by that (clamped) offset,
      3. bilinearly samples the encoder feature map at the new point,
      4. feeds the previous character embedding through a 2-layer LSTM and
         classifies the concatenated [LSTM output, sampled feature] vector.
    """

    def __init__(self, n_dim=512, num_class=0, max_len=200):
        """
        Args:
            n_dim: channel width of the encoder features / LSTM hidden size.
            num_class: vocabulary size; the last index (num_class - 1) is
                used as the start token.
            max_len: maximum decoding length (size of the position table).
        """
        super(Step_Generation_Attention, self).__init__()
        # self.pool1 = nn.MaxPool2d((2, 1), (2, 1))  # reduce the size of the feature map
        # self.conv1 = nn.Conv2d(512, 256, 3, 2, 1)
        # self.global_op = nn.Linear(2 * 16 * 512, 512)
        self.offset_proj = nn.Linear(n_dim + n_dim, 2)  # regresses the (x, y) offset
        # self.offset_proj = nn.Linear(n_dim + n_dim + n_dim, 2)  # regresses the (x, y) offset
        # self.offset_lstm = nn.LSTM(n_dim,n_dim,batch_first=True)
        self.language_lstm = nn.LSTM(n_dim, n_dim, 2, batch_first=True)  # 2-layer character LSTM
        self.embed_op = nn.Embedding(num_class, n_dim)  # character embedding table
        self.cls_op = nn.Linear(n_dim + n_dim, num_class)  # classifier over [h_t, sampled feature]
        # self.cls_op = nn.Linear(n_dim + n_dim + n_dim, num_class)
        self.position_embedding = nn.Embedding(max_len, n_dim)  # learned per-step position embedding
        self.STA_index = num_class - 1  # start-token index (last class id)
        self.n_dim = n_dim
        # NOTE(review): device is fixed at construction time; moving the module
        # later with .to(...) will not update this attribute.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # self.pool_grid_feature = nn.MaxPool2d(2)
        self.pool_grid_feature = nn.AvgPool2d(4)  # average pooling (only used by the commented-out patch-sampling path)

    def _get_position_index(self, length, batch_size, device=None):
        """Return a (batch_size, length) LongTensor of step indices 0..length-1."""
        position_index = torch.arange(length, device=device)
        position_index = position_index.repeat([batch_size, 1])
        position_index = position_index.long()
        return position_index

    def _get_sample_features(self, points, features, w, h):
        """Bilinearly sample one feature vector per batch element at `points`.

        Args:
            points: (b, 2) absolute (x, y) coordinates on the feature map.
            features: (b, c, h, w) encoder feature map.
            w, h: feature-map width and height used for normalization.

        Returns:
            (b, c) sampled feature vectors.
        """
        # sample_grid = torch.zeros((points.shape[0], 4, 4, points.shape[1]), dtype=features.dtype).to(points.device)  # sample a 4x4 patch, i.e. 16 points
        
        # # sample the points in left-to-right, top-to-bottom order
        # for i in range(4):
        #     for j in range(4):
        #         x = points[:, 0] if j == 0 else points[:, 0] + j
        #         y = points[:, 1] if i == 0 else points[:, 1] + i
        #         # normalize the coordinates into [-1, 1]
        #         sample_grid[:, i, j, 0] = 2.0 * x / max(w - 1, 1) - 1.0
        #         sample_grid[:, i, j, 1] = 2.0 * y / max(h - 1, 1) - 1.0


        # Map absolute coordinates into grid_sample's [-1, 1] range.
        grid_x = 2.0 * points[:, 0] / max(w - 1, 1) - 1.0
        grid_y = 2.0 * points[:, 1] / max(h - 1, 1) - 1.0
        sample_grid = torch.stack((grid_x, grid_y), dim=1).unsqueeze(1).unsqueeze(1)  # (b, 1, 1, 2)
        sample_features = F.grid_sample(features, sample_grid, mode='bilinear', padding_mode='zeros', align_corners=False)  # TODO: sample a small patch instead of a single point
        # sample_features = self.pool_grid_feature(sample_features)
        sample_features = sample_features.squeeze(-1).squeeze(-1)  # (b, n_dim)
        return sample_features

    def forward(self, encoder_features, targets=None, lengths=None, init_max_len=100):
        """Decode a character sequence by stepping a sample point across the map.

        Args:
            encoder_features: (b, c, h, w) encoder feature map.
            targets: (b, >=max_len) ground-truth character indices; read for
                teacher forcing when self.training is True.
            lengths: per-sample target lengths; max(lengths) sets the number
                of decoding steps when provided.
            init_max_len: fallback number of decoding steps when lengths is None.

        Returns:
            outputs_prob: (b, seq_len, num_class) per-step classification logits.
            step_points_vis: (b, seq_len, 2) visited (x, y) points, kept for
                visualization.
        """
        b, c, h, w = encoder_features.size()
        init_pixel_location_relative = torch.tensor([0.0, 0.5]).unsqueeze(0).repeat(b, 1).to(self.device)  # b,2  midpoint of the map's left edge, in relative [0, 1] coords
        init_pixel_location_positive = restore_scale(w, h, init_pixel_location_relative)  # the corresponding absolute position on the feature map
        max_len = max(lengths) if lengths != None else init_max_len  # NOTE(review): prefer `lengths is not None`

        # get global feature info
        # global_feature = self.pool1(encoder_features)  # [b,c,h/2,w]
        # global_feature = self.conv1(encoder_features)  # reduce dimensionality with a conv [b,c/2,h/2,w/2]
        # global_feature = global_feature.view(b, -1).contiguous()  # [b, c*h/2*w]
        # global_feature = self.global_op(global_feature)  # [b, 512]
        # global_feature = global_feature.unsqueeze(1).repeat(1, max_len, 1)  # [b,max_len,512]

        position_index = self._get_position_index(max_len, b, self.device)  # indices for the position-aware embedding
        # print(position_index.size())
        position_embedding = self.position_embedding(position_index)
        # position_embedding = torch.cat([position_embedding, global_feature], dim=-1)
        points = init_pixel_location_positive  # [b,2]
        current_features = self._get_sample_features(points, encoder_features, w, h)  # [b,512]
        start_token = torch.zeros((b)).fill_(self.STA_index).to(self.device).long()
        state = (torch.FloatTensor(2, b, self.n_dim).fill_(0).to(self.device), torch.FloatTensor(2, b, self.n_dim).fill_(0).to(self.device))
        character_embed = self.embed_op(start_token)
        outputs_prob = []
        step_points_vis = []
        for step in range(max_len):
            offset = self.offset_proj(torch.cat([current_features, position_embedding[:, step, :]], dim=-1))  # [b,2]  TODO: maybe fold in global info / constrain the offset with a loss
            offset = torch.clamp(offset, -1.5, 12)  # limit the step: vertical may shift normally, horizontal should not move further back (NB: clamp applies to both coords)

            # print(offset)
            # offset = self.offset_proj(current_features)
            # offset = torch.clamp(offset,-16,16)
            # offset[:,0] = torch.clamp(offset[:,0],0,w)    # restrict the offset: horizontal may only move right
            # offset[:,1] = torch.clamp(offset[:,1],-h,h)   # vertical may move up or down
            points = points + offset
            points_x = points[:, 0].clone()
            points_y = points[:, 1].clone()

            # TODO: consider normalizing instead? clamping directly is a bit crude
            # sample a 2x2 patch; points[:, 0], points[:, 1] are the (x, y) of the patch's top-left corner
            points[:, 0] = torch.clamp(points_x, 0, w - 1)
            points[:, 1] = torch.clamp(points_y, 0, h - 1)

            # print(points[0,:])
            current_features = self._get_sample_features(points, encoder_features, w, h)
            # x_t = torch.cat([character_embed],dim=-1) #b,1024
            x_t = character_embed
            h_t, state = self.language_lstm(x_t.unsqueeze(1), state)  # h:[b,1,512], state:([2,b,512], [2,b,512])
            # print(h_t.size())
            concat_features = torch.cat([h_t.squeeze(1), current_features], dim=-1)  # [b, 2*n_dim]
            logits = self.cls_op(concat_features)  # [b, num_class]
            if self.training:
                # teacher forcing: feed the ground-truth character at this step
                next_char = targets[:, step].long()
            else:
                # greedy decoding at inference time
                next_char = logits.argmax(-1)
            character_embed = self.embed_op(next_char)
            outputs_prob.append(logits)
            step_points_vis.append(points)
        outputs_prob = torch.cat([item.unsqueeze(1) for item in outputs_prob], 1)  # b,seq_len,n_class
        step_points_vis = torch.cat([item.unsqueeze(1) for item in step_points_vis], 1)
        return outputs_prob, step_points_vis


if __name__ == "__main__":
    # Quick sanity check: convert one absolute pixel coordinate into the
    # [-1, 1] range expected by F.grid_sample and print the result.
    feat_w = 25
    feat_h = 4
    point = torch.tensor([0, 1.5]).unsqueeze(0)
    norm_x = 2.0 * point[:, 0] / max(feat_w - 1, 1) - 1.0
    norm_y = 2.0 * point[:, 1] / max(feat_h - 1, 1) - 1.0
    normalized = torch.stack((norm_x, norm_y), dim=1)
    print(normalized)

# if __name__ == '__main__':
#   width = 10
#   height = 6
#   # grid = generate_pixel_location(width,height)
#   # print(grid)
#   b = 4
#   init = torch.tensor([0,0.5]).unsqueeze(0).repeat(b,1)
#   key = restore_scale(width,height,init)
#   print(key)
