# from backbone import Resnet50

from hybrid_encoder import ConvNormLayer, MLP
import torch
import torch.nn as nn
import torch.nn.init as init
import math
from utils import deformable_attention_core_func, inverse_sigmoid, bias_init_with_prob
import torch.nn.functional as F
import torch.nn.init as init

class RTDETRTransformer(nn.Module):
    """RT-DETR decoder head.

    Flattens the three encoder feature maps into a shared memory, selects the
    ``num_queries`` highest-scoring positions as object queries, and refines
    their boxes/logits with a deformable transformer decoder.
    """

    def __init__(self, eps=1e-2, num_queries=300):
        """
        Args:
            eps: anchors whose normalized coords fall within ``eps`` of the
                [0, 1] border are marked invalid.
            num_queries: number of top-scoring encoder positions kept as queries.
        """
        super().__init__()
        self.eps = eps
        self.num_queries = num_queries
        # One projection per feature level; all levels share 256 channels.
        self.convnorm = nn.ModuleList([
            ConvNormLayer(in_channel=256, out_channel=256) for _ in range(3)])
        self.linear1 = nn.Linear(256, 256)
        self.norm1 = nn.LayerNorm(256)
        self.cls_head = nn.Linear(256, 80)   # encoder-side class scores (80 classes)
        self.box_head = MLP(256, 256, 4, 3)  # encoder-side box offsets (cxcywh logits)
        # Maps a 4-d reference box to a 256-d query positional embedding.
        self.shared_box_head = MLP(4, 2 * 256, 256, num_layers=2)
        self.decoder = TransformerDecoder()
        self._set_parameters()

    def _set_parameters(self):
        # Focal-loss-style bias so the initial foreground probability is ~0.01.
        bias = bias_init_with_prob(0.01)
        init.xavier_uniform_(self.linear1.weight)
        init.xavier_uniform_(self.shared_box_head.layers[0].weight)
        init.xavier_uniform_(self.shared_box_head.layers[1].weight)

        init.constant_(self.cls_head.bias, bias)
        # Zero-init the last box layer so initial predictions coincide with the anchors.
        init.constant_(self.box_head.layers[-1].weight, 0)
        init.constant_(self.box_head.layers[-1].bias, 0)

    def forward(self, x):
        """x: list of 3 feature maps [bs, 256, H_l, W_l].

        Returns a list of per-layer prediction dicts ({'pred_box', 'pred_logits'})
        with the encoder-side predictions appended last.
        """
        fea_flatten = []
        for i in range(3):
            # [bs, 256, H, W] -> [bs, H*W, 256]
            fea_flatten.append(self.convnorm[i](x[i]).flatten(2).permute(0, 2, 1))

        fea_cat = torch.concat(fea_flatten, dim=1)
        device = fea_cat.device
        anchors, valid_mask = self._generate_anchors()
        anchors = anchors.to(device)
        valid_mask = valid_mask.to(device)
        # Zero out memory at positions whose anchors fall outside the valid range.
        fea_cat = torch.where(valid_mask, fea_cat, 0.)
        output_memory = self.norm1(self.linear1(fea_cat))
        enc_topk_boxes, enc_topk_logits, target, \
            reference_points_detach, reference_points_input = self._get_decoder_input(output_memory, anchors)
        dec_out = self.decoder(target,
                               reference_points_detach,
                               reference_points_input,
                               output_memory,
                               self.shared_box_head)
        dec_out.append({'pred_box': enc_topk_boxes, 'pred_logits': enc_topk_logits})
        return dec_out

    def _get_decoder_input(self, output_memory, anchors):
        """Pick the top-``num_queries`` memory positions as decoder input.

        Returns encoder boxes/logits for the selected positions plus the
        (detached) reference points that seed the decoder.
        """
        enc_output_class = self.cls_head(output_memory)
        # Rank positions by their best class score.
        _, topk_ind = torch.topk(enc_output_class.max(-1).values, self.num_queries, dim=1)
        offsets = self.box_head(output_memory)
        enc_outputs_coord_unact = offsets + anchors  # logit-space boxes
        reference_points_unact = enc_outputs_coord_unact.gather(dim=1, \
            index=topk_ind.unsqueeze(-1).repeat(1, 1, enc_outputs_coord_unact.shape[-1]))

        enc_topk_boxes = torch.sigmoid(reference_points_unact)
        # Decoder references are detached so the decoder does not back-prop
        # into the encoder's query selection.
        reference_points_detach = enc_topk_boxes.detach()
        ref_points_input = reference_points_detach.unsqueeze(2)  # add level axis
        enc_topk_logits = enc_output_class.gather(dim=1, \
            index=topk_ind.unsqueeze(-1).repeat(1, 1, enc_output_class.shape[-1]))

        target = output_memory.gather(dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, output_memory.shape[-1]))

        return enc_topk_boxes, enc_topk_logits, target, reference_points_detach, ref_points_input

    def _generate_anchors(self,
                          spatial_shapes=((60, 60), (30, 30), (15, 15)),  # 640 input: ((80, 80), (40, 40), (20, 20)); 480: ((60, 60), (30, 30), (15, 15))
                          grid_size=0.05,
                          dtype=torch.float32,
                          ):
        """Build per-level cxcywh anchors in inverse-sigmoid (logit) space.

        Default changed from a list to a tuple to avoid the mutable-default-
        argument pitfall; accepted argument values are unchanged.

        Returns:
            anchors: [1, sum(H*W), 4] logit-space anchors, +inf where invalid.
            valid_mask: [1, sum(H*W), 1] bool mask of anchors strictly inside
                (eps, 1 - eps) in every coordinate.
        """
        anchors = []
        # Relative (normalized) cxcywh anchors for each feature level.
        for lvl, (h, w) in enumerate(spatial_shapes):
            grid_y, grid_x = torch.meshgrid(
                torch.arange(end=h, dtype=dtype),
                torch.arange(end=w, dtype=dtype), indexing='ij')
            grid_xy = torch.stack([grid_x, grid_y], -1)
            valid_WH = torch.tensor([w, h]).to(dtype)
            grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_WH  # cell centers in [0, 1]
            # Base anchor size doubles at each coarser level.
            wh = torch.ones_like(grid_xy) * grid_size * (2.0 ** lvl)
            anchors.append(torch.concat([grid_xy, wh], -1).reshape(-1, h * w, 4))

        anchors = torch.concat(anchors, 1)
        valid_mask = ((anchors > self.eps) * (anchors < 1 - self.eps)).all(-1, keepdim=True)
        anchors = torch.log(anchors / (1 - anchors))  # inverse sigmoid
        # Invalid anchors become +inf so downstream sigmoid saturates to 1
        # and their features are masked out in forward().
        anchors = torch.where(valid_mask, anchors, torch.inf)

        return anchors, valid_mask



class TransformerDecoder(nn.Module):
    """Stack of decoder layers with per-layer box/score heads.

    Implements the "look forward twice" refinement scheme: each layer's box
    prediction back-propagates through the previous layer's (non-detached)
    boxes, while the layer *input* references are detached.
    """

    def __init__(self, num_dec_layers=6, hidden_dim=256):
        super().__init__()
        self.num_dec_layers = num_dec_layers
        # One box-regression MLP and one classification head per decoder layer.
        self.dec_bbox_head = nn.ModuleList([
            MLP(hidden_dim, hidden_dim, 4, num_layers=3)
            for _ in range(num_dec_layers)
        ])
        self.dec_score_head = nn.ModuleList([
            nn.Linear(hidden_dim, 80) for _ in range(num_dec_layers)
        ])
        self.dec_layers = nn.ModuleList([
            DecoderLayer() for _ in range(num_dec_layers)
        ])
        self._set_parameters()

    def _set_parameters(self):
        # Focal-loss-style bias init for every score head; zero-init the final
        # regression layer so each layer starts by predicting its reference box.
        bias = bias_init_with_prob(0.01)
        for cls_, reg_ in zip(self.dec_score_head, self.dec_bbox_head):
            init.constant_(cls_.bias, bias)
            init.constant_(reg_.layers[-1].bias, 0)
            init.constant_(reg_.layers[-1].weight, 0)

    def forward(self, target,
                reference_points_detach,
                reference_points_input,
                output_memory,
                shared_box_head):
        """Run all decoder layers, collecting per-layer predictions.

        Args:
            target: top-k encoder features used as initial queries.
            reference_points_detach: detached sigmoid-space reference boxes.
            reference_points_input: same references with a level axis added.
            output_memory: flattened encoder memory for cross-attention.
            shared_box_head: MLP mapping a reference box to a query pos-embedding.

        Returns:
            list of {'pred_box', 'pred_logits'} dicts, one per layer.
        """
        query_pos_embed = shared_box_head(reference_points_detach)
        dec_out = []

        for i in range(self.num_dec_layers):
            target = self.dec_layers[i](query_pos_embed,
                                        reference_points_input,
                                        target,
                                        output_memory)
            # "Look forward twice": the first layer refines the detached initial
            # references; later layers refine the previous layer's boxes WITHOUT
            # detaching, so each prediction's gradient reaches two layers back.
            dec_out_logits = self.dec_score_head[i](target)
            if i == 0:
                dec_out_bbox = torch.sigmoid(self.dec_bbox_head[i](target) +
                                             inverse_sigmoid(reference_points_detach))
            else:
                dec_out_bbox = torch.sigmoid(self.dec_bbox_head[i](target) + inverse_sigmoid(reference_points))
            reference_points = dec_out_bbox
            reference_points_detach = dec_out_bbox.detach()
            reference_points_input = reference_points_detach.unsqueeze(2)
            # Recompute the query positional embedding from the refined
            # (detached) boxes for the next layer.
            query_pos_embed = shared_box_head(reference_points_detach)
            dec_out.append({'pred_box': dec_out_bbox, 'pred_logits': dec_out_logits})

        return dec_out



class DecoderLayer(nn.Module):
    """One RT-DETR decoder layer: self-attention -> deformable cross-attention -> FFN."""

    def __init__(self):
        super().__init__()
        # BUGFIX: inputs here are [bs, num_queries, dim], but the previous
        # nn.MultiheadAttention(256, 8) defaulted to batch_first=False, which
        # treats dim 0 as the sequence axis and attends across the *batch*
        # instead of across the queries. batch_first=True matches the data layout.
        self.self_attn = nn.MultiheadAttention(256, 8, batch_first=True)
        self.cross_attn = MSDeformableAttention()
        self.norm1 = nn.LayerNorm(256)
        self.norm2 = nn.LayerNorm(256)
        self.norm3 = nn.LayerNorm(256)
        self.linear1 = nn.Linear(256, 1024)
        self.linear2 = nn.Linear(1024, 256)
        self.act = nn.ReLU()

    def with_pos_embed(self, query_pos_embed, target):
        # Add the query positional embedding to the content features.
        return query_pos_embed + target

    def forward(self, query_pos_embed, ref_points_input, target, output_memory):
        # Self-attention among queries (positions added to q/k only, value is content).
        q = k = self.with_pos_embed(query_pos_embed, target)
        out, _ = self.self_attn(q, k, value=target)
        residual = self.norm1(out + target)
        # Deformable cross-attention into the encoder memory.
        query = query_pos_embed + residual
        output = self.cross_attn(query,
                                 ref_points_input,
                                 output_memory,
                                 [(60, 60), (30, 30), (15, 15)])  # 640 input: [(80, 80), (40, 40), (20, 20)]; 480: [(60, 60), (30, 30), (15, 15)]
        residual = self.norm2(residual + output)
        # Position-wise feed-forward network.
        out = self.act(self.linear1(residual))
        out = self.linear2(out)
        target = self.norm3(out + residual)
        return target
        
    


class MSDeformableAttention(nn.Module):
    def __init__(self, embed_dim=256, num_heads=8, num_levels=3, num_points=4,):
        """
        Multi-Scale Deformable Attention Module

        Args:
            embed_dim: channel dimension of queries/values.
            num_heads: attention heads (embed_dim must be divisible by it).
            num_levels: number of feature pyramid levels.
            num_points: sampling points per head per level.
        """
        super(MSDeformableAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.num_levels = num_levels
        self.num_points = num_points
        self.total_points = num_heads * num_levels * num_points

        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"

        self.sampling_offsets = nn.Linear(embed_dim, self.total_points * 2,)
        self.attention_weights = nn.Linear(embed_dim, self.total_points)
        self.value_proj = nn.Linear(embed_dim, embed_dim)
        self.output_proj = nn.Linear(embed_dim, embed_dim)

        self.ms_deformable_attn_core = deformable_attention_core_func

        self._reset_parameters()

    def _reset_parameters(self):
        # sampling_offsets: zero weights; biases spread the initial sampling
        # points on a ring (one direction per head, radius growing per point).
        init.constant_(self.sampling_offsets.weight, 0)
        thetas = torch.arange(self.num_heads, dtype=torch.float32) * (2.0 * math.pi / self.num_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = grid_init / grid_init.abs().max(-1, keepdim=True).values
        grid_init = grid_init.reshape(self.num_heads, 1, 1, 2).tile([1, self.num_levels, self.num_points, 1])
        scaling = torch.arange(1, self.num_points + 1, dtype=torch.float32).reshape(1, 1, -1, 1)
        grid_init *= scaling
        self.sampling_offsets.bias.data[...] = grid_init.flatten()

        # attention_weights: start from a uniform (all-zero logits) distribution.
        init.constant_(self.attention_weights.weight, 0)
        init.constant_(self.attention_weights.bias, 0)

        # proj
        init.xavier_uniform_(self.value_proj.weight)
        init.constant_(self.value_proj.bias, 0)
        init.xavier_uniform_(self.output_proj.weight)
        init.constant_(self.output_proj.bias, 0)

    def forward(self,
                query,
                reference_points,
                value,
                value_spatial_shapes,
                value_mask=None):
        """
        Args:
            query (Tensor): [bs, query_length, C]   2x300x256
            reference_points (Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),
                bottom-right (1, 1), including padding area         2x300x1x4
            value (Tensor): [bs, value_length, C]           2x8400x256
            value_spatial_shapes (List): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})] [(80, 80), (40, 40), (20, 20)]
            value_level_start_index (List): [n_levels], [0, H_0*W_0, H_0*W_0+H_1*W_1, ...] [0, 6400, 8000]
            value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements

        Returns:
            output (Tensor): [bs, Length_{query}, C]
        """
        bs, Len_q = query.shape[:2]
        Len_v = value.shape[1]

        value = self.value_proj(value)
        if value_mask is not None:
            # BUGFIX: torch tensors have no .astype (that is NumPy/Paddle API);
            # use .to(dtype) to cast the mask before zeroing padded positions.
            value_mask = value_mask.to(value.dtype).unsqueeze(-1)
            value *= value_mask
        value = value.reshape(bs, Len_v, self.num_heads, self.head_dim)

        sampling_offsets = self.sampling_offsets(query).reshape(
            bs, Len_q, self.num_heads, self.num_levels, self.num_points, 2)
        attention_weights = self.attention_weights(query).reshape(
            bs, Len_q, self.num_heads, self.num_levels * self.num_points)
        # Softmax over all (level, point) samples jointly, per head.
        attention_weights = F.softmax(attention_weights, dim=-1).reshape(
            bs, Len_q, self.num_heads, self.num_levels, self.num_points)

        if reference_points.shape[-1] == 2:
            # Point references: offsets are in pixels, normalize by (W, H) per level.
            # BUGFIX: build the normalizer with the query's dtype/device so CUDA
            # inputs do not hit a cpu/cuda mismatch.
            offset_normalizer = torch.tensor(
                value_spatial_shapes, dtype=query.dtype, device=query.device)
            offset_normalizer = offset_normalizer.flip([1]).reshape(
                1, 1, 1, self.num_levels, 1, 2)
            sampling_locations = reference_points.reshape(
                bs, Len_q, 1, self.num_levels, 1, 2
            ) + sampling_offsets / offset_normalizer
        elif reference_points.shape[-1] == 4:
            # Box references (cx, cy, w, h): offsets scale with the box size.
            sampling_locations = (
                reference_points[:, :, None, :, None, :2] + sampling_offsets /
                self.num_points * reference_points[:, :, None, :, None, 2:] * 0.5)
        else:
            raise ValueError(
                "Last dim of reference_points must be 2 or 4, but get {} instead.".
                format(reference_points.shape[-1]))

        output = self.ms_deformable_attn_core(value, value_spatial_shapes, sampling_locations, attention_weights)

        output = self.output_proj(output)

        return output


# if __name__ == '__main__':
#     backbone = Resnet50().to('cuda')
#     encoder = HybridEncoder().to('cuda')
#     x = torch.randn([2, 3, 640, 640]).to('cuda')
#     x = backbone(x)
#     out = encoder(x)
#     decoder = RTDETRTransformer().to('cuda')
#     out = decoder(out)
#     print(out)
