import torch
import torch.nn as nn
import torch.nn.functional as F
import math

from torch.nn.init import xavier_uniform_, constant_


class FeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> GELU -> Dropout -> Linear -> Dropout.

    Args:
        embed_dims (int): Input and output feature dimension.
        dropout (float): Dropout probability applied after the activation and
            after the second projection.
        hidden_dims (int, optional): Width of the intermediate layer. Defaults
            to ``embed_dims * 4`` (the original hard-coded expansion ratio),
            so existing callers are unaffected.
    """

    def __init__(self, embed_dims, dropout=0.1, hidden_dims=None):
        super(FeedForward, self).__init__()
        # Keep the conventional 4x expansion as the default for backward compatibility.
        if hidden_dims is None:
            hidden_dims = embed_dims * 4
        self.linear1 = nn.Linear(embed_dims, hidden_dims)
        self.activation = nn.GELU()
        self.linear2 = nn.Linear(hidden_dims, embed_dims)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Apply the MLP; output shape matches input shape (..., embed_dims)."""
        x = self.linear1(x)
        x = self.activation(x)
        x = self.dropout(x)
        x = self.linear2(x)
        x = self.dropout(x)
        return x
    

class DeformableAttention(nn.Module):
    def __init__(self, embed_dims, group=1, num_heads=8, dropout=0.1, num_levels=1, num_points=4):
        super(DeformableAttention, self).__init__()
        self.embed_dims = embed_dims
        self.group = group
        self.num_heads = num_heads
        self.num_levels = num_levels
        self.num_points = num_points
        
        if embed_dims % num_heads != 0 or (embed_dims // num_heads) % group != 0:
            raise ValueError(f"embed_dims ({embed_dims}) must be divisible by num_heads ({num_heads})")
        self.dim_per_head = (embed_dims // num_heads) // group

        self.sampling_offsets = nn.Linear(embed_dims, group * num_heads * num_levels * num_points * 2)
        self.attention_weights = nn.Linear(embed_dims, num_heads * num_levels * num_points)
        self.value_proj = nn.Linear(embed_dims, embed_dims)
        self.output_proj = nn.Linear(embed_dims, embed_dims // group)
        self.dropout = nn.Dropout(dropout)

        self.init_weights()

    def init_weights(self):
        constant_(self.sampling_offsets.weight.data, 0.)
        thetas = torch.arange(self.num_heads, dtype=torch.float32) * (2.0 * math.pi / self.num_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.num_heads, 1, 1, 2).unsqueeze(0).repeat(self.group, 1, self.num_levels, self.num_points, 1)
        for i in range(self.num_points):
            grid_init[:, :, :, i, :] *= i + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
        constant_(self.attention_weights.weight.data, 0.)
        constant_(self.attention_weights.bias.data, 0.)
        xavier_uniform_(self.value_proj.weight.data)
        constant_(self.value_proj.bias.data, 0.)
        xavier_uniform_(self.output_proj.weight.data)
        constant_(self.output_proj.bias.data, 0.)

    def forward(self,
                query,  # (bs, query_len, embed_dims)
                value,  # (bs, value_len, embed_dims / group)
                ref_point,  # (1, query_len, 2)
                pos_encoding=None,  # (query_len, embed_dims / group)
                return_weight = False
                ):
        
        bs, query_len, embed_dims = query.shape
        value_len = value.size(1)

        assert self.embed_dims == embed_dims and \
            self.embed_dims == value.size(2) and \
            query_len == ref_point.size(1) and \
            bs == value.size(0) and \
            ref_point.size(0) == 1, "shape error."

        if pos_encoding is not None:
            assert query_len == pos_encoding.size(0), "shape error."
            pos_encoding = pos_encoding.unsqueeze(0).repeat(1, 1, self.group)
            query = query + pos_encoding  # (bs, query_len, embed_dims)

        # Compute sampling offsets
        sampling_offsets = self.sampling_offsets(query)  # (bs, query_len, group * num_heads * num_levels * num_points * 2)
        sampling_offsets = sampling_offsets.view(bs, query_len, -1, 2)  # (bs, query_len, group * num_heads * num_levels * num_points, 2)
        sampling_offsets = sampling_offsets.permute(0, 2, 1, 3).contiguous().view(-1, query_len, 2)  # (bs * group * num_heads * num_levels * num_points, query_len, 2)
        sampling_offsets = torch.tanh(sampling_offsets) / 4  # [-0.25, 0.25]

        # Generate sampling locations
        sampling_locations = ref_point + sampling_offsets
        sampling_locations = sampling_locations.unsqueeze(2)  # (bs * group * num_heads * num_levels * num_points, query_len, 1, 2)

        # Compute attention scores
        attention_weights = self.attention_weights(query)  # (bs, query_len, num_heads * num_levels * num_points)

        if return_weight:
            return attention_weights
        
        attention_weights = attention_weights.view(bs, query_len, self.num_heads, self.num_levels * self.num_points)
        attention_weights = F.softmax(attention_weights, -1)
        attention_weights = attention_weights.permute(0, 2, 1, 3).contiguous().unsqueeze(3)  # (bs, num_heads, query_len, 1, num_levels * num_points)

        # Project Value
        V = self.value_proj(value)  # (bs, value_len, embed_dims)
        V = V.view(bs, value_len, self.num_heads, self.dim_per_head, self.group)
        V = V.permute(0, 4, 2, 3, 1).contiguous().unsqueeze(3).repeat(1, 1, 1, self.num_levels * self.num_points, 1, 1)  # (bs, group, num_heads, num_levels * num_points, dim_per_head, value_len)
        V = V.view(-1, self.dim_per_head, value_len).unsqueeze(-1)  # (bs * group * num_heads * num_levels * num_points, dim_per_head, value_len, 1)
        
        sampled_V = F.grid_sample(V, sampling_locations, align_corners=True)  # (bs * group * num_heads * num_levels * num_points, dim_per_head, query_len, 1)
        sampled_V = sampled_V.view(bs, self.group, self.num_heads, self.num_levels * self.num_points, self.dim_per_head, query_len)
        sampled_V = sampled_V.permute(0, 2, 5, 3, 4, 1).contiguous()  # (bs, num_heads, query_len, num_levels * num_points, dim_per_head, group)
        sampled_V = sampled_V.view(bs, self.num_heads, query_len, self.num_levels * self.num_points, self.dim_per_head * self.group)

        # Compute output by weighted sum of V
        output = torch.matmul(attention_weights, sampled_V)  # (bs, num_heads, query_len, 1, dim_per_head * group)
        output = output.permute(0, 2, 3, 1, 4).contiguous().view(bs, query_len, -1)  # (bs, query_len, embed_dims)

        # Apply output projection and dropout
        output = self.output_proj(output)  # (bs, query_len, embed_dims / group)
        output = self.dropout(output)

        return output
    