import torch
import torch.nn as nn
import torch.nn.functional as F
import math

from torch.nn.init import xavier_uniform_, constant_

class BEVFormer(nn.Module):
    """Temporal self-attention over bird's-eye-view (BEV) feature maps.

    Keeps a short internal history (``self.bev_list``) of BEV features and
    lets a learnable BEV query attend, via deformable sampling, to the
    channel-concatenation of historical and current BEV maps
    (BEVFormer-style temporal self-attention).

    Args:
        embed_dims: channel width of a single BEV feature map.
        num_heads: number of attention heads; must divide ``embed_dims``.
        num_levels: number of feature levels (kept for deformable-attention
            generality; here sampling happens on a single BEV plane).
        num_points: sampling points per head per level.
        num_bev_queue: number of BEV maps (history + current) attended to.
        dropout: dropout rate applied to the attention output.
        bev_h, bev_w: spatial size of the BEV grid.
    """

    def __init__(self, embed_dims=256, num_heads=8, num_levels=1, num_points=4, num_bev_queue=2, dropout=0.1, bev_h=200, bev_w=200):
        super(BEVFormer, self).__init__()
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.num_levels = num_levels
        self.num_points = num_points
        self.num_bev_queue = num_bev_queue
        self.dim_per_head = embed_dims // num_heads
        self.bev_h = bev_h
        self.bev_w = bev_w

        # All projections operate on the queue-concatenated channel axis
        # (embed_dims * num_bev_queue), mirroring BEVFormer's temporal attention.
        self.sampling_offsets = nn.Linear(embed_dims * num_bev_queue, num_bev_queue * num_heads * num_levels * num_points * 2)
        self.attention_weights = nn.Linear(embed_dims * num_bev_queue, num_heads * num_levels * num_points)
        self.value_proj = nn.Linear(embed_dims * num_bev_queue, embed_dims * num_bev_queue)
        self.output_proj = nn.Linear(embed_dims * num_bev_queue, embed_dims)
        self.dropout = nn.Dropout(dropout)

        # History of BEV features produced by previous forward() calls.
        # NOTE(review): tensors stored here keep their autograd graphs alive
        # across calls and do not follow the module through .to(device) —
        # confirm whether they should be detached when appended in forward().
        self.bev_list = []
        self.bev_query = nn.Embedding(bev_h * bev_w, embed_dims)
        position_embeds = self.generate_2d_positional_encoding()
        self.register_buffer("position_embeds", position_embeds)
        self.learned_pos_enc = nn.Embedding(bev_h * bev_w, embed_dims)

        self.init_weights()

    def init_weights(self):
        """Initialize projections following Deformable-DETR conventions.

        The sampling-offset bias is set so each head initially looks in an
        evenly spaced direction, with radius growing per sampling point.
        """
        constant_(self.sampling_offsets.weight.data, 0.)
        thetas = torch.arange(self.num_heads, dtype=torch.float32) * (2.0 * math.pi / self.num_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        # Normalize so the dominant component is +-1, then broadcast to
        # (num_bev_queue, num_heads, num_levels, num_points, 2).
        grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.num_heads, 1, 1, 2).unsqueeze(0).repeat(self.num_bev_queue, 1, self.num_levels, self.num_points, 1)
        for i in range(self.num_points):
            grid_init[:, :, :, i, :] *= i + 1
        with torch.no_grad():
            # copy_ keeps the existing Parameter object intact (the original
            # replaced it with a new nn.Parameter, which also works but breaks
            # any external references to the old bias tensor).
            self.sampling_offsets.bias.copy_(grid_init.view(-1))
        constant_(self.attention_weights.weight.data, 0.)
        constant_(self.attention_weights.bias.data, 0.)
        xavier_uniform_(self.value_proj.weight.data)
        constant_(self.value_proj.bias.data, 0.)
        xavier_uniform_(self.output_proj.weight.data)
        constant_(self.output_proj.bias.data, 0.)
        nn.init.xavier_uniform_(self.learned_pos_enc.weight)

    def generate_ref_2d(self, H, W, device, dtype):
        """Return reference points for every BEV cell, normalized for grid_sample.

        Returns:
            (1, H*W, 2) tensor of (x, y) coordinates, centered on pixel
            centers and scaled to roughly [-1, 1].
        """
        # indexing="ij" is the historical default; made explicit to silence
        # the torch.meshgrid deprecation warning without changing behavior.
        ref_y, ref_x = torch.meshgrid(
            torch.linspace(0.5 - H / 2, H / 2 - 0.5, H, dtype=dtype, device=device),
            torch.linspace(0.5 - W / 2, W / 2 - 0.5, W, dtype=dtype, device=device),
            indexing="ij"
        )
        ref_y = ref_y.reshape(-1) / H * 2  # Shape: (H*W,)
        ref_x = ref_x.reshape(-1) / W * 2  # Shape: (H*W,)
        ref_2d = torch.stack((ref_x, ref_y), dim=-1)  # Shape: (H*W, 2)
        ref_2d = ref_2d.unsqueeze(0)  # Shape: (1, H*W, 2)
        return ref_2d

    def generate_2d_positional_encoding(self):
        """Build a fixed 2D sinusoidal positional encoding.

        The embedding is split in half: the first dim//2 channels encode the
        y (row) position, the last dim//2 channels encode the x (column)
        position, each with interleaved sin/cos terms.

        Returns:
            (bev_h, bev_w, embed_dims) tensor.

        Raises:
            ValueError: if ``embed_dims`` is not divisible by 4.
        """
        height = self.bev_h
        width = self.bev_w
        dim = self.embed_dims

        if dim % 4 != 0:
            raise ValueError("Embedding dimension (dim) must be divisible by 4 for 2D positional encoding.")

        y_position = torch.arange(height).unsqueeze(1)  # (height, 1)
        x_position = torch.arange(width).unsqueeze(1)   # (width, 1)

        div_term = torch.exp(torch.arange(0, dim // 2, 2) * (-math.log(10000.0) / (dim // 2)))

        pe_y = torch.zeros(height, dim // 2)
        pe_y[:, 0::2] = torch.sin(y_position * div_term)
        pe_y[:, 1::2] = torch.cos(y_position * div_term)

        pe_x = torch.zeros(width, dim // 2)
        pe_x[:, 0::2] = torch.sin(x_position * div_term)
        pe_x[:, 1::2] = torch.cos(x_position * div_term)

        pe_y = pe_y.unsqueeze(1).repeat(1, width, 1)  # (height, width, dim//2)
        pe_x = pe_x.unsqueeze(0).repeat(height, 1, 1)  # (height, width, dim//2)
        pos_encoding = torch.cat([pe_y, pe_x], dim=2)  # (height, width, dim)

        return pos_encoding

    def deformable_attention(self, prev_bev, bev_query=None):
        """One layer of temporal deformable attention over the BEV queue.

        Args:
            prev_bev: list of (bs, embed_dims, bev_h, bev_w) BEV maps, oldest
                first. Padded (by repeating the oldest) or truncated so that,
                together with the query, exactly ``num_bev_queue`` maps form
                the value.
            bev_query: optional (bs, embed_dims, bev_h, bev_w) query map.
                When None, the learnable BEV query plus the sinusoidal
                positional encoding is used.

        Returns:
            (bs, embed_dims, bev_h, bev_w) updated BEV features (residual on
            top of the query).
        """
        bev_h = self.bev_h
        bev_w = self.bev_w
        num_query = bev_h * bev_w

        bs, embed_dims, bev_h_in, bev_w_in = prev_bev[0].shape  # (bs, embed_dims, H, W)
        device = prev_bev[0].device
        dtype = prev_bev[0].dtype
        assert bev_h == bev_h_in and bev_w == bev_w_in, f"Shape mismatch."

        if bev_query is None:
            bev_query = self.bev_query.weight  # (bev_h * bev_w, embed_dims)
            bev_query = bev_query.permute(1, 0).contiguous().view(1, self.embed_dims, bev_h, bev_w)  # Shape: (1, embed_dims, H, W)
            pos_encoding = self.position_embeds.permute(2, 0, 1).unsqueeze(0)  # (1, dim, H, W)
            bev_query = bev_query + pos_encoding  # (1, dim, H, W)
            bev_query = bev_query.repeat(bs, 1, 1, 1)  # (bs, dim, H, W)

        # Build the BEV queue: history + current query, padded/truncated to
        # exactly num_bev_queue entries, concatenated along channels.
        assert isinstance(prev_bev, list), "prev_bev should be a list of tensors."
        bev_list = prev_bev + [bev_query]
        if len(bev_list) < self.num_bev_queue:
            num_pad = self.num_bev_queue - len(bev_list)
            bev_list = [bev_list[0].clone() for _ in range(num_pad)] + bev_list
        elif len(bev_list) > self.num_bev_queue:
            bev_list = bev_list[-self.num_bev_queue:]
        assert len(bev_list) == self.num_bev_queue, "bev_list length mismatch after padding."
        value = torch.cat(bev_list, dim=1)  # (bs, embed_dims * num_bev_queue, H, W)

        # Generate reference points
        ref_2d = self.generate_ref_2d(bev_h, bev_w, device, dtype)  # (1, H*W, 2)
        ref_point = ref_2d.view(1, bev_h, bev_w, 2)  # (1, H, W, 2)

        # Compute sampling offsets. The flattened batch order below is
        # (bs, num_bev_queue, num_heads, num_levels * num_points) — the SAME
        # order used when flattening V, so grid_sample pairs them correctly.
        value = value.permute(0, 2, 3, 1).contiguous()  # (bs, H, W, embed_dims * num_bev_queue)
        sampling_offsets = self.sampling_offsets(value)  # (bs, H, W, num_bev_queue * num_heads * num_levels * num_points * 2)
        sampling_offsets = sampling_offsets.view(bs, bev_h, bev_w, -1, 2)  # (bs, H, W, num_bev_queue * num_heads * num_levels * num_points, 2)
        sampling_offsets = sampling_offsets.permute(0, 3, 1, 2, 4).contiguous().view(-1, bev_h, bev_w, 2)  # (bs * num_bev_queue * num_heads * num_levels * num_points, H, W, 2)

        # Generate sampling locations: offsets are in cell units, so divide
        # by (W, H) to convert into grid_sample's normalized coordinates.
        sampling_locations = ref_point + sampling_offsets / torch.tensor([bev_w, bev_h], device=device, dtype=dtype)  # (bs * num_bev_queue * num_heads * num_levels * num_points, H, W, 2)

        # Compute attention scores (softmax over levels * points per head).
        attention_weights = self.attention_weights(value)  # (bs, H, W, num_heads * num_levels * num_points)
        attention_weights = attention_weights.view(bs, num_query, self.num_heads, self.num_levels * self.num_points)
        attention_weights = F.softmax(attention_weights, -1)
        attention_weights = attention_weights.permute(0, 2, 1, 3).contiguous().unsqueeze(3)  # (bs, num_heads, H * W, 1, num_levels * num_points)

        # Project values and flatten with batch order (bs, queue, heads, L*P).
        V = self.value_proj(value)  # (bs, H, W, embed_dims * num_bev_queue)
        V = V.view(bs, bev_h, bev_w, self.num_heads, self.dim_per_head, self.num_bev_queue)
        V = V.permute(0, 5, 3, 4, 1, 2).contiguous().unsqueeze(3).repeat(1, 1, 1, self.num_levels * self.num_points, 1, 1, 1)  # (bs, num_bev_queue, num_heads, num_levels * num_points, dim_per_head, H, W)
        V = V.view(-1, self.dim_per_head, bev_h, bev_w)  # (bs * num_bev_queue * num_heads * num_levels * num_points, dim_per_head, H, W)

        sampled_V = F.grid_sample(V, sampling_locations, align_corners=True)  # (bs * num_bev_queue * num_heads * num_levels * num_points, dim_per_head, H, W)
        # BUGFIX: un-flatten in the SAME order the batch dim was built above:
        # (bs, num_bev_queue, num_heads, ...). The original viewed this as
        # (bs, num_heads, num_bev_queue, ...), silently mixing the head and
        # queue axes whenever num_heads != num_bev_queue.
        sampled_V = sampled_V.view(bs, self.num_bev_queue, self.num_heads, self.num_levels * self.num_points, self.dim_per_head, num_query)
        sampled_V = sampled_V.permute(0, 2, 5, 3, 4, 1).contiguous()  # (bs, num_heads, H * W, num_levels * num_points, dim_per_head, num_bev_queue)
        sampled_V = sampled_V.view(bs, self.num_heads, num_query, self.num_levels * self.num_points, self.dim_per_head * self.num_bev_queue)

        # Compute output by weighted sum of V
        output = torch.matmul(attention_weights, sampled_V)  # (bs, num_heads, H * W, 1, dim_per_head * num_bev_queue)
        output = output.permute(0, 2, 3, 1, 4).contiguous().view(bs, num_query, -1)  # (bs, H*W, embed_dims * num_bev_queue)

        # Apply output projection, dropout, and a residual over the query.
        output = self.output_proj(output)  # (bs, H*W, embed_dims)
        output = output.permute(0, 2, 1).contiguous().view(bs, embed_dims, bev_h, bev_w)  # (bs, embed_dims, H, W)
        output = self.dropout(output) + bev_query  # (bs, embed_dims, H, W)

        return output

    def forward(self, bev):
        """Run 6 stacked temporal-attention layers and update the history.

        Args:
            bev: (bs, embed_dims, bev_h, bev_w) current BEV feature map.

        Returns:
            (bs, embed_dims, bev_h, bev_w) refined BEV features.
        """
        # First layer attends to stored history + the incoming map; the five
        # following layers refine the running result.
        x = self.deformable_attention(prev_bev=self.bev_list + [bev], bev_query=None)
        for _ in range(5):
            x = self.deformable_attention(prev_bev=self.bev_list + [x], bev_query=None)
        bev_feature = x

        # Keep at most num_bev_queue historical maps (drop the oldest).
        if len(self.bev_list) >= self.num_bev_queue:
            self.bev_list.pop(0)
        self.bev_list.append(bev_feature)

        # BUGFIX: the original returned the undefined name `output`
        # (NameError at runtime); return the computed feature instead.
        return bev_feature

if __name__ == "__main__":
    # Smoke test. BUGFIX: forward(self, bev) takes a single positional BEV
    # tensor; the original called tsa(prev_bev=..., bev_query=...), which
    # raises TypeError (no such keyword arguments). History is kept inside
    # the module, so we simply call it twice with fresh maps.
    bs = 2
    embed_dims = 4
    bev_h, bev_w = 5, 5
    tsa = BEVFormer(embed_dims=embed_dims, num_heads=2, num_levels=1, num_points=4, num_bev_queue=4, dropout=0.1, bev_h=bev_h, bev_w=bev_w)
    bev = torch.randn(bs, embed_dims, bev_h, bev_w)
    output = tsa(bev)
    # Second call exercises the stored BEV history.
    output = tsa(torch.randn(bs, embed_dims, bev_h, bev_w))
    print(output.shape)
