import torch
import torch.nn as nn
import math
import torchvision.models as models
from torchvision.models import ResNet50_Weights

from models.deformable_attention import DeformableAttention, FeedForward


class BEVFormerEncoder(nn.Module):
    """BEV encoder: `layers` rounds of deformable self-attention over the BEV
    queries, deformable cross-attention into the flattened image features, and
    a feed-forward block, each wrapped in a residual connection.
    """

    def __init__(self, embed_dim, position_embeds, layers=3):
        """
        Args:
            embed_dim: channel width of queries and image features.
            position_embeds: (H*W, embed_dim) static positional encoding.
            layers: number of attention/FFN rounds.
        """
        super(BEVFormerEncoder, self).__init__()
        self.embed_dim = embed_dim
        self.layers = layers

        # Fix: keep the constant positional encoding as a registered buffer so
        # it follows .to(device)/.cuda() together with the module.  A plain
        # tensor attribute silently stays on its original device.
        # persistent=False keeps it out of state_dict (it is deterministic),
        # so old checkpoints still load with strict=True.
        self.register_buffer("position_embeds", position_embeds, persistent=False)

        self.self_attention_layers = nn.ModuleList(
            [DeformableAttention(embed_dims=embed_dim) for _ in range(layers)]
        )

        self.cross_attention_layers = nn.ModuleList(
            [DeformableAttention(embed_dims=embed_dim) for _ in range(layers)]
        )

        self.ffn_layers = nn.ModuleList(
            [FeedForward(embed_dims=embed_dim) for _ in range(layers)]
        )

    def forward(self, query, x, ref_point):
        """
        Args:
            query: (bs, H*W, embed_dim) BEV queries.
            x: (bs, embed_dim, H, W) image feature map.
            ref_point: (1, H*W, 2) normalized 2-D reference points.

        Returns:
            (bs, H*W, embed_dim) refined BEV features.
        """
        # Flatten the feature map into a token sequence: (bs, H*W, embed_dim).
        x = x.permute(0, 2, 3, 1).contiguous()
        x = x.view(x.size(0), -1, self.embed_dim)

        # Defensive move onto the data's device (a no-op once the module has
        # been .to()'d, since position_embeds is a buffer).
        pos = self.position_embeds.to(x.device)

        feature = query

        for i in range(self.layers):
            res = feature  # (bs, H*W, embed_dim)
            feature = self.self_attention_layers[i](query=feature, value=feature, ref_point=ref_point, pos_encoding=pos)
            feature = feature + res

            res = feature
            feature = self.cross_attention_layers[i](query=feature, value=x, ref_point=ref_point, pos_encoding=pos)
            feature = feature + res

            res = feature
            feature = self.ffn_layers[i](feature)
            feature = feature + res

        return feature
    

class BEVFormerDecoder(nn.Module):
    """BEV decoder: like the encoder, but the self-attention query also has
    the positional encoding added, and the value `x` is an already-flattened
    (bs, H*W, embed_dim) BEV feature sequence (no reshaping is done here).
    """

    def __init__(self, embed_dim, position_embeds, layers=3):
        """
        Args:
            embed_dim: channel width of queries and features.
            position_embeds: (H*W, embed_dim) static positional encoding.
            layers: number of attention/FFN rounds.
        """
        super(BEVFormerDecoder, self).__init__()
        self.embed_dim = embed_dim
        self.layers = layers

        # Fix: register the constant positional encoding as a buffer so it
        # follows .to(device)/.cuda() with the module instead of lingering on
        # its original device.  persistent=False keeps state_dict unchanged.
        self.register_buffer("position_embeds", position_embeds, persistent=False)

        self.self_attention_layers = nn.ModuleList(
            [DeformableAttention(embed_dims=embed_dim) for _ in range(layers)]
        )

        self.cross_attention_layers = nn.ModuleList(
            [DeformableAttention(embed_dims=embed_dim) for _ in range(layers)]
        )

        self.ffn_layers = nn.ModuleList(
            [FeedForward(embed_dims=embed_dim) for _ in range(layers)]
        )

    def forward(self, query, x, ref_point):
        """
        Args:
            query: (bs, H*W, embed_dim) decoder queries.
            x: (bs, H*W, embed_dim) BEV features from the encoder.
            ref_point: (1, H*W, 2) normalized 2-D reference points.

        Returns:
            (bs, H*W, embed_dim) decoded features.
        """
        # Fix: the original reassigned self.position_embeds inside forward().
        # Mutating module state per call is unsafe (e.g. with DataParallel
        # replicas) and hides device-placement bugs; use a local moved copy.
        pos = self.position_embeds.to(x.device)

        feature = query

        for i in range(self.layers):
            res = feature
            # Positional encoding is added to the self-attention query only.
            feature = self.self_attention_layers[i](query=feature + pos.unsqueeze(0), value=feature, ref_point=ref_point, pos_encoding=pos)
            feature = feature + res

            res = feature
            feature = self.cross_attention_layers[i](query=feature, value=x, ref_point=ref_point, pos_encoding=pos)
            feature = feature + res

            res = feature
            feature = self.ffn_layers[i](feature)
            feature = feature + res

        return feature
    

class BEVFormer_V0(nn.Module):
    """BEV segmentation model: ResNet-50 backbone -> upsampling neck ->
    BEVFormer encoder/decoder -> per-cell linear classifier.

    Input: (bs, 3, *input_size) image batch.
    Output: (bs, num_classes, bev_h, bev_w) class logits over the BEV grid.
    """

    def __init__(self, embed_dims=256, bev_h=64, bev_w=64, num_classes=2, input_size=(512, 512)):
        super(BEVFormer_V0, self).__init__()

        self.embed_dims = embed_dims
        self.bev_h = bev_h
        self.bev_w = bev_w
        self.decoder_query_size = (bev_h, bev_w)
        self.input_size = input_size
        self.num_class = num_classes

        # ImageNet-pretrained ResNet-50 without its avgpool/fc head.
        resnet = models.resnet50(weights=ResNet50_Weights.DEFAULT)
        modules = list(resnet.children())[:-2]
        self.feature_extractor = nn.Sequential(*modules)  # (bs, 2048, 16, 16) for a 512x512 input

        # Upsample backbone features to the BEV grid resolution.
        # (Generalized: the final width follows embed_dims instead of a
        # hard-coded 256; the default is identical.)
        self.conv_up = nn.Sequential(
            nn.ConvTranspose2d(2048, 1024, kernel_size=4, stride=2, padding=1),  # (bs, 1024, 32, 32)
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(1024, 512, kernel_size=4, stride=2, padding=1),   # (bs, 512, 64, 64)
            nn.ReLU(inplace=True),
            nn.Conv2d(512, embed_dims, kernel_size=3, stride=1, padding=1),      # (bs, embed_dims, 64, 64)
            nn.ReLU(inplace=True)
        )

        self.encode_query = nn.Embedding(bev_h * bev_w, embed_dims)
        self.decode_query = nn.Embedding(bev_h * bev_w, embed_dims)

        self.output_fc = nn.Linear(embed_dims, num_classes)

        # Fix: build the transformer and its constant tensors here instead of
        # lazily in get_env().  With lazy construction, encoder/decoder
        # parameters were missing from parameters()/state_dict() until the
        # first get_env() call, so optimizers and checkpoints created right
        # after __init__ silently ignored them, and forward() raised
        # AttributeError if get_env() was never called.  ref_point is a
        # non-persistent buffer so it follows .to(device)/.cuda() without
        # changing state_dict contents.
        self.register_buffer("ref_point", self.generate_ref_2d(bev_h, bev_w), persistent=False)
        static_position_embeds = self.generate_positional_encoding(bev_h, bev_w)
        self.encoder = BEVFormerEncoder(embed_dims, static_position_embeds)
        self.decoder = BEVFormerDecoder(embed_dims, static_position_embeds)

        self.init_weights()

    def get_env(self, x):
        """Backward-compatibility hook (the model is now fully built in
        __init__).  Records the running device/dtype and moves the constant
        tensors onto the input's device; safe to call multiple times.

        Args:
            x: any tensor on the target device (only device/dtype are read).
        """
        self.running_device = x.device
        self.running_type = x.dtype

        self.ref_point = self.generate_ref_2d(self.bev_h, self.bev_w, dtype=x.dtype, device=x.device)
        self.encoder.position_embeds = self.encoder.position_embeds.to(x.device)
        self.decoder.position_embeds = self.decoder.position_embeds.to(x.device)

    def init_weights(self):
        # Intentionally a no-op: the backbone is pretrained and the remaining
        # modules rely on their default initialization.
        pass

    def generate_ref_2d(self, H, W, dtype=None, device=None):
        """Build normalized 2-D reference points for deformable attention.

        Coordinates are grid-cell centers mapped to (-1, 1); e.g. for H == 2
        the y values are -0.5 and 0.5.

        Args:
            H, W: BEV grid height and width.
            dtype, device: optional overrides; default to the values recorded
                by get_env() when available, otherwise torch defaults.

        Returns:
            (1, H*W, 2) tensor of (x, y) reference points.
        """
        if dtype is None:
            dtype = getattr(self, "running_type", torch.get_default_dtype())
        if device is None:
            device = getattr(self, "running_device", None)

        ref_y, ref_x = torch.meshgrid(
            torch.linspace(0.5 - H / 2, H / 2 - 0.5, H, dtype=dtype, device=device),
            torch.linspace(0.5 - W / 2, W / 2 - 0.5, W, dtype=dtype, device=device), indexing='ij'
        )
        ref_y = ref_y.reshape(-1) / H * 2  # Shape: (H*W,), values in (-1, 1)
        ref_x = ref_x.reshape(-1) / W * 2  # Shape: (H*W,)
        ref_2d = torch.stack((ref_x, ref_y), dim=-1)  # Shape: (H*W, 2)
        ref_2d = ref_2d.unsqueeze(0)  # Shape: (1, H*W, 2)
        return ref_2d

    def generate_positional_encoding(self, height, width):
        """Build a fixed 2-D sinusoidal positional encoding.

        Half of the channels encode the row (y) position, the other half the
        column (x) position, each with interleaved sin/cos terms.

        Returns:
            (height*width, embed_dims) float tensor (row-major over the grid).
        """
        dim = self.embed_dims

        if dim % 4 != 0:
            raise ValueError("Embedding dimension (dim) must be divisible by 4 for 2D positional encoding.")

        y_position = torch.arange(height).unsqueeze(1)  # (height, 1)
        x_position = torch.arange(width).unsqueeze(1)   # (width, 1)

        # Geometric frequency schedule, as in "Attention Is All You Need".
        div_term = torch.exp(torch.arange(0, dim // 2, 2) * (-math.log(10000.0) / (dim // 2)))

        pe_y = torch.zeros(height, dim // 2)
        pe_y[:, 0::2] = torch.sin(y_position * div_term)
        pe_y[:, 1::2] = torch.cos(y_position * div_term)

        pe_x = torch.zeros(width, dim // 2)
        pe_x[:, 0::2] = torch.sin(x_position * div_term)
        pe_x[:, 1::2] = torch.cos(x_position * div_term)

        pe_y = pe_y.unsqueeze(1).repeat(1, width, 1)   # (height, width, dim//2)
        pe_x = pe_x.unsqueeze(0).repeat(height, 1, 1)  # (height, width, dim//2)
        pos_encoding = torch.cat([pe_y, pe_x], dim=2)  # (height, width, dim)
        pos_encoding = pos_encoding.view(-1, self.embed_dims)  # (H*W, dim)

        return pos_encoding

    def forward(self, x):
        """Args: x: (bs, 3, *input_size) images.  Returns (bs, num_classes, bev_h, bev_w) logits."""
        bs = x.size(0)

        # Migrate the constant tensors if the model reached a new device
        # without a get_env() call (buffers normally move via .to() already).
        if self.encoder.position_embeds.device != x.device:
            self.get_env(x)

        x = self.feature_extractor(x)  # (bs, 2048, 16, 16)
        x = self.conv_up(x)            # (bs, embed_dims, bev_h, bev_w)

        bev_query = self.encode_query.weight.unsqueeze(0).repeat(bs, 1, 1)    # (bs, H*W, embed_dim)
        decode_query = self.decode_query.weight.unsqueeze(0).repeat(bs, 1, 1)  # (bs, H*W, embed_dim)

        bev_feature = self.encoder(bev_query, x, self.ref_point)
        result_feature = self.decoder(decode_query, bev_feature, self.ref_point)

        output = self.output_fc(result_feature)  # (bs, H*W, num_classes)
        output = output.permute(0, 2, 1).contiguous().view(bs, self.num_class, self.bev_h, self.bev_w)
        return output
