# -*- coding: UTF-8 -*-
# *******************************************************************
# File Name: vgg_transformer
# > Author: 04000387
# > Created Time: 2024/12/25 14:04
# *******************************************************************
from torch import nn
from .point2point import EncoderModule
import torch


class PositionalEmbedding(nn.Module):
    """Fixed (non-learned) sinusoidal positional encoding, as in Vaswani et al. 2017.

    The table is precomputed once for ``seq_length`` positions and registered as a
    buffer so it moves with the module across devices and is saved in state_dict.
    """

    def __init__(self, seq_length, d_model):
        """
        Args:
            seq_length: maximum number of positions to precompute.
            d_model: embedding dimension (even and odd values both supported).
        """
        super().__init__()

        # Build the table with locals only: keeping the intermediate tensors as
        # attributes would duplicate the buffer and never follow .to(device).
        position = torch.arange(0, seq_length, dtype=torch.float).reshape(-1, 1)
        even_dims = torch.arange(0, d_model, 2, dtype=torch.float).reshape(1, -1)
        inv_freq = 1.0 / (10000 ** (even_dims / d_model))

        angles = position @ inv_freq  # (seq_length, ceil(d_model / 2))
        encoding = torch.zeros((seq_length, d_model))
        encoding[:, 0::2] = torch.sin(angles)
        # For odd d_model the cosine slice has one fewer column than `angles`.
        encoding[:, 1::2] = torch.cos(angles[:, : d_model // 2])

        self.register_buffer("positionalEmbedding", encoding)

    def forward(self, x):
        """Return positional encodings shaped like ``x``: (batch, seq, d_model).

        Only ``x.size(0)`` and ``x.size(1)`` are read; the values of ``x`` are unused.
        """
        return self.positionalEmbedding[:x.size(1), :].detach().repeat(x.size(0), 1, 1)


class TransformerOriginalEmbedding(nn.Module):
    """Token embedding summed with a fixed sinusoidal positional encoding.

    Padding tokens receive a zero token-embedding via ``padding_idx``; the
    positional component is still added on top of them.
    """

    def __init__(self, vocab_size, embedding_dim, padding_idx, max_length=4096):
        """
        Args:
            vocab_size: number of token ids in the embedding table.
            embedding_dim: dimensionality of each embedding vector.
            padding_idx: token id whose embedding stays zero and is not updated.
            max_length: longest sequence the positional table supports.
        """
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
        self.positional = PositionalEmbedding(max_length, embedding_dim)

    def forward(self, x):
        """Embed token ids ``x`` (batch, seq) into (batch, seq, embedding_dim)."""
        token_part = self.embedding(x)
        position_part = self.positional(x)
        return token_part + position_part


class VggTransformer(nn.Module):
    def __init__(self, out_channel=512, vocab_size=16000, nhead=8, dim_feedforward=1024, num_layer=2,
                 padding_idx=0):
        super().__init__()
        self.padding_idx = padding_idx

        self.encoder = EncoderModule(out_channel, num_layer)

        self.embedding = TransformerOriginalEmbedding(vocab_size, out_channel, padding_idx)
        layers = nn.TransformerDecoderLayer(out_channel, nhead, dim_feedforward, batch_first=True)
        self.decoder = nn.TransformerDecoder(layers, num_layer)
        # 类别数量 + 4的框的位置
        self.head = nn.Linear(out_channel, vocab_size + 4, bias=True)

    def forward(self, img, input_ids, attention_mask: torch.Tensor):
        tgt_mask, tgt_key_padding_mask = self._generate_mask(attention_mask)
        x = self.encoder(img)
        vocab_embed = self.embedding(input_ids)
        x = self.decoder(vocab_embed, x, tgt_key_padding_mask=tgt_key_padding_mask, tgt_is_causal=True,
                         tgt_mask=tgt_mask)

        x = self.head(x)

        return x

    # def set_parameter_no_grad(self):
    #     vgg_params = list(self.vgg.parameters())
    #     for param in vgg_params:
    #         param.requires_grad = False

    @staticmethod
    def _generate_mask(_attention_mask: torch.Tensor):
        device = _attention_mask.device
        key_padding_mask = torch.zeros_like(_attention_mask, device=device, dtype=torch.float32)
        key_padding_mask[_attention_mask == 0] = -torch.inf
        mask = nn.Transformer.generate_square_subsequent_mask(_attention_mask.size(1), device, dtype=torch.float32)
        return mask, key_padding_mask
