import json
from typing import Any, Mapping
import torch
from torch import nn

from torchvision.models import resnet50, ResNet50_Weights
from einops import rearrange


class MyTransformer(nn.Module):
    """Encoder-decoder model mapping an image plus a token prefix to vocabulary logits.

    A frozen ResNet-50 backbone extracts image features, a 1x1 conv projects them
    to ``word_vec_dim`` channels, a fixed 2-D sinusoidal position encoding is
    added, and a standard ``nn.Transformer`` (batch-first) attends from the
    embedded target sequence (with 1-D sinusoidal position encoding) to the image
    features.  Output shape: ``(batch, tgt_len, word_num)`` logits.
    """

    # Class-level defaults for the lazily built caches.  Real caching happens on
    # per-instance attributes set in __init__, so instances never share tensors.
    image_position_mask = None
    tgt_mask = None
    # Index of the first backbone parameter that partly_unfreeze_resnet() makes
    # trainable; parameters before this index stay frozen.
    resnet_finetuning_range_from = 72

    def __init__(
        self,
        word_num,
        word_vec_dim,
        temperature=10000,
        nhead=8,
        num_encoder_layers=6,
        num_decoder_layers=6,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        custom_encoder=None,
        custom_decoder=None,
        layer_norm_eps=1e-5,
    ):
        """
        Args:
            word_num: vocabulary size (embedding rows and output logits).
            word_vec_dim: embedding / transformer model dimension (d_model).
            temperature: frequency base of the 2-D image position encoding.
            nhead ... layer_norm_eps: forwarded unchanged to ``nn.Transformer``.
        """
        super().__init__()
        self.word_num = word_num
        self.word_vec_dim = word_vec_dim
        self.temperature = temperature

        # Snapshot of the constructor arguments for train_indicator logging.
        self.indicator_dict = {k: v for k, v in locals().items() if k not in ["self", "__class__"]}

        # Per-instance caches (shadow the class-level defaults above).
        self.image_position_mask = None
        self.tgt_mask = None

        # ResNet-50 backbone without its avgpool/fc head; emits a
        # (batch, 2048, H/32, W/32) feature map.  Frozen by default.
        self.resnet = nn.Sequential(*list(resnet50(weights=ResNet50_Weights.IMAGENET1K_V2).children())[:-2])
        self.freeze_resnet()

        # 1x1 conv projecting the 2048 backbone channels down to word_vec_dim.
        self.conv = nn.Conv2d(2048, word_vec_dim, 1)

        # Target-token embedding.
        self.word_vec_embed = nn.Embedding(word_num, word_vec_dim)

        # Encoder-decoder transformer operating on (batch, seq, dim) tensors.
        self.transform = nn.Transformer(
            d_model=word_vec_dim,
            batch_first=True,
            nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            activation=activation,
            custom_encoder=custom_encoder,
            custom_decoder=custom_decoder,
            layer_norm_eps=layer_norm_eps,
        )

        # Projection from transformer output to vocabulary logits.
        self.vocab_liner = nn.Sequential(
            nn.Linear(word_vec_dim, 512),
            nn.LeakyReLU(),
            nn.Linear(512, word_num),
        )

    @property
    def resnet_train_params(self):
        """Backbone tail parameters that fine-tuning should update."""
        return list(self.resnet.parameters())[self.resnet_finetuning_range_from :]

    @property
    def other_params(self):
        """All parameters that do not belong to the ResNet backbone."""
        return [p for n, p in self.named_parameters() if not n.startswith("resnet")]

    def forward(self, images, input_seq, resnet_finetuning=False, tgt_key_padding_mask=None, tgt_mask=None):
        """Compute per-position vocabulary logits.

        Args:
            images: image batch fed to the ResNet backbone.
            input_seq: (batch, tgt_len) token-id tensor.
            resnet_finetuning: when False the backbone runs under no_grad.
            tgt_key_padding_mask, tgt_mask: forwarded to ``nn.Transformer``.

        Returns:
            (batch, tgt_len, word_num) logits tensor.
        """
        # Backbone features; skip autograd bookkeeping unless fine-tuning.
        if resnet_finetuning:
            res_output = self.resnet(images)
        else:
            with torch.no_grad():
                res_output = self.resnet(images)
        res_output = self.conv(res_output).flatten(2)  # (batch, word_vec_dim, len)
        # Add the fixed 2-D position encoding.  NOTE(review): the encoding is
        # built for an 8x8 feature map (see generate_image_position_mask), so
        # broadcasting only works for inputs yielding 8x8 backbone output.
        res_output = res_output + self.generate_image_position_mask().to(images.device)
        res_output = res_output.permute(0, 2, 1)  # (batch, len, word_vec_dim)

        # Embed tokens and add the 1-D sinusoidal position encoding.
        input_vec = self.word_vec_embed(input_seq)
        _, *input_shape = input_vec.shape
        input_vec = input_vec + self.sinusoidal_position_encoding(*input_shape, device=input_vec.device)

        # Cross-attend from the target sequence to the image features.
        output = self.transform(
            res_output, input_vec, tgt_key_padding_mask=tgt_key_padding_mask, tgt_mask=tgt_mask
        )

        # Map transformer output to vocabulary logits.
        return self.vocab_liner(output)

    @property
    def train_indicator(self):
        """Human-readable summary of the constructor configuration."""
        # default=str keeps json.dumps from raising when non-serializable values
        # (e.g. custom encoder/decoder modules) were passed to __init__.
        return "MyTransformer: " + json.dumps(self.indicator_dict, default=str)

    def generate_image_position_mask(self):
        """Build (and cache) the fixed 2-D sinusoidal image position encoding.

        Returns a (word_vec_dim, 64) tensor for an 8x8 feature grid: the first
        half of the channels encode the row (y) position, the second half the
        column (x) position.  NOTE(review): the 8x8 grid is hard-coded — confirm
        the input image size actually produces an 8x8 ResNet-50 feature map.
        """
        if self.image_position_mask is not None:
            return self.image_position_mask

        # Half the channels encode y, the other half x.
        num_pos_feats = self.word_vec_dim // 2

        W, H = 8, 8
        y_embed = torch.arange(1, W + 1).unsqueeze(1).repeat(1, H)
        x_embed = torch.arange(1, H + 1).unsqueeze(0).repeat(W, 1)
        dim_t = torch.arange(num_pos_feats, dtype=torch.float32)
        # Pair adjacent channels so sin and cos share the same frequency.
        dim_t = self.temperature ** (2 * (dim_t // 2) / num_pos_feats)
        pos_y = y_embed[:, :, None] / dim_t  # None adds the channel dim for broadcasting
        pos_x = x_embed[:, :, None] / dim_t
        pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), -1).flatten(2)
        pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), -1).flatten(2)
        pos = torch.cat((pos_y, pos_x), -1)  # (W, H, num_pos_feats * 2)
        # (W, H, C) -> (C, W*H); pure-torch equivalent of einops "w h c -> c (w h)".
        pos = pos.permute(2, 0, 1).flatten(1)
        self.image_position_mask = pos
        return pos

    def freeze_resnet(self):
        """Freeze every backbone parameter."""
        for p in self.resnet.parameters():
            p.requires_grad = False

    def partly_unfreeze_resnet(self):
        """Unfreeze the tail of the backbone for fine-tuning."""
        for p in self.resnet_train_params:
            p.requires_grad = True

    @staticmethod
    def sinusoidal_position_encoding(seq_len, d_model, device=torch.device("cpu")):
        """Return the standard sinusoidal position encoding.

        Args:
            seq_len (int): sequence length.
            d_model (int): model dimension (odd values are supported).
            device (torch.device): device to create the encoding on.

        Returns:
            torch.Tensor: (seq_len, d_model) position-encoding matrix.
        """
        position = torch.arange(seq_len, dtype=torch.float, device=device).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, d_model, 2).float() * (-torch.log(torch.tensor(10000.0)) / d_model)
        ).to(device)
        pe = torch.zeros(seq_len, d_model, device=device)
        pe[:, 0::2] = torch.sin(position * div_term)
        # Slice div_term so an odd d_model no longer raises a shape mismatch
        # (the cosine half has floor(d_model / 2) columns).
        pe[:, 1::2] = torch.cos(position * div_term[: d_model // 2])
        return pe

    def get_tgt_mask(self, len, device=torch.device("cpu")):
        """Return a cached causal boolean mask of shape (len, len).

        True above the diagonal (future positions blocked).  The cache is
        rebuilt whenever the requested size or device changes; previously a
        stale mask of the wrong size could be returned.  (The parameter name
        shadows the builtin ``len`` but is kept for caller compatibility.)
        """
        cached = self.tgt_mask
        if cached is None or cached.size(0) != len or cached.device != device:
            self.tgt_mask = torch.triu(torch.full((len, len), True, device=device), diagonal=1)
        return self.tgt_mask
