# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn

from util.misc import NestedTensor


class PositionEmbeddingSine(nn.Module):
    """Sinusoidal 2-D position embedding.

    A generalization of the "Attention Is All You Need" positional encoding
    to images: each spatial location (y, x) is encoded with interleaved
    sine/cosine waves of geometrically increasing wavelength, computed
    separately for the row and the column coordinate.

    Args:
        num_pos_feats: channels used per axis; the output has
            2 * num_pos_feats channels (default 64 -> 128 output channels).
        temperature: base of the geometric frequency progression.
        normalize: if True, rescale the cumulative coordinates to [0, scale].
        scale: coordinate scale used when ``normalize`` is True
            (default 2 * pi). Passing ``scale`` without ``normalize=True``
            raises ValueError.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    # Annotation is a string so this module imports even where util.misc
    # is unavailable; the runtime behavior is unchanged.
    def forward(self, tensor_list: "NestedTensor"):
        """Return positional encodings of shape (B, 2*num_pos_feats, H, W).

        ``tensor_list.tensors`` is the batched image tensor (B, C, H, W) and
        ``tensor_list.mask`` (B, H, W) marks padded pixels with True.
        """
        x = tensor_list.tensors
        mask = tensor_list.mask
        assert mask is not None
        not_mask = ~mask  # True at valid (non-padded) pixels
        # Cumulative sums over valid pixels yield 1-based row/column coordinates.
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            # Rescale so the last valid position along each axis maps to `scale`;
            # eps guards against division by zero on fully-masked rows/columns.
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        # Channel pair (2i, 2i+1) shares frequency temperature^(2i / num_pos_feats).
        # torch.div with rounding_mode='floor' replaces the deprecated float
        # `//` tensor operator (same values, no deprecation warning).
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats)

        pos_x = x_embed[:, :, :, None] / dim_t  # (B, H, W, num_pos_feats), width axis
        pos_y = y_embed[:, :, :, None] / dim_t  # (B, H, W, num_pos_feats), height axis
        # Interleave: sin on even channels, cos on odd channels.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        # Concatenate row and column encodings, then move channels first:
        # (B, H, W, 2*num_pos_feats) -> (B, 2*num_pos_feats, H, W).
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos



class PositionEmbeddingLearned(nn.Module):
    """Learned absolute 2-D position embedding.

    Keeps one learned embedding vector per row index and one per column
    index; the encoding at location (y, x) is the concatenation of the
    column lookup for x and the row lookup for y.

    Args:
        num_pos_feats: channels per axis; the output has
            2 * num_pos_feats channels.
        max_size: maximum supported feature-map height/width, i.e. the
            number of rows in each embedding table. Defaults to 50,
            matching the original hard-coded value.
    """

    def __init__(self, num_pos_feats=256, max_size=50):
        super().__init__()
        self.row_embed = nn.Embedding(max_size, num_pos_feats)  # one vector per row (height) index
        self.col_embed = nn.Embedding(max_size, num_pos_feats)  # one vector per column (width) index
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize both embedding tables uniformly in [0, 1)."""
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    # Annotation is a string so this module imports even where util.misc
    # is unavailable; the runtime behavior is unchanged.
    def forward(self, tensor_list: "NestedTensor"):
        """Return positional encodings of shape (B, 2*num_pos_feats, H, W).

        Only ``tensor_list.tensors`` (B, C, H, W) is read; the mask is unused
        because the encoding depends only on the spatial indices.
        """
        x = tensor_list.tensors
        h, w = x.shape[-2:]
        i = torch.arange(w, device=x.device)  # column indices 0..w-1
        j = torch.arange(h, device=x.device)  # row indices 0..h-1
        x_emb = self.col_embed(i)  # (w, num_pos_feats)
        y_emb = self.row_embed(j)  # (h, num_pos_feats)
        # Broadcast column embeddings down the rows and row embeddings across
        # the columns, concatenate on the channel axis, then reshape to
        # (B, 2*num_pos_feats, H, W); identical for every batch element.
        pos = torch.cat([
            x_emb.unsqueeze(0).repeat(h, 1, 1),
            y_emb.unsqueeze(1).repeat(1, w, 1),
        ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
        return pos


def build_position_encoding(args):
    """Construct the position-encoding module selected by ``args``.

    Half of ``args.hidden_dim`` is used per spatial axis, so the returned
    module emits ``args.hidden_dim`` channels in total (e.g. hidden_dim=512
    on a (B, C, 14, 14) feature map yields a (B, 512, 14, 14) encoding).
    ``args.position_embedding`` picks the flavor: 'v2'/'sine' for the fixed
    sinusoidal encoding, 'v3'/'learned' for the learned one.

    Raises:
        ValueError: if ``args.position_embedding`` names an unknown variant.
    """
    half_dim = args.hidden_dim // 2
    kind = args.position_embedding
    if kind in ('v2', 'sine'):
        # TODO find a better way of exposing other arguments
        return PositionEmbeddingSine(half_dim, normalize=True)
    if kind in ('v3', 'learned'):
        return PositionEmbeddingLearned(half_dim)
    raise ValueError(f"not supported {args.position_embedding}")
