import torch
import torch.nn as nn
from timm.models.vision_transformer import Block, PatchEmbed


class MAE(nn.Module):
    """Minimal Masked Autoencoder (MAE) for image pretraining.

    Pipeline: patch-embed the image, add positional embeddings, randomly mask
    ``mask_ratio`` of the patches, encode only the visible patches with a ViT
    encoder, then let a lightweight MLP decoder predict the raw RGB pixels of
    every patch (masked positions are filled with a learned mask token).

    Args:
        image_size: Input image side length (images are assumed square).
        patch_size: Side length of each square patch.
        embed_dim: Token dimension used by the encoder.
        mask_ratio: Fraction of patches that are masked out (0..1).
    """

    def __init__(self, image_size=224, patch_size=16, embed_dim=768, mask_ratio=0.75):
        super().__init__()

        self.mask_ratio = mask_ratio
        self.patch_size = patch_size

        self.patch_embed = PatchEmbed(
            img_size=image_size, patch_size=patch_size, embed_dim=embed_dim
        )
        num_patches = self.patch_embed.num_patches

        # Learned positional embedding. Trunc-normal init (instead of leaving
        # it all-zeros) so positions are distinguishable from the first step.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
        nn.init.trunc_normal_(self.pos_embed, std=0.02)

        # Transformer encoder: a stack of standard ViT blocks.
        self.encoder = nn.Sequential(
            *[Block(dim=embed_dim, num_heads=12, mlp_ratio=3.0) for _ in range(12)]
        )
        self.norm = nn.LayerNorm(embed_dim)

        # Learned token inserted at masked positions before decoding.
        self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        nn.init.trunc_normal_(self.mask_token, std=0.02)

        # Decoder: predicts the raw RGB pixel values of each patch.
        self.decoder = nn.Sequential(
            nn.Linear(embed_dim, embed_dim),
            nn.LayerNorm(embed_dim),
            nn.GELU(),
            nn.Linear(embed_dim, patch_size ** 2 * 3)  # per-patch RGB pixels
        )

    def patchify(self, imgs):
        """Convert images (B, 3, H, W) to pixel patches (B, N, patch_size**2 * 3).

        Patch ordering matches PatchEmbed's row-major patch grid, so index i
        of the output corresponds to token i of the embedded sequence.
        """
        p = self.patch_size
        B, C, H, W = imgs.shape
        x = imgs.reshape(B, C, H // p, p, W // p, p)
        x = x.permute(0, 2, 4, 3, 5, 1)  # (B, H/p, W/p, p, p, C)
        return x.reshape(B, (H // p) * (W // p), p * p * C)

    def forward(self, imgs):
        """Run one masked-autoencoding pass.

        Args:
            imgs: (B, 3, H, W) input batch.

        Returns:
            reconstructed_patches: (B, N, patch_size**2 * 3) predicted pixels.
            target_patches: (B, N, patch_size**2 * 3) ground-truth pixel patches.
            mask_indices: (B, num_masked) indices of the masked patches.
        """
        B, C, H, W = imgs.shape

        # 1. Patch embedding: (B, num_patches, embed_dim).
        x = self.patch_embed(imgs) + self.pos_embed

        # 2. Per-sample random mask: argsort of uniform noise gives a random
        #    permutation; the first num_masked entries are masked.
        N = x.shape[1]
        num_masked = int(self.mask_ratio * N)
        all_indices = torch.rand(B, N, device=x.device).argsort(dim=1)
        mask_indices = all_indices[:, :num_masked]      # (B, num_masked)
        unmask_indices = all_indices[:, num_masked:]    # (B, num_unmasked)

        # 3. Encode only the visible (unmasked) tokens — the key MAE trick.
        batch_range = torch.arange(B, device=x.device)[:, None]
        unmasked_tokens = x[batch_range, unmask_indices]
        encoded_unmasked = self.norm(self.encoder(unmasked_tokens))

        # 4. Build the full-length decoder input: encoded tokens at their
        #    original positions, mask tokens everywhere else.
        decoder_tokens = torch.zeros_like(x)  # (B, N, embed_dim)
        decoder_tokens[batch_range, unmask_indices] = encoded_unmasked
        decoder_tokens[batch_range, mask_indices] = self.mask_token.expand(
            B, num_masked, -1
        )

        # 5. Decode every position back to pixel space.
        reconstructed_patches = self.decoder(decoder_tokens)  # (B, N, p²*3)

        # BUGFIX: the reconstruction target is the *pixel* content of each
        # patch, not the patch embedding. The original code returned the
        # embeddings, which only matched the decoder's output dimension by
        # the coincidence 16²·3 == 768.
        return reconstructed_patches, self.patchify(imgs), mask_indices


def mse_loss(reconstructed_patches, original_patches, mask_indices):
    """Mean-squared error computed over the masked patches only.

    Args:
        reconstructed_patches: (B, N, D) decoder outputs.
        original_patches: (B, N, D) reconstruction targets (same last dim).
        mask_indices: (B, num_masked) long tensor of masked patch indices.

    Returns:
        Scalar tensor: MSE averaged over all masked elements.
    """
    # Vectorized gather of the masked positions replaces the original Python
    # loop over the batch — identical result, one kernel instead of B.
    D = reconstructed_patches.shape[-1]
    idx = mask_indices.unsqueeze(-1).expand(-1, -1, D)  # (B, num_masked, D)
    pred = reconstructed_patches.gather(1, idx)
    target = original_patches.gather(1, idx)
    # Every sample has the same number of masked patches, so the global mean
    # equals the original average of per-sample means.
    return torch.mean((pred - target) ** 2)


if __name__ == "__main__":
    # Smoke test: guard so importing this module does not build a full ViT
    # and run a forward pass as a side effect.
    imgs = torch.rand(2, 3, 224, 224)
    model = MAE()
    reconstructed_patches, patches, mask_indices = model(imgs)
    loss = mse_loss(reconstructed_patches, patches, mask_indices)
    print(loss)