import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Optional, Tuple, Union, List, Dict

class MLP(nn.Module):
    """Two-layer feed-forward block: Linear -> GELU -> Dropout -> Linear -> Dropout.

    Operates on the last dimension of the input, leaving all leading
    dimensions untouched.
    """

    def __init__(self, in_features: int, hidden_features: int, out_features: int, drop: float = 0.0):
        super().__init__()
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = nn.GELU()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the MLP along the last dimension of ``x``."""
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))

class MixerBlock(nn.Module):
    """One MLP-Mixer block.

    A pre-norm token-mixing MLP (acting across the sequence axis) followed by
    a pre-norm channel-mixing MLP (acting across the feature axis), each
    wrapped in a residual connection.
    """

    def __init__(self, dim: int, seq_len: int, token_dim: int, channel_dim: int, drop: float = 0.0):
        super().__init__()
        # Mixes information between positions: Linear layers act on the
        # sequence dimension (input is transposed before this runs).
        self.token_mixer = nn.Sequential(
            nn.Linear(seq_len, token_dim),
            nn.GELU(),
            nn.Dropout(drop),
            nn.Linear(token_dim, seq_len),
            nn.Dropout(drop),
        )

        # Mixes information between features at each position.
        self.channel_mixer = nn.Sequential(
            nn.Linear(dim, channel_dim),
            nn.GELU(),
            nn.Dropout(drop),
            nn.Linear(channel_dim, dim),
            nn.Dropout(drop),
        )

        # Pre-norm design: LayerNorm runs before each mixer, inside the residual.
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Transform ``x`` of shape [B, N, C] to the same shape.

        B: batch size, N: sequence length, C: feature (channel) dimension.
        """
        # Token mixing: transpose to [B, C, N] so the mixer's Linear layers
        # operate over the sequence dimension, then transpose back.
        tokens_mixed = self.token_mixer(self.norm1(x).transpose(1, 2)).transpose(1, 2)
        x = x + tokens_mixed

        # Channel mixing with its own residual connection.
        return x + self.channel_mixer(self.norm2(x))

class PatchEmbedding(nn.Module):
    """Split an image into non-overlapping patches and linearly embed each one.

    Implemented as a single strided convolution whose kernel size and stride
    both equal the patch size.

    Args:
        img_size: Expected (square) input resolution.
        patch_size: Side length of each square patch.
        in_channels: Number of input image channels.
        embed_dim: Dimension of each patch embedding.
    """
    def __init__(self, img_size: int = 224, patch_size: int = 16, in_channels: int = 3, embed_dim: int = 768):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = img_size // patch_size
        self.num_patches = self.grid_size * self.grid_size

        self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Embed ``x`` of shape [B, C, H, W] into patch tokens [B, N, embed_dim].

        Raises:
            ValueError: If the spatial size (H, W) does not match ``img_size``.
        """
        B, C, H, W = x.shape
        # Explicit check instead of `assert`: asserts are stripped under
        # `python -O`, which would silently skip input validation.
        if H != self.img_size or W != self.img_size:
            raise ValueError(
                f"Input image size ({H}*{W}) doesn't match model ({self.img_size}*{self.img_size})."
            )

        # Project patches into the embedding space, then flatten the grid.
        x = self.proj(x)        # [B, embed_dim, H//patch_size, W//patch_size]
        x = x.flatten(2)        # [B, embed_dim, N]
        x = x.transpose(1, 2)   # [B, N, embed_dim]

        return x

class MLP_Mixer(nn.Module):
    """MLP-Mixer model: patch embedding, a stack of Mixer blocks, global
    average pooling, and a linear classification head.

    Args:
        img_size: Input image resolution (square).
        patch_size: Patch side length.
        in_channels: Number of input image channels.
        num_classes: Output classes for the head; 0 replaces the head with
            an identity (the model then returns pooled features).
        embed_dim: Patch embedding / hidden feature dimension.
        depth: Number of Mixer blocks.
        token_dim: Hidden width of the token-mixing MLPs.
        channel_dim: Hidden width of the channel-mixing MLPs.
        drop_rate: Dropout probability used throughout.
    """
    def __init__(
        self,
        img_size: int = 224,
        patch_size: int = 16,
        in_channels: int = 3,
        num_classes: int = 1000,
        embed_dim: int = 768,
        depth: int = 12,
        token_dim: int = 384,
        channel_dim: int = 3072,
        drop_rate: float = 0.0,
        ):
        super().__init__()

        # Patch embedding: image -> sequence of patch tokens.
        self.patch_embed = PatchEmbedding(
            img_size=img_size,
            patch_size=patch_size,
            in_channels=in_channels,
            embed_dim=embed_dim
        )
        self.num_patches = self.patch_embed.num_patches

        # Learnable positional embedding (optional in Mixer designs; the
        # original paper omits it since token mixing is position-aware).
        self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches, embed_dim))
        nn.init.trunc_normal_(self.pos_embed, std=0.02)

        self.dropout = nn.Dropout(drop_rate)

        # Stack of Mixer blocks.
        self.blocks = nn.Sequential(*[
            MixerBlock(
                dim=embed_dim,
                seq_len=self.num_patches,
                token_dim=token_dim,
                channel_dim=channel_dim,
                drop=drop_rate
            )
            for _ in range(depth)
        ])

        self.norm = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear layers; unit/zero init for LayerNorm."""
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=0.02)
            # (Removed a redundant nested isinstance(m, nn.Linear) check here.)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward_features(self, x: torch.Tensor) -> torch.Tensor:
        """Map an image batch [B, C, H, W] to token features [B, N, embed_dim]."""
        x = self.patch_embed(x)  # [B, N, C]
        x = x + self.pos_embed
        x = self.dropout(x)
        x = self.blocks(x)
        x = self.norm(x)
        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Classify an image batch; returns logits [B, num_classes]
        (or pooled features [B, embed_dim] when num_classes == 0)."""
        x = self.forward_features(x)
        x = x.mean(dim=1)  # global average pooling over tokens -> [B, C]
        x = self.head(x)
        return x

# 预定义的模型配置
def mixer_s32(pretrained: bool = False, **kwargs) -> MLP_Mixer:
    """Mixer-S/32: small variant, patch size 32."""
    return MLP_Mixer(
        patch_size=32,
        embed_dim=512,
        depth=8,
        token_dim=256,
        channel_dim=2048,
        **kwargs,
    )

def mixer_s16(pretrained: bool = False, **kwargs) -> MLP_Mixer:
    """Mixer-S/16: small variant, patch size 16."""
    return MLP_Mixer(
        patch_size=16,
        embed_dim=512,
        depth=8,
        token_dim=256,
        channel_dim=2048,
        **kwargs,
    )

def mixer_b32(pretrained: bool = False, **kwargs) -> MLP_Mixer:
    """Mixer-B/32: base variant, patch size 32."""
    return MLP_Mixer(
        patch_size=32,
        embed_dim=768,
        depth=12,
        token_dim=384,
        channel_dim=3072,
        **kwargs,
    )

def mixer_b16(pretrained: bool = False, **kwargs) -> MLP_Mixer:
    """Mixer-B/16: base variant, patch size 16."""
    return MLP_Mixer(
        patch_size=16,
        embed_dim=768,
        depth=12,
        token_dim=384,
        channel_dim=3072,
        **kwargs,
    )

def mixer_l32(pretrained: bool = False, **kwargs) -> MLP_Mixer:
    """Mixer-L/32: large variant, patch size 32."""
    return MLP_Mixer(
        patch_size=32,
        embed_dim=1024,
        depth=24,
        token_dim=512,
        channel_dim=4096,
        **kwargs,
    )

def mixer_l16(pretrained: bool = False, **kwargs) -> MLP_Mixer:
    """Mixer-L/16: large variant, patch size 16."""
    return MLP_Mixer(
        patch_size=16,
        embed_dim=1024,
        depth=24,
        token_dim=512,
        channel_dim=4096,
        **kwargs,
    )

