import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import math
from einops import rearrange

class SpatioTemporalAutoencoder(nn.Module):
    """Transformer-based autoencoder for hand-object interaction pose sequences.

    Each frame's flattened joint coordinates are projected to a latent vector
    (spatial encoder), the sequence of latents is contextualized with a
    Transformer (temporal encoder), and two symmetric decoder branches
    reconstruct the hand joints and the object points separately.

    Args:
        input_dim: flattened per-frame input size, joints x coordinate dims.
        latent_dim: latent feature dimension (Transformer d_model).
        num_heads: number of Transformer attention heads.
        num_layers: number of Transformer layers per encoder/decoder stack.
        spatial_hidden: hidden width of the spatial MLP encoder/decoders.
        temporal_hidden: Transformer feed-forward width.
        dropout: dropout probability used throughout.
        hand_joints: number of hand joints reconstructed by the hand branch.
        obj_joints: number of object points reconstructed by the object branch.
        coord_dim: coordinate dimensionality per joint (e.g. 3 for xyz).
    """
    def __init__(self,
                 input_dim=298 * 3,       # joints x coordinate dims
                 latent_dim=512,          # latent feature dimension
                 num_heads=8,             # Transformer attention heads
                 num_layers=6,            # Transformer layers
                 spatial_hidden=512,      # spatial encoder hidden width
                 temporal_hidden=512,     # temporal encoder feed-forward width
                 dropout=0.1,
                 hand_joints=42,          # hand branch output joints (default keeps original 42*3)
                 obj_joints=256,          # object branch output points (default keeps original 256*3)
                 coord_dim=3):            # coordinates per joint
        super().__init__()

        # Spatial encoder: per-frame MLP from flattened joints to latent space.
        self.spatial_encoder = nn.Sequential(
            nn.Linear(input_dim, spatial_hidden),
            nn.LayerNorm(spatial_hidden),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(spatial_hidden, latent_dim),
            nn.LayerNorm(latent_dim)
        )

        # Temporal Transformer encoder over the frame sequence.
        self.temporal_encoder = self._build_temporal_stack(
            latent_dim, num_heads, temporal_hidden, dropout, num_layers)

        # Sinusoidal positional encoding (dropout disabled here; the
        # Transformer layers already apply dropout).
        self.pos_encoder = PositionalEncoding(latent_dim, dropout=0)

        # Decoder branches (symmetric structure): temporal Transformer
        # followed by a spatial MLP projecting back to joint coordinates.
        self.hand_temporal_decoder = self._build_temporal_stack(
            latent_dim, num_heads, temporal_hidden, dropout, num_layers)
        self.hand_spatial_decoder = nn.Sequential(
            nn.Linear(latent_dim, spatial_hidden),
            nn.LayerNorm(spatial_hidden),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(spatial_hidden, hand_joints * coord_dim)
        )

        self.obj_temporal_decoder = self._build_temporal_stack(
            latent_dim, num_heads, temporal_hidden, dropout, num_layers)
        self.obj_spatial_decoder = nn.Sequential(
            nn.Linear(latent_dim, spatial_hidden),
            nn.LayerNorm(spatial_hidden),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(spatial_hidden, obj_joints * coord_dim)
        )

        self._init_weights()

    @staticmethod
    def _build_temporal_stack(d_model, num_heads, dim_feedforward, dropout, num_layers):
        """Build one batch-first Transformer encoder stack (shared recipe for
        the temporal encoder and both temporal decoders)."""
        return nn.TransformerEncoder(
            encoder_layer=TransformerEncoderLayer(
                d_model=d_model,
                nhead=num_heads,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
                activation='gelu',
                batch_first=True),
            num_layers=num_layers
        )

    def _init_weights(self):
        """Xavier-normal init for all Linear weights, zeros for biases,
        identity init for LayerNorm (applies to Transformer internals too)."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """Encode a pose sequence and decode hand and object reconstructions.

        Args:
            x: [B, T, J, C] pose tensor (J*C must equal ``input_dim``).

        Returns:
            Tuple of ``(pred_hand, pred_obj)`` with shapes
            [B, T, hand_joints, C] and [B, T, obj_joints, C].
        """
        B, T, J, C = x.shape
        # reshape (not view) so non-contiguous inputs are handled safely
        flat = x.reshape(B * T, J * C)
        spatial_feat = self.spatial_encoder(flat)        # [B*T, latent]
        spatial_feat = spatial_feat.view(B, T, -1)       # [B, T, latent]

        # Add positional encoding, then run the temporal encoder.
        temporal_feat = self.pos_encoder(spatial_feat)   # [B, T, latent]
        encoded = self.temporal_encoder(temporal_feat)   # [B, T, latent]

        # Hand branch: temporal decode, then project to joint coordinates.
        pred_hand = self.hand_temporal_decoder(encoded)            # [B, T, latent]
        pred_hand = self.hand_spatial_decoder(
            pred_hand.reshape(B * T, -1))                          # [B*T, V*C]
        pred_hand = pred_hand.view(B, T, -1, C)                    # [B, T, V, C]

        # Object branch: same structure as the hand branch.
        pred_obj = self.obj_temporal_decoder(encoded)              # [B, T, latent]
        pred_obj = self.obj_spatial_decoder(
            pred_obj.reshape(B * T, -1))                           # [B*T, V*C]
        pred_obj = pred_obj.view(B, T, -1, C)                      # [B, T, V, C]
        return pred_hand, pred_obj
    


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017), batch-first.

    Precomputes a [1, max_len, d_model] table of interleaved sin/cos values
    and adds the first ``seq_len`` rows to the input.

    Args:
        d_model: feature dimension of the inputs (even or odd both supported).
        dropout: dropout applied after adding the positional encoding.
        max_len: maximum supported sequence length.
    """
    def __init__(self, d_model, dropout=0.1, max_len=100):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(1, max_len, d_model)
        pe[0, :, 0::2] = torch.sin(position * div_term)
        # For odd d_model the cosine slice has one fewer column than div_term;
        # truncating avoids a shape-mismatch error (no-op for even d_model).
        pe[0, :, 1::2] = torch.cos(position * div_term[: d_model // 2])
        # Buffer (not Parameter): moves with .to()/.cuda() but is not trained.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encoding to ``x`` of shape [B, T, d_model].

        Raises:
            ValueError: if T exceeds ``max_len`` (instead of an opaque
            broadcasting error).
        """
        if x.size(1) > self.pe.size(1):
            raise ValueError(
                f"sequence length {x.size(1)} exceeds max_len {self.pe.size(1)}")
        x = x + self.pe[:, :x.size(1), :]
        return self.dropout(x)
    

if __name__ == "__main__":
    # Smoke test: build the model, report its size, and run one forward pass.
    demo_model = SpatioTemporalAutoencoder()
    param_count = sum(p.numel() for p in demo_model.parameters())
    print(f"{param_count / 1e6} M")
    # Example input: [B, T, J, C]
    sample_pose = torch.randn(2, 49, 298, 3)
    hand_out, obj_out = demo_model(sample_pose)
    print(f"pred_hand shape: {hand_out.shape}")
    print(f"pred_obj shape: {obj_out.shape}")