"""
LSTM自编码器模型
作者: magical857
日期: 2025-11-01
"""

import torch
import torch.nn as nn


class LSTMEncoder(nn.Module):
    """LSTM encoder: maps an input sequence into a Koopman embedding sequence.

    An LSTM processes the input sequence, and a small MLP head projects each
    per-step hidden state into the Koopman space.
    """

    def __init__(self, input_dim, hidden_dim, num_layers, koopman_dim, dropout=0.2):
        """
        Args:
            input_dim: size of each input step (e.g. state_dim + control_dim).
            hidden_dim: LSTM hidden size.
            num_layers: number of stacked LSTM layers.
            koopman_dim: dimensionality of the Koopman embedding.
            dropout: dropout probability for the LSTM (inter-layer) and MLP head.
        """
        super().__init__()

        self.hidden_dim = hidden_dim
        self.num_layers = num_layers

        self.lstm = nn.LSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            # nn.LSTM applies dropout only between layers; setting it with a
            # single layer triggers a warning, so disable it in that case.
            dropout=dropout if num_layers > 1 else 0,
            batch_first=True,
        )

        # MLP head projecting hidden states into the Koopman space.
        self.fc_koopman = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, koopman_dim),
        )

    def forward(self, x):
        """
        Args:
            x: (batch_size, seq_len, input_dim)
        Returns:
            koopman_embedding: (batch_size, seq_len, koopman_dim)
        """
        # lstm_out: (batch_size, seq_len, hidden_dim)
        lstm_out, _ = self.lstm(x)
        # nn.Linear (and hence the Sequential head) operates on the last
        # dimension of arbitrary-rank input, so the flatten/reshape
        # round-trip of the original implementation is unnecessary.
        return self.fc_koopman(lstm_out)


class LSTMDecoder(nn.Module):
    """LSTM decoder: reconstructs an output sequence from a Koopman embedding.

    An MLP lifts each Koopman vector back to the hidden dimension, an LSTM
    processes the lifted sequence, and a linear layer produces the output.
    """

    def __init__(self, koopman_dim, hidden_dim, num_layers, output_dim, dropout=0.2):
        """
        Args:
            koopman_dim: dimensionality of the Koopman embedding.
            hidden_dim: LSTM hidden size.
            num_layers: number of stacked LSTM layers.
            output_dim: size of each reconstructed output step.
            dropout: dropout probability for the MLP and the LSTM (inter-layer).
        """
        super().__init__()

        self.hidden_dim = hidden_dim
        self.num_layers = num_layers

        # MLP mapping Koopman vectors back to the hidden dimension.
        self.fc_hidden = nn.Sequential(
            nn.Linear(koopman_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, hidden_dim),
        )

        self.lstm = nn.LSTM(
            input_size=hidden_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            # nn.LSTM applies dropout only between layers; setting it with a
            # single layer triggers a warning, so disable it in that case.
            dropout=dropout if num_layers > 1 else 0,
            batch_first=True,
        )

        # Final projection to the reconstructed output dimension.
        self.fc_out = nn.Linear(hidden_dim, output_dim)

    def forward(self, koopman_embedding):
        """
        Args:
            koopman_embedding: (batch_size, seq_len, koopman_dim)
        Returns:
            reconstruction: (batch_size, seq_len, output_dim)
        """
        # nn.Linear operates on the last dimension of arbitrary-rank input,
        # so no flatten/reshape round-trips are needed at either end.
        hidden = self.fc_hidden(koopman_embedding)
        # lstm_out: (batch_size, seq_len, hidden_dim)
        lstm_out, _ = self.lstm(hidden)
        return self.fc_out(lstm_out)


class LSTMAutoencoder(nn.Module):
    """Sequence autoencoder over (state, control) trajectories.

    Encodes concatenated state/control sequences into a Koopman embedding
    and decodes the embedding back into the state trajectory only.
    """

    def __init__(self, state_dim, control_dim, hidden_dim, num_layers,
                 koopman_dim, dropout=0.2):
        """
        Args:
            state_dim: size of each state vector.
            control_dim: size of each control vector.
            hidden_dim: LSTM hidden size for both encoder and decoder.
            num_layers: number of stacked LSTM layers in each half.
            koopman_dim: dimensionality of the Koopman embedding.
            dropout: dropout probability passed to both halves.
        """
        super().__init__()

        self.state_dim = state_dim
        self.control_dim = control_dim

        # The encoder consumes concatenated state + control vectors.
        self.encoder = LSTMEncoder(
            input_dim=state_dim + control_dim,
            hidden_dim=hidden_dim,
            num_layers=num_layers,
            koopman_dim=koopman_dim,
            dropout=dropout,
        )

        # The decoder reconstructs only the state trajectory.
        self.decoder = LSTMDecoder(
            koopman_dim=koopman_dim,
            hidden_dim=hidden_dim,
            num_layers=num_layers,
            output_dim=state_dim,
            dropout=dropout,
        )

    def forward(self, states, controls):
        """
        Args:
            states: (batch_size, seq_len, state_dim)
            controls: (batch_size, seq_len, control_dim)
        Returns:
            reconstruction: (batch_size, seq_len, state_dim)
            koopman_embedding: (batch_size, seq_len, koopman_dim)
        """
        embedding = self.encode(states, controls)
        return self.decode(embedding), embedding

    def encode(self, states, controls):
        """Map (states, controls) into the Koopman embedding space."""
        combined = torch.cat([states, controls], dim=-1)
        return self.encoder(combined)

    def decode(self, koopman_embedding):
        """Reconstruct the state trajectory from a Koopman embedding."""
        return self.decoder(koopman_embedding)


if __name__ == "__main__":
    # Smoke test: run one random batch through the model and report shapes.
    n_batch, n_steps = 8, 50
    n_state, n_control = 4, 2

    autoencoder = LSTMAutoencoder(
        state_dim=n_state,
        control_dim=n_control,
        hidden_dim=128,
        num_layers=2,
        koopman_dim=64,
        dropout=0.2,
    )

    demo_states = torch.randn(n_batch, n_steps, n_state)
    demo_controls = torch.randn(n_batch, n_steps, n_control)

    recon, emb = autoencoder(demo_states, demo_controls)

    print(f"输入状态形状: {demo_states.shape}")
    print(f"输入控制形状: {demo_controls.shape}")
    print(f"Koopman嵌入形状: {emb.shape}")
    print(f"重构输出形状: {recon.shape}")
    print(f"模型参数量: {sum(p.numel() for p in autoencoder.parameters()):,}")