"""
模型定义模块
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import Config

class MultiScaleAttention(nn.Module):
    """Multi-scale attention block.

    Fuses the input with features from four 1D convolutions of increasing
    receptive field (kernels 3/5/7/9), then applies multi-head self-attention
    with a residual connection. Operates on ``(seq_len, batch, d_model)``
    tensors, the ``batch_first=False`` layout of ``nn.MultiheadAttention``.
    """

    def __init__(self, d_model, nhead, dropout=0.1):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)

        # Multi-scale 1D convolutions. "Same" padding (kernel // 2) keeps
        # the sequence length unchanged at every scale.
        self.conv1 = nn.Conv1d(d_model, d_model, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(d_model, d_model, kernel_size=5, padding=2)
        self.conv3 = nn.Conv1d(d_model, d_model, kernel_size=7, padding=3)
        self.conv4 = nn.Conv1d(d_model, d_model, kernel_size=9, padding=4)

        # Learnable fusion weights over 5 feature streams
        # (identity + 4 conv scales); softmax-normalized in forward().
        self.fusion_weights = nn.Parameter(torch.ones(5) / 5)

        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src, is_causal=False):
        """Apply multi-scale fusion followed by self-attention.

        Args:
            src: input tensor of shape (seq_len, batch, d_model).
            is_causal: forwarded to ``nn.MultiheadAttention``.

        Returns:
            torch.Tensor: output of shape (seq_len, batch, d_model).
        """
        # (L, B, C) -> (B, C, L) for Conv1d.
        src_bcl = src.permute(1, 2, 0)

        # Multi-scale feature extraction.
        conv_outs = [
            conv(src_bcl)
            for conv in (self.conv1, self.conv2, self.conv3, self.conv4)
        ]

        # Softmax so the fusion weights form a convex combination.
        weights = F.softmax(self.fusion_weights, dim=0)

        # Weighted fusion of the identity stream and the four conv streams.
        fused = weights[0] * src_bcl
        for w, out in zip(weights[1:], conv_outs):
            fused = fused + w * out

        # Back to (L, B, C); pre-norm before self-attention.
        fused = fused.permute(2, 0, 1)
        src2 = self.norm(fused)
        src2, _ = self.self_attn(src2, src2, src2, need_weights=False, is_causal=is_causal)

        # Residual connection. Shapes always match: every conv uses "same"
        # padding and attention preserves shape, so the old shape-mismatch
        # fallback (debug print + slice/zero-pad) was unreachable dead code
        # and has been removed.
        return src + self.dropout(src2)

class PreNormTransformerEncoderLayer(nn.Module):
    """Pre-norm Transformer encoder layer.

    LayerNorm is applied *before* each sub-layer (multi-scale self-attention,
    then a position-wise feed-forward network), with residual connections
    around both sub-layers.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
        super().__init__()
        self.self_attn = MultiScaleAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, src):
        """Run one pre-norm encoder step.

        Args:
            src: input tensor of shape (seq_len, batch, d_model).

        Returns:
            torch.Tensor: output tensor of the same shape.
        """
        # Sub-layer 1: multi-scale self-attention over the normalized input.
        attn_out = self.self_attn(self.norm1(src), is_causal=False)
        src = src + self.dropout1(attn_out)

        # Sub-layer 2: position-wise feed-forward over the normalized input.
        hidden = self.dropout(torch.relu(self.linear1(self.norm2(src))))
        ff_out = self.linear2(hidden)
        return src + self.dropout2(ff_out)

class TransformerAutoencoder(nn.Module):
    """Transformer autoencoder producing sparse codes over a fixed dictionary.

    The encoder maps an input sequence to a non-negative code matrix X with at
    most ``k_nonzero`` active entries per time step; the input is reconstructed
    as ``D @ X`` per time step.
    """

    def __init__(self, k, D, time_steps, channel_dim=128, nhead=8, num_layers=2,
                 dropout_rate=0.1, batch_size=20, k_nonzero=16):
        """
        Args:
            k: number of dictionary atoms (code dimension).
            D: dictionary / endmember matrix of shape (channel_dim, k).
            time_steps: nominal sequence length (kept for reference; the
                forward pass uses the runtime length).
            channel_dim: model width per time step.
            nhead: attention heads per encoder layer.
            num_layers: requested depth; one extra layer is stacked on top.
            dropout_rate: dropout probability used throughout.
            batch_size: batch size the positional encoding is sized for.
            k_nonzero: number of code entries kept per time step.
        """
        super().__init__()
        self.k = k
        self.time_steps = time_steps
        self.k_nonzero = k_nonzero

        # Normalize each time step's channel vector before encoding.
        self.input_norm = nn.LayerNorm(channel_dim)

        # Learnable positional encoding of shape (1, batch_size, channel_dim).
        # NOTE(review): it is indexed by *batch slot*, not time step — every
        # time step receives the same vector. Unusual, but preserved as
        # designed (parameter shape is state-dict compatible).
        self.positional_encoding = nn.Parameter(torch.zeros(1, batch_size, channel_dim))
        nn.init.normal_(self.positional_encoding, mean=0.0, std=0.02)

        # Stack of num_layers + 1 pre-norm encoder layers (the "+ 1" is
        # intentional in the original design: one extra layer).
        self.transformer_layers = nn.ModuleList([
            PreNormTransformerEncoderLayer(
                d_model=channel_dim,
                nhead=nhead,
                dim_feedforward=1024,
                dropout=dropout_rate
            ) for _ in range(num_layers + 1)
        ])

        # Projection from encoder features to non-negative code activations
        # (final ReLU guarantees X >= 0 before sparsification).
        self.fc = nn.Sequential(
            nn.Linear(channel_dim, 512),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(512, k),
            nn.ReLU(),
            nn.Dropout(dropout_rate)
        )

        # Fixed (non-trainable) dictionary; buffer moves with the module.
        self.register_buffer('D', D)

    def forward(self, Y):
        """Encode, sparsify, and reconstruct.

        Args:
            Y: input tensor of shape (batch, channel_dim, time_steps).

        Returns:
            tuple: (Y_reconstructed of shape (batch, channel_dim, time_steps),
                    X sparse codes of shape (batch, k, time_steps))
        """
        batch_size, _, seq_len = Y.shape

        # (batch, C, T) -> (T, batch, C) for the encoder layers, then
        # normalize the channel dimension.
        encoded = self.input_norm(Y.permute(2, 0, 1))

        # Adapt the positional encoding to the runtime batch size:
        # slice when smaller, zero-pad when larger (matches prior behavior).
        pos = self.positional_encoding
        trained_bs = pos.shape[1]
        if batch_size < trained_bs:
            pos = pos[:, :batch_size, :]
        elif batch_size > trained_bs:
            pad = torch.zeros(1, batch_size - trained_bs, pos.shape[2],
                              dtype=pos.dtype, device=pos.device)
            pos = torch.cat([pos, pad], dim=1)
        # Broadcast over the *runtime* sequence length. The old code repeated
        # over the configured self.time_steps, which crashed on inputs with a
        # different time dimension; behavior is identical when they match.
        encoded = encoded + pos.expand(seq_len, -1, -1)

        # Run the encoder stack.
        for layer in self.transformer_layers:
            encoded = layer(encoded)

        encoded = encoded.permute(1, 0, 2)  # (batch, T, channel_dim)

        # Non-negative code activations: (batch, k, T).
        X = self.fc(encoded).permute(0, 2, 1)

        # Hard sparsification: keep the k_nonzero largest activations per
        # time step, zero the rest. Clamp so topk never exceeds k atoms.
        k_keep = min(self.k_nonzero, X.shape[1])
        topk_values, topk_indices = torch.topk(X, k_keep, dim=1)
        X = torch.zeros_like(X).scatter_(1, topk_indices, topk_values)

        # Normalize each time step's code to (approximately) sum to 1;
        # epsilon guards the all-zero case.
        X = X / (torch.sum(X, dim=1, keepdim=True) + 1e-8)

        # Reconstruct: Y_hat[b, :, t] = D @ X[b, :, t].
        X_bt_k = X.transpose(1, 2)  # (batch, T, k)
        Y_reconstructed = torch.einsum("btk,ck->btc", X_bt_k, self.D)
        Y_reconstructed = Y_reconstructed.permute(0, 2, 1)  # (batch, C, T)

        return Y_reconstructed, X

class CustomLoss(nn.Module):
    """Composite loss: RMSE reconstruction plus weighted L1 sparsity and
    temporal-smoothness penalties on the code matrix X."""

    def __init__(self, alpha=0.1, beta=0.1):
        super(CustomLoss, self).__init__()
        self.mse = nn.MSELoss()
        self.alpha = alpha  # weight of the sparsity penalty
        self.beta = beta    # weight of the smoothness penalty

    def forward(self, y_pred, y_true, X):
        """Compute the total loss.

        Args:
            y_pred: reconstructed signal.
            y_true: target signal.
            X: sparse code matrix of shape (batch, k, time_steps).

        Returns:
            torch.Tensor: scalar total loss.
        """
        # RMSE between reconstruction and target.
        reconstruction = self.mse(y_pred, y_true).sqrt()

        # Mean absolute activation — encourages sparse codes.
        sparsity = X.abs().mean()

        # Mean absolute difference between neighboring time steps —
        # encourages temporally smooth codes.
        smoothness = (X[:, :, 1:] - X[:, :, :-1]).abs().mean()

        return reconstruction + self.alpha * sparsity + self.beta * smoothness

def create_model(config=None):
    """Assemble the training triplet for the autoencoder.

    Args:
        config: optional configuration object; a default ``Config`` is
            created when omitted.

    Returns:
        tuple: (model, criterion, optimizer)
    """
    if config is None:
        config = Config()

    # The endmember/dictionary matrix D comes from the data pipeline.
    from data_loader import DataLoader
    D_tensor = DataLoader(config).get_endmember_matrix()

    # Model, moved to the configured device.
    model = TransformerAutoencoder(
        k=config.K,
        D=D_tensor,
        time_steps=config.SEQ_LEN,
        channel_dim=config.CHANNEL_DIM,
        nhead=config.NHEAD,
        num_layers=config.NUM_LAYERS,
        dropout_rate=config.DROPOUT_RATE,
        batch_size=config.BATCH_SIZE,
        k_nonzero=config.K_NONZERO,
    ).to(config.DEVICE)

    # Loss with configured sparsity/smoothness weights.
    criterion = CustomLoss(alpha=config.ALPHA, beta=config.BETA)

    # Adam with configured learning rate and L2 regularization.
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=config.LEARNING_RATE,
        weight_decay=config.WEIGHT_DECAY,
    )

    return model, criterion, optimizer