import torch
import torch.nn as nn
import torch.optim as optim
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os

# -----------------------------
# Hyperparameters
# -----------------------------
m = 50  # number of time steps (length of the data along the time axis)
k = 481  # dimension of the sparse dictionary
K_nonzero = 20  # sparsity target: keep only the top 20 nonzero values per time step
lambda_sparsity = 0.1  # weight of the sparsity regularization term
epochs = 1000  # number of training iterations
learning_rate = 0.01  # optimizer learning rate (MLPs usually converge fairly fast)
channel_dim = 128  # number of input channels (64 real parts + 64 imaginary parts)

# -----------------------------
# Data loading and preprocessing
# -----------------------------
data_folder1 = "dync"
file_prefix = "spatial_correlation_"
file_extension = ".mat"

# Buffer for the complex-valued input data, shape (8, 8, m).
result_matrix = np.zeros((8, 8, m), dtype=np.complex64)

# Read one .mat file per time step; missing or malformed files are reported
# and skipped (their slice in result_matrix stays zero).
for i in range(1, m+1):
    fname2 = os.path.join(data_folder1, f"{file_prefix}{i}{file_extension}")
    if os.path.exists(fname2):
        mat2 = scipy.io.loadmat(fname2)
        if "p" in mat2:
            Y = mat2["p"]
            if Y.size == 64:
                Y_reshaped = Y.reshape(8, 8)
                result_matrix[:, :, i-1] = Y_reshaped
            else:
                print(f"文件 {fname2} 中变量 'p' 的形状无法重构为 8x8。")
        else:
            print(f"文件 {fname2} 中未找到变量 'p'。")
    else:
        print(f"文件未找到: {fname2}")

# Split the (8, 8, m) complex data into real and imaginary parts and stack
# them along the channel axis into a real-valued (128, m) matrix.
Y_real = np.real(result_matrix).reshape(64, m)
Y_imag = np.imag(result_matrix).reshape(64, m)
Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, m)

# Convert to a PyTorch tensor and add a batch dimension: shape (1, 128, m).
# NOTE(review): hard-codes CUDA; this fails on CPU-only machines — confirm intended.
Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)

# Load the endmember (dictionary) matrix D.
fname4 = "Data/DC1/phasecha.mat"
mat4 = scipy.io.loadmat(fname4)
D = mat4["phasecha"]
if D.shape[0] == 64:
    D_reshaped = D.reshape(64, k)
else:
    raise ValueError("D 的行数与 8x8 的重构要求不一致。")

# Stack D's real and imaginary parts into a real-valued (128, k) dictionary.
D_real = np.real(D_reshaped)
D_imag = np.imag(D_reshaped)
D_combined = np.concatenate((D_real, D_imag), axis=0)  # (128, k)
D_tensor = torch.from_numpy(D_combined).float().cuda()

# -----------------------------
# 定义基于 MLP 的自动编码器模型
# -----------------------------
class MLPAutoencoder(nn.Module):
    """MLP autoencoder producing a sparse code over a fixed dictionary.

    Each time-step vector (length ``channel_dim``) is encoded by an MLP into
    a non-negative ``k``-dimensional code; only the ``k_nonzero`` largest
    entries per time step are kept, the code is sum-normalized, and the input
    is reconstructed as ``D @ code`` with the fixed (non-trainable)
    dictionary ``D``.
    """

    def __init__(self, k, D, channel_dim=128, hidden_dim=256, k_nonzero=20):
        """
        Args:
            k: dimension of the sparse representation (dictionary size).
            D: fixed dictionary of shape (channel_dim, k); registered as a
               buffer so it follows the module's device but is never trained.
            channel_dim: number of input channels per time step.
            hidden_dim: width of the MLP hidden layer.
            k_nonzero: number of entries kept per time step by the top-k
                sparsification (replaces the former module-level global
                K_nonzero; default matches its original value).
        """
        super().__init__()
        self.k = k
        self.k_nonzero = k_nonzero

        # Per-time-step encoder: channel_dim -> k. The final ReLU keeps the
        # code non-negative, which the sum-normalization below relies on.
        self.encoder = nn.Sequential(
            nn.Linear(channel_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, k),
            nn.ReLU(),
        )

        # Fixed dictionary: a buffer, not a Parameter, so it is excluded
        # from optimization.
        self.register_buffer('D', D)

    def forward(self, Y):
        """
        Args:
            Y: input of shape (batch, channel_dim, time_steps).

        Returns:
            Y_reconstructed: reconstruction with the same shape as ``Y``.
            X: sparse representation of shape (batch, k, time_steps).
        """
        batch_size, channel_dim, time_steps = Y.shape

        # Treat every time step as an independent sample for the MLP:
        # (B, C, T) -> (B*T, C) -> encode -> (B, T, k).
        Y_flat = Y.permute(0, 2, 1).reshape(-1, channel_dim)
        X = self.encoder(Y_flat).view(batch_size, time_steps, self.k)

        # Top-k sparsification along the code dimension: keep the k_nonzero
        # largest activations per time step, zero the rest. Gradients flow
        # only through the surviving entries.
        topk_values, topk_indices = torch.topk(X, self.k_nonzero, dim=2)
        X_sparse = torch.zeros_like(X).scatter_(2, topk_indices, topk_values)

        # Sum-normalize each time step; the epsilon guards all-zero codes.
        X_norm = X_sparse / (X_sparse.sum(dim=2, keepdim=True) + 1e-8)  # (B, T, k)

        # Reconstruct with the fixed dictionary:
        # (B, T, k) x (C, k) -> (B, T, C), then restore the (B, C, T) layout.
        # (The original code permuted X_norm to (B, k, T) and immediately back
        # before this einsum; that redundant round trip is removed.)
        Y_reconstructed = torch.einsum("btk,ck->btc", X_norm, self.D).permute(0, 2, 1)

        # Return the code in (batch, k, time_steps) layout, as before.
        return Y_reconstructed, X_norm.permute(0, 2, 1)

# -----------------------------
# Model, loss, and optimizer setup
# -----------------------------
model = MLPAutoencoder(k=k, D=D_tensor, channel_dim=channel_dim, hidden_dim=256).cuda()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training flag: mm == 0 trains from scratch; otherwise a saved model is loaded.
mm = 0
if mm == 0:
    loss_values = []
    for epoch in range(epochs):
        model.train()
        optimizer.zero_grad()
        
        # Forward pass
        Y_reconstructed, X = model(Y_tensor)
        
        # Reconstruction MSE loss
        mse_loss = criterion(Y_reconstructed, Y_tensor)
        
        # Sparsity loss: push the fraction of nonzero entries in X toward
        # the target ratio K_nonzero / k.
        target_sparsity = K_nonzero / k
        # Fraction of nonzero entries per time step over the code dimension
        # (some values in the normalized code may be extremely small).
        actual_sparsity = (X > 0).float().mean(dim=1)  # shape (batch, time_steps)
        sparsity_loss = ((actual_sparsity - target_sparsity) ** 2).mean()
        
        loss = mse_loss + lambda_sparsity * sparsity_loss
        
        loss.backward()
        optimizer.step()
        
        loss_values.append(loss.item())
        
        if (epoch + 1) % 100 == 0:
            print(f"Epoch [{epoch+1}/{epochs}], Total Loss: {loss.item():.4f}, "
                  f"MSE Loss: {mse_loss.item():.4f}, Sparsity Loss: {sparsity_loss.item():.4f}")
    
    # Plot the training loss curve
    plt.plot(range(epochs), loss_values)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Loss Curve')
    plt.show()
    
    # Save the whole model object (not just the state_dict).
    torch.save(model, "dync/model_mlp.pth")
else:
    # NOTE(review): torch.load of a full model requires the class definition in
    # scope; recent PyTorch defaults to weights_only=True, which breaks this — confirm.
    model = torch.load("dync/model_mlp.pth")

# -----------------------------
# Evaluation
# -----------------------------
model.eval()
with torch.no_grad():
    Y_reconstructed, X = model(Y_tensor)

# Overall reconstruction error (MSE)
mse_total = criterion(Y_reconstructed, Y_tensor).item()

# For comparison, compute the RMSE of the second time step (index 1) only.
subset1 = Y_reconstructed.squeeze(0)[:, 1]  # shape (128,)
subset2 = Y_tensor.squeeze(0)[:, 1]           # shape (128,)
mse_total1 = criterion(subset1, subset2).item()
rmse_total = torch.sqrt(torch.tensor(mse_total1)).item()

# Per-time-step RMSE over the channel dimension.
subset3 = Y_reconstructed.squeeze(0)  # shape (128, time_steps)
subset4 = Y_tensor.squeeze(0)           # shape (128, time_steps)
rmse_per_column = torch.sqrt(torch.mean((subset3 - subset4) ** 2, dim=0))
print("每个时间步的 RMSE:", rmse_per_column)
print(f"最终重构 MSE: {mse_total:.6f}")
