import torch
import torch.nn as nn
import torch.optim as optim
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os

# Select compute device (GPU if available, otherwise CPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# -------------------- Hyperparameters --------------------
m = 50  # number of training time steps (50 time points)
test_m = 50  # number of test time steps (50 time points)
k = 481  # abundance-matrix dimension, i.e. size of the sparse representation
K_nonzero = 20  # keep only the top-20 values per time step (hard sparsification)
lambda_sparsity = 0.1  # weight of the sparsity regularization term
epochs = 1000  # number of training epochs
learning_rate = 0.0001  # learning rate

# Data-loading parameters: files are "<data_folder>/<file_prefix><index><file_extension>".
data_folder = "dync"
file_prefix = "spatial_correlation_"
file_extension = ".mat"

# -------------------- Load training data --------------------
# Training samples come from files numbered 1..m.
result_matrix = np.zeros((8, 8, m), dtype=np.complex64)
for idx in range(m):
    fname = os.path.join(data_folder, f"{file_prefix}{idx + 1}{file_extension}")
    # Guard clauses: skip (with a message) anything that is missing or malformed.
    if not os.path.exists(fname):
        print(f"File not found: {fname}")
        continue
    mat = scipy.io.loadmat(fname)
    if "p" not in mat:
        print(f"'p' not found in {fname}")
        continue
    Y = mat["p"]
    if Y.size != 64:
        print(f"Shape of 'p' in {fname} is not 64.")
        continue
    result_matrix[:, :, idx] = Y.reshape(8, 8)

# Split complex data into real/imaginary parts and stack along the channel axis.
Y_real = np.real(result_matrix).reshape(64, m)
Y_imag = np.imag(result_matrix).reshape(64, m)
Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, m)
Y_tensor = torch.from_numpy(Y_combined).float().to(device).unsqueeze(0)  # (1, 128, m)

# -------------------- Load endmember (dictionary) matrix --------------------
fname_D = os.path.join("Data/DC1", "phasecha.mat")
mat_D = scipy.io.loadmat(fname_D)
D = mat_D["phasecha"]
# The dictionary must have 64 rows (one per element of the 8x8 matrix).
if D.shape[0] != 64:
    raise ValueError("The shape of D is not compatible with 8x8 reshaping.")
D_reshaped = D.reshape(64, k)
# Stack real and imaginary parts to obtain a real-valued (128, k) dictionary.
D_real = np.real(D_reshaped)
D_imag = np.imag(D_reshaped)
D_combined = np.concatenate((D_real, D_imag), axis=0)  # (128, k)
D_tensor = torch.from_numpy(D_combined).float().to(device)

# -------------------- TransformerAutoencoder definition --------------------
class TransformerAutoencoder(nn.Module):
    """Transformer autoencoder mapping a measurement sequence to a sparse
    abundance representation, reconstructed through a fixed dictionary D.

    Args:
        k: size of the sparse representation (number of dictionary atoms).
        D: fixed dictionary tensor of shape (channel_dim, k); registered as a
            buffer, i.e. saved with the model but not trained.
        time_steps: sequence length (number of time steps).
        channel_dim: per-time-step feature dimension (real+imag stacked).
        nhead: number of self-attention heads (must divide channel_dim).
        num_layers: number of Transformer encoder layers.
        dropout_rate: dropout probability for the embedding and FC head.
        k_nonzero: number of entries kept per time step by the hard
            sparsification (default 20, matching the module-level K_nonzero).
    """

    def __init__(self, k, D, time_steps, channel_dim=128, nhead=4, num_layers=2, dropout_rate=0.0, k_nonzero=20):
        super(TransformerAutoencoder, self).__init__()
        self.k = k
        self.time_steps = time_steps
        self.k_nonzero = k_nonzero

        # Treat time steps as the sequence: linear projection plus a learned
        # positional encoding over the time axis.
        self.embedding = nn.Linear(channel_dim, channel_dim)
        self.positional_encoding = nn.Parameter(torch.randn(1, time_steps, channel_dim))
        self.dropout_embedding = nn.Dropout(dropout_rate)
        # Transformer encoder; batch_first=True means inputs are (batch, seq, feature).
        encoder_layer = nn.TransformerEncoderLayer(d_model=channel_dim, nhead=nhead, dim_feedforward=512, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        # FC head: map Transformer features to a non-negative sparse code.
        self.fc = nn.Sequential(
            nn.Linear(channel_dim, k),
            nn.ReLU(),
            nn.Dropout(dropout_rate)
        )
        # Register the endmember dictionary D as a (non-trainable) buffer.
        self.register_buffer('D', D)

    def forward(self, Y):
        """Encode Y into a sparse code X and reconstruct Y = D @ X.

        Args:
            Y: input tensor of shape (batch, channel_dim, time_steps).

        Returns:
            Tuple (Y_reconstructed, X) with shapes
            (batch, channel_dim, time_steps) and (batch, k, time_steps).
        """
        Y = Y.permute(0, 2, 1)  # (batch, time_steps, channel_dim)
        Y = self.embedding(Y) + self.positional_encoding
        Y = self.dropout_embedding(Y)
        # BUGFIX: the encoder layer is built with batch_first=True, so it must
        # receive (batch, time_steps, channel_dim) directly.  The previous
        # permute to (time_steps, batch, channel_dim) made the encoder treat
        # every time step as a separate batch element of sequence length 1,
        # so self-attention never attended across time.
        encoded = self.transformer_encoder(Y)  # (batch, time_steps, channel_dim)
        X = self.fc(encoded)  # (batch, time_steps, k)
        X = X.permute(0, 2, 1)  # (batch, k, time_steps)
        # Hard sparsification: keep only the k_nonzero largest values per time
        # step (gradients flow through the kept values via scatter_).
        topk_values, topk_indices = torch.topk(X, self.k_nonzero, dim=1)
        mask = torch.zeros_like(X)
        mask.scatter_(1, topk_indices, topk_values)
        X = mask
        # Normalize each time step's code to sum to 1 (eps guards all-zero columns).
        X = X / (torch.sum(X, dim=1, keepdim=True) + 1e-8)
        # Reconstruct each time step as D @ x_t via einsum:
        # (batch, time_steps, k) x (channel_dim, k) -> (batch, time_steps, channel_dim).
        X_transposed = X.transpose(1, 2)  # (batch, time_steps, k)
        Y_reconstructed = torch.einsum("btk,ck->btc", X_transposed, self.D)
        Y_reconstructed = Y_reconstructed.permute(0, 2, 1)  # (batch, channel_dim, time_steps)
        return Y_reconstructed, X

# -------------------- Model initialization --------------------
model = TransformerAutoencoder(k=k, D=D_tensor, time_steps=m, channel_dim=128, nhead=4, num_layers=2, dropout_rate=0.0).to(device)
criterion = nn.MSELoss()  # reconstruction loss between D @ X and the measurements
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)  # small L2 weight decay

# -------------------- Training loop --------------------
loss_values = []
for epoch in range(epochs):
    model.train()
    optimizer.zero_grad()
    Y_reconstructed, X = model(Y_tensor)
    # Reconstruction error between the dictionary reconstruction and the input.
    mse_loss = criterion(Y_reconstructed, Y_tensor)
    # Penalize deviation of the realized fraction of nonzeros from K_nonzero/k.
    target_sparsity = K_nonzero / k
    actual_sparsity = (X > 0).float().mean(dim=1)
    # NOTE(review): (X > 0).float() has zero gradient everywhere, so this term
    # is added to the printed loss but cannot influence the parameter updates;
    # confirm whether a differentiable proxy (e.g. an L1 penalty) was intended.
    sparsity_loss = ((actual_sparsity - target_sparsity) ** 2).mean()
    loss = mse_loss + lambda_sparsity * sparsity_loss
    loss.backward()
    optimizer.step()
    loss_values.append(loss.item())
    # Report progress every 100 epochs.
    if (epoch + 1) % 100 == 0:
        print(f"Epoch [{epoch+1}/{epochs}], Total Loss: {loss.item():.4f}, MSE Loss: {mse_loss.item():.4f}, Sparsity Loss: {sparsity_loss.item():.4f}")

# Plot the training loss curve.
plt.figure()
plt.plot(range(epochs), loss_values)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Training Loss Curve")
plt.show()

# -------------------- Load test data --------------------
# Test samples are assumed to use file indices m+1..m+test_m (i.e. 51-100).
test_result_matrix = np.zeros((8, 8, test_m), dtype=np.complex64)
for idx in range(test_m):
    fname = os.path.join(data_folder, f"{file_prefix}{m + idx + 1}{file_extension}")
    # Guard clauses: skip (with a message) anything missing or malformed.
    if not os.path.exists(fname):
        print(f"File not found: {fname}")
        continue
    mat = scipy.io.loadmat(fname)
    if "p" not in mat:
        print(f"'p' not found in {fname}")
        continue
    Y = mat["p"]
    if Y.size != 64:
        print(f"Shape of 'p' in {fname} is not 64.")
        continue
    test_result_matrix[:, :, idx] = Y.reshape(8, 8)

# Real/imaginary split stacked along the channel axis, as for the training data.
Y_test_real = np.real(test_result_matrix).reshape(64, test_m)
Y_test_imag = np.imag(test_result_matrix).reshape(64, test_m)
Y_test_combined = np.concatenate((Y_test_real, Y_test_imag), axis=0)  # (128, test_m)
Y_test_tensor = torch.from_numpy(Y_test_combined).float().to(device).unsqueeze(0)  # (1, 128, test_m)

# -------------------- Evaluate on the test set --------------------
model.eval()
with torch.no_grad():
    Y_test_reconstructed, X_test = model(Y_test_tensor)

# Overall reconstruction MSE over the whole test sequence.
test_mse = criterion(Y_test_reconstructed, Y_test_tensor).item()
print(f"Test Reconstruction MSE: {test_mse:.6f}")

# Per-time-step RMSE over the columns of the (128, test_m) matrices.
Y_test_rec = Y_test_reconstructed.squeeze(0)  # (128, test_m)
Y_test_true = Y_test_tensor.squeeze(0)  # (128, test_m)
squared_error = (Y_test_rec - Y_test_true) ** 2
test_rmse_per_column = torch.sqrt(squared_error.mean(dim=0))
print("Test RMSE per column:", test_rmse_per_column)