import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR
import torch
import torch.nn as nn
import torch.optim as optim
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os
# ----- Hyperparameters (time-only model) -----
m = 50  # number of time steps (length of the data along the time axis)
k = 481   # dictionary size: number of endmembers / columns of D
K_nonzero = 20  # sparsity target: keep the top-20 coefficients per time step, zero the rest
lambda_sparsity = 0.1  # weight of the sparsity penalty in the total loss
epochs = 1000   # number of training iterations (redefined to the same value before the loop below)
learning_rate = 0.0001  # optimizer learning rate (NOTE: overridden to 0.001 later in this file)

# Data-loading parameters: files are read as dync/spatial_correlation_<i>.mat
data_folder1 = "dync"
file_prefix = "spatial_correlation_"
file_extension = ".mat"

# Raw data layout: (8, 8, m) — one 8x8 complex matrix per time step.
# It is later flattened to (128, m): 64 real + 64 imaginary values per step.
result_matrix = np.zeros((8, 8, m), dtype=np.complex64)

# Read one .mat file per time step and fill the third (time) axis of
# result_matrix. Each file is assumed to store 64 complex values under the
# key "p", reshapeable to an 8x8 matrix — TODO confirm against the data source.
# Missing/malformed files are reported and skipped, leaving zeros in place.
for i in range(1, m+1):
    fname2 = os.path.join(data_folder1, f"{file_prefix}{i}{file_extension}")
    if os.path.exists(fname2):
        mat2 = scipy.io.loadmat(fname2)
        if "p" in mat2:
            Y = mat2["p"]
            if Y.size == 64:
                Y_reshaped = Y.reshape(8, 8)
                result_matrix[:, :, i-1] = Y_reshaped
            else:
                print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
        else:
            print(f"'p' not found in {fname2}")
    else:
        print(f"File not found: {fname2}")

# result_matrix is (8, 8, m) complex; split into real and imaginary parts and
# stack them to obtain a real-valued (128, m) matrix.
Y_real = np.real(result_matrix).reshape(64, m)  # flatten each 8x8 slice to 64 rows, m columns
Y_imag = np.imag(result_matrix).reshape(64, m)  # same for the imaginary part

# Concatenate real and imaginary parts: 128 channels (64 real + 64 imag) per time step.
Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, m)

# Convert to a PyTorch tensor on the GPU and add a batch dimension (b=1).
# Final shape: (1, 128, m), i.e. (batch, channels, length).
Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)

# Load the endmember dictionary D.
# D is expected as (64, k) complex (since 8x8 = 64); real and imaginary parts
# are stacked to form a (128, k) real matrix — TODO confirm "phasecha" layout.
fname4 = "Data/DC1/phasecha.mat"
mat4 = scipy.io.loadmat(fname4)
D = mat4["phasecha"]
if D.shape[0] == 64:
    D_reshaped = D.reshape(64, k)  # no-op if already (64, k); raises if columns != k
else:
    raise ValueError("The number of rows in D is not compatible with 8x8 reshaping.")

D_real = np.real(D_reshaped)
D_imag = np.imag(D_reshaped)
D_combined = np.concatenate((D_real, D_imag), axis=0)  # (128, k)
D_tensor = torch.from_numpy(D_combined).float().cuda()
class TransformerAutoencoder(nn.Module):
    """Transformer-based sparse autoencoder for temporal unmixing.

    Encodes a (batch, channel_dim, time_steps) signal with a Transformer
    encoder, projects each time step to ``k`` non-negative coefficients,
    hard-thresholds them to the largest ``k_nonzero`` entries, normalizes
    each time step's coefficients to sum to 1, and reconstructs the signal
    as ``D @ X`` using a fixed dictionary ``D``.

    Args:
        k: dictionary size (number of endmembers / sparse coefficients).
        D: fixed dictionary tensor of shape (channel_dim, k); registered as
            a non-trainable buffer so it follows the module's device/dtype.
        time_steps: sequence length; sizes the learned positional encoding.
        channel_dim: number of input channels (the Transformer's d_model).
        nhead: number of attention heads (must divide channel_dim).
        num_layers: number of Transformer encoder layers.
        dropout: dropout probability inside the encoder layers.
        k_nonzero: number of coefficients kept per time step (default 20).
            Previously this was read from the module-level global K_nonzero;
            it is now an explicit, backward-compatible parameter.
    """

    def __init__(self, k, D, time_steps, channel_dim=128, nhead=8,
                 num_layers=2, dropout=0.1, k_nonzero=20):
        super(TransformerAutoencoder, self).__init__()
        self.k = k
        self.time_steps = time_steps
        self.k_nonzero = k_nonzero

        # Learnable positional encoding: one channel_dim vector per time step.
        self.positional_encoding = nn.Parameter(torch.randn(1, time_steps, channel_dim))

        # Transformer encoder stack (default batch_first=False, so the
        # forward pass permutes to (seq, batch, feature) around it).
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=channel_dim,
            nhead=nhead,
            dim_feedforward=512,
            dropout=dropout
        )
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        # Per-time-step projection to k non-negative sparse coefficients.
        self.fc = nn.Sequential(
            nn.Linear(channel_dim, k),
            nn.ReLU()
        )

        # Fixed dictionary: a buffer, so it is saved with the model and moved
        # by .cuda()/.to(), but receives no gradient updates.
        self.register_buffer('D', D)

    def forward(self, Y):
        """Encode, sparsify, and reconstruct.

        Args:
            Y: input of shape (batch, channel_dim, time_steps).

        Returns:
            Tuple (Y_reconstructed, X) where Y_reconstructed has shape
            (batch, channel_dim, time_steps) and X, the normalized sparse
            codes, has shape (batch, k, time_steps).
        """
        Y = Y.permute(0, 2, 1)  # (batch, time_steps, channel_dim)

        # Add the learned positional encoding (broadcast over batch).
        Y = Y + self.positional_encoding

        # nn.TransformerEncoder expects (seq, batch, feature) here.
        Y = Y.permute(1, 0, 2)
        encoded = self.transformer_encoder(Y)
        encoded = encoded.permute(1, 0, 2)  # back to (batch, time_steps, channel_dim)

        # Project to non-negative coefficients.
        X = self.fc(encoded)     # (batch, time_steps, k)
        X = X.permute(0, 2, 1)   # (batch, k, time_steps)

        # Hard sparsification: keep only the k_nonzero largest coefficients
        # per time step. Gradients still flow through the kept values.
        topk_values, topk_indices = torch.topk(X, self.k_nonzero, dim=1)
        sparse = torch.zeros_like(X)
        sparse.scatter_(1, topk_indices, topk_values)
        X = sparse

        # Normalize each time step's coefficients to sum to 1 (abundance-style);
        # the epsilon guards against an all-zero column after ReLU.
        X = X / (torch.sum(X, dim=1, keepdim=True) + 1e-8)

        # Reconstruct: Y_hat[b, :, t] = D @ X[b, :, t].
        X_transposed = X.transpose(1, 2)  # (batch, time_steps, k)
        Y_reconstructed = torch.einsum("btk,ck->btc", X_transposed, self.D)
        Y_reconstructed = Y_reconstructed.permute(0, 2, 1)  # (batch, channel_dim, time_steps)

        return Y_reconstructed, X


# ----- Model parameters -----
# NOTE(review): k and epochs are redefined here with the same values as the
# top of the file, but learning_rate changes from 0.0001 to 0.001 — this
# later value is the one the optimizer actually uses. Consider deduplicating.
k = 481  # dictionary size (same as above)
channel_dim = 128  # number of input channels (64 real + 64 imaginary)
time_steps = 50  # number of time steps (same as m above)
learning_rate = 0.001  # initial learning rate (overrides the 0.0001 set earlier)

# Build the model, loss, and optimizer.
nhead = 2  # number of attention heads (overrides the class default of 8)
num_layers = 2  # number of Transformer encoder layers
dropout = 0.1  # dropout probability
model = TransformerAutoencoder(k=k, D=D_tensor, time_steps=time_steps, channel_dim=channel_dim, nhead=nhead, num_layers=num_layers, dropout=dropout).cuda()

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Learning-rate schedule: halve the learning rate every 100 epochs.
scheduler = StepLR(optimizer, step_size=100, gamma=0.5)

# Training configuration (epochs also set at the top of the file).
epochs = 1000
loss_values = []

# Full-batch training loop: the entire (1, 128, m) tensor is one batch.
for epoch in range(epochs):
    model.train()
    optimizer.zero_grad()

    # Forward pass: reconstruction and sparse codes.
    Y_reconstructed, X = model(Y_tensor)

    # Reconstruction error (MSE between input and D @ X).
    mse_loss = criterion(Y_reconstructed, Y_tensor)

    # Sparsity penalty: drive the fraction of nonzero coefficients per time
    # step toward the target K_nonzero / k.
    target_sparsity = K_nonzero / k
    actual_sparsity = (X > 0).float().mean(dim=1)  # nonzero fraction per time step
    sparsity_loss = ((actual_sparsity - target_sparsity) ** 2).mean()

    # Total loss = reconstruction error + weighted sparsity penalty.
    loss = mse_loss + lambda_sparsity * sparsity_loss

    # Backward pass and parameter/LR updates.
    loss.backward()
    optimizer.step()
    scheduler.step()  # advance the StepLR schedule once per epoch

    loss_values.append(loss.item())

    # Log progress every 100 epochs.
    if (epoch + 1) % 100 == 0:
        print(f"Epoch [{epoch + 1}/{epochs}], Total Loss: {loss.item():.4f}, "
              f"MSE Loss: {mse_loss.item():.4f}, Sparsity Loss: {sparsity_loss.item():.4f}")

# Plot the training loss curve.
plt.plot(range(epochs), loss_values)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss Curve')
plt.show()
