import torch
import torch.nn as nn
import torch.optim as optim
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os

# Hyperparameters and data-layout constants.
m = 50  # number of time steps
k = 481  # abundance-matrix dimension (number of dictionary atoms)
K_nonzero = 20  # sparsity requirement: non-zeros kept per abundance column
lambda_sparsity = 0.1  # weight of the sparsity regularization term
epochs = 1000
learning_rate = 0.001

# Location of the per-time-step input .mat files.
data_folder1 = "dync"
file_prefix = "spatial_correlation_"
file_extension = ".mat"

# Load the complex (8, 8) matrix "p" from each of the m files into an
# (8, 8, m) stack. Missing or malformed files are reported and their
# slice is left as zeros.
result_matrix = np.zeros((8, 8, m), dtype=np.complex64)

for step in range(1, m + 1):
    fname2 = os.path.join(data_folder1, f"{file_prefix}{step}{file_extension}")
    if not os.path.exists(fname2):
        print(f"File not found: {fname2}")
        continue
    contents = scipy.io.loadmat(fname2)
    if "p" not in contents:
        print(f"'p' not found in {fname2}")
        continue
    p_matrix = contents["p"]
    if p_matrix.size != 64:
        print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
        continue
    result_matrix[:, :, step - 1] = p_matrix.reshape(8, 8)

# Split into real/imaginary parts and stack: (8, 8, m) -> (128, m).
Y_real = np.real(result_matrix).reshape(64, m)
Y_imag = np.imag(result_matrix).reshape(64, m)
Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, m)

# Move to the GPU as a float tensor with a leading batch axis of 1.
Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)  # (1, 128, m)

# Load the endmember/dictionary matrix D from disk.
# NOTE(review): despite the original note describing D as (8, 8, k), the
# check below expects D to already have 64 rows, i.e. shape (64, k) —
# confirm against the MATLAB code that produced phasecha.mat.
fname4 = "Data/DC1/phasecha.mat"
mat4 = scipy.io.loadmat(fname4)
D = mat4["phasecha"]
if D.shape[0] == 64:
    # reshape is a no-op when D is already (64, k); presumably defensive
    # in case the .mat file stores a different trailing layout.
    D_reshaped = D.reshape(64, k)
else:
    raise ValueError("The number of rows in D is not compatible with 8x8 reshaping.")

# Stack the real and imaginary parts of D: (64, k) -> (128, k),
# matching the real/imag layout used for the observations Y.
D_real = np.real(D_reshaped)
D_imag = np.imag(D_reshaped)
D_combined = np.concatenate((D_real, D_imag), axis=0)  # (128, k)

D_tensor = torch.from_numpy(D_combined).float().cuda()  # (128, k)


# Model definition: 1D-convolutional encoder + linear decoding onto a
# fixed dictionary D.
class Conv1DAutoencoder(nn.Module):
    """Sparse autoencoder that unmixes Y (batch, 128, m) against a fixed
    dictionary D (128, k).

    The encoder is a small 1D CNN over the time axis; the decoder is the
    linear reconstruction Y_hat = D @ X, where the abundance map X is
    hard-thresholded to at most ``k_nonzero`` non-zeros per time step and
    then column-normalized to sum to 1.
    """

    def __init__(self, k, D, time_steps, k_nonzero=20):
        """
        Args:
            k: number of dictionary atoms (columns of D).
            D: dictionary tensor of shape (128, k); registered as a buffer
               so it moves with the module between devices.
            time_steps: number of time steps m in the input.
            k_nonzero: maximum non-zeros kept per abundance column
                (default 20, matching the script-level K_nonzero setting).
        """
        super(Conv1DAutoencoder, self).__init__()
        # Input: (batch, 128, m). Channels: 128 -> 64 -> 32.
        self.encoder = nn.Sequential(
            nn.Conv1d(128, 64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.1),
            # stride=2 shrinks the time axis: m -> ceil(m / 2)
            nn.Conv1d(64, 32, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(0.1),
            nn.Flatten()
        )

        # Conv1d output length with kernel=3, stride=2, padding=1 is
        # floor((m - 1) / 2) + 1 == ceil(m / 2).  Using ceil here (the
        # previous ``time_steps // 2`` was floor) keeps the Linear layer
        # consistent with the encoder for odd time_steps as well.
        fc_input_size = 32 * ((time_steps + 1) // 2)

        self.fc = nn.Sequential(
            nn.Linear(fc_input_size, k * time_steps),
            nn.ReLU()
        )

        self.k = k
        self.time_steps = time_steps
        self.k_nonzero = k_nonzero
        self.register_buffer('D', D)

    def forward(self, Y):
        """Encode Y and reconstruct it through the dictionary.

        Args:
            Y: observations of shape (batch, 128, time_steps).

        Returns:
            Tuple ``(Y_reconstructed, X)``: the reconstruction of shape
            (batch, 128, time_steps) and the sparse, normalized abundance
            map of shape (batch, k, time_steps).
        """
        X_flat = self.encoder(Y)                 # (batch, fc_input_size)
        X = self.fc(X_flat)                      # (batch, k * m)
        X = X.view(-1, self.k, self.time_steps)  # (batch, k, m)

        # Hard sparsification: keep only the k_nonzero largest entries per
        # time step.  NOTE: torch.topk makes this step non-differentiable
        # with respect to the discarded entries.
        topk_values, topk_indices = torch.topk(X, self.k_nonzero, dim=1)
        mask = torch.zeros_like(X)
        mask.scatter_(1, topk_indices, topk_values)
        X = mask

        # Normalize each abundance column to (approximately) sum to 1;
        # the epsilon guards against all-zero columns.
        X = X / (torch.sum(X, dim=1, keepdim=True) + 1e-8)

        # Linear reconstruction: Y_hat[b, :, t] = D @ X[b, :, t].
        # D: (128, k), X: (batch, k, m) -> (batch, 128, m).
        Y_reconstructed = torch.einsum("ik,bkm->bim", self.D, X)

        return Y_reconstructed, X


# Build the model, loss and optimizer.
model = Conv1DAutoencoder(k=k, D=D_tensor, time_steps=m).cuda()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop: full-batch gradient descent on the single sequence.
loss_values = []
for epoch in range(epochs):
    model.train()
    optimizer.zero_grad()

    Y_reconstructed, X = model(Y_tensor)
    mse_loss = criterion(Y_reconstructed, Y_tensor)

    # Penalize deviation of the non-zero fraction from the target.
    # NOTE(review): (X > 0).float() has zero gradient everywhere, so this
    # term cannot influence the weights through backprop; consider an L1
    # penalty on X if this regularizer is meant to be trainable.
    target_sparsity = K_nonzero / k
    actual_sparsity = (X > 0).float().mean(dim=1)
    sparsity_loss = ((actual_sparsity - target_sparsity) ** 2).mean()
    loss = mse_loss + lambda_sparsity * sparsity_loss

    loss.backward()
    optimizer.step()

    loss_values.append(loss.item())

    print(f"Epoch [{epoch + 1}/{epochs}], Total Loss: {loss.item():.4f}, "
          f"MSE Loss: {mse_loss.item():.4f}, Sparsity Loss: {sparsity_loss.item():.4f}")

# Visualize how the total loss evolved over training.
epoch_axis = range(epochs)
plt.plot(epoch_axis, loss_values)
plt.title('Loss Curve')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()

# Switch to inference mode and run one final forward pass.
model.eval()
with torch.no_grad():
    Y_reconstructed, X = model(Y_tensor)

# Report the final reconstruction error on the training sequence.
mse_total = criterion(Y_reconstructed, Y_tensor).item()
print(f"Final Reconstruction MSE: {mse_total:.6f}")
