import torch
import torch.nn as nn
import torch.optim as optim
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os

# ---------------------------------------------------------------
# Hyper-parameters (time dimension only).
# NOTE(review): the original file assigned m = 50 and then
# immediately overwrote it with m = 10; only the effective final
# values are kept here.
# ---------------------------------------------------------------
m = 10                  # number of time steps in the training sequence
k = 481                 # dictionary size (number of endmembers / atoms)
K_nonzero = 20          # sparsity level: keep the top-20 coefficients per time step
lambda_sparsity = 0.1   # weight of the sparsity penalty in the total loss
epochs = 1000           # number of training iterations
learning_rate = 0.0001  # Adam learning rate

start = 11              # first file index of the held-out test sequence
time_steps = m          # alias used by the model constructor
len = m                 # NOTE(review): shadows the builtin `len`; kept because
                        # the test-data loop below indexes with `start + len`

# Data-file location parameters.
data_folder1 = "dync"
file_prefix = "spatial_correlation_"
file_extension = ".mat"

# ------------------------------------------------------------------
# Load the training sequence: one complex 8x8 correlation matrix per
# time step (variable "p" in each .mat file), stacked into
# result_matrix of shape (8, 8, m).  Missing/incompatible files are
# reported and their slice stays zero.
# ------------------------------------------------------------------
result_matrix = np.zeros((8, 8, m), dtype=np.complex64)
for i in range(1, m + 1):
    fname2 = os.path.join(data_folder1, f"{file_prefix}{i}{file_extension}")
    if os.path.exists(fname2):
        mat2 = scipy.io.loadmat(fname2)
        if "p" in mat2:
            Y = mat2["p"]
            if Y.size == 64:
                Y_reshaped = Y.reshape(8, 8)
                result_matrix[:, :, i - 1] = Y_reshaped
            else:
                print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
        else:
            print(f"'p' not found in {fname2}")
    else:
        print(f"File not found: {fname2}")

# Split the complex data into real and imaginary parts and stack them,
# giving 128 real-valued channels (64 real + 64 imaginary) per time step.
Y_real = np.real(result_matrix).reshape(64, m)
Y_imag = np.imag(result_matrix).reshape(64, m)
Y_combined = np.concatenate((Y_real, Y_imag), axis=0)
# Final training tensor: (1, 128, m) on the GPU — (batch, channels, length).
Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)

# ------------------------------------------------------------------
# Load the dictionary (endmember) matrix D from phasecha.mat.
# D is expected to be complex with 64 rows; real and imaginary parts
# are stacked into a (128, k) real-valued matrix used as the fixed
# decoder of the autoencoder.
# ------------------------------------------------------------------
fname4 = "Data/DC1/phasecha.mat"
mat4 = scipy.io.loadmat(fname4)
D = mat4["phasecha"]
if D.shape[0] == 64:
    # reshape is a no-op if D is already (64, k); it enforces the column count.
    D_reshaped = D.reshape(64, k)
else:
    raise ValueError("The number of rows in D is not compatible with 8x8 reshaping.")

D_real = np.real(D_reshaped)
D_imag = np.imag(D_reshaped)
D_combined = np.concatenate((D_real, D_imag), axis=0)
D_tensor = torch.from_numpy(D_combined).float().cuda()

class CausalConv1d(nn.Module):
    """1-D convolution that is causal in time.

    The output at step t depends only on inputs at steps <= t.
    Causality is obtained by zero-padding (kernel_size - 1) * dilation
    samples on the left edge only, then applying a standard unpadded
    Conv1d, so the time length is preserved.
    """

    def __init__(self, in_channels, out_channels, kernel_size, dilation=1, **kwargs):
        super(CausalConv1d, self).__init__()
        # Left-only padding amount that makes the convolution causal.
        self.padding = (kernel_size - 1) * dilation
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
                              dilation=dilation, **kwargs)

    def forward(self, x):
        # Pad the left edge only so no future samples leak into the output.
        padded = nn.functional.pad(x, (self.padding, 0))
        return self.conv(padded)
# # 定义时间卷积网络（TCN）自编码器模型
# class TCNConv1DAutoencoder(nn.Module):
#     def __init__(self, k, D, time_steps):
#         super(TCNConv1DAutoencoder, self).__init__()
#         # 编码器部分：使用多个一维卷积层构建深层网络结构，逐渐减少通道数并控制时间维度尺寸变化
#         self.encoder = nn.Sequential(
#             nn.Conv1d(128, 64, kernel_size=3, stride=1, padding=1),
#             nn.LeakyReLU(0.1),
#             nn.Conv1d(64, 32, kernel_size=3, stride=2, padding=1),
#             nn.LeakyReLU(0.1),
#             nn.Conv1d(32, 16, kernel_size=3, stride=2, padding=1),
#             nn.LeakyReLU(0.1),
#             nn.Flatten()
#         )

#         # 计算全连接层输入尺寸，根据最后一层卷积层输出情况确定
#         fc_input_size = self._calculate_fc_input_size(time_steps)

#         # 全连接层映射到k*m大小的向量，即每个时间步有k个系数
#         self.fc = nn.Sequential(
#             nn.Linear(fc_input_size, k * time_steps),
#             nn.ReLU()  # 保证非负性
#         )

#         self.k = k
#         self.time_steps = time_steps
#         self.register_buffer('D', D)

#     def _calculate_fc_input_size(self, time_steps):
#         # 模拟输入一个示例张量，用于计算经过编码器后展平的尺寸大小
#         x = torch.randn(1, 128, time_steps).cuda()
#         # 将模型的encoder部分的参数也移动到GPU上（如果还没移动的话）
#         self.encoder = self.encoder.cuda()
#         x = self.encoder(x)
#         return x.shape[1]

#     def forward(self, Y):
#         # 前向传播Y: (batch, 128, m)
#         X_flat = self.encoder(Y)  # (batch, fc_input_size)
#         X = self.fc(X_flat)  # (batch, k*m)
#         X = X.view(-1, self.k, self.time_steps)  # (batch, k, m)

#         # 稀疏化：
#         # 只保留每个时间步前K_nonzero个最大值，其余置0
#         topk_values, topk_indices = torch.topk(X, K_nonzero, dim=1)
#         mask = torch.zeros_like(X)
#         mask.scatter_(1, topk_indices, topk_values)
#         X = mask

#         # 归一化：
#         # 将X沿k方向归一化，使其和为1，确保表示的归一性
#         X = X / (torch.sum(X, dim=1, keepdim=True) + 1e-8)

#         # 重构Y：
#         # Y_reconstructed = D * X，其中D为 (128,k)，X为 (batch,k,m)
#         # 我们需要 (batch,128,m) 作为输出
#         X_transposed = X.transpose(1, 2)  # (batch, m, k)
#         Y_reconstructed = torch.einsum("bmk,ik->bmi", X_transposed, self.D)
#         Y_reconstructed = Y_reconstructed.transpose(1, 2)  # (batch,128,m)

#         return Y_reconstructed, X

class TCNConv1DAutoencoder(nn.Module):
    """Causal-TCN autoencoder for sparse coding over time.

    Encoder: stacked causal 1-D convolutions with dilations 1, 2, 4 that
    shrink the channel count 128 -> 64 -> 32 -> 16 while preserving the
    time length, followed by a fully connected head that produces k
    non-negative coefficients per time step.

    Decoder: linear reconstruction Y_hat = D @ X with the fixed
    dictionary D registered as a (non-trainable) buffer.

    Args:
        k: dictionary size (number of atoms / endmembers).
        D: fixed decoding dictionary tensor, shape (128, k).
        time_steps: sequence length m of the input.
        k_nonzero: number of coefficients kept per time step by the hard
            sparsification.  When None (default) the module-level
            K_nonzero is used, matching the original behaviour.
    """

    def __init__(self, k, D, time_steps, k_nonzero=None):
        super(TCNConv1DAutoencoder, self).__init__()
        # Encoder: causal convolutions with exponentially growing dilation
        # enlarge the receptive field without changing the time length.
        self.encoder = nn.Sequential(
            CausalConv1d(128, 64, kernel_size=3, dilation=1),
            nn.LeakyReLU(0.1),
            CausalConv1d(64, 32, kernel_size=3, dilation=2),
            nn.LeakyReLU(0.1),
            CausalConv1d(32, 16, kernel_size=3, dilation=4),
            nn.LeakyReLU(0.1),
            nn.Flatten()
        )

        # Flattened encoder output size, probed with a dummy input.
        fc_input_size = self._calculate_fc_input_size(time_steps)

        # Fully connected head mapping to k coefficients per time step;
        # the ReLU guarantees non-negativity of the code.
        self.fc = nn.Sequential(
            nn.Linear(fc_input_size, k * time_steps),
            nn.ReLU()
        )

        self.k = k
        self.time_steps = time_steps
        self.k_nonzero = k_nonzero
        self.register_buffer('D', D)

    def _calculate_fc_input_size(self, time_steps):
        # Probe the encoder with a dummy batch to obtain the flattened
        # feature size.  FIX(review): the original forced the dummy and
        # the encoder onto .cuda() here, which broke CPU-only machines;
        # the flattened size is device-independent, so probe on the
        # encoder's current (construction-time) device instead.
        dummy = torch.randn(1, 128, time_steps)
        with torch.no_grad():
            out = self.encoder(dummy)
        return out.shape[1]

    def forward(self, Y):
        """Map Y (batch, 128, m) to (Y_reconstructed (batch, 128, m), X (batch, k, m))."""
        X_flat = self.encoder(Y)                 # (batch, fc_input_size)
        X = self.fc(X_flat)                      # (batch, k*m)
        X = X.view(-1, self.k, self.time_steps)  # (batch, k, m)

        # Hard sparsification: keep only the top-kk coefficients per time
        # step (dim=1), zeroing the rest.  kk is clamped to k so torch.topk
        # can never be asked for more entries than exist.
        kk = self.k_nonzero if self.k_nonzero is not None else K_nonzero
        kk = min(kk, self.k)
        topk_values, topk_indices = torch.topk(X, kk, dim=1)
        mask = torch.zeros_like(X)
        mask.scatter_(1, topk_indices, topk_values)
        X = mask

        # Normalize each time step's coefficients to sum to 1; the epsilon
        # guards against an all-zero column after the ReLU + top-k step.
        X = X / (torch.sum(X, dim=1, keepdim=True) + 1e-8)

        # Reconstruct: Y_hat = D @ X with D (128, k) and X (batch, k, m),
        # producing (batch, 128, m).
        X_transposed = X.transpose(1, 2)  # (batch, m, k)
        Y_reconstructed = torch.einsum("bmk,ik->bmi", X_transposed, self.D)
        Y_reconstructed = Y_reconstructed.transpose(1, 2)  # (batch, 128, m)

        return Y_reconstructed, X
# Instantiate the model, loss function, and optimizer.
model = TCNConv1DAutoencoder(k=k, D=D_tensor, time_steps=m).cuda()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop (full-batch: the whole sequence is one sample).
loss_values = []
for epoch in range(epochs):
    model.train()
    optimizer.zero_grad()

    # Forward pass.
    Y_reconstructed, X = model(Y_tensor)

    # Reconstruction error (MSE).
    mse_loss = criterion(Y_reconstructed, Y_tensor)

    # Sparsity loss: push the fraction of non-zero coefficients per time
    # step toward the target ratio K_nonzero / k.
    target_sparsity = K_nonzero / k
    actual_sparsity = (X > 0).float().mean(dim=1)
    sparsity_loss = ((actual_sparsity - target_sparsity) ** 2).mean()

    # Total loss = reconstruction MSE + weighted sparsity penalty.
    loss = mse_loss + lambda_sparsity * sparsity_loss

    # Backward pass and parameter update.
    loss.backward()
    optimizer.step()

    loss_values.append(loss.item())

    # Log progress every 100 epochs.
    if (epoch + 1) % 100 == 0:
        print(f"Epoch [{epoch + 1}/{epochs}], Total Loss: {loss.item():.4f}, "
              f"MSE Loss: {mse_loss.item():.4f}, Sparsity Loss: {sparsity_loss.item():.4f}")

# Plot the training loss curve to inspect convergence (blocks until the
# window is closed).
plt.plot(range(epochs), loss_values)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss Curve')
plt.show()

# ------------------------------------------------------------------
# Evaluate the trained model on the training data.
# ------------------------------------------------------------------
model.eval()
with torch.no_grad():
    Y_reconstructed, X = model(Y_tensor)

# Overall reconstruction MSE over the whole (128, m) signal.
mse_total = criterion(Y_reconstructed, Y_tensor).item()

# MSE / RMSE of a single time step (column index 1); each slice has
# shape [128] after squeezing the batch dimension.
subset1 = Y_reconstructed.squeeze(0)[:, 1]
subset2 = Y_tensor.squeeze(0)[:, 1]
mse_total1 = criterion(subset1, subset2).item()
rmse_total = torch.sqrt(torch.tensor(mse_total1)).item()  # NOTE(review): computed but never printed/used

# Per-column (per-time-step) RMSE over the full [128, m] matrices.
subset3 = Y_reconstructed.squeeze(0)
subset4 = Y_tensor.squeeze(0)
rmse_per_column = torch.sqrt(torch.mean((subset3 - subset4) ** 2, dim=0))
print("逐列 RMSE:", rmse_per_column)
print(f"Final Reconstruction MSE: {mse_total:.6f}")
# FIX(review): the original recomputed and printed the identical
# mse_total a second time immediately below; the duplicate was removed.



# ------------------------------------------------------------------
# Load the held-out test sequence (file indices start .. start+len-1),
# overwriting result_matrix in place; its shape stays (8, 8, m).
# NOTE(review): `len` here is the module-level step count that shadows
# the builtin `len`.
# ------------------------------------------------------------------
for i in range(start, start+len):
    fname2 = os.path.join(data_folder1, f"{file_prefix}{i}{file_extension}")
    if os.path.exists(fname2):
        mat2 = scipy.io.loadmat(fname2)
        if "p" in mat2:
            Y = mat2["p"]
            if Y.size == 64:
                Y_reshaped = Y.reshape(8, 8)
                result_matrix[:, :, i-start] = Y_reshaped
            else:
                print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
        else:
            print(f"'p' not found in {fname2}")
    else:
        print(f"File not found: {fname2}")

# result_matrix is (8, 8, m) complex; split into real and imaginary
# parts, flattening each 8x8 slice to 64 rows.
Y_real = np.real(result_matrix).reshape(64, m)  # real part, (64, m)
Y_imag = np.imag(result_matrix).reshape(64, m)  # imaginary part, (64, m)

# Stack real and imaginary parts: 128 channels (64 real + 64 imaginary)
# per time step.
Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, m)

# Convert to a (1, 128, m) GPU tensor in (batch, channels, length)
# layout, rebinding Y_tensor to the test sequence.
Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)

# ------------------------------------------------------------------
# Evaluate the trained model on the held-out test sequence.
# ------------------------------------------------------------------
model.eval()


with torch.no_grad():
    Y_reconstructed, X = model(Y_tensor)

# Overall reconstruction MSE on the test sequence.
mse_total = criterion(Y_reconstructed, Y_tensor).item()
# Single time step (column index 1): each slice has shape [128] after
# squeezing the batch dimension.
subset1 = Y_reconstructed.squeeze(0)[:, 1]
subset2=Y_tensor.squeeze(0)[:,1]
mse_total1 = criterion(subset1 ,subset2).item()
rmse_total = torch.sqrt(torch.tensor(mse_total1)).item()  # NOTE(review): computed but never used
subset3 = Y_reconstructed.squeeze(0)  # full [128, m] reconstruction
subset4=Y_tensor.squeeze(0)  # full [128, m] target
# Per-column (per-time-step) RMSE.

rmse_per_column = torch.sqrt(torch.mean((subset3 - subset4) ** 2, dim=0))
print("逐列 RMSE:", rmse_per_column)
print(f"Final Reconstruction MSE: {mse_total:.6f}")