import torch
import torch.nn as nn
import torch.optim as optim
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os
dtype = torch.cuda.FloatTensor

# --- Load the input data cube Y (8, 8, 100) from per-frame .mat files ---
data_folder1 = "dync"
file_prefix = "spatial_correlation_"
file_extension = ".mat"

result_matrix = np.zeros((8, 8, 100))  # one 8x8 frame per time step

for frame_idx in range(100):
    mat_path = os.path.join(
        data_folder1, f"{file_prefix}{frame_idx + 1}{file_extension}"
    )
    # Guard clauses: skip (with a message) any frame that is missing or malformed.
    if not os.path.exists(mat_path):
        print(f"File not found: {mat_path}")
        continue
    contents = scipy.io.loadmat(mat_path)
    if "p" not in contents:
        print(f"'p' not found in {mat_path}")
        continue
    frame = contents["p"]
    if frame.size != 64:
        print(f"Shape of 'p' in {mat_path} is not compatible for reshaping to 8x8.")
        continue
    result_matrix[:, :, frame_idx] = frame.reshape(8, 8)

# Assemble the cube as a float32 tensor on the GPU, shape (8, 8, 100)
Y = torch.from_numpy(result_matrix).float().cuda()

# 读取端元矩阵 D (8, 8, 481)
fname4 = "Data/DC1/phasecha.mat"
mat4 = scipy.io.loadmat(fname4)
D = mat4["phasecha"]
if D.shape[0] == 64:
    D1 = D.reshape(8, 8, -1)  # 转换为 (8, 8, 481)
else:
    raise ValueError("The number of rows in D is not compatible with 8x8 reshaping.")
D = torch.from_numpy(D1).float().cuda()

# 定义模型
class Conv3DAutoencoder(nn.Module):
    """3-D convolutional encoder that maps a spatio-temporal cube to sparse,
    per-time-step abundance vectors, then reconstructs the cube as a linear
    combination of dictionary atoms.

    Args:
        k: number of dictionary atoms (size of the abundance vector).
        D: dictionary tensor of shape (height, width, k); registered as a
            buffer so it moves with the module (``.cuda()`` / ``.to()``).
        k_nonzero: number of abundance entries kept per time step by the
            top-k sparsification (default 20, matching the original script's
            module-level ``K_nonzero``).
        height, width, time_steps: input cube geometry; defaults reproduce
            the original hard-coded (8, 8, 100). Each must be even because
            the stride-2 conv halves every dimension.
    """

    def __init__(self, k, D, k_nonzero=20, height=8, width=8, time_steps=100):
        super().__init__()
        self.encoder = nn.Sequential(
            # (1, H, W, T) -> (16, H, W, T)
            nn.Conv3d(1, 16, kernel_size=(3, 3, 3), stride=1, padding=1),
            nn.LeakyReLU(0.1),
            # (16, H, W, T) -> (32, H/2, W/2, T/2)
            nn.Conv3d(16, 32, kernel_size=(3, 3, 3), stride=2, padding=1),
            nn.LeakyReLU(0.1),
            nn.Flatten(),
            # Fully-connected head producing one abundance vector per time step
            nn.Linear(32 * (height // 2) * (width // 2) * (time_steps // 2),
                      k * time_steps),
            nn.ReLU()  # enforce non-negativity of abundances
        )
        self.k = k
        # Stored on the instance instead of read from a module-level global
        # (the original referenced K_nonzero, defined *after* this class).
        self.k_nonzero = k_nonzero
        self.time_steps = time_steps
        self.register_buffer('D', D)

    def forward(self, Y):
        """Encode Y -> sparse abundances X, then reconstruct Y from D.

        Args:
            Y: input of shape (batch, 1, height, width, time_steps).

        Returns:
            (Y_reconstructed, X) where Y_reconstructed has shape
            (batch, height, width, time_steps) and X has shape
            (batch, k, time_steps).
        """
        batch_size = Y.size(0)
        X_flat = self.encoder(Y)                                # (batch, k * T)
        X = X_flat.view(batch_size, self.k, self.time_steps)    # (batch, k, T)

        # Hard sparsification: keep only the k_nonzero largest entries
        # per time step, zeroing the rest.
        topk_values, topk_indices = torch.topk(X, self.k_nonzero, dim=1)
        mask = torch.zeros_like(X)
        mask.scatter_(1, topk_indices, topk_values)
        X = mask

        # Normalize abundances to (approximately) sum to one per time step;
        # epsilon guards the all-zero column case.
        X = X / (torch.sum(X, dim=1, keepdim=True) + 1e-8)

        # Reconstruction: each (i, j) pixel is a weighted sum of atoms.
        Y_reconstructed = torch.einsum("bkt,ijk->bijt", X, self.D)
        return Y_reconstructed, X


# --- Hyperparameters ---
k = 481          # number of dictionary atoms (must match D's last dim)
K_nonzero = 20   # abundance entries kept per time step by top-k sparsification

# Build the model on the GPU
model = Conv3DAutoencoder(k, D).cuda()

# Add batch and channel dimensions expected by Conv3d
Y = Y.unsqueeze(0).unsqueeze(0)  # (1, 1, 8, 8, 100)

# Loss and optimizer
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training schedule
epochs = 1000
lambda_sparsity = 0.1  # weight of the sparsity regularization term
loss_values = []

# --- Training loop ---
for epoch in range(epochs):
    model.train()
    optimizer.zero_grad()

    # Forward pass: reconstruct the cube and obtain sparse abundances
    Y_reconstructed, X = model(Y)

    # Reconstruction error. Use the single `criterion` built above instead of
    # rebuilding nn.MSELoss() every epoch with the deprecated `.type(dtype)`
    # cast (MSELoss has no parameters, so the cast was a no-op anyway).
    mse_loss = criterion(Y_reconstructed, Y)

    # Sparsity penalty: push the active fraction per time step toward
    # K_nonzero / k.  NOTE(review): (X > 0).float() is not differentiable,
    # so this term contributes no gradient — it only shifts the reported
    # loss value; confirm that is the intent.
    target_sparsity = K_nonzero / k
    actual_sparsity = (X > 0).float().mean(dim=1)
    sparsity_loss = ((actual_sparsity - target_sparsity) ** 2).mean()
    loss = mse_loss + lambda_sparsity * sparsity_loss

    # Backward pass and parameter update
    loss.backward()
    optimizer.step()

    loss_values.append(loss.item())

    # Periodic progress report
    if (epoch + 1) % 100 == 0:
        print(f"Epoch [{epoch + 1}/{epochs}], Total Loss: {loss.item():.4f}, "
              f"MSE Loss: {mse_loss.item():.4f}, Sparsity Loss: {sparsity_loss.item():.4f}")

# Visualize how the total loss evolved over training
epoch_axis = list(range(epochs))
plt.plot(epoch_axis, loss_values)
plt.title('Loss Curve')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()

# --- Evaluation ---
model.eval()
with torch.no_grad():
    Y_reconstructed, X = model(Y)

# X has shape (batch, k, time_steps). Use a fresh name for the atom count so
# the module-level hyperparameter `k` is not silently rebound.
batch_size, num_atoms, time_steps = X.size()

# Per-time-step reconstruction MSE (leftover debugger stub `if t==20: m=1`
# removed).
time_mse_errors = []
for t in range(Y.size(-1)):
    Y_t = Y[..., t]
    Y_reconstructed_t = Y_reconstructed[..., t]
    mse_t = criterion(Y_reconstructed_t, Y_t).item()
    time_mse_errors.append(mse_t)

# Every time slice has the same number of elements, so the mean of the
# per-slice MSEs equals the overall reconstruction MSE.
print(f"Final Reconstruction MSE: {sum(time_mse_errors) / len(time_mse_errors):.6f}")
