import torch
import torch.nn as nn
import torch.optim as optim
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os


# Hyper-parameters (time dimension only).
total_data = 80  # total number of samples
batch_size = 20  # batch size (also used as the model's time_steps)
k = 481  # number of dictionary atoms (endmembers), i.e. columns of D
start = 100
len = 20  # NOTE(review): shadows the builtin len(); rename (e.g. seg_len) when all uses can change together
m = len
K_nonzero = 16  # sparsity level: keep the top-K values per time step of X, zero the rest
lambda_sparsity = 0.1  # weight of the sparsity penalty in the total loss
epochs = 1000  # number of training epochs
learning_rate = 0.0001  # optimizer learning rate (overridden to 0.001 below)

# Data-reading parameters.
data_folder1 = "dync"
file_prefix = "spatial_correlation_"
file_extension = ".mat"

# Load the dictionary (endmember) matrix D.
# D is complex with shape (64, k) (8x8 = 64 spatial entries); its real and
# imaginary parts are stacked below to form a real-valued (128, k) dictionary.
fname4 = "Data/DC1/phasecha.mat"
mat4 = scipy.io.loadmat(fname4)
D = mat4["phasecha"]
if D.shape[0] == 64:
    D_reshaped = D.reshape(64, k)  # no-op when D is already (64, k); also validates D has 64*k entries
else:
    raise ValueError("The number of rows in D is not compatible with 8x8 reshaping.")

D_real = np.real(D_reshaped)
D_imag = np.imag(D_reshaped)
D_combined = np.concatenate((D_real, D_imag), axis=0)  # (128, k)
D_tensor = torch.from_numpy(D_combined).float().cuda()


class PreNormTransformerEncoderLayer(nn.Module):
    """Transformer encoder layer with pre-normalization.

    LayerNorm is applied *before* the self-attention and feed-forward
    sub-blocks (unlike nn.TransformerEncoderLayer's default post-norm
    ordering), with residual connections around each sub-block.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, src, src_mask=None, src_key_padding_mask=None, is_causal=False):
        # `is_causal` is accepted for nn.TransformerEncoder compatibility but
        # intentionally unused; masking is handled via `src_mask`.
        # --- self-attention sub-block (pre-norm residual) ---
        normed = self.norm1(src)
        attn_out, _ = self.self_attn(normed, normed, normed,
                                     attn_mask=src_mask,
                                     key_padding_mask=src_key_padding_mask)
        src = src + self.dropout1(attn_out)
        # --- feed-forward sub-block (pre-norm residual) ---
        ff_out = self.linear2(self.dropout(torch.relu(self.linear1(self.norm2(src)))))
        return src + self.dropout2(ff_out)


class TransformerAutoencoder(nn.Module):
    """Transformer-based sparse-coding autoencoder.

    Encodes a multichannel time series into a k-dimensional sparse code X
    (at most ``k_nonzero`` non-zeros per time step, normalized to sum to 1)
    and reconstructs the input as ``D @ X`` using a fixed dictionary D.

    Args:
        k: number of dictionary atoms (columns of D).
        D: dictionary tensor of shape (channel_dim, k); registered as a
            non-trainable buffer so it moves with the module's device.
        time_steps: sequence length the model is built for (the learned
            positional encoding is fixed to this length, so forward() inputs
            must have exactly this many time steps).
        channel_dim: number of input channels per time step.
        nhead: number of self-attention heads.
        num_layers: number of Transformer encoder layers.
        dropout_rate: dropout probability for the embedding/FC stages.
        k_nonzero: sparsity level — entries kept per time step (default 16,
            matching the previous hard-coded module-level K_nonzero).
    """

    def __init__(self, k, D, time_steps, channel_dim=128, nhead=8, num_layers=2,
                 dropout_rate=0.0, k_nonzero=16):
        super(TransformerAutoencoder, self).__init__()
        self.k = k
        self.time_steps = time_steps
        self.k_nonzero = k_nonzero

        # Multi-scale temporal feature extraction (three receptive-field sizes).
        self.conv1 = nn.Conv1d(channel_dim, channel_dim, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(channel_dim, channel_dim, kernel_size=5, padding=2)
        self.conv3 = nn.Conv1d(channel_dim, channel_dim, kernel_size=7, padding=3)

        # Time steps form the sequence dimension; the three scales are
        # concatenated, so the embedding width is channel_dim * 3.
        self.embedding = nn.Linear(channel_dim * 3, channel_dim * 3)
        self.positional_encoding = nn.Parameter(torch.randn(1, time_steps, channel_dim * 3))  # learned positions
        # NOTE(review): defined but never applied in forward(); kept for
        # checkpoint compatibility — wire it in after self.embedding if wanted.
        self.dropout_embedding = nn.Dropout(dropout_rate)
        # Transformer encoder over the fused multi-scale features.
        encoder_layer = PreNormTransformerEncoderLayer(d_model=channel_dim * 3, nhead=nhead, dim_feedforward=512)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        # Map encoder output to the k-dimensional (pre-sparsification) code.
        self.fc = nn.Sequential(
            nn.Linear(channel_dim * 3, k),
            nn.BatchNorm1d(k),  # normalizes over the flattened (batch*time) axis
            nn.ReLU(),
            nn.Dropout(dropout_rate)
        )

        # Fixed dictionary: part of the state dict but receives no gradients.
        self.register_buffer('D', D)

    def forward(self, Y):
        """Encode and reconstruct.

        Args:
            Y: input of shape (batch, channel_dim, time_steps).

        Returns:
            Y_reconstructed: (batch, channel_dim, time_steps) reconstruction.
            X: (batch, k, time_steps) sparse code, column-normalized with at
                most self.k_nonzero non-zeros per time step.
        """
        # Multi-scale features. Conv1d consumes (batch, channels, length)
        # directly, so the original permute-and-permute-back of Y is dropped.
        feat1 = self.conv1(Y).permute(0, 2, 1)  # (batch, time_steps, channel_dim)
        feat2 = self.conv2(Y).permute(0, 2, 1)
        feat3 = self.conv3(Y).permute(0, 2, 1)

        # Fuse the three scales along the feature dimension.
        fused_feat = torch.cat([feat1, feat2, feat3], dim=-1)

        # Project and add the learned positional encoding.
        emb = self.embedding(fused_feat) + self.positional_encoding  # (batch, time_steps, channel_dim * 3)

        # nn.TransformerEncoder expects (seq, batch, features).
        encoded = self.transformer_encoder(emb.permute(1, 0, 2))
        encoded = encoded.permute(1, 0, 2)  # back to (batch, time_steps, channel_dim * 3)

        # Flatten to 2-D for BatchNorm1d. Use reshape (not view): the permuted
        # tensor is non-contiguous, so .view() only worked for batch size 1.
        X = self.fc(encoded.reshape(-1, encoded.size(-1)))
        X = X.view(encoded.size(0), encoded.size(1), -1)  # restore (batch, time_steps, k)
        X = X.permute(0, 2, 1)  # (batch, k, time_steps)

        # Sparsify: keep only the top-k_nonzero values per time step.
        # Gradients flow only through the surviving (scattered) entries.
        topk_values, topk_indices = torch.topk(X, self.k_nonzero, dim=1)
        mask = torch.zeros_like(X)
        mask.scatter_(1, topk_indices, topk_values)
        X = mask

        # Normalize each time step's code to sum to 1 (eps guards div-by-zero).
        X = X / (torch.sum(X, dim=1, keepdim=True) + 1e-8)

        # Reconstruct: Y_hat[b, :, t] = D @ X[b, :, t].
        X_transposed = X.transpose(1, 2)  # (batch, time_steps, k)
        Y_reconstructed = torch.einsum("btk,ck->btc", X_transposed, self.D)  # (batch, time_steps, channel_dim)
        Y_reconstructed = Y_reconstructed.permute(0, 2, 1)  # (batch, channel_dim, time_steps)

        return Y_reconstructed, X


# Model hyper-parameters.
channel_dim = 128  # number of channels per time step (64 real + 64 imaginary)

learning_rate = 0.001  # optimizer learning rate (overrides the earlier 0.0001)
# Initialize model, loss function and optimizer.
nhead = 4  # number of self-attention heads
num_layers = 2  # number of Transformer encoder layers

# Build the TransformerAutoencoder; time_steps must match the sequence length fed in.
model = TransformerAutoencoder(k=k, D=D_tensor, time_steps=batch_size, channel_dim=channel_dim, nhead=nhead,
                               num_layers=num_layers).cuda()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)  # weight_decay acts as L2 regularization
mm = 0  # 0 = train from scratch and save; anything else = load the saved model

if mm == 0:
    # Train the model.
    loss_values = []
    for epoch in range(epochs):
        total_loss = 0
        num_batches = total_data // batch_size
        for batch_idx in range(num_batches):
            start_idx = batch_idx * batch_size
            end_idx = start_idx + batch_size

            result_matrix = np.zeros((8, 8, batch_size), dtype=np.complex64)

            # Read one batch of .mat files (file indices are 1-based).
            for i in range(start_idx + 1, end_idx + 1):
                fname2 = os.path.join(data_folder1, f"{file_prefix}{i}{file_extension}")
                if os.path.exists(fname2):
                    mat2 = scipy.io.loadmat(fname2)
                    if "p" in mat2:
                        Y = mat2["p"]
                        if Y.size == 64:
                            Y_reshaped = Y.reshape(8, 8)
                            result_matrix[:, :, i - start_idx - 1] = Y_reshaped
                        else:
                            print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
                    else:
                        print(f"'p' not found in {fname2}")
                else:
                    print(f"File not found: {fname2}")

            # result_matrix is (8, 8, batch_size) complex data; split it into
            # real and imaginary parts and stack them into (128, batch_size).
            Y_real = np.real(result_matrix).reshape(64, batch_size)  # flatten each 8x8 frame into 64 rows
            Y_imag = np.imag(result_matrix).reshape(64, batch_size)  # same for the imaginary part

            # Concatenate so each time step has 128 channels (64 real + 64 imag).
            Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, batch_size)

            # Convert to a tensor and add a leading batch dimension (b=1):
            # final shape (1, 128, batch_size), i.e. (batch, channels, length).
            Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)

            model.train()
            optimizer.zero_grad()

            # Forward pass.
            Y_reconstructed, X = model(Y_tensor)

            # MSE reconstruction error.
            mse_loss = criterion(Y_reconstructed, Y_tensor)

            # Sparsity loss: push the observed fraction of non-zeros in X
            # towards the target sparsity K_nonzero / k.
            target_sparsity = K_nonzero / k
            actual_sparsity = (X > 0).float().mean(dim=1)  # observed fraction of positive entries
            sparsity_loss = ((actual_sparsity - target_sparsity) ** 2).mean()

            # Total loss = reconstruction MSE + weighted sparsity penalty.
            loss = mse_loss + lambda_sparsity * sparsity_loss

            # Backward pass.
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        avg_loss = total_loss / num_batches
        loss_values.append(avg_loss)

        # Log once every 100 epochs.
        if (epoch + 1) % 100 == 0:
            print(f"Epoch [{epoch + 1}/{epochs}], Average Loss: {avg_loss:.4f}")

    # Plot the loss curve to inspect training progress.
    plt.plot(range(epochs), loss_values)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Loss Curve')
    plt.show()
    torch.save(model, "dync/model_full11.pth")  # saves the full pickled model
else:
    # NOTE(review): torch.load of a fully-pickled model executes arbitrary
    # code on load; only load checkpoints you trust (prefer state_dict saving).
    model = torch.load("dync/model_full11.pth")

# Simple evaluation using the last batch of the training data.
start_idx = (total_data // batch_size - 1) * batch_size
end_idx = start_idx + batch_size

result_matrix = np.zeros((8, 8, batch_size), dtype=np.complex64)

# Read the batch from .mat files (file indices are 1-based).
for i in range(start_idx + 1, end_idx + 1):
    fname2 = os.path.join(data_folder1, f"{file_prefix}{i}{file_extension}")
    if os.path.exists(fname2):
        mat2 = scipy.io.loadmat(fname2)
        if "p" in mat2:
            Y = mat2["p"]
            if Y.size == 64:
                Y_reshaped = Y.reshape(8, 8)
                result_matrix[:, :, i - start_idx - 1] = Y_reshaped
            else:
                print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
        else:
            print(f"'p' not found in {fname2}")
    else:
        print(f"File not found: {fname2}")

# result_matrix is (8, 8, batch_size) complex data; split into real and
# imaginary parts and stack them into (128, batch_size).
Y_real = np.real(result_matrix).reshape(64, batch_size)  # flatten each 8x8 frame into 64 rows
Y_imag = np.imag(result_matrix).reshape(64, batch_size)  # same for the imaginary part

# Concatenate so each time step has 128 channels (64 real + 64 imag).
Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, batch_size)

# Convert to a tensor and add a leading batch dimension (b=1):
# final shape (1, 128, batch_size), i.e. (batch, channels, length).
Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)

# Switch to evaluation mode.
model.eval()

with torch.no_grad():
    Y_reconstructed, X = model(Y_tensor)

X = X.squeeze(0)  # X is now (k, batch_size) = (481, 20)
# Per-column sums (should be ~1 after the model's normalization).
column_sums = torch.sum(X.squeeze(0), dim=0)  # the extra squeeze(0) is a no-op here

print("逐列 he1:", column_sums)
nonzero_indices_list = []
nonzero_values_list = []

# For every column (time step), collect the non-zero entries of the code.
for col in range(X.size(1)):
    # Indices of the non-zero elements in this column.
    col_tensor = X[:, col]
    non_zero_mask = col_tensor != 0
    nonzero_indices = torch.nonzero(non_zero_mask, as_tuple=False).squeeze(1).tolist()
    nonzero_indices_list.append(nonzero_indices)
    # Values of the non-zero elements, rounded to 3 decimals.
    values_tensor = torch.round(col_tensor[non_zero_mask], decimals=3)
    formatted_values = [round(float(v), 3) for v in values_tensor]
    nonzero_values_list.append(formatted_values)

# Print the non-zero positions and values for each column.
for col_idx, (indices, values) in enumerate(zip(nonzero_indices_list, nonzero_values_list)):
    print(f"Column {col_idx + 1} nonzero indices: {indices}")
    print(f"Column {col_idx + 1}  values: {values}")

# Final reconstruction error.
mse_total = criterion(Y_reconstructed, Y_tensor).item()
# Drop the batch dimension (-> [128, batch_size]), then take column 1.
subset1 = Y_reconstructed.squeeze(0)[:, 1]  # shape [128]
subset2 = Y_tensor.squeeze(0)[:, 1]  # shape [128]
mse_total1 = criterion(subset1, subset2).item()
rmse_total = torch.sqrt(torch.tensor(mse_total1)).item()  # NOTE(review): computed but never used
subset3 = Y_reconstructed.squeeze(0)  # shape [128, batch_size]
subset4 = Y_tensor.squeeze(0)  # shape [128, batch_size]
# RMSE per column (per time step).

rmse_per_column = torch.sqrt(torch.mean((subset3 - subset4) ** 2, dim=0))
print("逐列 RMSE1:", rmse_per_column)
print(f"Final Reconstruction MSE: {mse_total:.6f}")

# Second evaluation pass over files start .. start+len-1 (indices 100-119).
# NOTE(review): `len` here is the module-level constant (== m == 20), shadowing
# the builtin; result_matrix is reused from the previous section and is fully
# overwritten only because len == batch_size — verify if either changes.
for i in range(start, start + len):
    fname2 = os.path.join(data_folder1, f"{file_prefix}{i}{file_extension}")
    if os.path.exists(fname2):
        mat2 = scipy.io.loadmat(fname2)
        if "p" in mat2:
            Y = mat2["p"]
            if Y.size == 64:
                Y_reshaped = Y.reshape(8, 8)
                result_matrix[:, :, i - start] = Y_reshaped
            else:
                print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
        else:
            print(f"'p' not found in {fname2}")
    else:
        print(f"File not found: {fname2}")

# result_matrix is (8, 8, m) complex data; split into real and imaginary
# parts and stack them into (128, m).
Y_real = np.real(result_matrix).reshape(64, len)  # flatten each 8x8 frame into 64 rows
Y_imag = np.imag(result_matrix).reshape(64, m)  # same for the imaginary part (m == len)

# Concatenate so each time step has 128 channels (64 real + 64 imag).
Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, m)

# Convert to a tensor and add a leading batch dimension (b=1):
# final shape (1, 128, m), i.e. (batch, channels, length).
Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)

# Switch to evaluation mode.
model.eval()

with torch.no_grad():
    Y_reconstructed, X = model(Y_tensor)

# Final reconstruction error.
mse_total = criterion(Y_reconstructed, Y_tensor).item()
# Drop the batch dimension (-> [128, m]), then take column 1.
subset1 = Y_reconstructed.squeeze(0)[:, 1]  # shape [128]
subset2 = Y_tensor.squeeze(0)[:, 1]  # shape [128]
mse_total1 = criterion(subset1, subset2).item()
rmse_total = torch.sqrt(torch.tensor(mse_total1)).item()  # NOTE(review): computed but never used
subset3 = Y_reconstructed.squeeze(0)  # shape [128, m]
subset4 = Y_tensor.squeeze(0)  # shape [128, m]
# RMSE per column (per time step).

rmse_per_column = torch.sqrt(torch.mean((subset3 - subset4) ** 2, dim=0))
print("逐列 RMSE2:", rmse_per_column)
print(f"Final Reconstruction MSE: {mse_total:.6f}")
X = X.squeeze(0)  # X is now (k, m) = (481, 20)
# Per-column sums (should be ~1 after the model's normalization).
column_sums = torch.sum(X.squeeze(0), dim=0)  # the extra squeeze(0) is a no-op here

print("逐列 he1:", column_sums)
nonzero_indices_list = []
nonzero_values_list = []

# For every column (time step), collect the non-zero entries of the code.
for col in range(X.size(1)):
    # Indices of the non-zero elements in this column.
    col_tensor = X[:, col]
    non_zero_mask = col_tensor != 0
    nonzero_indices = torch.nonzero(non_zero_mask, as_tuple=False).squeeze(1).tolist()
    nonzero_indices_list.append(nonzero_indices)
    # Values of the non-zero elements, rounded to 3 decimals.
    values_tensor = torch.round(col_tensor[non_zero_mask], decimals=3)
    formatted_values = [round(float(v), 3) for v in values_tensor]
    nonzero_values_list.append(formatted_values)

# Print the non-zero positions and values for each column.
for col_idx, (indices, values) in enumerate(zip(nonzero_indices_list, nonzero_values_list)):
    print(f"Column {col_idx + 1} nonzero indices: {indices}")
    print(f"Column {col_idx + 1}  values: {values}")

    