import torch
import torch.nn as nn
import torch.optim as optim
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import torch.nn.functional as F

# Reproducibility: seed every RNG this script touches.
def set_seed(seed):
    """Seed Python, NumPy and PyTorch (CPU + CUDA) RNGs and force
    deterministic cuDNN kernels so repeated runs are reproducible."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # Deterministic convolution algorithms; disable auto-tuning, which
    # can pick different (non-deterministic) kernels per run.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Apply the global seed so every run is reproducible.
SEED = 42  # any integer works
set_seed(SEED)

# Experiment hyper-parameters (time-only model).
total_data = 1100  # total number of data files on disk
batch_size = 20  # samples per batch (also the time-window length fed to the model)
k = 481  # abundance-matrix dimension, i.e. number of endmembers (dictionary columns)
start = 1140  # NOTE(review): unused here; overwritten to 1500 before the final evaluation below
seq_len = 20  # time steps per window
m = seq_len  # legacy alias for seq_len, still used by the final-evaluation code
K_nonzero = 16  # sparsity level: keep the top-K entries per time step of the code X, zero the rest
lambda_sparsity = 0.1  # NOTE(review): unused — CustomLoss carries its own alpha/beta weights
epochs = 1500  # number of training epochs
learning_rate = 0.00001  # small LR because the RMSE term has a different gradient scale than MSE

# Input file naming: <data_folder1>/<file_prefix><index><file_extension>
data_folder1 = "dy"
file_prefix = "spatial_correlation_"
file_extension = ".mat"

# Load the endmember (dictionary) matrix D.
# D is complex with 64 rows (an 8x8 grid flattened, since 8*8 = 64); splitting
# real/imag parts and stacking them yields a real-valued (128, k) dictionary.
fname4 = "Data/DC1/phasecha.mat"
mat4 = scipy.io.loadmat(fname4)
D = mat4["phasecha"]
if D.shape[0] == 64:
    # reshape is a no-op when D is already (64, k); it mainly validates the layout
    D_reshaped = D.reshape(64, k)
else:
    raise ValueError("The number of rows in D is not compatible with 8x8 reshaping.")

D_real = np.real(D_reshaped)
D_imag = np.imag(D_reshaped)
D_combined = np.concatenate((D_real, D_imag), axis=0)  # (128, k)
D_tensor = torch.from_numpy(D_combined).float().cuda()

class MultiScaleAttention(nn.Module):
    """Self-attention preceded by a learnable fusion of multi-scale
    Conv1d features (kernel sizes 3/5/7/9 plus the raw input).

    Expects ``src`` shaped (L, B, C) — sequence-first, matching
    ``nn.MultiheadAttention`` with ``batch_first=False``.
    """

    def __init__(self, d_model, nhead, dropout=0.1):
        super().__init__()
        # Submodules are created in this exact order so parameter
        # initialisation consumes the RNG identically to the reference code.
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)

        # Four parallel, length-preserving convolutions with growing receptive fields.
        self.conv1 = nn.Conv1d(d_model, d_model, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(d_model, d_model, kernel_size=5, padding=2)
        self.conv3 = nn.Conv1d(d_model, d_model, kernel_size=7, padding=3)
        self.conv4 = nn.Conv1d(d_model, d_model, kernel_size=9, padding=4)

        # One learnable fusion weight per branch (identity + 4 convolutions).
        self.fusion_weights = nn.Parameter(torch.ones(5) / 5)

        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src, is_causal=False):
        # (L, B, C) -> (B, C, L) so Conv1d sees channels on dim 1.
        as_conv_input = src.permute(1, 2, 0)

        # Branch outputs: identity first, then the four convolutions.
        branches = [
            as_conv_input,
            self.conv1(as_conv_input),
            self.conv2(as_conv_input),
            self.conv3(as_conv_input),
            self.conv4(as_conv_input),
        ]

        # Softmax keeps the branch weights positive and summing to one.
        weights = F.softmax(self.fusion_weights, dim=0)
        fused = sum(w * b for w, b in zip(weights, branches))

        # Back to (L, B, C); pre-norm, then self-attention with q = k = v.
        fused = fused.permute(2, 0, 1)
        normed = self.norm(fused)
        attn_out, _ = self.self_attn(normed, normed, normed,
                                     need_weights=False, is_causal=is_causal)

        # Residual connection. Fast path: shapes already agree.
        if src.shape == attn_out.shape:
            return src + self.dropout(attn_out)

        # Defensive path: reconcile a sequence-length mismatch before adding.
        print(f"形状不匹配: src {src.shape}, src2 {attn_out.shape}")
        if src.shape[0] < attn_out.shape[0]:
            # Attention output is longer — truncate it.
            attn_out = attn_out[:src.shape[0], :, :]
        elif src.shape[0] > attn_out.shape[0]:
            # Attention output is shorter — zero-pad along the sequence axis.
            pad = torch.zeros(src.shape[0] - attn_out.shape[0],
                              attn_out.shape[1], attn_out.shape[2],
                              device=attn_out.device)
            attn_out = torch.cat([attn_out, pad], dim=0)
        return src + self.dropout(attn_out)

class PreNormTransformerEncoderLayer(nn.Module):
    """Pre-norm Transformer encoder layer whose attention sublayer is the
    multi-scale convolutional attention defined in this file.

    Each sublayer follows: LayerNorm -> sublayer -> dropout -> residual add.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
        super().__init__()
        # Creation order matches the reference implementation so parameter
        # initialisation draws from the RNG in the same sequence.
        self.self_attn = MultiScaleAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, src):
        # Sublayer 1: multi-scale self-attention with a pre-norm residual.
        attn_branch = self.self_attn(self.norm1(src), is_causal=False)
        src = src + self.dropout1(attn_branch)

        # Sublayer 2: position-wise feed-forward with a pre-norm residual.
        ff_branch = self.norm2(src)
        ff_branch = torch.relu(self.linear1(ff_branch))
        ff_branch = self.linear2(self.dropout(ff_branch))
        return src + self.dropout2(ff_branch)

class TransformerAutoencoder(nn.Module):
    """Transformer-based sparse autoencoder.

    Encodes an input Y of shape (batch, channel_dim, time_steps) into a
    sparse, column-normalised code X of shape (batch, k, time_steps), then
    reconstructs Y as D @ X using the fixed dictionary buffer ``D``.

    NOTE(review): this class reads the module-level globals ``batch_size``
    (in __init__) and ``K_nonzero`` (in forward) — it is not self-contained.
    """

    def __init__(self, k, D, time_steps, channel_dim=128, nhead=8, num_layers=2, dropout_rate=0.1):
        super(TransformerAutoencoder, self).__init__()
        self.k = k
        self.time_steps = time_steps
        
        # Learnable positional encoding of shape (1, batch_size, channel_dim).
        # NOTE(review): the second axis is the BATCH axis of the transformer
        # input, and the tensor is repeated identically over time steps below,
        # so it encodes a batch slot rather than a temporal position — confirm
        # this is the intended design.
        self.positional_encoding = nn.Parameter(torch.zeros(1, batch_size, channel_dim))
        # Initialise the positional encoding from a small normal distribution.
        nn.init.normal_(self.positional_encoding, mean=0.0, std=0.02)
        
        # Stack of pre-norm encoder layers.
        self.transformer_layers = nn.ModuleList([
            PreNormTransformerEncoderLayer(
                d_model=channel_dim,
                nhead=nhead,
                dim_feedforward=1024,
                dropout=dropout_rate
            ) for _ in range(num_layers + 1)  # one extra layer beyond num_layers
        ])
        
        # Projection from the transformer feature space to the k-dim code.
        # The final ReLU keeps the code non-negative before top-K selection.
        self.fc = nn.Sequential(
            nn.Linear(channel_dim, 512),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(512, k),
            nn.ReLU(),
            nn.Dropout(dropout_rate)
        )
        
        # Register the dictionary D as a non-trainable buffer (moves with the
        # module between devices, saved in state_dict, no gradients).
        self.register_buffer('D', D)
    
    def forward(self, Y):
        # Input shape: (batch, channel_dim, time_steps).
        
        # Sequence-first layout for the (batch_first=False) attention layers:
        # (time_steps, batch, channel_dim).
        Y_transposed = Y.permute(2, 0, 1)
        
        # Add the positional encoding, adapting it to the actual batch size.
        # NOTE(review): this local shadows the module-level ``batch_size``; in
        # this script inputs are unsqueezed to batch 1, so the mismatch branch
        # below runs on every call.
        batch_size = Y.shape[0]
        if batch_size == self.positional_encoding.shape[1]:
            # Batch sizes match: repeat the same encoding at every time step.
            pos_encoding = self.positional_encoding.repeat(self.time_steps, 1, 1)
        else:
            # Batch sizes differ: truncate, or zero-pad, the batch axis first.
            print(f"调整位置编码: 从批次大小 {self.positional_encoding.shape[1]} 到 {batch_size}")
            # Build a temporary encoding matching the incoming batch size.
            temp_pos_encoding = self.positional_encoding[:, :batch_size, :] if batch_size < self.positional_encoding.shape[1] else \
                torch.cat([self.positional_encoding, 
                          torch.zeros(1, batch_size - self.positional_encoding.shape[1], 
                                     self.positional_encoding.shape[2], device=self.positional_encoding.device)], dim=1)
            pos_encoding = temp_pos_encoding.repeat(self.time_steps, 1, 1)
        
        encoded = Y_transposed + pos_encoding
        
        # Run the encoder stack.
        for layer in self.transformer_layers:
            encoded = layer(encoded)
            
        # Back to batch-first for the per-time-step projection.
        encoded = encoded.permute(1, 0, 2)  # (batch, time_steps, channel_dim)
        
        # Dense (non-negative) code, then move k to dim 1.
        X = self.fc(encoded)  # (batch, time_steps, k)
        X = X.permute(0, 2, 1)  # (batch, k, time_steps)
        
        # Hard sparsification: keep only the K_nonzero largest entries per
        # time step (along the endmember axis); scatter them into a zero mask.
        topk_values, topk_indices = torch.topk(X, K_nonzero, dim=1)
        mask = torch.zeros_like(X)
        mask.scatter_(1, topk_indices, topk_values)
        X = mask
        
        # Normalise each time-step column to sum to 1 (epsilon guards the
        # all-zero case that ReLU can produce).
        X = X / (torch.sum(X, dim=1, keepdim=True) + 1e-8)
        
        # Reconstruct Y = D @ X, expressed per batch/time step via einsum.
        X_transposed = X.transpose(1, 2)  # (batch, time_steps, k)
        Y_reconstructed = torch.einsum("btk,ck->btc", X_transposed, self.D)  # (batch, time_steps, channel_dim)
        Y_reconstructed = Y_reconstructed.permute(0, 2, 1)  # (batch, channel_dim, time_steps)
        
        return Y_reconstructed, X

# Model dimensions.
channel_dim = 128  # channel dimension; must be divisible by nhead

# Attention / depth hyper-parameters.
nhead = 4  # number of attention heads
num_layers = 2 # requested Transformer depth (the model adds one extra layer internally)

# Build the autoencoder around the fixed dictionary D and move it to the GPU.
model = TransformerAutoencoder(k=k, D=D_tensor, time_steps=seq_len, channel_dim=channel_dim, nhead=nhead,
                              num_layers=num_layers).cuda()

# Composite loss: RMSE reconstruction + L1 sparsity + temporal smoothness.
class CustomLoss(nn.Module):
    """Loss = RMSE(y_pred, y_true) + alpha * mean|X| + beta * mean|ΔX over time|.

    Args:
        alpha: weight of the L1 sparsity penalty on the code X.
        beta: weight of the temporal-smoothness penalty on X.
    """

    def __init__(self, alpha=0.1, beta=0.1):
        super(CustomLoss, self).__init__()
        self.mse = nn.MSELoss()
        self.alpha = alpha
        self.beta = beta

    def forward(self, y_pred, y_true, X):
        """Return the scalar weighted sum of the three loss terms.

        Args:
            y_pred: reconstructed signal, same shape as ``y_true``.
            y_true: target signal.
            X: sparse code of shape (batch, k, time_steps).
        """
        # Reconstruction term: root-mean-square error.
        recon_loss = torch.sqrt(self.mse(y_pred, y_true))

        # Sparsity term: mean absolute value (L1) of the code.
        sparsity_loss = torch.mean(torch.abs(X))

        # Smoothness term: difference between ADJACENT TIME STEPS.
        # BUG FIX: X is (batch, k, time_steps); the original diffed along
        # dim 1 (the endmember axis, X[:, 1:] - X[:, :-1]), contradicting
        # its own "adjacent time steps" intent. Diff along the last axis.
        smooth_loss = torch.mean(torch.abs(X[..., 1:] - X[..., :-1]))

        return recon_loss + self.alpha * sparsity_loss + self.beta * smooth_loss

criterion = CustomLoss()
# Adam with a small weight decay for mild L2 regularisation.
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)
mm = 1 # 1 = load the pretrained model below; 0 = train from scratch (the original comment claimed "force retrain", which contradicts the code)

# Train/test split over whole batches: hold out 10 random batches for testing.
total_batches = total_data // batch_size
all_batch_indices = list(range(total_batches))
test_batch_indices = np.random.choice(all_batch_indices, 10, replace=False)
train_batch_indices = [idx for idx in all_batch_indices if idx not in test_batch_indices and idx < 45]  # training additionally capped at batch index < 45

if mm == 0:
    # Train from scratch.
    loss_values = []
    print("开始训练...")
    for epoch in range(epochs):
        model.train()
        total_loss = 0
        
        for batch_idx in train_batch_indices:
            # Each batch covers `batch_size` consecutive files (1-based names).
            start_idx = batch_idx * batch_size
            end_idx = start_idx + batch_size
            

            # Complex 8x8 frames stacked along the time axis.
            result_matrix = np.zeros((8, 8, batch_size), dtype=np.complex64)

            # Read each frame's "p" matrix from its .mat file; missing or
            # malformed files are reported and leave zeros in their slot.
            for i in range(start_idx + 1, end_idx + 1):
                fname2 = os.path.join(data_folder1, f"{file_prefix}{i}{file_extension}")
                if os.path.exists(fname2):
                    mat2 = scipy.io.loadmat(fname2)
                    if "p" in mat2:
                        Y = mat2["p"]
                        if Y.size == 64:
                            Y_reshaped = Y.reshape(8, 8)
                            result_matrix[:, :, i - start_idx - 1] = Y_reshaped
                        else:
                            print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
                    else:
                        print(f"'p' not found in {fname2}")
                else:
                    print(f"File not found: {fname2}")

            # Split the complex (8,8,batch_size) stack into real and imaginary
            # parts, flattening each 8x8 frame to 64 rows.
            Y_real = np.real(result_matrix).reshape(64, batch_size)
            Y_imag = np.imag(result_matrix).reshape(64, batch_size)

            # Stack real over imaginary: 128 channels (64 real + 64 imag) per time step.
            Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, batch_size)
            
            # To a float GPU tensor with a leading batch dimension of 1:
            # final shape (1, 128, batch_size) in (batch, channels, length) layout.
            Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)
            
            # Forward pass.
            Y_reconstructed, X = model(Y_tensor)
            
            # Composite loss (reconstruction + sparsity + smoothness).
            loss = criterion(Y_reconstructed, Y_tensor, X)
           
            
            # Backward pass; zeroing gradients after step is equivalent to
            # zeroing before the next backward.
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            
            total_loss += loss.item()
        
        # Mean loss over the training batches for this epoch.
        avg_loss = total_loss / len(train_batch_indices)
        loss_values.append(avg_loss)
        print(f"Epoch [{epoch + 1}/{epochs}], 平均损失: {avg_loss:.6f}")

    # Persist the full model object (pickles the class by reference).
    torch.save(model, "dync/model_full11.pth")
else:
    print("加载预训练模型...")
    # NOTE(review): loads a different file than the training branch saves
    # ("model_smooth_transformer_final" vs "model_full11") — confirm intended.
    # torch.load unpickles arbitrary objects; only load trusted files.
    model = torch.load("dync/model_smooth_transformer_final.pth")
    model.eval()

# Evaluate on the held-out test batches.
test_loss = 0
rmse_list = []
for batch_idx in test_batch_indices:
    start_idx = batch_idx * batch_size
    end_idx = start_idx + batch_size

    # Complex 8x8 frames stacked along the time axis (same layout as training).
    result_matrix = np.zeros((8, 8, batch_size), dtype=np.complex64)

    # Read each frame's "p" matrix from its .mat file (1-based file names).
    for i in range(start_idx + 1, end_idx + 1):
        fname2 = os.path.join(data_folder1, f"{file_prefix}{i}{file_extension}")
        if os.path.exists(fname2):
            mat2 = scipy.io.loadmat(fname2)
            if "p" in mat2:
                Y = mat2["p"]
                if Y.size == 64:
                    Y_reshaped = Y.reshape(8, 8)
                    result_matrix[:, :, i - start_idx - 1] = Y_reshaped
                else:
                    print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
            else:
                print(f"'p' not found in {fname2}")
        else:
            print(f"File not found: {fname2}")

    # Split the complex (8,8,batch_size) stack into real and imaginary parts,
    # flattening each 8x8 frame to 64 rows.
    Y_real = np.real(result_matrix).reshape(64, batch_size)
    Y_imag = np.imag(result_matrix).reshape(64, batch_size)

    # Stack real over imaginary: 128 channels per time step.
    Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, batch_size)
    
    # To a (1, 128, batch_size) float GPU tensor.
    Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)
    
    # Debug: confirm the input shape per batch.
    print(f"测试批次 {batch_idx} - Y_tensor shape: {Y_tensor.shape}")
    
    model.eval()
    with torch.no_grad():
        # Forward pass.
        Y_reconstructed, X = model(Y_tensor)
        
        # Defensive check: reconstruction should match the input shape.
        if Y_reconstructed.shape != Y_tensor.shape:
            print(f"警告: 形状不匹配 - Y_reconstructed: {Y_reconstructed.shape}, Y_tensor: {Y_tensor.shape}")
            # If only the batch dimension differs, keep the first batch element.
            if Y_reconstructed.shape[0] != Y_tensor.shape[0]:
                Y_reconstructed = Y_reconstructed[0:1]  # keep batch 0 -> [1, 128, batch_size]
        
        print(f"调整后 - Y_reconstructed shape: {Y_reconstructed.shape}")
        
        # NOTE(review): despite the name, this accumulates the full composite
        # loss (RMSE + sparsity + smoothness), not a pure MSE.
        mse = criterion(Y_reconstructed, Y_tensor, X).item()
        test_loss += mse
        
        # Drop the batch dimension before per-time-step RMSE.
        Y_reconstructed_squeezed = Y_reconstructed.squeeze(0)  # [128, batch_size]
        Y_tensor_squeezed = Y_tensor.squeeze(0)  # [128, batch_size]
        
        print(f"计算RMSE - Y_reconstructed_squeezed: {Y_reconstructed_squeezed.shape}, Y_tensor_squeezed: {Y_tensor_squeezed.shape}")
        
        # RMSE for each time step (column) separately.
        rmse_batch = []
        for t in range(Y_tensor_squeezed.shape[1]):
            # Per-time-step MSE over the 128 channels, then its square root.
            mse_val = torch.mean((Y_reconstructed_squeezed[:, t] - Y_tensor_squeezed[:, t]) ** 2).item()
            rmse_val = np.sqrt(mse_val)
            rmse_batch.append(rmse_val)
        
        rmse_list.append(rmse_batch)

# Report per-batch, per-time-step RMSE on the test set.
print("\n测试集RMSE结果：")
for i, rmse in enumerate(rmse_list):
    print(f"测试批次 {i+1} 的RMSE:")
    for j, value in enumerate(rmse):
        print(f"  时间步 {j+1}: {value:.6f}")

# Average RMSE over all batches and time steps.
avg_rmse = np.mean([val for batch in rmse_list for val in batch])
print(f"\n测试集平均RMSE: {avg_rmse:.6f}")
print(f"测试集平均MSE: {test_loss / len(test_batch_indices):.6f}")

# Inspect the sparse code of the LAST test batch (X is left over from the loop).
X = X.squeeze(0)  # [481, batch_size]
column_sums = torch.sum(X, dim=0)  # should be ~1 per column after normalisation
print("\n稀疏表示结果：")
print("每列的和:", [f"{float(val):.6f}" for val in column_sums])

nonzero_indices_list = []
nonzero_values_list = []

# For each time step (column), collect the indices and values of the
# non-zero code entries (at most K_nonzero of them).
for col in range(X.size(1)):
    col_tensor = X[:, col]
    non_zero_mask = col_tensor != 0
    nonzero_indices = torch.nonzero(non_zero_mask, as_tuple=False).squeeze(1).tolist()
    nonzero_indices_list.append(nonzero_indices)
    values_tensor = torch.round(col_tensor[non_zero_mask], decimals=3)
    formatted_values = [round(float(v), 3) for v in values_tensor]
    nonzero_values_list.append(formatted_values)

# Print the non-zero positions and values for each time step.
for col_idx, (indices, values) in enumerate(zip(nonzero_indices_list, nonzero_values_list)):
    print(f"\n时间步 {col_idx + 1}:")
    print(f"非零位置序号: {indices}")
    print(f"对应的值: {values}")

data_folder1 = "dy"
start = 1500
for i in range(start, start+seq_len):
    fname2 = os.path.join(data_folder1, f"{file_prefix}{i}{file_extension}")
    if os.path.exists(fname2):
        mat2 = scipy.io.loadmat(fname2)
        if "p" in mat2:
            Y = mat2["p"]
            if Y.size == 64:
                Y_reshaped = Y.reshape(8, 8)
                result_matrix[:, :, i-start] = Y_reshaped
            else:
                print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
        else:
            print(f"'p' not found in {fname2}")
    else:
        print(f"File not found: {fname2}")

# 此时 result_matrix 形状为 (8,8,m)，里面是复数数据
# 我们将其分解为实部和虚部，再拼接起来形成 (128, m)
Y_real = np.real(result_matrix).reshape(64, seq_len)  # 将8x8拉平成64行，m列
Y_imag = np.imag(result_matrix).reshape(64, m)  # 虚部同理

# 拼接实部和虚部，使得每个时间步有128个通道(64实+64虚)
Y_combined = np.concatenate((Y_real, Y_imag), axis=0)  # (128, m)

# 转换为 PyTorch 张量，并增加 batch_size 维度(b=1)
# 最终张量形状为 (1, 128, m)
# 格式为 (batch, channels, length)
Y_tensor = torch.from_numpy(Y_combined).float().cuda().unsqueeze(0)

# Switch to evaluation mode (disables dropout).
model.eval()

# Run the model on the final window without tracking gradients.
with torch.no_grad():
    Y_reconstructed, X = model(Y_tensor)

# Final reconstruction error.
# NOTE(review): despite the name, this is the full composite loss
# (RMSE + sparsity + smoothness), not a pure MSE.
mse_total = criterion(Y_reconstructed, Y_tensor, X).item()
print(f"最终重构MSE: {mse_total:.6f}")

# Overall RMSE over all channels and time steps.
Y_reconstructed_squeezed = Y_reconstructed.squeeze(0)  # [128, seq_len]
Y_tensor_squeezed = Y_tensor.squeeze(0)  # [128, seq_len]
rmse = torch.sqrt(torch.mean((Y_reconstructed_squeezed - Y_tensor_squeezed) ** 2)).item()
print(f"最终RMSE: {rmse:.6f}")

# Inspect the final sparse code.
X = X.squeeze(0)  # [481, seq_len]
print("\n最终稀疏表示:")
column_sums = torch.sum(X, dim=0)  # should be ~1 per column after normalisation
print("列和:", [f"{float(val):.6f}" for val in column_sums])

# Collect the non-zero entries of each time-step column.
nonzero_indices_list = []
nonzero_values_list = []

for col in range(X.size(1)):
    col_tensor = X[:, col]
    non_zero_mask = col_tensor != 0
    nonzero_indices = torch.nonzero(non_zero_mask, as_tuple=False).squeeze(1).tolist()
    nonzero_indices_list.append(nonzero_indices)
    values_tensor = torch.round(col_tensor[non_zero_mask], decimals=3)
    formatted_values = [round(float(v), 3) for v in values_tensor]
    nonzero_values_list.append(formatted_values)

# Print the non-zero positions and values for each time step.
for col_idx, (indices, values) in enumerate(zip(nonzero_indices_list, nonzero_values_list)):
    print(f"\n时间步 {col_idx + 1}:")
    print(f"非零位置序号: {indices}")
    print(f"对应的值: {values}")
    