import torch
import torch.nn as nn
import torch.optim as optim
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os


# Configuration parameters (time dimension only).
m = 100  # number of time steps to load  # NOTE(review): original comment mentioned 6 time points, which disagrees with the value 100 — confirm
k = 481  # dimension of the abundance/representation vector, i.e. number of endmembers
start = 100  # presumably a start index — not used in the visible code; verify
time_steps = 10  # time steps per sample — not used in the visible code
length = 10  # not used in the visible code
K_nonzero = 481  # target number of non-zero entries in X  # NOTE(review): original comment said "top 20", but the value equals k, making the sparsity target 100% — confirm
lambda_sparsity = 0  # weight of the sparsity term in the total loss (0 disables the penalty entirely)
epochs = 1000  # number of training epochs
learning_rate = 0.0001  # optimizer learning rate
batch_size = 10  # mini-batch size along the time axis
# Data-loading parameters
data_folder1 = "dync"
file_prefix = "spatial_correlation_"
file_extension = ".mat"


def load_data(folder, prefix, extension, start_idx, end_idx, device="cuda"):
    """
    Load per-time-step .mat files and stack them into a real-valued tensor.

    Each file is expected to contain a complex array under key "p" with 64
    elements; its real and imaginary parts are stacked along the row axis.

    Parameters:
    folder (str): path of the data folder.
    prefix (str): file-name prefix.
    extension (str): file-name extension (e.g. ".mat").
    start_idx (int): first time-step index (inclusive).
    end_idx (int): last time-step index (exclusive).
    device (str or torch.device): target device for the returned tensor.
        Defaults to "cuda", matching the original hard-coded behavior.

    Returns:
    torch.Tensor: tensor of shape (1, 128, end_idx - start_idx); rows 0-63
    hold the real parts, rows 64-127 the imaginary parts. Missing or
    malformed files leave zeros in the corresponding column.
    """
    result_matrix = np.zeros((8, 8, end_idx - start_idx), dtype=np.complex64)
    for i in range(start_idx, end_idx):
        fname = os.path.join(folder, f"{prefix}{i}{extension}")
        # Keep the try body minimal: only loadmat can raise FileNotFoundError.
        try:
            mat = scipy.io.loadmat(fname)
        except FileNotFoundError:
            print(f"File not found: {fname}")
            continue
        if "p" not in mat:
            print(f"'p' not found in {fname}")
            continue
        Y = mat["p"]
        if Y.size == 64:
            result_matrix[:, :, i - start_idx] = Y.reshape(8, 8)
        else:
            print(f"Shape of 'p' in {fname} is not compatible for reshaping to 8x8.")

    # Split the complex data into stacked real/imaginary rows: (128, T).
    Y_real = np.real(result_matrix).reshape(64, end_idx - start_idx)
    Y_imag = np.imag(result_matrix).reshape(64, end_idx - start_idx)
    Y_combined = np.concatenate((Y_real, Y_imag), axis=0)
    # Move to the requested device and add a leading batch dimension: (1, 128, T).
    Y_tensor = torch.from_numpy(Y_combined).float().to(device)
    return Y_tensor.unsqueeze(0)


def load_endmember_matrix(file_path, device="cuda", num_endmembers=None):
    """
    Load the endmember (dictionary) matrix from a .mat file.

    The file must contain a complex array under key "phasecha" with 64 rows;
    real and imaginary parts are stacked vertically.

    Parameters:
    file_path (str): path of the endmember .mat file.
    device (str or torch.device): target device for the returned tensor.
        Defaults to "cuda", matching the original hard-coded behavior.
    num_endmembers (int or None): expected number of columns; defaults to
        the module-level constant ``k`` when None.

    Returns:
    torch.Tensor: tensor of shape (128, num_endmembers); rows 0-63 hold the
    real parts, rows 64-127 the imaginary parts.

    Raises:
    ValueError: if the matrix does not have 64 rows.
    """
    if num_endmembers is None:
        num_endmembers = k
    mat = scipy.io.loadmat(file_path)
    D = mat["phasecha"]
    if D.shape[0] != 64:
        raise ValueError("The number of rows in D is not compatible with 8x8 reshaping.")
    # reshape is a no-op when D is already (64, num_endmembers); it also
    # validates that the total element count matches.
    D_reshaped = D.reshape(64, num_endmembers)
    D_real = np.real(D_reshaped)
    D_imag = np.imag(D_reshaped)
    D_combined = np.concatenate((D_real, D_imag), axis=0)
    return torch.from_numpy(D_combined).float().to(device)


# Spatial feature-extraction branch (2-D convolutions)
class SpatialBranch(nn.Module):
    """2-D convolutional branch: four Conv2d + LeakyReLU stages with widths
    32 -> 64 -> 128 -> 256, flattened to one feature vector per sample."""

    def __init__(self, in_channels):
        super(SpatialBranch, self).__init__()
        # Build conv1/relu1 ... conv4/relu4 programmatically; attribute names
        # (and therefore parameter-initialization order) match the original.
        prev_width = in_channels
        for stage, width in enumerate((32, 64, 128, 256), start=1):
            setattr(self, f"conv{stage}", nn.Conv2d(prev_width, width, kernel_size=3, padding=1))
            setattr(self, f"relu{stage}", nn.LeakyReLU())
            prev_width = width

    def forward(self, x):
        # Apply the four conv/activation stages in order, then flatten
        # everything after the leading dimension.
        for stage in range(1, 5):
            x = getattr(self, f"conv{stage}")(x)
            x = getattr(self, f"relu{stage}")(x)
        return x.view(x.size(0), -1)


# Temporal feature-extraction branch (1-D convolutions)
class TemporalBranch(nn.Module):
    """1-D convolutional branch over the time axis: four Conv1d + LeakyReLU
    stages with widths 32 -> 64 -> 128 -> 256, flattened per sample."""

    def __init__(self, in_channels):
        super(TemporalBranch, self).__init__()
        # Build conv1/relu1 ... conv4/relu4 programmatically; attribute names
        # (and therefore parameter-initialization order) match the original.
        prev_width = in_channels
        for stage, width in enumerate((32, 64, 128, 256), start=1):
            setattr(self, f"conv{stage}", nn.Conv1d(prev_width, width, kernel_size=3, padding=1))
            setattr(self, f"relu{stage}", nn.LeakyReLU())
            prev_width = width

    def forward(self, x):
        # Rearrange to (batch_size, in_channels, time_steps) for Conv1d.
        x = x.permute(0, 2, 1)
        for stage in range(1, 5):
            x = getattr(self, f"conv{stage}")(x)
            x = getattr(self, f"relu{stage}")(x)
        return x.view(x.size(0), -1)


class DualStreamModel(nn.Module):
    """Two-stream model: spatial (2-D conv) and temporal (1-D conv) features
    are concatenated and projected to ``out_channels`` by one linear layer.

    NOTE(review): the merge layer assumes the concatenated feature vector has
    exactly 512 elements (256 per branch) — confirm against the actual input
    shapes, since each branch flattens all spatial/temporal positions.
    """

    def __init__(self, in_channels, out_channels):
        super(DualStreamModel, self).__init__()
        self.spatial_branch = SpatialBranch(1)
        self.temporal_branch = TemporalBranch(in_channels)
        self.fc_merge = nn.Linear(512, out_channels)

    def forward(self, x):
        # Run both branches on the same input and fuse their flattened
        # features along the feature axis before the final projection.
        merged = torch.cat((self.spatial_branch(x), self.temporal_branch(x)), dim=1)
        return self.fc_merge(merged)


def train_model(model, Y_tensor, D_tensor, criterion, optimizer, epochs, batch_size):
    """
    Train the model by minimizing the reconstruction error between Y and the
    dictionary-based reconstruction of the predicted representation X, plus
    an optional sparsity penalty.

    Parameters:
    model (nn.Module): model to train.
    Y_tensor (torch.Tensor): input data; sliced along dim 2 (time) per batch.
    D_tensor (torch.Tensor): endmember/dictionary matrix tensor.
    criterion (nn.Module): reconstruction loss (MSE).
    optimizer (torch.optim.Optimizer): optimizer.
    epochs (int): number of training epochs.
    batch_size (int): number of time steps per mini-batch.

    Returns:
    list: average loss value per epoch.
    """
    loss_values = []
    m = Y_tensor.size(2)  # total time steps (shadows the module-level m)
    for epoch in range(epochs):
        model.train()
        total_loss = 0
        # NOTE(review): the trailing m % batch_size time steps are never used;
        # if m < batch_size, num_batches is 0 and avg_loss divides by zero.
        num_batches = m // batch_size
        for batch_idx in range(num_batches):
            start_idx = batch_idx * batch_size
            end_idx = start_idx + batch_size
            batch_Y = Y_tensor[:, :, start_idx:end_idx]

            optimizer.zero_grad()
            batch_Y = batch_Y.cuda()

            # Forward pass: predict the representation X for this batch.
            X = model(batch_Y)

            # Reconstruct Y from X and the dictionary D.
            X_transposed = X.transpose(1, 0)  # (k, batch)
            # NOTE(review): "kb,cb->cb" multiplies element-wise over the
            # shared "b" axis and sums over "k" of the first operand only —
            # it is NOT the matrix product D @ X, and it requires D_tensor's
            # second dimension to equal the batch size. The intended
            # reconstruction is probably einsum("ck,kb->cb", D_tensor,
            # X_transposed); verify before relying on this.
            Y_reconstructed = torch.einsum("kb,cb->cb", X_transposed, D_tensor)  # (batch, channel_dim)
            Y_reconstructed = Y_reconstructed.unsqueeze(2)  # (batch, channel_dim, 1)

            # MSE reconstruction error.
            mse_loss = criterion(Y_reconstructed, batch_Y)

            # Sparsity loss: push the fraction of positive entries of X
            # toward the target ratio K_nonzero / k (module-level constants).
            target_sparsity = K_nonzero / k
            # NOTE(review): (X > 0) is non-differentiable, so this term
            # contributes no gradient to the model parameters.
            actual_sparsity = (X > 0).float().mean(dim=1)  # fraction of positive entries per sample
            sparsity_loss = ((actual_sparsity - target_sparsity) ** 2).mean()

            # Total loss = MSE reconstruction error + weighted sparsity
            # penalty (lambda_sparsity is 0 at module level, disabling it).
            loss = mse_loss + lambda_sparsity * sparsity_loss

            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
        avg_loss = total_loss / num_batches
        loss_values.append(avg_loss)

        # Log progress every 100 epochs.
        if (epoch + 1) % 100 == 0:
            print(f"Epoch [{epoch + 1}/{epochs}], Average Loss: {avg_loss:.4f}")

    return loss_values


def test_model(model, Y_tensor, D_tensor, criterion):
    """
    Evaluate the model: reconstruct Y from the predicted representation X
    and report the reconstruction error.

    Parameters:
    model (nn.Module): model to evaluate.
    Y_tensor (torch.Tensor): input data.
    D_tensor (torch.Tensor): endmember/dictionary matrix tensor.
    criterion (nn.Module): reconstruction loss (MSE).

    Returns:
    float: overall reconstruction MSE.
    torch.Tensor: per-column RMSE.
    """
    model.eval()
    with torch.no_grad():
        Y_tensor = Y_tensor.cuda()
        X = model(Y_tensor)
        # Reconstruct Y (same contraction as in train_model).
        X_transposed = X.transpose(1, 0)  # (k, batch)
        # NOTE(review): as in train_model, this einsum is element-wise over
        # "b" and is probably not the intended D @ X product; verify.
        Y_reconstructed = torch.einsum("kb,cb->cb", X_transposed, D_tensor)  # (batch, channel_dim)
        Y_reconstructed = Y_reconstructed.unsqueeze(2)  # (batch, channel_dim, 1)

    # Overall reconstruction error.
    mse_total = criterion(Y_reconstructed, Y_tensor).item()
    # Error of column index 1 only (hard-coded; presumably a spot check).
    subset1 = Y_reconstructed.squeeze(0)[:, 1]
    subset2 = Y_tensor.squeeze(0)[:, 1]
    mse_total1 = criterion(subset1, subset2).item()
    # NOTE(review): rmse_total is computed but never returned or printed.
    rmse_total = torch.sqrt(torch.tensor(mse_total1)).item()
    subset3 = Y_reconstructed.squeeze(0)
    subset4 = Y_tensor.squeeze(0)
    # RMSE for each column (time step).
    rmse_per_column = torch.sqrt(torch.mean((subset3 - subset4) ** 2, dim=0))
    print(f"Final Reconstruction MSE: {mse_total:.6f}")
    return mse_total, rmse_per_column


# Load the data: time steps 1 .. m, plus the endmember matrix.
Y_tensor = load_data(data_folder1, file_prefix, file_extension, 1, m + 1)
D_tensor = load_endmember_matrix("Data/DC1/phasecha.mat")

# Model hyper-parameters.
in_channels = 128  # channel dimension of Y (64 real + 64 imaginary rows)
out_channels = k  # size of the predicted representation X

# Build model, loss function, and optimizer (CUDA required).
model = DualStreamModel(in_channels, out_channels).cuda()
criterion = nn.MSELoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)
mm = 0  # mode switch: 0 = train and save; anything else = load a saved model
if mm == 0:
    # Train the model.
    loss_values = train_model(model, Y_tensor, D_tensor, criterion, optimizer, epochs, batch_size)

    # Plot the loss curve to inspect training progress.
    plt.plot(range(epochs), loss_values)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Loss Curve')
    plt.show()
    # Saves the entire pickled module, not just a state_dict.
    torch.save(model, "dync/model_full1.pth")
else:
    # NOTE(review): torch.load on a fully pickled model executes arbitrary
    # code from the checkpoint file — load only trusted files; prefer
    # saving/loading state_dicts instead.
    model = torch.load("dync/model_full1.pth")
    model = model.cuda()