# -*- coding: utf-8 -*-
"""
GCN-GRU 模型实现 (PyTorch) - [专业优化版]
集成了模型堆叠、Dropout、学习率调度、梯度裁剪等关键优化技术，并包含可视化。
"""
import torch
import torch.nn as nn
import numpy as np
import argparse
from tqdm import tqdm
import matplotlib.pyplot as plt


# ======================================================================================
# Stage 1: data preparation and graph construction (unchanged from the previous version)
# ======================================================================================

class StandardScaler:
    """Z-score normalizer with a fixed mean/std pair.

    The statistics are supplied at construction time (computed on the
    training split) and the same affine mapping -- and its inverse -- is
    applied to every array-like input, so no information leaks between
    splits.
    """

    def __init__(self, mean, std):
        # Frozen statistics; never refit after construction.
        self.mean = mean
        self.std = std

    def transform(self, data):
        """Map raw values into zero-mean, unit-variance space."""
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        """Undo `transform`, returning values in the original scale."""
        return data * self.std + self.mean


def generate_synthetic_data(num_nodes, num_features, num_timesteps):
    """Create a random sparse symmetric graph plus noisy sinusoidal node signals.

    Returns:
        time_series: (num_timesteps, num_nodes, num_features) array; feature 0
            of every node is a sine wave with a node-specific random phase and
            amplitude plus Gaussian noise.
        adj_mx: (num_nodes, num_nodes) float32 symmetric 0/1 adjacency with no
            self-loops; each off-diagonal pair gets an edge with ~5% probability.
    """
    print(f"正在生成 {num_timesteps} 个时间步长，{num_nodes} 个节点的合成数据...")

    # Sparse undirected graph: sample the upper triangle, then mirror it.
    adj_mx = np.zeros((num_nodes, num_nodes), dtype=np.float32)
    for src in range(num_nodes):
        for dst in range(src, num_nodes):
            if src != dst and np.random.rand() > 0.95:
                adj_mx[src, dst] = 1.0
                adj_mx[dst, src] = 1.0

    # One sine wave per node, each with its own phase, amplitude, and noise.
    time_series = np.zeros((num_timesteps, num_nodes, num_features))
    steps = np.arange(0, num_timesteps)
    for node in range(num_nodes):
        phase, amplitude = np.random.rand() * 2 * np.pi, np.random.rand() * 1.5 + 0.5
        noise = np.random.randn(num_timesteps) * 0.15
        time_series[:, node, 0] = np.sin(steps / 20.0 + phase) * amplitude + noise

    return time_series, adj_mx


def _symmetric_normalized_adjacency(adj_mx, num_nodes):
    """Return D^-1/2 (A + I) D^-1/2 as a float32 torch tensor (GCN propagation matrix)."""
    adj_with_loops = np.identity(num_nodes) + adj_mx
    degrees = np.sum(adj_with_loops, axis=1)
    d_inv_sqrt = np.power(degrees, -0.5)
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.  # guard against isolated (degree-0) nodes
    d_mat_inv_sqrt = np.diag(d_inv_sqrt)
    normalized = d_mat_inv_sqrt.dot(adj_with_loops).dot(d_mat_inv_sqrt)
    return torch.from_numpy(normalized).float()


def _create_samples(data, T_in, T_out):
    """Slide a (T_in input, T_out target) window over the series; returns float tensors."""
    X, Y = [], []
    for i in range(len(data) - T_in - T_out + 1):
        X.append(data[i:i + T_in])
        Y.append(data[i + T_in:i + T_in + T_out])
    return torch.from_numpy(np.array(X)).float(), torch.from_numpy(np.array(Y)).float()


def load_synthetic_dataset(batch_size, test_batch_size, T_in=12, T_out=12):
    """Build train/val/test DataLoaders over a synthetic spatio-temporal dataset.

    Args:
        batch_size: batch size of the (shuffled) training loader.
        test_batch_size: batch size of the (ordered) validation/test loaders.
        T_in: number of input time steps per sample.
        T_out: number of forecast time steps per sample.

    Returns:
        (train_loader, val_loader, test_loader, scaler, adj_matrix_tensor, num_nodes)
        where `adj_matrix_tensor` is the symmetric-normalized adjacency used
        by the GCN layers and `scaler` can invert the normalization.
    """
    NUM_NODES, NUM_FEATURES, NUM_TIMESTEPS = 50, 1, 3000
    time_series_data, adj_mx = generate_synthetic_data(NUM_NODES, NUM_FEATURES, NUM_TIMESTEPS)

    # 70/10/20 chronological split (no shuffling -- this is time-series data).
    train_size = int(NUM_TIMESTEPS * 0.7)
    val_size = int(NUM_TIMESTEPS * 0.1)
    train_data = time_series_data[:train_size]
    val_data = time_series_data[train_size:train_size + val_size]
    test_data = time_series_data[train_size + val_size:]

    # Fit the scaler on the training split only, to avoid data leakage.
    scaler = StandardScaler(mean=train_data.mean(), std=train_data.std())
    train_data = scaler.transform(train_data)
    val_data = scaler.transform(val_data)
    test_data = scaler.transform(test_data)

    adj_matrix_tensor = _symmetric_normalized_adjacency(adj_mx, NUM_NODES)

    X_train, y_train = _create_samples(train_data, T_in, T_out)
    X_val, y_val = _create_samples(val_data, T_in, T_out)
    X_test, y_test = _create_samples(test_data, T_in, T_out)

    train_loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(X_train, y_train), batch_size=batch_size, shuffle=True)
    val_loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(X_val, y_val), batch_size=test_batch_size, shuffle=False)
    test_loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(X_test, y_test), batch_size=test_batch_size, shuffle=False)
    return train_loader, val_loader, test_loader, scaler, adj_matrix_tensor, NUM_NODES


# ======================================================================================
# Stage 2: the [optimized] GCN-GRU model architecture
# ======================================================================================

class GCN(nn.Module):
    """Single graph-convolution layer: X' = A_hat @ X @ W.

    `adj` is expected to already be the normalized propagation matrix
    (e.g. D^-1/2 (A + I) D^-1/2); the layer applies no bias or activation.
    """

    def __init__(self, in_features, out_features):
        super(GCN, self).__init__()
        # Learnable per-feature projection, Xavier-initialized.
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, x, adj):
        """x: (B, T, N, F_in), adj: (N, N) -> (B, T, N, F_out)."""
        projected = torch.matmul(x, self.weight)
        # Mix features along the graph: out[b,t,n] = sum_m adj[n,m] * projected[b,t,m].
        mixed = torch.einsum('nm,btmc->btnc', adj, projected)
        return mixed


class ST_Block(nn.Module):
    """[优化]：ST-Block现在包含Dropout"""

    def __init__(self, in_features, gcn_hidden_features, gru_hidden_features, dropout_rate):
        super(ST_Block, self).__init__()
        self.gcn = GCN(in_features, gcn_hidden_features)
        self.gru = nn.GRU(gcn_hidden_features, gru_hidden_features, batch_first=True)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x, adj):
        # 输入x维度: (Batch, T_in, N, F_in)
        gcn_out = self.gcn(x, adj)
        gcn_out_activated = self.relu(gcn_out)
        gcn_out_dropped = self.dropout(gcn_out_activated)

        batch_size, T_in, num_nodes, F_gcn_hidden = gcn_out_dropped.shape
        gru_input = gcn_out_dropped.permute(0, 2, 1, 3).reshape(batch_size * num_nodes, T_in, F_gcn_hidden)

        # 对于GRU，我们需要它的所有时间步的输出，而不仅仅是最后一个隐藏状态
        gru_output, _ = self.gru(gru_input)
        gru_output = gru_output.reshape(batch_size, num_nodes, T_in, -1)
        gru_output = gru_output.permute(0, 2, 1, 3)  # (Batch, T_in, N, F_gru_hidden)

        # 残差连接: 将输入x与GRU的输出相加 (如果维度匹配)
        # 这里为了简化，我们只返回处理后的序列
        return gru_output


class GCN_GRU(nn.Module):
    """[优化]：主模型现在可以堆叠多个ST-Block"""

    def __init__(self, in_features, gcn_hidden_features, gru_hidden_features, num_nodes, T_out, num_layers,
                 dropout_rate):
        super(GCN_GRU, self).__init__()

        self.st_blocks = nn.ModuleList()
        # 输入层
        self.st_blocks.append(ST_Block(in_features, gcn_hidden_features, gru_hidden_features, dropout_rate))

        # 隐藏层
        for _ in range(num_layers - 1):
            self.st_blocks.append(ST_Block(gru_hidden_features, gcn_hidden_features, gru_hidden_features, dropout_rate))

        # 输出层
        # 我们只取序列的最后一个时间步的输出来做预测
        self.fc1 = nn.Linear(gru_hidden_features, 128)
        self.fc2 = nn.Linear(128, T_out)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x, adj):
        # x的输入维度: (Batch, T_in, N, F_in)
        for block in self.st_blocks:
            x = block(x, adj)  # 每个块的输出是下一个块的输入

        # 只取序列的最后一个时间步进行预测
        final_sequence_output = x[:, -1, :, :]  # (Batch, N, F_gru_hidden)

        # 通过全连接层
        fc_out = self.relu(self.fc1(final_sequence_output))
        fc_out_dropped = self.dropout(fc_out)
        final_output = self.fc2(fc_out_dropped)  # (Batch, N, T_out)

        final_output = final_output.permute(0, 2, 1).unsqueeze(-1)
        return final_output


# ======================================================================================
# Stage 3 & 4: [optimized] model training and evaluation
# ======================================================================================

def train_epoch(model, train_loader, optimizer, loss_fn, device, adj, clip_grad=1.0):
    """Run one training epoch and return the mean batch loss.

    Args:
        model: network called as ``model(inputs, adj)``.
        train_loader: iterable of (X, y) batches.
        optimizer: optimizer stepping ``model``'s parameters.
        loss_fn: criterion comparing predictions with targets.
        device: device to run the epoch on.
        adj: (N, N) propagation matrix shared by every batch.
        clip_grad: max gradient L2-norm; clipping stabilizes the recurrent
            layers against exploding gradients.

    Returns:
        Average loss over all batches in the epoch.
    """
    model.train()
    # The adjacency matrix is constant across batches: transfer it to the
    # device once, instead of once per iteration as before.
    adj = adj.to(device)
    total_loss = 0
    for X_batch, y_batch in tqdm(train_loader, desc="Training", leave=False):
        X_batch, y_batch = X_batch.to(device), y_batch.to(device)
        optimizer.zero_grad()
        output = model(X_batch, adj)
        loss = loss_fn(output, y_batch)
        loss.backward()

        # Gradient clipping keeps each update bounded.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)

        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(train_loader)


def evaluate(model, data_loader, loss_fn, device, adj):
    """Compute the mean batch loss over `data_loader` with gradients disabled.

    Args:
        model: network called as ``model(inputs, adj)``.
        data_loader: iterable of (X, y) batches.
        loss_fn: criterion comparing predictions with targets.
        device: device to run on.
        adj: (N, N) propagation matrix shared by every batch.

    Returns:
        Average loss over all batches.
    """
    model.eval()
    adj = adj.to(device)  # constant across batches: transfer once, not per batch
    total_loss = 0
    with torch.no_grad():
        for X_batch, y_batch in data_loader:
            X_batch, y_batch = X_batch.to(device), y_batch.to(device)
            output = model(X_batch, adj)
            total_loss += loss_fn(output, y_batch).item()
    return total_loss / len(data_loader)


def predict_and_evaluate(model, data_loader, scaler, device, adj):
    """Run inference over `data_loader` and score predictions in the original scale.

    Predictions and targets are inverse-transformed with `scaler` before the
    metrics are computed, so MAE/RMSE are reported in real (denormalized) units.

    Returns:
        (mae, rmse, predictions, ground_truth) with the arrays as numpy.
    """
    model.eval()
    adj = adj.to(device)  # constant across batches: transfer once, not per batch
    all_preds, all_reals = [], []
    with torch.no_grad():
        for X_batch, y_batch in tqdm(data_loader, desc="Testing", leave=False):
            X_batch, y_batch = X_batch.to(device), y_batch.to(device)
            output = model(X_batch, adj)
            all_preds.append(scaler.inverse_transform(output.cpu()))
            all_reals.append(scaler.inverse_transform(y_batch.cpu()))
    preds_tensor = torch.cat(all_preds, dim=0)
    reals_tensor = torch.cat(all_reals, dim=0)
    mae = torch.mean(torch.abs(preds_tensor - reals_tensor)).item()
    rmse = torch.sqrt(torch.mean((preds_tensor - reals_tensor) ** 2)).item()
    return mae, rmse, preds_tensor.numpy(), reals_tensor.numpy()


def plot_loss_curve(train_losses, val_losses):
    """Plot training vs. validation loss per epoch on a shared axis."""
    plt.figure(figsize=(12, 6))
    plt.plot(train_losses, label='Training Loss')
    plt.plot(val_losses, label='Validation Loss')
    plt.title('Training and Validation Loss Over Epochs')
    plt.xlabel('Epoch')
    plt.ylabel('Loss (MSE)')
    plt.legend()
    plt.grid(True)
    plt.show()


def plot_predictions(predictions, ground_truth, node_index=0, num_samples_to_plot=300):
    """Overlay predicted vs. actual values for one node.

    Uses horizon step 0 of each sample (the 1-step-ahead forecast) for the
    first `num_samples_to_plot` windows of the given node.
    """
    predicted = predictions[:num_samples_to_plot, 0, node_index, 0]
    actual = ground_truth[:num_samples_to_plot, 0, node_index, 0]

    plt.figure(figsize=(15, 7))
    plt.plot(actual, label='Actual Values', color='blue', linewidth=2)
    plt.plot(predicted, label='Predicted Values', color='red', linestyle='--', linewidth=1.5)
    plt.title(f'Prediction vs. Actual for Node {node_index}')
    plt.xlabel('Time Step')
    plt.ylabel('Value')
    plt.legend()
    plt.grid(True)
    plt.show()


# Main entry point: parse hyperparameters, train, evaluate, and plot results.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=50, help='增加训练轮数')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--T_in', type=int, default=12)
    parser.add_argument('--T_out', type=int, default=12)
    # --- Additional model hyperparameters ---
    parser.add_argument('--hidden_dim', type=int, default=64, help='GCN和GRU的隐藏层维度')
    parser.add_argument('--num_layers', type=int, default=2, help='堆叠的ST-Block层数')
    parser.add_argument('--dropout_rate', type=float, default=0.3, help='Dropout比率')
    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # NOTE: the same batch size is used for the training and eval loaders here.
    train_loader, val_loader, test_loader, scaler, adj_matrix_tensor, num_nodes = load_synthetic_dataset(
        args.batch_size, args.batch_size, args.T_in, args.T_out
    )

    model = GCN_GRU(
        in_features=1,
        gcn_hidden_features=args.hidden_dim,
        gru_hidden_features=args.hidden_dim,
        num_nodes=num_nodes,
        T_out=args.T_out,
        num_layers=args.num_layers,
        dropout_rate=args.dropout_rate
    ).to(device)

    print("\n优化后的模型架构:")
    print(model)

    loss_fn = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # --- [Optimization]: LR scheduler -- halves the LR after 5 epochs without val-loss improvement ---
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5, factor=0.5)
    train_loss_history, val_loss_history = [], []
    best_val_loss = float('inf')

    for epoch in range(args.epochs):
        train_loss = train_epoch(model, train_loader, optimizer, loss_fn, device, adj_matrix_tensor)
        val_loss = evaluate(model, val_loader, loss_fn, device, adj_matrix_tensor)
        train_loss_history.append(train_loss)
        val_loss_history.append(val_loss)

        print(f"Epoch {epoch + 1:02d}/{args.epochs}, Train Loss: {train_loss:.4f}, Validation Loss: {val_loss:.4f}")

        # --- [Optimization]: step the scheduler on validation loss after every epoch ---
        scheduler.step(val_loss)

        # Checkpoint whenever validation loss improves (best-model selection).
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), 'best_gcn_gru_model.pth')

    print("\n--- 训练完成，开始绘制训练图表 ---")
    plot_loss_curve(train_loss_history, val_loss_history)

    print("\n--- 加载最佳模型进行测试和预测绘图 ---")
    model.load_state_dict(torch.load('best_gcn_gru_model.pth'))
    test_mae, test_rmse, predictions, ground_truth = predict_and_evaluate(model, test_loader, scaler, device,
                                                                          adj_matrix_tensor)

    print("\n--- 测试集最终评估结果 ---")
    print(f"MAE: {test_mae:.4f}, RMSE: {test_rmse:.4f}")

    plot_predictions(predictions, ground_truth, node_index=5)