# # Generating Adversarial Predictive Models with WGAN-GP
# import torch
# import torch.nn as nn
# import numpy as np
# import pandas as pd
# from matplotlib import pyplot as plt
# from sklearn.preprocessing import MinMaxScaler
# from torch.utils.data import DataLoader, TensorDataset
# import torch.nn.functional as F
# import os

# # 设置随机种子函数，在多次实验中随机更改种子进行随机实验
# def set_seed(seed):
#     torch.manual_seed(seed)
#     np.random.seed(seed)

# # 读取数据
# script_dir = os.path.dirname(os.path.abspath(__file__))

# train_path = os.path.join(script_dir, '..', '..', 'dataset', 'train_data.csv')
# test_path = os.path.join(script_dir, '..', '..', 'dataset', 'test_data.csv')

# train_data = pd.read_csv(train_path)
# test_data = pd.read_csv(test_path)

# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# # 数据表头，根据题目要求选择需要的特征列
# data_head = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday', 'workingday',
#              'weathersit', 'temp', 'atemp', 'hum', 'windspeed',
#              'casual', 'registered', 'cnt']

# # 提取需要的特征列
# train_features = train_data[data_head]
# test_features = test_data[data_head]

# # 合并训练和测试数据，便于统一处理
# features = pd.concat([train_features, test_features], axis=0).reset_index(drop=True)

# # 输入特征列表，'cnt' 为目标变量
# input_features = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday', 'workingday',
#                   'weathersit', 'temp', 'atemp', 'hum', 'windspeed']

# # 提取输入特征和目标变量
# X = features[input_features].values
# y = features['cnt'].values.reshape(-1, 1)

# # 数据标准化
# scaler_X = MinMaxScaler(feature_range=(-1, 1))
# X_normalized = scaler_X.fit_transform(X)

# scaler_y = MinMaxScaler(feature_range=(-1, 1))
# y_normalized = scaler_y.fit_transform(y)

# # 定义预测类型列表：短期预测和长期预测，需要分别训练模型
# predict_types = ["short", "long"]

# # 记录最终结果的字典
# results = {}

# # 定义生成器模型
# # class Generator(nn.Module):
# #     def __init__(self, input_size, cnn_out_channels, tcn_channels, lstm_hidden, transformer_hidden, num_heads, factor, seq_length):
# #         super(Generator, self).__init__()
# #         # CNN 模块
# #         self.cnn = nn.Sequential(
# #             nn.Conv1d(in_channels=input_size, out_channels=cnn_out_channels, kernel_size=3, padding=1),
# #             nn.ReLU(),
# #             nn.MaxPool1d(kernel_size=2),
# #             nn.Conv1d(in_channels=cnn_out_channels, out_channels=cnn_out_channels*2, kernel_size=3, padding=1),
# #             nn.ReLU(),
# #             nn.MaxPool1d(kernel_size=2)
# #         )
# #         # TCN 模块
# #         self.tcn = nn.Sequential(
# #             nn.Conv1d(in_channels=cnn_out_channels*2, out_channels=tcn_channels, kernel_size=3, padding=1),
# #             nn.ReLU(),
# #             nn.MaxPool1d(kernel_size=2)
# #         )
# #         # LSTM 模块
# #         self.lstm = nn.LSTM(input_size=tcn_channels, hidden_size=lstm_hidden, batch_first=True)
# #         # Transformer 模块
# #         encoder_layer = nn.TransformerEncoderLayer(d_model=transformer_hidden, nhead=num_heads)
# #         self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=2)
# #         # Cross Attention
# #         self.cross_attention = nn.MultiheadAttention(embed_dim=lstm_hidden, num_heads=num_heads)
# #         # Fully Connected Layers
# #         self.fc1 = nn.Linear(lstm_hidden, 128)
# #         self.fc2 = nn.Linear(128, int(factor * seq_length))
# #         # self.output_layer = nn.Linear(factor * seq_length, 1)
# #         fc_out_size = int(seq_length / 8 * factor * seq_length)
# #         self.output_layer = nn.Linear(fc_out_size, int(factor * seq_length))
# #         self.factor = factor
# #         self.seq_length = seq_length
# #         self.dropout = nn.Dropout(0.5)

# #     def forward(self, x):
# #         # x shape: (batch_size, seq_length, input_size)
# #         x = x.permute(0, 2, 1)  # 转换为 (batch_size, input_size, seq_length) 以适应 Conv1d
# #         cnn_out = self.cnn(x)  # (batch_size, cnn_out_channels*2, seq_length/4)
# #         tcn_out = self.tcn(cnn_out)  # (batch_size, tcn_channels, seq_length/8)
# #         tcn_out = tcn_out.permute(0, 2, 1)  # (batch_size, seq_length/8, tcn_channels)
# #         # LSTM
# #         lstm_out, _ = self.lstm(tcn_out)  # (batch_size, seq_length/8, lstm_hidden)
# #         # Transformer
# #         transformer_out = self.transformer(tcn_out)  # (batch_size, seq_length/8, transformer_hidden)
# #         # Cross Attention (nn.MultiheadAttention 期望输入为 (seq_length, batch_size, embed_dim))
# #         lstm_out_trans = lstm_out.permute(1, 0, 2)  # (seq_length/8, batch_size, lstm_hidden)
# #         transformer_out_trans = transformer_out.permute(1, 0, 2)  # (seq_length/8, batch_size, transformer_hidden)
# #         attn_output, _ = self.cross_attention(lstm_out_trans, transformer_out_trans, transformer_out_trans)  # (seq_length/8, batch_size, lstm_hidden)
# #         attn_output = attn_output.permute(1, 0, 2)  # (batch_size, seq_length/8, lstm_hidden)
# #         # Fully Connected Layers
# #         fc_out = F.relu(self.fc1(attn_output))  # (batch_size, seq_length/8, 128)
# #         fc_out = self.dropout(fc_out)
# #         fc_out = self.fc2(fc_out)  # (batch_size, seq_length/8, factor * seq_length)
# #         fc_out = fc_out.view(fc_out.size(0), -1)  # (batch_size, seq_length/8 * factor * seq_length)
# #         out = self.output_layer(fc_out)  # (batch_size, 1)
# #         return out
# # 仅使用 Transformer 作为生成器
# class Generator(nn.Module):
#     def __init__(self, input_size, transformer_hidden, num_heads, factor, seq_length):
#         super(Generator, self).__init__()
#         # 输入投影层，将 input_size 映射到 transformer_hidden
#         self.input_projection = nn.Linear(input_size, transformer_hidden)
        
#         # Transformer 模块
#         encoder_layer = nn.TransformerEncoderLayer(d_model=transformer_hidden, nhead=num_heads)
#         self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=2)
        
#         # Fully Connected Layers
#         self.fc1 = nn.Linear(seq_length * transformer_hidden, 128)
#         self.fc2 = nn.Linear(128, int(factor * seq_length))
#         self.dropout = nn.Dropout(0.5)
#         self.output_layer = nn.Linear(int(factor * seq_length), int(factor * seq_length))
        
#         self.factor = factor
#         self.seq_length = seq_length

#     def forward(self, x):
#         # x shape: (batch_size, seq_length, input_size)
#         x = x.permute(1, 0, 2)  # 转换为 (seq_length, batch_size, input_size) 以适应 Transformer
#         transformer_in = self.input_projection(x)  # (seq_length, batch_size, transformer_hidden)
#         transformer_out = self.transformer(transformer_in)  # (seq_length, batch_size, transformer_hidden)
#         transformer_out = transformer_out.permute(1, 0, 2)  # (batch_size, seq_length, transformer_hidden)
        
#         # Flatten
#         transformer_out = transformer_out.contiguous().view(transformer_out.size(0), -1)  # (batch_size, seq_length * transformer_hidden)
        
#         # Fully Connected Layers
#         fc_out = F.relu(self.fc1(transformer_out))  # (batch_size, 128)
#         fc_out = self.dropout(fc_out)
#         fc_out = F.relu(self.fc2(fc_out))  # (batch_size, factor * seq_length)
#         out = self.output_layer(fc_out)  # (batch_size, factor * seq_length)
        
#         return out

# # 定义判别器模型
# # class Discriminator(nn.Module):
# #     def __init__(self, input_size, hidden_size, seq_length, factor):
# #         super(Discriminator, self).__init__()
# #         self.model = nn.Sequential(
# #             # nn.Linear(seq_length * factor + 1, hidden_size),
# #             # nn.Linear(seq_length * input_size + 1, hidden_size),
# #             nn.Linear(seq_length * input_size + int(factor * seq_length), hidden_size),
# #             nn.LeakyReLU(0.2),
# #             nn.Linear(hidden_size, hidden_size),
# #             nn.LeakyReLU(0.2),
# #             nn.Linear(hidden_size, 1),
# #             nn.Sigmoid()
# #         )
    
# #     def forward(self, x, y):
# #         # x: 输入序列 (batch_size, seq_length, input_size)
# #         # y: 目标序列或生成序列 (batch_size, 1)
# #         x = x.view(x.size(0), -1)  # (batch_size, seq_length * input_size)
# #         y = y.view(y.size(0), -1)  # (batch_size, factor * seq_length)
# #         combined = torch.cat((x, y), dim=1)  # (batch_size, seq_length * input_size + 1)
# #         out = self.model(combined)
# #         return out
# class Discriminator(nn.Module):
#     def __init__(self, input_size, hidden_size, seq_length, factor):
#         super(Discriminator, self).__init__()
#         self.model = nn.Sequential(
#             nn.Linear(seq_length * input_size + int(factor * seq_length), hidden_size),
#             nn.LeakyReLU(0.2),
#             nn.Linear(hidden_size, hidden_size),
#             nn.LeakyReLU(0.2),
#             nn.Linear(hidden_size, 1)
#             # nn.Sigmoid() # WGAN 不使用 Sigmoid
#         )
    
#     def forward(self, x, y):
#         # x: 输入序列 (batch_size, seq_length, input_size)
#         # y: 目标序列或生成序列 (batch_size, factor * seq_length)
#         x = x.view(x.size(0), -1)  # (batch_size, seq_length * input_size)
#         y = y.view(y.size(0), -1)  # (batch_size, factor * seq_length)
#         combined = torch.cat((x, y), dim=1)  # (batch_size, seq_length * input_size + factor * seq_length)
#         out = self.model(combined)
#         return out
    
# # 定义 WGAN-GP 的梯度惩罚函数
# def compute_gradient_penalty(D, real_samples, fake_samples, device, lambda_gp=10):
#     # 随机权重 alpha，用于插值
#     alpha = torch.rand(real_samples.size(0), 1, device=device)
#     alpha = alpha.expand_as(real_samples)
    
#     # 插值样本
#     interpolates = alpha * real_samples + ((1 - alpha) * fake_samples)
#     interpolates = interpolates.requires_grad_(True)
    
#     d_interpolates = D(interpolates)
    
#     # 对插值样本的输出求梯度
#     gradients = torch.autograd.grad(
#         outputs=d_interpolates,
#         inputs=interpolates,
#         grad_outputs=torch.ones(d_interpolates.size(), device=device),
#         create_graph=True,
#         retain_graph=True,
#         only_inputs=True
#     )[0]
    
#     gradients = gradients.view(gradients.size(0), -1)
#     gradient_norm = gradients.norm(2, dim=1)
    
#     # 计算梯度惩罚
#     gradient_penalty = lambda_gp * ((gradient_norm - 1) ** 2).mean()
#     return gradient_penalty

# # 定义训练和测试过程
# for predict_type in predict_types:
#     # 定义因子，根据预测类型调整
#     factor = 1 if predict_type == "short" else 2.5  # 长期预测因子为2.5，对应240/96=2.5

#     # 准备数据集
#     def prepare_data(X, y, seq_length):
#         input_seq, target_seq = [], []
#         for i in range(len(X) - int(seq_length * (factor + 1))):
#             input_seq.append(X[i:i + seq_length])
#             target_seq.append(y[i + seq_length:i + int(seq_length * (factor + 1))])
#         return np.array(input_seq), np.array(target_seq)

#     seq_length = 96  # 输入序列长度

#     # 划分训练集和验证集，使用训练数据的80%作为训练集，20%作为验证集
#     train_size = int(len(train_data) * 0.8)
#     val_size = len(train_data) - train_size

#     X_train_full = X_normalized[:len(train_data)]
#     y_train_full = y_normalized[:len(train_data)]

#     train_inputs_np, train_targets_np = prepare_data(
#         X_train_full[:train_size + seq_length * int(factor + 1)],
#         y_train_full[:train_size + seq_length * int(factor + 1)],
#         seq_length
#     )

#     val_inputs_np, val_targets_np = prepare_data(
#         X_train_full[train_size:],
#         y_train_full[train_size:],
#         seq_length
#     )

#     # 测试集数据
#     X_test = X_normalized[len(train_data):]
#     y_test = y_normalized[len(train_data):]

#     test_inputs_np, test_targets_np = prepare_data(X_test, y_test, seq_length)

#     # 设置进行实验的次数（最少五次）
#     num_experiments = 5

#     # 记录每次实验的结果
#     mse_list = []
#     mae_list = []

#     for exp_num in range(num_experiments):
#         print(f"\n{'='*20} Starting experiment {exp_num + 1} for {predict_type} term prediction {'='*20}\n")
#         # 设置随机种子
#         set_seed(exp_num)

#         # 数据转换为Tensor，并移动到设备上
#         train_inputs = torch.tensor(train_inputs_np, dtype=torch.float32).to(device)
#         # train_targets = torch.tensor(train_targets_np[:, -1, :], dtype=torch.float32).to(device)  # 只预测最后一个时间步
#         val_inputs = torch.tensor(val_inputs_np, dtype=torch.float32).to(device)
#         # val_targets = torch.tensor(val_targets_np[:, -1, :], dtype=torch.float32).to(device)
#         test_inputs = torch.tensor(test_inputs_np, dtype=torch.float32).to(device)
#         # test_targets = torch.tensor(test_targets_np[:, -1, :], dtype=torch.float32).to(device)

#         train_targets = torch.tensor(train_targets_np.reshape(train_targets_np.shape[0], -1), dtype=torch.float32).to(device)  # 预测整个序列
#         val_targets = torch.tensor(val_targets_np.reshape(val_targets_np.shape[0], -1), dtype=torch.float32).to(device)
#         test_targets = torch.tensor(test_targets_np.reshape(test_targets_np.shape[0], -1), dtype=torch.float32).to(device)

#         # 创建数据集
#         train_dataset = TensorDataset(train_inputs, train_targets)
#         val_dataset = TensorDataset(val_inputs, val_targets)
#         test_dataset = TensorDataset(test_inputs, test_targets)

#         # 创建数据加载器
#         train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
#         val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)
#         test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

#         # 初始化生成器和判别器
#         input_size = len(input_features)  # 输入特征数
#         # cnn_out_channels = 16
#         # tcn_channels = 64
#         # lstm_hidden = 64
#         transformer_hidden = 64
#         num_heads = 8
#         hidden_size_d = 128  # 判别器隐藏层大小

#         # generator = Generator(input_size=input_size, cnn_out_channels=cnn_out_channels, tcn_channels=tcn_channels,
#         #                       lstm_hidden=lstm_hidden, transformer_hidden=transformer_hidden, num_heads=num_heads,
#         #                       factor=factor, seq_length=seq_length).to(device)

#         generator = Generator(input_size=input_size, transformer_hidden=transformer_hidden, num_heads=num_heads,
#                               factor=factor, seq_length=seq_length).to(device)

#         discriminator = Discriminator(input_size=input_size, hidden_size=hidden_size_d, seq_length=seq_length,
#                                       factor=factor).to(device)

#         # 定义损失函数
#         # WGAN 不使用 BCELoss，而是使用 Wasserstein 损失
#         adversarial_loss = nn.BCELoss()
#         generator_loss_fn = nn.MSELoss()
#         mae_loss_fn = nn.L1Loss()

#         # 定义优化器
#         optimizer_G = torch.optim.Adam(generator.parameters(), lr=0.0001, betas=(0.5, 0.999))
#         optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=0.0001, betas=(0.5, 0.999))

#         # 定义学习率调度器
#         scheduler_G = torch.optim.lr_scheduler.StepLR(optimizer_G, step_size=100, gamma=0.1)
#         scheduler_D = torch.optim.lr_scheduler.StepLR(optimizer_D, step_size=100, gamma=0.1)

#         # 保存模型的路径
#         output_path = os.path.join(script_dir, '.', 'output/', predict_type)
#         if not os.path.exists(output_path):
#             os.makedirs(output_path)

#         # 训练模型
#         epochs = 500 if predict_type == 'short' else 1000
#         best_epoch = 0
#         best_val_loss = float('inf')
#         train_losses = []
#         val_losses = []
#         generator_losses = []
#         discriminator_losses = []

#         # 早停参数
#         patience = 50
#         trigger_times = 0

#         # WGAN-GP 参数
#         lambda_gp = 10
#         n_critic = 5  # 每训练一个生成器，训练5个判别器

#         for epoch in range(epochs):
#             generator.train()
#             discriminator.train()
#             d_losses_epoch = []
#             g_losses_epoch = []
#             for inputs, targets in train_loader:
#                 # Adversarial ground truths
#                 valid = torch.ones(inputs.size(0), 1, device=device)
#                 fake = torch.zeros(inputs.size(0), 1, device=device)

#                 # ---------------------
#                 #  训练生成器
#                 # ---------------------
#                 optimizer_G.zero_grad()
#                 # 生成预测
#                 predictions = generator(inputs)
#                 # 生成判别器的输出
#                 pred_fake = discriminator(inputs, predictions)
#                 # 损失函数
#                 g_loss = adversarial_loss(pred_fake, valid) + generator_loss_fn(predictions, targets) + mae_loss_fn(predictions, targets)
#                 g_loss.backward()
#                 optimizer_G.step()

#                 # ---------------------
#                 #  训练判别器
#                 # ---------------------
#                 optimizer_D.zero_grad()
#                 # 判别真实数据
#                 pred_real = discriminator(inputs, targets)
#                 d_real_loss = adversarial_loss(pred_real, valid)
#                 # 判别生成数据
#                 pred_fake = discriminator(inputs, predictions.detach())
#                 d_fake_loss = adversarial_loss(pred_fake, fake)
#                 # 总判别器损失
#                 d_loss = (d_real_loss + d_fake_loss) / 2
#                 d_loss.backward()
#                 optimizer_D.step()

#             # 在验证集上进行验证
#             generator.eval()
#             discriminator.eval()
#             with torch.no_grad():
#                 val_MSE_losses = []
#                 val_MAE_losses = []
#                 for val_inputs, val_targets in val_loader:
#                     val_predictions = generator(val_inputs)
#                     val_MSE = generator_loss_fn(val_predictions, val_targets)
#                     val_MAE = mae_loss_fn(val_predictions, val_targets)
#                     val_MSE_losses.append(val_MSE.item())
#                     val_MAE_losses.append(val_MAE.item())

#                 average_val_MSE = np.mean(val_MSE_losses)
#                 average_val_MAE = np.mean(val_MAE_losses)
#                 train_losses.append(g_loss.item())
#                 val_losses.append(average_val_MSE)

#                 if (epoch + 1) % 100 == 0 or epoch == 0:
#                     print(f'Epoch [{epoch + 1}/{epochs}], G Loss: {g_loss.item():.6f}, D Loss: {d_loss.item():.6f}, Val MSE: {average_val_MSE:.6f}, Val MAE: {average_val_MAE:.6f}')

#                 if average_val_MSE < best_val_loss and epoch >= (400 if predict_type == 'short' else 500):
#                     torch.save(generator.state_dict(), os.path.join(output_path, f'gapm_{seq_length}h_best.pt'))
#                     # best_model_state_G = generator.state_dict().copy()
#                     best_val_loss = average_val_MSE
#                     best_epoch = epoch

#         print(f'Best Epoch: {best_epoch + 1}')

#         # 使用最佳模型进行测试
#         generator.load_state_dict(torch.load(os.path.join(output_path, f'gapm_{seq_length}h_best.pt')))
#         generator.eval()
#         with torch.no_grad():
#             test_MSE_losses = []
#             test_MAE_losses = []
#             min_loss = float('inf')
#             draw_idx = 0
#             for idx, (test_inputs, test_targets) in enumerate(test_loader):
#                 test_inputs, test_targets = test_inputs.to(device), test_targets.to(device)
#                 test_predictions = generator(test_inputs)
#                 test_MSE = generator_loss_fn(test_predictions, test_targets)
#                 test_MAE = mae_loss_fn(test_predictions, test_targets)
#                 test_MSE_losses.append(test_MSE.item())
#                 test_MAE_losses.append(test_MAE.item())

#                 # 保存损失最小的一组结果用于绘图
#                 if test_MAE.item() < min_loss:
#                     min_loss = test_MAE.item()
#                     draw_inputs = test_inputs
#                     draw_targets = test_targets
#                     draw_prediction = test_predictions.cpu().numpy()
#                     draw_idx = idx

#             # 计算测试集的平均损失和标准差
#             average_test_MSE_loss = np.mean(test_MSE_losses)
#             std_test_MSE_loss = np.std(test_MSE_losses)
#             average_test_MAE_loss = np.mean(test_MAE_losses)
#             std_test_MAE_loss = np.std(test_MAE_losses)

#             print(f'Test MSE Loss: {average_test_MSE_loss:.6f} ± {std_test_MSE_loss:.6f}')
#             print(f'Test MAE Loss: {average_test_MAE_loss:.6f} ± {std_test_MAE_loss:.6f}')

#             # 记录本次实验的结果
#             mse_list.append(average_test_MSE_loss)
#             mae_list.append(average_test_MAE_loss)

#         # 绘制损失曲线，仅在最后一次实验绘制
#         if exp_num == num_experiments - 1:
#             plt.figure(figsize=(10, 5))
#             plt.plot(range(1, epochs + 1), train_losses, label='Generator Loss')
#             plt.plot(range(1, epochs + 1), val_losses, label='Validation MSE Loss')
#             plt.title(f'Training and Validation Loss Over Epochs ({predict_type} term prediction)')
#             plt.xlabel('Epochs')
#             plt.ylabel('Loss')
#             plt.legend()
#             # 保存为文件
#             output_loss_file = os.path.join(output_path, f'loss_curve_{predict_type}.png')
#             plt.savefig(output_loss_file)
#             plt.show()

#             # 绘制预测结果与真实值的比较曲线

#             # 反标准化预测结果
#             predicted_cnt = scaler_y.inverse_transform(draw_prediction.reshape(-1, 1))

#             # 反标准化目标值
#             actual_cnt = scaler_y.inverse_transform(draw_targets.cpu().numpy().reshape(-1, 1))

#             # 获取输入序列对应的 'cnt' 值
#             input_seq_start_idx = len(train_data) + draw_idx
#             input_seq_end_idx = input_seq_start_idx + seq_length
#             input_cnt = scaler_y.inverse_transform(y_normalized[input_seq_start_idx:input_seq_end_idx])

#             # 创建时间轴
#             time_axis_input = np.arange(seq_length)
#             time_axis_output = np.arange(seq_length, seq_length + int(seq_length * factor))

#             # 绘制曲线
#             plt.figure(figsize=(12, 6))
#             plt.plot(time_axis_input, input_cnt, label='Input cnt')
#             plt.plot(time_axis_output, actual_cnt, label='Actual cnt')
#             plt.plot(time_axis_output, predicted_cnt, label='Predicted cnt', linestyle='dashed')

#             plt.title(f'Bike Rental Count Prediction ({predict_type} term)')
#             plt.xlabel('Time Steps')
#             plt.ylabel('Rental Count')
#             plt.legend()
#             # 保存为文件
#             output_vs_file = os.path.join(output_path, f'prediction_vs_actual_{predict_type}.png')
#             plt.savefig(output_vs_file)
#             plt.show()

#     # 计算并打印结果的平均值和标准差
#     mse_mean = np.mean(mse_list)
#     mse_std = np.std(mse_list)
#     mae_mean = np.mean(mae_list)
#     mae_std = np.std(mae_list)

#     print(f"\n{'='*20} Final Results for {predict_type} term prediction {'='*20}")
#     print(f"Average Test MSE Loss over {num_experiments} runs: {mse_mean:.6f} ± {mse_std:.6f}")
#     print(f"Average Test MAE Loss over {num_experiments} runs: {mae_mean:.6f} ± {mae_std:.6f}")

#     # 将结果保存到字典中
#     results[predict_type] = {
#         'MSE_mean': mse_mean,
#         'MSE_std': mse_std,
#         'MAE_mean': mae_mean,
#         'MAE_std': mae_std
#     }

# # 打印最终结果
# print("\n====================== Summary of All Results ======================")
# for predict_type in predict_types:
#     print(f"\nResults for {predict_type} term prediction:")
#     print(f"Average Test MSE Loss over {num_experiments} runs: {results[predict_type]['MSE_mean']:.6f} ± {results[predict_type]['MSE_std']:.6f}")
#     print(f"Average Test MAE Loss over {num_experiments} runs: {results[predict_type]['MAE_mean']:.6f} ± {results[predict_type]['MAE_std']:.6f}")

# Generating Adversarial Predictive Models with WGAN-GP
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import DataLoader, TensorDataset
import torch.nn.functional as F
import os

# Seed helper so repeated experiments can be run with different, reproducible seeds
def set_seed(seed):
    """Seed every RNG the script uses so a run is reproducible.

    Args:
        seed: integer seed applied to torch (CPU and all CUDA devices)
              and to numpy's global RNG.
    """
    torch.manual_seed(seed)
    # Also seed CUDA RNGs: torch.manual_seed alone does not make GPU runs
    # reproducible. This is a no-op on CPU-only machines.
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)

# Load data (paths are resolved relative to this script's location)
script_dir = os.path.dirname(os.path.abspath(__file__))

train_path = os.path.join(script_dir, '..', '..', 'dataset', 'train_data.csv')
test_path = os.path.join(script_dir, '..', '..', 'dataset', 'test_data.csv')

train_data = pd.read_csv(train_path)
test_data = pd.read_csv(test_path)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Column headers: the feature columns required by the task
data_head = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday', 'workingday',
             'weathersit', 'temp', 'atemp', 'hum', 'windspeed',
             'casual', 'registered', 'cnt']

# Keep only the required columns
train_features = train_data[data_head]
test_features = test_data[data_head]

# Concatenate train and test rows so scaling is fitted on a single frame
# NOTE(review): fitting the scalers on combined train+test data leaks test
# statistics into preprocessing — acceptable for coursework, worth confirming.
features = pd.concat([train_features, test_features], axis=0).reset_index(drop=True)

# Model input features; 'cnt' is the prediction target
input_features = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday', 'workingday',
                  'weathersit', 'temp', 'atemp', 'hum', 'windspeed']

# Extract inputs and target as numpy arrays
X = features[input_features].values
y = features['cnt'].values.reshape(-1, 1)

# Scale both inputs and target into [-1, 1] (matches the GAN's value range)
scaler_X = MinMaxScaler(feature_range=(-1, 1))
X_normalized = scaler_X.fit_transform(X)

scaler_y = MinMaxScaler(feature_range=(-1, 1))
y_normalized = scaler_y.fit_transform(y)

# Prediction horizons: a separate model is trained for short- and long-term
predict_types = ["short", "long"]

# Final metrics per prediction type are collected here
results = {}

# Generator network: a Transformer encoder followed by a dense regression head.
class Generator(nn.Module):
    """Map an input feature window to a normalized prediction sequence.

    Input:  (batch, seq_length, input_size)
    Output: (batch, int(factor * seq_length))
    """

    def __init__(self, input_size, transformer_hidden, num_heads, factor, seq_length):
        super(Generator, self).__init__()
        # Project raw features up to the transformer's model dimension.
        self.input_projection = nn.Linear(input_size, transformer_hidden)

        # Two stacked self-attention encoder layers.
        encoder_layer = nn.TransformerEncoderLayer(d_model=transformer_hidden, nhead=num_heads)
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=2)

        # Dense head: flatten the encoded sequence, regress the target horizon.
        self.fc1 = nn.Linear(seq_length * transformer_hidden, 128)
        self.fc2 = nn.Linear(128, int(factor * seq_length))
        self.dropout = nn.Dropout(0.5)
        self.output_layer = nn.Linear(int(factor * seq_length), int(factor * seq_length))

        self.factor = factor
        self.seq_length = seq_length

    def forward(self, x):
        """Run a forecast; x has shape (batch, seq_length, input_size)."""
        # Transformer modules here use the default batch_first=False layout,
        # so swap to (seq_length, batch, input_size) first.
        seq_first = x.transpose(0, 1)
        projected = self.input_projection(seq_first)   # (seq, batch, hidden)
        encoded = self.transformer(projected)          # (seq, batch, hidden)
        encoded = encoded.transpose(0, 1)              # (batch, seq, hidden)

        # Collapse time and feature axes for the dense head.
        flat = encoded.reshape(encoded.size(0), -1)    # (batch, seq * hidden)

        hidden = self.dropout(torch.relu(self.fc1(flat)))  # (batch, 128)
        hidden = torch.relu(self.fc2(hidden))              # (batch, factor * seq)
        return self.output_layer(hidden)                   # (batch, factor * seq)

# 定义判别器模型（WGAN 不使用 Sigmoid）
class Discriminator(nn.Module):
    def __init__(self, input_size, hidden_size, seq_length, factor):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(seq_length * input_size + int(factor * seq_length), hidden_size),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_size, hidden_size),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_size, 1)  # 不使用 Sigmoid
        )
    
    def forward(self, x, y):
        # x: 输入序列 (batch_size, seq_length, input_size)
        # y: 目标序列或生成序列 (batch_size, factor * seq_length)
        x = x.view(x.size(0), -1)  # (batch_size, seq_length * input_size)
        y = y.view(y.size(0), -1)  # (batch_size, factor * seq_length)
        combined = torch.cat((x, y), dim=1)  # (batch_size, seq_length * input_size + factor * seq_length)
        out = self.model(combined)
        return out

# 定义 WGAN-GP 的梯度惩罚函数
def compute_gradient_penalty(D, x, y_real, y_fake, device, lambda_gp=10):
    """WGAN-GP gradient penalty (Gulrajani et al. style).

    Penalizes the critic when the gradient norm of its output w.r.t. random
    interpolations between real and fake targets deviates from 1.

    Args:
        D: critic taking (x, y) and returning (batch_size, 1) scores.
        x: conditioning input sequences, (batch_size, seq_length, input_size).
        y_real: ground-truth targets, (batch_size, factor * seq_length).
        y_fake: generated targets, same shape as y_real.
        device: device on which to sample the mixing coefficients.
        lambda_gp: penalty weight (default 10).

    Returns:
        Scalar tensor: lambda_gp * mean((||grad||_2 - 1)^2).
    """
    batch_size, y_dim = y_real.size()
    # One mixing coefficient per sample, broadcast across the target dim.
    alpha = torch.rand(batch_size, 1, device=device).expand(-1, y_dim)

    # Random point on the line segment between real and fake; needs a grad.
    mixed = (alpha * y_real + (1 - alpha) * y_fake).requires_grad_(True)

    critic_scores = D(x, mixed)  # (batch_size, 1)

    # Gradient of the critic output w.r.t. the interpolated targets.
    grads = torch.autograd.grad(
        outputs=critic_scores,
        inputs=mixed,
        grad_outputs=torch.ones_like(critic_scores),
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0].view(batch_size, -1)

    norms = grads.norm(2, dim=1)  # per-sample 2-norm
    return lambda_gp * ((norms - 1) ** 2).mean()

# Training and evaluation procedure (one full study per prediction horizon)
for predict_type in predict_types:
    # Horizon factor: the model forecasts `factor * seq_length` future steps.
    # Long-term horizon is 240 steps, i.e. 240 / 96 = 2.5 times the input.
    factor = 1 if predict_type == "short" else 2.5

    # Build sliding-window samples: `seq_length` input steps followed by
    # `factor * seq_length` flattened target steps.
    def prepare_data(X, y, seq_length):
        input_seq, target_seq = [], []
        window = int(seq_length * (factor + 1))  # full input + target span
        for i in range(len(X) - window):
            input_seq.append(X[i:i + seq_length])
            target_seq.append(y[i + seq_length:i + window].flatten())
        return np.array(input_seq), np.array(target_seq)

    seq_length = 96  # length of the conditioning (input) sequence

    # Chronological 80/20 split of the training rows into train/validation.
    train_size = int(len(train_data) * 0.8)

    X_train_full = X_normalized[:len(train_data)]
    y_train_full = y_normalized[:len(train_data)]

    # Extend the train slice by one full window so the last training sample's
    # target reaches the validation boundary.
    # FIX: this previously used `seq_length * int(factor + 1)` (288 rows for
    # factor=2.5), which disagreed with the `int(seq_length * (factor + 1))`
    # window length (336 rows) used inside prepare_data.
    train_inputs_np, train_targets_np = prepare_data(
        X_train_full[:train_size + int(seq_length * (factor + 1))],
        y_train_full[:train_size + int(seq_length * (factor + 1))],
        seq_length
    )

    val_inputs_np, val_targets_np = prepare_data(
        X_train_full[train_size:],
        y_train_full[train_size:],
        seq_length
    )

    # Held-out test rows: everything after the training portion.
    X_test = X_normalized[len(train_data):]
    y_test = y_normalized[len(train_data):]

    test_inputs_np, test_targets_np = prepare_data(X_test, y_test, seq_length)

    # Number of repeated experiments (at least five, each with its own seed).
    num_experiments = 5

    # Per-experiment test metrics.
    mse_list = []
    mae_list = []

    for exp_num in range(num_experiments):
        print(f"\n{'='*20} Starting experiment {exp_num + 1} for {predict_type} term prediction {'='*20}\n")
        # A different seed per experiment gives independent runs.
        set_seed(exp_num)

        # Move the data to the training device as float32 tensors.
        train_inputs = torch.tensor(train_inputs_np, dtype=torch.float32).to(device)
        train_targets = torch.tensor(train_targets_np, dtype=torch.float32).to(device)  # full target sequence

        val_inputs = torch.tensor(val_inputs_np, dtype=torch.float32).to(device)
        val_targets = torch.tensor(val_targets_np, dtype=torch.float32).to(device)

        test_inputs = torch.tensor(test_inputs_np, dtype=torch.float32).to(device)
        test_targets = torch.tensor(test_targets_np, dtype=torch.float32).to(device)

        # Datasets.
        train_dataset = TensorDataset(train_inputs, train_targets)
        val_dataset = TensorDataset(val_inputs, val_targets)
        test_dataset = TensorDataset(test_inputs, test_targets)

        # Loaders: batch_size=1 on test so per-sample losses can be tracked.
        train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)
        test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

        # Model hyper-parameters.
        input_size = len(input_features)  # number of input features
        transformer_hidden = 64           # must match the Generator definition
        num_heads = 8
        hidden_size_d = 128               # discriminator hidden width

        generator = Generator(input_size=input_size, transformer_hidden=transformer_hidden, num_heads=num_heads,
                              factor=factor, seq_length=seq_length).to(device)

        discriminator = Discriminator(input_size=input_size, hidden_size=hidden_size_d, seq_length=seq_length,
                                      factor=factor).to(device)

        # Reconstruction losses mixed into the generator objective and used
        # as the validation / test metrics.
        generator_loss_fn = nn.MSELoss()
        mae_loss_fn = nn.L1Loss()

        # Optimizers.
        # FIX: the file never imports `torch.optim as optim`, so the previous
        # bare `optim.Adam(...)` raised a NameError at runtime; use the fully
        # qualified name via the already-imported `torch` package.
        optimizer_G = torch.optim.Adam(generator.parameters(), lr=0.0001, betas=(0.5, 0.999))
        optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=0.0001, betas=(0.5, 0.999))

        # Learning-rate schedulers (optional): decay by 10x every 100 epochs.
        scheduler_G = torch.optim.lr_scheduler.StepLR(optimizer_G, step_size=100, gamma=0.1)
        scheduler_D = torch.optim.lr_scheduler.StepLR(optimizer_D, step_size=100, gamma=0.1)

        # Checkpoint / figure output directory.
        output_path = os.path.join(script_dir, 'output', predict_type)
        if not os.path.exists(output_path):
            os.makedirs(output_path)

        # Training configuration.
        epochs = 500 if predict_type == 'short' else 1000
        best_epoch = 0
        best_val_loss = float('inf')
        train_losses = []
        val_losses = []

        # Early stopping: checkpointing (and therefore patience counting)
        # only starts after `min_save_epoch` warm-up epochs.
        patience = 50
        trigger_times = 0
        min_save_epoch = 400 if predict_type == 'short' else 500

        # WGAN-GP parameters.
        lambda_gp = 10
        n_critic = 5  # critic updates per generator update

        for epoch in range(epochs):
            generator.train()
            discriminator.train()
            d_losses_epoch = []
            g_losses_epoch = []
            for i, (inputs, targets) in enumerate(train_loader):
                # ---------------------
                #  Train the critic
                # ---------------------
                for _ in range(n_critic):
                    discriminator.zero_grad()

                    # Real pairs.
                    real_validity = discriminator(inputs, targets)
                    # Generated pairs (detached: no generator update here).
                    fake_predictions = generator(inputs).detach()
                    fake_validity = discriminator(inputs, fake_predictions)

                    # Wasserstein critic loss.
                    d_loss = fake_validity.mean() - real_validity.mean()

                    # Gradient penalty on interpolated targets.
                    gradient_penalty = compute_gradient_penalty(discriminator, inputs, targets, fake_predictions, device, lambda_gp)

                    # Total critic loss.
                    d_total_loss = d_loss + gradient_penalty
                    d_total_loss.backward()
                    optimizer_D.step()

                    d_losses_epoch.append(d_total_loss.item())

                # ---------------------
                #  Train the generator
                # ---------------------
                generator.zero_grad()

                fake_predictions = generator(inputs)
                fake_validity = discriminator(inputs, fake_predictions)

                # Adversarial term plus MSE and MAE reconstruction terms.
                g_loss = -fake_validity.mean() + generator_loss_fn(fake_predictions, targets) + mae_loss_fn(fake_predictions, targets)
                g_loss.backward()
                optimizer_G.step()

                g_losses_epoch.append(g_loss.item())

            # Step the LR schedulers once per epoch.
            scheduler_G.step()
            scheduler_D.step()

            # Validation pass (generator reconstruction metrics only).
            generator.eval()
            discriminator.eval()
            with torch.no_grad():
                val_MSE_losses = []
                val_MAE_losses = []
                for vb_inputs, vb_targets in val_loader:
                    vb_predictions = generator(vb_inputs)
                    val_MSE_losses.append(generator_loss_fn(vb_predictions, vb_targets).item())
                    val_MAE_losses.append(mae_loss_fn(vb_predictions, vb_targets).item())

                average_val_MSE = np.mean(val_MSE_losses)
                average_val_MAE = np.mean(val_MAE_losses)
                train_losses.append(np.mean(g_losses_epoch))
                val_losses.append(average_val_MSE)

                if (epoch + 1) % 100 == 0 or epoch == 0:
                    print(f'Epoch [{epoch + 1}/{epochs}], G Loss: {np.mean(g_losses_epoch):.6f}, D Loss: {np.mean(d_losses_epoch):.6f}, Val MSE: {average_val_MSE:.6f}, Val MAE: {average_val_MAE:.6f}')

                # Save the best generator after the warm-up period.
                if average_val_MSE < best_val_loss and epoch >= min_save_epoch:
                    torch.save(generator.state_dict(), os.path.join(output_path, f'gapm_{seq_length}h_best.pt'))
                    best_val_loss = average_val_MSE
                    best_epoch = epoch
                    trigger_times = 0
                elif epoch >= min_save_epoch:
                    # FIX: the patience counter previously also ran during the
                    # warm-up epochs, so `trigger_times` reached `patience`
                    # (50) long before epoch `min_save_epoch` and training
                    # always "early stopped" with no checkpoint ever written —
                    # making the subsequent torch.load fail.  Patience now
                    # counts only once checkpointing is possible.
                    trigger_times += 1
                    if trigger_times >= patience:
                        print("Early stopping triggered!")
                        break

        print(f'Best Epoch: {best_epoch + 1}')

        # Evaluate the best checkpoint on the test set.
        generator.load_state_dict(torch.load(os.path.join(output_path, f'gapm_{seq_length}h_best.pt')))
        generator.eval()
        with torch.no_grad():
            test_MSE_losses = []
            test_MAE_losses = []
            min_loss = float('inf')
            draw_idx = 0
            draw_prediction = None
            draw_targets = None
            for idx, (test_inputs, test_targets) in enumerate(test_loader):
                test_inputs, test_targets = test_inputs.to(device), test_targets.to(device)
                test_predictions = generator(test_inputs)
                test_MSE = generator_loss_fn(test_predictions, test_targets)
                test_MAE = mae_loss_fn(test_predictions, test_targets)
                test_MSE_losses.append(test_MSE.item())
                test_MAE_losses.append(test_MAE.item())

                # Keep the lowest-MAE sample for plotting.
                if test_MAE.item() < min_loss:
                    min_loss = test_MAE.item()
                    draw_targets = test_targets
                    draw_prediction = test_predictions.cpu().numpy()
                    draw_idx = idx

            # Mean and standard deviation of the per-sample test losses.
            average_test_MSE_loss = np.mean(test_MSE_losses)
            std_test_MSE_loss = np.std(test_MSE_losses)
            average_test_MAE_loss = np.mean(test_MAE_losses)
            std_test_MAE_loss = np.std(test_MAE_losses)

            print(f'Test MSE Loss: {average_test_MSE_loss:.6f} ± {std_test_MSE_loss:.6f}')
            print(f'Test MAE Loss: {average_test_MAE_loss:.6f} ± {std_test_MAE_loss:.6f}')

            # Record this experiment's metrics.
            mse_list.append(average_test_MSE_loss)
            mae_list.append(average_test_MAE_loss)

        # Plots are produced only on the final experiment of each horizon.
        if exp_num == num_experiments - 1:
            plt.figure(figsize=(10, 5))
            plt.plot(range(1, epoch + 2), train_losses, label='Generator Loss')
            plt.plot(range(1, epoch + 2), val_losses, label='Validation MSE Loss')
            plt.title(f'Training and Validation Loss Over Epochs ({predict_type} term prediction)')
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.legend()
            # Save the loss curve to disk.
            output_loss_file = os.path.join(output_path, f'loss_curve_{predict_type}.png')
            plt.savefig(output_loss_file)
            plt.show()

            # Prediction-vs-actual comparison for the best test sample.

            # De-normalize predictions.
            predicted_cnt = scaler_y.inverse_transform(draw_prediction.reshape(-1, 1))

            # De-normalize ground truth.
            actual_cnt = scaler_y.inverse_transform(draw_targets.cpu().numpy().reshape(-1, 1))

            # Recover the 'cnt' values of the input window.  Test sample
            # `draw_idx` starts at row `len(train_data) + draw_idx` of the
            # combined normalized series.
            input_seq_start_idx = len(train_data) + draw_idx
            input_seq_end_idx = input_seq_start_idx + seq_length
            input_cnt = scaler_y.inverse_transform(y_normalized[input_seq_start_idx:input_seq_end_idx])

            # Time axes for the input window and the forecast horizon.
            time_axis_input = np.arange(seq_length)
            time_axis_output = np.arange(seq_length, seq_length + int(seq_length * factor))

            actual_cnt = actual_cnt.flatten()
            predicted_cnt = predicted_cnt.flatten()

            # Defensive length alignment between series and time axis.
            if len(actual_cnt) != len(time_axis_output):
                print(f"调整 actual_cnt 和 predicted_cnt 的长度以匹配 time_axis_output。")
                min_length = min(len(actual_cnt), len(time_axis_output))
                actual_cnt = actual_cnt[:min_length]
                predicted_cnt = predicted_cnt[:min_length]
                time_axis_output = time_axis_output[:min_length]

            plt.figure(figsize=(12, 6))
            plt.plot(time_axis_input, input_cnt, label='Input cnt')
            plt.plot(time_axis_output, actual_cnt, label='Actual cnt')
            plt.plot(time_axis_output, predicted_cnt, label='Predicted cnt', linestyle='dashed')

            plt.title(f'Bike Rental Count Prediction ({predict_type} term)')
            plt.xlabel('Time Steps')
            plt.ylabel('Rental Count')
            plt.legend()
            # Save the comparison figure to disk.
            output_vs_file = os.path.join(output_path, f'prediction_vs_actual_{predict_type}.png')
            plt.savefig(output_vs_file)
            plt.show()

    # Aggregate metrics over all experiments for this horizon.
    mse_mean = np.mean(mse_list)
    mse_std = np.std(mse_list)
    mae_mean = np.mean(mae_list)
    mae_std = np.std(mae_list)

    print(f"\n{'='*20} Final Results for {predict_type} term prediction {'='*20}")
    print(f"Average Test MSE Loss over {num_experiments} runs: {mse_mean:.6f} ± {mse_std:.6f}")
    print(f"Average Test MAE Loss over {num_experiments} runs: {mae_mean:.6f} ± {mae_std:.6f}")

    # Store the aggregates for the final summary.
    results[predict_type] = {
        'MSE_mean': mse_mean,
        'MSE_std': mse_std,
        'MAE_mean': mae_mean,
        'MAE_std': mae_std
    }

# Final summary across all prediction horizons.
print("\n====================== Summary of All Results ======================")
for predict_type in predict_types:
    stats = results[predict_type]
    print(f"\nResults for {predict_type} term prediction:")
    print(f"Average Test MSE Loss over {num_experiments} runs: {stats['MSE_mean']:.6f} ± {stats['MSE_std']:.6f}")
    print(f"Average Test MAE Loss over {num_experiments} runs: {stats['MAE_mean']:.6f} ± {stats['MAE_std']:.6f}")
