import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import DataLoader, TensorDataset
import torch.nn.functional as F
import os

# Seed every RNG this script uses so repeated experiments are reproducible.
def set_seed(seed):
    """Seed Python's, NumPy's and PyTorch's RNGs with the given integer.

    BUG FIX: the original only seeded torch and numpy; Python's ``random``
    module (used internally by some libraries) was left unseeded.
    ``torch.manual_seed`` also seeds all CUDA devices in modern PyTorch.
    """
    import random  # local import: keeps the file's top-level imports untouched
    random.seed(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)

# Load the train/test CSVs from paths relative to this script's location.
script_dir = os.path.dirname(os.path.abspath(__file__))

train_path = os.path.join(script_dir, '..', '..', 'dataset', 'train_data.csv')
test_path = os.path.join(script_dir, '..', '..', 'dataset', 'test_data.csv')

train_data = pd.read_csv(train_path)
test_data = pd.read_csv(test_path)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Column headers: the feature columns required by the task.
data_head = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday', 'workingday',
             'weathersit', 'temp', 'atemp', 'hum', 'windspeed',
             'casual', 'registered', 'cnt']

# Select the required columns from each split.
train_features = train_data[data_head]
test_features = test_data[data_head]

# Concatenate train and test so scaling is applied uniformly.
# NOTE(review): fitting the scalers on train+test jointly leaks test-set
# statistics into preprocessing — confirm this is acceptable for the task.
features = pd.concat([train_features, test_features], axis=0).reset_index(drop=True)

# Model input features; 'cnt' is the prediction target.
input_features = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday', 'workingday',
                  'weathersit', 'temp', 'atemp', 'hum', 'windspeed']

# Extract inputs and target as numpy arrays; y is kept 2-D for the scaler.
X = features[input_features].values
y = features['cnt'].values.reshape(-1, 1)

# Scale both inputs and target to [-1, 1].
scaler_X = MinMaxScaler(feature_range=(-1, 1))
X_normalized = scaler_X.fit_transform(X)

scaler_y = MinMaxScaler(feature_range=(-1, 1))
y_normalized = scaler_y.fit_transform(y)

# Two prediction settings — short-term and long-term — trained separately.
predict_types = ["short", "long"]

# Final aggregated metrics per prediction type.
results = {}

# Run the full experiment suite once per prediction horizon.
for predict_type in predict_types:
    # Horizon factor: the target window spans factor * seq_length steps.
    factor = 1 if predict_type == "short" else 2.5  # long term: 240/96 = 2.5

    # Build sliding-window (input, target) pairs for sequence forecasting.
    def prepare_data(X, y, seq_length, horizon_factor=None):
        """Slice X/y into overlapping forecasting windows.

        Args:
            X: array of shape (T, n_features) — input features.
            y: array of shape (T, 1) — target series.
            seq_length: length of each input window.
            horizon_factor: prediction-horizon multiplier; each target window
                spans ``int(seq_length * horizon_factor)`` steps immediately
                after its input window. Defaults to the enclosing loop's
                ``factor`` global (backward compatible with existing callers).

        Returns:
            Tuple of numpy arrays with shapes
            (N, seq_length, n_features) and (N, int(seq_length*horizon_factor), 1).
        """
        f = factor if horizon_factor is None else horizon_factor
        input_seq, target_seq = [], []
        # Stop early enough that every target window fits inside the data.
        for i in range(len(X) - int(seq_length * (f + 1))):
            input_seq.append(X[i:i + seq_length])
            target_seq.append(y[i + seq_length:i + int(seq_length * (f + 1))])
        return np.array(input_seq), np.array(target_seq)

    seq_length = 96  # length of each input window (96 hours)

    # Split training rows: first 80% for training, remainder for validation.
    train_size = int(len(train_data) * 0.8)
    val_size = len(train_data) - train_size

    X_train_full = X_normalized[:len(train_data)]
    y_train_full = y_normalized[:len(train_data)]

    # NOTE(review): the training slice extends seq_length * int(factor + 1)
    # rows past train_size, so the last training targets overlap the start of
    # the validation region — confirm this mild leakage is intended.
    train_inputs_np, train_targets_np = prepare_data(X_train_full[:train_size + seq_length * int(factor + 1)],
                                                     y_train_full[:train_size + seq_length * int(factor + 1)],
                                                     seq_length)

    val_inputs_np, val_targets_np = prepare_data(X_train_full[train_size:],
                                                 y_train_full[train_size:],
                                                 seq_length)

    # Test-set windows come from the rows after the training portion.
    X_test = X_normalized[len(train_data):]
    y_test = y_normalized[len(train_data):]

    test_inputs_np, test_targets_np = prepare_data(X_test, y_test, seq_length)

    # Number of repeated runs per setting (at least five, per requirements).
    num_experiments = 5

    # Per-run averaged test metrics, aggregated after the experiment loop.
    mse_list = []
    mae_list = []

    for exp_num in range(num_experiments):
        print(f"\n{'='*20} Starting experiment {exp_num + 1} for {predict_type} term prediction {'='*20}\n")
        # Seed each run differently so the repeats are independent experiments.
        set_seed(exp_num)

        # Convert the numpy windows to float32 tensors on the target device.
        train_inputs = torch.tensor(train_inputs_np, dtype=torch.float32).to(device)
        train_targets = torch.tensor(train_targets_np, dtype=torch.float32).to(device)

        val_inputs = torch.tensor(val_inputs_np, dtype=torch.float32).to(device)
        val_targets = torch.tensor(val_targets_np, dtype=torch.float32).to(device)

        test_inputs = torch.tensor(test_inputs_np, dtype=torch.float32).to(device)
        test_targets = torch.tensor(test_targets_np, dtype=torch.float32).to(device)

        # Wrap the tensors in datasets.
        train_dataset = TensorDataset(train_inputs, train_targets)
        val_dataset = TensorDataset(val_inputs, val_targets)
        test_dataset = TensorDataset(test_inputs, test_targets)

        # Data loaders; test uses batch_size=1 so per-window losses can be tracked.
        train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)
        test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
        # Hybrid forecaster: a Transformer encoder learns a residual correction
        # to the raw features, a GRU summarizes the corrected sequence, and two
        # linear layers regroup the sequence into the forecast horizon.
        class TimeSeriesTransformer(nn.Module):
            def __init__(self, input_size, hidden_size, output_size, num_layers, num_heads,
                         dropout=0.1, horizon_factor=None, seq_len=None):
                """Build the model.

                Args:
                    input_size: number of input features per time step.
                    hidden_size: Transformer/GRU hidden width.
                    output_size: output features per forecast step (1 = 'cnt').
                    num_layers: number of Transformer encoder layers.
                    num_heads: attention heads per encoder layer.
                    dropout: dropout rate inside the encoder layers.
                    horizon_factor: forecast horizon as a multiple of seq_len;
                        defaults to the script-level ``factor`` global so
                        existing call sites keep working unchanged.
                    seq_len: input window length; defaults to the script-level
                        ``seq_length`` global (backward compatible).
                """
                super(TimeSeriesTransformer, self).__init__()
                # Resolve generalized parameters, falling back to the enclosing
                # script's globals for backward compatibility.
                self._factor = factor if horizon_factor is None else horizon_factor
                self._seq_len = seq_length if seq_len is None else seq_len
                self.embedding = nn.Linear(input_size, hidden_size)
                # BUG FIX: ``dropout`` was accepted but never forwarded, so the
                # encoder silently ran with the library-default rate instead.
                self.encoder_layer = nn.TransformerEncoderLayer(
                    hidden_size, nhead=num_heads,
                    dim_feedforward=hidden_size * num_heads,
                    dropout=dropout, batch_first=True)
                self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)
                self.trans = nn.Linear(hidden_size, input_size)
                self.linear2 = nn.GRU(input_size, hidden_size, batch_first=True)
                # Each of the seq_len steps emits 16*factor values; ``forward``
                # regroups them into factor*seq_len steps of 16 features each.
                self.fc = nn.Linear(hidden_size, int(16 * self._factor))
                self.linear = nn.Linear(16, output_size)

            def forward(self, x):
                """Map (batch, seq_len, input_size) to (batch, factor*seq_len, output_size)."""
                h = self.embedding(x)
                h = self.encoder(h)
                h = self.trans(h)
                h = torch.tanh(h)  # F.tanh is deprecated; torch.tanh is identical
                out, _ = self.linear2(x + h)  # residual connection in feature space
                out = self.fc(out)
                out = self.linear(out.view(out.size(0), int(self._factor * self._seq_len), 16))
                return out

        # Hyperparameters.
        input_size = len(input_features)  # number of input features
        hidden_size = 64  # Transformer hidden size
        output_size = 1  # output dimension ('cnt')
        num_layers = 2  # number of Transformer encoder layers
        num_heads = 8  # number of attention heads
        dropout = 0.1

        # Build the model, loss functions and optimizer on the chosen device.
        model = TimeSeriesTransformer(input_size, hidden_size, output_size, num_layers, num_heads, dropout).to(device)
        MSE_loss_fn = nn.MSELoss()
        MAE_loss_fn = nn.L1Loss()
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

        # Directory for checkpoints and plots (created on first use).
        output_path = os.path.join(script_dir, '.', 'output/', predict_type)
        if not os.path.exists(output_path):
            os.makedirs(output_path)

        # Training configuration: longer schedule for the long-term setting.
        epochs = 500 if predict_type == 'short' else 1000
        best_epoch = 0
        best_val_loss = float('inf')
        train_MSE_losses = []
        val_avg_MSE_losses = []
        for epoch in range(epochs):
            model.train()
            for inputs, targets in train_loader:
                optimizer.zero_grad()
                outputs = model(inputs)
                MSE_loss = MSE_loss_fn(outputs, targets)
                MAE_loss = MAE_loss_fn(outputs, targets)
                MSE_loss.backward()
                optimizer.step()

            # Validate after every epoch.
            model.eval()
            with torch.no_grad():
                val_MSE_losses = []
                val_MAE_losses = []
                for val_inputs, val_targets in val_loader:
                    val_outputs = model(val_inputs)
                    val_MSE_loss = MSE_loss_fn(val_outputs, val_targets)
                    val_MAE_loss = MAE_loss_fn(val_outputs, val_targets)
                    val_MSE_losses.append(val_MSE_loss.item())
                    val_MAE_losses.append(val_MAE_loss.item())

                average_val_MSE_loss = np.mean(val_MSE_losses)
                average_val_MAE_loss = np.mean(val_MAE_losses)
                # NOTE(review): this appends only the LAST training batch's loss
                # for the epoch, not an epoch average — confirm intended.
                train_MSE_losses.append(MSE_loss.item())
                val_avg_MSE_losses.append(average_val_MSE_loss)
                if (epoch + 1) % 100 == 0 or epoch == 0:
                    print(
                        f'Epoch [{epoch + 1}/{epochs}], MSE Loss: {MSE_loss.item():.6f}, Val MSE Loss: {average_val_MSE_loss:.6f}, MAE Loss: {MAE_loss.item():.6f}, Val MAE Loss: {average_val_MAE_loss:.6f}')
                # Checkpoint the best validation loss, but only after a warm-up
                # period (400/500 epochs) so early noisy minima are not kept.
                if average_val_MSE_loss < best_val_loss and epoch >= (400 if predict_type == 'short' else 500):
                    torch.save(model.state_dict(), os.path.join(output_path, f'transformer_{seq_length}h_best.pt'))
                    # (Alternative: keep the best state_dict in memory rather than on disk.)
                    # best_model_state = model.state_dict().copy()
                    best_val_loss = average_val_MSE_loss
                    best_epoch = epoch

        print(f'Best Epoch: {best_epoch + 1}')

        # Reload the best checkpoint and evaluate on the held-out test set.
        model.load_state_dict(torch.load(os.path.join(output_path, f'transformer_{seq_length}h_best.pt')))
        model.eval()
        with torch.no_grad():
            test_MSE_losses = []
            test_MAE_losses = []
            min_loss = float('inf')
            draw_idx = 0
            for idx, (test_inputs, test_targets) in enumerate(test_loader):
                test_inputs, test_targets = test_inputs.to(device), test_targets.to(device)
                test_outputs = model(test_inputs)
                test_MSE_loss = MSE_loss_fn(test_outputs, test_targets)
                test_MAE_loss = MAE_loss_fn(test_outputs, test_targets)
                test_MSE_losses.append(test_MSE_loss.item())
                test_MAE_losses.append(test_MAE_loss.item())

                # Keep the window with the smallest MAE for plotting later.
                if test_MAE_loss.item() < min_loss:
                    min_loss = test_MAE_loss.item()
                    draw_inputs = test_inputs
                    draw_targets = test_targets
                    draw_prediction = test_outputs.cpu().numpy()
                    draw_idx = idx

            # Aggregate the per-window test losses.
            average_test_MSE_loss = np.mean(test_MSE_losses)
            std_test_MSE_loss = np.std(test_MSE_losses)
            average_test_MAE_loss = np.mean(test_MAE_losses)
            std_test_MAE_loss = np.std(test_MAE_losses)

            print(f'Test MSE Loss: {average_test_MSE_loss:.6f} ± {std_test_MSE_loss:.6f}')
            print(f'Test MAE Loss: {average_test_MAE_loss:.6f} ± {std_test_MAE_loss:.6f}')

            # Record this run's averaged metrics.
            mse_list.append(average_test_MSE_loss)
            mae_list.append(average_test_MAE_loss)

        # Plot diagnostics only for the final run of each setting.
        if exp_num == num_experiments - 1:
            plt.figure(figsize=(10, 5))
            plt.plot(range(1, epochs + 1), train_MSE_losses, label='Train Loss')
            plt.plot(range(1, epochs + 1), val_avg_MSE_losses, label='Validation Loss')
            plt.title(f'Training and Validation Loss Over Epochs ({predict_type} term prediction)')
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.legend()
            # Save the loss curve to disk.
            output_loss_file = os.path.join(output_path, f'loss_curve_{predict_type}.png')
            plt.savefig(output_loss_file)
            plt.show()

            # Plot predicted vs. actual counts for the best test window.

            # Undo target scaling on the predictions.
            predicted_cnt = scaler_y.inverse_transform(draw_prediction.reshape(-1, 1))

            # Undo target scaling on the ground truth.
            actual_cnt = scaler_y.inverse_transform(draw_targets.cpu().numpy().reshape(-1, 1))

            # Recover the 'cnt' values of the input window.
            # NOTE(review): assumes test_loader preserves window order so that
            # draw_idx maps directly onto rows after the training portion.
            input_seq_start_idx = len(train_data) + draw_idx
            input_seq_end_idx = input_seq_start_idx + seq_length
            input_cnt = scaler_y.inverse_transform(y_normalized[input_seq_start_idx:input_seq_end_idx])

            # Time axes: input window followed by the forecast horizon.
            time_axis_input = np.arange(seq_length)
            time_axis_output = np.arange(seq_length, seq_length + int(seq_length * factor))

            # Draw input, actual and predicted series on one figure.
            plt.figure(figsize=(12, 6))
            plt.plot(time_axis_input, input_cnt, label='Input cnt')
            plt.plot(time_axis_output, actual_cnt, label='Actual cnt')
            plt.plot(time_axis_output, predicted_cnt, label='Predicted cnt', linestyle='dashed')

            plt.title(f'Bike Rental Count Prediction ({predict_type} term)')
            plt.xlabel('Time Steps')
            plt.ylabel('Rental Count')
            plt.legend()
            # Save the comparison plot to disk.
            output_vs_file = os.path.join(output_path, f'prediction_vs_actual_{predict_type}.png')
            plt.savefig(output_vs_file)
            plt.show()

    # Mean and standard deviation of the per-run test metrics.
    mse_mean = np.mean(mse_list)
    mse_std = np.std(mse_list)
    mae_mean = np.mean(mae_list)
    mae_std = np.std(mae_list)

    print(f"\n{'='*20} Final Results for {predict_type} term prediction {'='*20}")
    print(f"Average Test MSE Loss over {num_experiments} runs: {mse_mean:.6f} ± {mse_std:.6f}")
    print(f"Average Test MAE Loss over {num_experiments} runs: {mae_mean:.6f} ± {mae_std:.6f}")

    # Store the aggregated metrics for the final summary printout.
    results[predict_type] = {
        'MSE_mean': mse_mean,
        'MSE_std': mse_std,
        'MAE_mean': mae_mean,
        'MAE_std': mae_std
    }

# Print the final summary across both prediction settings.
print("\n====================== Summary of All Results ======================")
for predict_type in predict_types:
    print(f"\nResults for {predict_type} term prediction:")
    print(f"Average Test MSE Loss over {num_experiments} runs: {results[predict_type]['MSE_mean']:.6f} ± {results[predict_type]['MSE_std']:.6f}")
    print(f"Average Test MAE Loss over {num_experiments} runs: {results[predict_type]['MAE_mean']:.6f} ± {results[predict_type]['MAE_std']:.6f}")