import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from torch.utils.data import DataLoader, TensorDataset
import os
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.model_selection import train_test_split

# Fix random seeds for reproducibility (torch and numpy only; CUDA ops may still be nondeterministic).
seed = 1024
torch.manual_seed(seed)
np.random.seed(seed)

# Load the train/test CSVs; paths are resolved relative to this script's own
# location so the script works regardless of the current working directory.
script_dir = os.path.dirname(os.path.abspath(__file__))
train_path = os.path.join(script_dir, '..', '..', 'dataset', 'train_data.csv')
test_path = os.path.join(script_dir, '..', '..', 'dataset', 'test_data.csv')

train_data = pd.read_csv(train_path)
test_data = pd.read_csv(test_path)

# Use the GPU when available; all tensors/models below are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Feature and target columns (bike-sharing dataset schema).
feature_columns = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday', 'workingday',
                   'weathersit', 'temp', 'atemp', 'hum', 'windspeed']
target_column = ['cnt']

# Concatenate train and test so encoding/scaling see a consistent category space.
# NOTE(review): fitting the scalers on train+test leaks test-set statistics into
# training; consider fit on training rows only and transform() on test rows.
data = pd.concat([train_data, test_data], ignore_index=True)

# One-hot encode the categorical features.
categorical_features = ['season', 'yr', 'mnth', 'hr', 'holiday', 'weekday', 'workingday', 'weathersit']
numerical_features = ['temp', 'atemp', 'hum', 'windspeed']

# scikit-learn >= 1.2 renamed `sparse` to `sparse_output` (and >= 1.4 removed
# `sparse` entirely); try the new keyword first, fall back for old versions.
try:
    encoder = OneHotEncoder(sparse_output=False)
except TypeError:
    encoder = OneHotEncoder(sparse=False)
encoded_categorical = encoder.fit_transform(data[categorical_features])

# Min-max scale the numeric features to [0, 1].
scaler_numeric = MinMaxScaler()
scaled_numeric = scaler_numeric.fit_transform(data[numerical_features])

# Min-max scale the target; scaler_target is reused later to invert predictions.
scaler_target = MinMaxScaler()
scaled_target = scaler_target.fit_transform(data[target_column])

# Final feature matrix: one-hot categoricals followed by scaled numerics.
features = np.hstack([encoded_categorical, scaled_numeric])
targets = scaled_target  # scaled 'cnt' column

# Split back into the original train/test partitions (concat preserved row order).
train_size = len(train_data)
test_size = len(test_data)

train_features = features[:train_size]
train_targets = targets[:train_size]

test_features = features[train_size:]
test_targets = targets[train_size:]

# Build supervised sliding-window samples for sequence-to-sequence training.
def create_sequences(features, targets, input_seq_len, output_seq_len):
    """Slice aligned (features, targets) rows into input/output windows.

    X[i] holds `input_seq_len` consecutive feature rows starting at i, and
    y[i] holds the `output_seq_len` target rows immediately following them.
    Returns a pair of numpy arrays (empty when the data is too short).
    """
    n_windows = len(features) - input_seq_len - output_seq_len + 1
    xs = [features[start:start + input_seq_len] for start in range(n_windows)]
    ys = [targets[start + input_seq_len:start + input_seq_len + output_seq_len]
          for start in range(n_windows)]
    return np.array(xs), np.array(ys)

# Sequence-length configuration.
seq_length = 96  # input window length (hours)
predict_type = "short"  # "short" -> 96-step horizon, anything else -> 240-step horizon

output_seq_len = 96 if predict_type == "short" else 240

# Build the sliding-window sequences for the training and test partitions.
train_X, train_y = create_sequences(train_features, train_targets, seq_length, output_seq_len)
test_X, test_y = create_sequences(test_features, test_targets, seq_length, output_seq_len)

# Carve a validation set off the end of the training windows.
def train_val_split(X, y, val_ratio=0.1):
    """Chronological (unshuffled) split: the last `val_ratio` fraction of the
    samples becomes the validation set, the rest stays for training.

    Returns (X_train, X_val, y_train, y_val).
    """
    n_total = X.shape[0]
    n_val = int(n_total * val_ratio)
    cut = n_total - n_val
    return X[:cut], X[cut:], y[:cut], y[cut:]

# Chronological train/validation split: last 10% of the windows -> validation.
train_X_seq, val_X_seq, train_y_seq, val_y_seq = train_val_split(train_X, train_y, val_ratio=0.1)


def _as_float_tensor(arr):
    # numpy array -> float32 tensor on the active device.
    return torch.tensor(arr, dtype=torch.float32).to(device)


# Convert every split to PyTorch tensors.
train_X_tensor = _as_float_tensor(train_X_seq)
train_y_tensor = _as_float_tensor(train_y_seq)
val_X_tensor = _as_float_tensor(val_X_seq)
val_y_tensor = _as_float_tensor(val_y_seq)
test_X_tensor = _as_float_tensor(test_X)
test_y_tensor = _as_float_tensor(test_y)

# Wrap each split in a TensorDataset.
train_dataset = TensorDataset(train_X_tensor, train_y_tensor)
val_dataset = TensorDataset(val_X_tensor, val_y_tensor)
test_dataset = TensorDataset(test_X_tensor, test_y_tensor)

# Loaders keep shuffle=False so chronological order is preserved everywhere.
batch_size = 64
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# Model definition.
class LSTMModel(nn.Module):
    """Seq2seq LSTM forecaster.

    An encoder LSTM reads the (batch, seq, input_size) history window; a
    separate decoder LSTM then runs autoregressively for `output_seq_len`
    steps, emitting one scalar prediction per step via a linear head.
    Output shape: (batch, output_seq_len, 1).
    """

    def __init__(self, input_size, hidden_size, num_layers, output_seq_len):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_seq_len = output_seq_len
        self.input_size = input_size
        # Encoder consumes the full input window at once.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        # Decoder runs one time step at a time, seeded with the encoder state.
        self.decoder_lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, 1)
        # Maps a hidden state back to input_size so it can feed the decoder.
        self.hidden_to_input = nn.Linear(hidden_size, input_size)

    def forward(self, x):
        # Encoder. Allocate the initial state on x.device rather than relying
        # on a module-level `device` global, so the model works wherever its
        # input lives (CPU/GPU) without external state.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        encoder_outputs, (hn, cn) = self.lstm(x, (h0, c0))
        # Seed the decoder with the encoder's last-step output, projected back
        # to input space: (batch, 1, input_size).
        decoder_input = self.hidden_to_input(encoder_outputs[:, -1, :]).unsqueeze(1)
        predictions = []
        # Autoregressive decoding: each step's hidden output is projected and
        # fed back in as the next step's input.
        for _ in range(self.output_seq_len):
            decoder_output, (hn, cn) = self.decoder_lstm(decoder_input, (hn, cn))
            pred = self.fc(decoder_output.squeeze(1))  # (batch, 1)
            predictions.append(pred)
            decoder_input = self.hidden_to_input(decoder_output.squeeze(1)).unsqueeze(1)
        return torch.stack(predictions, dim=1)  # (batch, output_seq_len, 1)

# Hyperparameters; input_size is the post-encoding feature width.
input_size = train_X.shape[2]
hidden_size = 64
num_layers = 2
model = LSTMModel(input_size, hidden_size, num_layers, output_seq_len).to(device)

# Loss and optimizer.
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# LR decay: multiply the learning rate by 0.1 after 10 epochs with no
# validation-loss improvement. (This is a scheduler, NOT early stopping —
# training still runs for all epochs.)
# NOTE(review): `verbose` is deprecated in newer PyTorch ReduceLROnPlateau —
# confirm the targeted torch version before upgrading.
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=True)

# Training loop: per-epoch train pass, validation pass, LR scheduling, and
# best-model checkpointing. No early stopping — all epochs always run.
epochs = 100
best_val_loss = float('inf')
best_epoch = -1
train_losses = []
val_losses = []

for epoch in range(epochs):
    model.train()
    epoch_train_losses = []
    # NOTE(review): the loop variable `targets` shadows the module-level
    # `targets` array from preprocessing; harmless here, but rename if refactoring.
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs.view(-1), targets.view(-1))
        loss.backward()
        optimizer.step()
        epoch_train_losses.append(loss.item())
    train_loss = np.mean(epoch_train_losses)
    train_losses.append(train_loss)

    # Validation pass (no gradients).
    model.eval()
    epoch_val_losses = []
    with torch.no_grad():
        for inputs, targets in val_loader:
            outputs = model(inputs)
            val_loss = criterion(outputs.view(-1), targets.view(-1))
            epoch_val_losses.append(val_loss.item())
    val_loss = np.mean(epoch_val_losses)
    val_losses.append(val_loss)

    # Step the plateau scheduler on the freshly computed validation loss.
    scheduler.step(val_loss)

    print(f'Epoch [{epoch+1}/{epochs}], Train Loss: {train_loss:.6f}, Val Loss: {val_loss:.6f}')

    # Checkpoint whenever validation loss improves (best-model selection).
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_epoch = epoch
        torch.save(model.state_dict(), 'lstm_best_model.pth')

# Plot training and validation loss curves.
plt.figure(figsize=(10,5))
plt.plot(range(1, epochs+1), train_losses, label='Train Loss')
plt.plot(range(1, epochs+1), val_losses, label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training and Validation Losses')
plt.legend()
plt.savefig('loss_curve.png')  # persist the figure to disk
plt.show()

print(f'Best Validation Loss: {best_val_loss:.6f} at epoch {best_epoch+1}')

# Reload the best checkpoint. map_location makes the load robust even when the
# checkpoint was written from a different device than the one currently in use
# (e.g. trained on GPU, evaluated on CPU).
model.load_state_dict(torch.load('lstm_best_model.pth', map_location=device))

# Evaluate the (best) model on the test set.
model.eval()
predictions = []
actuals = []
with torch.no_grad():
    for inputs, targets in test_loader:
        outputs = model(inputs)
        predictions.append(outputs.cpu())
        actuals.append(targets.cpu())

predictions = torch.cat(predictions).numpy()
actuals = torch.cat(actuals).numpy()

# Flatten to (n_samples * output_seq_len, 1) so the target scaler can invert them.
predictions = predictions.reshape(-1, 1)
actuals = actuals.reshape(-1, 1)

# Undo the min-max scaling to recover rental counts in original units.
predictions = scaler_target.inverse_transform(predictions)
actuals = scaler_target.inverse_transform(actuals)

# Compute MSE and MAE on the de-normalized values.
# NOTE(review): mid-file import — conventionally this belongs at the top of the file.
from sklearn.metrics import mean_squared_error, mean_absolute_error

MSE_value = mean_squared_error(actuals, predictions)
MAE_value = mean_absolute_error(actuals, predictions)
print(f'Mean Squared Error on Test Data: {MSE_value:.4f}')
print(f'Mean Absolute Error on Test Data: {MAE_value:.4f}')

# Plot the first 1000 flattened steps of actual vs. predicted counts.
plt.figure(figsize=(12,6))
plt.plot(actuals[:1000], label='Actual')
plt.plot(predictions[:1000], label='Predicted', linestyle='dashed')
plt.xlabel('Time Steps')
plt.ylabel('Bike Rental Count')
plt.title('LSTM Prediction vs Actual')
plt.legend()
plt.savefig('prediction_vs_actual.png')  # persist the figure to disk
plt.show()