#step4 train and predict
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import os
import time
from math import sqrt
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

def set_seed(seed=42):
    """Seed all RNG sources (PyTorch CPU/CUDA and NumPy) and pin cuDNN to
    deterministic kernels so training runs are reproducible."""
    torch.manual_seed(seed)
    np.random.seed(seed)
    if torch.cuda.is_available():
        # Seed the current device and every other visible GPU.
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Disable the cuDNN autotuner and force deterministic algorithms.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

set_seed(42)

class OptimizedTimeSeriesDataset(Dataset):
    """Sliding-window dataset over a 2-D array whose first column is the
    load series and whose remaining columns are weather/time features.

    Each sample is a pair:
      x: (seq_length, n_features) window of [load | weather/time] values,
      y: the following `pred_length` load values, flattened to 1-D.
    Any window containing NaN (in inputs or targets) is dropped.
    """

    def __init__(self, data, seq_length, pred_length):
        self.seq_length = seq_length
        self.pred_length = pred_length
        self.samples = []
        total_length = len(data)
        load_data = data[:, 0].reshape(-1, 1)
        weather_time_data = data[:, 1:]
        # `+ 1` so the final full window is included — the original
        # `range(total - seq - pred)` silently dropped the last valid sample.
        for i in range(total_length - seq_length - pred_length + 1):
            x_load = load_data[i:i + seq_length]
            x_weather_time = weather_time_data[i:i + seq_length]
            x_combined = np.concatenate([x_load, x_weather_time], axis=1)
            y = load_data[i + seq_length:i + seq_length + pred_length].flatten()
            # Skip windows polluted by missing values.
            if np.isnan(x_combined).any() or np.isnan(y).any():
                continue
            self.samples.append((x_combined, y))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        x, y = self.samples[index]
        return torch.tensor(x, dtype=torch.float32), torch.tensor(y, dtype=torch.float32)

class EnhancedLSTMModel(nn.Module):
    """Stacked unidirectional LSTM followed by a two-layer MLP head.

    Consumes a (batch, seq, input_size) tensor and returns a
    (batch, output_size) prediction built from the final time step.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        super(EnhancedLSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # Inter-layer dropout only applies to stacked LSTMs.
        lstm_dropout = dropout if num_layers > 1 else 0
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=False,
                            dropout=lstm_dropout)
        self.fc = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.LeakyReLU(0.01),
            nn.Dropout(dropout),
            nn.Linear(hidden_size, output_size),
        )
        self.init_weights()

    def init_weights(self):
        """Xavier-uniform initialisation for weight matrices, zeros for biases."""
        for name, param in self.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param.data, 0)
            elif 'weight' in name:
                nn.init.xavier_uniform_(param.data)

    def forward(self, x):
        batch = x.size(0)
        # Explicit zero initial hidden/cell states on the input's device.
        state_shape = (self.num_layers, batch, self.hidden_size)
        h0 = torch.zeros(*state_shape).to(x.device)
        c0 = torch.zeros(*state_shape).to(x.device)
        out, _ = self.lstm(x, (h0, c0))
        # Keep only the representation of the last time step.
        last = out[:, -1, :]
        if torch.isnan(last).any():
            print("!0!LSTM 输出含 NaN")
        return self.fc(last)

def normalize_data(df):
    """Min-max scale each column of *df* into [0, 1].

    Constant columns (max == min) are mapped to all zeros to avoid a
    division by zero. Returns the scaled copy plus the per-column minima
    and maxima so the scaling can be inverted later.
    """
    scaled = df.copy()
    lo = scaled.min()
    hi = scaled.max()
    for column in scaled.columns:
        span = hi[column] - lo[column]
        if span == 0:
            scaled[column] = 0
        else:
            scaled[column] = (scaled[column] - lo[column]) / span
    return scaled, lo, hi

def denormalize_data(data, min_val, max_val):
    """Invert min-max scaling: map values from [0, 1] back to the
    original [min_val, max_val] range. Works on scalars and arrays."""
    span = max_val - min_val
    return data * span + min_val

def main() -> None:
    """Run the full load-forecasting pipeline.

    Steps: read the Area1 CSV, forward-fill weather gaps, min-max
    normalize, split chronologically into train/val/test, train the LSTM
    with best-model checkpointing, then reload the best checkpoint,
    average the overlapping test-window predictions per time step, report
    RMSE/MAPE, and save plots plus a prediction CSV.

    NOTE(review): assumes the CSV has a 'Time' column plus 'Load' and the
    weather columns listed below at 15-minute resolution (96 points/day)
    — confirm against the data file.
    """
    df = pd.read_csv('data/Area1_Load_Weather_Time.csv')

    print("原始数据描述:\n", df.describe())
    print("\n缺失值统计:\n", df.isnull().sum())

    # Forward-fill missing weather readings; the load column is left as-is
    # (windows with NaN load are dropped by the dataset instead).
    for col in ['Max_Temperature', 'Min_Temperature', 'Avg_Temperature', 'Avg_Humidity', 'Rainfall']:
        df[col] = df[col].ffill()

    print("是否仍含NaN:", df.isnull().sum().sum())

    # Truncate to records up to 2015-01-11 and set the timestamps aside so
    # they can be re-attached to the predictions at the end.
    # NOTE(review): the string comparison assumes 'Time' is ISO-formatted
    # ('YYYY-MM-DD...') — confirm against the CSV.
    df = df[df['Time'] <= '2015-01-11']
    time_column = df['Time']
    df = df.drop('Time', axis=1)

    # Per-column min-max scaling; min/max kept for later denormalization.
    df_normalized, min_vals, max_vals = normalize_data(df)
    print("\n归一化后数据描述:\n", df_normalized.describe())

    # Chronological split; 52*96 rows each for validation and test
    # (presumably 52 days of 96 quarter-hour points — TODO confirm).
    m_all = len(df_normalized)
    m_val = 52 * 96
    m_test = 52 * 96
    m_train = m_all - m_val - m_test

    train_df = df_normalized.iloc[:m_train].values
    val_df = df_normalized.iloc[m_train:m_train + m_val].values
    test_df = df_normalized.iloc[m_train + m_val:].values

    print(f"\n数据集大小: 训练集={m_train}, 验证集={m_val}, 测试集={m_test}")

    # Three days of history (96*3 steps) predict the next day (96 steps).
    seq_length = 96 * 3
    pred_length = 96

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"\n使用设备: {device}")

    train_dataset = OptimizedTimeSeriesDataset(train_df, seq_length, pred_length)
    val_dataset = OptimizedTimeSeriesDataset(val_df, seq_length, pred_length)
    test_dataset = OptimizedTimeSeriesDataset(test_df, seq_length, pred_length)

    # Sanity-check one sample's shape and value range before training.
    x, y = next(iter(DataLoader(train_dataset, batch_size=1)))
    print(f"\n样本检查 - 输入形状: {x.shape}, 输出形状: {y.shape}")
    print(f"输入范围: [{x.min():.4f}, {x.max():.4f}]")
    print(f"输出范围: [{y.min():.4f}, {y.max():.4f}]")

    batch_size = 32
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    # Feature count taken from the probe batch: load + weather/time columns.
    input_size = x.shape[2]
    model = EnhancedLSTMModel(input_size, 128, 2, pred_length, dropout=0.3).to(device)
    print(f"\n模型结构:\n{model}")

    criterion = nn.MSELoss()
    optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-5)
    # Halve the learning rate when validation loss plateaus for 5 epochs.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5, factor=0.5)

    num_epochs = 50
    best_val_loss = float('inf')
    train_losses, val_losses = [], []

    print("\n开始训练...")
    for epoch in range(num_epochs):
        start = time.time()
        model.train()
        epoch_train_loss = 0.0
        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            # Skip the update if the forward pass produced NaNs.
            # NOTE(review): skipped batches still count in len(train_loader),
            # so the epoch average below is slightly deflated when this fires.
            if torch.isnan(outputs).any():
                print(f"❌ Epoch {epoch + 1} 发现NaN输出，跳过训练")
                continue
            loss = criterion(outputs, targets)
            loss.backward()
            # Gradient clipping guards against exploding LSTM gradients.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            epoch_train_loss += loss.item()
        epoch_train_loss /= len(train_loader)

        # Validation pass (no gradients, dropout disabled).
        model.eval()
        epoch_val_loss = 0.0
        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, targets)
                epoch_val_loss += loss.item()
        epoch_val_loss /= len(val_loader)
        scheduler.step(epoch_val_loss)

        train_losses.append(epoch_train_loss)
        val_losses.append(epoch_val_loss)
        # Checkpoint whenever validation improves.
        if epoch_val_loss < best_val_loss:
            best_val_loss = epoch_val_loss
            torch.save(model.state_dict(), 'best_model.pt')
            print(f"✅ Epoch {epoch + 1}: 保存最佳模型，验证损失: {best_val_loss:.6f}")

        print(f"Epoch {epoch + 1}/{num_epochs} | Train Loss: {epoch_train_loss:.6f} | Val Loss: {epoch_val_loss:.6f} | Time: {time.time()-start:.2f}s")

    # Plot training/validation loss curves.
    plt.figure()
    plt.plot(train_losses, label='Train')
    plt.plot(val_losses, label='Validation')
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.title("Loss Curve")
    plt.legend()
    plt.savefig("loss_curve.png")
    plt.show()

    print("\n测试阶段开始...")
    # Evaluate with the best checkpoint, not the final epoch's weights.
    model.load_state_dict(torch.load('best_model.pt'))
    model.eval()

    # Consecutive test windows overlap: window k predicts time steps
    # [k, k + pred_length). Accumulate per-step sums and counts so
    # overlapping predictions (and targets) can be averaged.
    test_samples = len(test_dataset)
    total_time_points = test_samples + pred_length - 1
    predictions_accum = np.zeros(total_time_points)
    counts_accum = np.zeros(total_time_points, dtype=int)
    targets_accum = np.zeros(total_time_points)
    counts_target = np.zeros(total_time_points, dtype=int)

    sample_index = 0
    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs = inputs.to(device)
            outputs = model(inputs)
            outputs = outputs.cpu().numpy()
            targets = targets.cpu().numpy()
            # Map back to real load units before scoring.
            outputs_denorm = denormalize_data(outputs, min_vals['Load'], max_vals['Load'])
            targets_denorm = denormalize_data(targets, min_vals['Load'], max_vals['Load'])

            batch_size = outputs.shape[0]
            for j in range(batch_size):
                start_pos = sample_index
                end_pos = sample_index + pred_length
                if end_pos > total_time_points:
                    # Clip windows that would run past the accumulator.
                    valid_len = total_time_points - start_pos
                    predictions_accum[start_pos:start_pos + valid_len] += outputs_denorm[j, :valid_len]
                    targets_accum[start_pos:start_pos + valid_len] += targets_denorm[j, :valid_len]
                    counts_accum[start_pos:start_pos + valid_len] += 1
                    counts_target[start_pos:start_pos + valid_len] += 1
                else:
                    predictions_accum[start_pos:end_pos] += outputs_denorm[j]
                    targets_accum[start_pos:end_pos] += targets_denorm[j]
                    counts_accum[start_pos:end_pos] += 1
                    counts_target[start_pos:end_pos] += 1
                sample_index += 1

    # Avoid division by zero for any step no window covered.
    counts_accum[counts_accum == 0] = 1
    counts_target[counts_target == 0] = 1
    predictions = predictions_accum / counts_accum
    targets = targets_accum / counts_target

    rmse = sqrt(mean_squared_error(targets, predictions))
    mape = mean_absolute_percentage_error(targets, predictions) * 100
    print(f"\n测试集结果: RMSE = {rmse:.2f}, MAPE = {mape:.2f}%")

    # Full-horizon view plus a zoomed 500-step detail view.
    plt.figure(figsize=(14, 8))
    plt.subplot(2, 1, 1)
    plt.plot(targets, label='Actual', alpha=0.7)
    plt.plot(predictions, label='Predicted', alpha=0.7)
    plt.title('Overall Prediction Results')
    plt.xlabel('Time Steps')
    plt.ylabel('Load')
    plt.legend()

    plt.subplot(2, 1, 2)
    total_points = len(targets)
    if total_points >= 5500:
        start_idx = 5000
    else:
        # Take the last 500 points; take everything if fewer than 500 exist.
        start_idx = max(0, total_points - 500)

    end_idx = min(start_idx + 500, total_points)
    plt.plot(targets[start_idx:end_idx], label='Actual')
    plt.plot(predictions[start_idx:end_idx], label='Predicted')

    plt.title('Detailed View (500 Time Steps)')
    plt.xlabel('Time Steps')
    plt.ylabel('Load')
    plt.legend()

    plt.tight_layout()
    plt.savefig('prediction_results.png')
    plt.show()

    # Align predictions with their timestamps: the first predicted point
    # sits seq_length steps into the test split.
    start_index = m_train + m_val + seq_length
    end_index = start_index + len(targets)
    time_slice = time_column.iloc[start_index:end_index].reset_index(drop=True)
    results_df = pd.DataFrame({
        'Time': time_slice,
        'Actual': targets,
        'Predicted': predictions
    })
    results_df.to_csv('data/prediction_results.csv', index=False)
    print("✅ 预测结果已保存到 prediction_results.csv 和 prediction_results.png")

if __name__ == "__main__":
    main()
