import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from models.ConvLSTM import ConvLSTM  # 模型在 models/ 目录下
import os
import time
import matplotlib.pyplot as plt


def train_model(train_loader, train_dataset, val_loader, val_dataset, config):
    """Train a ConvLSTM model, validate each epoch, checkpoint, and plot losses.

    Args:
        train_loader: DataLoader yielding ``(x, y)`` training batches.
        train_dataset: dataset exposing ``denormalize`` for training tensors.
        val_loader: DataLoader yielding ``(x, y)`` validation batches.
        val_dataset: dataset exposing ``denormalize`` for validation tensors.
        config: dict with a ``'train'`` section holding the hyperparameters
            (``input_dim``, ``output_dim``, ``hidden_dim``, ``future_steps``,
            ``learning_rate``, ``weight_decay``, ``num_epochs``; optional
            ``kernel_size``, ``num_layers``, ``save_path``).

    Returns:
        ``(model, train_losses, val_losses)`` — the trained model and the
        per-epoch average losses. Previously nothing was returned, so existing
        callers that ignore the return value are unaffected.

    Raises:
        KeyError: if a required config field is missing.
        RuntimeError: on model construction failure, batch unpacking failure,
            or an output/label shape mismatch.
    """
    # torch.device(...) does not raise here; the original's try/except around
    # it was unreachable and has been removed.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    try:
        input_dim = config['train']['input_dim']
        output_dim = config['train']['output_dim']
        hidden_dim = config['train']['hidden_dim']
        kernel_size = config['train'].get('kernel_size', 3)
        num_layers = config['train'].get('num_layers', 1)
        future_steps = config['train']['future_steps']
        learning_rate = config['train']['learning_rate']
        weight_decay = config['train']['weight_decay']
        num_epochs = config['train']['num_epochs']
        save_path = config['train'].get('save_path', './checkpoints')
    except KeyError as e:
        raise KeyError(f"配置文件缺少关键字段: {e}") from e

    os.makedirs(save_path, exist_ok=True)

    try:
        model = ConvLSTM(input_dim, output_dim, hidden_dim, kernel_size,
                         num_layers, future_steps).to(device)
    except Exception as e:
        raise RuntimeError(f"模型初始化失败: {e}") from e

    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate,
                           weight_decay=weight_decay)

    train_losses = []
    val_losses = []

    for epoch in range(1, num_epochs + 1):
        start_time = time.time()
        model.train()
        train_loss = 0.0

        for batch_idx, batch in enumerate(tqdm(train_loader, desc=f"Epoch {epoch}/{num_epochs}")):
            try:
                x, y = batch
            except Exception as e:
                raise RuntimeError(f"数据加载失败，批次 {batch_idx}: {e}") from e

            x, y = x.to(device), y.to(device)

            optimizer.zero_grad()
            output = model(x)  # expected shape [B, T_out, C, H, W]

            if output.shape != y.shape:
                raise RuntimeError(f"模型输出形状 {output.shape} 与标签形状 {y.shape} 不匹配")

            # De-normalize before computing the loss so it is reported in
            # original units. NOTE(review): gradients therefore flow through
            # denormalize() — confirm training on de-normalized values (rather
            # than normalized ones) is intentional.
            output_denorm = train_dataset.denormalize(output.clone())
            y_denorm = train_dataset.denormalize(y.clone())

            loss = criterion(output_denorm, y_denorm)
            loss.backward()
            optimizer.step()

            # Weight by batch size so the epoch average is a per-sample mean
            # even when the last batch is smaller.
            train_loss += loss.item() * x.size(0)

        avg_train_loss = train_loss / len(train_loader.dataset)
        elapsed = time.time() - start_time
        print(f"Epoch {epoch} Train Loss: {avg_train_loss:.6f}  用时: {elapsed:.2f}秒")

        # Validation. (The original set eval mode and reset val_loss twice;
        # the duplicate statements were removed.)
        print('验证：')
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for batch_idx, batch in enumerate(val_loader):
                x, y = batch
                x, y = x.to(device), y.to(device)

                output = model(x)

                output_denorm = val_dataset.denormalize(output.clone())
                y_denorm = val_dataset.denormalize(y.clone())

                loss = criterion(output_denorm, y_denorm)
                val_loss += loss.item() * x.size(0)

        avg_val_loss = val_loss / len(val_loader.dataset)
        print(f"Epoch {epoch} Val Loss: {avg_val_loss:.6f}")

        train_losses.append(avg_train_loss)
        val_losses.append(avg_val_loss)

        # Save a checkpoint every 10 epochs and always at the final epoch.
        if epoch % 10 == 0 or epoch == num_epochs:
            checkpoint_path = os.path.join(save_path, f"48_6_convlstm_epoch{epoch}.pt")
            try:
                torch.save(model.state_dict(), checkpoint_path)
                print(f"Saved model to {checkpoint_path}")
            except Exception as e:
                # Best-effort: a failed save should not abort training.
                print(f"模型保存失败: {e}")

    # Plot the loss curves and save the figure next to the checkpoints.
    plt.figure(figsize=(10, 6))
    plt.plot(range(1, num_epochs + 1), train_losses, label='Train Loss', marker='o')
    plt.plot(range(1, num_epochs + 1), val_losses, label='Val Loss', marker='s')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Train and Validation Loss over Epochs')
    plt.legend()
    plt.grid(True)
    plot_path = os.path.join(save_path, 'loss_curve.png')
    plt.savefig(plot_path)
    plt.close()
    print(f"Loss 曲线已保存至 {plot_path}")

    return model, train_losses, val_losses
