# main.py - main program entry point
import argparse
import os
import sys
import time

import dill
import pandas as pd
import torch
from torch import nn
from torch.utils.data import DataLoader

from data import DataDetime, split_data_cnn
from itransformer_lstm import ItransformerLstm
from train import train, evaluate, create_optimizer_and_scheduler
# Import project-local modules
from utils import same_seeds, EarlyStopping, plot_training_loss, plot_time_series_prediction, \
    plot_error_distribution, plot_cumulative_error, plot_daily_power_curve, save_metrics_to_csv


def main():
    """Train and evaluate the ItransformerLstm power-forecasting model.

    Pipeline: load the station CSV, split it into train/valid/test windows,
    train with early stopping (saving the best checkpoint), then reload the
    best model, evaluate it on the test set, and write metrics and plots
    under ``data_record/<dataset>``.
    """
    # Fix the random seed so experiments are repeatable.
    seeds = 42
    same_seeds(seeds)

    # Default hyperparameters.
    site = '7-First-Solar'           # station name
    dataset = 'Autumn'               # dataset name (autumn data)
    batch_size = 128                 # mini-batch size
    learning_rate = 0.0001           # initial learning rate
    epochs = 150                     # maximum number of training epochs

    # Parse command-line arguments unless running under Jupyter/Colab,
    # whose kernel launchers put their own flags in argv that argparse
    # would reject. CLI flags override the defaults above.
    if not any(x in sys.argv[0] for x in ['ipykernel_launcher.py', 'colab_kernel_launcher.py']):
        parser = argparse.ArgumentParser(description="Hyperparameters")
        parser.add_argument("--batch_size", type=int, default=128)
        parser.add_argument("--learning_rate", type=float, default=0.0001)
        parser.add_argument("--epochs", type=int, default=150)
        args = parser.parse_args()
        batch_size = args.batch_size
        learning_rate = args.learning_rate
        epochs = args.epochs

    # Prefer the GPU (CUDA) when available, otherwise fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # File path and time-series windowing parameters.
    file_path = 'new_station07.csv'       # data file path (plain literal, no interpolation needed)
    time_length = 24 * 1                  # input window length (24 hours)
    predict_length = 1                    # forecast horizon (1 hour)
    multi_steps = False                   # whether to predict multiple steps ahead

    # Create output directories for model checkpoints and recorded results.
    os.makedirs(f'model_save/{dataset}', exist_ok=True)      # checkpoint directory
    os.makedirs(f'data_record/{dataset}', exist_ok=True)     # metrics/plots directory

    # Load the dataset.
    try:
        df_all = pd.read_csv(file_path, header=0)  # read the CSV file
        print(f"Successfully loaded data from {file_path}")
        print(f"Data shape: {df_all.shape}")
    except Exception as e:
        print(f"Error loading CSV file: {e}")
        # Use sys.exit: the bare builtin exit() is an interactive helper
        # injected by the site module and may not exist in all runtimes.
        sys.exit(1)

    # Preprocess: split the data into train/valid/test partitions.
    # Returns the processed splits, their timestamps, and the fitted scaler.
    data_train, data_valid, data_test, timestamp_train, timestamp_valid, timestamp_test, scalar = split_data_cnn(
        df_all, 0.8, 0.1, time_length)

    # Wrap each split in the project's time-series Dataset class.
    dataset_train = DataDetime(data=data_train, lookback_length=time_length, multi_steps=multi_steps,
                               lookforward_length=predict_length)
    dataset_valid = DataDetime(data=data_valid, lookback_length=time_length, multi_steps=multi_steps,
                               lookforward_length=predict_length)
    dataset_test = DataDetime(data=data_test, lookback_length=time_length, multi_steps=multi_steps,
                              lookforward_length=predict_length)

    # DataLoaders for batched iteration; only the training set is shuffled.
    train_loader = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
    valid_loader = DataLoader(dataset_valid, batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(dataset_test, batch_size=batch_size, shuffle=False)

    print(f"Train samples: {len(dataset_train)}")
    print(f"Valid samples: {len(dataset_valid)}")
    print(f"Test samples: {len(dataset_test)}")

    # Model hyperparameters.
    params_dict = {'hidden_dim': 32, 'layer_L': 3, 'layer_I': 4, 'heads': 12, 'dim_lstm': 32}

    # Build the ItransformerLstm model.
    model = ItransformerLstm(
        input_size=df_all.shape[1] - 1,         # input feature count (minus the time column)
        length_input=time_length,               # input sequence length
        dim_embed=params_dict['hidden_dim'],    # embedding dimension
        dim_lstm=params_dict['dim_lstm'],       # LSTM hidden size
        depth=params_dict['layer_I'],           # number of transformer layers
        heads=params_dict['heads'],             # number of attention heads
        depth_lstm=params_dict['layer_L']       # number of LSTM layers
    ).to(device)  # move the model to the selected device

    print(f"Model created with {sum(p.numel() for p in model.parameters())} parameters")

    # Loss functions (sum-reduced; presumably averaged downstream in
    # train/evaluate — TODO confirm against those implementations).
    criterion_MAE = nn.L1Loss(reduction='sum').to(device)   # MAE (mean absolute error) loss
    criterion_MSE = nn.MSELoss(reduction='sum').to(device)  # MSE (mean squared error) loss
    # Optimizer and learning-rate scheduler.
    optm, optm_schedule = create_optimizer_and_scheduler(model, learning_rate)

    # Checkpoint path.
    model_name = f"iLK_{dataset}"                           # model name
    model_save = f"model_save/{dataset}/{model_name}.pt"    # full checkpoint path

    # Training history and early stopping (patience of 10 epochs).
    train_losses, valid_losses = [], []
    earlystopping = EarlyStopping(model_save, patience=10, delta=0.0001)

    # Training loop control flag.
    need_train = True
    if need_train:
        print("Starting training...")
        try:
            for epoch in range(epochs):
                time_start = time.time()                    # epoch wall-clock start

                # Training pass.
                train_loss = train(data=train_loader, model=model, criterion=criterion_MAE, optm=optm, device=device)

                # Validation pass.
                valid_loss, ms, _, _ = evaluate(data=valid_loader, model=model, criterion=criterion_MAE,
                                                device=device)

                # Record losses for the loss-curve plot.
                train_losses.append(train_loss)
                valid_losses.append(valid_loss)

                # Step the LR scheduler on the validation loss.
                optm_schedule.step(valid_loss)

                # Early-stopping check: also saves the best checkpoint.
                earlystopping(valid_loss, model)

                # Epoch progress report.
                print('')
                print(
                    f'{model_name}|time:{(time.time() - time_start):.2f}|Loss_train:{train_loss:.4f}|Learning_rate:{optm.state_dict()["param_groups"][0]["lr"]:.4f}\n'
                    f'Loss_valid:{valid_loss:.4f}|MAE:{ms[0]:.4f}|RMSE:{ms[1]:.4f}|R2:{ms[2]:.4f}|MBE:{ms[3]:.4f}',
                    flush=True)

                # Stop once validation loss has plateaued for `patience` epochs.
                if earlystopping.early_stop:
                    print("Early stopping")
                    break

        except KeyboardInterrupt:
            print("Training interrupted by user")           # graceful Ctrl+C handling

        # Plot the training/validation loss curves.
        plot_training_loss(train_losses, valid_losses, model_name, dataset)

    # Reload the best checkpoint (saved by the early-stopping callback).
    # NOTE(security): torch.load with dill unpickles arbitrary code — only
    # load checkpoints produced by this pipeline, never untrusted files.
    print("Loading best model for testing...")
    with open(model_save, "rb") as f:
        model = torch.load(f, map_location=device, pickle_module=dill)

    # Test-set evaluation (scalar is passed so outputs are de-normalized).
    test_loss, ms_test, test_labels, test_preds = evaluate(data=test_loader, model=model, criterion=criterion_MAE,
                                                           device=device, scalar=scalar)

    # Report test metrics.
    print(
        f'Test Results - Loss:{test_loss:.4f}|MAE:{ms_test[0]:.4f}|RMSE:{ms_test[1]:.4f}|R2:{ms_test[2]:.4f}|MBE:{ms_test[3]:.4f}')

    # Persist the evaluation metrics to CSV.
    save_metrics_to_csv(site, model_name, ms_test, dataset)

    # Generate the visualization plots.
    print("Generating visualizations...")
    plot_time_series_prediction(test_labels, test_preds, timestamp_test, time_length, model_name, dataset)  # time-series prediction plot
    plot_error_distribution(test_labels, test_preds, model_name, dataset)                                   # error distribution plot
    plot_cumulative_error(test_labels, test_preds, model_name, dataset)                                     # cumulative error plot
    plot_daily_power_curve(test_labels, test_preds, timestamp_test, time_length, model_name, dataset)       # daily power curve plot

    print("Training and evaluation completed!")


if __name__ == "__main__":
    main()