import datetime
import os
import torch
import logging

import graphgps  # noqa, register custom modules
from graphgps.optimizer.extra_optimizers import ExtendedSchedulerConfig

from torch_geometric.graphgym.cmd_args import parse_args
from torch_geometric.graphgym.config import (cfg, dump_cfg,
                                             set_cfg, load_cfg,
                                             makedirs_rm_exist)
from torch_geometric.graphgym.loader import create_loader
from torch_geometric.graphgym.logger import set_printing
from torch_geometric.graphgym.optim import create_optimizer, \
    create_scheduler, OptimizerConfig
from torch_geometric.graphgym.model_builder import create_model
from torch_geometric.graphgym.train import train
from torch_geometric.graphgym.utils.agg_runs import agg_runs
from torch_geometric.graphgym.utils.comp_budget import params_count
from torch_geometric.graphgym.utils.device import auto_select_device
from torch_geometric.graphgym.register import train_dict
from torch_geometric import seed_everything

from graphgps.finetuning import load_pretrained_model_cfg, \
    init_model_from_pretrained
from graphgps.logger import create_logger
import warnings

def new_optimizer_config(cfg):
    """Build an :obj:`OptimizerConfig` from the optimizer fields of *cfg*."""
    opt = cfg.optim
    return OptimizerConfig(
        optimizer=opt.optimizer,
        base_lr=opt.base_lr,
        weight_decay=opt.weight_decay,
        momentum=opt.momentum,
    )


def new_scheduler_config(cfg):
    """Build an :obj:`ExtendedSchedulerConfig` from the scheduler and
    training fields of *cfg*."""
    opt = cfg.optim
    return ExtendedSchedulerConfig(
        scheduler=opt.scheduler,
        steps=opt.steps,
        lr_decay=opt.lr_decay,
        max_epoch=opt.max_epoch,
        reduce_factor=opt.reduce_factor,
        schedule_patience=opt.schedule_patience,
        min_lr=opt.min_lr,
        num_warmup_epochs=opt.num_warmup_epochs,
        train_mode=cfg.train.mode,
        eval_period=cfg.train.eval_period,
    )


def custom_set_out_dir(cfg, cfg_fname, name_tag):
    """Set custom main output directory path to cfg.
    Include the config filename and name_tag in the new :obj:`cfg.out_dir`.

    Args:
        cfg (CfgNode): Configuration node
        cfg_fname (string): Filename for the yaml format configuration file
        name_tag (string): Additional name tag to identify this execution of the
            configuration file, specified in :obj:`cfg.name_tag`
    """
    # Config filename without its directory and extension, e.g. "my-exp".
    base = os.path.splitext(os.path.basename(cfg_fname))[0]
    if name_tag:
        base = f"{base}-{name_tag}"
    cfg.out_dir = os.path.join(cfg.out_dir, base)


def custom_set_run_dir(cfg, run_id):
    """Custom output directory naming for each experiment run.

    Args:
        cfg (CfgNode): Configuration node
        run_id (int): Main for-loop iter id (the random seed or dataset split)
    """
    run_dir = os.path.join(cfg.out_dir, str(run_id))
    cfg.run_dir = run_dir
    # Create the run directory. With auto-resume enabled an existing
    # directory is kept so training can pick up where it left off;
    # otherwise any previous contents are wiped first.
    if cfg.train.auto_resume:
        os.makedirs(run_dir, exist_ok=True)
    else:
        makedirs_rm_exist(run_dir)


def run_loop_settings():
    """Create main loop execution settings based on the current cfg.

    Configures the main execution loop to run in one of two modes:
    1. 'multi-seed' - Reproduces default behaviour of GraphGym when
        args.repeats controls how many times the experiment run is repeated.
        Each iteration is executed with a random seed set to an increment from
        the previous one, starting at initial cfg.seed.
    2. 'multi-split' - Executes the experiment run over multiple dataset splits,
        these can be multiple CV splits or multiple standard splits. The random
        seed is reset to the initial cfg.seed value for each run iteration.

    Returns:
        List of run IDs for each loop iteration
        List of rng seeds to loop over
        List of dataset split indices to loop over
    """
    # NOTE(review): reads the module-level `args` set in `__main__`.
    if len(cfg.run_multiple_splits) == 0:
        # 'multi-seed' run mode: repeat the experiment args.repeat times on
        # the same dataset split, incrementing the seed by one per iteration.
        num_iterations = args.repeat
        seeds = [cfg.seed + i for i in range(num_iterations)]
        split_indices = [cfg.dataset.split_index] * num_iterations
        # The varying seed doubles as the per-run identifier.
        run_ids = seeds
    else:
        # 'multi-split' run mode: one run per dataset split, all with the
        # same initial seed.
        if args.repeat != 1:
            raise NotImplementedError("Running multiple repeats of multiple "
                                      "splits in one run is not supported.")
        num_iterations = len(cfg.run_multiple_splits)
        seeds = [cfg.seed] * num_iterations
        split_indices = cfg.run_multiple_splits
        # The varying split index doubles as the per-run identifier.
        run_ids = split_indices
    return run_ids, seeds, split_indices


if __name__ == '__main__':
    # Silence warnings to keep the console output clean.
    warnings.filterwarnings("ignore")


    # Load cmd line args
    args = parse_args()
    # Load config file
    # Initialize cfg with the GraphGym defaults.
    set_cfg(cfg)
    # Overwrite defaults with values from the YAML file and CLI overrides.
    load_cfg(cfg, args)
    # Compose the main output directory from the config filename and name tag.
    custom_set_out_dir(cfg, args.cfg_file, cfg.name_tag)
    # Dump the fully-resolved config to disk for later inspection.
    dump_cfg(cfg)


    # Set Pytorch environment
    # Cap the number of CPU threads PyTorch may use.
    torch.set_num_threads(cfg.num_threads)
    # Repeat for multiple experiment runs
    # One iteration per (run_id, seed, split_index) triple; see run_loop_settings.
    for run_id, seed, split_index in zip(*run_loop_settings()):
        # Set configurations for each run
        # Per-run output directory: cfg.out_dir/<run_id>.
        custom_set_run_dir(cfg, run_id)
        # Configure logging/printing for this run.
        set_printing()
        # Select the dataset split for this run.
        cfg.dataset.split_index = split_index
        # Record this run's seed and ID in the config.
        cfg.seed = seed
        cfg.run_id = run_id
        # Seed all RNGs for reproducibility.
        seed_everything(cfg.seed)
        # Pick a compute device automatically (greedy strategy).
        auto_select_device('greedy')
        # When fine-tuning, merge in the pretrained model's configuration.
        if cfg.pretrained.dir:
            cfg = load_pretrained_model_cfg(cfg)
        # Log the identifying settings of this run.
        logging.info(f"[*] Run ID {run_id}: seed={cfg.seed}, "
                     f"split_index={cfg.dataset.split_index}")
        logging.info(f"    Starting now: {datetime.datetime.now()}")
        # Set machine learning pipeline
        loaders = create_loader()
        loggers = create_logger()
        # custom_train expects three loggers for 'train', 'valid' and 'test'.
        # GraphGym code creates one logger/loader for each of the 'train_mask' etc.
        # attributes in the dataset. As a work around it, we create one logger for each
        # of the types.
        # loaders are a const, so it is ok to just duplicate the loader.
        if cfg.dataset.name == 'ogbn-arxiv' or cfg.dataset.name == 'ogbn-proteins':
            # Extra loggers for the validation and test phases.
            loggers_2 = create_logger()
            loggers_3 = create_logger()
            loggers_2[0].name = "val"
            loggers_3[0].name = "test"
            loggers.extend(loggers_2)
            loggers.extend(loggers_3)
            # Duplicate the single loader so train/val/test each get one.
            loaders = loaders*3
        model = create_model()
        # Load pretrained weights when fine-tuning.
        if cfg.pretrained.dir:
            model = init_model_from_pretrained(
                model, cfg.pretrained.dir, cfg.pretrained.freeze_main,
                cfg.pretrained.reset_prediction_head
            )
        optimizer = create_optimizer(model.parameters(),
                                     new_optimizer_config(cfg))
        scheduler = create_scheduler(optimizer, new_scheduler_config(cfg))
        # Print model info
        logging.info(model)
        logging.info(cfg)
        # Record the parameter count in the config for bookkeeping.
        cfg.params = params_count(model)
        logging.info('Num parameters: %s', cfg.params)
        # Start training
        if cfg.train.mode == 'standard':
            if cfg.wandb.use:
                logging.warning("[W] WandB logging is not supported with the "
                                "default train.mode, set it to `custom`")
            # Default GraphGym training loop.
            train(loggers, loaders, model, optimizer, scheduler)
        else:
            # Dispatch to a custom training loop registered under this mode.
            train_dict[cfg.train.mode](loggers, loaders, model, optimizer,
                                       scheduler)
    # Aggregate results from different seeds
    try:
        agg_runs(cfg.out_dir, cfg.metric_best)
    except Exception as e:
        # Aggregation failure is non-fatal; log it and continue.
        logging.info(f"Failed when trying to aggregate multiple runs: {e}")
    # When being launched in batch mode, mark a yaml as done
    if args.mark_done:
        # Rename the config file to flag this job as completed.
        os.rename(args.cfg_file, f'{args.cfg_file}_done')
    logging.info(f"[*] All done: {datetime.datetime.now()}")
