import torch
import os
import time
import numpy as np
import argparse
import json
import wandb.wandb_run
import yaml
import math
import wandb
import sys

# Make the script runnable from anywhere: put the current directory and its
# parent on sys.path so `cs336_basics` resolves regardless of the CWD.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
sys.path.insert(0, current_dir)

# Configure the CUDA caching allocator to reduce fragmentation / OOMs.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Project-local modules (model, optimizer, data, and training utilities).
from cs336_basics.adamw import AdamW
from cs336_basics.checkpointing import save_checkpoint, load_checkpoint
from cs336_basics.lr_schedule import lr_linear_schedule
from cs336_basics.model import Transformer
from cs336_basics.data_loader import get_batch, create_data_loader
from cs336_basics.loss import cross_entropy_loss
from cs336_basics.gradient_clip import gradient_clip
from cs336_basics.lr_schedule import lr_cosine_schedule, lr_double_schedule


class Logger:
    """Dispatches log messages to the console, a log file, and wandb."""

    def __init__(
        self,
        log_file: str | None = None,              # path of the text log file
        wandb_run: wandb.wandb_run.Run | None = None,  # active wandb run, if any
        resume: bool = False,                     # True when resuming a run
    ):
        """Remember the sinks and prepare the log file.

        When a log file is configured, its directory is created on demand,
        and the file is truncated unless we are resuming a previous run.
        """
        self.log_file = log_file
        self.wandb_run = wandb_run

        if not self.log_file:
            return
        os.makedirs(os.path.dirname(self.log_file), exist_ok=True)
        if not resume:
            # Truncate any stale log left over from an earlier run.
            open(self.log_file, "w").close()

    def log_info(self, message: str | dict, console=True):
        """Write *message* to the console and/or the log file.

        Dict messages are first rendered via :meth:`format_metrics`.
        """
        text = self.format_metrics(message) if isinstance(message, dict) else message

        if console:
            print(text)

        if self.log_file:
            with open(self.log_file, "a") as fh:
                fh.write(text + "\n")

    def log_metrics(self, metrics: dict):
        """Forward *metrics* to wandb when a run is attached."""
        if self.wandb_run:
            self.wandb_run.log(metrics)

    def format_metrics(self, metrics_dict: dict) -> str:
        """Render a metrics dict as a single `key: value | ...` line."""
        parts = [f"{key}: {value}" for key, value in metrics_dict.items()]
        return " | ".join(parts)


class Config(dict):
    """Dictionary whose keys are also accessible as attributes.

    Nested dict values are recursively wrapped in :class:`Config`, so chained
    attribute access such as ``cfg.model.d_model`` works.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Alias the attribute namespace to the mapping itself, so that
        # cfg.key and cfg["key"] read and write the same storage.
        self.__dict__ = self

        # Recursively wrap nested dicts for chained attribute access.
        for key in list(self.keys()):
            value = self[key]
            if isinstance(value, dict):
                self[key] = Config(value)


def load_config_from_file(config_path: str) -> dict:
    """Load configuration from a file and return as a dictionary
    从文件加载配置并返回字典"""
    # 获取文件扩展名
    ext = os.path.splitext(config_path)[1].lower()
    # 打开文件并根据扩展名加载配置
    with open(config_path) as f:
        if ext == ".json":
            return json.load(f)
        elif ext in [".yaml", ".yml"]:
            return yaml.safe_load(f)
        else:
            raise ValueError(f"Unsupported config file format: {ext}")


def load_config(
    config_path: str | None = None, base_config: dict | None = None
) -> Config:
    """Load a training configuration and detect the runtime device/dtype.

    Precedence: an explicit ``base_config`` (used when resuming) wins over
    ``config_path``; when neither is given, ``config/train_1.yml`` next to
    this file is used if present, otherwise a built-in fallback. Missing
    sections and fields are filled with defaults, the ``<timestamp>``
    placeholder in the run id is resolved, and the detected device/dtype
    are recorded on the config.

    Args:
        config_path: Optional path to a JSON/YAML config file.
        base_config: Optional pre-loaded config dict (takes precedence).

    Returns:
        The fully populated configuration wrapped in :class:`Config`.
    """
    if base_config is not None:
        config = base_config
    elif config_path is not None:
        config = load_config_from_file(config_path)
    else:
        config = _default_config()

    # Fill any missing sections/fields with defaults, in place.
    _fill_defaults(config)

    # Resolve the timestamp placeholder in the run id.
    config["run"]["run_id"] = config["run"]["run_id"].replace(
        "<timestamp>", f"{int(time.time())}"
    )

    # Detect the runtime device: explicit config override > CUDA > MPS > CPU.
    device = "cpu"
    if torch.cuda.is_available():
        configured = config["training"].get("device", None)
        device = configured if configured is not None else "cuda"
    elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        device = "mps"

    print(f"Using device: {device}")

    # Pick the dtype from the *selected* device rather than bare CUDA
    # availability, so forcing device="cpu" on a CUDA machine does not
    # select bfloat16 for CPU autocast.
    dtype = torch.bfloat16 if device.startswith("cuda") else torch.float32

    config["device"] = device
    config["dtype"] = str(dtype)  # stringified so the config stays JSON-serializable

    return Config(config)


def _default_config() -> dict:
    """Return the default config: config/train_1.yml if present, else a
    minimal built-in fallback (only the model section needs explicit values;
    everything else is supplied by ``_fill_defaults``)."""
    default_config_path = os.path.join(
        os.path.dirname(__file__), "config", "train_1.yml"
    )
    if os.path.exists(default_config_path):
        return load_config_from_file(default_config_path)
    return {
        "model": {
            "d_model": 768,
            "d_ff": 2048,
            "num_heads": 8,
            "num_layers": 6,
            "context_length": 256,
            "vocab_size": 32000,
        },
    }


def _fill_defaults(config: dict) -> None:
    """Ensure every section exists and fill missing fields with defaults."""
    run_defaults = {
        "run_id": "run_<timestamp>",
        "out_dir": "./out",
        "wandb_project": None,
        "wandb_tags": [],
    }
    data_defaults = {
        "train_data_path": "./train_val_data/train.bin",
        "valid_data_path": "./train_val_data/valid.bin",
    }
    training_defaults = {
        "batch_size": 32,
        "eval_batch_size": 32,
        "max_steps": 1000,
        "grad_accum_steps": 1,
        "eval_interval": 100,
        "eval_steps": 10,
        "checkpoint_interval": 500,
        "eval_before_training": True,
        "max_l2_norm": 1.0,
        "lr_max": 1e-3,
        "lr_min": 1e-4,
        "warmup_ratio": 0.1,
        "lr_schedule": "cosine",
        # Schedule lengths left as None are derived later in train().
        "phase_one_iters": None,
        "phase_two_iters": None,
        "phase_two_type": None,
        "cosine_cycle_iters": None,
        "linear_cycle_iters": None,
        "warmup_iters": None,
        "lr_inter": None,
        "resume": False,
        "resume_checkpoint": None,
        "device": None,
    }
    optimizer_defaults = {
        "lr": 1e-3,
        "betas": [0.9, 0.999],
        "eps": 1e-8,
        "weight_decay": 0.01,
    }

    for section, defaults in (
        ("run", run_defaults),
        ("data", data_defaults),
        ("model", {}),
        ("training", training_defaults),
        ("optimizer", optimizer_defaults),
    ):
        section_cfg = config.setdefault(section, {})
        for key, default_value in defaults.items():
            section_cfg.setdefault(key, default_value)


def train(config: Config | None = None):
    """Train a transformer language model with the given configuration.

    Sets up the run directory, logging, wandb, data, model, and optimizer,
    then runs the training loop with gradient accumulation, LR scheduling,
    gradient clipping, periodic evaluation, and checkpointing.

    Args:
        config: Fully populated run configuration; when None, defaults are
            loaded via ``load_config()``.
    """
    if config is None:
        config = load_config()

    # Whether this run resumes from an earlier checkpoint.
    resuming = config.training.get("resume", False)
    start_step = 1

    # Run directory and the files derived from it.
    run_dir = os.path.join(config.run.out_dir, config.run.run_id)
    config_outfile = os.path.join(run_dir, "config.json")
    log_file = os.path.join(run_dir, "log.txt")
    checkpoint_dir = os.path.join(run_dir, "checkpoints")

    os.makedirs(run_dir, exist_ok=True)
    os.makedirs(checkpoint_dir, exist_ok=True)

    # Maintain a "latest" symlink pointing at the current run directory,
    # replacing any previous one. Symlink creation may require elevated
    # privileges on Windows, so failures are reported and ignored.
    try:
        latest_symlink = os.path.join(config.run.out_dir, "latest")
        if os.path.islink(latest_symlink) or os.path.exists(latest_symlink):
            os.remove(latest_symlink)
        os.symlink(os.path.abspath(run_dir), latest_symlink, target_is_directory=True)
    except (OSError, NotImplementedError) as e:
        print(f"Warning: Could not create symlink: {e}")
        print("This is common on Windows and can be ignored.")

    # Initialize wandb (only when a project is configured) and the logger.
    wandb_run = (
        None
        if not config.run.wandb_project
        else wandb.init(
            project=config.run.wandb_project,
            id=config.run.run_id,  # reuse the run_id as the wandb id
            resume="must" if resuming else None,
            name=config.run.run_id,
            config=config,
            dir=run_dir,
            tags=config.run.wandb_tags,
        )
    )

    logger = Logger(log_file=log_file, wandb_run=wandb_run, resume=resuming)

    # Persist the config to disk unless resuming (it already exists on disk).
    if resuming:
        logger.log_info(f"Resuming training from existing config: {config_outfile}")
    else:
        with open(config_outfile, "w") as f:
            json.dump(
                config,
                f,
                indent=2,
                default=lambda o: list(o) if isinstance(o, tuple) else o.__dict__,
            )
        logger.log_info(f"Saved config to: {config_outfile}")

    # Device string (possibly fully qualified, e.g. "cuda:1") and the torch
    # dtype restored from its string form ("torch.bfloat16" -> torch.bfloat16).
    device = config.device
    dtype = getattr(torch, config.dtype.split(".")[-1])
    # torch.autocast and the torch.cuda utility checks need the bare device
    # *type* ("cuda"/"cpu"/"mps"), not a qualified string like "cuda:1".
    device_type = device.split(":")[0]

    # Memory-map the token datasets (uint16 token ids).
    train_data = np.memmap(config.data.train_data_path, dtype=np.uint16, mode="r")
    valid_data = np.memmap(config.data.valid_data_path, dtype=np.uint16, mode="r")

    # Build the model and move it to the target device.
    model = Transformer(**config.model, device=device, dtype=dtype)
    model.to(device)

    print(
        f"Trainable params: {sum(p.numel() for p in model.parameters() if p.requires_grad)}"
    )

    # Apply weight decay only to >=2D parameters (i.e. not layernorms/biases).
    param_dict = {pn: p for pn, p in model.named_parameters() if p.requires_grad}
    decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
    nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
    optim_groups = [
        {"params": decay_params, **config.optimizer},
        {"params": nodecay_params, **config.optimizer, "weight_decay": 0.0},
    ]
    num_decay_params = sum(p.numel() for p in decay_params)
    num_nodecay_params = sum(p.numel() for p in nodecay_params)
    print(
        f"Decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters"
    )
    print(
        f"Non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters"
    )

    # Build the optimizer over the two parameter groups.
    optimizer = AdamW(optim_groups, **config.optimizer)

    # When resuming, restore model/optimizer state and the step counter.
    if resuming:
        checkpoint_path = config.training.resume_checkpoint
        logger.log_info(f"Loading checkpoint from: {checkpoint_path}")
        start_step = load_checkpoint(checkpoint_path, model, optimizer) + 1
        logger.log_info(f"Resuming training from step {start_step}")

    # Optional torch.compile: default backend on CUDA/CPU, aot_eager on MPS.
    use_compile = config.training.get("use_compile", False)  # off by default
    if use_compile and device != "mps":
        try:
            model = torch.compile(model)
            torch.set_float32_matmul_precision("high")
            print("Model compiled successfully")
        except Exception as e:
            print(f"Warning: Could not compile model: {e}")
            print("Continuing without compilation")
    elif use_compile and device == "mps":
        try:
            model = torch.compile(model, backend="aot_eager")
            print("Model compiled with aot_eager backend")
        except Exception as e:
            print(f"Warning: Could not compile model: {e}")
            print("Continuing without compilation")

    # Unpack training hyperparameters.
    max_steps = config.training.max_steps
    batch_size = config.training.batch_size
    max_l2_norm = config.training.max_l2_norm
    eval_interval = config.training.eval_interval
    checkpoint_interval = config.training.checkpoint_interval
    grad_accum_steps = config.training.grad_accum_steps

    lr_max = config.training.lr_max
    lr_inter = config.training.lr_inter
    lr_min = config.training.lr_min
    warmup_ratio = config.training.warmup_ratio
    warmup_iters = config.training.warmup_iters
    phase_one_iters = config.training.phase_one_iters
    phase_two_iters = config.training.phase_two_iters
    phase_two_type = config.training.phase_two_type
    cosine_cycle_iters = config.training.cosine_cycle_iters
    linear_cycle_iters = config.training.linear_cycle_iters

    # Derive schedule lengths that were left unset in the config.
    if warmup_iters is False or warmup_iters is None:
        warmup_iters = int(warmup_ratio * max_steps)

    if cosine_cycle_iters is False or cosine_cycle_iters is None:
        cosine_cycle_iters = max_steps

    if linear_cycle_iters is False or linear_cycle_iters is None:
        linear_cycle_iters = max_steps

    if phase_two_iters is False or phase_two_iters is None:
        phase_two_iters = max_steps

    def evaluate(step: int, is_last_step: bool):
        """Evaluate the model on the validation set and log the metrics."""
        n_eval_steps = config.training.eval_steps

        # Use more eval batches for the final, more trustworthy estimate.
        if is_last_step:
            n_eval_steps = n_eval_steps * 3

        model.eval()

        with torch.no_grad():
            val_loss = 0.0
            for _ in range(n_eval_steps):
                x, y = get_batch(
                    valid_data,
                    config.training.eval_batch_size,
                    config.model.context_length,
                    device,
                )
                # Forward pass under autocast mixed precision.
                with torch.autocast(device_type=device_type, dtype=dtype):
                    logits = model(x)
                loss = cross_entropy_loss(logits, y)
                val_loss += loss.item()

                # Drop per-batch tensors eagerly to limit peak memory.
                del x, y, logits, loss

            # Average over eval batches.
            val_loss /= n_eval_steps

            if device_type == "cuda":
                torch.cuda.empty_cache()

        progress_str = get_progress_str(step, max_steps)

        # Metrics for wandb.
        metrics = {
            "eval/loss": val_loss,
            "eval/perplexity": get_perplexity(val_loss),
            "eval/peak_memory": get_peak_memory(device),
            "step": step,
        }

        # Metrics for the console and the local log file.
        display_metrics = {
            "step": progress_str,
            "v_loss": f"{val_loss:.4f}",
            "v_ppl": f"{get_perplexity(val_loss):.2f}",
            "mem": f"{get_peak_memory(device):.1f}MB",
        }

        logger.log_info(display_metrics)
        logger.log_metrics(metrics)

        # Restore training mode.
        model.train()

    # Baseline evaluation before any training (skipped when resuming).
    if config.training.eval_before_training and not resuming:
        evaluate(0, False)

    # Build the training data loader; cap samples per epoch to bound memory.
    max_possible_samples = len(train_data) - config.model.context_length - 1
    samples_per_epoch = min(
        50000, max_possible_samples
    )  # cap at 50k to avoid memory issues

    # DataLoader workers cause memory issues on Windows, so use none there.
    num_workers = 0 if sys.platform == "win32" else 4

    train_loader = create_data_loader(
        train_data,
        batch_size,
        config.model.context_length,
        num_workers=num_workers,
        pin_memory=device.startswith("cuda"),
        samples_per_epoch=samples_per_epoch,
    )
    train_loader_iter = iter(train_loader)

    # Prefetch the first batch and move it to the device.
    x, y = next(train_loader_iter)
    if device.startswith("cuda"):
        x = x.pin_memory().to(device, non_blocking=True)
        y = y.pin_memory().to(device, non_blocking=True)
    else:
        x = x.to(device)
        y = y.to(device)

    model.train()

    # Main training loop (steps are 1-indexed, inclusive of max_steps).
    for step in range(start_step, max_steps + 1):
        t0 = time.time()
        is_last_step = step == max_steps
        loss_accum = 0.0

        # Gradient accumulation: several micro-batches per optimizer step.
        for _ in range(grad_accum_steps):
            with torch.autocast(device_type=device_type, dtype=dtype):
                logits = model(x)

            # Scale the loss so accumulated gradients average correctly.
            loss = cross_entropy_loss(logits, y) / grad_accum_steps

            # Fetch the next batch before backward so data loading can
            # overlap computation; restart the loader when it is exhausted
            # so each "epoch" draws fresh samples.
            try:
                x, y = next(train_loader_iter)
            except StopIteration:
                train_loader_iter = iter(train_loader)
                x, y = next(train_loader_iter)

            if device.startswith("cuda"):
                x = x.pin_memory().to(device, non_blocking=True)
                y = y.pin_memory().to(device, non_blocking=True)
            else:
                x = x.to(device)
                y = y.to(device)

            loss_accum += loss.detach()
            loss.backward()

        # Clip the global gradient norm.
        norm = gradient_clip(model.parameters(), max_l2_norm)

        # Learning rate for this step from the configured schedule.
        if config.training.lr_schedule == "linear":
            lr = lr_linear_schedule(
                step, lr_max, lr_min, warmup_iters, linear_cycle_iters
            )
        elif config.training.lr_schedule == "cosine":
            lr = lr_cosine_schedule(
                step, lr_max, lr_min, warmup_iters, cosine_cycle_iters
            )
        elif config.training.lr_schedule == "double":
            lr = lr_double_schedule(
                step,
                lr_max,
                lr_inter,
                lr_min,
                warmup_iters,
                phase_one_iters,
                phase_two_iters,
                phase_two_type,
            )
        else:
            # Fail fast instead of hitting an UnboundLocalError on `lr` below.
            raise ValueError(f"Unknown lr_schedule: {config.training.lr_schedule}")

        # Apply the scheduled learning rate to all parameter groups.
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr

        optimizer.step()
        optimizer.zero_grad(set_to_none=True)

        # Synchronize the device so the step timing below is accurate.
        if device_type == "cuda":
            torch.cuda.synchronize()
        elif device_type == "mps":
            torch.mps.synchronize()

        # Step timing and throughput.
        t1 = time.time()
        dt = t1 - t0
        tokens_per_sec = (
            config.model.context_length * batch_size * grad_accum_steps / dt
        )
        train_loss = loss_accum.item()
        progress_str = get_progress_str(step, max_steps)

        # Metrics for wandb.
        metrics = {
            "train/loss": train_loss,
            "train/perplexity": get_perplexity(train_loss),
            "train/lr": lr,
            "train/grad_norm": norm,
            "train/tokens_per_sec": tokens_per_sec,
            "train/peak_memory": get_peak_memory(device),
            "step": step,
        }

        # Metrics for the console and the local log file.
        display_metrics = {
            "step": progress_str,
            "t_loss": f"{train_loss:.4f}",
            "t_ppl": f"{get_perplexity(train_loss):.2f}",
            "lr": f"{lr:.4e}",
            "grad_norm": f"{norm:.2f}",
            "mem": f"{get_peak_memory(device):.1f}MB",
            "tok/sec": f"{int(tokens_per_sec):,}",
            "dt": f"{dt * 1000:.2f}ms",
        }

        logger.log_info(display_metrics)
        logger.log_metrics(metrics)

        # Periodic (and final) evaluation.
        if step % eval_interval == 0 or is_last_step:
            evaluate(step, is_last_step)

        # Periodic (and final) checkpointing.
        if step % checkpoint_interval == 0 or is_last_step:
            checkpoint_path = os.path.join(checkpoint_dir, f"checkpoint_{step}.pt")
            save_checkpoint(model, optimizer, step, checkpoint_path)

            # Maintain a "latest.pt" symlink to the newest checkpoint;
            # symlink failures (common on Windows) are reported and ignored.
            try:
                latest_symlink = os.path.join(checkpoint_dir, "latest.pt")
                if os.path.islink(latest_symlink) or os.path.exists(latest_symlink):
                    os.remove(latest_symlink)
                os.symlink(os.path.abspath(checkpoint_path), latest_symlink)
            except (OSError, NotImplementedError) as e:
                print(f"Warning: Could not create checkpoint symlink: {e}")

            logger.log_info(f"Saved checkpoint to: {checkpoint_path}")

    # Finish the wandb run (no-op when wandb was never initialized).
    wandb.finish()


def get_peak_memory(device):
    """Return peak CUDA memory allocated (in MB) since the last reset.

    Returns 0 for non-CUDA devices. Also resets the CUDA peak-memory
    counter so the next call reports a fresh measurement window.
    """
    # Accept fully qualified device strings ("cuda:1") as well as "cuda";
    # the old `device != "cuda"` check wrongly returned 0 for "cuda:1".
    if not str(device).startswith("cuda"):
        return 0

    # Convert bytes to MB.
    peak_memory = torch.cuda.max_memory_allocated() / (1024 * 1024)
    torch.cuda.reset_peak_memory_stats()
    return peak_memory


def get_perplexity(loss):
    """Convert a cross-entropy loss value into perplexity (exp(loss)).

    The loss is clamped to at most 20 before exponentiating so divergent
    losses cannot overflow the float exponential.
    """
    clamped_loss = min(loss, 20)
    return math.exp(clamped_loss)


def get_progress_str(step, max_steps):
    """Format training progress as ``step S/T (P%)``."""
    percent = step / max_steps * 100
    return f"step {step}/{max_steps} ({percent:.2f}%)"


def parse_value(value_str: str):
    """Best-effort conversion of a CLI string to list, int, float, bool, or str.

    Bracketed values become lists (elements parsed recursively); numeric
    strings become int (preferred) or float; "true"/"false" become bools;
    anything else is returned unchanged.
    """
    stripped = value_str.strip()

    # Bracketed values become lists; each comma-separated element is parsed
    # recursively. "[]" yields an empty list.
    if stripped.startswith("[") and stripped.endswith("]"):
        inner = stripped[1:-1].strip()
        if not inner:
            return []
        return [parse_value(item.strip()) for item in inner.split(",")]

    # Numeric conversions: integers take priority over floats.
    for converter in (int, float):
        try:
            return converter(value_str)
        except ValueError:
            pass

    # Recognize booleans case-insensitively.
    lowered = stripped.lower()
    if lowered in ("true", "false"):
        return lowered == "true"

    # Anything else stays a plain string.
    return value_str


def deep_set(config_dict, key_path: str, value):
    """Set a dot-separated *key_path* in *config_dict*, creating (or
    replacing non-dict) intermediate levels as needed."""
    *parents, leaf = key_path.split(".")
    node = config_dict
    for part in parents:
        # Create the intermediate level when it is missing or not a dict.
        if not isinstance(node.get(part), dict):
            node[part] = {}
        node = node[part]
    node[leaf] = value


if __name__ == "__main__":
    # Command-line interface for launching or resuming a training run.
    parser = argparse.ArgumentParser(description="Train a transformer model")
    parser.add_argument("--config", type=str, help="Path to config file (optional)")
    parser.add_argument(
        "--resume-from", type=str, help="Path to run directory to resume from"
    )
    parser.add_argument(
        "--override-config",
        type=str,
        help="Path to config file with values to override when resuming",
    )
    parser.add_argument(
        "--override-param",
        action="append",
        default=[],
        help="Override a config param, e.g. model.d_model=512 (can be repeated)",
    )
    args = parser.parse_args()

    if args.resume_from is None:
        # Fresh run: load the given config (defaults apply when omitted).
        config = load_config(args.config)
    else:
        # Resume: start from the previous run's saved config, marking it as
        # a resumption that continues from the latest checkpoint.
        prior_config = load_config_from_file(
            os.path.join(args.resume_from, "config.json")
        )
        prior_config["training"]["resume"] = True
        prior_config["training"]["resume_checkpoint"] = os.path.join(
            args.resume_from, "checkpoints/latest.pt"
        )
        # An override config file, when given, is layered on top.
        config = load_config(args.override_config, base_config=prior_config)

    # Apply any key=value overrides on top of the loaded config.
    for override in args.override_param:
        if "=" not in override:
            raise ValueError(f"Invalid override: {override}, must be like key=val")
        dotted_key, raw = override.split("=", 1)
        deep_set(config, dotted_key, parse_value(raw))

    train(config)
