# NOTE: The commented-out lines below are an older, superseded copy of this
# script, kept for reference only; the active implementation follows it.
# # coding=utf-8
# from __future__ import absolute_import, division, print_function
# import logging
# import argparse
# import os
# import random
# import numpy as np
# from datetime import timedelta
#
# import torch
# import torch.distributed as dist
#
# from tqdm import tqdm
# # from torch.utils.tensorboard import SummaryWriter
# from tensorboardX import SummaryWriter
# #from apex import amp
# #from apex.parallel import DistributedDataParallel as DDP
#
# from models.modeling import VisionTransformer, CONFIGS
# from utils.scheduler import WarmupLinearSchedule, WarmupCosineSchedule
# # from transformers import get_linear_schedule_with_warmup as WarmupLinearSchedule
# from utils.data_utils import get_loader
# from utils.dist_util import get_world_size
#
# CUDA_VISIBLE_DEVICES=1
# logger = logging.getLogger(__name__)
#
#
# """
# 训练参数：
# --name cifar10-100_500
# --dataset cifar10
# --model_type ViT-B_16
# --pretrained_dir checkpoint/ViT-B_16.npz
# """
# #数据集，窗口选择16 * 16，8 * 8，32 * 32，
#
# #命令行参数
# class AverageMeter(object):
#
#
#     """Computes and stores the average and current value"""
#     def __init__(self):
#         self.reset()
#
#     def reset(self):
#         self.val = 0
#         self.avg = 0
#         self.sum = 0
#         self.count = 0
#
#     def update(self, val, n=1):
#         self.val = val
#         self.sum += val * n
#         self.count += n
#         self.avg = self.sum / self.count
#
#
# def simple_accuracy(preds, labels):
#     return (preds == labels).mean()
#
#
# def saves_model(args, model):
#     model_to_save = model.module if hasattr(model, 'module') else model
#     model_checkpoint = os.path.join(args.output_dir, "%s_checkpoint.bin" % args.name)
#     torchave(model_to_save.state_dict(), model_checkpoint)
#     logger.info("Saved model checkpoint to [DIR: %s]", args.output_dir)
#
#
# def setup(args):
#     # Prepare model
#     config = CONFIGS[args.model_type]
#
#     num_classes = 10 if args.dataset == "cifar10" else 100
#
#     model = VisionTransformer(config, args.img_size, zero_head=True, num_classes=num_classes)
#     model.load_from(np.load(args.pretrained_dir))
#     model.to(args.device)
#     num_params = count_parameters(model)
#
#     logger.info("{}".format(config))
#     logger.info("Training parameters %s", args)
#     logger.info("Total Parameter: \t%2.1fM" % num_params)
#     print(num_params)
#     return args, model
#
#
# def count_parameters(model):
#     params = sum(p.numel() for p in model.parameters() if p.requires_grad)
#     return params/1000000
#
#
# def set_seed(args): #随机种子
#     random.seed(args.seed)
#     np.random.seed(args.seed)
#     torch.manual_seed(args.seed)
#     if args.n_gpu > 0:
#         torch.cuda.manual_seed_all(args.seed)
#
#
# def valid(args, model, writer, test_loader, global_step):
#     # Validation!
#     eval_losses = AverageMeter()
#
#     logger.info("***** Running Validation *****")
#     logger.info("  Num steps = %d", len(test_loader))
#     logger.info("  Batch size = %d", args.eval_batch_size)
#
#     model.eval()
#     all_preds, all_label = [], []
#     epoch_iterator = tqdm(test_loader,
#                           desc="Validating... (loss=X.X)",
#                           bar_format="{l_bar}{r_bar}",
#                           dynamic_ncols=True,
#                           disable=args.local_rank not in [-1, 0])
#     loss_fct = torch.nn.CrossEntropyLoss()
#     for step, batch in enumerate(epoch_iterator):
#         batch = tuple(t.to(args.device) for t in batch)
#         x, y = batch
#         with torch.no_grad():
#             logits = model(x)[0]
#
#             eval_loss = loss_fct(logits, y)
#             eval_losses.update(eval_loss.item())
#
#             preds = torch.argmax(logits, dim=-1)
#
#         if len(all_preds) == 0:
#             all_preds.append(preds.detach().cpu().numpy())
#             all_label.append(y.detach().cpu().numpy())
#         else:
#             all_preds[0] = np.append(
#                 all_preds[0], preds.detach().cpu().numpy(), axis=0
#             )
#             all_label[0] = np.append(
#                 all_label[0], y.detach().cpu().numpy(), axis=0
#             )
#         epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
#
#     all_preds, all_label = all_preds[0], all_label[0]
#     accuracy = simple_accuracy(all_preds, all_label)
#
#     logger.info("\n")
#     logger.info("Validation Results")
#     logger.info("Global Steps: %d" % global_step)
#     logger.info("Valid Loss: %2.5f" % eval_losses.avg)
#     logger.info("Valid Accuracy: %2.5f" % accuracy)
#
#     writer.add_scalar("test/accuracy", scalar_value=accuracy, global_step=global_step)
#     return accuracy
#
#
# def train(args, model):
#     """ Train the model """
#     if args.local_rank in [-1, 0]:
#         os.makedirs(args.output_dir, exist_ok=True)
#         writer = SummaryWriter(log_dir=os.path.join("logs", args.name))
#
#     args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
#
#     # Prepare dataset
#     train_loader, test_loader = get_loader(args)
#
#     # Prepare optimizer and scheduler
#     optimizer = torch.optim.SGD(model.parameters(),
#                                 lr=args.learning_rate,
#                                 momentum=0.9,
#                                 weight_decay=args.weight_decay)
#     t_total = args.num_steps
#     if args.decay_type == "cosine":
#         scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
#     else:
#         scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
#     """
#     if args.fp16:
#         model, optimizer = amp.initialize(models=model,
#                                           optimizers=optimizer,
#                                           opt_level=args.fp16_opt_level)
#         amp._amp_state.loss_scalers[0]._loss_scale = 2**20
#
#     # Distributed training
#     if args.local_rank != -1:
#         model = DDP(model, message_size=250000000, gradient_predivide_factor=get_world_size())
#     """
#     # Train!
#     logger.info("***** Running training *****")
#     logger.info("  Total optimization steps = %d", args.num_steps)
#     logger.info("  Instantaneous batch size per GPU = %d", args.train_batch_size)
#     logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
#                 args.train_batch_size * args.gradient_accumulation_steps * (
#                     torch.distributed.get_world_size() if args.local_rank != -1 else 1))
#     logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
#
#     model.zero_grad()
#     set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
#     losses = AverageMeter()
#     global_step, best_acc = 0, 0
#     while True:
#         model.train()
#         epoch_iterator = tqdm(train_loader,
#                               desc="Training (X / X Steps) (loss=X.X)",
#                               bar_format="{l_bar}{r_bar}",
#                               dynamic_ncols=True,
#                               disable=args.local_rank not in [-1, 0])
#         for step, batch in enumerate(epoch_iterator):
#             batch = tuple(t.to(args.device) for t in batch)
#             x, y = batch
#             loss = model(x, y)
#
#             if args.gradient_accumulation_steps > 1:
#                 loss = loss / args.gradient_accumulation_steps
#             if args.fp16:
#                 with amp.scale_loss(loss, optimizer) as scaled_loss:
#                     scaled_loss.backward()
#             else:
#                 loss.backward()
#
#             if (step + 1) % args.gradient_accumulation_steps == 0:
#                 losses.update(loss.item()*args.gradient_accumulation_steps)
#                 if args.fp16:
#                     torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
#                 else:
#                     torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
#                 scheduler.step()
#                 optimizer.step()
#                 optimizer.zero_grad()
#                 global_step += 1
#
#                 epoch_iterator.set_description(
#                     "Training (%d / %d Steps) (loss=%2.5f)" % (global_step, t_total, losses.val)
#                 )
#                 if args.local_rank in [-1, 0]:
#                     writer.add_scalar("train/loss", scalar_value=losses.val, global_step=global_step)
#                     writer.add_scalar("train/lr", scalar_value=scheduler.get_lr()[0], global_step=global_step)
#                 if global_step % args.eval_every == 0 and args.local_rank in [-1, 0]:
#                     accuracy = valid(args, model, writer, test_loader, global_step)
#                     if best_acc < accuracy:
#                         save_model(args, model)
#                         best_acc = accuracy
#                     model.train()
#
#                 if global_step % t_total == 0:
#                     break
#         losses.reset()
#         if global_step % t_total == 0:
#             break
#
#     if args.local_rank in [-1, 0]:
#         writer.close()
#     logger.info("Best Accuracy: \t%f" % best_acc)
#     logger.info("End Training!")
#
#
# def main():
#     parser = argparse.ArgumentParser()
#     # Required parameters #下面是所有的参数
#     parser.add_argument("--name", required=True,
#                         help="Name of this run. Used for monitoring.")
#     parser.add_argument("--dataset", choices=["cifar10", "cifar100"], default="cifar10",
#                         help="Which downstream task.")
#     parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
#                                                  "ViT-L_32", "ViT-H_14", "R50-ViT-B_16"],
#                         default="ViT-B_16",
#                         help="Which variant to use.")
#     parser.add_argument("--pretrained_dir", type=str, default="checkpoint/ViT-B_16.npz",
#                         help="Where to search for pretrained ViT models.")
#     parser.add_argument("--output_dir", default="output", type=str,
#                         help="The output directory where checkpoints will be written.")
#
#     parser.add_argument("--img_size", default=224, type=int,
#                         help="Resolution size")
#     parser.add_argument("--train_batch_size", default=16, type=int,
#                         help="Total batch size for training.")
#     parser.add_argument("--eval_batch_size", default=64, type=int,
#                         help="Total batch size for eval.")
#     parser.add_argument("--eval_every", default=100, type=int,
#                         help="Run prediction on validation set every so many steps."
#                              "Will always run one evaluation at the end of training.")
#
#     parser.add_argument("--learning_rate", default=3e-2, type=float,
#                         help="The initial learning rate for SGD.")
#     parser.add_argument("--weight_decay", default=0, type=float,
#                         help="Weight deay if we apply some.")
#     parser.add_argument("--num_steps", default=10000, type=int,
#                         help="Total number of training epochs to perform.")
#     parser.add_argument("--decay_type", choices=["cosine", "linear"], default="cosine",
#                         help="How to decay the learning rate.")
#     parser.add_argument("--warmup_steps", default=500, type=int,
#                         help="Step of training to perform learning rate warmup for.")
#     parser.add_argument("--max_grad_norm", default=1.0, type=float,
#                         help="Max gradient norm.")
#
#     parser.add_argument("--local_rank", type=int, default=-1,
#                         help="local_rank for distributed training on gpus")
#     parser.add_argument('--seed', type=int, default=42,
#                         help="random seed for initialization")
#     parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
#                         help="Number of updates steps to accumulate before performing a backward/update pass.")
#     parser.add_argument('--fp16', action='store_true',
#                         help="Whether to use 16-bit float precision instead of 32-bit")
#     parser.add_argument('--fp16_opt_level', type=str, default='O2',
#                         help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
#                              "See details at https://nvidia.github.io/apex/amp.html")
#     parser.add_argument('--loss_scale', type=float, default=0,
#                         help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
#                              "0 (default value): dynamic loss scaling.\n"
#                              "Positive power of 2: static loss scaling value.\n")
#     args = parser.parse_args()
#
#     # Setup CUDA, GPU & distributed training
#     if args.local_rank == -1:
#         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#         args.n_gpu = torch.cuda.device_count()
#     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
#         torch.cuda.set_device(args.local_rank)
#         device = torch.device("cuda", args.local_rank)
#         torch.distributed.init_process_group(backend='nccl',
#                                              timeout=timedelta(minutes=60))
#         args.n_gpu = 1
#     args.device = device
#
#     # Setup logging  日志
#     logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
#                         datefmt='%m/%d/%Y %H:%M:%S',
#                         level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
#     logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" %
#                    (args.local_rank, args.device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
#
#     # Set seed
#     set_seed(args)
#
#     # Model & Tokenizer Setup
#     args, model = setup(args)
#
#     # Training
#     train(args, model)
#
#
# if __name__ == "__main__":
#     main()


# coding=utf-8
from __future__ import absolute_import, division, print_function
import logging
import argparse
import os
import random
import numpy as np
from datetime import timedelta

import torch
import torch.distributed as dist

from tqdm import tqdm
from tensorboardX import SummaryWriter  # keep only the import that is actually used

from models.modeling import VisionTransformer, CONFIGS
from utils.scheduler import WarmupLinearSchedule, WarmupCosineSchedule
from utils.data_utils import get_loader
from utils.dist_util import get_world_size

# Fix: restrict CUDA device visibility via os.environ (a bare Python assignment
# named CUDA_VISIBLE_DEVICES, as in the old copy, has no effect on CUDA).
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
logger = logging.getLogger(__name__)


"""
训练参数示例：
--name cifar10-vit-b16
--dataset cifar10
--model_type ViT-B_16
--pretrained_dir checkpoint/ViT-B_16.npz
"""


class AverageMeter(object):
    """Tracks the most recent value and a running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record a new observation ``val`` representing ``n`` samples."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count


def simple_accuracy(preds, labels):
    """Fraction of positions where ``preds`` equals ``labels`` (element-wise)."""
    return (labels == preds).mean()


def save_model(args, model):
    """Write the model's state_dict to ``<output_dir>/<name>_checkpoint.bin``."""
    # Unwrap DataParallel / DistributedDataParallel containers if present.
    target = getattr(model, 'module', model)
    checkpoint_path = os.path.join(args.output_dir, f"{args.name}_checkpoint.bin")
    torch.save(target.state_dict(), checkpoint_path)
    logger.info(f"Saved model checkpoint to [DIR: {args.output_dir}]")


def setup(args):
    """Build the VisionTransformer, load pretrained weights, and move it to the device.

    Returns the (possibly updated) ``args`` and the ready-to-train model.
    """
    model_config = CONFIGS[args.model_type]
    # CIFAR-10 has 10 classes; the only other supported dataset is CIFAR-100.
    n_classes = 10 if args.dataset == "cifar10" else 100

    model = VisionTransformer(
        config=model_config,
        img_size=args.img_size,
        zero_head=True,  # start fine-tuning with a zero-initialized classification head
        num_classes=n_classes
    )
    # Pretrained weights are expected as a .npz archive.
    model.load_from(np.load(args.pretrained_dir))
    model.to(args.device)
    num_params = count_parameters(model)  # trainable parameters, in millions

    logger.info(f"Model Config: {model_config}")
    logger.info(f"Training Parameters: {args}")
    logger.info(f"Total Parameters: {num_params:.1f}M")
    print(f"Total Parameters: {num_params:.1f}M")

    return args, model


def count_parameters(model):
    """Return the number of trainable parameters in ``model``, in millions."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable) / 1000000


def set_seed(args):
    """Seed every RNG in use (python, numpy, torch, CUDA) for reproducibility."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Only touch the CUDA RNGs when GPUs are actually in play.
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)


def valid(args, model, writer, test_loader, global_step):
    """Evaluate ``model`` on ``test_loader``; log results and return accuracy."""
    loss_meter = AverageMeter()
    model.eval()
    preds_buf, labels_buf = [], []

    logger.info("\n***** Running Validation *****")
    logger.info(f"  Num steps: {len(test_loader)}")
    logger.info(f"  Batch size: {args.eval_batch_size}")

    criterion = torch.nn.CrossEntropyLoss()
    # Only ranks -1/0 render the progress bar.
    progress = tqdm(test_loader, desc="Validating", dynamic_ncols=True, disable=args.local_rank not in [-1, 0])

    with torch.no_grad():
        for step, batch in enumerate(progress):
            batch = tuple(t.to(args.device) for t in batch)
            x, y = batch
            # Without labels the model is assumed to return (logits, ...).
            logits = model(x)[0]

            batch_loss = criterion(logits, y)
            loss_meter.update(batch_loss.item())

            batch_preds = torch.argmax(logits, dim=-1)
            # Accumulate predictions and ground truth on the CPU.
            if preds_buf:
                preds_buf[0] = np.append(preds_buf[0], batch_preds.detach().cpu().numpy(), axis=0)
                labels_buf[0] = np.append(labels_buf[0], y.detach().cpu().numpy(), axis=0)
            else:
                preds_buf.append(batch_preds.detach().cpu().numpy())
                labels_buf.append(y.detach().cpu().numpy())

            progress.set_description(f"Validating (loss={batch_loss.item():.5f})")

    accuracy = simple_accuracy(preds_buf[0], labels_buf[0])

    logger.info("\nValidation Results")
    logger.info(f"Global Steps: {global_step}")
    logger.info(f"Valid Loss: {loss_meter.avg:.5f}")
    logger.info(f"Valid Accuracy: {accuracy:.5f}")

    # Record validation metrics for TensorBoard.
    writer.add_scalar("test/accuracy", accuracy, global_step)
    writer.add_scalar("test/loss", loss_meter.avg, global_step)
    return accuracy


def train(args, model):
    """Run the full training loop.

    Trains for ``args.num_steps`` optimizer updates, periodically validates on
    the test set, and checkpoints the best-accuracy model. Only rank -1/0
    writes TensorBoard logs and checkpoints.
    """
    if args.local_rank in [-1, 0]:
        # Main process owns the output directory and TensorBoard writer.
        os.makedirs(args.output_dir, exist_ok=True)
        writer = SummaryWriter(log_dir=os.path.join("logs", args.name))

    # Effective per-step micro-batch size when gradient accumulation is used.
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    train_loader, test_loader = get_loader(args)

    # SGD with momentum plus a warmup + decay learning-rate schedule.
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=args.learning_rate,
        momentum=0.9,
        weight_decay=args.weight_decay
    )
    t_total = args.num_steps  # total number of optimizer updates

    if args.decay_type == "cosine":
        scheduler = WarmupCosineSchedule(optimizer, args.warmup_steps, t_total)
    else:
        scheduler = WarmupLinearSchedule(optimizer, args.warmup_steps, t_total)

    logger.info("\n***** Running Training *****")
    logger.info(f"  Total optimization steps: {t_total}")
    logger.info(f"  Batch size per GPU: {args.train_batch_size}")
    logger.info(f"  Total effective batch size: {args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)}")

    model.zero_grad()
    set_seed(args)  # re-seed so data order is reproducible across runs
    losses = AverageMeter()
    global_step = 0
    best_accuracy = 0.0

    while global_step < t_total:
        model.train()
        epoch_iterator = tqdm(train_loader, desc="Training", dynamic_ncols=True, disable=args.local_rank not in [-1, 0])

        for step, batch in enumerate(epoch_iterator):
            batch = tuple(t.to(args.device) for t in batch)
            x, y = batch
            # Forward pass: the model returns the loss when labels are supplied.
            loss = model(x, y)

            # Scale so accumulated gradients match a single large batch.
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            # NOTE(review): args.fp16 is currently ignored on this path; mixed
            # precision requires restoring the Apex code path.
            loss.backward()

            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                # Fix: since PyTorch 1.1, optimizer.step() must be called
                # before scheduler.step(); the reversed order skips the first
                # value of the LR schedule and triggers a UserWarning.
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                global_step += 1

                # Track the un-scaled loss for logging.
                losses.update(loss.item() * args.gradient_accumulation_steps)
                epoch_iterator.set_description(f"Training (Step {global_step}/{t_total}, Loss={losses.val:.5f})")

                if args.local_rank in [-1, 0]:
                    writer.add_scalar("train/loss", losses.val, global_step)
                    writer.add_scalar("train/lr", scheduler.get_lr()[0], global_step)

                # Periodic validation; keep only the best checkpoint.
                if global_step % args.eval_every == 0 and args.local_rank in [-1, 0]:
                    current_accuracy = valid(args, model, writer, test_loader, global_step)
                    if current_accuracy > best_accuracy:
                        save_model(args, model)
                        best_accuracy = current_accuracy
                    model.train()  # back to train mode after validation

                # Stop once the requested number of updates is reached.
                if global_step >= t_total:
                    break

        losses.reset()  # loss statistics are per epoch

    if args.local_rank in [-1, 0]:
        writer.close()

    logger.info(f"\nTraining Complete! Best Accuracy: {best_accuracy:.5f}")
    logger.info(f"Checkpoint saved to: {os.path.join(args.output_dir, f'{args.name}_checkpoint.bin')}")


def main():
    """Parse CLI arguments, configure devices/logging/seeding, and launch training."""
    p = argparse.ArgumentParser(description="Vision Transformer (ViT) Training Script")

    # Task / model selection
    p.add_argument("--name", required=True, help="训练任务名称，用于区分不同实验")
    p.add_argument("--dataset", choices=["cifar10", "cifar100"], default="cifar10", help="数据集选择")
    p.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16", "ViT-L_32", "ViT-H_14", "R50-ViT-B_16"], default="ViT-B_16", help="ViT 模型变体")
    p.add_argument("--pretrained_dir", type=str, default="checkpoint/ViT-B_16.npz", help="预训练模型路径（.npz 文件）")
    p.add_argument("--output_dir", default="output", help="检查点保存目录")

    # Input / batching
    p.add_argument("--img_size", type=int, default=224, help="输入图像尺寸（需与预训练模型匹配）")
    p.add_argument("--train_batch_size", type=int, default=16, help="训练批量大小")
    p.add_argument("--eval_batch_size", type=int, default=64, help="验证批量大小")
    p.add_argument("--eval_every", type=int, default=100, help="每多少步进行一次验证")

    # Optimization
    p.add_argument("--learning_rate", type=float, default=0.03, help="初始学习率（SGD）")
    p.add_argument("--weight_decay", type=float, default=0.0, help="权重衰减系数")
    p.add_argument("--num_steps", type=int, default=10000, help="总训练步数")
    p.add_argument("--decay_type", choices=["cosine", "linear"], default="cosine", help="学习率衰减方式")
    p.add_argument("--warmup_steps", type=int, default=500, help="学习率热身步数")
    p.add_argument("--max_grad_norm", type=float, default=1.0, help="最大梯度范数（梯度裁剪）")

    # Distributed / reproducibility
    p.add_argument("--local_rank", type=int, default=-1, help="分布式训练 GPU 编号（-1 表示单卡）")
    p.add_argument("--seed", type=int, default=42, help="随机种子")
    p.add_argument("--gradient_accumulation_steps", type=int, default=1, help="梯度累积步数")

    # Mixed precision (requires apex; currently unused by the training loop)
    p.add_argument("--fp16", action="store_true", help="启用 FP16 混合精度训练")
    p.add_argument("--fp16_opt_level", type=str, default="O2", help="Apex 优化级别")
    p.add_argument("--loss_scale", type=float, default=0.0, help="损失缩放值（0 表示动态缩放）")

    args = p.parse_args()

    # Device and (optional) distributed-backend setup.
    if args.local_rank != -1:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl", timeout=timedelta(minutes=60))
        args.n_gpu = 1
    else:
        args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        args.n_gpu = torch.cuda.device_count()

    # Logging: only the main process logs at INFO level.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN
    )
    logger.warning(
        f"Process rank: {args.local_rank}, Device: {args.device}, n_gpu: {args.n_gpu}, "
        f"Distributed training: {args.local_rank != -1}, FP16: {args.fp16}"
    )

    set_seed(args)

    # Model initialization (loads pretrained weights, moves to device).
    args, model = setup(args)

    train(args, model)


if __name__ == "__main__":
    main()