# Standard library and basic third-party packages
import os
import time
import math
import random
import numpy as np
import warnings
import argparse

# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
from torch import optim, nn

# Hugging Face Transformers
from transformers import AutoTokenizer, SchedulerType, get_scheduler, set_seed

# DeepSpeed
import deepspeed
from deepspeed import get_accelerator
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus

# Project-local modules
from model.model import MiniMindLM
from model.model_vlm import MiniMindVLM
from model.model_videolm import MiniMindVideoLM
from model.LMConfig import LMConfig, VLMConfig, VideoLMConfig
from model.model_lora import apply_lora, load_lora, save_lora
from model.dataset import PretrainDataset, SFTDataset, DPODataset, VLMDataset, VideoDataset

#from inference import main as eval_one_epoch

# Print all tensor values (enable when debugging)
#torch.set_printoptions(threshold=float('inf'))

# Suppress warnings
warnings.filterwarnings('ignore')


def Logger0(content):
    """Print *content* only on the global rank-0 process.

    Prevents duplicated log lines when running under DDP/DeepSpeed.
    Requires torch.distributed to be initialized.
    """
    if dist.get_rank() != 0:
        return
    print(content)


def parse_args():
    """Parse CLI arguments for the model, MoE, scheduler, data and DeepSpeed.

    Also derives `train_batch_size` and `global_rank` from the distributed
    environment, so torch.distributed must already be initialized, and logs
    the full argument table on rank 0.

    Returns:
        argparse.Namespace with all options (plus DeepSpeed config args).
    """
    parser = argparse.ArgumentParser(description="Training script arguments")

    # Model arguments
    parser.add_argument("--dim", type=int, default=512,
                        help="Dimension of the model.")
    parser.add_argument("--n_layers", type=int, default=24,
                        help="Number of layers in the model.")
    parser.add_argument("--n_heads", type=int, default=8,
                        help="Number of attention heads.")
    parser.add_argument("--n_kv_heads", type=int, default=8,
                        help="Number of key-value heads.")
    parser.add_argument("--vocab_size", type=int, default=6400,
                        help="Vocabulary size.")
    parser.add_argument("--hidden_dim", type=int, default=2048,
                        help="Hidden dimension.")
    parser.add_argument("--multiple_of", type=int, default=64,
                        help="Multiple of dimension.")
    parser.add_argument("--norm_eps", type=float, default=1e-5,
                        help="Normalization epsilon value.")
    # Bug fix: was type=int with a float default (1e6); a user-supplied value
    # was parsed as int while the default stayed float. RoPE theta is a float.
    parser.add_argument("--rope_theta", type=float, default=1e6,
                        help="RoPE theta value.")
    parser.add_argument("--dropout", type=float, default=0.0,
                        help="Dropout rate.")
    parser.add_argument("--flash_attn", action="store_true",
                        help="Whether to use flash attention.")

    # MoE arguments
    parser.add_argument("--use_moe", action="store_true", default=False,
                        help="Whether to use Mixture of Experts (MoE).")
    parser.add_argument("--num_experts_per_tok", type=int, default=2,
                        help="Number of experts per token.")
    parser.add_argument("--n_routed_experts", type=int, default=16,
                        help="Number of routed experts.")
    # NOTE(review): the help text suggests a count, but this is a boolean
    # flag; kept as-is for CLI backward compatibility — confirm intended type.
    parser.add_argument("--n_shared_experts", action="store_true",
                        help="Number of shared experts.")
    parser.add_argument("--scoring_func", type=str, default="softmax",
                        help="Scoring function for MoE.")
    parser.add_argument("--aux_loss_alpha", type=float, default=0.1,
                        help="Alpha value for auxiliary loss.")
    parser.add_argument("--seq_aux", action="store_true",
                        help="Whether to compute auxiliary loss at the sequence level.")
    parser.add_argument("--norm_topk_prob", action="store_true",
                        help="Whether to normalize top-k probabilities.")

    # DeepSpeed argument
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="Local rank of the process. Used for distributed training.")

    # Learning rate and scheduler
    parser.add_argument("--learning_rate", type=float, default=1e-6,
                        help="Initial learning rate (after the potential warmup period) to use.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=8,
                        help="Number of steps to accumulate gradients before performing a backward/update pass.")
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine",
                        choices=["linear", "cosine", "cosine_with_restarts",
                                 "polynomial", "constant", "constant_with_warmup",
                                 "inverse_sqrt", "reduce_lr_on_plateau",
                                 "cosine_with_min_lr", "warmup_stable_decay"],
                        help="The scheduler type to use.")
    parser.add_argument("--num_warmup_steps", type=int, default=0,
                        help="Number of steps for the warmup in the lr scheduler.")
    parser.add_argument("--num_train_epochs", type=int, default=1,
                        help="Total number of training epochs to perform.")

    # Training and optimization
    parser.add_argument("--gradient_checkpointing", action="store_true",
                        help="Enable gradient checkpointing to reduce memory usage.")
    parser.add_argument("--num_workers", type=int, default=1,
                        help="Number of worker threads for data loading.")
    parser.add_argument("--seed", type=int, default=1,
                        help="Random seed for reproducibility.")

    # Data and model
    parser.add_argument("--data_path", type=str, default="dpo_1000.jsonl",
                        help="Path to the training data.")
    parser.add_argument("--images_path", type=str, default=None,
                        help="Path to the image data.")
    # Bug fix: the help string duplicated --images_path's description.
    parser.add_argument("--images_lmdb_path", type=str, default=None,
                        help="Path to the LMDB database holding the image data.")
    parser.add_argument("--tokenizer_path", type=str, default=None,
                        help="Path to the tokenizer data.")
    parser.add_argument("--max_seq_len", type=int, default=2048,
                        help="Maximum sequence length for input IDs.")
    parser.add_argument("--dtype", type=str, default="fp16",
                        choices=["fp16", "bf16", "fp32"],
                        help="Data type for training (fp16, bf16, or fp32).")
    parser.add_argument("--gradient_clipping", type=float, default=1,
                        help="Gradient clipping value.")
    parser.add_argument("--train_micro_batch_size_per_gpu", type=int, default=1,
                        help="Micro batch size per GPU for training.")
    parser.add_argument("--zero_stage", type=int, default=3,
                        help="Zero-Optimization stage (0, 1, 2, or 3).")

    # Device and checkpointing
    parser.add_argument("--device", type=str, default="cuda",
                        help="Device to use for training (e.g., 'cuda' or 'cpu').")
    parser.add_argument("--save_dir", type=str, default="tmp",
                        help="Directory to save model checkpoints and logs.")

    # Training stage and LoRA
    parser.add_argument("--train_stage", type=str, default="DPO",
                        choices=["PT", "SFT", "LORA", "Distillation", "DPO",
                                 "VLM_PT", "VLM_SFT",
                                 "VideoLM_PT", "VideoLM_SFT"],
                        help="Training stage to perform.")
    parser.add_argument("--lora_rank", type=int, default=160,
                        help="Rank for LoRA adaptation.")
    parser.add_argument("--load_ckpt_path", type=str, default=None,
                        help="Path to load model checkpoint.")
    # Bug fix: the two arguments below previously had "." placeholder help.
    parser.add_argument("--steps_per_print", type=int, default=100,
                        help="Interval (in steps) at which DeepSpeed logs training statistics.")
    parser.add_argument("--wall_clock_breakdown", action="store_true",
                        help="Enable DeepSpeed per-phase (forward/backward/step) timing breakdown.")

    parser = deepspeed.add_config_arguments(parser)
    args = parser.parse_args()

    # Effective global batch = grad-accum steps * micro-batch * world size.
    args.train_batch_size = (args.gradient_accumulation_steps
                             * args.train_micro_batch_size_per_gpu
                             * torch.distributed.get_world_size())
    args.global_rank = torch.distributed.get_rank()

    Logger0("------------------------ arguments ------------------------")
    # Aligned key/value dump of every parsed argument.
    for key, value in vars(args).items():
        Logger0(f"{key} {'.' * (40 - len(key))} {value}")
    Logger0("-------------------- end of arguments ---------------------")
    return args


def _z3_params_to_fetch(param_list):
    """Select the ZeRO-3 partitioned parameters that are not locally available.

    A parameter is considered ZeRO-managed when it carries a `ds_id`
    attribute; only those whose status is NOT_AVAILABLE need gathering.
    """
    to_fetch = []
    for param in param_list:
        is_zero_param = hasattr(param, 'ds_id')
        if is_zero_param and param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
            to_fetch.append(param)
    return to_fetch


def save_zero_three_model(model, args):
    """Save the model state dict to `{save_dir}/{train_stage}.pth`, handling ZeRO-3.

    For ZeRO stages < 3, rank 0 simply saves the (unwrapped) module's state
    dict. For ZeRO-3, parameters are partitioned across ranks, so each one
    is gathered via `deepspeed.zero.GatheredParameters` before being copied
    to CPU and collected on rank 0. For VLM stages the frozen vision encoder
    weights are stripped from the checkpoint.
    """
    zero_stage_3 = (args.zero_stage == 3)
    os.makedirs(args.save_dir, exist_ok=True)
    WEIGHTS_NAME = f"{args.train_stage}.pth"
    output_model_file = os.path.join(args.save_dir, WEIGHTS_NAME)

    # Unwrap the DeepSpeed/DDP engine to reach the raw nn.Module.
    model_to_save = model.module if hasattr(model,
                                                'module') else model
    if not zero_stage_3:
        # Non-ZeRO-3: full weights live on every rank; only rank 0 saves.
        if args.global_rank == 0:

            output_state_dict = model_to_save.state_dict()
            if args.train_stage == "VLM_PT" or args.train_stage == "VLM_SFT":
                # Drop the frozen vision encoder from the checkpoint.
                output_state_dict = {
                    key: value for key, value in output_state_dict.items() if not key.startswith('vision_encoder.')
                }
            print("output_state_dict", output_state_dict)

            torch.save(output_state_dict, output_model_file)
    else:
        # ZeRO-3: gather each partitioned parameter in turn (all ranks must
        # participate in the collective), then keep the CPU copy on rank 0.
        output_state_dict = {}
        for k, v in model_to_save.named_parameters():

            if hasattr(v, 'ds_id'):
                with deepspeed.zero.GatheredParameters(_z3_params_to_fetch([v
                                                                            ]),
                                                       enabled=zero_stage_3):
                    v_p = v.data.cpu()
            else:
                v_p = v.cpu()
            # NOTE(review): LoRA weights are excluded here but NOT in the
            # non-ZeRO-3 branch above — confirm this asymmetry is intended.
            if args.global_rank == 0 and "lora" not in k:
                output_state_dict[k] = v_p

        if args.train_stage == "VLM_PT" or args.train_stage == "VLM_SFT":
            # Drop the frozen vision encoder from the checkpoint.
            output_state_dict = {
                key: value for key, value in output_state_dict.items() if not key.startswith('vision_encoder.')
            }

        if args.global_rank == 0:
            print("output_state_dict", output_state_dict)
            torch.save(output_state_dict, output_model_file)
        del output_state_dict


def get_deepspeed_args(args):
    """Build the DeepSpeed runtime config dict from parsed CLI arguments.

    Requires torch.distributed to be initialized (uses the world size for
    the global train batch size).

    Bug fix: the original raised NameError for --dtype fp32 because the
    fp16/bf16 locals were only assigned in those two branches; fp32 now
    simply omits the mixed-precision section.
    """
    config = {
        "train_micro_batch_size_per_gpu": args.train_micro_batch_size_per_gpu,
        "gradient_accumulation_steps": args.gradient_accumulation_steps,
        "train_batch_size": (args.gradient_accumulation_steps
                             * args.train_micro_batch_size_per_gpu
                             * torch.distributed.get_world_size()),
        "gradient_clipping": args.gradient_clipping,
        "zero_optimization": {
            "stage": args.zero_stage
        },
        "gradient_checkpointing": args.gradient_checkpointing,
        "data_types": {
            "grad_accum_dtype": args.dtype
        },
        "steps_per_print": args.steps_per_print,
        # Log per-phase latency (forward / backward / optimizer step).
        "wall_clock_breakdown": args.wall_clock_breakdown,
    }
    if args.dtype == "fp16":
        config["fp16"] = {"enabled": True, "loss_scale_window": 100}
    elif args.dtype == "bf16":
        config["bfloat16"] = {"enabled": True}
    return config

def init_env(args):
    """Seed every RNG source (transformers, python, numpy, torch, accelerator).

    Does nothing when args.seed is None. Returns args unchanged for chaining.
    """
    seed = args.seed
    if seed is not None:
        set_seed(seed)
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        get_accelerator().manual_seed_all(seed)
    return args

def init_dataloader(args, tokenizer):
    """Build the distributed training DataLoader for args.train_stage.

    VLM/VideoLM stages get their multimodal datasets; the remaining stages
    map to PretrainDataset / SFTDataset / DPODataset. Shuffling is delegated
    to the DistributedSampler.

    Raises:
        ValueError: if args.train_stage is not a recognized stage.
    """
    stage = args.train_stage
    if stage in ("VLM_PT", "VLM_SFT"):
        train_set = VLMDataset(
            args.data_path, args.images_path, tokenizer,
            lmdb_path=args.images_lmdb_path, preprocess=args.preprocess,
            image_special_token=args.model_config.image_special_token,
            max_length=args.max_seq_len)
    elif stage in ("VideoLM_PT", "VideoLM_SFT"):
        train_set = VideoDataset(
            args.data_path, args.images_path, tokenizer,
            video_special_token=args.model_config.image_special_token,
            max_length=args.max_seq_len)
    else:
        dataset_by_stage = {
            "PT": PretrainDataset,
            "SFT": SFTDataset,
            "LORA": SFTDataset,
            "Distillation": SFTDataset,
            "DPO": DPODataset,
        }
        if stage not in dataset_by_stage:
            raise ValueError("unsupported args.train_stage")
        train_set = dataset_by_stage[stage](
            args.data_path, tokenizer, max_length=args.max_seq_len)

    sampler = DistributedSampler(train_set)
    train_loader = DataLoader(
        train_set,
        batch_size=args.train_micro_batch_size_per_gpu,
        pin_memory=True,
        drop_last=False,
        shuffle=False,  # the sampler owns shuffling
        num_workers=args.num_workers,
        sampler=sampler,
    )
    Logger0(f'train_loader length:{len(train_loader)}')
    return train_loader

def _config_from_args(config_cls, args):
    """Instantiate *config_cls* (LMConfig / VLMConfig / VideoLMConfig) from CLI args."""
    return config_cls(
        dim=args.dim,
        n_layers=args.n_layers,
        n_heads=args.n_heads,
        n_kv_heads=args.n_kv_heads,
        vocab_size=args.vocab_size,
        hidden_dim=args.hidden_dim,
        multiple_of=args.multiple_of,
        norm_eps=args.norm_eps,
        max_seq_len=args.max_seq_len,
        rope_theta=args.rope_theta,
        dropout=args.dropout,
        flash_attn=args.flash_attn,
        use_moe=args.use_moe,
        num_experts_per_tok=args.num_experts_per_tok,
        n_routed_experts=args.n_routed_experts,
        n_shared_experts=args.n_shared_experts,
        scoring_func=args.scoring_func,
        aux_loss_alpha=args.aux_loss_alpha,
        seq_aux=args.seq_aux,
        norm_topk_prob=args.norm_topk_prob,
    )


def init_model_tokenizer(args):
    """Create (model, tokenizer) for args.train_stage and move the model to args.device.

    Side effects on *args*: sets `model_config` (VLM/VideoLM stages),
    `preprocess` (VLM stages) and `ref_model` (DPO stage).

    Returns:
        (model, tokenizer) — model already on args.device.
    """
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_path)

    if args.train_stage in ("VideoLM_PT", "VideoLM_SFT"):
        model_config = _config_from_args(VideoLMConfig, args)
        setattr(args, "model_config", model_config)

        # Start from pure language-model weights when a checkpoint is given.
        model = MiniMindVideoLM(model_config)
        if args.load_ckpt_path is not None:
            state_dict = torch.load(args.load_ckpt_path, map_location=args.device)
            model.load_state_dict(state_dict, strict=False)

        if args.train_stage == "VideoLM_PT":
            # Freeze everything except vision_proj / video_summarizer.
            # Bug fix: the original condition was
            #   `if 'vision_proj' or 'video_summarizer' in name:`
            # which is always truthy, so every parameter stayed trainable.
            for name, param in model.named_parameters():
                param.requires_grad = (
                    'vision_proj' in name or 'video_summarizer' in name)
            # Additionally keep the last transformer layer trainable.
            if hasattr(model, "layers"):
                for layer in model.layers[-1:]:
                    for param in layer.parameters():
                        param.requires_grad = True

        print_model_parameters(model)
        Logger0(f'VideoLM可训练参数量：{sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e6:.3f} 百万')
        return model.to(args.device), tokenizer

    if args.train_stage in ("VLM_PT", "VLM_SFT"):
        model_config = _config_from_args(VLMConfig, args)
        setattr(args, "model_config", model_config)

        # Start from pure language-model weights when a checkpoint is given.
        model = MiniMindVLM(model_config)
        if args.load_ckpt_path is not None:
            state_dict = torch.load(args.load_ckpt_path, map_location=args.device)
            model.load_state_dict(state_dict, strict=False)

        if args.train_stage == "VLM_PT":
            # Freeze everything except vision_proj.
            for name, param in model.named_parameters():
                if 'vision_proj' not in name:
                    param.requires_grad = False
            # Additionally keep the last transformer layer trainable.
            if hasattr(model, "layers"):
                for layer in model.layers[-1:]:
                    for param in layer.parameters():
                        param.requires_grad = True

        print_model_parameters(model)
        Logger0(f'VLM可训练参数量：{sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e6:.3f} 百万')

        _, preprocess = MiniMindVLM.get_vision_model()
        setattr(args, "preprocess", preprocess)
        return model.to(args.device), tokenizer

    # Pure language-model stages: PT / SFT / LORA / Distillation / DPO.
    lm_config = _config_from_args(LMConfig, args)
    model = MiniMindLM(lm_config)
    state_dict = None
    if args.load_ckpt_path is not None:
        state_dict = torch.load(args.load_ckpt_path, map_location=args.device)
        model.load_state_dict(state_dict, strict=False)
    model = model.to(args.device)

    total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    Logger0(f'LLM总参数量：{ total_params / 1e6:.3f} 百万')

    if args.train_stage == "LORA":
        apply_lora(model, args.lora_rank)
        # Report the LoRA parameter share, then freeze the base model.
        lora_params_count = sum(p.numel() for name, p in model.named_parameters() if 'lora' in name)
        Logger0(f"LLM 总参数量: {total_params}")
        Logger0(f"LoRA 参数量: {lora_params_count}")
        Logger0(f"LoRA 参数占比: {lora_params_count / total_params * 100:.2f}%")
        for name, param in model.named_parameters():
            if 'lora' not in name:
                param.requires_grad = False

    if args.train_stage == "DPO":
        # Frozen reference policy for DPO, initialized from the same weights.
        ref_model = MiniMindLM(lm_config)
        # Bug fix: the original referenced `state_dict` unconditionally and
        # raised NameError when --load_ckpt_path was not provided.
        if state_dict is not None:
            ref_model.load_state_dict(state_dict, strict=False)
        ref_model.eval()
        ref_model.requires_grad_(False)
        ref_model = ref_model.to(args.device)
        args.ref_model = ref_model

    return model, tokenizer

def print_model_parameters(model):
    """Log every trainable parameter tensor and the trainable total (rank 0 only)."""
    total = 0
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        total += param.numel()
        Logger0(f"Layer: {name}, Parameters: {param.numel()}")
    Logger0(f"Total of parameters: {total}")
    

def get_train_parameter(model, args):
    """Return the parameters the optimizer should update for this train stage.

    LORA stage: a list of parameters whose name contains 'lora'.
    Any other stage: an iterator over parameters with requires_grad=True.
    """
    if args.train_stage == "LORA":
        return [param for name, param in model.named_parameters()
                if 'lora' in name]
    return filter(lambda p: p.requires_grad, model.parameters())

def save_model_process(model, args):
    """Checkpoint the model, then restore training mode.

    LORA stage saves only the adapter weights; every other stage goes
    through the ZeRO-aware full-model save.
    """
    model.eval()
    os.makedirs(args.save_dir, exist_ok=True)
    if args.train_stage == "LORA":
        lora_ckpt = f'{args.save_dir}/lora{args.lora_rank}_2048.pth'
        save_lora(model, lora_ckpt)
    else:
        save_zero_three_model(model, args)
    model.train()

def to_device(batch, device):
    """Move every tensor-like value in *batch* to *device*; pass others through.

    Args:
        batch: dict of collated values; objects with a usable `.to(device)`
            (tensors) are moved, everything else (strings, ints, lists, ...)
            is kept unchanged.
        device: target device string or torch.device.

    Returns:
        A new dict with the same keys.
    """
    output = {}
    for key, value in batch.items():
        # Bug fix: the original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and masked real device errors.
        # Non-movable values raise AttributeError/TypeError and pass through.
        try:
            output[key] = value.to(device)
        except (AttributeError, TypeError):
            output[key] = value
    return output

def logits_to_probs(logits, labels):
    """Gather the log-probability of each label token under *logits*.

    Shapes: logits (batch, seq, vocab), labels (batch, seq);
    returns (batch, seq) per-token log-probabilities.
    """
    log_probs = F.log_softmax(logits, dim=2)
    picked = log_probs.gather(dim=2, index=labels.unsqueeze(2))
    return picked.squeeze(-1)


def dpo_loss(ref_probs, probs, beta):
    """DPO preference loss from policy (*probs*) and reference (*ref_probs*) log-probs.

    Both inputs have shape (batch, seq); the first half of the batch holds
    the chosen responses and the second half the rejected ones.
    Returns a scalar loss tensor.
    """
    # Per-sample mean log-probability over the sequence dimension.
    ref_mean = ref_probs.mean(dim=1)
    policy_mean = probs.mean(dim=1)

    # Split the concatenated batch into chosen / rejected halves.
    half = ref_mean.shape[0] // 2
    chosen_ref, reject_ref = ref_mean[:half], ref_mean[half:]
    chosen_pi, reject_pi = policy_mean[:half], policy_mean[half:]

    margin = (chosen_pi - reject_pi) - (chosen_ref - reject_ref)
    return (-F.logsigmoid(beta * margin)).mean()

def compute_loss_process(model, args, batch, tokenizer):
    """Compute the loss for one micro-batch according to args.train_stage.

    DPO: score concatenated chosen/rejected pairs under the policy and the
    frozen reference model (args.ref_model) and return the DPO loss.
    Other stages: masked token-level cross-entropy plus the model's MoE
    auxiliary loss, with custom block-diagonal attention masks for packed
    PT/SFT data.
    """
    # NOTE(review): torch.cuda.amp.autocast() is deprecated in favor of
    # torch.amp.autocast('cuda'); kept unchanged to preserve behavior.
    with torch.cuda.amp.autocast():
        if args.train_stage=="DPO":
            x_chosen = batch['x_chosen']
            x_rejected = batch['x_rejected']
            y_chosen = batch['y_chosen']
            y_rejected = batch['y_rejected']
            mask_chosen = batch['mask_chosen']
            mask_rejected = batch['mask_rejected']
            # Chosen samples occupy the first half of the batch and rejected
            # the second half — dpo_loss relies on this layout.
            x = torch.cat([x_chosen, x_rejected], dim=0)
            y = torch.cat([y_chosen, y_rejected], dim=0)
            mask = torch.cat([mask_chosen, mask_rejected], dim=0)
            with torch.no_grad():
                ref_outputs = args.ref_model(x)
                ref_logits = ref_outputs.logits
            ref_probs = logits_to_probs(ref_logits, y)
            ref_probs = ref_probs * mask
            outputs = model(x)
            logits = outputs.logits
            probs = logits_to_probs(logits, y)
            probs = probs * mask
            loss = dpo_loss(ref_probs, probs, beta=0.1)
        else:
            x = batch['x']
            y = batch['y']
            loss_mask = batch['loss_mask']
            loss_fct = nn.CrossEntropyLoss(reduction='none')
            if args.train_stage=="VLM_PT" or args.train_stage == "VLM_SFT":
                pixel_tensors = batch['image_tensors']
                res = model(x, pixel_tensors=pixel_tensors)
            elif args.train_stage=="VideoLM_PT" or args.train_stage == "VideoLM_SFT":
                pixel_tensors = batch['video_tensor']
                res = model(x, pixel_tensors=pixel_tensors)
            elif args.train_stage=="PT":
                # Build the attention mask for packed pretrain data: documents
                # are concatenated with EOS separators, and a token must not
                # attend across a document boundary.
                micro_batch_size, seq_length = x.size()
                position_ids = torch.arange(seq_length, dtype=torch.long,
                            device=x.device)
                position_ids = position_ids.unsqueeze(0).expand_as(x)
                attention_mask = torch.tril(torch.ones((micro_batch_size, seq_length, seq_length), device=x.device, dtype=torch.uint8)).view(micro_batch_size, 1, seq_length, seq_length)
                for b in range(micro_batch_size):
                    eod_index = position_ids[b, x[b] == tokenizer.eos_token_id]
                    # Loop through EOD indices; stop once indices repeat
                    # (i == pre_eod_idx indicates trailing EOS padding).
                    pre_eod_idx = -1
                    for j in range(eod_index.size()[0]):
                        i = eod_index[j]
                        if i == pre_eod_idx:
                            break
                        pre_eod_idx = i + 1
                        # Block attention across the document boundary.
                        attention_mask[b, 0, (i + 1):, :(i + 1)] = 0

                res = model(x, attention_mask)
            elif args.train_stage=="SFT":
                # Build the attention mask for packed SFT data: each sample
                # ends at the first EOS following an "assistant" marker token.
                micro_batch_size, seq_length = x.size()
                position_ids = torch.arange(seq_length, dtype=torch.long,
                            device=x.device)
                position_ids = position_ids.unsqueeze(0).expand_as(x)
                attention_mask = torch.tril(torch.ones((micro_batch_size, seq_length, seq_length), device=x.device, dtype=torch.uint8)).view(micro_batch_size, 1, seq_length, seq_length)
                for b in range(micro_batch_size):
                    # Find the positions of the assistant marker token.
                    # NOTE(review): assumes token id 1078 is the "assistant"
                    # marker for this tokenizer — confirm if it changes.
                    tokenizer_assistant_id = 1078
                    assistant_indices = position_ids[b, x[b] == tokenizer_assistant_id]
                    eod_index=[]
                    # For each assistant position, locate the first
                    # eos_token_id that follows it.
                    for assistant_idx in assistant_indices.tolist():
                        for i in range(assistant_idx + 1, seq_length):
                            if x[b][i] == tokenizer.eos_token_id:
                                eod_index.append(i)
                                break
                    # Block attention across each sample boundary.
                    for i in eod_index:
                        attention_mask[b, 0, (i + 1):, :(i + 1)] = 0

                res = model(x, attention_mask)
            else:
                res = model(x)
            # Per-token cross-entropy, reshaped back to (batch, seq) so the
            # loss mask can be applied element-wise.
            loss = loss_fct(
                res.logits.view(-1, res.logits.size(-1)),
                y.view(-1)
            ).view(y.size())
            if args.train_stage=="Distillation":
                # Special think/answer tag tokens.
                start_of_think_ids = tokenizer('<think>').input_ids
                end_of_think_ids = tokenizer('</think>').input_ids
                # Bug fix: the opening answer tag was '</answer>' (copy-paste
                # of the line below), so '<answer>' was never up-weighted.
                start_of_answer_ids = tokenizer('<answer>').input_ids
                end_of_answer_ids = tokenizer('</answer>').input_ids
                sp_ids = torch.isin(y.view(-1),
                torch.tensor(start_of_think_ids + end_of_think_ids
                                + start_of_answer_ids + end_of_answer_ids
                                ).to(args.device))
                # Apply extra (10x) loss weight at the tag positions; note the
                # normalizer is the pre-boost mask sum.
                loss_mask = loss_mask.view(-1)
                loss_mask_sum = loss_mask.sum()
                loss_mask[sp_ids] = 10
                loss_mask = loss_mask.view(y.size())
                loss = (loss * loss_mask).sum() / loss_mask_sum
            else:
                loss = (loss * loss_mask).sum() / loss_mask.sum()
            loss += res.aux_loss
        return loss

def main():
    """Training entry point: build model/data/optimizer, wrap with DeepSpeed,
    run the epoch loop, and save the final checkpoint.

    Requires torch.distributed to be initialized before being called.
    """
    args = parse_args()
    model, tokenizer = init_model_tokenizer(args)
    train_loader=  init_dataloader(args, tokenizer)   
    #AdamOptimizer = FusedAdam
    optimizer = optim.AdamW(get_train_parameter(model, args),
                              lr=args.learning_rate,
                              betas=(0.9, 0.95))

    # Optimizer updates per epoch, after gradient accumulation.
    num_update_steps_per_epoch = math.ceil(
        len(train_loader) / args.gradient_accumulation_steps)
    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps,
        num_training_steps=args.num_train_epochs * num_update_steps_per_epoch,
    )
    ds_config = get_deepspeed_args(args)
    # DeepSpeed wraps model/optimizer/scheduler (ZeRO partitioning, mixed
    # precision, gradient accumulation).
    model, optimizer, _, lr_scheduler = deepspeed.initialize(
        model=model,
        optimizer=optimizer,
        args=args,
        config=ds_config,
        lr_scheduler=lr_scheduler,
        dist_init_required=True)

    for epoch in range(args.num_train_epochs):
        Logger0(
            f"Beginning of Epoch {epoch+1}/{args.num_train_epochs}, Total Micro Batches {len(train_loader)}, num_update_steps_per_epoch: {num_update_steps_per_epoch} ")
        
        model.train()
        for step, batch in enumerate(train_loader):
            start = time.time()
            batch = to_device(batch, args.device)
            loss = compute_loss_process(model, args, batch, tokenizer)
            # DeepSpeed engine handles loss scaling and accumulation
            # internally in backward()/step().
            model.backward(loss)
            model.step()
            end = time.time()
            
            # Log once per accumulation window.
            if step % args.gradient_accumulation_steps == 0:
                Logger0(
                    'Epoch:[{}/{}]({}/{}) loss:{:.3f} lr:{:.12f} Time:{}'.format(
                        epoch + 1,
                        args.num_train_epochs,
                        step,
                        len(train_loader),
                        loss.item(),
                        optimizer.param_groups[-1]['lr'],
                        end-start))

    # save_hf_format(model, tokenizer, args)
    save_model_process(model, args)


def init_distributed_mode():
    """Initialize torch.distributed (NCCL) and bind this process to its GPU.

    Sets the module-level `local_rank` and `DEVICE` (e.g. "cuda:3") from the
    torchrun-provided environment variables.
    """
    global local_rank, DEVICE

    dist.init_process_group(backend="nccl")
    # `rank` and `world_size` are read but unused; the lookups still fail
    # fast if the script was not launched under a torchrun-style environment.
    rank = int(os.environ["RANK"])
    local_rank = int(os.environ["LOCAL_RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    DEVICE = f"cuda:{local_rank}"
    torch.cuda.set_device(DEVICE)
    
# Entry point: initialize torch.distributed first, then run training.
if __name__ == '__main__':
    init_distributed_mode()
    main()

