# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

"""General utilities."""
import os
import sys
from datetime import datetime

import torch

try:
    from transformer_engine.pytorch.optimizers import multi_tensor_applier, multi_tensor_l2norm
except ImportError:
    try:
        from apex.multi_tensor_apply import multi_tensor_applier
    except ImportError:
        multi_tensor_applier = None

    try:
        from amp_C import multi_tensor_l2norm
    except ImportError:
        import warnings
        warnings.warn(
            f'Transformer Engine and Apex are not installed. '
            'Falling back to local implementations of '
            'multi_tensor_applier and multi_tensor_l2norm'
        )

        from megatron.core.utils import (
            local_multi_tensor_l2_norm as multi_tensor_l2norm,
            local_multi_tensor_applier as multi_tensor_applier,
        )

from megatron.core import DistributedDataParallel as DDP
from megatron.core import mpu
from megatron.core.tensor_parallel import param_is_not_tensor_parallel_duplicate
from megatron.legacy.model import Float16Module
from megatron.legacy.model.module import param_is_not_shared
from megatron.training import get_adlr_autoresume, get_args

ALL_MODULE_WRAPPER_CLASSNAMES = (DDP, Float16Module)

def get_batch_on_this_cp_rank(batch):
    """ Slice batch input along sequence dimension into multiple chunks,
        which are parallelized across GPUs in a context parallel group.
    """

    # With causal masking, each token only attends to its prior tokens. Simply split
    # sequence into CP chunks can result in severe load imbalance. That's to say, chunks
    # at the end of sequence have bigger workload than others. To address this issue,
    # we split sequence into 2*CP ranks. Assuming CP=2, we then get 4 chunks, chunk_0
    # and chunk_3 are assigned to GPU0, chunk_1 and chunk_2 are assigned to GPU1, so
    # that we can get balanced workload among GPUs in a context parallel group.
    args = get_args()
    cp_size = args.context_parallel_size
    if cp_size > 1:
        cp_rank = mpu.get_context_parallel_rank()
        # The two chunk ids owned by this rank (one from the front, one mirrored
        # from the back) are the same for every tensor in the batch, so build the
        # gather index once instead of once per key. Staged through pinned host
        # memory so the async H2D copy can overlap.
        index = torch.tensor(
            [cp_rank, (2 * cp_size - cp_rank - 1)],
            device="cpu", pin_memory=True
        ).cuda(non_blocking=True)
        for key, val in batch.items():
            if val is not None:
                # Attention masks carry an extra leading dim, so their sequence
                # axis is 2 rather than 1.
                seq_dim = 1 if key not in ['attention_mask', 'feature_attention_mask'] else 2
                # Split the sequence axis into 2*cp_size chunks...
                val = val.view(
                    *val.shape[0:seq_dim],
                    2 * cp_size,
                    val.shape[seq_dim] // (2 * cp_size),
                    *val.shape[(seq_dim + 1) :],
                )
                # ...keep this rank's two chunks, and flatten back.
                val = val.index_select(seq_dim, index)
                val = val.view(*val.shape[0:seq_dim], -1, *val.shape[(seq_dim + 2) :])
                batch[key] = val

    return batch


def get_batch_on_this_tp_rank_batch(batch_in):
    """Broadcast one micro-batch from tensor-parallel rank 0 to every rank in
    its tensor-model-parallel group.

    Rank 0 first broadcasts the shape of ``input_features`` — the sentinel
    shape ``[1000, 1, 1]`` signals a batch with no audio features — then
    broadcasts the tensors themselves. Non-source ranks allocate matching
    receive buffers and participate in the same sequence of broadcasts.
    Which tensors are exchanged further depends on the pipeline stage.

    Returns the batch dict with tensors moved to the current CUDA device.
    """
    args = get_args()

    def _broadcast(item):
        # Broadcast is a collective: every rank in the TP group must issue the
        # same sequence of calls. ``None`` entries are skipped on all ranks
        # alike, so they keep the sequences aligned.
        if item is not None:
            torch.distributed.broadcast(
                item,
                mpu.get_tensor_model_parallel_src_rank(),
                group=mpu.get_tensor_model_parallel_group(),
            )

    data = batch_in
    dtype = torch.bfloat16 if args.bf16 else torch.float
    if mpu.get_tensor_model_parallel_rank() == 0:
        # Use .get() so a batch without the key behaves like one whose value
        # is None (the dict construction below already allows both).
        if data.get('input_features') is not None:
            input_feature_shape = torch.tensor(data['input_features'].shape).cuda(non_blocking=True)
        else:
            # Sentinel shape: tells receivers there are no audio features.
            input_feature_shape = torch.tensor([1000, 1, 1]).cuda(non_blocking=True)

        _broadcast(input_feature_shape)
        batch = {
            'input_features': data["input_features"].cuda(non_blocking=True).to(dtype) if data.get("input_features") is not None else None,
            'input_ids': data["input_ids"].cuda(non_blocking=True),
            'attention_mask': data["attention_mask"].cuda(non_blocking=True),
            'feature_attention_mask': data["feature_attention_mask"].cuda(non_blocking=True) if data.get("feature_attention_mask") is not None else None,
            'labels': data["labels"].cuda(non_blocking=True),
            'loss_mask': data["loss_mask"].cuda(non_blocking=True),
        }

        if args.pipeline_model_parallel_size == 1:
            _broadcast(batch['input_features'])
            _broadcast(batch['input_ids'])
            _broadcast(batch['attention_mask'])
            _broadcast(batch['feature_attention_mask'])
            _broadcast(batch['loss_mask'])
            _broadcast(batch['labels'])
        elif mpu.is_pipeline_first_stage():
            _broadcast(batch['input_features'])
            _broadcast(batch['input_ids'])
            _broadcast(batch['attention_mask'])
            _broadcast(batch['feature_attention_mask'])
            # 'feature_lengths' is never placed in the batch above, so this
            # used to raise KeyError; .get() makes it a harmless no-op while
            # keeping the broadcast slot for a future re-enable.
            _broadcast(batch.get('feature_lengths'))
            _broadcast(batch['labels'])
            _broadcast(batch['loss_mask'])
        elif mpu.is_pipeline_last_stage():
            # The last stage only needs mask/label tensors.
            batch['input_features'] = None
            batch['feature_attention_mask'] = None
            batch['feature_lengths'] = None
            batch['input_ids'] = None
            _broadcast(batch['attention_mask'])
            _broadcast(batch['labels'])
            _broadcast(batch['loss_mask'])

    else:
        # Receive the feature shape first; the buffer's initial contents are
        # irrelevant, broadcast overwrites them in place.
        input_feature_shape = torch.tensor([1, 2, 3], dtype=torch.int64).cuda(non_blocking=True)
        _broadcast(input_feature_shape)
        if (input_feature_shape[0].item() == 1000
                and input_feature_shape[1].item() == 1
                and input_feature_shape[2].item() == 1):
            # Sentinel shape: this micro-batch carries no audio features.
            batch = {
                'input_features': None,
                'input_ids': torch.empty((args.micro_batch_size, args.seq_length), dtype=torch.int64, device=torch.cuda.current_device()),
                'attention_mask': torch.empty((args.micro_batch_size, args.seq_length), dtype=torch.int64, device=torch.cuda.current_device()),
                'feature_attention_mask': None,
                'labels': torch.empty((args.micro_batch_size, args.seq_length), dtype=torch.int64, device=torch.cuda.current_device()),
                'loss_mask': torch.empty((args.micro_batch_size, args.seq_length), dtype=torch.float32, device=torch.cuda.current_device()),
            }

            # NOTE(review): with pipeline_model_parallel_size > 1 this branch
            # performs no broadcasts while rank 0's stage branches do — confirm
            # the featureless path is only exercised with PP == 1.
            if args.pipeline_model_parallel_size == 1:
                _broadcast(batch['input_ids'])
                _broadcast(batch['attention_mask'])
                _broadcast(batch['loss_mask'])
                _broadcast(batch['labels'])
        else:
            feature_am_shape = (input_feature_shape[0].item(), input_feature_shape[-1].item())
            batch = {
                'input_features': torch.empty(input_feature_shape.tolist(), dtype=dtype, device=torch.cuda.current_device()),
                'input_ids': torch.empty((args.micro_batch_size, args.seq_length), dtype=torch.int64, device=torch.cuda.current_device()),
                'attention_mask': torch.empty((args.micro_batch_size, args.seq_length), dtype=torch.int64, device=torch.cuda.current_device()),
                # NOTE(review): int32 here must match the dtype broadcast from
                # rank 0's dataloader tensor — confirm the loader emits int32.
                'feature_attention_mask': torch.empty(feature_am_shape, dtype=torch.int32, device=torch.cuda.current_device()),
                'labels': torch.empty((args.micro_batch_size, args.seq_length), dtype=torch.int64, device=torch.cuda.current_device()),
                'loss_mask': torch.empty((args.micro_batch_size, args.seq_length), dtype=torch.float32, device=torch.cuda.current_device()),
            }

            if args.pipeline_model_parallel_size == 1:
                _broadcast(batch['input_features'])
                _broadcast(batch['input_ids'])
                _broadcast(batch['attention_mask'])
                _broadcast(batch['feature_attention_mask'])
                _broadcast(batch['loss_mask'])
                _broadcast(batch['labels'])
            elif mpu.is_pipeline_first_stage():
                _broadcast(batch['input_features'])
                _broadcast(batch['input_ids'])
                _broadcast(batch['attention_mask'])
                _broadcast(batch['feature_attention_mask'])
                # Mirrors rank 0's no-op slot for the absent 'feature_lengths'
                # key (was a KeyError before).
                _broadcast(batch.get('feature_lengths'))
                _broadcast(batch['labels'])
                _broadcast(batch['loss_mask'])
            elif mpu.is_pipeline_last_stage():
                batch['input_features'] = None
                batch['feature_attention_mask'] = None
                batch['feature_lengths'] = None
                batch['input_ids'] = None
                _broadcast(batch['attention_mask'])
                _broadcast(batch['labels'])
                _broadcast(batch['loss_mask'])

    return batch

def get_batch_on_this_tp_rank_batch_IdealLLM(batch_in):
    """Broadcast an IdealLLM micro-batch from tensor-parallel rank 0 to the
    other ranks in its tensor-model-parallel group.

    Rank 0 first broadcasts the runtime shapes of ``input_features`` and
    ``input_ids`` so receivers can allocate matching buffers, then broadcasts
    the tensors themselves; the exact set exchanged depends on the pipeline
    stage. Returns the batch dict with tensors on the current CUDA device.
    """
    args = get_args()
    def _broadcast(item):
        # Collective call: every rank in the TP group must issue the same
        # sequence of broadcasts; None entries are skipped on all ranks alike.
        if item is not None:
           torch.distributed.broadcast(item, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group())
    data = batch_in
    use_bf16 = args.bf16
    dtype = torch.bfloat16 if use_bf16 else torch.float
    if mpu.get_tensor_model_parallel_rank() == 0:
        # NOTE(review): 'input_features' is indexed unconditionally here while
        # the dict guard below ("if 'input_features' in data") allows it to be
        # absent — confirm it is always present on the source rank.
        input_feature_shape = torch.tensor(data['input_features'].shape).cuda(non_blocking=True)
        input_ids_shape = torch.tensor(data['input_ids'].shape).cuda(non_blocking=True)
        _broadcast(input_feature_shape)
        _broadcast(input_ids_shape)
        batch = {
            'input_features': data["input_features"].cuda(non_blocking=True).to(dtype) if "input_features" in data else None,
            'input_ids': data["input_ids"].cuda(non_blocking=True),
            'target_lengths': data["target_lengths"].cuda(non_blocking=True),
            'raw_wav_lengths': data["raw_wav_lengths"].cuda(non_blocking=True),
            'language_id': data["language_id"].cuda(non_blocking=True),
        }

        if args.pipeline_model_parallel_size == 1:
            _broadcast(batch['input_features'])
            _broadcast(batch['input_ids'])
            _broadcast(batch['target_lengths'])
            _broadcast(batch['raw_wav_lengths'])
            _broadcast(batch['language_id'])
        elif mpu.is_pipeline_first_stage():
            # 'language_id' is only broadcast in the PP == 1 case above.
            _broadcast(batch['input_features'])
            _broadcast(batch['input_ids'])
            _broadcast(batch['target_lengths'])
            _broadcast(batch['raw_wav_lengths'])
        elif mpu.is_pipeline_last_stage():
            # Last stage does not need the audio features themselves.
            batch['input_features'] = None
            _broadcast(batch['target_lengths'])
            _broadcast(batch['raw_wav_lengths'])
            _broadcast(batch['input_ids'])

    else:
        # Non-source ranks receive the broadcast shape information first.
        # NOTE(review): the [1, 2] placeholders assume both shape tensors are
        # 2 elements long (2-D tensors on the source rank); a different rank
        # would make the broadcast element counts mismatch — confirm against
        # the data loader.
        input_feature_shape = torch.tensor([1,2]).cuda(non_blocking=True)
        input_ids_shape = torch.tensor([1,2]).cuda(non_blocking=True)
        _broadcast(input_feature_shape)
        _broadcast(input_ids_shape)    

        batch = {
            'input_features': torch.empty(input_feature_shape.tolist(), dtype=dtype, device=torch.cuda.current_device()),
            'input_ids': torch.empty(input_ids_shape.tolist(), dtype=torch.int64, device=torch.cuda.current_device()),
            'target_lengths': torch.empty(args.micro_batch_size, dtype=torch.int64, device=torch.cuda.current_device()),
            'raw_wav_lengths': torch.empty(args.micro_batch_size, dtype=torch.int64, device=torch.cuda.current_device()),
            'language_id': torch.empty(args.micro_batch_size, dtype=torch.int64, device=torch.cuda.current_device()),
        }
        if args.pipeline_model_parallel_size == 1:
            _broadcast(batch['input_features'])
            _broadcast(batch['input_ids'])
            _broadcast(batch['target_lengths'])
            _broadcast(batch['raw_wav_lengths'])
            _broadcast(batch['language_id'])
        elif mpu.is_pipeline_first_stage():
            _broadcast(batch['input_features'])
            _broadcast(batch['input_ids'])
            _broadcast(batch['target_lengths'])
            _broadcast(batch['raw_wav_lengths'])
        elif mpu.is_pipeline_last_stage():
            # NOTE(review): 'language_id' stays uninitialized (torch.empty) on
            # non-source ranks beyond PP == 1 — confirm it is unused there.
            batch['input_features'] = None
            _broadcast(batch['target_lengths'])
            _broadcast(batch['raw_wav_lengths'])
            _broadcast(batch['input_ids'])
    return batch


def get_batch_on_this_tp_rank_batch_IdealLLM_v2(batch_in):
    """Broadcast an IdealLLM-v2 micro-batch (which additionally carries
    attention mask, labels and loss mask) from tensor-parallel rank 0 to the
    other ranks in its tensor-model-parallel group.

    Rank 0 first broadcasts the runtime shapes of ``input_features`` and
    ``input_ids`` so receivers can size their buffers, then broadcasts the
    tensors themselves; the exact set exchanged depends on the pipeline
    stage. Returns the batch dict with tensors on the current CUDA device.
    """
    args = get_args()
    def _broadcast(item):
        # Collective call: every rank in the TP group must issue the same
        # sequence of broadcasts; None entries are skipped on all ranks alike.
        if item is not None:
           torch.distributed.broadcast(item, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group())
    data = batch_in
    use_bf16 = args.bf16
    dtype = torch.bfloat16 if use_bf16 else torch.float
    if mpu.get_tensor_model_parallel_rank() == 0:
        # NOTE(review): 'input_features' is indexed unconditionally here while
        # the dict guard below allows it to be absent — confirm it is always
        # present on the source rank.
        input_feature_shape = torch.tensor(data['input_features'].shape).cuda(non_blocking=True)
        input_ids_shape = torch.tensor(data['input_ids'].shape).cuda(non_blocking=True)
        _broadcast(input_feature_shape)
        _broadcast(input_ids_shape)
        batch = {
            'input_features': data["input_features"].cuda(non_blocking=True).to(dtype) if "input_features" in data else None,
            'input_ids': data["input_ids"].cuda(non_blocking=True),
            'attention_mask': data["attention_mask"].cuda(non_blocking=True),
            'raw_wav_lengths': data["raw_wav_lengths"].cuda(non_blocking=True),
            'language_id': data["language_id"].cuda(non_blocking=True),
            'labels': data["labels"].cuda(non_blocking=True),
            'loss_mask': data["loss_mask"].cuda(non_blocking=True)
        }

        if args.pipeline_model_parallel_size == 1:
            _broadcast(batch['input_features'])
            _broadcast(batch['input_ids'])
            _broadcast(batch['attention_mask'])
            _broadcast(batch['raw_wav_lengths'])
            _broadcast(batch['language_id'])
            _broadcast(batch['labels'])
            _broadcast(batch['loss_mask'])
        elif mpu.is_pipeline_first_stage():
            _broadcast(batch['input_features'])
            _broadcast(batch['input_ids'])
            _broadcast(batch['attention_mask'])
            _broadcast(batch['raw_wav_lengths'])
            _broadcast(batch['language_id'])
            _broadcast(batch['labels'])
            _broadcast(batch['loss_mask'])
        elif mpu.is_pipeline_last_stage():
            # Last stage does not need the audio features or raw token ids.
            batch['input_features'] = None
            batch['input_ids'] = None
            _broadcast(batch['attention_mask'])
            _broadcast(batch['raw_wav_lengths'])
            _broadcast(batch['language_id'])
            _broadcast(batch['labels'])
            _broadcast(batch['loss_mask'])

    else:
        # Non-source ranks receive the broadcast shape information first.
        # NOTE(review): the [1, 2] placeholders assume both shape tensors are
        # 2 elements long (2-D tensors on the source rank) — confirm against
        # the data loader.
        input_feature_shape = torch.tensor([1,2]).cuda(non_blocking=True)
        input_ids_shape = torch.tensor([1,2]).cuda(non_blocking=True)
        _broadcast(input_feature_shape)
        _broadcast(input_ids_shape)    

        batch = {
            'input_features': torch.empty(input_feature_shape.tolist(), dtype=dtype, device=torch.cuda.current_device()),
            'input_ids': torch.empty(input_ids_shape.tolist(), dtype=torch.int64, device=torch.cuda.current_device()),
            # NOTE(review): 'target_lengths' is allocated here but never
            # broadcast, and rank 0's batch has no such key — on non-source
            # ranks it holds uninitialized memory; confirm it is unused.
            'target_lengths': torch.empty(args.micro_batch_size, dtype=torch.int64, device=torch.cuda.current_device()),
            'raw_wav_lengths': torch.empty(args.micro_batch_size, dtype=torch.int64, device=torch.cuda.current_device()),
            'language_id': torch.empty(args.micro_batch_size, dtype=torch.int64, device=torch.cuda.current_device()),
            'attention_mask': torch.empty((args.micro_batch_size, input_ids_shape[1]), dtype=torch.int64, device=torch.cuda.current_device()),
            'labels': torch.empty((args.micro_batch_size, input_ids_shape[1]), dtype=torch.int64, device=torch.cuda.current_device()),
            'loss_mask': torch.empty((args.micro_batch_size, input_ids_shape[1]), dtype=torch.float32, device=torch.cuda.current_device()),
        }
        if args.pipeline_model_parallel_size == 1:
            _broadcast(batch['input_features'])
            _broadcast(batch['input_ids'])
            _broadcast(batch['attention_mask'])
            _broadcast(batch['raw_wav_lengths'])
            _broadcast(batch['language_id'])
            _broadcast(batch['labels'])
            _broadcast(batch['loss_mask'])
        elif mpu.is_pipeline_first_stage():
            _broadcast(batch['input_features'])
            _broadcast(batch['input_ids'])
            _broadcast(batch['attention_mask'])
            _broadcast(batch['raw_wav_lengths'])
            _broadcast(batch['language_id'])
            _broadcast(batch['labels'])
            _broadcast(batch['loss_mask'])
        elif mpu.is_pipeline_last_stage():
            batch['input_features'] = None
            batch['input_ids'] = None
            _broadcast(batch['attention_mask'])
            _broadcast(batch['raw_wav_lengths'])
            _broadcast(batch['language_id'])
            _broadcast(batch['labels'])
            _broadcast(batch['loss_mask'])    
    return batch
  

def get_ltor_masks_and_position_ids_sft(
      data,
      eod_token,
      reset_position_ids,
      reset_attention_mask,
      eod_mask_loss,
      pad_mask_loss=True,
      seq_limit_window=None,
      prefix_indices=None,
      loss_on_targets_only=False,
      pad_token=2,
      sep_token=0,
      eor_token=10,
      sft_not_mask_prompt_in_attn=False,
      audio_output_lengths=None,
      audio_token_index=151646
    ):
    """
    Build masks and position id for left to right model.

    Returns ``(attention_mask, loss_mask, position_ids)`` where
    ``attention_mask`` is a boolean tensor (True = masked) of shape
    ``(att_mask_batch, 1, seq, seq)``, ``loss_mask`` is a float tensor of
    ``data``'s shape, and ``position_ids`` is a long tensor of ``data``'s
    shape.

    :param prefix_indices: argument can have multiple types:
        - None signifies that the model is fully autoregressive.
        - List[int] the argument holds all prefix indices that split a row into an input and a target
        - List[List[int]] the argument holds all prefix indices that split documents between input and target.
    :param loss_on_targets_only: bool to determine if we should mask loss on prefix.
    :param audio_output_lengths: length of audio embeddings that replace specific tokens in input_ids.
    :param audio_token_index: the index of tokens to be replaced with audio embeddings.
    """

    # Support a multi-token separator (e.g. a chatML template) as well as a
    # single separator token id.
    if not isinstance(sep_token, list):
        sep_token = [sep_token]
    sep_length = len(sep_token)
    sep_token = data.new_tensor(sep_token)

    # Hard-coded off; every sparse_attn_mask branch below is currently dead
    # and kept only for parity with the original implementation.
    sparse_attn_mask = False

    # Extract batch size and sequence length.
    micro_batch_size, seq_length = data.size()

    # A per-sample attention mask is only needed when masks can differ
    # across the batch; otherwise one mask is shared (att_mask_batch == 1).
    if reset_attention_mask or prefix_indices is not None:
        att_mask_batch = micro_batch_size
    else:
        att_mask_batch = 1

    if sparse_attn_mask:
        # Sparse form: per-position [lower, upper] attention bounds.
        attention_mask = torch.zeros((att_mask_batch, seq_length, 2), device=data.device).to(torch.int32)
        attention_mask[:, :, 0] += torch.arange(seq_length, device=data.device)  # m_l
        attention_mask[:, :, 1] += seq_length - 1  # m_u
    else:
        # Dense lower-triangular (causal) mask; 1 = attend at this point,
        # converted to boolean "masked" form at the end.
        attention_mask = torch.tril(torch.ones(
            (att_mask_batch, seq_length, seq_length), device=data.device)).view(
            att_mask_batch, 1, seq_length, seq_length)

    # Loss mask: exclude EOD and/or padding positions from the loss.
    loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
    if eod_mask_loss:
        loss_mask[data == eod_token] = 0.0
    if pad_mask_loss:
        loss_mask[data == pad_token] = 0.0

    # Position ids.
    position_ids = torch.arange(seq_length, dtype=torch.long,
                                device=data.device)
    position_ids = position_ids.unsqueeze(0).expand_as(data)
    # expand() returns a view; clone before the in-place edits below.
    if reset_position_ids:
        position_ids = position_ids.clone()

    if reset_position_ids or reset_attention_mask:
        # Loop through the batches:
        for b in range(micro_batch_size):
            # Prompt loss masking: zero the loss over every prompt span,
            # i.e. from the previous eod/eor (or sequence start) up to the
            # separator that ends the prompt.
            if loss_on_targets_only:
                eor_index = [(idx, 'eor') for idx in position_ids[b, data[b] == eor_token]]

                # Find positions where the (possibly multi-token) separator
                # ends; record the index of its last token.
                sep_index = []
                for idx in range(data.size(1) - sep_length + 1):
                    if torch.equal(data[b, idx:idx + sep_length], sep_token):
                        sep_index.append((position_ids[b, idx + sep_length - 1].item(), 'sep'))

                eod_index = [(idx, 'eod') for idx in position_ids[b, data[b] == eod_token]]
                all_index = sorted(eor_index + sep_index + eod_index)
                for i, idx in enumerate(all_index):
                    if idx[1] == 'sep':
                        if i == 0:
                            loss_mask[b][0: idx[0]] = 0.0
                        else:
                            pre = all_index[i - 1]
                            if (pre[1] == 'eod' or pre[1] == 'eor'):
                                loss_mask[b][pre[0]: idx[0]] = 0.0
                loss_mask[b][data[b] == eor_token] = 0.0

            # Find indices where the EOD token is.
            eod_index = position_ids[b, data[b] == eod_token]
            # Detach indices from positions if going to modify positions.
            if reset_position_ids:
                eod_index = eod_index.clone()

            # For each EOD: block attention across the document boundary and
            # restart position ids for the next document.
            prev_index = 0
            for j in range(eod_index.size()[0]):
                i = eod_index[j]
                if reset_attention_mask:
                    if sparse_attn_mask:
                        attention_mask[b, :(i + 1), 1] = torch.min(attention_mask[b, :(i + 1), 1], attention_mask[b, :(i + 1), 1] * 0 + i)
                    else:
                        attention_mask[b, 0, (i + 1):, :(i + 1)] = 0
                if reset_position_ids:
                    position_ids[b, (i + 1):] -= (i + 1 - prev_index)
                    prev_index = i + 1

            # Sliding-window limit: mask attention to tokens farther back
            # than seq_limit_window.
            if seq_limit_window and seq_limit_window < seq_length:
                if sparse_attn_mask:
                    assert not sft_not_mask_prompt_in_attn, 'sparse_attn_mask dose not support sft_not_mask_prompt_in_attn'
                    attention_mask[b, :, 1] = torch.min(attention_mask[b, :, 1], attention_mask[b, :, 0] + seq_limit_window)
                else:
                    rows_indices = torch.arange(attention_mask[b][0].size(0), device=data.device).unsqueeze(1).expand_as(attention_mask[b][0])
                    cols_indices = torch.arange(attention_mask[b][0].size(1), device=data.device).unsqueeze(0).expand_as(attention_mask[b][0])
                    row_minus_col = rows_indices - cols_indices
                    selected_elements = row_minus_col > seq_limit_window
                    if sft_not_mask_prompt_in_attn:
                        # Exempt prompt spans (prev eod/eor .. sep) from the
                        # window restriction.
                        eor_index = [(idx, 'eor') for idx in position_ids[b, data[b] == eor_token]]
                        sep_index = []
                        for idx in range(data.size(1) - sep_length + 1):
                            if torch.equal(data[b, idx:idx + sep_length], sep_token):
                                sep_index.append((position_ids[b, idx + sep_length - 1].item(), 'sep'))
                        eod_index = [(idx, 'eod') for idx in position_ids[b, data[b] == eod_token]]
                        all_index = sorted(eor_index + sep_index + eod_index)
                        for i, idx in enumerate(all_index):
                            if idx[1] == 'sep':
                                if i == 0:
                                    selected_elements[0:idx[0], 0:idx[0]] = False
                                else:
                                    pre = all_index[i - 1]
                                    if (pre[1] == 'eod' or pre[1] == 'eor'):
                                        selected_elements[pre[0]: idx[0], pre[0]: idx[0]] = False
                    attention_mask[b][0][selected_elements] = 0

    # Convert attention mask to binary (True = masked position).
    if not sparse_attn_mask:
        attention_mask = (attention_mask < 0.5)

    # Zero the loss over positions that will be replaced by audio embeddings.
    if audio_output_lengths is not None and audio_token_index is not None:
        # Normalize once, outside the batch loop (previously re-checked per
        # batch element).
        if isinstance(audio_token_index, int):
            audio_token_index = [audio_token_index]
        for b in range(micro_batch_size):
            audio_positions = [torch.where(data[b] == idx)[0] for idx in audio_token_index]
            for audio_len, audio_pos in zip(audio_output_lengths[b], audio_positions):
                if len(audio_pos) > 0:
                    audio_idx = audio_pos[0].item()
                    if audio_idx < seq_length:
                        # Only the loss mask is edited; data, position ids
                        # and attention mask are left untouched here.
                        loss_mask[b, audio_idx:audio_idx + audio_len] = 0.0

    return attention_mask, loss_mask, position_ids
