# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import torch

from megatron.training import get_args
from megatron.core import mpu
from megatron.core.tensor_parallel import copy_to_tensor_model_parallel_region
# from megatron import get_args, get_global_memory_buffer, mpu
from megatron.core.parallel_state import (
    get_tensor_model_parallel_group,
    get_tensor_model_parallel_rank,
    get_tensor_model_parallel_world_size,
)

from megatron.core.tensor_parallel.utils import VocabUtility


# from deepspeed.moe.global_vars import glob_queue


class _VocabParallelCrossEntropy(torch.autograd.Function):
    """Cross entropy computed directly on vocab-parallel logits.

    The vocab dimension is sharded across the tensor-model-parallel group,
    so the reductions over the vocabulary (max, predicted logit, sum of
    exponentials) are done with all-reduces instead of gathering the full
    logits on every rank.

    NOTE(review): ``forward`` mutates ``vocab_parallel_logits`` in place
    (max subtraction, exponentiation, normalization) to reuse its storage
    for the softmax saved for backward — callers must not rely on the
    input's original values afterwards.
    """

    @staticmethod
    def forward(ctx, vocab_parallel_logits, target):
        
        # glob_queue.sync_allgather_before_all2all()

        # Maximum value along vocab dimension across all GPUs
        # (for numerical stability of the softmax).
        logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
        torch.distributed.all_reduce(logits_max,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=get_tensor_model_parallel_group())
        # Subtract the maximum value (in place).
        vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1))

        # Get the partition's vocab indices.
        get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
        partition_vocab_size = vocab_parallel_logits.size()[-1]
        rank = get_tensor_model_parallel_rank()
        world_size = get_tensor_model_parallel_world_size()
        vocab_start_index, vocab_end_index = get_vocab_range(
            partition_vocab_size, rank, world_size)

        # Create a mask of valid vocab ids (1 means it needs to be masked):
        # targets outside this rank's [vocab_start_index, vocab_end_index).
        target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
        masked_target = target.clone() - vocab_start_index
        masked_target[target_mask] = 0

        # Get predicted-logits = logits[target].
        # For Simplicity, we convert logits to a 2-D tensor with size
        # [*, partition-vocab-size] and target to a 1-D tensor of size [*].
        logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
        masked_target_1d = masked_target.view(-1)
        arange_1d = torch.arange(start=0, end=logits_2d.size()[0],
                                 device=logits_2d.device)
        predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
        predicted_logits_1d = predicted_logits_1d.clone().contiguous()
        predicted_logits = predicted_logits_1d.view_as(target)
        # Zero out positions owned by other ranks so the SUM all-reduce
        # below reconstructs the true predicted logit everywhere.
        predicted_logits[target_mask] = 0.0
        # All reduce is needed to get the chunks from other GPUs.
        torch.distributed.all_reduce(predicted_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=get_tensor_model_parallel_group())

        # Sum of exponential of logits along vocab dimension across all GPUs.
        # exp_logits aliases vocab_parallel_logits: exp is written in place.
        exp_logits = vocab_parallel_logits
        torch.exp(vocab_parallel_logits, out=exp_logits)
        sum_exp_logits = exp_logits.sum(dim=-1)
        torch.distributed.all_reduce(sum_exp_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=get_tensor_model_parallel_group())

        # Loss = log(sum(exp(logits))) - predicted-logit.
        loss = torch.log(sum_exp_logits) - predicted_logits

        # Store softmax, target-mask and masked-target for backward pass.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)

        return loss

    @staticmethod
    def backward(ctx, grad_output):

        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target_1d = ctx.saved_tensors

        # All the inputs have softmax as their gradient.
        grad_input = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = grad_input.view(-1, partition_vocab_size)

        # Add the gradient from matching classes:
        # d(loss)/d(logit) = softmax - one_hot(target), restricted to the
        # target positions this rank owns (hence the (1 - mask) factor).
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0],
                                 device=grad_2d.device)
        grad_2d[arange_1d, masked_target_1d] -= (
            1.0 - target_mask.view(-1).float())

        # Finally elementwise multiplication with the output gradients.
        grad_input.mul_(grad_output.unsqueeze(dim=-1))

        return grad_input, None


class _VocabParallelLogSoftmax(torch.autograd.Function):
    """Log-softmax over a vocab dimension sharded across the
    tensor-model-parallel group, using all-reduces for the vocab reductions."""

    @staticmethod
    def forward(ctx, vocab_parallel_logits):

        # glob_queue.sync_allgather_before_all2all()

        # Maximum value along vocab dimension across all GPUs
        # (for numerical stability).
        logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
        torch.distributed.all_reduce(logits_max,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=get_tensor_model_parallel_group())
        # Subtract the maximum value (out of place; the caller's tensor is
        # not mutated, only the local name is rebound).
        vocab_parallel_logits = vocab_parallel_logits - logits_max.unsqueeze(dim=-1)

        # Sum of exponential of logits along vocab dimension across all GPUs.
        exp_logits = torch.exp(vocab_parallel_logits)
        sum_exp_logits = exp_logits.sum(dim=-1)
        torch.distributed.all_reduce(sum_exp_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=get_tensor_model_parallel_group())

        # Normalize in place: exp_logits now holds the softmax for backward.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        
        # log_softmax = (x - max) - log(sum(exp(x - max))).
        output = vocab_parallel_logits - torch.log(sum_exp_logits.unsqueeze(dim=-1))

        # save_for_backward will have no effect when you run in no_grad mode. 
        # We don't need to worry about it.
        ctx.save_for_backward(exp_logits)
        return output

    @staticmethod
    def backward(ctx, grad_outputs):
        grad_input = torch.zeros_like(grad_outputs)
        softmax = ctx.saved_tensors[0]


        # Compute the full-vocab row sum of the upstream gradient.
        sum_row = torch.sum(grad_outputs, dim=-1)
        torch.distributed.all_reduce(sum_row,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=get_tensor_model_parallel_group())

        # Compute softmax_j * sum_i(dL/dy_i).
        torch.mul(softmax, sum_row.unsqueeze(dim=-1), out=grad_input)

        # Log-softmax gradient: dL/dx_j = dL/dy_j - softmax_j * sum_i(dL/dy_i).
        grad_input = grad_outputs - grad_input

        return grad_input


class _VocabParallelSoftmax(torch.autograd.Function):
    """Softmax over a vocab dimension sharded across the
    tensor-model-parallel group, using all-reduces for the vocab reductions."""

    @staticmethod
    def forward(ctx, vocab_parallel_logits):

        # glob_queue.sync_allgather_before_all2all()

        # Maximum value along vocab dimension across all GPUs
        # (for numerical stability).
        logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
        torch.distributed.all_reduce(logits_max,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=get_tensor_model_parallel_group())
        # Subtract the maximum value (out of place; this rebinds the local
        # name, so the caller's tensor is not mutated by this line).
        vocab_parallel_logits = vocab_parallel_logits - logits_max.unsqueeze(dim=-1)

        # Sum of exponential of logits along vocab dimension across all GPUs.
        # exp_logits aliases the shifted logits; exp is written in place.
        exp_logits = vocab_parallel_logits
        torch.exp(vocab_parallel_logits, out=exp_logits)
        sum_exp_logits = exp_logits.sum(dim=-1)
        torch.distributed.all_reduce(sum_exp_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=get_tensor_model_parallel_group())

        # Normalize in place: exp_logits now holds the softmax.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))

        # save_for_backward will have no effect when you run in no_grad mode. 
        # We don't need to worry about it.
        ctx.save_for_backward(exp_logits)
        return exp_logits

    @staticmethod
    def backward(ctx, grad_outputs):
        grad_input = torch.zeros_like(grad_outputs)
        softmax = ctx.saved_tensors[0]

        # Compute p_ij * dp_ij.
        torch.mul(softmax, grad_outputs, out=grad_input)

        # Full-vocab row sum of p * dp.
        sum_row = torch.sum(grad_input, dim=-1)
        torch.distributed.all_reduce(sum_row,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=get_tensor_model_parallel_group())


        # Softmax gradient: dL/dx_j = p_j * (dL/dp_j - sum_i p_i dL/dp_i).
        grad_input -= torch.mul(softmax, sum_row.unsqueeze(dim=-1))

        return grad_input


class _VocabParallelGather(torch.autograd.Function):
    """Pick out ``logits[..., label]`` for every position when the vocab
    dimension is sharded across the tensor-model-parallel group."""

    @staticmethod
    def forward(ctx, vocab_parallel_logits, labels):
        # Determine which slice of the vocabulary this rank owns.
        local_vocab = vocab_parallel_logits.size()[-1]
        tp_rank = get_tensor_model_parallel_rank()
        tp_world = get_tensor_model_parallel_world_size()
        vocab_start_index, vocab_end_index = (
            VocabUtility.vocab_range_from_per_partition_vocab_size(
                local_vocab, tp_rank, tp_world))

        # True wherever the label lives on some other rank's shard.
        target_mask = (labels < vocab_start_index) | (labels >= vocab_end_index)
        # Shift labels into local coordinates; clamp off-shard ones to 0.
        masked_target = labels.clone() - vocab_start_index
        masked_target[target_mask] = 0

        # Flatten to 2D [tokens, local_vocab] and pull one logit per row.
        flat_logits = vocab_parallel_logits.view(-1, local_vocab)
        masked_target_1d = masked_target.view(-1)
        row_index = torch.arange(start=0, end=flat_logits.size()[0],
                                 device=flat_logits.device)
        picked = flat_logits[row_index, masked_target_1d].clone().contiguous()
        predicted_logits = picked.view_as(labels)
        # Each rank contributes zeros for labels it does not own, so the
        # SUM all-reduce reconstructs the true gathered logit everywhere.
        predicted_logits[target_mask] = 0.0

        torch.distributed.all_reduce(predicted_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=get_tensor_model_parallel_group())

        ctx.save_for_backward(vocab_parallel_logits, target_mask,
                              masked_target_1d)

        return predicted_logits

    @staticmethod
    def backward(ctx, grad_outputs):
        logits, target_mask, masked_target_1d = ctx.saved_tensors
        grad_inputs = torch.zeros_like(logits)
        local_vocab = logits.size()[-1]
        flat_grad = grad_inputs.view(-1, local_vocab)
        flat_out_grad = grad_outputs.view(-1)
        row_index = torch.arange(start=0, end=flat_grad.size()[0],
                                 device=flat_grad.device)
        # Route each upstream gradient back to the gathered position...
        flat_grad[row_index, masked_target_1d] = flat_out_grad[row_index]
        # ...then zero rows whose label belongs to a different shard.
        grad_inputs[target_mask, :] = 0.0

        return grad_inputs, None


def fused_fowrward(lm_output_tile,
                   labels_tile,
                   logit_weights,
                   seq_len,
                   tile_size,
                   partition_vocab_size,
                   vocab_start_index,
                   vocab_end_index,
                   world_size,
                   rank):
    """Classifier matmul + vocab-parallel cross entropy for one token tile.

    Computes logits = lm_output_tile @ logit_weights.T in fp32 (inside a
    shared global buffer), then the numerically-stable cross entropy against
    ``labels_tile`` with all vocab-dimension reductions done by all-reduce
    over the tensor-model-parallel group.

    Returns (softmax, loss, target_mask, masked_target_1d); ``softmax``
    aliases the shared "tile" buffer and is overwritten by the next call.

    NOTE(review): the name is misspelled ("fowrward"), but it is referenced
    elsewhere in this file, so renaming would be a breaking change.
    NOTE(review): seq_len, world_size and rank are accepted but unused here.
    """

    tile_buffer = mpu.get_global_memory_buffer().get_tensor(  # tile_size(bs*sl), vocab
        [tile_size, partition_vocab_size], torch.float32, "tile")
    tile_buffer_half = mpu.get_global_memory_buffer().get_tensor(  # tile_size(bs*sl), vocab
        [tile_size, partition_vocab_size], lm_output_tile.dtype, "tile")
    
    # word_embedding_matmul in the activation dtype, then upcast to fp32.
    torch.matmul(lm_output_tile, logit_weights.t(), out=tile_buffer_half)
    tile_buffer.copy_(tile_buffer_half)

    # sync
    # glob_queue.sync_allgather_before_all2all()

    # cross_entropy: subtract the global max for numerical stability.
    logits_max = torch.max(tile_buffer, dim=-1)[0]  # torch.max with `dim` returns a (values, indices) tuple
    torch.distributed.all_reduce(logits_max,  # tile_size(bs*sl)
                                op=torch.distributed.ReduceOp.MAX,
                                group=get_tensor_model_parallel_group())
    tile_buffer.sub_(logits_max.unsqueeze(dim=-1))

    # Mask labels that live on another rank's vocab shard.
    target_mask = (labels_tile < vocab_start_index) | (labels_tile >= vocab_end_index)
    masked_target = labels_tile.clone().contiguous() - vocab_start_index  # tile_size(bs*sl)
    masked_target[target_mask] = 0  # mask extra position index

    logits_2d = tile_buffer.view(-1, partition_vocab_size)
    masked_target_1d = masked_target.view(-1)
    arange_1d = torch.arange(start=0, end=logits_2d.size()[0],
                                device=logits_2d.device)
    predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
    predicted_logits_1d = predicted_logits_1d.clone().contiguous()
    predicted_logits = predicted_logits_1d.view_as(labels_tile)
    # Zero out off-shard entries so the SUM all-reduce reconstructs them.
    predicted_logits[target_mask] = 0.0
    torch.distributed.all_reduce(predicted_logits,
                                op=torch.distributed.ReduceOp.SUM,
                                group=get_tensor_model_parallel_group())
    # exp_logits aliases tile_buffer: exp is taken in place.
    exp_logits = tile_buffer  # tile_size(bs*sl), vocab
    torch.exp(tile_buffer, out=exp_logits)
    sum_exp_logits = exp_logits.sum(dim=-1)  # tile_size(bs*sl)
    torch.distributed.all_reduce(sum_exp_logits,
                                    op=torch.distributed.ReduceOp.SUM,
                                    group=get_tensor_model_parallel_group())
    # Normalize in place: tile_buffer now holds the softmax.
    exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
    loss = torch.log(sum_exp_logits) - predicted_logits  # tile_size(bs*sl)

    return exp_logits, loss, target_mask, masked_target_1d

def seq_parallel_fwd(lm_output, sequence_parallel, world_size):
    """Assemble the full-sequence input for the classifier matmul.

    When ``sequence_parallel`` is True, all-gather ``lm_output`` along its
    first (sequence) dimension over the tensor-model-parallel group into the
    shared "mpu" global buffer; otherwise just mark the tensor as copied to
    the tensor-parallel region for autograd bookkeeping.

    Args:
        lm_output: hidden states, sequence-first.
        sequence_parallel: whether sequence parallelism is enabled.
        world_size: tensor-model-parallel world size (gather factor).

    Returns:
        The gathered tensor (aliases the shared buffer when
        sequence_parallel is True, so it is overwritten by the next call).
    """
    if sequence_parallel:
        # Fix: dropped the unused `model_parallel` local, which made a dead
        # call to mpu.get_tensor_model_parallel_world_size().
        input_parallel = lm_output
        dim_size = list(input_parallel.size())
        dim_size[0] = dim_size[0] * world_size
        all_gather_buffer = \
            mpu.get_global_memory_buffer().get_tensor(dim_size, input_parallel.dtype, "mpu")
        torch.distributed._all_gather_base(
            all_gather_buffer,
            input_parallel,
            group=get_tensor_model_parallel_group())
        total_input = all_gather_buffer
    else:
        total_input = copy_to_tensor_model_parallel_region(lm_output)
    return total_input

def seq_parallel_bwd(lm_output, sequence_parallel, world_size):
    """Launch an async all-gather of ``lm_output`` over the tensor-parallel
    group and return ``(total_input, handle)``.

    ``handle`` is the async work handle to wait on before reading
    ``total_input`` when sequence_parallel is True, and None otherwise.
    ``total_input`` aliases the shared "mpu" global buffer in the
    sequence-parallel case.
    """
    if sequence_parallel:
        input_parallel = lm_output
        dim_size = list(input_parallel.size())
        dim_size[0] = dim_size[0] * world_size
        all_gather_buffer = \
            mpu.get_global_memory_buffer().get_tensor(dim_size, input_parallel.dtype, "mpu")
        handle = torch.distributed._all_gather_base(
            all_gather_buffer,
            input_parallel,
            group=get_tensor_model_parallel_group(), async_op=True)
        # Dummy op — presumably queued so the async communication above can
        # overlap with subsequent compute (TODO confirm; mirrors the trick
        # used in Megatron's linear-layer backward).
        _ = torch.empty(1, device=lm_output.device) + 1
        total_input = all_gather_buffer
    else:
        total_input = copy_to_tensor_model_parallel_region(lm_output)
        handle = None
    return total_input, handle

class _FusedClassifierCrossEntropy(torch.autograd.Function):
    """Memory-saving fused LM head + vocab-parallel cross entropy.

    Forward computes the classifier matmul and the cross entropy tile by
    tile under ``torch.no_grad`` and saves only the small tensors
    (lm_output, logit_weights, labels); backward re-runs the fused forward
    per tile to rebuild the softmax, so the full [tokens, vocab] logits
    tensor is never stored (recompute-instead-of-store).
    """
    @staticmethod
    def forward(ctx, lm_output, labels, logit_weights,
                get_key_value, parallel_output,
                forward_method_parallel_output,
                tile_size=1):
        """Return the per-token loss with shape (seq_len, batch_size)."""
        ### Setting
        # if torch.distributed.get_rank() == 0:
        #     print(f"lm_output size is {lm_output.size()}, logit_weights size is {logit_weights.size()}")
        args = get_args()
        sequence_parallel = args.sequence_parallel
        # Stash the non-tensor state backward needs.
        ctx.sequence_parallel = sequence_parallel
        ctx.tile_size = tile_size
        # NOTE(review): these .size() calls run before the get_key_value
        # unpacking below; if lm_output were a (output, presents) tuple at
        # this point they would fail — confirm callers never combine
        # get_key_value=True with this path.
        ctx.lm_output_shape0 = lm_output.size(0)
        ctx.lm_output_shape1 = lm_output.size(1)
        if get_key_value:
            lm_output, presents = lm_output

        if forward_method_parallel_output is not None:
            parallel_output = forward_method_parallel_output

        labels = labels.transpose(0,1)  # bs, sl -> sl, bs
        losses = []
        world_size = get_tensor_model_parallel_world_size()
        get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
        partition_vocab_size = logit_weights.size()[0]
        rank = get_tensor_model_parallel_rank()
        vocab_start_index, vocab_end_index = get_vocab_range(
            partition_vocab_size, rank, world_size)

        ### Classifier
        # Gather the full sequence on every rank when sequence parallelism
        # is enabled; otherwise just mark the copy for autograd.
        total_input = seq_parallel_fwd(lm_output, sequence_parallel, world_size)
        batch_size = total_input.size(1)
        seq_len = total_input.size(0)
        ### combine batch_size and seq_len to one dim
        all_token_num = batch_size * seq_len
        total_input = total_input.view(all_token_num, -1)
        labels = labels.reshape(all_token_num, *labels.shape[2:])  # debug
        tile_num = all_token_num // tile_size
        assert all_token_num // tile_size and all_token_num % tile_size == 0

        lm_output_tiles = torch.split(total_input, tile_size, dim=0)
        labels_tiles = torch.split(labels, tile_size, dim=0)
        # tile_num = batch_size // tile_size
        # assert batch_size // tile_size and batch_size % tile_size == 0

        # lm_output_tiles = torch.split(total_input, tile_size, dim=1)
        # labels_tiles = torch.split(labels, tile_size, dim=1)

        # no_grad: the loss is produced tile by tile; gradients are derived
        # analytically in backward, so no autograd graph is needed here.
        with torch.no_grad():
            for i in range(tile_num):
                lm_output_tile = lm_output_tiles[i]
                labels_tile = labels_tiles[i]
                _, loss, _, _ = fused_fowrward(lm_output_tile, labels_tile, logit_weights, 
                                            seq_len, tile_size, partition_vocab_size,
                                            vocab_start_index, vocab_end_index, world_size, rank)
                losses.append(loss)

        loss = torch.cat(losses, dim=0).reshape(seq_len, batch_size)  # sl*bs
        # Only the small tensors are saved; the [tokens, vocab] logits are
        # recomputed in backward.
        ctx.save_for_backward(lm_output, logit_weights, labels)

        return loss

    @staticmethod
    def backward(ctx, grad_output):
        # labels: tile_size(bs*sl) * 1; logit_weights is vocab; lm_output: origin shape
        lm_output, logit_weights, labels = ctx.saved_tensors
        world_size = get_tensor_model_parallel_world_size()
        
        total_input = seq_parallel_fwd(lm_output, ctx.sequence_parallel, world_size)  # [sl, bs, hd]

        batch_size = total_input.size(1)
        seq_len = total_input.size(0)
        all_token_num = batch_size * seq_len
        tile_num = all_token_num // ctx.tile_size

        get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
        partition_vocab_size = logit_weights.size()[0]
        rank = get_tensor_model_parallel_rank()
        vocab_start_index, vocab_end_index = get_vocab_range(
            partition_vocab_size, rank, world_size)
        
        total_input = total_input.view(all_token_num, -1)
        grad_output = grad_output.reshape(all_token_num, -1)
        
        # labels and grad_output should already be (tile_size, *) shape
        if ctx.sequence_parallel:
            # Re-tile labels/grad_output so each tile lines up with the
            # rank-local slice that is all-gathered per tile below.
            # NOTE(review): this interleaved split/cat layout must match the
            # all-gather ordering of seq_parallel_fwd — confirm against a
            # sequence-parallel run before modifying.
            lm_output = lm_output.view(all_token_num//mpu.get_tensor_model_parallel_world_size(), -1)
            lm_output_tiles = torch.split(lm_output, ctx.tile_size//mpu.get_tensor_model_parallel_world_size(), dim=0)
            # total_input_tiles = torch.split(total_input, ctx.tile_size, dim=0)
            labels = labels.reshape(mpu.get_tensor_model_parallel_world_size(), -1)
            labels_tiles = torch.cat(torch.split(labels, ctx.tile_size//mpu.get_tensor_model_parallel_world_size(), dim=-1))
            labels_tiles = labels_tiles.reshape(tile_num, mpu.get_tensor_model_parallel_world_size(), -1).reshape(-1, ctx.tile_size).contiguous()
            grad_output = grad_output.reshape(mpu.get_tensor_model_parallel_world_size(), -1)
            grad_outputs = torch.cat(torch.split(grad_output, ctx.tile_size//mpu.get_tensor_model_parallel_world_size(), dim=-1))
            grad_outputs = grad_outputs.reshape(tile_num, mpu.get_tensor_model_parallel_world_size(), -1).reshape(-1, ctx.tile_size).contiguous()
        else:
            lm_output = lm_output.view(all_token_num, -1)
            lm_output_tiles = torch.split(lm_output, ctx.tile_size, dim=0)
            labels_tiles = torch.split(labels, ctx.tile_size, dim=0)
            grad_outputs = torch.split(grad_output, ctx.tile_size, dim=0)

        grad_inputs = []
        grad_weights = torch.zeros_like(logit_weights, memory_format=torch.contiguous_format)

        with torch.no_grad():
            for i in range(tile_num):
                lm_output_tile = lm_output_tiles[i]
                # total_input_tile = total_input_tiles[i]
                # Recompute this tile's gathered input and softmax.
                total_input_tile = seq_parallel_fwd(lm_output_tile, ctx.sequence_parallel, world_size).reshape(ctx.tile_size, -1)
                labels_tile = labels_tiles[i].reshape(-1)
                grad_output_tile = grad_outputs[i].reshape(-1)
                # softmax: tile_size(bs*sl), vocab; target_mask: tile_size(bs*sl); masked_target_1d: tile_size(bs*sl)
                softmax, _, target_mask, masked_target_1d = fused_fowrward(total_input_tile, labels_tile, logit_weights, 
                                                            seq_len, ctx.tile_size, partition_vocab_size,
                                                            vocab_start_index, vocab_end_index, world_size, rank)
                # Cross-entropy gradient: softmax - one_hot(target) on the
                # positions this rank owns, scaled by the incoming gradient
                # (same math as _VocabParallelCrossEntropy.backward).
                grad_input = softmax
                partition_vocab_size = softmax.size()[-1]
                grad_2d = grad_input.view(-1, partition_vocab_size)
                arange_1d = torch.arange(start=0, end=grad_2d.size()[0],
                            device=grad_2d.device)
                grad_2d[arange_1d, masked_target_1d] -= (
                    1.0 - target_mask.contiguous().view(-1).float())
                grad_input.mul_(grad_output_tile.unsqueeze(dim=-1))  # 

                # Cast the fp32 gradient back to the activation dtype via
                # the shared "tile" buffer.
                tile_buffer_half = mpu.get_global_memory_buffer().get_tensor(
                    [ctx.tile_size, partition_vocab_size], lm_output_tile.dtype, "tile")
                tile_buffer_half.copy_(grad_input)
                grad_input = tile_buffer_half

                ### then the sequence-parallel scatter

                # total_input, handle = seq_parallel_bwd(lm_output_tile, ctx.sequence_parallel, world_size)
                # Gradient w.r.t. the classifier input.
                grad_input_mlp = grad_input.matmul(logit_weights)
                # if ctx.sequence_parallel:
                #     handle.wait()
                # grad_input = grad_input.view(grad_input.shape[0] * grad_input.shape[1],
                #                        grad_input.shape[2])
                # total_input_tile = total_input_tile.contiguous().view(total_input_tile.shape[0] * total_input_tile.shape[1],
                #                        total_input_tile.shape[2])
                total_input_tile = total_input_tile.contiguous()

                if ctx.sequence_parallel:
                    dim_size = list(lm_output_tile.size())
                    sub_grad_input = torch.empty(dim_size, dtype=lm_output_tile.dtype,
                                device=torch.cuda.current_device(),
                                requires_grad=False)
                    # reduce_scatter the input gradient back to this rank's
                    # sequence slice, overlapped with the weight-grad matmul.
                    handle = torch.distributed._reduce_scatter_base(sub_grad_input, grad_input_mlp,
                                                                    group=get_tensor_model_parallel_group(),
                                                                    async_op=True)

                    # Dummy op — presumably keeps the compute stream busy so
                    # the async reduce-scatter overlaps (TODO confirm).
                    _ = torch.empty(1, device=grad_output.device) + 1
                
                # Gradient w.r.t. the classifier weight, accumulated over tiles.
                grad_weight = grad_input.t().matmul(total_input_tile)  
                grad_weights += grad_weight
                if ctx.sequence_parallel:
                    handle.wait()
                    grad_inputs.append(sub_grad_input)
                else:
                    grad_inputs.append(grad_input_mlp)

        grad_input = torch.cat(grad_inputs, dim=0).reshape(ctx.lm_output_shape0, ctx.lm_output_shape1, -1).contiguous()
        # One gradient slot per forward arg: only lm_output and
        # logit_weights receive gradients.
        return grad_input, None, grad_weights, None, None, None, None

def vocab_parallel_cross_entropy(vocab_parallel_logits, target):
    """Helper function for the cross entropy.

    If the vocabulary was padded (for even tensor-parallel sharding), first
    push the logits of the padding tokens to a large negative value so they
    contribute (almost) nothing to the softmax, then delegate to the
    vocab-parallel cross-entropy autograd function.

    Args:
        vocab_parallel_logits: [seq, batch, local_vocab] logits shard;
            mutated in place both by the padding mask here and by the
            cross-entropy forward.
        target: token ids in the global vocabulary.

    Returns:
        Per-token loss tensor.
    """
    args = get_args()
    if args.mask_vocab_pad and args.vocab_padding_size != 0:
        tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
        vocab_start_index, vocab_end_index = \
            VocabUtility.vocab_range_from_global_vocab_size(
                args.padded_vocab_size, get_tensor_model_parallel_rank(),
                tensor_model_parallel_size)
        real_vocab_num = args.padded_vocab_size - args.vocab_padding_size
        if real_vocab_num < vocab_end_index:
            if real_vocab_num > vocab_start_index:
                # Padding covers only the tail of this partition.
                vocab_padding_size_this_partition = vocab_end_index - real_vocab_num
            else:
                # real_vocab_num <= vocab_start_index: the whole partition
                # is padding.  (Bug fix: the original
                # `elif real_vocab_num < vocab_start_index` left the
                # variable unbound when real_vocab_num == vocab_start_index,
                # raising NameError on the next line.)
                vocab_padding_size_this_partition = vocab_end_index - vocab_start_index
            vocab_parallel_logits[:, :, -vocab_padding_size_this_partition:] = -10000.0

    return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target)

def vocab_parallel_logsoftmax(vocab_parallel_logits):
    """Log-softmax over vocab-parallel logits, masking padded vocab entries.

    If the vocabulary was padded (for even tensor-parallel sharding), first
    push the padding logits to a large negative value so they contribute
    (almost) nothing, then delegate to the vocab-parallel log-softmax
    autograd function.  The padding mask mutates the input in place.
    """
    args = get_args()
    if args.mask_vocab_pad and args.vocab_padding_size != 0:
        tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
        vocab_start_index, vocab_end_index = \
            VocabUtility.vocab_range_from_global_vocab_size(
                args.padded_vocab_size, get_tensor_model_parallel_rank(),
                tensor_model_parallel_size)
        real_vocab_num = args.padded_vocab_size - args.vocab_padding_size
        if real_vocab_num < vocab_end_index:
            if real_vocab_num > vocab_start_index:
                # Padding covers only the tail of this partition.
                vocab_padding_size_this_partition = vocab_end_index - real_vocab_num
            else:
                # real_vocab_num <= vocab_start_index: the whole partition
                # is padding.  (Bug fix: the original
                # `elif real_vocab_num < vocab_start_index` left the
                # variable unbound when real_vocab_num == vocab_start_index,
                # raising NameError on the next line.)
                vocab_padding_size_this_partition = vocab_end_index - vocab_start_index
            vocab_parallel_logits[:, :, -vocab_padding_size_this_partition:] = -10000.0

    return _VocabParallelLogSoftmax.apply(vocab_parallel_logits)

def vocab_parallel_softmax(vocab_parallel_logits):
    """Softmax over vocab-parallel logits, masking padded vocab entries.

    If the vocabulary was padded (for even tensor-parallel sharding), first
    push the padding logits to a large negative value so they contribute
    (almost) nothing, then delegate to the vocab-parallel softmax autograd
    function.  The padding mask mutates the input in place.
    """
    args = get_args()
    if args.mask_vocab_pad and args.vocab_padding_size != 0:
        tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
        vocab_start_index, vocab_end_index = \
            VocabUtility.vocab_range_from_global_vocab_size(
                args.padded_vocab_size, get_tensor_model_parallel_rank(),
                tensor_model_parallel_size)
        real_vocab_num = args.padded_vocab_size - args.vocab_padding_size
        if real_vocab_num < vocab_end_index:
            if real_vocab_num > vocab_start_index:
                # Padding covers only the tail of this partition.
                vocab_padding_size_this_partition = vocab_end_index - real_vocab_num
            else:
                # real_vocab_num <= vocab_start_index: the whole partition
                # is padding.  (Bug fix: the original
                # `elif real_vocab_num < vocab_start_index` left the
                # variable unbound when real_vocab_num == vocab_start_index,
                # raising NameError on the next line.)
                vocab_padding_size_this_partition = vocab_end_index - vocab_start_index
            vocab_parallel_logits[:, :, -vocab_padding_size_this_partition:] = -10000.0

    return _VocabParallelSoftmax.apply(vocab_parallel_logits)

def vocab_parallel_gather(vocab_parallel_logits, labels):
    """Gather, for every position, the logit of its target label from
    vocab-parallel (tensor-model-parallel sharded) logits."""
    gathered = _VocabParallelGather.apply(vocab_parallel_logits, labels)
    return gathered

def fused_post_language_model_processing_with_cross_entropy(lm_output, labels, logit_weights,
                                                            get_key_value, parallel_output,
                                                            forward_method_parallel_output,
                                                            tile_size):
    """Run the fused classifier head + vocab-parallel cross entropy and
    return the per-token loss with shape (batch_size, seq_len)."""
    per_token_loss = _FusedClassifierCrossEntropy.apply(
        lm_output, labels, logit_weights,
        get_key_value, parallel_output,
        forward_method_parallel_output,
        tile_size)
    # The fused function yields (seq_len, batch_size); callers expect
    # (batch_size, seq_len).
    return per_token_loss.transpose(0,1).contiguous()
