# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains utilities to manipulate torch memory buffers
"""

import math
from typing import List
import torch
from torch import nn

from megatron.core.utils import get_model_config
from megatron.core.distributed import DistributedDataParallelConfig
from megatron.training import get_args
from megatron.core import parallel_state


class MegatronMemoryBuffer:
    """
    A memory buffer adapted from megatron core ParamAndGradBuffer to reuse with megatron
    distributed optimizer training.

    Dense trainable parameters of ``module`` are grouped into contiguous per-dtype
    buffers (``ParamBuffer``) so they can be offloaded to CPU and brought back to
    GPU as whole tensors instead of parameter-by-parameter.
    """

    def __init__(self, module: torch.nn.Module, disable_bucketing: bool = False):
        """
        Args:
            module: Module whose trainable parameters are collated into buffers.
            disable_bucketing: If True, put all parameters into a single bucket
                (i.e. ``bucket_size=None``), matching megatron's behavior.
        """
        args = get_args()
        ddp_config = DistributedDataParallelConfig(
            grad_reduce_in_fp32=args.accumulate_allreduce_grads_in_fp32,
            overlap_grad_reduce=args.overlap_grad_reduce,
            use_distributed_optimizer=args.use_distributed_optimizer,
            check_for_nan_in_grad=args.check_for_nan_in_loss_and_grad,
            bucket_size=args.ddp_bucket_size)

        if ddp_config.bucket_size is None:
            dp_size = parallel_state.get_data_parallel_world_size()
            ddp_config.bucket_size = max(40000000, 1000000 * dp_size)
        # Set bucket_size to infinity (no bucketing) if overlap_grad_reduce is False.
        if not ddp_config.overlap_grad_reduce:
            ddp_config.bucket_size = None

        bucket_size = ddp_config.bucket_size
        # Non-first pipeline stages do not need bucketing (mirrors megatron DDP).
        if parallel_state.get_pipeline_model_parallel_rank() > 0:
            bucket_size = None
        if disable_bucketing:
            bucket_size = None

        # Group parameters by their gradient reduction type: params marked
        # `allreduce=False` (expert-parallel) are kept separate from dense params.
        dense_params = []
        expert_parallel_params = []
        for param in module.parameters():
            if not param.requires_grad:
                continue

            param.grad_added_to_main_grad = False

            if getattr(param, 'allreduce', True):
                dense_params.append(param)
            else:
                expert_parallel_params.append(param)

        def allocate_buffers_for_parameters(input_params, dp_size):
            """Create one ParamBuffer per (param_dtype, grad_dtype) group."""
            param_and_grad_dtype_to_params = {}

            for param in input_params:
                if not param.requires_grad:
                    continue

                param_dtype = param.dtype
                grad_dtype = torch.float if ddp_config.grad_reduce_in_fp32 else param.dtype
                param_and_grad_dtype_to_params.setdefault((param_dtype, grad_dtype), []).append(param)

            # Allocate the param buffers. grad_dtype is unused below, but keeping
            # it in the grouping key aligns the layout with ParamAndGradBuffer.
            return [
                ParamBuffer(ddp_config, param_dtype, params, bucket_size, dp_size)
                for (param_dtype, _grad_dtype), params in param_and_grad_dtype_to_params.items()
            ]

        # Allocate the param buffers for dense params.
        dp_size = parallel_state.get_data_parallel_world_size()
        self.buffers = allocate_buffers_for_parameters(dense_params, dp_size)

        # Expert parallel params are not supported yet, so this always produces
        # an empty buffer list; kept for structural parity with megatron.
        assert not expert_parallel_params, "Not support expert parallel params currently."
        ep_size = parallel_state.get_expert_model_parallel_world_size()
        self.expert_parallel_buffers = allocate_buffers_for_parameters(expert_parallel_params, ep_size)

        if ddp_config.use_distributed_optimizer:

            @torch.no_grad()
            def unmap_weight_tensor(m):
                # Drop cached weight_tensor references so the original storage
                # can be released once params are remapped into the buffer.
                if hasattr(m, 'weight_tensor'):
                    m.weight_tensor = None

            module.apply(unmap_weight_tensor)

    @classmethod
    def from_DDP_model(cls, DDP_model):
        """Wrap the buffers of an existing megatron DDP model without re-allocating.

        Bypasses ``__init__`` entirely; only ``self.buffers`` is populated.
        """
        megatron_buffer = cls.__new__(cls)
        megatron_buffer.buffers = DDP_model.buffers
        return megatron_buffer

    def build_memory_reference(self, maintain_weight):
        """Re-point each param.data at its view inside the contiguous buffers.

        Args:
            maintain_weight: If True, copy current parameter values into the
                buffer before re-pointing, preserving the weights.
        """
        for buffer in self.buffers:
            buffer._build_memory_reference(maintain_weight)

    def offload(self, to_empty=False):
        """Move the param buffers to CPU to free GPU memory.

        Args:
            to_empty: If True, replace each buffer with an uninitialized CPU
                tensor (contents discarded); otherwise copy the data to CPU.
        """
        for buffer in self.buffers:
            if not to_empty:
                # Bug fix: Tensor.to() is out-of-place and its result was
                # previously discarded, so the buffer never actually moved.
                # Rebind the underlying storage in place so existing Python
                # references to the tensor object remain valid.
                buffer.param_data.data = buffer.param_data.data.to('cpu', non_blocking=True)
            else:
                buffer.param_data = torch.empty_like(buffer.param_data, device='cpu')

    def onload(self, to_empty=False):
        """Move the param buffers back to the current CUDA device.

        Args:
            to_empty: If True, allocate uninitialized GPU tensors (contents
                discarded); otherwise copy the data to GPU.
        """
        for buffer in self.buffers:
            if not to_empty:
                # Same fix as offload(): the .to() result must be kept.
                buffer.param_data.data = buffer.param_data.data.to(
                    torch.cuda.current_device(), non_blocking=True)
            else:
                buffer.param_data = torch.empty_like(buffer.param_data, device='cuda')


class ParamBuffer:
    """
    Groups parameters into a contiguous buffer, and then breaks the buffer into
    buckets with roughly `bucket_size` parameters each.

    Unlike megatron's ParamAndGradBuffer, no per-bucket tensors are materialized
    here — the bucket walk only reproduces megatron's padding so that the buffer
    layout (``param_index_map``, total ``numel``) matches the distributed
    optimizer's sharding exactly.

    Args:
        ddp_config: DistributedDataParallel config object.
        param_dtype: Type of param tensor.
        params: List of parameters whose data is collated in the underlying
            tensor.
        bucket_size: The rough size of each bucket in terms of number of
            parameters; ``None`` disables bucketing (single bucket).
        dp_size: Data-parallel world size, used to pad bucket boundaries when
            the distributed optimizer shards the buffer.
    """

    def __init__(
        self,
        ddp_config: DistributedDataParallelConfig,
        param_dtype: torch.dtype,
        params: List[torch.nn.Parameter],
        bucket_size: int,
        dp_size: int,
    ):
        self.ddp_config = ddp_config

        # Check that params are unique.
        unique_params = set()
        for param in params:
            assert param not in unique_params
            unique_params.add(param)
        del unique_params

        # Store attributes that will be needed later.
        self.param_dtype = param_dtype
        self.data_parallel_world_size = dp_size

        # Data structures to store underlying buckets and relevant indexing data.
        self.buckets = []
        self.param_index_map = {}  # Param -> location in buffer mapping (used in dist. optimizer).

        def _pad(number_to_be_padded: int, divisor: int) -> int:
            """Round `number_to_be_padded` up to the nearest multiple of `divisor`."""
            return int(math.ceil(number_to_be_padded / divisor) * divisor)

        def _pad_if_needed(data_index: int) -> int:
            """
            Pads data indices if using distributed optimizer (to ensure uniform sharding).
            """
            if self.ddp_config.use_distributed_optimizer:
                # Workaround for TE bug causing cuBLAS to pick an incompatible algorithm.
                # This also helps cuBLAS pick more efficient algorithms for GEMMs.
                # We now ensure that all buckets start at a memory address that is 256-byte
                # aligned (128 values since params and grads use >= 16-bit precision).
                return _pad(data_index, math.lcm(self.data_parallel_world_size, 128))
            return data_index

        # First, figure out how many elements should be in the underlying buffer storage.
        # Note that if we need to split the buffer into smaller buckets, each of these
        # might need to be padded as well (if using the distributed optimizer).
        data_start_index = 0
        bucket_data_start_index = data_start_index
        bucket_params = set()
        per_bucket_numel_unpadded = []
        bucket_id = 0

        def _create_new_bucket(data_end_index: int) -> int:
            """
            We do not need bucket here, just to align padding with megatron.
            """
            nonlocal bucket_data_start_index, bucket_params, bucket_id
            per_bucket_numel_unpadded.append(data_end_index - bucket_data_start_index)
            data_end_index = _pad_if_needed(data_end_index)
            # Update bucket metadata.
            bucket_data_start_index = data_end_index
            # Re-set bucket_params and increment bucket_id for next bucket.
            bucket_params = set()
            bucket_id += 1
            # Return the potentially padded data_end_index.
            return data_end_index

        for param in params[::-1]:
            # Iterate through parameters in reverse order to roughly follow backprop order,
            # and skip parameters that don't require gradients.
            if not param.requires_grad:
                continue
            this_numel = param.data.nelement()
            data_end_index = data_start_index + this_numel

            def _does_param_require_new_bucket(param):
                # Shared-embedding params get their own bucket under the distributed
                # optimizer so their layout matches megatron's grad-sync grouping.
                return (
                    getattr(param, "shared_embedding", False)
                    and self.ddp_config.use_distributed_optimizer
                )

            # Create bucket with already collected parameters if current param needs its own bucket.
            if _does_param_require_new_bucket(param) and len(bucket_params) > 0:
                # We are creating a bucket for the already accumulated parameters, whose params
                # end at the current data_start_index.
                if self.ddp_config.use_distributed_optimizer:
                    # data_start_index should already be padded.
                    assert data_start_index % self.data_parallel_world_size == 0
                _create_new_bucket(data_start_index)

            self.param_index_map[param] = (
                data_start_index,
                data_end_index,
                bucket_id,
            )
            bucket_params.add(param)

            # If we have enough elements already or the current param is part of the shared embedding
            # layer and needs a separate bucket, form a new bucket.
            if (
                bucket_size is not None
                and (data_end_index - bucket_data_start_index) >= bucket_size
            ) or _does_param_require_new_bucket(param):
                data_end_index = _create_new_bucket(data_end_index)
            data_start_index = data_end_index

        # Add remaining params to a new bucket.
        if len(bucket_params) > 0:
            data_end_index = _create_new_bucket(data_end_index)

        # Next, create underlying storage for buffer (with numel elements that includes
        # padding as necessary).
        # NOTE(review): if `params` contained no trainable params, `data_end_index`
        # would be unbound here — callers only construct ParamBuffer with non-empty
        # per-dtype groups, but worth confirming if usage changes.
        self.numel = data_end_index
        self.numel_unpadded = sum(per_bucket_numel_unpadded)
        assert self.numel_unpadded <= self.numel
        if self.ddp_config.use_distributed_optimizer:
            assert self.numel % self.data_parallel_world_size == 0
        else:
            assert self.numel == self.numel_unpadded

        self.param_data = torch.zeros(
            self.numel,
            dtype=self.param_dtype,
            device=torch.cuda.current_device(),
            requires_grad=False,
        )

        # Mapping of param.data into this buffer is deferred to _build_memory_reference().


    def _build_memory_reference(self, maintain_weight: bool):
        """Re-point each param.data at its view inside `param_data`.

        Args:
            maintain_weight: If True, copy the current parameter values into
                the buffer view first, preserving the weights.
        """
        for param, value in self.param_index_map.items():
            data_start_index, data_end_index, bucket_id = value

            buffer = self._get(
                param.data.shape, data_start_index
            )
            if maintain_weight:
                buffer.copy_(param.data)
            param.data = buffer


    def _get(self, shape: torch.Size, start_index: int) -> torch.Tensor:
        """
        Return a tensor with the input `shape` as a view into the 1-D data starting at
        `start_index`.
        """
        end_index = start_index + shape.numel()
        assert end_index <= self.numel, 'Requested tensor is out of buffer range'
        assert self.param_data is not None
        buffer_tensor = self.param_data[start_index:end_index]
        buffer_tensor = buffer_tensor.view(shape)
        return buffer_tensor