# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#  Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
from typing import Literal

import torch
from torch import Tensor

from megatron.core import tensor_parallel
from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding
from megatron.core.transformer.transformer_config import TransformerConfig


def language_model_embedding_init_func(
        self,
        config: TransformerConfig,
        vocab_size: int,
        max_sequence_length: int,
        position_embedding_type: Literal['learned_absolute', 'rope', 'none'] = 'learned_absolute',
        num_tokentypes: int = 0,
        skip_weight_param_allocation: bool = False,
):
    """Patched ``LanguageModelEmbedding.__init__``.

    Mirrors the upstream constructor, additionally threading
    ``skip_weight_param_allocation`` through to ``VocabParallelEmbedding`` so
    the word-embedding weight can be supplied externally at forward time.

    Args:
        config: Transformer configuration object.
        vocab_size: Size of the (padded) vocabulary.
        max_sequence_length: Maximum sequence length; sizes the learned
            position-embedding table.
        position_embedding_type: Position-embedding scheme; a learned table
            is created only for ``'learned_absolute'``.
        num_tokentypes: Number of token types; ``0`` disables token-type
            embeddings.
        skip_weight_param_allocation: If ``True``, ``VocabParallelEmbedding``
            does not allocate its weight parameter.
    """
    super(LanguageModelEmbedding, self).__init__(config=config)

    self.config: TransformerConfig = config
    self.vocab_size: int = vocab_size
    self.max_sequence_length: int = max_sequence_length
    self.add_position_embedding: bool = position_embedding_type == 'learned_absolute'
    self.num_tokentypes = num_tokentypes
    # Fused reduce-scatter of the embedding output is only valid when nothing
    # (position or token-type embeddings) has to be added afterwards.
    self.reduce_scatter_embeddings = (
        (not self.add_position_embedding)
        and self.num_tokentypes <= 0
        and self.config.sequence_parallel
    )

    # Word embeddings, sharded along the vocab dim over the tensor-parallel group.
    self.word_embeddings = tensor_parallel.VocabParallelEmbedding(
        num_embeddings=self.vocab_size,
        embedding_dim=self.config.hidden_size,
        init_method=self.config.init_method,
        reduce_scatter_embeddings=self.reduce_scatter_embeddings,
        config=self.config,
        skip_weight_param_allocation=skip_weight_param_allocation,
    )

    # Learned absolute position embeddings (replicated, not sharded).
    if self.add_position_embedding:
        self.position_embeddings = torch.nn.Embedding(
            self.max_sequence_length, self.config.hidden_size
        )
        if self.config.perform_initialization:
            self.config.init_method(self.position_embeddings.weight)

    if self.num_tokentypes <= 0:
        self.tokentype_embeddings = None
    else:
        self.tokentype_embeddings = torch.nn.Embedding(
            self.num_tokentypes, self.config.hidden_size
        )
        if self.config.perform_initialization:
            self.config.init_method(self.tokentype_embeddings.weight)

    # Dropout applied to the summed embeddings in forward.
    self.embedding_dropout = torch.nn.Dropout(self.config.hidden_dropout)


def language_model_embedding_forward(self,
                                     input_ids: Tensor,
                                     position_ids: Tensor,
                                     tokentype_ids: Tensor = None,
                                     weight: Tensor = None) -> Tensor:
    """Patch forward pass of the embedding module.

    Differs from upstream in accepting an external ``weight`` to use as the
    word-embedding table, for the case where the weight parameter was not
    allocated (``skip_weight_param_allocation=True``).

    Args:
        input_ids (Tensor): The input tokens
        position_ids (Tensor): The position id's used to calculate position embeddings
        tokentype_ids (Tensor): The token type ids. Used when args.bert_binary_head is set to True. Defaults to None
        weight (Tensor): embedding weight; falls back to
            ``self.word_embeddings.weight`` when None

    Returns:
        Tensor: The output embeddings

    Raises:
        RuntimeError: if no weight was passed and none was allocated.
        ValueError: if ``tokentype_ids`` and ``self.tokentype_embeddings``
            are inconsistently provided.
    """
    # Resolve the word-embedding weight: an explicit argument wins, otherwise
    # fall back to the allocated parameter (which may be None when
    # skip_weight_param_allocation was set at init).
    if weight is None:
        if self.word_embeddings.weight is None:
            raise RuntimeError(
                "weight was not supplied to VocabParallelEmbedding forward pass "
                "and skip_weight_param_allocation is True."
            )
        weight = self.word_embeddings.weight

    word_embeddings = self.word_embeddings(input_ids, weight)
    if self.add_position_embedding:
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = word_embeddings + position_embeddings
    else:
        embeddings = word_embeddings

    # When reduce_scatter_embeddings is set, VocabParallelEmbedding already
    # emitted sequence-first output, so no transpose is needed here.
    if not self.reduce_scatter_embeddings:
        # Data format change to avoid explicit transposes : [b s h] --> [s b h].
        embeddings = embeddings.transpose(0, 1).contiguous()

    if tokentype_ids is not None:
        if self.tokentype_embeddings is None:
            raise ValueError("tokentype_embeddings should not be None when tokentype_ids are provided.")
        # [b s h] -> [s b h] (So that it can be added with embeddings)
        tokentype_embedding = self.tokentype_embeddings(tokentype_ids).permute(1, 0, 2)
        embeddings = embeddings + tokentype_embedding
    else:
        if self.tokentype_embeddings is not None:
            raise ValueError("tokentype_embeddings should be None when tokentype_ids are not provided.")

    # If the input flag for fp32 residual connection is set, convert for float.
    if self.config.fp32_residual_connection:
        embeddings = embeddings.float()

    # Dropout.
    if self.config.sequence_parallel:
        if not self.reduce_scatter_embeddings:
            embeddings = tensor_parallel.scatter_to_sequence_parallel_region(embeddings)
        # `scatter_to_sequence_parallel_region` returns a view, which prevents
        # the original tensor from being garbage collected. Clone to facilitate GC.
        # Has a small runtime cost (~0.5%).
        if self.config.clone_scatter_output_in_embedding:
            embeddings = embeddings.clone()
        # Fork the RNG tracker so dropout masks stay consistent across the
        # tensor-parallel group under sequence parallelism.
        with tensor_parallel.get_cuda_rng_tracker().fork():
            embeddings = self.embedding_dropout(embeddings)
    else:
        embeddings = self.embedding_dropout(embeddings)

    return embeddings
