# Copyright (c) 2024, TENCENT CORPORATION. All rights reserved.


# Author adrenzhou@tencent.com





from typing import Optional, Union
import torch
import torch.nn as nn
from torch import Tensor
from megatron.core.config_logger import has_config_logger_enabled, log_config_to_disk
from megatron.core.models.common.speech_module.speech_module import SpeechModule
from megatron.core.transformer.custom_layers.transformer_engine import TENorm
from megatron.core.transformer.enums import ModelType
from megatron.core.transformer.spec_utils import ModuleSpec, build_module
from megatron.core.transformer.transformer_block import TransformerBlock
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.models.speech.whisper_layer_specs import get_whisper_layer_with_transformer_engine_spec
from megatron.core import InferenceParams, parallel_state, tensor_parallel
from megatron.core.utils import make_viewless_tensor
import pdb

class WhisperMegatron(SpeechModule):
    """Whisper-style speech encoder implemented using Megatron's framework.

    Pipeline: two Conv1d layers with GELU (the second conv has stride 2,
    halving the time axis) -> learned absolute position embeddings ->
    Megatron ``TransformerBlock`` encoder -> optional average pooling
    (halving time again) -> LayerNorm -> (when ``post_process``) a
    tensor-parallel output projection and, if labels are provided, a CTC loss.
    """
    def __init__(
        self,
        transformer_config: TransformerConfig,
        transformer_layer_spec: ModuleSpec,
        ln_pre_impl: Union[ModuleSpec, type] = TENorm,  # NOTE(review): accepted but never used in this class
        input_dim: int = 128,  # input feature channels (presumably mel bins — confirm with caller)
        conv1_out_dim: int = 1280,  # conv channel width; must equal transformer_config.hidden_size
        conv_kernel_size: int = 3,
        conv_stride: int = 1,  # stride of conv1 only; conv2 stride is hard-coded to 2 below
        seq_length: int = 1500,  # size of the position-embedding table (time steps after conv2)
        vocab_size: int = 4096,  # CTC output vocabulary size (index 0 is the blank)
        pre_process: bool = True,  # NOTE(review): not stored; encoder is always built with pre_process=True
        post_process: bool = True,
    ):
        """Build the convolutional frontend, transformer encoder and optional CTC head.

        Args:
            transformer_config: Megatron transformer configuration.
            transformer_layer_spec: ModuleSpec describing each encoder layer.
            post_process: When True, adds the output projection and CTC loss;
                when False, ``forward`` returns encoder hidden states.
        """
        super().__init__(config=transformer_config)

        if has_config_logger_enabled(transformer_config):
            log_config_to_disk(transformer_config, locals(), prefix=type(self).__name__)

        # Frontend conv 1: projects input_dim -> conv1_out_dim channels,
        # "same" padding so the time length is preserved (for odd kernels).
        self.conv1 = nn.Conv1d(
            in_channels=input_dim,
            out_channels=conv1_out_dim,
            kernel_size=conv_kernel_size,
            stride=conv_stride,
            padding=(conv_kernel_size - 1) // 2,
        )

        # Frontend conv 2: stride 2 halves the time axis (Whisper-style downsampling).
        self.conv2 = nn.Conv1d(
            in_channels=conv1_out_dim,
            out_channels=conv1_out_dim,
            kernel_size=conv_kernel_size,
            stride=2,
            padding=(conv_kernel_size - 1) // 2,
        )

        # Learned absolute position embeddings over the post-conv2 time axis.
        self.position_embeddings = nn.Embedding(seq_length, conv1_out_dim)

        self.gelu = nn.GELU()
        self.model_type = ModelType.encoder_or_decoder


        # Encoder-only stack: pre/post_process fixed here, final norm applied
        # separately via self.final_layer_norm below.
        self.encoder = TransformerBlock(
            config=transformer_config,
            spec=transformer_layer_spec,
            pre_process=True,
            post_process=False,
        )
        self.post_process = post_process
        # Optional second 2x time downsampling applied after the encoder.
        self.avg_pooler = nn.AvgPool1d(2, stride=2)
        self.final_layer_norm = nn.LayerNorm(conv1_out_dim, eps=1e-5)

        if self.post_process:
            # Tensor-parallel vocabulary projection; gather_output=False keeps
            # logits partitioned across tensor-parallel ranks.
            self.output_layer = tensor_parallel.ColumnParallelLinear(
                transformer_config.hidden_size,
                vocab_size,
                config=transformer_config,
                init_method=transformer_config.init_method,
                bias=False,
                skip_bias_add=False,
                gather_output=False,
                skip_weight_param_allocation=False,
            )
            # blank=0: vocabulary index 0 is reserved for the CTC blank symbol.
            self.ctc_loss = nn.CTCLoss(blank=0, reduction='mean', zero_infinity=True)

    def set_input_tensor(self, input_tensor: Tensor) -> None:
        """Sets input tensor to the model.

        See megatron.model.transformer.set_input_tensor()

        Args:
            input_tensor (Tensor): Sets the input tensor for the model.
        """
        # This is usually handled in schedules.py but some inference code still
        # gives us non-lists or None
        if not isinstance(input_tensor, list):
            input_tensor = [input_tensor]

        assert len(input_tensor) == 1, 'input_tensor should only be length 1 for gpt/bert'
        self.encoder.set_input_tensor(input_tensor[0])

    def forward(self, 
                x: torch.Tensor, 
                attention_mask: Optional[torch.Tensor] = None,
                labels: Optional[torch.Tensor] = None,
                pooling: bool = True) -> torch.Tensor:
        """Forward pass of Whisper model.

        Args:
            x (torch.Tensor): Input tensor of shape [batch, input_dim, seq_len]
            attention_mask (Optional[torch.Tensor]): Attention mask for the transformer layers
            labels (Optional[torch.Tensor]): [batch, label_len] CTC targets padded
                with -1; when given (and post_process), the CTC loss is returned.
            pooling (bool): When True, average-pool the encoder output 2x along
                time (total 4x downsampling vs. the raw input).

        Returns:
            torch.Tensor: hidden states [batch, T, hidden_size] when not
                post_process; otherwise logits [batch, T, vocab_partition]
                (labels is None) or a scalar CTC loss (labels given).
        """         
       
        x = self.gelu(self.conv1(x))  # shape = [batch, conv1_out_dim, seq_len]
        x = self.gelu(self.conv2(x))  # shape = [batch, conv1_out_dim, seq_len / 2]

        batch_size, hidden_size, seq_len = x.size()
        position_ids = torch.arange(seq_len, dtype=torch.long, device=x.device).expand(batch_size, -1)
        x = x.permute(0, 2, 1)  # [batch, seq_len/2, hidden_size]
        x = x + self.position_embeddings(position_ids)
        x = x.permute(1, 0, 2)  # [seq_len/2, batch, hidden_size]

        x = x.contiguous()
        # NOTE(review): attention_mask is passed through unchanged even though x
        # has been downsampled 2x by conv2 — confirm the mask is built at the
        # post-conv resolution by the caller.
        x = self.encoder(x, attention_mask)

        # x = x.permute(1, 0, 2)  # [batch, seq_len/2, hidden_size]

        # x in : [seq_len/2, batch, hidden_size]
        # x out: [batch, hidden_size, seq_len/2]
        if pooling:
            x = x.permute(1, 2, 0)
            x = self.avg_pooler(x)
            x = x.permute(0, 2, 1)
            # now: x: [batch, seq_len/2, hidden_size]
        else:
            x = x.permute(1, 0, 2)

        x = self.final_layer_norm(x)
        if not self.post_process:
            x = x.contiguous()
            return x
        else:
            logits, _ = self.output_layer(x)
            #logits = make_viewless_tensor(inp=logits, requires_grad=logits.requires_grad, keep_graph=True)
            logits = logits.contiguous()
            if labels is None:
                return logits
            else:
                loss = self.compute_speech_model_loss(labels, logits, attention_mask)
                return loss

    def compute_speech_model_loss(self, labels, logits, attention_mask):
        """Compute the CTC loss over the (time-downsampled) logits.

        Labels are padded with -1; padding positions are converted to 0 (the
        CTC blank, which is safe because target_lengths excludes them) and the
        label matrix is truncated to the longest real target.
        """
        # labels: [batch, label_seq_len]
        # logits: [batch, seq_len/2, vocab_size]
        # attention_mask: [batch, 1, 1, seq_len]  has real seq len information
        batch_size, seq_len, vocab_size = logits.size()
        device = logits.device
        
        # Prepare input_lengths based on attention_mask
        # Reduce the attention mask by 2x along the sequence length dimension (downsampling)
        attention_mask = attention_mask.squeeze(1).squeeze(1)  # [batch, seq_len]
        # NOTE(review): no division happens here despite the comment above — if
        # the mask is at the pre-downsampling resolution, these lengths exceed
        # the logits' time dimension. Confirm the mask granularity vs. the
        # 2x (conv2) and optional 2x (avg_pooler) downsampling in forward().
        input_lengths = attention_mask.sum(dim=-1)  # downsampling by 2, [batch]
        input_lengths = input_lengths.to(dtype=torch.long, device=device)
        
        # Prepare logits for CTCLoss
        # Permute logits to shape [seq_len, batch, vocab_size]
        log_probs = logits.permute(1, 0, 2).log_softmax(2)  # [seq_len, batch, vocab_size]
        
        # Prepare target_lengths based on labels (-1 marks padding)
        target_lengths = (labels != -1).sum(dim=-1)  # [batch]
        target_lengths = target_lengths.to(dtype=torch.long, device=device)
        # pdb.set_trace()
        # Replace -1 in labels with 0, as required by CTCLoss
        labels = labels.masked_fill(labels == -1, 0)
        label_max_len = target_lengths.max().item()
        labels = labels[:, :label_max_len]  # [batch, label_max_seq_len]
        # Flatten the labels for CTCLoss
        # pdb.set_trace()
        # targets = labels.view(-1)  # [batch * label_seq_len]
        targets = labels
        # Compute CTC loss
        # log_probs cast to float: CTCLoss is numerically unstable in half precision.
        loss = self.ctc_loss(log_probs.float(), targets, input_lengths, target_lengths)
        return loss


def model_provider(
    pre_process=True, post_process=True, add_encoder=True, add_decoder=True,
    parallel_output=True) -> WhisperMegatron:
    """Build a ``WhisperMegatron`` encoder with a fixed Whisper-large-style config.

    Args:
        pre_process: Forwarded to ``WhisperMegatron`` (first pipeline stage).
        post_process: Forwarded to ``WhisperMegatron``; controls whether the
            CTC projection/loss head is constructed.
        add_encoder / add_decoder / parallel_output: Accepted for signature
            compatibility with Megatron's provider protocol; this
            encoder-only model has no use for them.

    Returns:
        WhisperMegatron: the constructed model (on the current device per Megatron).
    """
    transformer_config = TransformerConfig(
        hidden_size=1280,
        num_attention_heads=20,
        num_layers=32,
        layernorm_epsilon=1e-5,
        apply_query_key_layer_scaling=True,
        tensor_model_parallel_size=1,
    )
    transformer_layer_spec = get_whisper_layer_with_transformer_engine_spec()
    # Fix: previously pre_process/post_process were accepted but silently
    # dropped, so pipeline schedules always got a full model with the CTC head.
    # Forward them; defaults are unchanged, so default behavior is identical.
    model = WhisperMegatron(
        transformer_config,
        transformer_layer_spec,
        pre_process=pre_process,
        post_process=post_process,
    )
    return model
# Debugging code
def debug_whisper_megatron():
    """Manual smoke test: build the model under Megatron, run a forward pass on
    features dumped from a HuggingFace Whisper run ('whisper_hf.pt'), and drop
    into pdb so the encoder output can be compared interactively."""
    from megatron.training.initialize import initialize_megatron
    from megatron.training.training import setup_model_and_optimizer
    from megatron.training.checkpointing import load_checkpoint
    from megatron.training.checkpointing import save_checkpoint

    # Distributed/parallel state must be initialized before model construction.
    model_type = ModelType.encoder_or_decoder
    initialize_megatron()
    model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
        model_provider, model_type)

    iteration = 1
    num_floating_point_operations_so_far = 0

    # Reference inputs dumped from the HuggingFace Whisper pipeline.
    reference = torch.load('whisper_hf.pt')
    device = torch.cuda.current_device()
    features = reference['input_features'].cuda(device).to(dtype=torch.float32)
    mask = reference['attention_mask'].to(dtype=torch.bool).reshape(1, 1, 1, -1)

    # setup_model_and_optimizer returns a list of model chunks; use chunk 0.
    chunk = model[0]
    output = chunk(features, mask)
    print(output)
    pdb.set_trace()
    # Pull the HF hidden states for interactive comparison in the debugger.
    result = reference['result_hidden'].cpu()
    print("Output shape:", output.shape)



if __name__ == "__main__":
    # Manual debug entry point: requires a Megatron-capable environment
    # (CUDA, distributed init) and a local 'whisper_hf.pt' fixture file.
    debug_whisper_megatron()
