from functools import partial

import torch
import torch.nn.functional as F

from megatron.training import get_args
from megatron.training import get_tokenizer
from megatron.training import print_rank_0
from megatron.training import get_timers
from megatron.core import tensor_parallel
from megatron.core.enums import ModelType
from megatron.core.models.speech.whisper_model import WhisperMegatron
from megatron.training import pretrain
from megatron.training.utils import average_losses_across_data_parallel_group
from megatron.training.arguments import core_transformer_config_from_args
from megatron.core.transformer.spec_utils import import_module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.models.speech.whisper_layer_specs import get_whisper_layer_with_transformer_engine_spec
from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder
from megatron.core.datasets.utils import get_blend_from_list
from megatron.core import mpu, tensor_parallel
from megatron.core.datasets.speech_dataset import AudioLLMDataset
from megatron.core.utils import StragglerDetector
import pdb  # NOTE(review): debug-only import, unused in normal runs — consider removing
# Module-level straggler detector shared by forward_step (via `global stimer`).
stimer = StragglerDetector()

def whisper_transformer_config_from_args(args):
    """Build a TransformerConfig for the Whisper encoder from CLI args.

    Only the whisper-specific subset of arguments (added by
    add_speech_args) is consulted; all other TransformerConfig fields
    keep their defaults.
    """
    config_kwargs = {
        'hidden_size': args.whisper_hidden_size,
        'num_attention_heads': args.whisper_num_attention_heads,
        'num_layers': args.whisper_num_layers,
        'layernorm_epsilon': args.whisper_layernorm_epsilon,
        'apply_query_key_layer_scaling': args.whisper_apply_query_key_layer_scaling,
        'tensor_model_parallel_size': args.whisper_tensor_model_parallel_size,
    }
    return TransformerConfig(**config_kwargs)

def model_provider(
    pre_process=False, post_process=True, add_encoder=True, add_decoder=False,
    parallel_output=True) -> WhisperMegatron:
    """Build and return the Whisper encoder model.

    Args:
        pre_process / post_process: pipeline-stage flags supplied by Megatron.
            NOTE(review): these parameters are currently ignored — the model
            is constructed from args.whisper_pre_process /
            args.whisper_post_process instead; confirm this is intentional
            under pipeline parallelism.
        add_encoder / add_decoder / parallel_output: accepted for
            compatibility with Megatron's model-provider signature; unused.

    Returns:
        WhisperMegatron: the constructed (unwrapped) model.
    """
    print_rank_0('building Whisper Encoder model ...')
    args = get_args()

    # Fix: removed unused local `whisper_max_len` (args.whisper_seq_length
    # is passed directly to the model below).
    whisper_config = whisper_transformer_config_from_args(args)
    whisper_spec = get_whisper_layer_with_transformer_engine_spec()

    model = WhisperMegatron(
        transformer_config=whisper_config,
        transformer_layer_spec=whisper_spec,
        input_dim=args.whisper_input_dim,
        conv1_out_dim=args.whisper_conv1_out_dim,
        conv_kernel_size=args.whisper_conv_kernel_size,
        conv_stride=args.whisper_conv_stride,
        seq_length=args.whisper_seq_length,
        vocab_size=args.whisper_vocab_size,
        pre_process=args.whisper_pre_process,
        post_process=args.whisper_post_process,
    )
    return model


        
def get_batch(data_iterator):
    """Fetch one batch on TP rank 0 and broadcast it across the
    tensor-model-parallel group.

    Returns the batch values in dict insertion order:
    (spectrograms, text, padding_masks).
    """
    # Only the first and last pipeline stages consume input data; middle
    # stages get placeholders.  BUGFIX: this must be a 3-tuple to match the
    # `spectrograms, text, padding_masks = get_batch(...)` unpacking in
    # forward_step (the previous 5-tuple raised ValueError on unpack).
    if (not mpu.is_pipeline_first_stage()) and (not mpu.is_pipeline_last_stage()):
        return None, None, None

    def highend_broadcast(src_tensor, src_rank, group=None):
        """Broadcast a tensor whose shape is unknown to the receivers.

        Step 1 broadcasts the rank and per-dimension sizes; step 2 the
        payload.  Receivers pass src_tensor=None and get a freshly
        allocated tensor back.
        NOTE(review): receivers always allocate float32 — if any batch
        field (e.g. token ids in "text") is an integer tensor this will
        mismatch the source dtype; confirm the dataset emits float32 for
        every broadcast key.
        """
        # Step 1: broadcast the shape.
        if torch.distributed.get_rank() == src_rank:
            shape = list(src_tensor.shape)
        else:
            shape = None

        # Broadcast the number of dimensions first so receivers can size
        # the shape buffer.
        shape_len = torch.tensor(len(shape) if shape is not None else 0, dtype=torch.int64, device=torch.cuda.current_device())
        torch.distributed.broadcast(shape_len, src=src_rank, group=group)

        # Allocate a placeholder shape on receiving ranks.
        if shape is None:
            shape = [0] * shape_len.item()

        # Broadcast the actual per-dimension sizes.
        shape_tensor = torch.tensor(shape, dtype=torch.int64) if torch.distributed.get_rank() == src_rank else torch.empty(shape_len.item(), dtype=torch.int64)
        shape_tensor = shape_tensor.to(torch.cuda.current_device())
        torch.distributed.broadcast(shape_tensor, src=src_rank, group=group)

        shape = shape_tensor.tolist()

        # Step 2: broadcast the tensor payload itself.
        if torch.distributed.get_rank() != src_rank:
            src_tensor = torch.empty(shape, dtype=torch.float32, device=torch.cuda.current_device())

        torch.distributed.broadcast(src_tensor, src=src_rank, group=group)

        return src_tensor

    if mpu.get_tensor_model_parallel_rank() == 0:
        # TP rank 0 owns the data iterator: move each field to the GPU and
        # broadcast it to the rest of the TP group.
        batch = next(data_iterator)
        for key in batch:
            batch[key] = batch[key].cuda(non_blocking=True)
            highend_broadcast(batch[key], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group())
    else:
        # Non-source ranks receive exactly these three fields, in this order.
        # NOTE(review): the source rank broadcasts *every* key of its batch,
        # so the dataset must yield exactly these keys in this order or the
        # collectives will desynchronize — verify against AudioLLMDataset.
        batch = {}
        for key in [
            "spectrograms", "text","padding_masks"
        ]:
            batch[key] = highend_broadcast(None, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group())

    return batch.values()





def loss_func(padding_masks, output_tensor):
    """Reduce the CTC loss across parallel groups.

    Packs (summed loss, sequence count) into one tensor so a single
    all-reduce handles both, then returns the local loss scaled by the
    context-parallel size, the sequence count, and a reporting dict
    reduced over the data-parallel group.
    """
    args = get_args()

    per_element_loss = output_tensor.float()
    num_sequences = torch.tensor(1).to(torch.cuda.current_device())
    packed = torch.cat(
        [
            torch.sum(per_element_loss.view(-1)).view(1),
            num_sequences.view(1),
        ]
    )

    # Sum partial losses across context-parallel ranks when CP is active.
    if args.context_parallel_size > 1:
        torch.distributed.all_reduce(packed, group=mpu.get_context_parallel_group())

    # Detached copy reduced over data-parallel ranks, for logging only.
    reported = packed.clone().detach()
    torch.distributed.all_reduce(reported, group=mpu.get_data_parallel_group())

    scaled_loss = packed[0] * args.context_parallel_size
    seq_count = packed[1].clone().detach().to(torch.int)
    return (
        scaled_loss, seq_count,
        {
            'ctc loss': (
                reported[0], reported[1]
            )
        }
    )

def forward_step(data_iterator, model):
    """Forward step.

    Pulls (spectrograms, text, padding_masks) from the data iterator,
    runs the model, and returns the model output together with loss_func
    partially applied over the padding masks (Megatron calls it with the
    output tensor later).
    """
    # Fix: removed unused `args = get_args()` and a dead duplicate
    # docstring (stale batch-dict example).
    timers = get_timers()
    # Time batch generation separately from the model forward pass.
    timers('batch-generator', log_level=2).start()
    global stimer
    with stimer(bdata=True):
        spectrograms, text, padding_masks = get_batch(
            data_iterator)
    timers('batch-generator').stop()
    with stimer:
        output = model(spectrograms, padding_masks, text)
    return output, partial(loss_func, padding_masks)



def add_speech_args(parser):
    """Register whisper-specific CLI arguments on *parser* and return it.

    Fix: the boolean flags previously used `action='store_true'` with
    `default=True`, which made them impossible to disable (passing the
    flag was a no-op).  `BooleanOptionalAction` keeps the old behavior
    (absent or `--whisper-x` -> True) and additionally accepts
    `--no-whisper-x` to turn the feature off.
    """
    import argparse  # local import: only needed for BooleanOptionalAction

    group = parser.add_argument_group(title="speech")

    group.add_argument('--whisper-input-dim', type=int, default=128,
                       help='Input dimension size')
    group.add_argument('--whisper-conv1-out-dim', type=int, default=1280,
                       help='Output dimension of first convolution layer')
    group.add_argument('--whisper-conv-kernel-size', type=int, default=3,
                       help='Kernel size for convolution')
    group.add_argument('--whisper-conv-stride', type=int, default=1,
                       help='Stride size for convolution')
    group.add_argument('--whisper-seq-length', type=int, default=1500,
                       help='Sequence length')
    group.add_argument('--whisper-vocab-size', type=int, default=4352,
                       help='Vocabulary size')
    group.add_argument('--whisper-pre-process', action=argparse.BooleanOptionalAction,
                       default=True,
                       help='Enable pre-processing')
    group.add_argument('--whisper-post-process', action=argparse.BooleanOptionalAction,
                       default=True,
                       help='Enable post-processing')
    group.add_argument('--whisper-hidden-size', type=int, default=1280,
                       help='Hidden size for the model')
    group.add_argument('--whisper-num-attention-heads', type=int, default=20,
                       help='Number of attention heads')
    group.add_argument('--whisper-num-layers', type=int, default=32,
                       help='Number of layers in the model')
    group.add_argument('--whisper-layernorm-epsilon', type=float, default=1e-5,
                       help='Layer normalization epsilon value')
    group.add_argument('--whisper-apply-query-key-layer-scaling',
                       action=argparse.BooleanOptionalAction, default=True,
                       help='Apply query key layer scaling')
    group.add_argument('--whisper-tensor-model-parallel-size', type=int, default=1,
                       help='Tensor model parallel size')

    return parser

def asr_train_valid_test_datasets_provider(train_val_test_num_samples):
    """Build the train/valid/test datasets for ASR pretraining.

    Args:
        train_val_test_num_samples: per-split sample counts supplied by
            Megatron (not used here; dataset sizes come from the data
            files themselves).

    Returns:
        Tuple of (train_ds, valid_ds, None) — no test split is built.
    """
    args = get_args()

    # Backward-compatible defaults; override by defining the corresponding
    # attribute on args.  TODO(review): these hard-coded, user-specific
    # cluster paths should be promoted to proper CLI arguments.
    whisper_path = getattr(
        args, 'whisper_pretrained_path',
        "/apdcephfs_qy3/share_976139/users/joyounglv/pretrained_ckpt/whisper-large-v3")
    vocab_file = getattr(
        args, 'whisper_vocab_file',
        "/apdcephfs_qy3/share_976139/users/adrenzhou/data/aishell/dict/lang_char.txt")

    # Data augmentation is applied to the training split only.
    train_ds = AudioLLMDataset(args.train_data_path, whisper_path, vocab_file, data_aug=True)
    valid_ds = AudioLLMDataset(args.valid_data_path, whisper_path, vocab_file)
    return train_ds, valid_ds, None



if __name__ == "__main__":
    # NOTE(review): presumably this flag tells Megatron's dataset builder the
    # provider is safe to call on every rank (not just rank 0) — confirm
    # against megatron.training.pretrain's handling of `is_distributed`.
    asr_train_valid_test_datasets_provider.is_distributed = True
    # Megatron training entry point: builds the model via model_provider,
    # datasets via the provider above, and iterates forward_step.
    pretrain(
        asr_train_valid_test_datasets_provider,
        model_provider,
        ModelType.encoder_or_decoder,
        forward_step,
        args_defaults={'tokenizer_type': 'GPT2BPETokenizer'},
        extra_args_provider=add_speech_args,
        # get_embedding_ranks=llava_embedding_ranks,
        # get_position_embedding_ranks=llava_position_embedding_ranks,
    )    