import os
import pdb
import random
from copy import deepcopy
from functools import partial

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

from megatron.core import mpu, parallel_state
from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder
from megatron.core.datasets.IdealLLM_dataset_v2 import IdealLLMDataset, collate_fn
from megatron.core.datasets.utils import get_blend_from_list
from megatron.core.enums import ModelType
from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec
from megatron.core.models.multimodal.Ideal_LLM_v2 import IdealLLM, get_mlp_module_spec
from megatron.core.models.speech.whisper_layer_specs import (
    get_whisper_layer_with_transformer_engine_spec,
)
from megatron.core.models.speech.whisper_model import WhisperMegatron
from megatron.core.transformer.spec_utils import import_module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.utils import StragglerDetector
from megatron.training import get_args, get_timers, get_tokenizer, pretrain, print_rank_0
from megatron.training.arguments import core_transformer_config_from_args
from megatron.training.audio_utils import get_batch_on_this_tp_rank_batch_IdealLLM_v2
from megatron.training.utils import average_losses_across_data_parallel_group

# Module-level straggler-detection timer; forward_step binds it via `global stimer`.
stimer = StragglerDetector()

def whisper_transformer_config_from_args(args):
    """Build the TransformerConfig for the Whisper speech encoder.

    All values come from the ``--whisper-*`` argument group (plus the shared
    tensor-parallel size); both dropout rates are pinned to 0.0.
    """
    return TransformerConfig(
        hidden_size=args.whisper_hidden_size,
        num_attention_heads=args.whisper_num_attention_heads,
        num_layers=args.whisper_num_layers,
        layernorm_epsilon=args.whisper_layernorm_epsilon,
        apply_query_key_layer_scaling=args.whisper_apply_query_key_layer_scaling,
        tensor_model_parallel_size=args.tensor_model_parallel_size,
        add_qkv_bias=args.whisper_add_qkv_bias,
        attention_dropout=0.0,
        hidden_dropout=0.0,
    )

def model_provider(
    pre_process=True, post_process=True, add_encoder=True, add_decoder=True,
    parallel_output=True) -> IdealLLM:
    """Build an IdealLLM instance (Whisper speech encoder + GPT decoder).

    Args:
        pre_process: whether this pipeline stage holds the embedding layer.
        post_process: whether this pipeline stage holds the output layer.
        add_encoder, add_decoder, parallel_output: accepted for the Megatron
            provider API but not used by this provider.

    Returns:
        The constructed IdealLLM model, with the LLM and/or Whisper encoder
        optionally frozen per the CLI flags.
    """
    args = get_args()

    # Language-model (decoder) configuration and layer spec.
    llm_config = core_transformer_config_from_args(args)
    llm_layer_spec = get_gpt_layer_with_transformer_engine_spec(
        args.num_experts, args.moe_grouped_gemm, args.qk_layernorm
    )

    # Speech encoder (Whisper) configuration and layer spec.
    encoder_config = whisper_transformer_config_from_args(args)
    encoder_layer_spec = get_whisper_layer_with_transformer_engine_spec()

    # Speech->LLM projection MLP: decoder config, but with linear biases on.
    projector_config = deepcopy(llm_config)
    projector_config.add_bias_linear = True
    projector_layer_spec = get_mlp_module_spec().submodules

    model = IdealLLM(
        language_transformer_config=llm_config,
        language_transformer_layer_spec=llm_layer_spec,
        language_vocab_size=args.padded_vocab_size,
        language_max_sequence_length=args.max_position_embeddings,
        conv1_out_dim=args.whisper_conv1_out_dim,
        speech_transformer_config=encoder_config,
        speech_transformer_layer_spec=encoder_layer_spec,
        drop_speech_class_token=False,
        speech_projection_config=projector_config,
        speech_projection_layer_spec=projector_layer_spec,
        language_position_embedding_type=args.position_embedding_type,
        pre_process=pre_process,
        post_process=post_process,
        language_rotary_base=args.rotary_base,
        input_dim=args.whisper_input_dim,
        share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights,
    )

    # Optionally freeze the LLM and/or Whisper weights; the projection
    # layers always stay trainable.
    model.freeze(
        freeze_language_model=args.freeze_llm,
        freeze_whisper=args.freeze_whisper,
        freeze_projection=False,
    )
    return model

        
def get_batch(data_iterator):
    """Fetch and broadcast one batch for the current rank.

    Only tensor-parallel rank 0 pulls from ``data_iterator``; the batch is
    then shared across TP ranks by
    ``get_batch_on_this_tp_rank_batch_IdealLLM_v2``.

    Returns:
        On the first/last pipeline stage: a dict with keys
        ``raw_wav_lengths``, ``input_features``, ``input_ids``,
        ``attention_mask``, ``labels``, ``language_id`` and ``loss_mask``.
        On intermediate pipeline stages: a 7-tuple of ``None``.
        NOTE(review): the two return shapes are inconsistent — forward_step
        indexes the result as a dict, so intermediate stages presumably never
        reach that code path; confirm before relying on the tuple form.
    """
    # Intermediate pipeline stages receive activations, not data.
    if (not mpu.is_pipeline_first_stage()) and (not mpu.is_pipeline_last_stage()):
        return None, None, None, None, None, None, None

    batch = None
    if mpu.get_tensor_model_parallel_rank() == 0:
        batch = next(data_iterator)
    # Broadcast (or build placeholders) on the other TP ranks.
    batch = get_batch_on_this_tp_rank_batch_IdealLLM_v2(batch)
    return batch





def loss_func(loss_mask: torch.Tensor, loss_lid: torch.Tensor, loss_ctc: torch.Tensor, output_tensor: torch.Tensor):
    """Loss function combining the LM loss with auxiliary LID and CTC losses.

    Args:
        loss_mask (torch.Tensor): masks out padded/prompt positions of the loss.
        loss_lid (torch.Tensor): auxiliary language-identification loss (scalar).
        loss_ctc (torch.Tensor): auxiliary CTC loss (scalar).
        output_tensor (torch.Tensor): per-token LM losses from the model.

    Returns:
        A 3-tuple of:
        - the combined loss for this micro-batch (masked LM loss scaled by
          context-parallel size, plus (lid + ctc) scaled by the token count),
        - the number of non-padded tokens in this micro-batch,
        - a dict of reporting metrics reduced across data-parallel ranks.
    """
    args = get_args()

    losses = output_tensor.float()
    loss_mask = loss_mask.view(-1).float()
    total_tokens = loss_mask.sum()
    # loss[0] = masked sum of per-token losses, loss[1] = number of tokens.
    loss = torch.cat([torch.sum(losses.view(-1) * loss_mask).view(1), total_tokens.view(1)])

    if args.context_parallel_size > 1:
        torch.distributed.all_reduce(loss, group=mpu.get_context_parallel_group())

    # Check individual rank losses are not NaN prior to DP all-reduce.
    # Fix(review): the original wrapped this assert in try/except and dropped
    # into pdb.set_trace(), which deadlocks every rank of a distributed job;
    # fail fast with the assertion instead.
    if args.check_for_nan_in_loss_and_grad:
        global_rank = torch.distributed.get_rank()
        assert not loss[0].isnan(), (
            f'Rank {global_rank}: found NaN in local forward loss calculation. '
            f'Device: {torch.cuda.current_device()}, node: {os.uname()[1]}'
        )

    # Reduce a detached copy for logging only.
    reporting_loss = loss.clone().detach()
    torch.distributed.all_reduce(reporting_loss, group=mpu.get_data_parallel_group())
    local_num_tokens = loss[1].clone().detach().to(torch.int)
    return (
        loss[0] * args.context_parallel_size + (loss_lid + loss_ctc) * local_num_tokens,
        local_num_tokens,
        {'lm loss': (reporting_loss[0], reporting_loss[1]), 'lid loss': loss_lid.clone().detach(), 'ctc loss': loss_ctc.clone().detach()},
    )

def forward_step(data_iterator, model):
    """Forward step: fetch one batch and run the IdealLLM model.

    Args:
        data_iterator: iterator yielding collated batch dicts (see get_batch).
        model: the IdealLLM model (possibly wrapped by Megatron).

    Returns:
        (output_tensor, loss_fn) where ``loss_fn`` is ``loss_func`` partially
        applied with this batch's loss mask and the model's auxiliary
        LID/CTC losses.
    """
    # Fix(review): removed an unused `args = get_args()` local.
    timers = get_timers()
    timers('batch-generator', log_level=2).start()
    global stimer
    with stimer(bdata=True):
        sample = get_batch(data_iterator)
        input_features = sample['input_features']
        input_ids = sample['input_ids']
        attention_mask = sample['attention_mask']
        raw_wav_lengths = sample['raw_wav_lengths']
        language_id = sample['language_id']
        labels = sample['labels']
        loss_mask = sample['loss_mask']
    timers('batch-generator').stop()

    with stimer:
        output, loss_lid, loss_ctc = model(
            input_features=input_features,
            feature_attention_mask=None,  # audio lengths are carried by raw_wav_lengths
            raw_wav_lengths=raw_wav_lengths,
            input_ids=input_ids,
            attention_mask=attention_mask,
            language_id=language_id,
            labels=labels,
            loss_mask=loss_mask,
        )
    return output, partial(loss_func, loss_mask, loss_lid, loss_ctc)



def add_speech_args(parser):
    """Register the ``speech`` CLI argument group.

    Covers the Whisper encoder hyper-parameters plus the flags that freeze
    the language model and/or the speech encoder during training.

    Returns:
        The same ``parser``, for chaining.
    """
    group = parser.add_argument_group(title="speech")

    # (flag, default, help) triples for the plain integer-valued options.
    int_options = (
        ('--whisper-input-dim', 128, 'Input dimension size'),
        ('--whisper-conv1-out-dim', 1280, 'Output dimension of first convolution layer'),
        ('--whisper-conv-kernel-size', 3, 'Kernel size for convolution'),
        ('--whisper-conv-stride', 1, 'Stride size for convolution'),
        ('--whisper-seq-length', 1500, 'Sequence length'),
        ('--whisper-vocab-size', 4352, 'Vocabulary size'),
    )
    for flag, default, help_text in int_options:
        group.add_argument(flag, type=int, default=default, help=help_text)

    # NOTE(review): store_true combined with default=True means these three
    # flags can never be switched off from the command line.
    group.add_argument('--whisper-pre-process', action='store_true', default=True,
                       help='Enable pre-processing')
    group.add_argument('--whisper-post-process', action='store_true', default=True,
                       help='Enable post-processing')

    group.add_argument('--whisper-hidden-size', type=int, default=1280,
                       help='Hidden size for the model')
    group.add_argument('--whisper-num-attention-heads', type=int, default=20,
                       help='Number of attention heads')
    group.add_argument('--whisper-num-layers', type=int, default=32,
                       help='Number of layers in the model')
    group.add_argument('--whisper-layernorm-epsilon', type=float, default=1e-5,
                       help='Layer normalization epsilon value')
    group.add_argument('--whisper-apply-query-key-layer-scaling', action='store_true', default=False,
                       help='Apply query key layer scaling')
    group.add_argument('--whisper-add-qkv-bias', action='store_true', default=True)
    group.add_argument('--freeze-whisper', action='store_true')
    group.add_argument('--freeze-llm', action='store_true')

    return parser

def cyclic_iter(iterable):
    """Yield items from ``iterable`` indefinitely, restarting on exhaustion.

    Fix(review): parameter renamed from ``iter`` to avoid shadowing the
    builtin; all call sites in this file pass it positionally.

    ``iterable`` must be re-iterable (e.g. a list or a DataLoader): a
    one-shot generator would yield its items once and then leave the outer
    loop spinning forever without producing anything.
    """
    while True:
        yield from iterable

def IdealLLM_train_valid_test_datasets_provider(train_val_test_num_samples):
    """Build cyclic train/valid iterators for IdealLLM pretraining.

    Args:
        train_val_test_num_samples: unused; sampling is driven entirely by
            the Megatron samplers below (kept for the pretrain() provider API).

    Returns:
        (train_iterator, valid_iterator, None) — infinite iterators over the
        respective DataLoaders; there is no test set.
    """
    args = get_args()

    print_rank_0('> building datasets for IdealLLM ...')

    # Fix(review): removed a dead model_path assignment that was immediately
    # overwritten. TODO(review): the path is still hard-coded; make it a CLI arg.
    model_path = "/apdcephfs/share_976139/users/hongfeixue/model/IdealLLM-qwen3-nolora"

    # --train-data-path / --valid-data-path each carry one comma-separated list.
    train_data_path = args.train_data_path[0].split(",")
    valid_data_path = args.valid_data_path[0].split(",")
    train_ds = IdealLLMDataset(train_data_path, model_path, seq_len=args.seq_length,
                               ret_id=False, data_aug=False, sample_merge=False)
    valid_ds = IdealLLMDataset(valid_data_path, model_path, seq_len=args.seq_length,
                               ret_id=False, data_aug=False, sample_merge=False)

    # Recover the consumed-sample count when resuming an iteration-based run.
    if args.iteration > 0 and args.consumed_train_samples == 0:
        assert args.train_samples is None, \
            'only backward compatiblity support for iteration-based training'
        args.consumed_train_samples = args.iteration * args.global_batch_size

    from megatron.legacy.data.data_samplers import MegatronPretrainingRandomSampler
    batch_sampler = MegatronPretrainingRandomSampler(
        dataset=train_ds,
        total_samples=len(train_ds),
        consumed_samples=args.consumed_train_samples,
        micro_batch_size=args.micro_batch_size,
        data_parallel_rank=mpu.get_data_parallel_rank(),
        data_parallel_size=mpu.get_data_parallel_world_size(),
        data_sharding=args.data_sharding,
    )

    # Validation always starts from the beginning (consumed_samples=0).
    batch_sampler_valid = MegatronPretrainingRandomSampler(
        dataset=valid_ds,
        total_samples=len(valid_ds),
        consumed_samples=0,
        micro_batch_size=args.micro_batch_size,
        data_parallel_rank=mpu.get_data_parallel_rank(),
        data_parallel_size=mpu.get_data_parallel_world_size(),
        data_sharding=args.data_sharding,
    )

    train_loader = DataLoader(
        dataset=train_ds,
        batch_sampler=batch_sampler,
        num_workers=args.num_workers,
        pin_memory=True,
        collate_fn=collate_fn,
        persistent_workers=args.num_workers > 0,
    )

    valid_loader = DataLoader(
        dataset=valid_ds,
        batch_sampler=batch_sampler_valid,
        num_workers=args.num_workers,
        pin_memory=True,
        collate_fn=collate_fn,
        persistent_workers=args.num_workers > 0,
    )

    return iter(cyclic_iter(train_loader)), iter(cyclic_iter(valid_loader)), None


if __name__ == "__main__":
    # NOTE(review): pretrain() appears to read `is_distributed` to decide how
    # the dataset provider is invoked across ranks — confirm against the
    # megatron.training implementation.
    IdealLLM_train_valid_test_datasets_provider.is_distributed = True
    pretrain(
        IdealLLM_train_valid_test_datasets_provider,
        model_provider,
        ModelType.encoder_or_decoder,
        forward_step,
        args_defaults={'tokenizer_type': 'GPT2BPETokenizer'},
        extra_args_provider=add_speech_args,
        # get_embedding_ranks=llava_embedding_ranks,
        # get_position_embedding_ranks=llava_position_embedding_ranks,
    )    
