# coding=utf-8
# Copyright (c) 2025 Tencent Inc. All rights reserved.
# guanyouhe@tencent.com

import os
import warnings
from copy import deepcopy
from functools import partial
from dataclasses import asdict
from typing import Union
from packaging.version import Version

import torch

from examples.multimodal.config import get_vision_model_config
from examples.multimodal.multimodal_args import add_multimodal_extra_args

from gpatch.patch_mcore import init_gpatch_for_mcore
from megatron.core import mpu
from megatron.core.enums import ModelType
from megatron.core.models.multimodal.llava_model import IMAGE_TOKEN
from megatron.core.models.vision.clip_vit_model import get_num_image_embeddings
from megatron.training import get_args, get_tokenizer, print_rank_0, pretrain, get_timers
from megatron.training.arguments import core_transformer_config_from_args
from megatron.training.utils import get_ltor_masks_and_position_ids
from megatron.core import tensor_parallel
from megatron.training.utils import average_losses_across_data_parallel_group
from megatron.core import package_info

from megatron_datasets.args import parse_dataset_config
from megatron_datasets.mega_indexed_jsonl_dataset_v3 import update_consumed

from gpatch.patch_mcore import init_gpatch_for_mcore
from gpatch.training.arguments import gpatch_extra_args
from gpatch.core.models.vision.multimodal_projector import (
    Gemma3MultiModalProjector,
    get_projector_module_spec,
    get_projector_module_spec_te,
)
from gpatch.core.transformer.transformer_config import Gemma3TransformerConfig
from gpatch.core.models.multimodal.llava_model import Gemma3LLaVAModel
from gpatch.core.models.multimodal.llava_model_dpo import LLaVAModelDPO
from gpatch.core.models.multimodal.layer_specs import (
    get_gemma3_layer_spec_te,
    get_layer_spec_te,
)
from gpatch.core.models.gpt.gpt_model import Gemma3GPTModel
from gpatch.core.utils import split_data_cp_rank


def get_vision_projection_config(config: Gemma3TransformerConfig):
    """Derive the vision-projector config from the base transformer config.

    Returns a fresh ``Gemma3TransformerConfig`` copy of ``config`` with the
    Gemma3 projector settings applied (896px images, 14px patches,
    256 tokens per image, bias-free linears, zero-centered layernorm gamma).
    """
    projector_config = Gemma3TransformerConfig(**asdict(config))
    projector_config.mm_projector_cls = Gemma3MultiModalProjector
    projector_config.image_size = 896
    projector_config.patch_size = 14
    projector_config.mm_tokens_per_image = 256
    projector_config.add_bias_linear = False
    projector_config.layernorm_zero_centered_gamma = True
    return projector_config


def get_llava_model_configs(use_te, args):
    """Build configs and layer specs for the language, vision, and projector sub-models.

    Args:
        use_te: Must be truthy; only the Transformer Engine layer specs are implemented.
        args: Parsed megatron training arguments.

    Returns:
        A 6-tuple of (language_config, language_transformer_layer_spec,
        vision_config, vision_transformer_layer_spec,
        vision_projection_config, vision_projection_layer_spec).

    Raises:
        NotImplementedError: If ``use_te`` is falsy.
    """
    base_config = core_transformer_config_from_args(args, Gemma3TransformerConfig)
    base_config.vision_model_type = args.vision_model_type
    base_config.sliding_window = args.sliding_window
    # Gemma scales token embeddings by sqrt(hidden_size).
    base_config.embed_scale = base_config.hidden_size**0.5
    base_config.hf_vocab_size = get_tokenizer().vocab_size

    language_config = deepcopy(base_config)
    language_config.activation_func = torch.nn.functional.gelu

    if use_te:
        language_transformer_layer_spec = get_gemma3_layer_spec_te(is_vit=False)
    else:
        # Was `assert False`: asserts are stripped under `python -O`, so fail explicitly.
        raise NotImplementedError("Gemma3 language model only supports Transformer Engine (--use-te)")

    vision_config = deepcopy(base_config)
    vision_config = get_vision_model_config(
        vision_config, apply_query_key_layer_scaling=args.apply_query_key_layer_scaling)

    if use_te:
        vision_transformer_layer_spec = get_layer_spec_te(is_vit=True)
    else:
        # Was `assert False`: asserts are stripped under `python -O`, so fail explicitly.
        raise NotImplementedError("Gemma3 vision model only supports Transformer Engine (--use-te)")

    vision_projection_config = get_vision_projection_config(base_config)

    # --encoder-pipeline-model-parallel-size 1 will enable a separate pipeline stage for the vision model.
    if args.encoder_pipeline_model_parallel_size > 0:
        assert (args.encoder_pipeline_model_parallel_size == 1
                ), "vision model and projection can only live on 1 pipeline stage."

        if args.encoder_tensor_model_parallel_size > 0:
            vision_config.tensor_model_parallel_size = args.encoder_tensor_model_parallel_size
            vision_projection_config.tensor_model_parallel_size = (
                args.encoder_tensor_model_parallel_size)

    # Make sure vision model pipeline parallel size is not inherited from the language model pipeline parallel size.
    # 0 is not a valid for the config value, hence max(1, ).
    vision_config.pipeline_model_parallel_size = max(1, args.encoder_pipeline_model_parallel_size)
    vision_projection_config.pipeline_model_parallel_size = vision_config.pipeline_model_parallel_size

    # Make sure the vision model does not inherit first and last pipeline num layers from the language model.
    vision_config.num_layers_in_first_pipeline_stage = vision_config.num_layers_in_last_pipeline_stage = None

    if use_te:
        vision_projection_layer_spec = get_projector_module_spec_te()
    else:
        vision_projection_layer_spec = get_projector_module_spec()

    # Toggle --recompute* for the vision and language model separately.
    if args.recompute_vision:
        if vision_config.recompute_method is not None and vision_config.recompute_granularity is not None:
            vision_config.recompute_num_layers = vision_config.num_layers
    else:
        vision_config.recompute_granularity = None
        vision_config.recompute_method = None
        vision_config.recompute_num_layers = None

    # The projector is tiny; never use activation recomputation for it.
    vision_projection_config.recompute_granularity = None
    vision_projection_config.recompute_method = None
    vision_projection_config.recompute_num_layers = None

    return (
        language_config,
        language_transformer_layer_spec,
        vision_config,
        vision_transformer_layer_spec,
        vision_projection_config,
        vision_projection_layer_spec,
    )


def check_model(args):
    """Validate multimodal model arguments and derive sequence lengths.

    Mutates ``args`` in place: sets ``seq_length`` and ``encoder_seq_length``
    to the number of image embeddings, and expands ``max_position_embeddings``
    when ``decoder_seq_length`` exceeds it.
    """
    assert args.encoder_pipeline_model_parallel_size <= 1, "LLaVA does not support pp>1 for encoder on it's own pipeline rank"
    # Message previously said "CLIP" although the condition checks for SigLIP.
    assert args.vision_model_type == "siglip", "Gemma3 only supports SigLIP vision model"
    assert args.qk_layernorm, "you should add --qk-layernorm"
    assert args.use_te, "Gemma3 only supports TE now"
    print_rank_0('building a multimodal model ...')

    # args.seq_length / args.encoder_seq_length hold the vision model sequence length;
    # args.decoder_seq_length holds the language model sequence length.
    # In short: encoder == vision model, decoder == language model.
    num_image_embeddings = get_num_image_embeddings(
        args.img_h,
        args.img_w,
        args.patch_dim,
        args.vision_model_type,
        args.disable_vision_class_token,
        1,
        args.pixel_shuffle,
        args.use_tile_tags,
    )
    old_seq_length = args.seq_length
    args.seq_length = args.encoder_seq_length = num_image_embeddings
    if torch.distributed.get_rank() == 0 and old_seq_length != args.seq_length:
        warnings.warn(
            f"Changed seq_length and encoder_seq_length (vision model sequence length) from {old_seq_length} to num_image_tokens ({num_image_embeddings})"
        )

    assert (args.decoder_seq_length is not None
            ), "Please provide --decoder-seq-length to set the language model sequence length"
    if args.decoder_seq_length > args.max_position_embeddings:
        args.max_position_embeddings = args.decoder_seq_length
        warnings.warn(
            f"Expanded max_position_embeddings to {args.max_position_embeddings} to accommodate the maximum language model sequence length"
        )


def sft_model_provider(pre_process=True,
                       post_process=True,
                       add_encoder=True,
                       add_decoder=True,
                       parallel_output=True) -> Gemma3LLaVAModel:
    """Build the Gemma3 LLaVA model for supervised fine-tuning."""
    args = get_args()
    check_model(args)

    (lm_config,
     lm_layer_spec,
     vit_config,
     vit_layer_spec,
     proj_config,
     proj_layer_spec) = get_llava_model_configs(args.use_te, args)

    # The image placeholder token must already exist in the tokenizer vocabulary.
    img_token_id = get_tokenizer().convert_tokens_to_ids(IMAGE_TOKEN)
    assert img_token_id is not None, f"IMAGE_TOKEN={IMAGE_TOKEN} needs to be added using the --special-tokens arg."

    llava_model = Gemma3LLaVAModel(
        language_transformer_config=lm_config,
        language_transformer_layer_spec=lm_layer_spec,
        language_vocab_size=args.padded_vocab_size,
        language_max_sequence_length=args.decoder_seq_length,
        vision_transformer_config=vit_config,
        vision_transformer_layer_spec=vit_layer_spec,
        drop_vision_class_token=args.disable_vision_class_token,
        vision_projection_config=proj_config,
        vision_projection_layer_spec=proj_layer_spec,
        vision_projection_type="custom_cls",
        allow_missing_vision_projection_checkpoint=args.allow_missing_vision_projection_checkpoint,
        parallel_output=parallel_output,
        share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights,
        language_position_embedding_type=args.position_embedding_type,
        language_rotary_percent=args.rotary_percent,
        pre_process=pre_process,
        post_process=post_process,
        add_encoder=add_encoder,
        add_decoder=add_decoder,
        img_h=args.img_h,
        img_w=args.img_w,
        patch_dim=args.patch_dim,
        language_rotary_base=args.rotary_base,
        language_rope_scaling=args.use_rope_scaling,
        image_token_index=img_token_id,
        pixel_shuffle=args.pixel_shuffle,
        tile_tags=None,
        text_model_cls=Gemma3GPTModel,
    )

    # Apply the --mm-freeze-* flags to the individual sub-modules.
    llava_model.freeze(
        freeze_language_model=args.mm_freeze_llm,
        freeze_vision_model=args.mm_freeze_vision_encoder,
        freeze_vision_projection=args.mm_freeze_projector,
    )

    return llava_model


def dpo_model_provider(pre_process=True,
                       post_process=True,
                       add_encoder=True,
                       add_decoder=True,
                       parallel_output=True) -> LLaVAModelDPO:
    """Build the DPO-wrapped Gemma3 LLaVA model."""
    args = get_args()
    check_model(args)

    (lm_config,
     lm_layer_spec,
     vit_config,
     vit_layer_spec,
     proj_config,
     proj_layer_spec) = get_llava_model_configs(args.use_te, args)

    # The image placeholder token must already exist in the tokenizer vocabulary.
    img_token_id = get_tokenizer().convert_tokens_to_ids(IMAGE_TOKEN)
    assert img_token_id is not None, f"IMAGE_TOKEN={IMAGE_TOKEN} needs to be added using the --special-tokens arg."

    dpo_model = LLaVAModelDPO(
        # llava model param
        language_transformer_config=lm_config,
        language_transformer_layer_spec=lm_layer_spec,
        language_vocab_size=args.padded_vocab_size,
        language_max_sequence_length=args.decoder_seq_length,
        vision_transformer_config=vit_config,
        vision_transformer_layer_spec=vit_layer_spec,
        vision_projection_config=proj_config,
        vision_projection_layer_spec=proj_layer_spec,
        vision_projection_type="custom_cls",
        parallel_output=parallel_output,
        share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights,
        language_position_embedding_type=args.position_embedding_type,
        language_rotary_percent=args.rotary_percent,
        pre_process=pre_process,
        post_process=post_process,
        add_encoder=add_encoder,
        add_decoder=add_decoder,
        llava_model_class=Gemma3LLaVAModel,
        # dpo param
        beta=args.dpo_beta,
        label_smoothing=args.dpo_label_smoothing,
        ftx_gamma=args.dpo_ftx_gamma,
        # llava model extra param
        drop_vision_class_token=args.disable_vision_class_token,
        allow_missing_vision_projection_checkpoint=args.allow_missing_vision_projection_checkpoint,
        img_h=args.img_h,
        img_w=args.img_w,
        patch_dim=args.patch_dim,
        language_rotary_base=args.rotary_base,
        language_rope_scaling=args.use_rope_scaling,
        image_token_index=img_token_id,
        pixel_shuffle=args.pixel_shuffle,
        tile_tags=None,
        text_model_cls=Gemma3GPTModel,
    )

    # Apply the --mm-freeze-* flags to the individual sub-modules.
    dpo_model.freeze(
        freeze_language_model=args.mm_freeze_llm,
        freeze_vision_model=args.mm_freeze_vision_encoder,
        freeze_vision_projection=args.mm_freeze_projector,
    )

    return dpo_model


def model_provider(
    pre_process=True,
    post_process=True,
    add_encoder=True,
    add_decoder=True,
    parallel_output=True,
) -> Union[Gemma3LLaVAModel, LLaVAModelDPO]:
    """Dispatch to the DPO or SFT model provider based on the --dpo flag."""
    provider = dpo_model_provider if get_args().dpo else sft_model_provider
    return provider(pre_process, post_process, add_encoder, add_decoder, parallel_output)


def get_batch(data_iterator):
    """Generate a batch.

    Pulls the next sample from the iterator (where one exists on this rank)
    and broadcasts it across the tensor-parallel group. Returns the tuple
    (tokens, labels, loss_mask, attn_mask, sliding_window_attention_mask,
    position_ids, imgs).
    """
    args = get_args()

    # Broadcast data.
    if data_iterator is not None:
        data = next(data_iterator)
    else:
        data = None
    # NOTE(guanyouhe): broadcasting train_data_consuming_progresses here directly
    # removes the warning that otherwise appears when saving checkpoints.
    update_consumed(args.train_data_consuming_progresses, torch.distributed.get_rank(), data)

    # Integer fields are broadcast first; has_imgs tells every rank whether
    # image tensors will be part of the second (bfloat16) broadcast.
    keys = ["input_ids", "labels", "has_imgs"]
    data_b = tensor_parallel.broadcast_data(keys, data, torch.int64)
    tokens = data_b["input_ids"].long().contiguous()
    labels = data_b["labels"].long().contiguous()
    has_imgs = data_b["has_imgs"].long().contiguous().tolist()[0]

    # Second collective: bfloat16 fields. pixel_values is only included when the
    # batch contains images (all ranks agree via the has_imgs value above).
    keys = ["attention_mask", "sliding_window_attention_mask"]
    if has_imgs > 0:
        keys.append("pixel_values")
    data_b = tensor_parallel.broadcast_data(keys, data, torch.bfloat16)
    attn_mask = data_b["attention_mask"].type(torch.bfloat16).contiguous()
    sliding_window_attention_mask = data_b["sliding_window_attention_mask"].type(
        torch.bfloat16).contiguous()

    if has_imgs > 0:
        # shape: num_imgs x c x h x w
        imgs = data_b["pixel_values"].type(torch.bfloat16).contiguous()
    else:
        # Text-only batch: use an empty placeholder tensor on the same device.
        imgs = torch.tensor([], dtype=torch.bfloat16, device=tokens.device)

    # Derive loss mask and position ids from the labels (-100 = ignore index).
    _, loss_mask, position_ids = get_ltor_masks_and_position_ids(
        labels,
        -100,
        args.reset_position_ids,
        args.reset_attention_mask,
        args.eod_mask_loss,
    )

    # Shard labels/loss_mask (dim 1) and images (dim 2) across context-parallel ranks.
    if args.context_parallel_size > 1:
        labels = split_data_cp_rank(labels, args.context_parallel_size, 1)
        loss_mask = split_data_cp_rank(loss_mask, args.context_parallel_size, 1)
        if has_imgs > 0:
            imgs = split_data_cp_rank(imgs, args.context_parallel_size, 2)

    return (
        tokens,
        labels,
        loss_mask,
        attn_mask,
        sliding_window_attention_mask,
        position_ids,
        imgs,
    )


def loss_func(loss_mask: torch.Tensor, output_tensor: torch.Tensor):
    """Compute the masked language-model loss.

    Args:
        loss_mask (torch.Tensor): Selects which token positions contribute to the loss.
        output_tensor (torch.Tensor): Per-token losses produced by the model.

    Returns:
        Tuple of (loss used for backward, dict of the DP-averaged "lm loss").
    """
    args = get_args()

    per_token_loss = output_tensor.float()
    flat_mask = loss_mask.view(-1).float()

    # loss[0] = masked loss sum, loss[1] = number of unmasked tokens.
    masked_sum = torch.sum(per_token_loss.view(-1) * flat_mask).view(1)
    token_count = flat_mask.sum().view(1)
    loss = torch.stack([masked_sum, token_count])
    if args.context_parallel_size > 1:
        torch.distributed.all_reduce(loss, group=mpu.get_context_parallel_group())

    # Check individual rank losses are not NaN prior to DP all-reduce.
    if args.check_for_nan_in_loss_and_grad:
        global_rank = torch.distributed.get_rank()
        assert not loss.isnan().any(), (
            f"Rank {global_rank}: found NaN in local forward loss calculation. "
            f"Device: {torch.cuda.current_device()}, node: {os.uname()[1]}")

    reduced = average_losses_across_data_parallel_group(loss)
    averaged_loss = reduced[0] / reduced[1]
    # NOTE(review): older megatron-core versions appear to need the backward loss
    # rescaled by the context-parallel size — confirm against the pinned mcore version.
    if Version(package_info.__version__) < Version("0.12.1"):
        bwd_loss = (loss[0] / loss[1]) * args.context_parallel_size
    else:
        bwd_loss = loss[0] / loss[1]

    return bwd_loss, {"lm loss": averaged_loss}


def dpo_loss_func(metrics, output_tensor: torch.Tensor):
    """Reduce the DPO loss and average logged metrics across data-parallel ranks."""
    args = get_args()
    loss = output_tensor.mean()

    # Check individual rank losses are not NaN prior to DP all-reduce.
    if args.check_for_nan_in_loss_and_grad:
        global_rank = torch.distributed.get_rank()
        assert not loss.isnan(), (
            f'Rank {global_rank}: found NaN in local forward loss calculation. '
            f'Device: {torch.cuda.current_device()}, node: {os.uname()[1]}')

    # Reduce loss for logging.
    metrics['dpo-metrics/loss'] = loss
    for key in list(metrics):
        metrics[key] = average_losses_across_data_parallel_group([metrics[key]])

    # NOTE(review): older megatron-core versions appear to need the backward loss
    # rescaled by the context-parallel size — confirm against the pinned mcore version.
    if Version(package_info.__version__) < Version("0.12.1"):
        return loss * args.context_parallel_size, metrics
    return loss, metrics


def forward_step(data_iterator, model: Union[Gemma3LLaVAModel, LLaVAModelDPO]):
    """Run one forward pass and return the output with its partial loss function.

    Args:
        data_iterator: Iterator yielding training batches.
        model: The multimodal model (SFT or DPO variant).
    """
    args = get_args()
    timers = get_timers()

    # Fetch and broadcast the batch (timed separately from the forward pass).
    timers("batch-generator", log_level=1).start()
    batch = get_batch(data_iterator)
    timers("batch-generator").stop()
    (tokens, labels, loss_mask, attn_mask,
     sliding_window_attention_mask, position_ids, imgs) = batch

    timers("model-forward-only", log_level=1).start()
    # NOTE(review): reaches into a private attribute of the tokenizer wrapper.
    image_token_id = get_tokenizer()._tokenizer.image_token_id
    output_tensor, loss_mask_or_metric = model(
        images=imgs,
        input_ids=tokens,
        position_ids=position_ids,
        attention_mask=(attn_mask, sliding_window_attention_mask),
        labels=labels,
        loss_mask=loss_mask,
        image_token_index=image_token_id,
    )
    timers("model-forward-only").stop()

    # DPO models return a metrics dict; SFT uses the loss mask from get_batch.
    if args.dpo:
        return output_tensor, partial(dpo_loss_func, loss_mask_or_metric)
    return output_tensor, partial(loss_func, loss_mask)


def train_valid_test_data_iter_provider(train_val_test_num_samples=None):
    """Build multimodal train, validation and test dataloaders."""
    args = get_args()
    if args.data_path is None:
        parse_dataset_config(args)
    # Non-zero tp-ranks get no iterators — except under GRPO, where every
    # tp-rank builds its own dataloader.
    if not args.use_grpo and mpu.get_tensor_model_parallel_rank() != 0:
        return None, None, None

    print_rank_0('> building train, validation, and test dataloader ...')
    # Imported lazily so the dataset module only loads on ranks that need it.
    from megatron_datasets.gemma3_dataset import build_train_valid_test_data_iter
    data_iters = build_train_valid_test_data_iter(
        args,
        get_tokenizer(),
        rank=torch.distributed.get_rank(),
        dp_rank=mpu.get_data_parallel_rank(),
        dp_size=mpu.get_data_parallel_world_size(),
        cp_rank=mpu.get_context_parallel_rank(),
        cp_size=mpu.get_context_parallel_world_size(),
        is_dpo=args.dpo,
    )

    print(f"> world size {mpu.get_data_parallel_world_size()} rank {mpu.get_data_parallel_rank()} "
          f"finished creating dataloader ...")
    return data_iters


def add_extra_args(parser):
    """Register multimodal, gpatch, and Gemma3-specific command-line arguments."""
    parser = add_multimodal_extra_args(parser)
    parser = gpatch_extra_args(parser)

    gemma3_group = parser.add_argument_group(title='gemma3 arguments')
    gemma3_group.add_argument("--processor-path", type=str, default=None, help="")
    gemma3_group.add_argument("--tarfile-path", type=str, default="/", help="")
    gemma3_group.add_argument("--lmdb-port", type=int, default=None, help="lmdb server port")
    gemma3_group.add_argument("--sliding-window", type=int, default=1024,
                              help="Gemma3 sliding window")
    gemma3_group.add_argument("--query-pre-attn-scalar", type=int, default=256,
                              help="query_pre_attn_scalar")
    gemma3_group.add_argument("--mask-history", action='store_true',
                              help="多轮对话只取最后一轮对话为label")
    return parser


if __name__ == "__main__":

    # Apply gpatch's megatron-core patches before any training machinery runs.
    init_gpatch_for_mcore()

    # Every tp-rank must run parse_dataset_config, so mark the data provider as
    # distributed (presumably read by pretrain to call it on all ranks — verify).
    setattr(train_valid_test_data_iter_provider, "is_distributed", True)

    pretrain(
        train_valid_test_data_iter_provider,
        model_provider,
        ModelType.encoder_and_decoder,
        forward_step,
        extra_args_provider=add_extra_args,
    )
