# coding=utf-8
# copyright (c) 2025 tencent inc. all rights reserved.
# xiaotaoliu@tencent.com, nrwu@tencent.com, guanyouhe@tencent.com

from typing import Any, Dict, List, Tuple
from typing_extensions import override

import torch

from megatron.core import mpu, parallel_state
from megatron.core.enums import ModelType
from megatron.core.models import vision
from megatron.training import get_args
from megatron.training import get_tokenizer
from megatron.training.utils import unwrap_model

from gpatch.training.v3.ppo_actor import MultiModalPpoActorTrainer, train_ppo_actor_v3
from gpatch.core.device_type import is_wxacc1
from gpatch.patch_mcore import init_gpatch_for_mcore
from gpatch.core.models.gpt import (
    GptPpoActorModel,
    GptPpoRmCriticClientV3,
    GptPpoSamplerClientV3,
    GptPpoGenRmClientV3,
)
from gpatch.core.utils import gen_unique_id, split_data_ulysses_cp_rank
from gpatch.core.aligner_helper import pad_or_truncate_last_dim

from megatron_datasets.mega_indexed_jsonl_dataset_v3 import update_consumed

from tasks.qwen2vl.train_qwen2vl import (
    sft_model_provider,
    train_valid_test_data_iter_provider,
    add_qwen2vl_extra_args,
)


class Qwen2VLPpoActorModel(GptPpoActorModel):
    """Qwen2-VL flavored PPO actor.

    Extends the generic GPT PPO actor with multimodal collation:
    padding the vision-token sequence to a length divisible by the
    parallelism factors, and assembling batches for both the
    forward-only log-prob pass and the GRPO loss pass.
    """

    def padding_images(
            self, vision_data: List[torch.Tensor], vision_grid_thw: List[torch.Tensor]
    ) -> Tuple[bool, torch.Tensor, torch.Tensor]:
        """Pad the total vision sequence to a multiple of the parallelism
        factor, then concatenate all per-sample tensors.

        NOTE: mutates both input lists in place (a dummy all-zero "image"
        may be appended to each).

        Args:
            vision_data: per-sample vision patch tensors, each of shape
                (num_patches, feature_dim) — assumed; confirm with caller.
            vision_grid_thw: per-sample (T, H, W) grid tensors.

        Returns:
            (image_padded, vision_data, vision_grid_thw) where
            ``image_padded`` tells downstream code whether a dummy image
            was appended, and the tensors are the dim-0 concatenations.
        """
        args = get_args()
        # Total vision-token count must divide evenly across
        # context-parallel ranks times the 2x2 patch-merge factor, and
        # additionally across TP ranks when sequence parallelism is on.
        hw_factor = args.context_parallel_size * 4
        if args.sequence_parallel:
            hw_factor *= args.tensor_model_parallel_size

        image_seq_len = sum(vd.size(0) for vd in vision_data)
        image_padded = image_seq_len % hw_factor != 0
        if image_padded:
            # Round up to the next multiple of hw_factor; the shortfall
            # becomes a zero image with grid [1, 2, padded_seqlen // 2].
            padded_seqlen = (image_seq_len + hw_factor - 1) // hw_factor * hw_factor - image_seq_len
            assert padded_seqlen > 0 and padded_seqlen % 4 == 0
            vision_data.append(
                torch.zeros(
                    [padded_seqlen, vision_data[0].size(-1)],
                    dtype=vision_data[0].dtype,
                    device=vision_data[0].device,
                ))
            vision_grid_thw.append(
                torch.tensor(
                    [[1, 2, padded_seqlen // 2]],
                    dtype=vision_grid_thw[0].dtype,
                    device=vision_grid_thw[0].device,
                ))

        vision_data = torch.cat(vision_data, dim=0)
        vision_grid_thw = torch.cat(vision_grid_thw, dim=0)
        return image_padded, vision_data, vision_grid_thw

    def _collate_vision_inputs(
            self, vision_data_l: List[torch.Tensor], vision_grid_thw_l: List[torch.Tensor],
            non_blocking: bool) -> Tuple[bool, torch.Tensor, torch.Tensor]:
        """Pad + concatenate vision inputs, move them to GPU, and split
        the patch sequence across ulysses context-parallel ranks.

        The padding image mirrors the one added by DataCollatorForQwen2Vl
        so training and rollout see the same layout.
        """
        image_padded, vision_data, vision_grid_thw = self.padding_images(
            vision_data_l, vision_grid_thw_l)
        vision_data = vision_data.cuda(non_blocking=non_blocking)
        vision_grid_thw = vision_grid_thw.cuda(non_blocking=non_blocking)
        if self.config.context_parallel_size > 1 and self.config.model_arch == "qwen2vl":
            vision_data = split_data_ulysses_cp_rank(vision_data, self.config.context_parallel_size,
                                                     0)
        return image_padded, vision_data, vision_grid_thw

    @override
    def prepare_data_for_model_forward_only(self, batches: List[Dict[str, Any]],
                                            seqlen: int) -> Dict[str, Any]:
        """Collate rollout batches into model kwargs for a log-prob-only
        forward pass (no loss tensors).

        Every text tensor is right-padded/truncated to ``seqlen``;
        vision tensors are concatenated (and padded) across the batch.
        """
        tokens_l = []
        position_ids_l = []
        image_input_mask_l = []
        vision_grid_thw_l = []
        vision_data_l = []
        for batch in batches:
            assert batch["tokens"].shape[-1] <= seqlen
            tokens_l.append(pad_or_truncate_last_dim(batch["tokens"], seqlen, self.pad_token_id))
            position_ids_l.append(pad_or_truncate_last_dim(batch["position_ids"], seqlen, 0))
            image_input_mask_l.append(pad_or_truncate_last_dim(batch["image_input_mask"], seqlen, 0))
            vision_grid_thw_l.append(batch["vision_grid_thw"])
            vision_data_l.append(batch["vision_data"])

        # wxacc1 devices require blocking host-to-device copies.
        non_blocking = not is_wxacc1()
        tokens = torch.stack(tokens_l).view(len(tokens_l), -1).cuda(non_blocking=non_blocking)
        position_ids = torch.cat(position_ids_l, dim=1).cuda(non_blocking=non_blocking)
        image_input_mask = torch.cat(image_input_mask_l, dim=0).cuda(non_blocking=non_blocking)

        image_padded, vision_data, vision_grid_thw = self._collate_vision_inputs(
            vision_data_l, vision_grid_thw_l, non_blocking)

        return dict(
            input_ids=tokens,
            target=tokens.detach().clone(),
            position_ids=position_ids,
            vision_data=vision_data,
            vision_grid_thw=vision_grid_thw,
            # presumably image tokens precede video tokens, so the video
            # segment starts right after the image-mask total — TODO confirm
            video_start_index=image_input_mask.sum().cpu().item(),
            image_input_mask=image_input_mask,
            image_padded=image_padded,
        )

    @override
    def prepare_data_for_grpo_loss(
        self,
        batches: List[Dict[str, Any]],
        seqlen: int,
    ) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
        """Collate rollout batches into ``(loss batch, forward kwargs)``
        for the GRPO loss pass.

        Loss-side tensors (advantages / mask / logprobs) are padded to
        ``seqlen - 1`` because they align with next-token predictions.
        They are moved to GPU only on the last pipeline stage, where the
        loss is computed.
        """
        tokens_l = []
        position_ids_l = []
        image_input_mask_l = []
        advantages_l = []
        mask_l = []
        logprobs_l = []
        ref_logprobs_l = []

        vision_grid_thw_l = []
        vision_data_l = []
        for batch in batches:
            assert batch["tokens"].shape[-1] <= seqlen
            tokens_l.append(pad_or_truncate_last_dim(batch["tokens"], seqlen, self.pad_token_id))
            position_ids_l.append(pad_or_truncate_last_dim(batch["position_ids"], seqlen, 0))
            image_input_mask_l.append(pad_or_truncate_last_dim(batch["image_input_mask"], seqlen, 0))

            advantages_l.append(pad_or_truncate_last_dim(batch["advantages"], seqlen - 1, 0))
            mask_l.append(pad_or_truncate_last_dim(batch["mask"], seqlen - 1, 0))
            logprobs_l.append(pad_or_truncate_last_dim(batch["logprobs"], seqlen - 1, 0))
            ref_logprobs_l.append(pad_or_truncate_last_dim(batch["ref_logprobs"], seqlen - 1, 0))

            vision_grid_thw_l.append(batch["vision_grid_thw"])
            vision_data_l.append(batch["vision_data"])

        # wxacc1 devices require blocking host-to-device copies.
        non_blocking = not is_wxacc1()
        tokens = torch.stack(tokens_l).view(len(tokens_l), -1).cuda(non_blocking=non_blocking)
        position_ids = torch.cat(position_ids_l, dim=1).cuda(non_blocking=non_blocking)
        image_input_mask = torch.cat(image_input_mask_l, dim=0).cuda(non_blocking=non_blocking)

        # Kept on CPU here; moved to GPU below only on the last PP stage.
        advantages = torch.stack(advantages_l)
        mask = torch.stack(mask_l)
        logprobs = torch.stack(logprobs_l)
        ref_logprobs = torch.stack(ref_logprobs_l)

        image_padded, vision_data, vision_grid_thw = self._collate_vision_inputs(
            vision_data_l, vision_grid_thw_l, non_blocking)

        # NOTE: position_ids / vision_data / vision_grid_thw /
        # image_input_mask are already on GPU above; the previous extra
        # .cuda() calls here were redundant no-ops and have been removed.
        batch = {
            "input_ids": tokens,
            "position_ids": position_ids,
            "vision_data": vision_data,
            "vision_grid_thw": vision_grid_thw,
            "video_start_index": image_input_mask.sum().cpu().item(),
            "image_input_mask": image_input_mask,
            "image_padded": image_padded,
            "advantages": advantages,
            "prev_log_probs": logprobs,
            "mask": mask,
            "ref_log_probs": ref_logprobs,
            "target": tokens.detach().clone(),
        }
        if parallel_state.is_pipeline_last_stage():
            for k in ["mask", "prev_log_probs", "ref_log_probs", "advantages"]:
                batch[k] = batch[k].cuda(non_blocking=non_blocking)

        fwd_kwargs = dict(
            input_ids=batch["input_ids"],
            position_ids=batch["position_ids"],
            vision_data=batch["vision_data"],
            vision_grid_thw=batch["vision_grid_thw"],
            video_start_index=batch["video_start_index"],
            image_input_mask=batch["image_input_mask"],
            image_padded=batch["image_padded"],
        )

        return batch, fwd_kwargs


def actor_provider(model, ref_model_state):
    """Wrap a Megatron model into the Qwen2-VL PPO actor.

    Args:
        model: the policy model (Megatron module list).
        ref_model_state: frozen reference-policy state for KL terms.
    """
    args = get_args()
    pad_id = get_tokenizer()._tokenizer.pad_token_id
    return Qwen2VLPpoActorModel(
        model=model,
        ref_model_state=ref_model_state,
        unwrap_model_func=unwrap_model,
        # PPO args
        forward_micro_batch_size=args.ppo_logps_fwd_micro_batch_size,
        ppo_rollout_temperature=args.ppo_rollout_temperature,
        # SMART-PAD args
        pad_to_multi_of=args.ppo_rollout_pad_to_multiple_of,
        pad_token_id=pad_id,
    )


def rm_critic_client_provider():
    """Build the RM/critic RPC client from command-line args."""
    args = get_args()
    tokenizer = get_tokenizer()
    return GptPpoRmCriticClientV3(
        pad_token_id=tokenizer._tokenizer.pad_token_id,
        ep_ips=args.ppo_critic_ips,
        ep_ports=args.ppo_critic_ports,
        combine_rm_and_critic_server=args.combine_rm_and_critic_server,
        timeout=args.ppo_rm_critic_client_timeout,
        tokenizer=tokenizer,
        rpc_max_retries=args.grpo_rpc_max_retries,
        num_rm=args.ppo_num_rm,
        ppo_debug_fake_rm_critic=args.ppo_debug_fake_rm_critic,
    )


def gen_rm_client_provider():
    """Build the generative-RM RPC client; only valid with --use-gen-rm."""
    args = get_args()
    assert args.use_gen_rm
    return GptPpoGenRmClientV3(
        ep_ips=args.ppo_gen_rm_ips,
        ep_ports=args.ppo_gen_rm_ports,
        timeout=args.ppo_gen_rm_client_timeout,
        rpc_max_retries=args.grpo_rpc_max_retries,
        unwrap_model_func=unwrap_model,
    )


def sampler_client_provider():
    """Build the standalone sampler client, or return None when sampling
    is not run as a standalone service."""
    args = get_args()
    if not args.ppo_standalone_sampler:
        return None
    return GptPpoSamplerClientV3(
        ep_ips=args.ppo_sampler_ips,
        ep_ports=args.ppo_sampler_ports,
        timeout=args.ppo_sampler_client_timeout,
        update_timeout=args.ppo_sampler_client_update_timeout,
        rpc_max_retries=args.grpo_rpc_max_retries,
        unwrap_model_func=unwrap_model,
        infer_engine_impl=args.infer_engine_impl,
        update_weight_max_size_mb=args.update_weight_max_size_mb,
    )


def rollout_get_batch(data_iterator):
    """Pull one rollout sample from the data iterator and reshape it into
    the batch dict the sampler expects.

    By design, only the mp_head rank reaches this function.
    """
    args = get_args()

    assert data_iterator is not None
    data = next(data_iterator)

    # Track dataset consumption progress when a px data config is used.
    if args.px_data_config_path is not None:
        update_consumed(args.train_data_consuming_progresses, torch.distributed.get_rank(), data)

    json_data_list = data['json_data_list']
    tokens = data["input_ids"]
    assert tokens.shape[0] == 1 and len(
        json_data_list) == 1, "--ppo-rollout-micro-batch-size must be 1"

    # The collator must not have padded the images here, because the
    # vision inputs get re-combined (and re-padded) later.
    image_padded = data["image_padded"].bool()[0].item()
    assert not image_padded, f"image padded 必须为 False，因为会被重新组合"

    pixel_values = data["pixel_values"].type(torch.bfloat16) if "pixel_values" in data else None

    return dict(
        # type is list
        unique_id=[gen_unique_id()],
        json_data_list=json_data_list,
        tokens=[tokens.squeeze(0)],
        prompt_len=[data["prompt_len"]],
        imgs_np_array_list=data["imgs_np_array_list"],
        # save at mm_data_cache, type is tensor
        position_ids=data["position_ids"],
        vision_grid_thw=data["image_grid_thw"],
        image_input_mask=data["image_input_mask"],
        vision_data=pixel_values,
        cache_keys=[
            "position_ids",
            "vision_grid_thw",
            "image_input_mask",
            "vision_data",
        ],
    )


# MultiModalPpoActorTrainer cannot see args at construction time, so it cannot
# tell plain-RM from gen-RM setups; it calls extra_metric_info_provider lazily
# to decide which extra metrics to track.
def extra_metric_info_provider():
    """Return the extra per-sample metric descriptors to track.

    Always reports accuracy and format rewards; adds the generative-RM
    reward when --use-gen-rm is set.
    """
    key_names = ['acc_rewards', 'fmt_rewards']
    if get_args().use_gen_rm:
        key_names.append('rm_rewards')
    return [{'key_name': name, 'dtype': torch.float32} for name in key_names]

if __name__ == "__main__":
    # Apply gpatch monkey-patches to Megatron-Core before anything else
    # touches the framework.
    init_gpatch_for_mcore()

    actor_trainer = MultiModalPpoActorTrainer(extra_metric_info=extra_metric_info_provider)
    # Entry point: wires together the model/actor/client/data providers
    # defined above. The None argument's role is not visible from this
    # file — see train_ppo_actor_v3's signature.
    train_ppo_actor_v3(actor_trainer,
                       sft_model_provider,
                       actor_provider,
                       sampler_client_provider,
                       rm_critic_client_provider,
                       gen_rm_client_provider,
                       train_valid_test_data_iter_provider,
                       rollout_get_batch,
                       None,
                       ModelType.encoder_and_decoder,
                       extra_args_provider=add_qwen2vl_extra_args)
