# Copyright (c) 2024, HUAWEI CORPORATION.  All rights reserved.
import copy
import os
import dataclasses

import ray
import torch
import torch.nn as nn
import dataclasses
import torch_npu
from tensordict import TensorDict

from megatron.training import get_args, initialize_megatron, get_timers, get_tokenizer
from megatron.core import parallel_state as mpu, tensor_parallel
from megatron.core.optimizer import get_megatron_optimizer, OptimizerConfig
from megatron.core.distributed import finalize_model_grads
from megatron.core.utils import get_model_config
from megatron.training.training import get_optimizer_param_scheduler, build_train_valid_test_data_iterators
from megatron.training.checkpointing import load_checkpoint

from mindspeed_llm.tasks.inference.infer_base import add_text_generate_args
from mindspeed_llm.tasks.posttrain.rlxf.single_controller.base.megatron.worker import MegatronWorker
from mindspeed_llm.tasks.posttrain.rlxf.single_controller.base.decorator import register, Dispatch, Execute
from mindspeed_llm.tasks.posttrain.rlxf.utils.protocol import DataProto
from mindspeed_llm.training.utils import get_finetune_data_on_this_tp_rank
from mindspeed_llm.tasks.posttrain.base.model_provider import mcore_model_provider
# from mindspeed_llm.tasks.posttrain.rlxf.workers.hybrid_engine import AllGatherPPModel
from mindspeed_llm.tasks.posttrain.rlxf.workers.megatron_hybrid_engine import AllGatherPPModel
from .actor_train_infer import (MegatronPPOActor, pad_to_tensor_dict, generate_attention_mask,
                                generate_position_ids_from_attention_mask, train_valid_test_datasets_provider)
from .vllm_rollout.vllm_engine import set_num_tp_per_train_tp, set_num_pp_per_train_pp


@ray.remote
class PPOActorWorkerHybrid(MegatronWorker):
    """
    Hybrid PPO actor worker.

    This worker can be instantiated as a standalone actor, a standalone rollout,
    a standalone reference policy, or a hybrid engine (based on config.rollout)
    that shares weights between the Megatron trainer and the vLLM rollout engine.
    """

    def __init__(self, config, role):
        super().__init__()
        self.config = config
        self.role = role
        # Label value marking positions excluded from loss (HF/Megatron convention);
        # used below when mapping dataset additional keys back to pad tokens.
        self.IGNORE_INDEX = -100
        # Megatron requires serialized device connections.
        os.environ['CUDA_DEVICE_MAX_CONNECTIONS'] = '1'
        self.counter = 0
        initialize_megatron(extra_args_provider=add_text_generate_args,
                            args_defaults={'no_load_rng': True, 'no_load_optim': True},
                            role=self.role,
                            config=self.config)
        self.args = get_args()

        # Tell the vLLM engine how train-time TP/PP map onto inference TP/PP.
        set_num_tp_per_train_tp(self.args.num_tp_per_train_tp)
        set_num_pp_per_train_pp(self.args.num_pp_per_train_pp)

    def _get_megatron_optimizer(self,
                                model,
                                no_wd_decay_cond=None,
                                scale_lr_cond=None,
                                lr_mult=1.0):
        """Build the Megatron optimizer and LR scheduler from the global args.

        Args:
            model: list of Megatron model chunks to optimize.
            no_wd_decay_cond: optional predicate marking params exempt from weight decay.
            scale_lr_cond: optional predicate marking params whose LR is scaled.
            lr_mult: learning-rate multiplier for params matched by ``scale_lr_cond``.

        Returns:
            Tuple of ``(optimizer, opt_param_scheduler)``.
        """
        args = self.args
        timers = get_timers()
        # Mirror every OptimizerConfig field that is present on the parsed args.
        kwargs = {field.name: getattr(args, field.name)
                  for field in dataclasses.fields(OptimizerConfig)
                  if hasattr(args, field.name)}
        config = OptimizerConfig(**kwargs)
        config.timers = timers
        optimizer = get_megatron_optimizer(config, model, no_wd_decay_cond,
                                           scale_lr_cond, lr_mult)
        opt_param_scheduler = get_optimizer_param_scheduler(optimizer)
        return optimizer, opt_param_scheduler

    def _build_model_optimizer(self):
        """Create the hybrid (train + infer) model, optimizer and LR scheduler.

        Returns:
            ``(actor_module, hybrid_engine, optimizer, opt_param_scheduler)``.
        """
        hybrid_engine = AllGatherPPModel(mcore_model_provider, self.args)

        actor_module = hybrid_engine.this_rank_models
        # Only the first chunk of this rank's models is trained directly.
        if isinstance(actor_module, nn.ModuleList):
            actor_module = [actor_module[0]]

        optimizer, opt_param_scheduler = self._get_megatron_optimizer(model=actor_module)

        # Restore training state when a checkpoint is configured; otherwise start fresh.
        if self.args.load is not None or self.args.pretrained_checkpoint is not None:
            self.args.iteration, self.args.num_floating_point_operations_so_far = load_checkpoint(
                actor_module, optimizer, opt_param_scheduler)
        else:
            self.args.iteration = 0
            self.args.num_floating_point_operations_so_far = 0

        return actor_module, hybrid_engine, optimizer, opt_param_scheduler

    def _build_rollout(self):
        """Build the vLLM rollout engine and the actor<->rollout sharding manager.

        Must run after ``self.hybrid_engine`` and ``self.actor`` are set
        (see ``initialize``), since the sharding manager needs both.

        Returns:
            ``(rollout, sharding_manager)``.
        """
        from mindspeed_llm.tasks.posttrain.rlxf.workers.vllm_rollout import vllmInferenceEngine
        from mindspeed_llm.tasks.posttrain.rlxf.workers.megatron_hybrid_engine import MegatronVLLMShardingManager
        from mindspeed_llm.tasks.posttrain.rlxf.utils.model import normalize_pp_vpp_params
        from transformers import AutoTokenizer, AutoConfig

        # TODO: use megatron things to replace these HF things
        self.tokenizer = AutoTokenizer.from_pretrained(self.args.tokenizer_name_or_path, trust_remote_code=True)
        self.actor_model_config = AutoConfig.from_pretrained(self.args.tokenizer_name_or_path)

        # NOTE(sgm): If the QKV and gate_up projection layers are concatenated in the
        # actor, their weight format is reorganized when resharding actor -> rollout.
        layer_name_mapping = {
            "qkv_layer_name": "qkv",
            "gate_proj_layer_name": "linear_fc1.weight"
        }

        # Reshard the weight partition from actor to rollout to initialize the rollout:
        # allocate device space for parameters not owned by this pp rank ...
        self.hybrid_engine.load_params_to_cuda()
        # ... broadcast parameters from their owning pp rank to the other ranks ...
        self.hybrid_engine.allgather_params()
        # ... and collect the name -> parameter mapping across pp/vpp.
        params = self.hybrid_engine.get_all_params()
        # Normalize parameter names across pp/vpp partitions.
        params = normalize_pp_vpp_params(params=params,
                                         num_hidden_layers=self.actor_model_config.num_hidden_layers,
                                         layer_name='layers')
        rollout = vllmInferenceEngine(actor_module=None,
                                      args=self.args,
                                      config=self.config,
                                      tokenizer=self.tokenizer,
                                      model_hf_config=self.actor_model_config,
                                      train_tp=mpu.get_tensor_model_parallel_world_size())

        # The sharding manager performs weight resharding between actor and rollout.
        sharding_manager = MegatronVLLMShardingManager(
            module=self.hybrid_engine,
            inference_engine=rollout.inference_engine,
            optimizer=self.actor.optimizer,
            model_config=self.actor_model_config,
            layer_name_mapping=layer_name_mapping,
            optimizer_offload=self.args.optimizer_offload
        )

        return rollout, sharding_manager

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def initialize(self):
        """Build the model/optimizer, the PPO actor wrapper, the rollout engine
        and the sharding manager on this worker."""
        self.args = get_args()
        model, hybrid_engine, optimizer, opt_param_scheduler = self._build_model_optimizer()
        config = get_model_config(model[0])
        config.finalize_model_grads_func = finalize_model_grads
        self.actor = MegatronPPOActor(model=model, optimizer=optimizer, opt_param_scheduler=opt_param_scheduler)
        self.hybrid_engine = hybrid_engine
        # _build_rollout relies on self.actor and self.hybrid_engine set just above.
        self.rollout, self.sharding_manager = self._build_rollout()
        # NOTE(review): eval() on a config-provided string — prefer ast.literal_eval
        # if this value can ever come from untrusted input.
        self.args.dataset_additional_keys = eval(self.args.dataset_additional_keys[0]) if self.args.dataset_additional_keys else []
        torch.cuda.empty_cache()

    @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
    def update_actor(self, data: DataProto):
        """Run PPO policy updates on ``data`` and gather metrics across DP ranks.

        Returns:
            A CPU-resident ``DataProto`` whose ``meta_info['metrics']`` holds the
            metric lists concatenated over all data-parallel ranks.
        """
        device = next(self.actor.model[0].parameters()).device
        data = data.to(device)
        dataloader = self.actor.make_minibatch_iterator(data=data)
        args = get_args()
        # Disable the KV cache during training forward passes and restore the
        # *original* value afterwards (previously it was forced back to True
        # regardless of its prior state, and not restored on exception).
        has_kv_cache_flag = hasattr(args, 'use_kv_cache')
        if has_kv_cache_flag:
            prior_kv_cache = args.use_kv_cache
            args.use_kv_cache = False
        try:
            metrics = self.actor.update_policy(dataloader=dataloader)
        finally:
            if has_kv_cache_flag:
                args.use_kv_cache = prior_kv_cache
        # Since DataProto.concat only uses the first meta_info, gather metrics
        # across dp ranks manually.
        metrics_all = [None for _ in range(mpu.get_data_parallel_world_size())]
        torch.distributed.all_gather_object(metrics_all, metrics, mpu.get_data_parallel_group())
        for key in metrics.keys():
            metrics[key] = [val for rank_metrics in metrics_all for val in rank_metrics[key]]
        output = DataProto(meta_info={'metrics': metrics})
        output = output.to('cpu')
        torch.cuda.empty_cache()
        return output

    @register(dispatch_mode=Dispatch.MEGATRON_PP_AS_DP_PROTO)
    def generate_sequences_with_data(self, batch):
        """Generate rollout responses for the prompts contained in ``batch``.

        Returns:
            A ``DataProto`` with prompts, responses, full input_ids,
            attention_mask, position_ids, original response lengths and any
            configured dataset additional keys, plus tokenizer meta info.
        """
        args = get_args()
        max_new_tokens = args.seq_length - args.max_prompt_length
        assert max_new_tokens % args.pad_to_multiple_of == 0, "please adjust pad_to_multiple_of so that \
                                                        max_new_tokens % args.pad_to_multiple_of == 0"
        tokenizer = get_tokenizer()
        # pad_token_id may legitimately be 0, so test against None, not truthiness.
        pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id

        with self.sharding_manager:
            data = self.sharding_manager.preprocess_data(batch)
            idx = data.batch['input_ids']
            additional_dict = {}
            for k in self.args.dataset_additional_keys:
                # Key-membership test (the old `hasattr` checked dict *attributes*,
                # which would silently skip keys named like dict methods).
                if k not in additional_dict:
                    additional_val = data.batch[k]
                    # Replace ignore-index labels with the pad token id.
                    additional_val = torch.where(additional_val == self.IGNORE_INDEX, pad_id, additional_val)
                    additional_dict[k] = additional_val

            batch_size = idx.size(0)

            # 1. Preprocess: strip right padding and replicate prompts per sample.
            idx_list = []
            additional_dict_list = {k: [] for k in self.args.dataset_additional_keys}
            for i in range(batch_size):
                prompt_token_ids = idx[i]
                nonzeros = torch.nonzero(prompt_token_ids == pad_id, as_tuple=False)
                # Truncate at the first pad token; keep the whole row when unpadded.
                if len(nonzeros) != 0:
                    first_pad_index = nonzeros[0][0]
                else:
                    first_pad_index = len(prompt_token_ids)
                token_ids = prompt_token_ids[:first_pad_index].cpu().numpy().tolist()

                for additional_key in self.args.dataset_additional_keys:
                    additional_val = additional_dict[additional_key][i].cpu().numpy().tolist()
                    for _ in range(args.n_samples_per_prompt):
                        additional_dict_list[additional_key].append(copy.deepcopy(additional_val))

                for _ in range(args.n_samples_per_prompt):
                    idx_list.append(copy.deepcopy(token_ids))

                # Online DPO appends one extra copy per prompt (assumes
                # n_samples_per_prompt == 1 in that stage — TODO confirm).
                if args.stage == "ray_online_dpo":
                    idx_list.append(copy.deepcopy(token_ids))

            if args.stage == "ray_online_dpo":
                batch_size *= 2
            else:
                batch_size *= args.n_samples_per_prompt

            # 2. Do inference
            responses = self.rollout.generate_sequences(
                idx_list=copy.deepcopy(idx_list),
                temperature=args.temperature,
                top_k=args.top_k,
                top_p=args.top_p,
                max_tokens=max_new_tokens,
            )[0]

        responses = responses.long()
        responses = [response.cpu().numpy().tolist() for response in responses]

        # Right-pad responses and left-pad prompts to a multiple of pad_to_multiple_of.
        responses_ori_length, responses_pad_length = pad_to_tensor_dict(
            responses,
            pad_multi_of=args.pad_to_multiple_of
        )
        prompts_ori_length, prompts_pad_length = pad_to_tensor_dict(
            idx_list, "left",
            pad_multi_of=args.pad_to_multiple_of
        )

        input_ids = [prompt + response for prompt, response in zip(idx_list, responses)]

        attention_mask = generate_attention_mask(input_ids, prompts_ori_length, prompts_pad_length,
                                                 responses_ori_length, responses_pad_length)

        position_ids = generate_position_ids_from_attention_mask(input_ids, prompts_ori_length, prompts_pad_length)

        batch = TensorDict(
            dict(
                    {
                        "prompts": idx_list,
                        "responses": responses,
                        "input_ids": input_ids,
                        "attention_mask": attention_mask,
                        "position_ids": position_ids,
                        "responses_ori_length": responses_ori_length
                    }, **additional_dict_list
                ),
            batch_size=batch_size
        )

        # add tokenizer meta info
        output = DataProto(batch=batch)
        meta_info = {'eos_token_id': tokenizer.eos_token_id, 'pad_token_id': tokenizer.pad_token_id,
                     'num_samples_per_step': args.num_samples_per_step}
        output.meta_info.update(meta_info)
        return output

    @register(dispatch_mode=Dispatch.MEGATRON_PP_AS_DP_PROTO)
    def generate_sequences(self):
        """Pull prompts from the train data iterator and generate rollouts.

        Returns:
            A ``DataProto`` with prompts, responses, full input_ids,
            attention_mask and position_ids, plus tokenizer meta info.
        """
        args = get_args()
        num_infer_steps = args.global_batch_size // (args.data_parallel_size * args.num_samples_per_step)
        idx_list = []
        idx_list_per_step = []
        max_new_tokens = args.seq_length - args.max_prompt_length
        assert max_new_tokens % args.pad_to_multiple_of == 0, "please adjust pad_to_multiple_of so that \
                                                        max_new_tokens % args.pad_to_multiple_of == 0"

        with self.sharding_manager:
            # NOTE(review): assumes self.train_data_iterator was set elsewhere
            # (not visible in this class) — confirm before standalone use.
            for _ in range(num_infer_steps):
                for _ in range(args.num_samples_per_step):
                    tokens = self.get_batch(self.train_data_iterator)
                    idx_list_per_step.append(tokens.view(-1).cpu().numpy().tolist())
                # Online DPO needs two completions per prompt: duplicate this step's prompts.
                if args.stage == "ray_online_dpo":
                    idx_list_per_step = idx_list_per_step + copy.deepcopy(idx_list_per_step)
                idx_list.extend(idx_list_per_step)
                idx_list_per_step = []
            responses = self.rollout.generate_sequences(idx_list=idx_list)[0]

        responses = [response.cpu().numpy().tolist() for response in responses]

        # Right-pad responses and left-pad prompts to a multiple of pad_to_multiple_of.
        responses_ori_length, responses_pad_length = pad_to_tensor_dict(responses, pad_multi_of=args.pad_to_multiple_of)
        prompts_ori_length, prompts_pad_length = pad_to_tensor_dict(idx_list, "left",
                                                                    pad_multi_of=args.pad_to_multiple_of)

        input_ids = [prompt + response for prompt, response in zip(idx_list, responses)]

        attention_mask = generate_attention_mask(input_ids, prompts_ori_length, prompts_pad_length,
                                                 responses_ori_length, responses_pad_length)

        position_ids = generate_position_ids_from_attention_mask(input_ids, prompts_ori_length, prompts_pad_length)
        if args.stage == "ray_online_dpo":
            batch_size = args.global_batch_size // args.data_parallel_size * 2
        else:
            batch_size = args.global_batch_size // args.data_parallel_size

        batch = TensorDict({
            "prompts": idx_list,
            "responses": responses,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "position_ids": position_ids,
        },
            batch_size=batch_size)

        # add tokenizer meta info
        output = DataProto(batch=batch)
        tokenizer = get_tokenizer()
        meta_info = {'eos_token_id': tokenizer.eos_token_id, 'pad_token_id': tokenizer.pad_token_id,
                     'num_samples_per_step': args.num_samples_per_step}
        output.meta_info.update(meta_info)
        return output

    # TODO: remove duplicate code
    @staticmethod
    def get_batch(data_iterator):
        """Generate a batch identical to Llama factory.

        Returns the input token ids tensor on first/last pipeline stages; on
        intermediate stages returns either the fetched tokens (variable-length
        deep pipelines) or None.
        """
        args = get_args()

        # Intermediate pipeline stages normally need no data; with variable
        # sequence lengths and >2 pipeline stages they still fetch tokens.
        if (not mpu.is_pipeline_first_stage()) and (not mpu.is_pipeline_last_stage()):
            if args.variable_seq_lengths and args.pipeline_model_parallel_size > 2:
                tokens, _ = get_finetune_data_on_this_tp_rank(data_iterator)
            else:
                tokens = None
            return tokens

        # Items and their type.
        keys = ['input_ids']
        data_type = torch.int64

        # Broadcast data across the tensor-parallel group.
        data_b = tensor_parallel.broadcast_data(keys, next(data_iterator), data_type)

        # Unpack
        tokens = data_b.get('input_ids').long()
        return tokens

    @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
    def get_log_probs(self, data):
        """Compute old-policy log-probs for ``data``.

        Returns the data (on CPU) with ``old_log_probs`` attached on the pp
        last stage; intermediate pp stages return None (no useful results).
        """
        old_log_probs = self.actor.compute_log_prob(data)
        if old_log_probs is not None:  # pp last stage
            data.batch['old_log_probs'] = old_log_probs
            data = data.to('cpu')
        else:  # pp intermediate stage, no useful results
            data = None
        # clear kv cache
        torch.cuda.empty_cache()
        return data

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_checkpoint(self, iteration):
        """Persist the actor's checkpoint for the given iteration."""
        self.actor.save_checkpoint(iteration)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def get_iteration(self):
        """Return the actor's current training iteration."""
        return self.actor.get_iteration()