# Copyright (c) 2025, HUAWEI CORPORATION. All rights reserved.
from functools import wraps

import ray
from codetiming import Timer

from mindspeed_rl.workers.scheduler.launcher import RayActorGroup
from mindspeed_rl.utils.loggers import Loggers
from mindspeed_rl.utils.metrics import Metric
from mindspeed_rl.trainer.utils.compute_utils import compute_grpo_data_metrics
from mindspeed_rl.utils.utils import metrics_post_processing, compute_tps, metrics_sort, is_multimodal
from mindspeed_rl.trainer.auto_parallel.patch.actor_hybrid_worker_patch import generate_sequences_decorator
from mindspeed_rl.trainer.auto_parallel.patch.actor_hybrid_worker_patch import compute_advantage_decorator


def fit_decorator(trainer):
    """Build the GRPO training loop for *trainer*.

    Returns a ``wrapper(data_iters)`` callable that drives the full GRPO
    fit loop — rollout -> reward scoring -> advantage -> reference/old
    log-prob -> actor update — using the workers and transfer docks
    attached to ``trainer``. The wrapper also supports the auto-parallel
    profiling mode, where only a single named task is launched per run.
    """
    def wrapper(data_iters):
        """
        The utils loop of GRPO

        Args:
            data_iters: iterator yielding one prompt batch per training
                iteration; each batch must contain a 'prompts' entry.
        """
        logger = Loggers('grpo_trainer_hybrid')
        metrics = Metric()

        # Resume from the actor worker's persisted iteration count.
        iteration = trainer.actor_worker.get_iteration()

        if trainer.blocking:
            logger.info('sync start grpo training at iteration: {}/{} ...'.format(iteration, trainer.train_iters))
        else:
            logger.info('async start grpo training at iteration: {}/{} ...'.format(iteration, trainer.train_iters))

        # Patch advantage computation and sequence generation so the
        # auto-parallel machinery can intercept / profile them.
        trainer.compute_advantage = compute_advantage_decorator(trainer.compute_advantage)
        trainer.actor_worker.generate_sequences = generate_sequences_decorator(trainer.actor_worker, trainer.transfer_dock, trainer.actor_worker.generate_sequences)

        # Name of the single task to launch in isolation (auto-parallel
        # profiling mode), or None for a normal full training loop.
        # The default=None is required: auto_parallel itself may be None
        # (see the profiling-export guard at the end of this function),
        # and bare getattr would raise AttributeError in that case.
        launching_task = getattr(trainer.actor_worker.rl_config.auto_parallel, 'launching_task_name', None)

        # Total number of experiences produced per iteration.
        experience_count = trainer.global_batch_size * trainer.n_samples_per_prompt

        while iteration < trainer.train_iters:
            # Fresh experience buffer for this iteration.
            ray.get(trainer.transfer_dock.clear.remote())

            batch = next(data_iters)
            ray.get(trainer.transfer_dock.put_prompts_experience.remote(batch, trainer.dataset_additional_keys))
            if is_multimodal():
                ray.get(trainer.mm_transfer_dock.clear.remote())
                # One experience slot per (prompt, sample) pair.
                ray.get(trainer.mm_transfer_dock.put_experience.remote(
                    batch, indexes=list(range(len(batch['prompts']) * trainer.n_samples_per_prompt))))

            with Timer(name='iteration', logger=None) as all_timer:
                # generate sequences
                trainer.actor_worker.generate_sequences(blocking=trainer.blocking)
                if launching_task == 'generate_sequences':
                    # Auto-parallel profiling mode: only rollout was requested.
                    break

                # compute rm scores: RayActorGroup rewards run through their
                # own dispatch; plain actors are fired off as remote tasks.
                rule_reward = []
                for reward_worker in trainer.reward_list:
                    if isinstance(reward_worker, RayActorGroup):
                        reward_worker.compute_rm_score(blocking=trainer.blocking)
                    else:
                        # NOTE(review): these ObjectRefs are never awaited with
                        # ray.get(); presumably results flow back through the
                        # transfer dock — confirm fire-and-forget is intended.
                        rule_reward.append(reward_worker.compute_rm_score.remote())

                # compute advantages, executed on the driver process
                trainer.compute_advantage(blocking=False, guarantee_order=trainer.guarantee_order)

                # compute reference log_prob
                trainer.ref_worker.compute_ref_log_prob(blocking=trainer.blocking)
                if launching_task == 'reference':
                    # Auto-parallel profiling mode: stop after reference pass.
                    break

                # compute old log_prob
                if not trainer.skip_actor_log_prob:
                    trainer.actor_worker.compute_log_prob(blocking=trainer.blocking)

                # Barrier: make sure every async stage above has finished
                # before updating the actor.
                trainer.actor_worker.wait_all_ref_objs_run_over()
                trainer.ref_worker.wait_all_ref_objs_run_over()
                for reward in trainer.reward_list:
                    if hasattr(reward, 'wait_all_ref_objs_run_over'):
                        reward.wait_all_ref_objs_run_over()

                # update actor
                trainer.actor_worker.update(trainer.kl_ctrl, trainer.skip_actor_log_prob)

                # collect metrics
                grpo_data_metrics = compute_grpo_data_metrics(trainer.transfer_dock,
                                                              experience_count,
                                                              trainer.tokenizer,
                                                              experience_count,
                                                              trainer.guarantee_order)
                metrics_result = ray.get(trainer.transfer_dock.get_metrics.remote())

            metrics_result = metrics_post_processing(metrics_result)
            metrics_result = metrics_sort(metrics_result, all_timer.last)
            # Throughput: end-to-end, actor-update only, and rollout only.
            tps = compute_tps(trainer.kwargs, grpo_data_metrics, trainer.global_batch_size, trainer.n_samples_per_prompt, all_timer.last)
            update_tps = compute_tps(trainer.kwargs, grpo_data_metrics, trainer.global_batch_size, trainer.n_samples_per_prompt, metrics_result["timing/update"])
            vllm_tps = compute_tps(trainer.kwargs, grpo_data_metrics, trainer.global_batch_size, trainer.n_samples_per_prompt, metrics_result["timing/rollout"])
            metrics.update(value=metrics_result)
            metrics.update(value=grpo_data_metrics)
            metrics.update("e2e_tps", tps)
            metrics.update("update_tps", update_tps)
            metrics.update("vllm_tps", vllm_tps)
            iteration += 1
            logger.info(metrics.metric, iteration, trainer.train_iters)
            if trainer.tensorboard is not None:
                for k, v in metrics.metric.items():
                    trainer.tensorboard.add_scalar(f"train/{k}", v, iteration)
            if trainer.wandb is not None:
                trainer.wandb.log_metrics(metrics.metric, iteration)
            # Checkpoint on the save interval and always at the final iteration.
            if iteration % trainer.save_interval == 0 or iteration == trainer.train_iters:
                trainer.save_checkpoint(iteration)

        if trainer.actor_worker.rl_config.auto_parallel is not None:
            # Deferred import: the profiler is only needed in auto-parallel mode.
            from mindspeed_rl.trainer.auto_parallel.profiler import export_profiling
            export_profiling(trainer.actor_worker, trainer.kwargs)

        logger.info('after grpo training is done')
        ray.shutdown()

    return wrapper