# Copyright (c) 2025, HUAWEI CORPORATION. All rights reserved.
import json
import time
from functools import wraps

import torch
import ray

from mindspeed_rl.workers.scheduler.launcher import RayActorGroup
from mindspeed_rl.trainer.utils.transfer_dock import GRPOTransferDock


def generate_sequences_decorator(actor_worker: RayActorGroup, transfer_dock: GRPOTransferDock, generate_sequences):
    """Wrap ``generate_sequences`` for auto-parallel profiling/replay runs.

    When the launching task is GENERATION, the wrapped rollout is timed, the
    elapsed time is dumped as JSON to ``gen_profile_path`` (when configured),
    and the transfer-dock experience buffer is serialized to
    ``transfer_dock_path`` (when configured). For any other task, the
    previously serialized buffer is loaded back into the transfer dock and the
    rollout timing metrics are filled with sentinel values so downstream
    metric consumers are not left waiting for a rollout that never ran.

    Args:
        actor_worker: Ray actor group; its ``rl_config.auto_parallel`` section
            drives all branching here.
        transfer_dock: Transfer dock handle used to save/restore experiences.
        generate_sequences: The original rollout callable being wrapped.

    Returns:
        The wrapped callable.
    """
    @wraps(generate_sequences)
    def wrapper(*args, **kwargs):
        # Imported lazily, presumably to avoid a circular import at module
        # load time — TODO confirm.
        from mindspeed_rl.trainer.auto_parallel.launch import TaskType
        save_path = getattr(actor_worker.rl_config.auto_parallel, 'transfer_dock_path')

        if getattr(actor_worker.rl_config.auto_parallel, 'launching_task_name') == TaskType.GENERATION.value:
            start_time = time.time()
            result = generate_sequences(*args, **kwargs)
            prof_data = {
                'rollout_time': time.time() - start_time
            }
            # NOTE(review): hasattr() only checks attribute existence; if the
            # config schema declares these fields with a None default, both
            # branches always run — verify against the config definition.
            if hasattr(actor_worker.rl_config.auto_parallel, 'gen_profile_path'):
                with open(actor_worker.rl_config.auto_parallel.gen_profile_path, 'w') as file:
                    json.dump(prof_data, file)
            if hasattr(actor_worker.rl_config.auto_parallel, 'serialize_transfer_dock'):
                experience_buffer = ray.get(transfer_dock.get_transfer_dock_data.remote())
                torch.save(experience_buffer, save_path)
            # Propagate the wrapped function's return value (previously it
            # was silently dropped by the decorator).
            return result
        else:
            # Replay path: restore the experiences produced by an earlier
            # GENERATION run instead of rolling out again.
            # NOTE(review): torch.load unpickles arbitrary objects — only use
            # with files this pipeline itself produced.
            experience_buffer = torch.load(save_path)
            ray.get(transfer_dock.set_transfer_dock_data.remote(experience_buffer))
            for actor in actor_worker.actor_handlers:
                td = ray.get(actor.get_transfer_dock.remote())
                # Sentinel timings (inf) mark the rollout as skipped so
                # metric aggregation does not block on missing entries.
                ray.get(
                    td.update_metrics.remote('timing/rollout', value=[float('inf'), 0], cumulate=True)
                )
                ray.get(
                    td.update_metrics.remote('timing/resharding_to_infer', value=[float('inf')], cumulate=True)
                )
            return None
    return wrapper


def compute_advantage_decorator(compute_advantage):
    """Wrap ``compute_advantage`` so it always runs blocking.

    The wrapper forces ``blocking=True`` and forwards only the caller's
    ``guarantee_order`` keyword; all other arguments are intentionally
    ignored by this auto-parallel scheduling path.

    Args:
        compute_advantage: The original advantage-computation callable.

    Returns:
        The wrapped callable, which returns whatever ``compute_advantage``
        returns.
    """
    @wraps(compute_advantage)
    def wrapper(*args, **kwargs):
        # Use .get() so a caller that omits guarantee_order no longer
        # crashes with KeyError, and propagate the return value (both were
        # bugs in the original wrapper).
        return compute_advantage(blocking=True, guarantee_order=kwargs.get('guarantee_order'))
    return wrapper
