# Copyright (c) 2025, HUAWEI CORPORATION. All rights reserved.
from functools import wraps
import torch


def _make_step_profiling_decorator(fn, step_name):
    """Wrap *fn* so each call is timed and its peak activation memory recorded.

    The wrapper is a pass-through unless an AutoProfiler worker is currently
    active.  When profiling is active it measures the CUDA elapsed time of the
    call and the activation-memory high-water mark, then accumulates them in
    the profiler context under ``<step_name>_step_times`` (per-call list),
    ``<step_name>_step_mem`` (last call's peak memory) and
    ``<step_name>_step_time`` (running average).

    Args:
        fn: the original step function (e.g. Megatron's forward_step or
            backward_step).
        step_name: prefix for the profiler-context keys ('forward' or
            'backward').

    Returns:
        A wrapper with the same signature and return value as *fn*.
    """
    times_key = f'{step_name}_step_times'
    mem_key = f'{step_name}_step_mem'
    avg_key = f'{step_name}_step_time'

    @wraps(fn)
    def wrapper(*args, **kwargs):
        # Imported lazily so this module does not hard-depend on the profiler
        # package at import time.
        from mindspeed_rl.trainer.auto_parallel.profiler import AutoProfiler
        worker_name = AutoProfiler.current_profiling_worker

        if worker_name is None:
            # Profiling disabled: call straight through.
            return fn(*args, **kwargs)

        auto_profiler: AutoProfiler = AutoProfiler.get_instance(worker_name)

        start_event = torch.cuda.Event(enable_timing=True)
        end_event = torch.cuda.Event(enable_timing=True)
        # Drain pending kernels so the memory baseline and timing are clean.
        torch.cuda.synchronize()
        memory_allocated = torch.cuda.memory_allocated()
        # NOTE: reset_max_memory_allocated() is deprecated in favor of
        # reset_peak_memory_stats(); kept here to match the backend in use.
        torch.cuda.reset_max_memory_allocated()

        start_event.record()
        output = fn(*args, **kwargs)
        end_event.record()

        torch.cuda.synchronize()
        step_time = start_event.elapsed_time(end_event)  # milliseconds
        # Peak extra memory used by this step, scaled by the profiler's unit
        # (presumably bytes-per-GB — confirm against AutoProfiler.unit_gb).
        act_memory = (torch.cuda.max_memory_allocated() - memory_allocated) / auto_profiler.unit_gb

        auto_profiler.context.setdefault(times_key, [])
        auto_profiler.context[mem_key] = act_memory
        auto_profiler.context[times_key].append(step_time)
        auto_profiler.context[avg_key] = auto_profiler.get_average(times_key)

        return output

    return wrapper


def forward_step_decorator(fn):
    """Decorate a forward_step function to record time/memory when profiling."""
    return _make_step_profiling_decorator(fn, 'forward')


def backward_step_decorator(fn):
    """Decorate a backward_step function to record time/memory when profiling."""
    return _make_step_profiling_decorator(fn, 'backward')


def execute_adaption(rl_config):
    """Patch Megatron's pipeline-parallel step functions with profiling wrappers.

    Registers the forward/backward step decorators against Megatron's
    schedule functions via MegatronAdaptation, then applies the patches.

    Args:
        rl_config: accepted for interface compatibility; not used here.
    """
    from mindspeed_llm.tasks.megatron_adaptor import MegatronAdaptation

    patch_targets = (
        ('megatron.core.pipeline_parallel.schedules.forward_step', forward_step_decorator),
        ('megatron.core.pipeline_parallel.schedules.backward_step', backward_step_decorator),
    )
    for target, decorator in patch_targets:
        MegatronAdaptation.register(target, decorator)
    MegatronAdaptation.apply()