# Copyright (c) 2025, HUAWEI CORPORATION. All rights reserved.
import json
import re

import torch

from mindspeed_rl.utils.loggers import Loggers
from mindspeed_rl.trainer.auto_parallel.system_config import SystemConfig


# Module-level logger tagged "profiler"; used for hook-registration tracing below.
logger = Loggers("profiler")


def export_profiling(actor_worker, kwargs):
    """Export the profiling data collected by ``actor_worker`` as JSON files.

    Destinations are read from ``kwargs['auto_parallel']``; three independent
    exports may happen, each only when its path attribute is configured:
      * module parameter counts  -> ``grpo_profile_path``
      * reference-worker profile -> ``ref_profile_path`` (REFERENCE task only)
      * actor/update profile     -> ``profile_path``     (UPDATE task only)

    Args:
        actor_worker: worker exposing ``get_module_params()`` and
            ``get_profile_info(worker_name=...)``.
        kwargs: dict holding the ``auto_parallel`` argument namespace.
    """
    def export(file_path, data):
        # Explicit encoding keeps the dump platform-independent.
        with open(file_path, 'w', encoding='utf-8') as file:
            json.dump(data, file, indent=4)

    args = kwargs['auto_parallel']

    if hasattr(args, 'grpo_profile_path'):
        export(args.grpo_profile_path, actor_worker.get_module_params())

    # Imported lazily — presumably to avoid a circular import at module load.
    from mindspeed_rl.trainer.auto_parallel.launch import TaskType
    # getattr with a default: a missing task name now means "no task-specific
    # export" instead of raising AttributeError (the hasattr guards above show
    # these attributes are meant to be optional).
    task_name = getattr(args, 'launching_task_name', None)

    if task_name == TaskType.REFERENCE.value and hasattr(args, 'ref_profile_path'):
        export(args.ref_profile_path, actor_worker.get_profile_info(worker_name='Reference'))

    if task_name == TaskType.UPDATE.value and hasattr(args, 'profile_path'):
        export(args.profile_path, actor_worker.get_profile_info(worker_name='Actor'))


class AutoProfiler:
    """Collects per-module forward-pass time, activation-memory and parameter
    statistics for a worker's model via PyTorch forward hooks.

    One profiler is kept per worker name in the class-level ``instances``
    registry; obtain it through :meth:`get_instance` rather than constructing
    directly. All measurements are accumulated in ``self.context``.
    """

    # Shared registry: worker name -> AutoProfiler instance.
    instances: dict = {}
    # Name of the worker currently being profiled (set externally).
    current_profiling_worker = ''

    def __init__(self, worker):
        # Measurements keyed by module leaf name, plus the 'module_params'
        # and 'parallel_config' summary sections.
        self.context = {}
        # Hook handles kept so the hooks can be removed later if needed.
        self.handles = []
        self.worker = worker
        # Leaf names of modules whose forward pass is timed
        # ('0'/'1' are presumably the first two decoder layers — confirm).
        self.profile_modules = ('embedding', '0', '1', 'final_layernorm', 'output_layer')
        # Leaf names of modules whose parameter counts are collected.
        self.param_modules = ('embedding', 'self_attention', 'mlp', 'final_layernorm', 'output_layer')
        # Shared CUDA events time one hooked module at a time; this is safe
        # because recursion stops at hooked modules, so they never nest.
        self.start_event = torch.cuda.Event(enable_timing=True)
        self.end_event = torch.cuda.Event(enable_timing=True)

    @classmethod
    def set_current_profiling_worker(cls, worker_name):
        """Record which worker is currently being profiled."""
        cls.current_profiling_worker = worker_name

    @classmethod
    def get_instance(cls, worker):
        """Return the profiler registered for ``worker``, creating it on first use."""
        # Single membership test instead of the redundant `.keys()` lookup.
        if worker not in cls.instances:
            cls.instances[worker] = AutoProfiler(worker)
        return cls.instances[worker]

    def set_save_path(self, save_path):
        """Remember where the collected profile should be exported."""
        self.save_path = save_path

    def get_average(self, key, sub_key=None):
        """Return the outlier-filtered mean of ``self.context[key]`` (or of
        ``self.context[key][sub_key]`` when ``sub_key`` is given).

        Samples at or above twice the raw mean (e.g. warm-up iterations) are
        dropped before averaging. If the filter would drop every sample —
        possible when all samples are 0, since ``t < 2 * avg`` never holds —
        the raw mean is returned instead of raising ZeroDivisionError.
        """
        tlist = self.context[key][sub_key] if sub_key else self.context[key]
        avg = sum(tlist) / len(tlist)
        filtered = [t for t in tlist if t < 2 * avg]
        return sum(filtered) / len(filtered) if filtered else avg

    def should_profiling(self):
        """Gate for the hooks; always True for now (placeholder for policies)."""
        return True

    def forward_pre_hook(self, module_name):
        """Build a pre-forward hook that snapshots memory and starts timing."""
        if module_name not in self.context:
            self.context[module_name] = {'forward_times': []}

        def hook(module, *args, **kwargs):
            if self.should_profiling():
                # Baseline allocation before the module runs; the post hook
                # converts this into a peak-memory delta.
                self.context[module_name]['memory'] = torch.cuda.memory_allocated()
                # reset_peak_memory_stats supersedes the deprecated
                # reset_max_memory_allocated and resets the same counter.
                torch.cuda.reset_peak_memory_stats()
                self.start_event.record()
        return hook

    def forward_post_hook(self, module_name):
        """Build a post-forward hook that records elapsed time and peak memory."""
        def hook(module, *args, **kwargs):
            if self.should_profiling():
                self.end_event.record()
                # elapsed_time is only valid once both events have completed.
                torch.cuda.synchronize()
                ctx = self.context[module_name]
                ctx['forward_times'].append(self.start_event.elapsed_time(self.end_event))
                ctx['time'] = self.get_average(module_name, 'forward_times')
                start_memory = ctx['memory']
                # Peak allocation during this forward relative to the pre-hook
                # baseline, expressed in SystemConfig.unit_gb units.
                ctx['memory'] = (torch.cuda.max_memory_allocated() - start_memory) / SystemConfig.unit_gb
        return hook

    def register_recursive_hook(self, prefix_name, model, ctx):
        """Walk ``model`` and attach timing hooks to every module whose leaf
        name is in ``profile_modules``; recursion stops at hooked modules.

        ``ctx`` is threaded through for interface compatibility but is unused.
        """
        model = model[0] if isinstance(model, list) else model
        for name, module in model.named_children():
            next_name = prefix_name + "." + name if prefix_name != "" else name
            logger.info(f"{self.worker} hook next_name: {next_name}")
            match_ret = re.search(r'[^.]+$', next_name)
            if match_ret and match_ret.group(0) in self.profile_modules:
                # NOTE(review): context is keyed by the unqualified leaf name,
                # so two hooked modules sharing a leaf name would share one
                # entry — confirm leaf names are unique at this granularity.
                self.handles.append(module.register_forward_pre_hook(self.forward_pre_hook(name)))
                self.handles.append(module.register_forward_hook(self.forward_post_hook(name)))
                continue
            self.register_recursive_hook(next_name, module, ctx)

    def get_module_params(self, prefix_name, model):
        """Walk ``model`` and record the parameter count of every module whose
        leaf name is in ``param_modules``; recursion stops at matched modules."""
        model = model[0] if isinstance(model, list) else model
        for name, module in model.named_children():
            next_name = prefix_name + "." + name if prefix_name != "" else name
            match_ret = re.search(r'[^.]+$', next_name)
            if match_ret and match_ret.group(0) in self.param_modules:
                # Keyed by unqualified leaf name, mirroring the hook context.
                params = self.context.setdefault('module_params', dict())
                params[name] = sum(p.numel() for p in module.parameters())
                continue
            self.get_module_params(next_name, module)

    def get_current_parallel_config(self, actor_config, rl_config):
        """Snapshot the actor's parallelism settings into the context.

        Falsy (unset or zero) values are normalised to -1. ``rl_config`` is
        accepted for interface compatibility but currently unused.
        """
        parallel = self.context.setdefault('parallel_config', dict())
        parallel['pipeline_model_parallel_size'] = actor_config.pipeline_model_parallel_size or -1
        parallel['tensor_model_parallel_size'] = actor_config.tensor_model_parallel_size or -1
        parallel['expert_model_parallel_size'] = actor_config.expert_model_parallel_size or -1
        parallel['micro_batch_size'] = actor_config.micro_batch_size or -1

    @staticmethod
    def register_hook(rl_config, model, model_name, profile_path):
        """Patch the pipeline schedulers and install profiling hooks on ``model``."""
        from mindspeed_rl.trainer.auto_parallel.patch.pipeline_schedulers import execute_adaption
        execute_adaption(rl_config)
        auto_profiler = AutoProfiler.get_instance(model_name)
        auto_profiler.set_save_path(profile_path)
        auto_profiler.register_recursive_hook("", model, auto_profiler.context)