# Copyright (c) 2025, HUAWEI CORPORATION. All rights reserved.
import os
import re
import copy
import json
from pathlib import Path
from abc import ABC, abstractmethod

import numpy as np
from omegaconf import OmegaConf

from mindspeed_rl.utils.loggers import Loggers
from mindspeed_rl.trainer.auto_parallel.system_config import SystemConfig
from mindspeed_rl.trainer.auto_parallel.launch import TaskType, Launch
from mindspeed_rl.trainer.auto_parallel.yaml_config import YamlConfig
from mindspeed_rl.trainer.auto_parallel.search_space import (
    GenerationSearchSpace, ActorSearchSpace, RefSearchSpace, ParallelConfig
)
from mindspeed_rl.trainer.auto_parallel.cost_model import (
    Sheet, GenerateCostModel, ActorCostModel, ReferenceCostModel, CostModel
)


logger = Loggers("search")


class BaseSearch(ABC):
    """Base class for parallel-configuration search strategies.

    Subclasses implement ``search`` to explore per-task parallel
    configurations (generation / reference forward / actor update), prune
    them with the static-memory cost model, profile the survivors via
    ``Launch``, and pick the configuration with the minimum modeled
    end-to-end time.
    """

    def __init__(self, config):
        self.config = config
        # Per-task optimal yaml overrides, filled in by subclasses' search().
        self.search_results = {}
        # Per-device memory budget in bytes; ``unit_gb`` presumably converts
        # GB to bytes — TODO confirm against SystemConfig.
        self.max_available_memory = 60.0 * SystemConfig.unit_gb  # Bytes
        self.world_size = SystemConfig.rl_config.actor_resource.num_npus
        # The rollout engine only claims a fraction of device memory.
        self.generate_available_memory = self.max_available_memory * SystemConfig.generate_config.gpu_memory_utilization

    def build_search_space(self, class_name, world_size, task_type):
        """Enumerate candidate parallel configs and keep the memory-feasible ones.

        Args:
            class_name: search-space class to instantiate
                (e.g. ``ActorSearchSpace``).
            world_size: NPU count available to this task.
            task_type: selects the feasibility criterion — GENERATION checks
                params against the rollout budget, REFERENCE checks params
                against the full budget, UPDATE also counts gradient and
                optimizer state.

        Returns:
            list: feasible parallel configurations.
        """
        candidates = class_name(self.config, world_size).build_search_space()
        feasible = []
        for parallel_config in candidates:
            cost_model = CostModel(self.config, parallel_config, task_type, SystemConfig.model_profile_path)
            params, gradient, optimizer = cost_model.compute_static_memory()
            logger.debug(f"build_search_space config: {parallel_config}\n"
                        f"params: {params / SystemConfig.unit_gb:.2f}GB\n"
                        f"gradient: {gradient / SystemConfig.unit_gb:.2f}GB\n"
                        f"optimizer: {optimizer / SystemConfig.unit_gb:.2f}GB")

            if task_type == TaskType.GENERATION and params < self.generate_available_memory:
                feasible.append(parallel_config)
            if task_type == TaskType.REFERENCE and params < self.max_available_memory:
                feasible.append(parallel_config)
            if task_type == TaskType.UPDATE and (params + gradient + optimizer) < self.max_available_memory:
                feasible.append(parallel_config)

        # Fixed: the original used an f-string with no placeholder here.
        logger.info("build_search_space result:")
        for feasible_config in feasible:
            logger.info(feasible_config)
        return feasible

    def get_optimal_config(self, sheet: Sheet, task_type: TaskType):
        """Translate the minimum-E2E-time row of ``sheet`` into yaml overrides.

        The row's ``config`` field is an underscore-joined string of
        ``<letters><digits>`` tokens, e.g. ``"tp2_pp4_mbs1"``.
        """

        def parse_config_str(config_str):
            # Fixed: the original retried with a second regex
            # (^[a-zA-Z]+[A-Z][a-zA-Z]*\d+$) whose alphabet is a strict
            # subset of this one, so it could never match when this one
            # failed; the dead fallback has been removed.
            parsed = {}
            for part in config_str.split('_'):
                match = re.fullmatch(r'([a-zA-Z]+)(\d+)', part)
                if match:
                    parsed[match.group(1)] = int(match.group(2))
            return parsed

        optimal = sheet.get_minimum_e2etime_row()
        optimal_config_dict = parse_config_str(optimal['config'])

        if task_type == TaskType.GENERATION:
            result = {
                'pipeline_model_parallel_size': optimal_config_dict['pp'],
                'tensor_model_parallel_size': optimal_config_dict['tp'],
                'rl_config/actor_rollout_dispatch_size': optimal_config_dict['mbs']
            }
        else:
            result = {
                "pipeline_model_parallel_size": optimal_config_dict['pp'],
                'tensor_model_parallel_size': optimal_config_dict['tp'],
                'virtual_pipeline_model_parallel_size': optimal_config_dict['vpp'],
                'context_parallel_size': optimal_config_dict['cp']
            }
            # Only the micro-batch-size key differs between REFERENCE and UPDATE.
            if task_type == TaskType.REFERENCE:
                result['rl_config/ref_forward_micro_batch_size'] = optimal_config_dict['mbs']
            if task_type == TaskType.UPDATE:
                result['megatron_training/micro_batch_size'] = optimal_config_dict['mbs']

        return result

    def collect_transfer_docker(self, use_cache=True):
        """Run one single-iteration GENERATION launch to serialize the
        transfer dock and produce the model profile.

        Skipped entirely when ``use_cache`` is set and a profile already
        exists at ``SystemConfig.model_profile_path``.
        """
        if use_cache and Path(SystemConfig.model_profile_path).exists():
            return

        # Work on a deep copy so the caller's config is left untouched.
        temp_config = copy.deepcopy(self.config)
        temp_config.megatron_training.train_iters = 1

        auto_parallel_config = OmegaConf.to_container(temp_config.rl_config.auto_parallel, resolve=True)
        auto_parallel_config['launching_task_name'] = TaskType.GENERATION.value
        auto_parallel_config['enabled'] = False
        auto_parallel_config['transfer_dock_path'] = SystemConfig.transfer_dock_path
        auto_parallel_config['serialize_transfer_dock'] = True
        auto_parallel_config['grpo_profile_path'] = SystemConfig.model_profile_path

        temp_config.rl_config.auto_parallel = OmegaConf.create(auto_parallel_config)
        Launch.launch(temp_config, TaskType.GENERATION)

    def is_available(self, infer_config, train_config):
        """Return True when the training layout can be resharded to the
        inference tensor-parallel size.

        When inference TP exceeds training TP, the training replica group
        (dp * tp) must be at least as large as, and divisible by, the
        inference TP; otherwise training TP must be divisible by inference TP.
        """
        infer_tp = infer_config.tensor_model_parallel_size
        train_tp = train_config.tensor_model_parallel_size
        if infer_tp > train_tp:
            replicas = train_config.data_parallel_size * train_tp
            return replicas >= infer_tp and replicas % infer_tp == 0
        return train_tp % infer_tp == 0

    def search_on_slave(self):
        """Worker loop for non-master nodes: keep launching broadcast tasks
        until the master broadcasts EXIT."""
        while True:
            task_type = Launch.broadcast()
            if task_type == TaskType.EXIT.value:
                break
            Launch.launch_on_slave()

    @abstractmethod
    def search(self):
        """Run the full search; must be implemented by subclasses."""
        # Fixed: the original returned the ``NotImplemented`` sentinel, which
        # is meant for binary-operator dispatch, not for abstract methods.
        raise NotImplementedError


class IntegratedSearch(BaseSearch):
    """Parallel-configuration search for the fully co-located ("integrated")
    mode, where generation, reference and actor share the same NPUs."""

    def __init__(self, config):
        super().__init__(config)
        # NOTE(review): never written elsewhere in this file's visible code —
        # possibly vestigial; confirm before relying on it.
        self.optimal_configs = dict()

    def find_avail_generate_config(self, generate_search_space, config):
        """Return the first generation config resharding-compatible with
        ``config`` (see ``is_available``), or None if there is none."""
        enabled_generate_config = None
        for tmp_config in generate_search_space:
            if self.is_available(tmp_config, config):
                enabled_generate_config = tmp_config
                break
        return enabled_generate_config
    
    def search_generate(self, train_search_space, target_search_space):
        """Profile each generation config paired with a compatible actor
        config and return the yaml overrides of the fastest one."""
        logger.info("Start search generate_config:")
        model_result: Sheet = Sheet(self.config, TaskType.GENERATION.value)
        for config in target_search_space:
            logger.info(config)
            # find enabled train_config
            enabled_actor_config = None
            for act_config in train_search_space:
                if self.is_available(config, act_config):
                    enabled_actor_config = act_config
                    break
            
            # No compatible training layout: mark the config as infeasible.
            if not enabled_actor_config:
                model_result.update(config, np.inf, np.inf)
                continue

            # Launch a profiling run only when no cached profile exists.
            if not Path(SystemConfig.generate_profile_path_fmt.format(config)).exists():
                yaml_config = YamlConfig(self.config)
                yaml_config.update_generate_config(config, TaskType.GENERATION)
                yaml_config.update_ref_config(enabled_actor_config.slice_mbs(), TaskType.GENERATION)
                yaml_config.update_actor_config(enabled_actor_config.slice_mbs(), TaskType.GENERATION)
                Launch.launch(yaml_config.get_yaml_config(), TaskType.GENERATION)

            cost_model = GenerateCostModel(self.config, config, TaskType.GENERATION, SystemConfig.model_profile_path)
            # NOTE(review): memory is recorded in raw bytes here, but in GB
            # (divided by unit_gb) in search_ref/search_update — confirm the
            # unit the Sheet expects.
            model_result.update(
                config, 
                cost_model.get_memory(), 
                cost_model.get_time([SystemConfig.generate_profile_path_fmt.format(config)])
            )
        return self.get_optimal_config(model_result, TaskType.GENERATION)


    def search_ref(self, generate_search_space, target_search_space):
        """Model memory and time for reference-forward configs and return
        the yaml overrides of the fastest feasible one."""
        logger.info("Start search reference_config:")
        # Deduplicate configs after slicing to the co-located world size.
        sliced_search_space = set([config.slice_dims(self.world_size, True) for config in target_search_space])
        logger.info(f"sliced_search_space of Reference: len={len(sliced_search_space)}")
        # First pass: make sure a (mbs-sliced) profile exists for each config.
        for config in sliced_search_space:
            logger.info(config)
            enabled_generate_config = self.find_avail_generate_config(generate_search_space, config)
            if not enabled_generate_config:
                continue

            yaml_config = YamlConfig(self.config)
            yaml_config.update_generate_config(enabled_generate_config, TaskType.REFERENCE)
            yaml_config.update_ref_config(config, TaskType.REFERENCE)
            yaml_config.update_actor_config(config, TaskType.REFERENCE)

            if not Path(yaml_config.get_yaml_config().rl_config.auto_parallel.ref_profile_path).exists():
                Launch.launch(yaml_config.get_yaml_config(), TaskType.REFERENCE)

        # NOTE(review): other sheets are named with TaskType.*.value; confirm
        # the literal 'Reference' matches TaskType.REFERENCE.value.
        ref_model_result: Sheet = Sheet(self.config, 'Reference')
        # Model peak memory for every config in the search space.
        for config in target_search_space:
            sliced_config = config.slice_dims(self.world_size, with_mbs=True)
            if not Path(SystemConfig.ref_profile_path_fmt.format(sliced_config)).exists():
                ref_model_result.update(config, np.inf, np.inf)
                continue

            cost_model = ReferenceCostModel(self.config, config, SystemConfig.model_profile_path)
            if cost_model.get_memory() > self.max_available_memory:
                ref_model_result.update(config, np.inf, np.inf)
                continue

            # Performance modeling uses the slice without mbs.
            sliced_config = config.slice_dims(self.world_size)
            if not Path(SystemConfig.ref_profile_path_fmt.format(sliced_config)).exists():
                enabled_generate_config = self.find_avail_generate_config(generate_search_space, config)
                if not enabled_generate_config:
                    # NOTE(review): raw bytes here vs GB below — unit
                    # inconsistency in the recorded memory; confirm intended.
                    ref_model_result.update(config, cost_model.get_memory(), np.inf)
                    continue

                yaml_config = YamlConfig(self.config)
                yaml_config.update_generate_config(enabled_generate_config, TaskType.REFERENCE)
                yaml_config.update_ref_config(sliced_config, TaskType.REFERENCE)
                yaml_config.update_actor_config(sliced_config.slice_mbs(), TaskType.REFERENCE)
                Launch.launch(yaml_config.get_yaml_config(), TaskType.REFERENCE)

            ref_model_result.update(
                config, 
                cost_model.get_memory() / SystemConfig.unit_gb, 
                cost_model.get_time([SystemConfig.ref_profile_path_fmt.format(sliced_config)])
            )
        return self.get_optimal_config(ref_model_result, TaskType.REFERENCE)
    
    def search_update(self, generate_search_space, target_search_space):
        """Model memory and time for actor-update configs and return the
        yaml overrides of the fastest feasible one."""
        logger.info("Start search actor_update_config:")
        model_result: Sheet = Sheet(self.config, TaskType.UPDATE.value)
        for config in target_search_space:
            logger.info(config)
            # Collect profiling data for the memory model (mbs-sliced run).
            sliced_config = config.slice_dims(self.world_size, with_mbs=True)
            if not Path(SystemConfig.actor_profile_path_fmt.format(sliced_config)).exists():
                generate_config = self.find_avail_generate_config(generate_search_space, sliced_config)
                if generate_config:
                    yaml_config = YamlConfig(self.config)
                    yaml_config.update_generate_config(generate_config, TaskType.UPDATE)
                    yaml_config.update_ref_config(sliced_config, TaskType.UPDATE)
                    yaml_config.update_actor_config(sliced_config, TaskType.UPDATE)
                    Launch.launch(yaml_config.get_yaml_config(), TaskType.UPDATE)
            
            # Memory modeling.
            cost_model = ActorCostModel(self.config, config, SystemConfig.model_profile_path)
            # NOTE(review): 20% slack over the budget — presumably tolerating
            # modeling error; TODO confirm the factor's origin.
            if cost_model.get_memory() > (self.max_available_memory * 1.2):
                model_result.update(config, np.inf, np.inf)
                continue

            # Performance modeling (uses the slice without mbs).
            sliced_config = config.slice_dims(self.world_size)
            if not Path(SystemConfig.actor_profile_path_fmt.format(sliced_config)).exists():
                generate_config = self.find_avail_generate_config(generate_search_space, sliced_config)
                if generate_config:
                    yaml_config = YamlConfig(self.config)
                    yaml_config.update_generate_config(generate_config, TaskType.UPDATE)
                    yaml_config.update_ref_config(sliced_config, TaskType.UPDATE)
                    yaml_config.update_actor_config(sliced_config, TaskType.UPDATE)
                    Launch.launch(yaml_config.get_yaml_config(), TaskType.UPDATE)

            model_result.update(
                config,
                cost_model.get_memory() / SystemConfig.unit_gb,
                cost_model.get_time([SystemConfig.actor_profile_path_fmt.format(sliced_config)])
            )
        return self.get_optimal_config(model_result, TaskType.UPDATE)

    def search(self):
        """Entry point: rank 0 builds the per-task search spaces and runs the
        three searches; other nodes serve launch requests until EXIT."""
        if SystemConfig.node_rank != 0:
            self.search_on_slave()
            return

        self.collect_transfer_docker()

        # Build the per-task search spaces.
        generate_search_space = self.build_search_space(GenerationSearchSpace, self.world_size, TaskType.GENERATION)
        ref_search_space = self.build_search_space(RefSearchSpace, self.world_size, TaskType.REFERENCE)
        actor_search_space = self.build_search_space(ActorSearchSpace, self.world_size, TaskType.UPDATE)
        # generate
        generate_optimal_config = self.search_generate(actor_search_space, generate_search_space)
        self.search_results['optimal_generate_config'] = generate_optimal_config
        # reference_fwd
        ref_optimal_config = self.search_ref(generate_search_space, ref_search_space)
        self.search_results['optimal_ref_config'] = ref_optimal_config
        # actor_update
        actor_optimal_config = self.search_update(generate_search_space, actor_search_space)
        self.search_results['optimal_actor_config'] = actor_optimal_config
        
        logger.info(f"search_result: \n{json.dumps(self.search_results, indent=4)}")


class SeparateSearch(IntegratedSearch):
    """Multi-task parallel-configuration search for the separated
    (disaggregated) deployment, where each task owns its own NPUs."""

    def build_search_space(self, world_size):
        """Build the search space of resource splits and per-task parallel configs.

        NOTE(review): this override's signature differs from the base class's
        ``build_search_space(class_name, world_size, task_type)`` — confirm
        nothing calls it polymorphically through the base interface.

        key: a resource split, e.g. (4, 2, 2, 8) meaning generate: 4 npus,
             ref: 2 npus, actor_fwd: 2 npus, update: 8 npus
        val:
            - str: task
            - value: search_space
        """

        # Build the resource-allocation search space.
        def decompose_world_size(N):
            """Split power-of-two ``N`` into (a, b, b, c) tuples of powers of
            two with a + 2*b + c == N — the two forward tasks always receive
            equal shares."""
            if N != 1 and (N & (N - 1)) != 0:
                raise ValueError(f"world_size: {N} is not pow of 2")

            max_exp = N.bit_length()
            powers_list = sorted([(1 << i) for i in range(max_exp) if (1 << i) <= N])

            combinations = []
            for b in powers_list:
                if 2 * b > N:
                    break
                remainder = N - 2 * b
                for a in powers_list:
                    if a > remainder:
                        break
                    c = remainder - a
                    # c must itself be a non-zero power of two to qualify.
                    if c in powers_list:
                        combinations.append((a, b, b, c))
            return combinations

        search_space = {}
        resource_alloc_strategy = decompose_world_size(world_size)
        # Build each task's parallel-config search space for every resource split.
        # NOTE(review): actor_fwd_npus is unpacked but no search space is
        # built from it — confirm the actor forward reuses the update space.
        for _, resources in enumerate(resource_alloc_strategy):
            generate_npus, ref_fwd_npus, actor_fwd_npus, actor_npus = resources
            generate_search_space = super().build_search_space(GenerationSearchSpace, generate_npus, TaskType.GENERATION)
            ref_search_space = super().build_search_space(RefSearchSpace, ref_fwd_npus, TaskType.REFERENCE)
            actor_search_space = super().build_search_space(ActorSearchSpace, actor_npus, TaskType.UPDATE)
            # Keep the split only when every task has at least one feasible config.
            if len(generate_search_space) != 0 and len(ref_search_space) != 0 and len(actor_search_space) != 0:
                search_space[resources] = {
                TaskType.GENERATION.value: generate_search_space,
                TaskType.REFERENCE.value: ref_search_space,
                TaskType.UPDATE.value: actor_search_space
            }
        
        return search_space

    def resource_allocation_descrption(self, alloc_config):
        """Human-readable directory name for a resource split.

        NOTE(review): the method name ("descrption") and the "Referece"
        literal below carry typos; kept as-is because callers and on-disk
        result paths depend on the exact spelling.
        """
        return 'Generate-{}npus_RefereceFwd-{}npus_ActorFwd-{}npus_Update-{}npus'.format(*alloc_config)

    def get_save_path(self, task, resource_allocation):
        """Return the CSV result path for ``task`` under the directory named
        after ``resource_allocation``, creating the directory if missing."""
        dir_path = SystemConfig.rl_config.auto_parallel.work_dir + os.sep + resource_allocation
        if not Path(dir_path).exists():
            Path(dir_path).mkdir(parents=True, exist_ok=True)
        return dir_path + os.sep + '{}_model_results.csv'.format(task)

    def search_generate(self, search_space, resource_allocation):
        """Profile every generation config under this resource split; return
        (optimal yaml overrides, minimum modeled E2E time)."""
        model_result: Sheet = Sheet(self.config, TaskType.GENERATION.value)
        model_result.set_path(
            self.get_save_path(TaskType.GENERATION.value, self.resource_allocation_descrption(resource_allocation))
        )
        for config in search_space:
            # Launch a profiling run only when no cached profile exists.
            if not Path(SystemConfig.generate_profile_path_fmt.format(config)).exists():
                yaml_config = YamlConfig(self.config)
                yaml_config.update_generate_config(config, TaskType.GENERATION)
                yaml_config.update_ref_config(config.slice_mbs(), TaskType.GENERATION)
                yaml_config.update_actor_config(config.slice_mbs(), TaskType.GENERATION)
                # resource_allocation[0] == NPUs assigned to generation.
                yaml_config.update_actor_resource(resource_allocation[0])
                Launch.launch(yaml_config.get_yaml_config(), TaskType.GENERATION)
            cost_model = GenerateCostModel(self.config, config, TaskType.GENERATION, SystemConfig.model_profile_path)
            # NOTE(review): memory recorded in raw bytes here but in GB in
            # search_ref/search_update — confirm the sheet's expected unit.
            model_result.update(
                config,
                cost_model.get_memory(),
                cost_model.get_time([SystemConfig.generate_profile_path_fmt.format(config)])
            )
        return self.get_optimal_config(model_result, TaskType.GENERATION), model_result.get_minimum_e2etime_row()['E2Etime']
    
    def search_ref(self, search_space, resource_allocation):
        """Model memory and time for every reference config under this
        resource split; return (optimal yaml overrides, minimum E2E time)."""
        model_result: Sheet = Sheet(self.config, TaskType.REFERENCE.value)
        model_result.set_path(
            self.get_save_path(TaskType.REFERENCE.value, self.resource_allocation_descrption(resource_allocation))
        )

        # Sliced configs whose profiling run produced no output (assumed OOM).
        oom_configs = set()
        for config in search_space:
            logger.info(f"modeling reference config: {config}")
            sliced_config = config.slice_dims(self.world_size, with_mbs=True, with_dp=True)
            if sliced_config in oom_configs:
                model_result.update(config, np.inf, np.inf)
                continue

            if not Path(SystemConfig.ref_profile_path_fmt.format(sliced_config)).exists():
                yaml_config = YamlConfig(self.config)
                yaml_config.update_generate_config(sliced_config, TaskType.REFERENCE)
                yaml_config.update_ref_config(sliced_config, TaskType.REFERENCE)
                yaml_config.update_actor_config(sliced_config, TaskType.REFERENCE)
                # resource_allocation[1] == NPUs assigned to the reference forward.
                yaml_config.update_actor_resource(resource_allocation[1])
                Launch.launch(yaml_config.get_yaml_config(), TaskType.REFERENCE)
            
            # Still no profile after launching: remember the config as OOM.
            if not Path(SystemConfig.ref_profile_path_fmt.format(sliced_config)).exists():
                oom_configs.add(sliced_config)
                model_result.update(config, np.inf, np.inf)
                continue

            cost_model = ReferenceCostModel(self.config, config, SystemConfig.model_profile_path)
            # Memory modeling.
            if cost_model.get_memory() > self.max_available_memory:
                model_result.update(config, np.inf, np.inf)
                continue

            # Performance modeling (uses the slice without mbs).
            sliced_config = config.slice_dims(self.world_size, with_mbs=False, with_dp=True)
            path = SystemConfig.ref_profile_path_fmt.format(sliced_config)
            if not Path(path).exists():
                yaml_config = YamlConfig(self.config)
                yaml_config.update_generate_config(sliced_config, TaskType.REFERENCE)
                yaml_config.update_ref_config(sliced_config, TaskType.REFERENCE)
                yaml_config.update_actor_config(sliced_config, TaskType.REFERENCE)
                yaml_config.update_actor_resource(resource_allocation[1])
                Launch.launch(yaml_config.get_yaml_config(), TaskType.REFERENCE)

            model_result.update(config, cost_model.get_memory() / SystemConfig.unit_gb, cost_model.get_time([path]))

        return self.get_optimal_config(model_result, TaskType.REFERENCE), model_result.get_minimum_e2etime_row()['E2Etime']

    def search_update(self, search_space, resource_allocation):
        """Model memory and time for every actor-update config under this
        resource split; return (optimal yaml overrides, minimum E2E time)."""
        model_result: Sheet = Sheet(self.config, TaskType.UPDATE.value)
        model_result.set_path(
            self.get_save_path(TaskType.UPDATE.value, self.resource_allocation_descrption(resource_allocation))
        )

        # Sliced configs whose profiling run produced no output (assumed OOM).
        oom_configs = set()
        for config in search_space:
            logger.info(f"modeling update config: {config}")
            sliced_config = config.slice_dims(self.world_size, with_mbs=True, with_dp=True)
            if sliced_config in oom_configs:
                model_result.update(config, np.inf, np.inf)
                continue

            if not Path(SystemConfig.actor_profile_path_fmt.format(sliced_config)).exists():
                yaml_config = YamlConfig(self.config)
                yaml_config.update_generate_config(sliced_config, TaskType.UPDATE)
                yaml_config.update_ref_config(sliced_config, TaskType.UPDATE)
                yaml_config.update_actor_config(sliced_config, TaskType.UPDATE)
                # resource_allocation[3] == NPUs assigned to the update task.
                yaml_config.update_actor_resource(resource_allocation[3])
                Launch.launch(yaml_config.get_yaml_config(), TaskType.UPDATE)

            # Still no profile after launching: remember the config as OOM.
            if not Path(SystemConfig.actor_profile_path_fmt.format(sliced_config)).exists():
                oom_configs.add(sliced_config)
                model_result.update(config, np.inf, np.inf)
                continue

            # Memory modeling.
            cost_model = ActorCostModel(self.config, config, SystemConfig.model_profile_path)
            if cost_model.get_memory() > self.max_available_memory:
                model_result.update(config, np.inf, np.inf)
                continue

            # Performance modeling (uses the slice without mbs).
            sliced_config = config.slice_dims(self.world_size, with_mbs=False, with_dp=True)
            path = SystemConfig.actor_profile_path_fmt.format(sliced_config)
            if not Path(path).exists():
                yaml_config = YamlConfig(self.config)
                yaml_config.update_generate_config(sliced_config, TaskType.UPDATE)
                yaml_config.update_ref_config(sliced_config, TaskType.UPDATE)
                yaml_config.update_actor_config(sliced_config, TaskType.UPDATE)
                yaml_config.update_actor_resource(resource_allocation[3])
                Launch.launch(yaml_config.get_yaml_config(), TaskType.UPDATE)

            model_result.update(config, cost_model.get_memory() / SystemConfig.unit_gb, cost_model.get_time([path]))

        return self.get_optimal_config(model_result, TaskType.UPDATE), model_result.get_minimum_e2etime_row()['E2Etime']

    def search(self):
        """Entry point: rank 0 enumerates resource splits, searches each
        task's configs per split, and keeps the split with the lowest summed
        modeled time; other nodes serve launch requests."""
        if SystemConfig.node_rank != 0:
            self.search_on_slave()
            return
        
        self.collect_transfer_docker()

        world_size = SystemConfig.rl_config.auto_parallel.world_size
        search_space = self.build_search_space(world_size)

        search_result = {}
        current_optimal_perf = float('inf')
        # NOTE(review): the loop target rebinds ``search_space``; safe because
        # .items() is evaluated once, but a distinct name would be clearer.
        for resource_alloc, search_space in search_space.items():
            generate_search_space = search_space[TaskType.GENERATION.value]
            ref_search_space = search_space[TaskType.REFERENCE.value]
            update_search_space = search_space[TaskType.UPDATE.value]

            generate_config, generate_time = self.search_generate(generate_search_space, resource_alloc)
            ref_config, ref_time = self.search_ref(ref_search_space, resource_alloc)
            update_config, update_time = self.search_update(update_search_space, resource_alloc)

            # Keep the split minimizing the summed per-task E2E times.
            if current_optimal_perf > (generate_time + ref_time + update_time):
                current_optimal_perf = generate_time + ref_time + update_time
                search_result['optimal_resource_alloc'] = self.resource_allocation_descrption(resource_alloc)
                search_result['optimal_generate_config'] = generate_config
                search_result['optimal_ref_config'] = ref_config
                search_result['optimal_update_config'] = update_config

        logger.info(f"search_result: \n{json.dumps(search_result, indent=4)}")


def get_search_method(rl_config, source_config) -> BaseSearch:
    if rl_config.auto_parallel.mode == 'integrated':
        return IntegratedSearch(source_config)
    if rl_config.auto_parallel.mode == 'separated':
        return SeparateSearch(source_config)
    raise ValueError(f"auto_parallel can not support {rl_config.auto_parallel.mode}")