# Copyright (c) 2025, HUAWEI CORPORATION. All rights reserved.
import copy
from abc import ABC, abstractmethod
from dataclasses import dataclass

from mindspeed_rl.workers.scheduler.launcher import get_device_information
from mindspeed_rl.trainer.auto_parallel.system_config import SystemConfig


@dataclass(frozen=True, eq=True)
class ParallelConfig:
    """Immutable description of one candidate parallelism layout.

    Frozen + eq makes instances hashable, so they can be deduplicated
    in sets or used as dict keys.
    """

    pipeline_model_parallel_size: int
    tensor_model_parallel_size: int
    data_parallel_size: int
    virtual_pipeline_model_parallel_size: int
    context_parallel_size: int
    micro_batch_size: int

    def __repr__(self):
        # Compact tag such as "pp2_tp4_dp2_vpp1_cp1_mbs8" — convenient
        # for logs and result tables.
        dims = (
            self.pipeline_model_parallel_size,
            self.tensor_model_parallel_size,
            self.data_parallel_size,
            self.virtual_pipeline_model_parallel_size,
            self.context_parallel_size,
            self.micro_batch_size,
        )
        return "pp{}_tp{}_dp{}_vpp{}_cp{}_mbs{}".format(*dims)

    def slice_dims(self, world_size, with_mbs=False):
        """Project this layout onto ``world_size`` devices with pp/vpp collapsed to 1.

        DP is recomputed as ``world_size // (tp * cp)``; when ``with_mbs``
        is true the micro batch size is also reset to 1.
        """
        model_parallel = self.tensor_model_parallel_size * self.context_parallel_size
        return ParallelConfig(
            pipeline_model_parallel_size=1,
            tensor_model_parallel_size=self.tensor_model_parallel_size,
            data_parallel_size=world_size // model_parallel,
            virtual_pipeline_model_parallel_size=1,
            context_parallel_size=self.context_parallel_size,
            micro_batch_size=1 if with_mbs else self.micro_batch_size,
        )

    def slice_mbs(self):
        """Return a copy of this layout with micro_batch_size forced to 1."""
        kwargs = {
            "pipeline_model_parallel_size": self.pipeline_model_parallel_size,
            "tensor_model_parallel_size": self.tensor_model_parallel_size,
            "data_parallel_size": self.data_parallel_size,
            "virtual_pipeline_model_parallel_size": self.virtual_pipeline_model_parallel_size,
            "context_parallel_size": self.context_parallel_size,
            "micro_batch_size": 1,
        }
        return ParallelConfig(**kwargs)

    @property
    def splited_seq_len(self):
        # Full sequence = prompt length + max generated tokens; each CP
        # rank handles an equal slice.
        # NOTE(review): assumes the total is divisible by cp — confirm.
        total = int(SystemConfig.actor_config.seq_length) + int(SystemConfig.generate_config.sampling_config.max_tokens)
        return total // self.context_parallel_size
    

class SearchSpace(ABC):
    """Base class for parallelism search spaces.

    Subclasses implement :meth:`build_search_space` for a specific task
    (generation / reference forward / actor update) on top of the shared
    :meth:`build_basic_search_space`.
    """

    def __init__(self, config, world_size):
        """
        Args:
            config: task-specific configuration object.
            world_size: total number of devices available to the task.
        """
        self.config = config
        self.world_size = world_size
        # Cluster topology: devices per node and node count for world_size.
        num_devices_per_node, num_nodes = get_device_information(self.world_size)
        self.num_devices_per_node = num_devices_per_node
        self.num_nodes = num_nodes

    @abstractmethod
    def build_search_space(self):
        return NotImplemented

    def build_basic_search_space(self):
        """Build the basic search space over the pp/tp/dp/vpp/up dimensions.

        Returns:
            list of ``[pp, tp, dp, up, vpp]`` candidates satisfying the
            divisibility constraints of the actor model configuration.
        """
        actor_config = SystemConfig.actor_config

        search_space = []
        for pp in range(1, self.world_size + 1):
            # pp must divide both the world size and the layer count.
            if self.world_size % pp != 0 or actor_config.num_layers % pp != 0:
                continue

            for i in range(self.num_devices_per_node):
                tp = 2 ** i  # tensor-parallel sizes are powers of two
                if tp > self.num_devices_per_node or tp > (self.world_size // pp):
                    break
                # tp must divide the attention heads (and the query groups under GQA).
                if (actor_config.group_query_attention and actor_config.num_query_groups % tp != 0) \
                    or (actor_config.num_attention_heads % tp != 0):
                    continue

                # Long-sequence parallelism currently supports Ulysses only.
                max_up = self.world_size // pp // tp
                for up in range(1, max_up + 1):
                    if self.world_size % (pp * tp * up) != 0 or actor_config.num_attention_heads % (tp * up) != 0:
                        continue

                    dp = self.world_size // pp // tp // up
                    if actor_config.global_batch_size % dp != 0:
                        continue
                    vpp = 1
                    search_space.append([pp, tp, dp, up, vpp])

        # Expand the vpp dimension from a snapshot of the vpp=1 candidates.
        # A shallow copy is sufficient (the inner lists are only read here);
        # the original deepcopy was wasted work.
        for (pp, tp, dp, up, _) in list(search_space):
            if pp <= 2:
                # vpp > 1 is only considered for pp > 2 (loop-invariant
                # check hoisted out of the vpp loop).
                continue
            num_layers_pp = actor_config.num_layers // pp
            for vpp in range(2, num_layers_pp + 1):
                if num_layers_pp % vpp == 0:
                    search_space.append([pp, tp, dp, up, vpp])

        return search_space


class GenerationSearchSpace(SearchSpace):
    def build_search_space(self):
        """Build the search space for the generate task.

        Generation keeps only layouts with pp == 1, vpp == 1 and cp == 1;
        each candidate consumes its whole per-DP batch as one micro batch.
        """
        total_batch = SystemConfig.actor_config.global_batch_size * SystemConfig.rl_config.n_samples_per_prompt

        candidates = []
        for pp, tp, dp, up, vpp in self.build_basic_search_space():
            # Guard clause replaces the original post-construction filter;
            # the surviving configs (and their order) are unchanged.
            if pp != 1 or vpp != 1 or up != 1:
                continue
            candidates.append(ParallelConfig(
                pipeline_model_parallel_size=pp,
                tensor_model_parallel_size=tp,
                data_parallel_size=dp,
                virtual_pipeline_model_parallel_size=vpp,
                context_parallel_size=up,
                micro_batch_size=total_batch // dp,
            ))

        return candidates
    

class RefSearchSpace(SearchSpace):
    def build_search_space(self):
        """Build the search space for the reference model's forward pass."""
        total_batch = SystemConfig.actor_config.global_batch_size * SystemConfig.rl_config.n_samples_per_prompt

        candidates = []
        for pp, tp, dp, up, vpp in self.build_basic_search_space():
            per_dp_batch = total_batch // dp
            for mbs in range(1, per_dp_batch + 1):
                num_microbatch, remainder = divmod(per_dp_batch, mbs)
                # mbs must split the per-DP batch evenly; with virtual
                # pipeline stages the micro-batch count must divide by pp.
                if remainder != 0:
                    continue
                if vpp > 1 and num_microbatch % pp != 0:
                    continue
                candidates.append(ParallelConfig(
                    pipeline_model_parallel_size=pp,
                    tensor_model_parallel_size=tp,
                    data_parallel_size=dp,
                    virtual_pipeline_model_parallel_size=vpp,
                    context_parallel_size=up,
                    micro_batch_size=mbs,
                ))

        # Keep context parallelism only when the per-rank sequence slice
        # is long enough to be worthwhile.
        min_cp_seq_len = 8 * 8192
        final_search_space = []
        for cfg in candidates:
            if cfg.context_parallel_size > 1 and cfg.splited_seq_len < min_cp_seq_len:
                continue
            final_search_space.append(cfg)
        return final_search_space
    

class ActorSearchSpace(SearchSpace):
    def build_search_space(self):
        """Build the search space for the actor's update step.

        Returns:
            list[ParallelConfig]: candidates whose micro batch size is in
            {1, 2, 4} and whose context-parallel slice is long enough.
        """
        total_batch = SystemConfig.actor_config.global_batch_size * SystemConfig.rl_config.n_samples_per_prompt

        candidates = []
        for pp, tp, dp, up, vpp in self.build_basic_search_space():
            per_dp_batch = total_batch // dp
            # Only mbs in {1, 2, 4} survives the final filter, so enumerate
            # just those values instead of scanning 1..per_dp_batch and
            # discarding almost everything (avoids building thousands of
            # throwaway ParallelConfig objects). Output is identical.
            for mbs in (1, 2, 4):
                if mbs > per_dp_batch:
                    break
                num_microbatch, remainder = divmod(per_dp_batch, mbs)
                # mbs must split the per-DP batch evenly; with virtual
                # pipeline stages the micro-batch count must divide by pp.
                if remainder != 0 or (vpp > 1 and num_microbatch % pp != 0):
                    continue
                candidates.append(ParallelConfig(
                    pipeline_model_parallel_size=pp,
                    tensor_model_parallel_size=tp,
                    data_parallel_size=dp,
                    virtual_pipeline_model_parallel_size=vpp,
                    context_parallel_size=up,
                    micro_batch_size=mbs
                ))

        final_search_space = []
        for config in candidates:
            # Keep context parallelism only when the per-rank sequence
            # slice is long enough (>= 8 * 8192 tokens).
            if config.context_parallel_size > 1 and config.splited_seq_len < 8 * 8192:
                continue
            final_search_space.append(config)
        return final_search_space