# Copyright (c) 2025, HUAWEI CORPORATION. All rights reserved.
import copy

from omegaconf import OmegaConf

from mindspeed_rl.trainer.auto_parallel.system_config import SystemConfig
from mindspeed_rl.trainer.auto_parallel.launch import TaskType


class YamlConfig:
    """Mutates a deep copy of the training YAML config for auto-parallel profiling runs.

    Each ``update_*`` method round-trips the OmegaConf object through a plain
    dict (``OmegaConf.to_container`` -> mutate -> ``OmegaConf.create``) because
    nested assignment on a plain dict is simpler than OmegaConf node surgery.
    """

    def __init__(self, config):
        # Deep-copy so later mutations never leak back into the caller's config.
        self.yaml_config = copy.deepcopy(config)
        # Name of the active model entry under the top-level 'model' section.
        self.model = config.get('megatron_training').get('model')
        self.update_else_config()

    def get_yaml_config(self):
        """Return the current (possibly updated) OmegaConf config object."""
        return self.yaml_config

    def _to_container(self):
        # One mutable dict snapshot of the current config, interpolations resolved.
        return OmegaConf.to_container(self.yaml_config, resolve=True)

    def _commit(self, temp_config):
        # Re-wrap the mutated dict as the authoritative OmegaConf object.
        self.yaml_config = OmegaConf.create(temp_config)

    @staticmethod
    def _ensure_auto_parallel(temp_config):
        # Create rl_config.auto_parallel on demand and return the sub-dict.
        return temp_config['rl_config'].setdefault('auto_parallel', {})

    def update_generate_config(self, config, target_task):
        """Apply inference parallel sizes from *config* to generate_config.

        For a GENERATION task, also stamps the auto-parallel bookkeeping
        fields (launching task name, enabled flag, generate profile path).
        """
        temp_config = self._to_container()
        generate = temp_config['generate_config']
        generate['sampling_config']['max_tokens'] = 128
        generate['infer_tensor_parallel_size'] = config.tensor_model_parallel_size
        generate['infer_pipeline_parallel_size'] = config.pipeline_model_parallel_size
        temp_config['rl_config']['actor_rollout_dispatch_size'] = config.micro_batch_size
        if target_task == TaskType.GENERATION:
            auto_parallel = self._ensure_auto_parallel(temp_config)
            auto_parallel['launching_task_name'] = TaskType.GENERATION.value
            auto_parallel['enabled'] = False
            auto_parallel['gen_profile_path'] = SystemConfig.generate_profile_path_fmt.format(config)

        self._commit(temp_config)

    def update_ref_config(self, config, target_task: TaskType):
        """Apply reference-model parallel sizes; PP must already be pruned to 1.

        Raises:
            AssertionError: if pipeline_model_parallel_size > 1 (kept as
                AssertionError for backward compatibility with existing callers).
        """
        if config.pipeline_model_parallel_size > 1:
            raise AssertionError('Must prune pp and vpp dim')
        temp_config = self._to_container()
        actor = temp_config['actor_config']
        actor['tensor_model_parallel_size'] = config.tensor_model_parallel_size
        actor['pipeline_model_parallel_size'] = config.pipeline_model_parallel_size
        actor['context_parallel_size'] = config.context_parallel_size
        temp_config['rl_config']['ref_forward_micro_batch_size'] = config.micro_batch_size

        if target_task != TaskType.GENERATION:
            # Shrink the model to 2 layers per PP stage for a fast profiling run.
            temp_config['model'][self.model]['num_layers'] = config.pipeline_model_parallel_size * 2
            # An explicit per-stage layer list would conflict with the reduced num_layers.
            actor.pop('num_layer_list', None)
            auto_parallel = self._ensure_auto_parallel(temp_config)
            auto_parallel['launching_task_name'] = target_task.value
            auto_parallel['enabled'] = False
            auto_parallel['transfer_dock_path'] = SystemConfig.transfer_dock_path
            auto_parallel['ref_profile_path'] = SystemConfig.ref_profile_path_fmt.format(config)

        self._commit(temp_config)

    def update_actor_config(self, config, target_task: TaskType):
        """Apply actor parallel sizes on top of a ref update.

        Delegates the shared fields to update_ref_config, but first saves the
        ref_forward_micro_batch_size and (for non-generation tasks) the
        ref_profile_path values that that call would otherwise clobber, and
        restores them afterwards.
        """
        ref_fwd_mbs = self.yaml_config.rl_config.ref_forward_micro_batch_size
        if target_task != TaskType.GENERATION:
            ref_profile_path = self.yaml_config.rl_config.auto_parallel.ref_profile_path

        self.update_ref_config(config, target_task)
        temp_config = self._to_container()
        temp_config['rl_config']['ref_forward_micro_batch_size'] = ref_fwd_mbs
        temp_config['megatron_training']['micro_batch_size'] = config.micro_batch_size
        if target_task != TaskType.GENERATION:
            auto_parallel = temp_config['rl_config']['auto_parallel']
            auto_parallel['ref_profile_path'] = ref_profile_path
            auto_parallel['profile_path'] = SystemConfig.actor_profile_path_fmt.format(config)

        self._commit(temp_config)

    def update_else_config(self):
        """Force a single training iteration — profiling needs only one step."""
        temp_config = self._to_container()
        temp_config['megatron_training']['train_iters'] = 1
        self._commit(temp_config)

            
            
        
