# Copyright 2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Megatron arguments."""

import dataclasses
import json
import os
import re
from types import SimpleNamespace
from typing import List
import yaml

import mindspore.common.dtype as mstype

from mindspeed_ms.tools import DictConfig, logger
from mindspeed_ms.training.unused_args import (
    UNUSED_MODEL_PARALLEL_CONFIG,
    UNUSED_TRANSFORMER_CONFIG,
    UNUSED_GLOBAL_ARGS
)
from mindspeed_ms.training.model_parallel_config import ModelParallelConfig
from mindspeed_ms.core.transformer.transformer_config import TransformerConfig, _SUPPORT_INIT_METHOD


# Mapping from dtype names (as written in yaml configs) to MindSpore dtypes.
str_dtype_to_ms = {
    "float32": mstype.float32,
    "float16": mstype.float16,
    "bfloat16": mstype.bfloat16
}

# Taken from https://stackoverflow.com/questions/65414773/parse-environment-variable-from-yaml-with-pyyaml
# Allows for yaml to use environment variables
# Matches every ${VAR} placeholder inside a yaml scalar; the capture group is the variable name.
env_pattern = re.compile(r".*?\${(.*?)}.*?")


def env_constructor(loader, node):
    """Resolve ``${VAR}`` environment-variable references inside a yaml scalar.

    Args:
        loader: The active yaml loader.
        node: The scalar node whose value may contain ``${VAR}`` placeholders.

    Returns:
        str: The scalar value with every placeholder replaced by the
        corresponding environment variable's value.

    Raises:
        AssertionError: If a referenced environment variable is not set.
    """
    value = loader.construct_scalar(node)
    for group in env_pattern.findall(value):
        # Look the variable up once; the environment could change between calls.
        env_value = os.environ.get(group)
        assert env_value is not None, f"environment variable {group} in yaml not found"
        value = value.replace(f"${{{group}}}", env_value)
    return value
# Register the resolver/constructor so any scalar containing ${VAR} is expanded on load.
yaml.add_implicit_resolver("!pathex", env_pattern)
yaml.add_constructor("!pathex", env_constructor)


def check_unused_args(config_name, args, unused_configs):
    """Warn about arguments that are set but have no effect.

    Args:
        config_name (str): Prefix naming the sub-config the args belong to
            (e.g. ``"model_parallel"``); empty string for top-level args.
        args (SimpleNamespace): Namespace to inspect.
        unused_configs (Iterable[str]): Names of arguments that are ignored.
    """
    for config in unused_configs:
        if hasattr(args, config):
            # Bug fix: the two branches were swapped — a non-empty prefix must
            # produce the dotted "<config_name>.<config>" message.
            if config_name:
                logger.warning(f"Unused argument {config_name}.{config} is set, it would not work")
            else:
                logger.warning(f"Unused argument {config} is set, it would not work")


# pylint: disable=C0301, W0102, W0105
def validate_yaml(args, args_default, defaults=None):
    """Validate, normalize and flatten yaml-style arguments.

    Fills unspecified model-parallel / transformer fields with their dataclass
    defaults, derives dependent values (data-parallel size, global batch size,
    virtual pipeline size, ...), checks cross-argument constraints, and finally
    flattens the ``model_parallel`` and ``language_model`` sub-namespaces into
    the top-level namespace.

    Args:
        args (SimpleNamespace): Parsed yaml arguments (with ``model_parallel``
            and ``language_model`` sub-namespaces).
        args_default (SimpleNamespace): Namespace providing global defaults.
        defaults (dict, optional): Per-call defaults, applied only to keys that
            are still ``None`` on ``args``.

    Returns:
        SimpleNamespace: The validated, flattened argument namespace.
    """
    # Avoid the shared mutable-default-argument pitfall.
    if defaults is None:
        defaults = {}

    # Warn if unused args are set.
    check_unused_args("model_parallel", args.model_parallel, UNUSED_MODEL_PARALLEL_CONFIG)
    check_unused_args("language_model", args.language_model, UNUSED_TRANSFORMER_CONFIG)
    check_unused_args("", args, UNUSED_GLOBAL_ARGS)

    # Fill unspecified arguments.
    for f in dataclasses.fields(ModelParallelConfig):
        if not hasattr(args.model_parallel, f.name):
            setattr(args.model_parallel, f.name, f.default)

    for f in dataclasses.fields(TransformerConfig):
        if f.name not in ModelParallelConfig.__dataclass_fields__ and not hasattr(args.language_model, f.name):
            setattr(args.language_model, f.name, f.default)

    for key, value in args_default.__dict__.items():
        if key in TransformerConfig.__dataclass_fields__:
            continue
        if not hasattr(args, key):
            setattr(args, key, value)

    # This is for legacy script env var setting
    if isinstance(args.data_path, str):
        # If no white space its a single path
        split_data_path = args.data_path.split()
        if len(split_data_path) != 1:
            args.data_path = split_data_path

    # Tensor model parallel size.
    args.model_parallel.tensor_model_parallel_size = min(
        args.model_parallel.tensor_model_parallel_size, args.world_size)
    assert args.world_size % args.model_parallel.tensor_model_parallel_size == 0, 'world size'\
        ' ({}) is not divisible by tensor model parallel size ({})'.format(
            args.world_size, args.model_parallel.tensor_model_parallel_size)
    # Pipeline model parallel size.
    args.model_parallel.pipeline_model_parallel_size = min(
        args.model_parallel.pipeline_model_parallel_size,
        (args.world_size // args.model_parallel.tensor_model_parallel_size))
    # With a standalone embedding stage, one pipeline stage holds no transformer layers.
    args.model_parallel.transformer_pipeline_model_parallel_size = (
        args.model_parallel.pipeline_model_parallel_size - 1
        if args.standalone_embedding_stage else
        args.model_parallel.pipeline_model_parallel_size
    )

    # Checks.
    model_parallel_size = args.model_parallel.pipeline_model_parallel_size * \
        args.model_parallel.tensor_model_parallel_size
    assert args.world_size % (model_parallel_size * args.model_parallel.context_parallel_size) == 0, \
        'world size ({}) is not divisible by tensor parallel size ({}) times ' \
        'pipeline parallel size ({}) times context parallel size ({})'.format(
            args.world_size, args.model_parallel.tensor_model_parallel_size,
            args.model_parallel.pipeline_model_parallel_size, args.model_parallel.context_parallel_size)
    # data_parallel_size is not in model parallel config
    args.data_parallel_size = args.world_size // (
        model_parallel_size * args.model_parallel.context_parallel_size)

    if args.rank == 0:
        print('using world size: {}, data-parallel size: {}, '
              'context-parallel size: {} '
              'tensor-model-parallel size: {}, '
              'pipeline-model-parallel size: {} '.format(
                  args.world_size, args.data_parallel_size,
                  args.model_parallel.context_parallel_size,
                  args.model_parallel.tensor_model_parallel_size,
                  args.model_parallel.pipeline_model_parallel_size), flush=True)
    if args.model_parallel.pipeline_model_parallel_size > 1:
        if args.model_parallel.pipeline_model_parallel_split_rank is not None:
            assert args.model_parallel.pipeline_model_parallel_split_rank < \
                args.model_parallel.pipeline_model_parallel_size, 'split rank needs'\
                ' to be less than pipeline model parallel size ({})'.format(
                    args.model_parallel.pipeline_model_parallel_size)

    if args.model_parallel.tp_comm_overlap:
        assert args.model_parallel.sequence_parallel, 'Tensor parallel communication/GEMM overlap can happen only when sequence parallelism is enabled'

    # Set input defaults.
    for key in defaults:
        # For default to be valid, it should not be provided in the
        # arguments that are passed to the program. We check this by
        # ensuring the arg is set to None.
        if getattr(args, key, None) is not None:
            if args.rank == 0:
                print('WARNING: overriding default arguments for {key}:{v} \
                       with {key}:{v2}'.format(key=key, v=defaults[key],
                                               v2=getattr(args, key)),
                      flush=True)
        else:
            setattr(args, key, defaults[key])

    # Batch size.
    assert args.micro_batch_size is not None
    assert args.micro_batch_size > 0
    if args.global_batch_size is None:
        args.global_batch_size = args.micro_batch_size * args.data_parallel_size
        if args.rank == 0:
            print('setting global batch size to {}'.format(
                args.global_batch_size), flush=True)
    assert args.global_batch_size > 0

    if args.model_parallel.virtual_pipeline_model_parallel_size is None:
        # num_layers_per_virtual_pipeline_stage is not inside model parallel for checkpointing
        if args.num_layers_per_virtual_pipeline_stage is not None:
            # check num_layer_list, full_recompute_list and select_recompute_list is None
            assert args.model_parallel.num_layer_list is None, \
                "num_layer_list is not None when num_layers_per_virtual_pipeline_stage is specified"
            if hasattr(args.language_model, 'recompute_config') and args.language_model.recompute_config is not None:
                required_args = ['recompute', 'select_recompute', 'select_comm_recompute']
                for arg in required_args:
                    if hasattr(args.language_model.recompute_config, arg):
                        _check_arg_is_none(args.language_model.recompute_config, arg, \
                            f"num_layers_per_virtual_pipeline_stage is specified, recompute_config.{arg} is not None")

            assert args.model_parallel.pipeline_model_parallel_size > 2, \
                'pipeline-model-parallel size should be greater than 2 with ' \
                'interleaved schedule'
            assert args.language_model.num_layers % args.model_parallel.transformer_pipeline_model_parallel_size == 0, \
                'number of layers should be divisible by the pipeline parallel size'
            num_layers_per_pipeline_stage = args.language_model.num_layers // args.model_parallel.transformer_pipeline_model_parallel_size
            assert num_layers_per_pipeline_stage % args.num_layers_per_virtual_pipeline_stage == 0, \
                'number of layers per pipeline stage must be divisible number of layers per virtual pipeline stage'
            args.model_parallel.virtual_pipeline_model_parallel_size = num_layers_per_pipeline_stage // \
                args.num_layers_per_virtual_pipeline_stage
        else:
            args.model_parallel.virtual_pipeline_model_parallel_size = None
            # Overlap P2P communication is disabled if not using the interleaved schedule.
            args.model_parallel.overlap_p2p_comm = False
            if args.rank == 0:
                print('WARNING: Setting args.overlap_p2p_comm to False since non-interleaved '
                      'schedule does not support overlapping p2p communication')
    else:
        if args.model_parallel.overlap_p2p_comm:
            assert args.model_parallel.pipeline_model_parallel_size > 1, \
                'when interleaved schedule is used, pipeline-model-parallel size ' \
                'should be greater than 1'
        else:
            assert args.model_parallel.pipeline_model_parallel_size > 2, \
                'when interleaved schedule is used and p2p communication overlap is disabled, ' \
                'pipeline-model-parallel size should be greater than 2 to avoid having multiple ' \
                'p2p sends and recvs between same 2 ranks per communication batch'
        assert args.num_layers_per_virtual_pipeline_stage is None, \
            "num_layers_per_virtual_pipeline_stage must be None when virtual_pipeline_model_parallel_size is specified"
        # check noop_layers
        if args.model_parallel.noop_layers is not None:
            pp_split_num = args.model_parallel.virtual_pipeline_model_parallel_size * \
                args.model_parallel.transformer_pipeline_model_parallel_size
            assert args.language_model.num_layers % pp_split_num == 0, \
                f'The number of model layers is {args.language_model.num_layers}, ' \
                f'but using pipeline parallel required at least ' \
                f'pp({args.model_parallel.transformer_pipeline_model_parallel_size}) * ' \
                f'vpp({args.model_parallel.virtual_pipeline_model_parallel_size}) = ' \
                f'{pp_split_num} layers for splitting'

        _check_list_is_validate("num_layer_list", args.model_parallel.num_layer_list,
                                args.model_parallel.virtual_pipeline_model_parallel_size,
                                args.model_parallel.transformer_pipeline_model_parallel_size)
        if args.language_model.recompute_config is not None:
            required_args = ['recompute', 'select_recompute', 'select_comm_recompute']
            for arg in required_args:
                if hasattr(args.language_model.recompute_config, arg):
                    _check_list_is_validate(arg, getattr(args.language_model.recompute_config, arg),
                                            args.model_parallel.virtual_pipeline_model_parallel_size,
                                            args.model_parallel.transformer_pipeline_model_parallel_size)


    if args.overlap_param_gather:
        assert args.use_distributed_optimizer, \
            '--overlap-param-gather only supported with distributed optimizer'
        assert args.overlap_grad_reduce, \
            '--overlap-grad-reduce should be turned on when using --overlap-param-gather'

    # Parameters dtype.
    if args.model_parallel.fp16:
        assert not args.model_parallel.bf16
        args.model_parallel.params_dtype = mstype.half
        args.model_parallel.compute_dtype = mstype.half
    if args.model_parallel.bf16:
        assert not args.model_parallel.fp16
        args.model_parallel.params_dtype = mstype.bfloat16
        args.model_parallel.compute_dtype = mstype.bfloat16
        # bfloat16 requires gradient accumulation and all-reduce to
        # be done in fp32.
        if not args.accumulate_allreduce_grads_in_fp32:
            args.accumulate_allreduce_grads_in_fp32 = True
            if args.rank == 0:
                print('accumulate and all-reduce gradients in fp32 for '
                      'bfloat16 data type.', flush=True)

    if args.rank == 0:
        print('using {} for parameters ...'.format(args.model_parallel.params_dtype),
              flush=True)

    if args.dataloader_type is None:
        args.dataloader_type = 'single'

    # Consumed tokens.
    args.consumed_train_samples = 0
    args.consumed_valid_samples = 0

    # Iteration-based training.
    if args.train_iters:
        # If we use iteration-based training, make sure the
        # sample-based options are off.
        assert args.train_samples is None, \
            'expected iteration-based training'
        assert args.lr_decay_samples is None, \
            'expected iteration-based learning rate decay'
        assert args.lr_warmup_samples == 0, \
            'expected iteration-based learning rate warmup'
        assert args.rampup_batch_size is None, \
            'expected no batch-size rampup for iteration-based training'
        if args.lr_warmup_fraction is not None:
            assert args.lr_warmup_iters == 0, \
                'can only specify one of lr-warmup-fraction and lr-warmup-iters'

    # Sample-based training.
    if args.train_samples:
        # If we use sample-based training, make sure the
        # iteration-based options are off.
        assert args.train_iters is None, \
            'expected sample-based training'
        assert args.lr_decay_iters is None, \
            'expected sample-based learning rate decay'
        assert args.lr_warmup_iters == 0, \
            'expected sample-based learning rate warmup'
        if args.lr_warmup_fraction is not None:
            assert args.lr_warmup_samples == 0, \
                'can only specify one of lr-warmup-fraction ' \
                'and lr-warmup-samples'

    # How to handle this better
    if args.language_model.num_layers is not None:
        assert args.encoder_num_layers is None, \
            'cannot have both num-layers and encoder-num-layers specified'
        args.encoder_num_layers = args.language_model.num_layers
    else:
        assert args.encoder_num_layers is not None, \
            'either num-layers or encoder-num-layers should be specified'
        args.language_model.num_layers = args.encoder_num_layers

    # Check required arguments.
    # removed max_position_embeddings from reqs
    required_args = ['num_layers', 'hidden_size', 'num_attention_heads']
    for req_arg in required_args:
        _check_arg_is_not_none(args.language_model, req_arg)

    # Checks.
    if args.language_model.ffn_hidden_size is None:
        if args.language_model.activation_func == "swiglu":
            # reduce the dimnesion for MLP since projections happens on
            # two linear layers. this keeps the number of parameters in
            # the same ballpark as the counterpart with 4*h size
            # we keep it a multiple of 64, which means the actual tensor size
            # will be a multiple of 64 / tp_size
            args.language_model.ffn_hidden_size = int(
                (4 * args.language_model.hidden_size * 2 / 3) / 64) * 64
        else:
            args.language_model.ffn_hidden_size = 4 * args.language_model.hidden_size

    if args.language_model.kv_channels is None:
        assert args.language_model.hidden_size % args.language_model.num_attention_heads == 0
        args.language_model.kv_channels = args.language_model.hidden_size // args.language_model.num_attention_heads

    if args.seq_length is not None:
        assert args.encoder_seq_length is None
        args.encoder_seq_length = args.seq_length
    else:
        assert args.encoder_seq_length is not None
        args.seq_length = args.encoder_seq_length

    if args.seq_length is not None:
        assert args.max_position_embeddings >= args.seq_length
    if args.decoder_seq_length is not None:
        assert args.max_position_embeddings >= args.decoder_seq_length

    if args.model_parallel.context_parallel_size > 1:
        assert args.model_parallel.context_parallel_algo in ["ulysses-cp-algo", "megatron_cp_algo", "hybrid_cp_algo"], "--context-parallel-algo only support ulysses-cp-algo, megatron_cp_algo and hybrid_cp_algo"

    if args.model_parallel.context_parallel_size > 1 and args.model_parallel.context_parallel_algo == 'hybrid_cp_algo':
        assert args.model_parallel.ulysses_degree_in_cp is not None, "--ulysses-degree-in-cp must be specified in hybrid_cp_algo"
        ring_degree, remainder = divmod(args.model_parallel.context_parallel_size, args.model_parallel.ulysses_degree_in_cp)
        assert ring_degree > 1 and remainder == 0, "--context-parallel-size must be divisible by --ulysses-degree-in-cp with a ring degree greater than 1"

        head, remainder = divmod(args.language_model.num_attention_heads, args.model_parallel.ulysses_degree_in_cp)
        assert head >= 1 and remainder == 0, "--num-attention-heads must be divisible by --ulysses-degree-in-cp in hybrid cp"

        assert args.seq_length % (2 * args.model_parallel.context_parallel_size) == 0, "sequence length must be divisible by 2 * context-parallel-size in hybrid cp"

    if args.lr is not None:
        assert args.min_lr <= args.lr
    if args.save is not None:
        assert args.save_interval is not None
    # Mixed precision checks.
    if args.fp16_lm_cross_entropy:
        assert args.model_parallel.fp16, 'lm cross entropy in fp16 only support in fp16 mode.'
    if args.language_model.fp32_residual_connection:
        assert args.model_parallel.fp16 or args.model_parallel.bf16, \
            'residual connection in fp32 only supported when using fp16 or bf16.'

    if args.language_model.moe_grouped_gemm:
        assert args.model_parallel.bf16, 'Currently GroupedGEMM for MoE only supports bf16 dtype.'

    if args.weight_decay_incr_style == 'constant':
        assert args.start_weight_decay is None
        assert args.end_weight_decay is None
        args.start_weight_decay = args.weight_decay
        args.end_weight_decay = args.weight_decay
    else:
        assert args.start_weight_decay is not None
        assert args.end_weight_decay is not None

    # Now not support the transformer_engine
    if args.transformer_impl == 'transformer_engine':
        logger.warning('transformer_impl would be set to "local"')
        args.transformer_impl = 'local'

    # clone_scatter_output_in_embedding not supported yet
    if args.language_model.clone_scatter_output_in_embedding:
        logger.warning('clone_scatter_output_in_embedding would be set False')
        args.language_model.clone_scatter_output_in_embedding = False

    # bias_dropout_fusion not supported yet
    if args.language_model.bias_dropout_fusion:
        logger.warning('bias_dropout_fusion would be set False')
        args.language_model.bias_dropout_fusion = False

    # normalization
    if args.use_fused_rmsnorm:
        if args.language_model.normalization != "RMSNorm":
            # Bug fix: the error message previously read args.normalization,
            # which is not set on the flattened namespace at this point.
            raise AssertionError(
                'use_fused_rmsnorm must enable with '
                'normalization=RMSNorm, but got normalization'
                '={}.'.format(args.language_model.normalization))
        args.language_model.normalization = "FusedRMSNorm"

    # Disable new_dataset
    args.new_dataset = False

    # We only support SBH
    args.data_layout = 'SBH'

    # Use mcore or legacy
    args.use_legacy_models = not args.use_mcore_models

    # DDP and zero_level
    args.wrap_with_ddp = True
    if args.model_parallel.zero_level is not None:
        if args.use_distributed_optimizer:
            assert args.model_parallel.zero_level == 'z3', \
                "zero_level must be None or 'z3' when use_distributed_optimizer is ON"
        else:
            args.wrap_with_ddp = False

    # Activation recomputing.
    if args.language_model.recompute_config is None:
        if args.language_model.distribute_saved_activations:
            assert args.model_parallel.tensor_model_parallel_size > 1, 'can distribute ' \
                'recomputed activations only across tensor model ' \
                'parallel groups'
            assert args.language_model.recompute_granularity == 'full', \
                'distributed recompute activations is only '\
                'application to full recompute granularity'
            assert args.language_model.recompute_method is not None, \
                'for distributed recompute activations to work you '\
                'need to use a recompute method '

        if args.language_model.recompute_granularity == 'selective':
            assert args.language_model.recompute_method is None, \
                'recompute method is not yet supported for ' \
                'selective recomputing granularity'
    else:
        logger.info('recompute_config is specified, recompute_granularity, ' \
            'recompute_method and recompute_num_layers would be None')
        args.language_model.recompute_granularity = None
        args.language_model.recompute_method = None
        args.language_model.recompute_num_layers = None

    # disable sequence parallelism when tp=1
    # to avoid change in numerics when
    # sequence_parallelism is enabled.
    if args.model_parallel.tensor_model_parallel_size == 1:
        args.model_parallel.sequence_parallel = False

    # disable async_tensor_model_parallel_allreduce when
    # model parallel memory optimization is enabled
    if args.model_parallel.sequence_parallel:
        args.model_parallel.async_tensor_model_parallel_allreduce = False

    # Retro checks.
    if getattr(args, 'retro_add_retriever', False):
        raise Exception("Retro untested for yaml args. See arguments.py.")

    # Load retro args (used by both Retro & GPT).
    if getattr(args, 'retro_project_dir', None) is not None:
        raise Exception("Retro untested for yaml args. See arguments.py.")

    if args.language_model.rotary_interleaved and args.language_model.apply_rope_fusion:
        raise RuntimeError(
            '--rotary-interleaved does not work with rope_fusion.')

    # MoE Spec check
    if args.language_model.num_moe_experts is not None:
        assert args.spec is None, "Model Spec must be None when using MoEs"
        if args.model_parallel.tensor_model_parallel_size > 1:
            assert args.model_parallel.sequence_parallel, \
                "When using MoE and tensor parallelism, sequence parallelism must be used."

    # Expert parallelism check
    if args.model_parallel.expert_model_parallel_size > 1:
        assert args.language_model.num_moe_experts is not None, "num_experts must be non None to use expert model parallelism"
        assert args.language_model.num_moe_experts % args.model_parallel.expert_model_parallel_size == 0, \
            "Number of experts should be a multiple of expert model parallel_size."
        assert not args.model_parallel.fp16, \
            "Expert parallelism is not supported with fp16 training."

    # Pipe expert layer check
    if args.language_model.use_pipe_expert_layer:
        assert args.model_parallel.expert_model_parallel_size > 1, "Pipe expert is only supported with ep > 1."
        assert not (args.language_model.use_pipe_expert_recompute and args.language_model.use_pipe_expert_swap), \
            "Do not support recompute and swap at the same time now."
        assert args.model_parallel.tensor_model_parallel_size == 1, "Pipe expert is only supported with tp == 1 currently."
    else:
        assert not args.language_model.use_pipe_expert_recompute, \
            "Pipe expert recompute is only supported when using pipe expert layer."
        assert not args.language_model.use_pipe_expert_swap, \
            "Pipe expert swap is only supported when using pipe expert layer."

    if args.profile_save_path is None and args.save:
        args.profile_save_path = os.path.join(args.save, "profile")

    # Print arguments.
    _print_args("arguments", args)

    # Flatten the sub-namespaces into the top-level namespace; a duplicate key
    # raises TypeError, which usually means an argument is in the wrong place.
    try:
        args = SimpleNamespace(**args.__dict__, **args.model_parallel.__dict__)
        args = SimpleNamespace(**args.__dict__, **args.language_model.__dict__)
    except Exception as e:
        e.args = (f"{e.args[0]}. Maybe the argument is put in wrong place.",)
        raise e
    # For GPT Layer spec in pretrain_gpt
    args.num_experts = args.language_model.num_moe_experts

    return args


def _print_args(title, args):
    """Print arguments."""
    if args.rank == 0:
        print(f'------------------------ {title} ------------------------',
              flush=True)
        str_list = []
        for arg in vars(args):
            dots = '.' * (48 - len(arg))
            str_list.append('  {} {} {}'.format(arg, dots, getattr(args, arg)))
        for arg in sorted(str_list, key=lambda x: x.lower()):
            print(arg, flush=True)
        print(f'-------------------- end of {title} ---------------------',
              flush=True)


def namespace_to_dictconfig(namespace):
    """Recursively convert a SimpleNamespace tree into a DictConfig tree.

    Non-namespace values are returned unchanged.
    """
    if not isinstance(namespace, SimpleNamespace):
        return namespace
    converted = {key: namespace_to_dictconfig(value)
                 for key, value in vars(namespace).items()}
    return DictConfig(**converted)


def core_config_from_args(args, dataclass=TransformerConfig):
    """Builds keyword arguments for a core config object from namespace args.

    SimpleNamespace values (and SimpleNamespace items inside lists) are
    converted to DictConfig so the resulting kwargs are config-friendly.

    Args:
        args (SimpleNamespace): Namespace to pull argument values from.
        dataclass (dataclass, optional): Core dataclass config to pull
            argument names from.

    Returns:
        dict: Mapping of dataclass field names to their values, suitable for
        constructing the config via ``dataclass(**kw_args)``.

    Raises:
        Exception: If a field of the dataclass is missing from ``args``.
    """
    kw_args = {}
    for f in dataclasses.fields(dataclass):
        if not hasattr(args, f.name):
            raise Exception(
                f"Missing argument {f.name} for {str(dataclass)} config")
        val = getattr(args, f.name)
        if isinstance(val, SimpleNamespace):
            val = namespace_to_dictconfig(val)
        elif isinstance(val, List):
            # Convert namespace items in place; other items are left as-is.
            for index, item in enumerate(val):
                if isinstance(item, SimpleNamespace):
                    val[index] = namespace_to_dictconfig(item)
        kw_args[f.name] = val
    return kw_args


def _check_arg_is_not_none(args, arg):
    assert getattr(args, arg) is not None, '{} argument is None'.format(arg)


def _check_arg_is_none(args, arg, error_msg=None):
    assert getattr(args, arg) is None, error_msg if error_msg else '{} argument is not None'.format(arg)


def _check_list_is_validate(arg_name, arg, vpp, pp):
    if arg is not None:
        assert isinstance(arg, List), f'{arg_name} is not instance of List type'
        assert len(arg) == pp, f"{arg_name}'s length is not equal to pipeline parallel size: {pp}"
        for index, sub_list in enumerate(arg):
            assert isinstance(sub_list, List), f"{arg_name}[{index}] is not instance of List type"
            assert len(sub_list) == vpp,  \
                f"{arg_name}[{index}]'s length is not equal to virtual pipeline parallel size: {vpp}"


def core_transformer_config_from_yaml(args, transfomer_key="language_model"):
    """Build a core TransformerConfig from yaml-derived namespace arguments."""
    # Capture from the top-level namespace before merging the sub-configs.
    swiglu_fusion_flag = args.bias_swiglu_fusion

    # Combine the transformer sub-config with the model-parallel sub-config.
    merged = SimpleNamespace(**vars(getattr(args, transfomer_key)),
                             **vars(args.model_parallel))
    # Translate merged args to core transformer configuration kwargs.
    kw_args = core_config_from_args(merged, TransformerConfig)

    # Hardcoded settings that are not driven by the yaml file.
    kw_args['deallocate_pipeline_outputs'] = True
    kw_args['pipeline_dtype'] = kw_args['params_dtype']
    kw_args['batch_p2p_comm'] = not merged.overlap_p2p_comm

    supported_activations = ["swiglu", "gelu", "fast_gelu", "fused_swiglu", "squared_relu", "silu"]
    assert merged.activation_func in supported_activations, \
        f"{merged.activation_func} is not a supported activation function"
    if merged.activation_func == "swiglu":
        # swiglu = silu with a gated linear unit.
        kw_args['activation_func'] = "silu"
        kw_args['gated_linear_unit'] = True
        kw_args['bias_activation_fusion'] = swiglu_fusion_flag
    elif merged.activation_func == "gelu":
        if merged.add_bias_linear:
            kw_args['bias_activation_fusion'] = False
        else:
            kw_args['bias_activation_fusion'] = merged.bias_activation_fusion

    # Resolve named initializers into callables.
    if merged.init_method is not None:
        kw_args['init_method'] = _SUPPORT_INIT_METHOD[merged.init_method]()
    if merged.bias_init is not None:
        kw_args['bias_init'] = _SUPPORT_INIT_METHOD[merged.bias_init]()

    # Return Transformer config.
    return TransformerConfig(**kw_args)


def load_yaml(yaml_path):
    """Load a yaml config file into a nested SimpleNamespace tree.

    Args:
        yaml_path (str): Path to the yaml config file.

    Returns:
        SimpleNamespace: Nested namespace mirroring the yaml structure, with
        the config file location recorded as ``yaml_cfg``.
    """
    print("warning using experimental yaml arguments feature, argparse arguments will be ignored")
    # Explicit encoding avoids platform-dependent decoding of the yaml file.
    with open(yaml_path, "r", encoding="utf-8") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    # Convert the plain dict tree to nested namespaces via a json round-trip.
    config_namespace = json.loads(json.dumps(
        config), object_hook=lambda item: SimpleNamespace(**item))
    # Add config location to namespace
    config_namespace.yaml_cfg = yaml_path
    return config_namespace
