import argparse
import os

import torch


# Module-level singleton holding the parsed command-line arguments.
# Written by set_args() / set_args_by_params() / get_args_(); read via get_args().
_GLOBAL_ARGS = None
def set_args():
    """Parse command-line arguments and install them as the global args.

    Populates the module-level ``_GLOBAL_ARGS`` singleton (read back via
    ``get_args()``) and also returns the parsed namespace for convenience.

    Returns:
        argparse.Namespace: parsed arguments, with the distributed-launch
        fields ``rank`` / ``local_rank`` / ``world_size`` filled in from
        the environment (defaulting to single-process values).
    """
    global _GLOBAL_ARGS

    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch', type=int, default=1)
    parser.add_argument('--micro-batch', type=int, default=None)
    parser.add_argument('--pipeline-model-parallel-size', type=int, default=1,
                        help='Degree of pipeline model parallelism.')
    parser.add_argument('--tensor-model-parallel-size', type=int, default=1,
                        help='Degree of tensor model parallelism.')

    # Virtual pipeline (interleaved) schedule support.
    parser.add_argument('--virtual-pipeline-model-parallel-size', type=int, default=1,
                        help='Degree of virtual pipeline model parallelism.')

    parser.add_argument('--gpus_per_stage', type=int, default=1)
    parser.add_argument('--nstages_per_node', type=int, default=1)
    parser.add_argument('--distributed-backend', default='nccl',
                        choices=['nccl', 'gloo'])
    parser.add_argument('--local-rank', type=int, default=None,
                        help='local rank passed from distributed launcher.')
    parser.add_argument('--micro-batch-size', type=int, default=None,
                        help='batch-size')
    parser.add_argument('--tensor-length', type=int, default=None,
                        help='the len of tensor')
    # BUG FIX: help text was a copy-paste of --tensor-length's description.
    parser.add_argument('--fp16', type=int, default=None,
                        help='run model in fp16 mode (int flag)')

    # Model configuration parameters (default None: must be supplied by caller).
    parser.add_argument('--hidden-size', type=int, default=None,
                        help='Transformer hidden size')
    parser.add_argument('--num-hidden-layers', type=int, default=None,
                        help='Number of transformer layers')
    parser.add_argument('--num-attention-heads', type=int, default=None,
                        help='Number of attention heads')
    parser.add_argument('--vocab-size', type=int, default=None,
                        help='Vocabulary size')

    # Auto-parallel flag (currently NOT used).
    parser.add_argument('--auto-parallel', action='store_true', default=False,
                        help='Enable auto parallel mode')

    parser.add_argument('--nnodes', type=int, default=1,
                        help='the number of node in the cluster')
    parser.add_argument('--nproc-per-node', type=int, default=8,
                        help='the number of NPU on each node')
    parser.add_argument('--master-addr', type=str, default=None,
                        help='the ip-address of master node')
    parser.add_argument('--master-port', type=str, default=None,
                        help='the ip-port of master node')
    parser.add_argument('--node-rank', type=int, default=0,
                        help='the rank of nodes in the cluster, starting from 0 and increment by 1')
    parser.add_argument('--prof-file', type=str, default=None, help='')

    # Profiling options.
    parser.add_argument('--profile-memory', action='store_true', default=False,
                        help='Enable memory profiling')
    parser.add_argument('--module-profile-path', type=str, default=None,
                        help='Path to save module profiling results')

    parser.add_argument('--global-batch-size', type=int, default=None,
                        help='Total batch size across all devices')

    # store_true: becomes True only when the flag is present on the CLI.
    parser.add_argument('--in-analyse-autoparallel', action='store_true',
                        default=False,
                        help='Enable auto-parallel analysis (default: False)')

    # BUG FIX: the original declared type=int with a list default, so the
    # attribute was a list when defaulted but a bare int when passed on the
    # command line.  nargs='+' yields a list of ints in both cases.
    parser.add_argument('--profile-ranks', type=int, nargs='+', default=[0],
                        help='Global ranks to profile.The default value of -1 means to profile all ranks')

    args = parser.parse_args()
    # add for test: force-disable the scatter/gather pipeline optimization.
    args.scatter_gather_tensors_in_pipeline = False

    # Distributed args, taken from the launcher-provided environment
    # (fall back to single-process defaults when unset).
    args.rank = int(os.getenv('RANK', '0'))
    args.local_rank = int(os.getenv('LOCAL_RANK', '0'))
    args.world_size = int(os.getenv('WORLD_SIZE', '1'))
    _GLOBAL_ARGS = args
    return args


def get_args():
    """Return the globally stored argument namespace (None if not yet set)."""
    # Reading a module-level name needs no `global` declaration.
    return _GLOBAL_ARGS


def set_args_by_params(args):
    """Install an externally constructed namespace as the global args.

    Args:
        args: any object (typically an argparse.Namespace) to expose
            through get_args().
    """
    global _GLOBAL_ARGS  # rebinding the module-level singleton
    _GLOBAL_ARGS = args



# NOTE: the redundant mid-file `import argparse` was removed; argparse is
# already imported at the top of this module.
def _build_default_args():
    """Build the hard-coded default configuration namespace.

    Returns:
        argparse.Namespace: a fully populated configuration with defaults
        for the model, the parallelism search, and hardware profiling,
        plus distributed fields read from the environment.
    """
    args = argparse.Namespace(
        # ----- parameters from model_args -----
        model_size='llama-7b',
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        seq_length=128,
        vocab_size=30522,
        # ----- parameters from group.add_argument -----
        set_model_config_manually=1,
        set_layernum_manually=1,
        num_nodes=1,
        num_gpus_per_node=8,
        memory_constraint=24,
        min_bsz=1,
        max_bsz=1024,
        recommend_min_bsz=0,
        settle_bsz=64,
        settle_chunk=8,
        bsz_scale=8,
        search_space="full",
        disable_dp=0,
        disable_tp=0,
        disable_vtp=0,
        disable_pp=0,
        disable_sdp=0,
        disable_ckpt=0,
        disable_tp_consec=0,
        max_tp_deg=8,
        max_pp_deg=8,
        default_dp_type="ddp",
        embed_sdp=0,
        mixed_precision="bf16",
        pipeline_type="pipedream_flush",
        use_pipeline_costmodel=0,
        costmodel_coe=1.0,
        sequence_parallel=False,  # action="store_true" -> defaults to False
        make_vocab_size_divisible_by=128,
        fine_grained_mode=0,
        computation_mode="linear",
        # ----- parameters from parser.add_argument -----
        epoch=1,
        micro_batch=8,
        micro_batch_size=2,  # TODO: should be assigned from auto-parallel search results
        tensor_length=None,
        pipeline_model_parallel_size=8,
        tensor_model_parallel_size=1,
        virtual_pipeline_model_parallel_size=1,
        gpus_per_stage=1,
        nstages_per_node=1,
        distributed_backend="nccl",
        local_rank=None,
        fp16=None,  # originally typed as int; mutually exclusive with the action='store_true' variant
        nnodes=1,
        nproc_per_node=8,
        master_addr="10.90.1.237",
        master_port=6020,
        node_rank=0,
        prof_file=None,
        gpus_list=None,

        # ----- defaults from galvatron_profile_hardware_args -----
        max_tp_size=8,
        envs=["NCCL_DEBUG=WARN", "NCCL_IB_DISABLE=0", "NCCL_IB_HCA=mlx5_0,mlx5_1"],
        backend="torch",
        nccl_test_dir="../site_package/nccl-tests",
        mpi_path="/opt/mpi/",
        start_mb=16,
        end_mb=256,
        scale=2,
        hostfile="hostfile",
        avg_or_min_or_first="first",
        overlap_time_multiply=4,
    )
    # Reconcile duplicated names between the two argument sets.
    args.nnodes = args.num_nodes
    args.num_gpus_per_node = args.nproc_per_node
    args.tensor_length = args.seq_length
    # !
    args.micro_batch = args.settle_chunk

    # Carried over from the original "gees" code: add for test.
    args.scatter_gather_tensors_in_pipeline = False
    # Distributed args -- the original code had extra handling here, moved over as-is.
    args.rank = int(os.getenv('RANK', '0'))
    # node_rank duplicates rank; it is used by the search code.
    args.node_rank = args.rank
    args.local_rank = int(os.getenv('LOCAL_RANK', '0'))
    args.world_size = int(os.getenv('WORLD_SIZE', '-1'))

    # TODO: read these from a config file instead of hard-coding them.
    args.master_addr = "10.90.1.237"
    args.master_port = "6003"

    return args


def get_args_():
    """Build the hard-coded default args, install them globally, and return them.

    Side effect: overwrites the module-level ``_GLOBAL_ARGS`` singleton.
    """
    global _GLOBAL_ARGS
    _GLOBAL_ARGS = _build_default_args()
    return _GLOBAL_ARGS