from pathlib import Path
import os
import sys
import argparse
import math
from collections import OrderedDict
from shutil import rmtree
from typing import List, Tuple

# Directory containing this script; anchor for all relative paths below.
cwd = Path(__file__).parent

# Best-effort bootstrap: install missing third-party deps on first run.
# BUGFIX: narrowed the bare `except:` to `except ImportError:` — a bare except
# also swallows KeyboardInterrupt/SystemExit and unrelated errors.
try:
    import yaml
    import openpyxl
    import pandas as pd
    import rich
except ImportError:
    os.system(f"{sys.executable} -m pip install pyyaml openpyxl rich pandas")
    import yaml
    import openpyxl
    import pandas as pd
    import rich

try:
    import numpy as np
except ImportError:
    os.system(f"{sys.executable} -m pip install numpy==1.26.4")
    import numpy as np


# Where generated/loaded YAML config files live.
path_config_dir = cwd.joinpath("config")

# Spreadsheet flag glyphs: '√' turns a bare CLI switch on, '×' drops it.
FLAG_TRUE = "√"
FLAG_FALSE = "×"

# Well-known keys shared between the YAML config and the generated bash script.
STR_BASH_PROFILING_ARGS = "PROFILING_ARGS"
STR_PRETRAIN_PY = "train_filename"
STR_WORKSPACE = "workspace"
STR_OUTSIDE_SHELL = "list_outside_cmd"

parser = argparse.ArgumentParser(description="测试脚本生成工具", formatter_class=argparse.RawTextHelpFormatter)
# BUGFIX: "-n,--name" was a single option string literally named "-n,--name";
# short and long forms must be separate strings. Also dropped the dead
# default="None" — it is never used because the argument is required.
parser.add_argument("-n", "--name", dest="name", type=str, required=True, help="测试名称,需要和表格中的 NAME 词条对应")

parser.add_argument(
    "--table",
    type=str,
    default="test",
    help="""
    加载的表格, 只读取 table下的 xlsx 表格文件, 默认读取 [table/test.xlsx],
    e.g.1 --table test1 # 加载 table/test1.xlsx

    """,
)
parser.add_argument("--smi", action="store_true", help="是否开启 SMI 监控, 默认关闭")

parser.add_argument("--npu-blocking", action="store_true", help="是否开启 NPU 的 ASCEND_LAUNCH_BLOCKING, 默认关闭")
parser.add_argument(
    "--no-plog",
    action="store_true",
    help="""关闭更正 plog 功能。
        开启（默认）：\tplog 等级和路径由运行当前 py 脚本时的环境变量决定）
        关闭：      \tplog 等级和路径由每台机器的运行环境决定，脚本生成工具不写入任何关于 plog 相关的变量）
        """,
)

parser.add_argument("--dev", action="store_true", help="临时开发模式分支")
parser.add_argument("--reset", action="store_true", help="清空配置文件")

parser.add_argument(
    "--plus",
    nargs="+",
    default=["default_plus.sh"],
    help=f"""加载额外的命令脚本, 默认是 default_plus.sh, 输入参数时可以去掉.sh,
    e.g.1 --plus default_plus1 default_plus2 default_plus3

    note. 该参数指定的加载命令会在对应的 {STR_OUTSIDE_SHELL} 后执行

""",
)

parser.add_argument("--calc-memory", action="store_true", help="自动估计显存占用, 该功能在之后将成为一个默认选项")
parser.add_argument("--step", type=int, default=-1, help="控制当前脚本的训练运行步长，默认值为 -1，[STEP] 值表示使用原始参数中的给定值")

str_help_profile = """开启 profile, -p 0 表示开启等级为 0 的 profiling.
    若生成脚本时使用 -p，则生成的脚本将默认执行对应的 profiling 等级，生成脚本前必须知道要采集的卡号
    1. 优先从环境变量中读取 PROFILE_RANKS
    2. 其次读取 `config/default_profiling.yaml` 中的参数: PROFILE_RANKSs

    * NOTE: You can use `export PROFILE_RANKS` to change the profile rank choice,
    split by ',' and ' ' are both supported.

    e.g.1 `export PROFILE_RANKS=0,1,2,3`,

    * NOTE: bash syntax `seq x1 d x2` function is using like python syntax `range(x1, x2 + 1, d)`

    e.g.2 `export PROFILE_RANKS=$(seq 0 1 15)`,
    equal to `export PROFILE_RANKS=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 )`

    e.g.3 `export PROFILE_RANKS=$(seq 0 16 240)`,
    equal to `export PROFILE_RANKS=( 0 16 32 48 64 80 96 112 128 144 160 176 192 208 224 240 )`
    """
# BUGFIX: same as -n/--name — "-p,--profile" was one malformed option string.
parser.add_argument("-p", "--profile", dest="profile", type=int, default=-1, help=str_help_profile)

parser.add_argument(
    "--plog-level",
    type=int,
    default=-1,
    help="""设置应用类日志的日志级别及各模块日志级别，仅支持调试日志。
    取值为：
    0：对应DEBUG级别。
    1：对应INFO级别。
    2：对应WARNING级别。
    3：对应ERROR级别，默认为ERROR级别。
    4：对应NULL级别，不输出日志。
    其他值为非法值。
    """,
)

parser.add_argument("--ip-list", type=str, default=None, help="手动配置 多机的 IP 地址，example: --ip-list 10.104.149.180,10.104.149.200")

args = parser.parse_args()


class Default_Config:
    """Loads the YAML config files under ``config/``, creating them with
    built-in defaults on first run (then asking the user to edit and re-run)."""

    default_config_base = {
        "common": {STR_OUTSIDE_SHELL: ["auto/default_plus.sh"]},
        "GPU": {
            STR_PRETRAIN_PY: "pretrain_gpt.py",
            STR_WORKSPACE: "Megatron-LM/",  # * GPU scripts `cd` into this directory before running
            STR_OUTSIDE_SHELL: [],
        },
        "NPU": {
            STR_PRETRAIN_PY: "pretrain_gpt.py",
            STR_WORKSPACE: "Megatron-LM/",  # * NPU scripts `cd` into this directory before running
            STR_OUTSIDE_SHELL: [],
        },
    }
    # BUGFIX: the original literal listed "profile_with_stack" twice; duplicate
    # keys in a dict literal are silently collapsed (last one wins).
    default_config_profiling = {
        "profile_with_stack": True,
        "profile_start": 4,
        "profile_end": 5,
        "profile_with_memory": True,
        "profile_record_shapes": True,
        "profile_with_cpu": True,
        "profile_ranks": [0],
        "profile_path": "./profiling",
    }

    #! filename -> default content, used both for creation and for --reset
    default_config = {
        "generate_base.yaml": default_config_base,
        "generate_profiling.yaml": default_config_profiling,
    }

    def __init__(self):
        # `check` becomes False when any config file had to be generated from
        # defaults; in that case we stop and ask the user to edit it first.
        self.check = True
        if args.reset:
            self.reset()
            exit(0)

        self.config_base = self.load_yaml("generate_base.yaml")
        self.config_profiling = self.load_yaml("generate_profiling.yaml")

        if not self.check:
            print("首次运行, 初始化对应的配置文件, 请修改后再次执行")
            exit(0)

        # * base
        self.default_GPU = self.config_base["GPU"]
        self.default_NPU = self.config_base["NPU"]

        # * profiling
        self.default_profile_start = self.config_profiling["profile_start"]
        self.default_profile_end = self.config_profiling["profile_end"]
        self.default_profile_with_memory = self.config_profiling["profile_with_memory"]
        self.default_profile_with_stack = self.config_profiling["profile_with_stack"]
        self.default_profile_record_shapes = self.config_profiling["profile_record_shapes"]
        self.default_profile_with_cpu = self.config_profiling["profile_with_cpu"]
        self.default_profile_path = self.config_profiling["profile_path"]
        # Resolve the profiling path: relative paths are anchored at this script.
        if self.default_profile_path.startswith("."):
            self.default_profile_path = cwd.joinpath(self.default_profile_path).absolute()
        else:
            self.default_profile_path = Path(self.default_profile_path).absolute()

    def default_workspace(self, device_type: str) -> str:
        """Return the workspace directory configured for the given device constant
        (``MultiDevice_ShellSegment.NPU`` / ``.GPU``; resolved at call time)."""
        dict_device = {MultiDevice_ShellSegment.NPU: "NPU", MultiDevice_ShellSegment.GPU: "GPU"}
        return self.config_base[dict_device[device_type]][STR_WORKSPACE]

    def default_plus(self, device_type: str = None):
        """Return the list of extra shell scripts for the device (None -> common)."""
        dict_device = {MultiDevice_ShellSegment.NPU: "NPU", MultiDevice_ShellSegment.GPU: "GPU", None: "common"}
        return self.config_base[dict_device[device_type]][STR_OUTSIDE_SHELL]

    def reset(self):
        """Delete every generated config file so defaults get re-created next run."""
        for k in self.default_config:
            tmp_config = path_config_dir.joinpath(k)
            if tmp_config.is_file():
                # pathlib idiom instead of os.remove(path.__str__())
                tmp_config.unlink()
                rich.print("[yellow]DEL[/yellow] " + str(tmp_config))
        rich.print("[green]重置配置文件成功[/green]")

    def load_yaml(self, yaml_name: str):
        """Load ``config/<yaml_name>``.

        If the file is missing and a built-in default exists, write the default,
        clear ``self.check`` and return the default; if no default exists, raise
        ValueError.
        """
        default_config: dict = self.default_config.get(yaml_name)

        yaml_file = path_config_dir.joinpath(yaml_name)
        rich.print("[info] 加载配置文件: " + str(yaml_file))
        if not yaml_file.is_file():
            if default_config is not None:
                with open(yaml_file, "w") as f:
                    yaml.safe_dump(default_config, f, sort_keys=False)
                rich.print(f"[yellow]Warning[/yellow]: 没有找到配置文件: {yaml_file}, 生成默认配置文件")
                self.check = False
                return default_config
            else:
                rich.print(f"[red]ERROR[/red] 加载配置文件: {yaml_file} 失败")
                self.check = False
                raise ValueError(f"加载配置文件: {yaml_file} 失败")

        with open(yaml_file, "r") as f:
            return yaml.safe_load(f)


# Module-level singleton; loads (or first-creates) the YAML configs at import time.
default_config = Default_Config()


class MultiDevice_ShellSegment:
    """Generates the device-specific (NPU/GPU) fragments of the output script:
    plog redirection, profiling arguments, SMI monitoring and trailer commands."""

    # Device-type constants used throughout the generator.
    NPU = 0
    GPU = 1

    def __init__(self, device_type: int):
        self.device_type = device_type

        # Trailer appended at the very end of the generated script; grows when
        # e.g. SMI monitoring needs a cleanup `kill` command.
        self.str_cmd_last = "\n#! DO NOT DELETE THIS LINE\n"

    def generate_plog_shell(self, path_save_dir: Path) -> str:
        """Emit shell lines redirecting Ascend plog output under *path_save_dir*.

        Returns "" when --no-plog is set; GPU devices only get the directory setup.
        """
        if args.no_plog:
            return ""
        dict_plog_level = {
            0: "DEBUG",
            1: "INFO",
            2: "WARNING",
            3: "ERROR",
            4: "NULL",
        }
        str_cmd = ""
        # * By default, redirect plog into the run's log directory.
        ASCEND_GLOBAL_LOG_LEVEL = None
        ASCEND_PROCESS_LOG_PATH = path_save_dir.joinpath("plog")

        str_cmd += f"mkdir -p {ASCEND_PROCESS_LOG_PATH}\n"
        str_cmd += f"rm -rf {ASCEND_PROCESS_LOG_PATH}/*\n"
        # BUGFIX: dropped the original `mkdir -p {path}/*` line — right after the
        # `rm -rf` the glob matches nothing, so bash created a directory literally
        # named '*'.
        str_cmd += f"export ASCEND_PROCESS_LOG_PATH={ASCEND_PROCESS_LOG_PATH}/rank_$RANK\n"
        if self.device_type == self.NPU:
            assert args.plog_level in [-1, 0, 1, 2, 3, 4], "plog_level 参数错误, 只能是 -1, 0, 1, 2, 3, 4"
            if args.plog_level != -1:
                # * The CLI argument takes precedence over the environment variable.
                ASCEND_GLOBAL_LOG_LEVEL = args.plog_level
                print(f"\033[1;33m警告！设置了固定的 plog 等级为 {args.plog_level}({dict_plog_level[args.plog_level]})\033[0m")
            else:
                ASCEND_GLOBAL_LOG_LEVEL = os.getenv("ASCEND_GLOBAL_LOG_LEVEL")

            if ASCEND_GLOBAL_LOG_LEVEL is not None:
                # BUGFIX: added the trailing newline — the export previously had
                # none, gluing the next generated line onto it.
                str_cmd += f"export ASCEND_GLOBAL_LOG_LEVEL={ASCEND_GLOBAL_LOG_LEVEL}\n"
        elif self.device_type == self.GPU:
            # GPUs have no plog level; nothing beyond the directory setup above.
            pass

        return str_cmd

    def generate_profiling_shell(self):
        """Build the PROFILING_ARGS bash-array snippet and the flag dict.

        Returns ``(shell_snippet, dict_profile_env)``; with profiling disabled
        (``--profile -1``) returns ``("\\n", {})``.
        """
        assert args.profile in [-1, 0, 1, 2], "profile 参数错误, 只能是 -1, 0, 1, 2"
        dict_profile_env = {}
        if args.profile == -1:
            return "\n", {}

        str_pre_env = "\n\n###### generate_profiling_shell ######\n"

        dict_profile_env["--profile"] = FLAG_TRUE
        dict_profile_env["--profile-step-start"] = default_config.default_profile_start
        dict_profile_env["--profile-step-end"] = default_config.default_profile_end

        if self.device_type == self.NPU:
            dict_profile_env["--profile-level"] = "level" + str(args.profile)
            dict_profile_env["--profile-save-path"] = "$PROFILE_PATH"
            dict_profile_env["--profile-with-stack"] = FLAG_TRUE if default_config.default_profile_with_stack else FLAG_FALSE
            dict_profile_env["--profile-with-memory"] = FLAG_TRUE if default_config.default_profile_with_memory else FLAG_FALSE
            dict_profile_env["--profile-with-cpu"] = FLAG_TRUE if default_config.default_profile_with_cpu else FLAG_FALSE
            dict_profile_env["--profile-record-shapes"] = FLAG_TRUE if default_config.default_profile_record_shapes else FLAG_FALSE

        str_env_profile = os.getenv("PROFILE_RANKS", None)
        if str_env_profile is not None:  #! the environment variable wins
            print("\033[1;33m手动配置了 PROFILE_RANKS\033[0m")
            try:
                if "," in str_env_profile:
                    dict_profile_env["--profile-ranks"] = " ".join(str_env_profile.split(","))
                else:
                    dict_profile_env["--profile-ranks"] = str_env_profile.replace("\n", " ")
            except Exception:  # narrowed from a bare `except:`
                rich.print(f"PROFILE_RANKS 环境变量设置错误: {str_env_profile}, 需要以 [bold][green]逗号[/green][/bold] 或 [bold][green]空格[/green][/bold] 分隔")
                raise ValueError("PROFILE_RANKS 环境变量设置错误, 请检查")

        else:  #! otherwise fall back to the config file
            assert "profile_ranks" in default_config.config_profiling, "没有找到配置文件中的 profile_ranks 参数"
            assert len(default_config.config_profiling["profile_ranks"]) > 0, "配置文件中的 profile_ranks 长度应当大于 0"
            dict_profile_env["--profile-ranks"] = " ".join(map(str, default_config.config_profiling["profile_ranks"]))

        # Serialize the dict as a bash array; FLAG_TRUE -> bare switch,
        # FLAG_FALSE -> omitted, anything else -> "key value".
        str_cmd = STR_BASH_PROFILING_ARGS + "=(\n"
        for k, v in dict_profile_env.items():
            if v == FLAG_TRUE:
                str_cmd += f"{k} \n"
            elif v == FLAG_FALSE:
                pass
            else:
                str_cmd += f"{k} {v} \n"
        str_cmd += ")\n"

        return str_pre_env + str_cmd, dict_profile_env

    def generate_smi_shell(self, path_save_dir: Path, nnodes=1, nproc_per_node=1):
        """
        Must be used after DISTRIBUTED_ARGS is generated.

        When --smi is set, returns the SMI-recording snippet and queues a
        `kill` for the recorder process in the script trailer.
        """
        path_save_dir = path_save_dir.joinpath("smi")

        str_cmd = ""
        #! enable SMI monitoring
        if args.smi:
            print("\033[1;33m开启了 SMI 监控\033[0m")
            from optimize_tools.utils.smi_record import SmiRecord

            smi = SmiRecord(args.name, self.device_type, path_save_dir.joinpath("smi_${RANK}.log"))
            str_cmd = smi.generate_simple_implement()
            # todo simplest possible implementation: the recorder PID has to be
            # captured and the process killed at the end of the script.
            self.str_cmd_last += f"\nkill -9 ${{smi_pid}}\n"
        return str_cmd

    def generate_last_cmd(self) -> str:
        """If you use DSS, there might be some last command to run"""
        return self.str_cmd_last

    @property
    def is_npu(self) -> bool:
        return self.device_type == self.NPU

    @property
    def is_gpu(self) -> bool:
        return self.device_type == self.GPU

    def check_type(self, support_device_type: str) -> bool:
        """Return True when this segment's device matches the declared scope
        ("both", "npu" or "gpu"); raise ValueError for anything else."""
        # BUGFIX: normalize once — the original membership test was case-sensitive
        # even though the comparisons below called .lower(), so e.g. "NPU" raised
        # instead of matching. All previously-accepted inputs behave identically.
        kind = support_device_type.lower()
        if kind not in ["both", "npu", "gpu"]:
            raise ValueError(f"参数类型设置错误: {support_device_type}, 只能是 both, npu, gpu")
        if kind == "both":
            return True
        if kind == "npu" and self.is_npu:
            return True
        if kind == "gpu" and self.is_gpu:
            return True
        return False


class Test_generate:
    """Reads one test case (a column) from the Excel sheet and generates a
    runnable training shell script from it.

    Sheet layout (observed from the code): column 0 holds parameter names,
    column 1 default values, column 2 a device-type tag ("both"/"npu"/"gpu");
    each further column is one named test case. Rows named "NAME" and "DEVICE"
    select the case and the device type.
    """

    def __init__(self, path_table: Path):
        self.__path_table = path_table
        self.table = pd.read_excel(self.__path_table)

        # Fixed sheet columns: names / defaults / device-type tags.
        self.first_column = self.get_columns(0)
        self.default_column = self.get_columns(1)
        self.argtype_column = self.get_columns(2)

        self.idx_row_name = list(self.first_column).index("NAME")
        self.idx_row_device = list(self.first_column).index("DEVICE")

        # Row of case names, used by resolve_paramters to locate a case column.
        self.row_name = self.table.iloc[self.idx_row_name]

        self.str_device_type: str = None
        # Key shell variables written (unexported) at the top of the script.
        self.env_paramters = {
            "TP": 1,
            "CP": 1,
            "PP": 1,
            "EP": 1,
            "NPROC_PER_NODE": 0,
            "WORLD_SIZE": 0,
        }
        self.export_env_paramters = {}
        # torchrun arguments, rendered into the DISTRIBUTED_ARGS bash array.
        self.distribute_paramters = OrderedDict(
            {
                "--nnodes": "$NNODES",
                "--master_addr": "$MASTER_ADDR",
                "--master_port": "$MASTER_PORT",
                "--nproc_per_node": "$NPROC_PER_NODE",
                "--node_rank": "$RANK",
            }
        )

        self.cmd_export_env = "\n\n"
        self.env_profile = {}

    def augment_filter(self, index):
        """Parse the case column at *index* into python CLI args and shell env
        vars, then build the cmd_env / cmd_export_env / profiling snippets."""
        # self.dict_args_index = {}
        # self.dict_args_others_index = {}

        list_value = self.get_columns(index)

        """
        通过 list_filter 过滤出符合条件的测试
        """
        # Grouped view of parsed entries; a "[group]" row in the sheet switches
        # which group subsequent entries land in.
        self.dict_grouped_args = OrderedDict({})
        ungrouped = self.dict_grouped_args["ungrouped"] = {}
        the_group = ungrouped

        self.dict_py_paramters = {}
        self.dict_export_env_paramters = {}

        self.list_py_paramters = []

        def record_value(dict_param, key, value, value_default):
            # Empty cells (NaN) fall back to the sheet's default column.
            if str(value) == "nan":
                # NOTE(review): `value_default != math.nan` is ALWAYS True
                # (NaN != NaN), so this assert can never fire; it was probably
                # meant to be `str(value_default) != "nan"` to reject entries
                # that are required but have no default.
                assert value_default != math.nan, f"{key} 默认值为空，该词条为必须"
                value = value_default
            dict_param[key] = value
            return value
            # NOTE(review): everything below this `return` is unreachable
            # debug leftovers — consider deleting it.
            """
            #! debug 使用代码
            """
            pd.set_option("display.max_rows", 200)

            table = self.table[self.table.columns[index]]
            line_recompute_num_layers = list(self.first_column).index("--recompute-num-layers")
            list(self.table.iloc[line_recompute_num_layers])

            self.table.iloc[:, index + 1][line_recompute_num_layers]
            self.table.iloc[:, index][line_recompute_num_layers]
            self.table.iloc[:, index - 1][line_recompute_num_layers]
            self.table.iloc[:, index - 2][line_recompute_num_layers]
            print(self.table)

        for i, arg_name in enumerate(self.first_column):
            if not isinstance(arg_name, str):
                continue
            arg_name = arg_name.strip()
            v_type = self.argtype_column[i]
            # * Occasionally (rare, root cause never found) pandas reads a 1 as
            # True; converted below — only the '√' glyph means True.
            value = list_value[i]

            value_default = self.default_column[i]

            # "[group]" rows open a new group for the entries that follow.
            if arg_name.startswith("[") and arg_name.endswith("]"):
                the_group = self.dict_grouped_args[arg_name] = {}
            if str(v_type) != "nan" and self.dss.check_type(v_type):  #! check which device type the entry belongs to
                if arg_name.startswith("--"):
                    # "--xxx" rows become python CLI arguments.
                    if value == True:
                        rich.print(f"[yellow][WARNING][/yellow] 词条[{arg_name}] 发现了 True 值，已经转换为 1 (只有符号 '√' 代表 True)")
                        value = 1
                    # self.dict_args_index[arg_name] = i
                    the_group[arg_name] = record_value(self.dict_py_paramters, arg_name, value, value_default)
                else:
                    # Everything else becomes a shell variable.
                    # self.dict_args_others_index[arg_name] = i
                    the_group[arg_name] = record_value(self.dict_export_env_paramters, arg_name, value, value_default)
            else:
                pass

        #! =================================================================================================
        # * Build the python CLI argument list.
        # * Optionally override the training step count from --step.
        if args.step != -1:
            assert args.step > 0, "训练步长必须大于 0"
            print("\033[1;33m警告！设置了固定的训练步长\033[0m")
            self.dict_py_paramters["--train-iters"] = args.step
            self.dict_py_paramters["--lr-decay-iters"] = args.step

        # FLAG_TRUE -> bare switch, FLAG_FALSE -> omitted, else "key value".
        for k, v in self.dict_py_paramters.items():
            #     str_arg = self.default_column[v]
            if v == FLAG_TRUE:
                self.list_py_paramters.append(k + " \\")
            elif v == FLAG_FALSE:
                pass
            else:
                self.list_py_paramters.append(f"{k} {v} \\")

        rich.print(self.dict_py_paramters)

        #! =================================================================================================
        # * Build the key (unexported) environment variables.
        self.cmd_env = "\n\n"
        self.cmd_env += '# 避免环境变量中没有 RANK\nif [[ x"$RANK" == x ]];then RANK=0; fi\n'
        for k, v in self.dict_export_env_paramters.items():
            if k in self.env_paramters:
                self.env_paramters[k] = v

        # NOTE(review): assumes the sheet always contains rows named "单机卡数"
        # (cards per node) and "机器数量" (node count); a missing row raises
        # KeyError here — confirm against the sheet template.
        self.env_paramters["NPROC_PER_NODE"] = self.dict_export_env_paramters["单机卡数"]
        self.env_paramters["NNODES"] = self.dict_export_env_paramters["机器数量"]
        self.env_paramters["WORLD_SIZE"] = self.env_paramters["NPROC_PER_NODE"] * self.env_paramters["NNODES"]
        self.env_paramters["PROFILE_PATH"] = default_config.default_profile_path.joinpath(self.name).absolute().__str__()

        self.recommand_log_path = cwd.joinpath("log").joinpath(self.name).absolute()
        path_log = self.recommand_log_path.joinpath("rank_${RANK}.log").absolute()
        path_log.parent.mkdir(exist_ok=True, parents=True)
        # * Keep the log variable last (it depends on RANK).
        self.env_paramters["LOG_PATHNAME"] = path_log.__str__()

        # Single-node runs get a fixed localhost rendezvous; multi-node values
        # are expected to come from the environment or --ip-list.
        if self.env_paramters["NNODES"] == 1:
            self.env_paramters["MASTER_ADDR"] = "localhost"
            self.env_paramters["MASTER_PORT"] = "6000"
        else:
            pass

        for k, v in self.env_paramters.items():
            self.cmd_env += f"{k}={v}\n"

        rich.print(self.env_paramters)

        #! =================================================================================================
        # NOTE(review): this reassignment discards anything put into
        # export_env_paramters earlier — in particular ASCEND_LAUNCH_BLOCKING
        # set by resolve_paramters for --npu-blocking — likely a bug.
        self.export_env_paramters = {}

        str_list_num_device = ",".join(map(str, range(int(self.env_paramters["NPROC_PER_NODE"]))))
        self.export_env_paramters["ASCEND_RT_VISIBLE_DEVICES"] = str_list_num_device
        self.export_env_paramters["CUDA_VISIBLE_DEVICES"] = str_list_num_device
        # * Extra export-env entries are collected into the dict, then exported together.
        list_extra_env = self.dict_export_env_paramters["EXTRA_EXPORT_ENV"]
        if str(list_extra_env) != "nan":
            list_extra_env = list_extra_env.strip().split("\n")
            for arg_name in list_extra_env:

                if arg_name.startswith("export"):
                    arg_name = arg_name.replace("export", "").strip()
                # todo only the KEY=VALUE form is handled; space-separated
                # assignments are not supported here.
                assert "=" in arg_name, f"这里只处理了产生等号的情况，有时候变量中用空格分隔，这里没有处理"
                l = arg_name.split("=")
                k = l[0]
                # Keep everything after the first '=' (values may contain '=').
                v = arg_name[len(k) + 1 :]
                self.export_env_paramters[k] = v

        for k, v in self.export_env_paramters.items():
            self.cmd_export_env += f"export {k}={v}\n"
        self.cmd_export_env += "\n\n"
        rich.print(self.export_env_paramters)
        #! =================================================================================================
        self.cmd_profile, self.env_profile = self.dss.generate_profiling_shell()
        rich.print(self.env_profile)

    def resolve_paramters(self, name):
        """Locate the case column named *name*, build the device shell-segment
        helper, then delegate to augment_filter for the actual parsing."""
        self.name = name

        list_flag_hit = self.row_name == name
        if sum(list_flag_hit) != 1:
            if sum(list_flag_hit) == 0:
                raise ValueError(f"没有找到测试名称为{name}的测试")
            else:
                raise ValueError(f"找到多个测试名称为{name}的测试, {np.where(list_flag_hit==True)[0].tolist()} ")
        else:
            pass

        index = list(list_flag_hit).index(True)
        self.str_device_type = self.table.iloc[self.idx_row_device, index]
        if self.str_device_type.lower().startswith("npu"):
            self.dss = MultiDevice_ShellSegment(MultiDevice_ShellSegment.NPU)

        elif self.str_device_type.lower().startswith("gpu"):
            self.dss = MultiDevice_ShellSegment(MultiDevice_ShellSegment.GPU)
        else:
            raise ValueError(f"设备类型[DEVICE]没有注明错误: {self.str_device_type}, 必须是 npu 或者 gpu 开头(不区分大小写)")
        self.device_type = self.dss.device_type
        # todo move this logic into the DSS class later
        if args.npu_blocking:
            print("\033[1;33m警告！开启了 ASCEND_LAUNCH_BLOCKING\033[0m")
            # NOTE(review): this value is wiped by `self.export_env_paramters = {}`
            # inside augment_filter below, so the flag never reaches the script.
            self.export_env_paramters["ASCEND_LAUNCH_BLOCKING"] = 1

        self.augment_filter(index)

        #! =================================================================================================

    def get_columns(self, column_index: int) -> pd.Series:
        """Return the sheet column at *column_index* as a pandas Series."""
        return self.table[self.table.columns[column_index]]

    def generate_multi_machine_cmd(self):
        """Emit bash that derives RANK/MASTER_ADDR from --ip-list; empty string
        when --ip-list is not given."""
        str_cmd = ""
        # * Print the launch-command hint.
        if args.ip_list is not None:
            print("手动配置多机通讯命令")
            print("\033[1;33m注意，手动配置多机通讯命令后，task 启动命令会失效！\033[0m")
            list_ip = args.ip_list.split(",")
            print(f"IP 地址: {list_ip}")
            str_cmd = f"""
list_ip=({" ".join(list_ip)})
LOCAL_HOST=$(hostname -I | awk '{{print $1}}')
for i in "${{!list_ip[@]}}";do
    if [[ $LOCAL_HOST == ${{list_ip[$i]}} ]];then
        RANK=$i
        echo -e "\\033[33m当前节点的 RANK: $RANK\\033[0m"
        break
    fi
done
if [[ x"$RANK" == x ]];then
    echo -e "\\033[31m没有找到当前节点的 RANK\\033[0m"
    exit 1
fi
MASTER_PORT=${{MASTER_PORT:-"6000"}}
MASTER_ADDR=${{list_ip[0]}}
\n"""

        return str_cmd

    def generate_shell(self) -> Path:
        """Assemble and write the final train_<name>.sh script; returns its path.

        Must be called after resolve_paramters (relies on self.dss / cmd_env /
        cmd_export_env / cmd_profile / list_py_paramters).
        """
        base_name = f"train_{self.name}.sh"
        cmd_outside_shell = r"""#!/bin/bash
##### source outside cmd  #####
shell_pwd=$(dirname "$(readlink -f "$0")")
shell_name=$(basename "$0")

function check_and_run_cmd(){
    if [[ -f "$1" ]];then
        source $1
    else
        echo -e "\033[31mNot Found $1\033[0m"
        exit 1
    fi
}
"""

        def check_plus_shell(list_path):
            # Turn each extra-script name (".sh" appended if missing) into a
            # guarded `source` call relative to the generated script.
            str_cmd = ""
            for s in list_path:
                if not s.endswith(".sh"):
                    s += ".sh"
                str_cmd += "check_and_run_cmd $shell_pwd/../" + s + "\n"

            return str_cmd

        # Order matters: common plus-scripts, then device-specific, then --plus.
        cmd_outside_shell += check_plus_shell(default_config.default_plus())
        cmd_outside_shell += check_plus_shell(default_config.default_plus(self.device_type))
        cmd_outside_shell += check_plus_shell(["auto/" + s for s in args.plus])

        cmd = f"\n\ncd {default_config.default_workspace(self.device_type)} \n"
        cmd += r"torchrun ${DISTRIBUTED_ARGS[@]} "

        if self.dss.is_npu:
            shell_export_path = cwd.joinpath(f"npu_auto").joinpath(base_name)
            cmd += default_config.default_NPU[STR_PRETRAIN_PY] + " \\\n"
            # cmd_outside_shell += f". $shell_pwd/../auto/env_check.sh 2>&1 | tee {self.recommand_log_path.joinpath('env.log')} \n"

        elif self.dss.is_gpu:
            shell_export_path = cwd.joinpath(f"gpu_auto").joinpath(base_name)
            cmd += default_config.default_GPU[STR_PRETRAIN_PY] + " \\\n"

        cmd_dist = ""
        cmd_dist += "DISTRIBUTED_ARGS=(\n" + "\n".join([f"{k} {v}" for k, v in self.distribute_paramters.items()]) + "\n)\n"

        cmd += "\n".join(self.list_py_paramters)
        cmd += "\n${" + STR_BASH_PROFILING_ARGS + "[@]} \\"
        cmd += "\n2>&1 | tee $LOG_PATHNAME \n"

        cmd_logger_echo = r"""
echo -e "\033[33m日志保存在: $LOG_PATHNAME\033[0m"

if [[ x"$flag_profiling" == x1 ]];then
    echo -e "\033[33m采集保存在: $PROFILE_PATH\033[0m"
    du -sh $PROFILE_PATH/*
fi
"""

        shell_export_path.parent.mkdir(exist_ok=True)

        with open(shell_export_path, "w") as f:

            f.write("#!/bin/bash\n")
            f.write(self.cmd_env)

            f.write(self.cmd_export_env)

            f.write(cmd_outside_shell)

            f.write(self.cmd_profile)

            # * Env-detection snippet for manually-configured multi-machine runs;
            # emits nothing when --ip-list is not used.
            f.write(self.generate_multi_machine_cmd())

            f.write(cmd_dist)  # * torchrun distributed arguments
            f.write(
                self.dss.generate_smi_shell(
                    self.recommand_log_path,
                    nnodes=self.env_paramters["NNODES"],
                    nproc_per_node=self.env_paramters["NPROC_PER_NODE"],
                )
            )
            f.write(
                self.dss.generate_plog_shell(
                    self.recommand_log_path,
                )
            )

            f.write(cmd)  #! the actual training command

            f.write("\n\n\n")
            f.write(cmd_logger_echo)
            f.write(self.dss.generate_last_cmd())

        os.system(f"chmod +x {shell_export_path}")
        print(f"\033[1;32m生成脚本成功: \033[0;33m{shell_export_path.absolute()}\033[0m")
        self.generated_shell_path = shell_export_path
        return self.generated_shell_path

    def print_boss_run_cmd(self):
        """Print the BOSS-specific multi-machine `task submit` launch command."""
        cmd_log = Path(self.env_paramters["LOG_PATHNAME"]).with_name(r"cmd_rank_\${RANK}.log")
        cmd_log.parent.mkdir(exist_ok=True)

        # if self.env_paramters["WORLD_SIZE"] != 1:
        run_cmd = f"""
task submit \
    --aflow_queue_name queue-lab-157 \
    --image_tag harbor.weizhipin.com/arsenal-ai/ascend:cann8.0.rc3-mindspeed-cls-pta6.0.rc3-1212 \
    --worker_num {int(self.env_paramters["NNODES"])-1} \
    --master_cmd "{self.generated_shell_path.absolute()} 2>&1 | tee {cmd_log}" \
    --description "BOSS YOCO pretrain, case name: [{self.name}]" \
    --master_memory 1600Gi
        """
        #    --toleration "weizhipin.com/failure" \

        print(f"\033[1;32m运行命令: \n\033[0;33m{run_cmd}\033[0m")


if __name__ == "__main__":
    # Load the requested sheet and generate the shell script for --name.
    path_test_excel = Path(f"table/{args.table}.xlsx")
    test_generate = Test_generate(path_test_excel)

    test_generate.resolve_paramters(args.name)
    generated_shell_path = test_generate.generate_shell()

    if args.calc_memory:
        # Imported lazily: only needed when --calc-memory is requested.
        from optimize_tools.utils.memory_predict import predict_memory_online

        predict_memory_online(
            {**test_generate.dict_export_env_paramters, **test_generate.env_paramters},
            test_generate.list_py_paramters,
            args.dev,
        )

    test_generate.print_boss_run_cmd()
