# from llama_megatronDp import pretrain
# from llama_new import pretrain
from llama import pretrain
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args,set_args
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.auto_parallel.auto_parallel_apply import search_optimal_configuration


if __name__ == "__main__":

    # TODO: provide two launch scripts — one driven by torchrun and one by
    #   plain python. torchrun covers the original training script as well as
    #   the script inside the auto-parallel BaseLaunch; the planned approach
    #   is to add an extra --analyseAutoParallel flag to the BaseLaunch script.
    set_args()
    args = get_args()

    if args.auto_parallel:
        # Plain-python path: run the auto-parallel analysis pass.
        args.analyseAutoParallel = True
        # Search returns a parallelization config when auto-parallel is on.
        optimal_config = search_optimal_configuration(args)
        # TODO: update the fields of `args` from `optimal_config`, then
        #   launch the full training run through BaseLaunch.
    else:
        # torchrun path: auto-parallel disabled, start training directly.
        pretrain()