import warnings

# * The memory estimate is computed symbolically (no real tensor allocation).


# Silence noisy framework warnings so the printed report stays readable.
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=FutureWarning)

from .theoretical_memory_usage import report_theoretical_memory
import argparse
from pathlib import Path
import rich
import os
from itertools import chain

# import torch
# torch.set_warn_always(False)

try:
    from megatron.training import get_args, initialize_megatron
except ModuleNotFoundError:
    # * Fallback so megatron can be imported from any install layout:
    #   `megatron.training` is missing from sys.path but `megatron.core` is
    #   importable (e.g. pip-installed megatron-core). Locate the repo root
    #   (two parents above megatron/core/), prepend it to sys.path, drop the
    #   cached partial `megatron` package, then re-import the full package.
    from pathlib import Path
    import megatron.core
    import sys

    sys.path.insert(0, str(Path(megatron.core.__file__).parents[2]))
    del sys.modules["megatron"]
    from megatron.training import get_args, initialize_megatron
# import mindspeed.megatron_adaptor


def predict_memory_offline(
    path_script: Path,
    str_list_env,
    str_line_args,
):
    """Write an executable shell script that runs the memory check offline.

    The generated script first emits the environment preamble, then invokes
    the ``theoretical_memory_usage`` module with the given argument lines.

    Args:
        path_script: Destination path of the generated script.
        str_list_env: Environment preamble written verbatim at the top of the
            script. Despite the name it must be a single ``str`` (it is passed
            straight to ``f.write``) — TODO confirm against callers.
        str_line_args: Command-line argument lines, one per entry; each is
            written on its own line of the script.

    TODO: replace this with an online mode that auto-loads the parameters and
    runs the memory calculation directly (see ``predict_memory_online``).
    """
    # * Convert this script's arguments into a runnable megatron command.
    from . import theoretical_memory_usage

    # * Build the command that runs the memory check.
    module_path = Path(theoretical_memory_usage.__file__).absolute()
    cmd_lines = [f"\npython {module_path} \\\n"]
    cmd_lines.extend(f"{str_line}\n" for str_line in str_line_args)

    with open(path_script, "w") as f:
        f.write(str_list_env)
        f.writelines(cmd_lines)
    # Add the execute bits without spawning a shell: `os.system(f"chmod +x {path}")`
    # breaks on paths containing spaces/shell metacharacters and is injection-prone.
    os.chmod(path_script, os.stat(path_script).st_mode | 0o111)
    rich.print(f"Memory prediction script saved to {path_script}")


def predict_memory_online(
    dict_env: dict[str, str],
    str_line_args: list[str],
    dev: bool = False,
):
    """Parse megatron-style launch args in-process and report theoretical memory.

    Args:
        dict_env: Environment-style values for the launch; ``"WORLD_SIZE"``
            is read when the parsed args lack ``world_size``.
        str_line_args: Lines of a launch command (may carry trailing ``\\``
            continuations); they are flattened into a single argv token list.
        dev: When True, additionally run the experimental YOCO calculator.
    """
    # * Convert this script's dict_args-style input into megatron parsed args.
    from megatron.training import arguments
    from . import theoretical_memory_usage

    # Reuse megatron's own argument groups so flag names/defaults stay in sync.
    parser = argparse.ArgumentParser()
    # Standard arguments.
    parser = arguments._add_network_size_args(parser)
    parser = arguments._add_regularization_args(parser)
    parser = arguments._add_training_args(parser)
    parser = arguments._add_initialization_args(parser)
    parser = arguments._add_learning_rate_args(parser)
    parser = arguments._add_checkpointing_args(parser)
    parser = arguments._add_mixed_precision_args(parser)
    parser = arguments._add_distributed_args(parser)
    parser = arguments._add_validation_args(parser)
    parser = arguments._add_data_args(parser)
    parser = arguments._add_autoresume_args(parser)
    parser = arguments._add_biencoder_args(parser)
    # parser = arguments._add_vision_args(parser)
    parser = arguments._add_moe_args(parser)
    # parser = arguments._add_logging_args(parser)
    parser = arguments._add_straggler_detector_args(parser)
    # parser = arguments._add_inference_args(parser)
    parser = arguments._add_transformer_engine_args(parser)
    parser = arguments._add_retro_args(parser)
    parser = arguments._add_experimental_args(parser)
    # parser = arguments._add_one_logger_args(parser)
    # parser = arguments._add_ft_package_args(parser)
    # parser = arguments._add_config_logger_args(parser)
    parser: argparse.ArgumentParser

    # * YOCO-specific flags (two spellings mapped onto one destination).
    group = parser.add_argument_group(title="YOCO")
    group.add_argument("--num-self-attn-layers", type=int, default=None)
    group.add_argument("--selfatten-layer", dest="num_self_attn_layers", type=int, default=None)

    # Flatten the command lines into argv tokens, stripping `\` continuations.
    list_args = list(chain(*[i.replace("\\", "").strip().split(" ") for i in str_line_args]))
    # * Only the parallelism knobs (tp / ep / pp / cp, etc.) must parse correctly.
    # todo temporary workaround for --tokenizer-type
    from . import parser_edit

    # list_args = change_list_args(list_args, arg_name="--tokenizer-type", value="Llama2Tokenizer")

    # parse_known_args: tolerate flags this trimmed parser does not know about.
    args, unknown_args = parser.parse_known_args(list_args)

    if not hasattr(args, "world_size"):
        # NOTE(review): dict_env values are str, so this stores WORLD_SIZE as a
        # string; downstream arithmetic likely needs int(...) — confirm against
        # parser_edit.validate_args.
        args.world_size = dict_env["WORLD_SIZE"]
    if not hasattr(args, "rank"):
        args.rank = 0
    args.data_path = None  # * irrelevant for the memory estimate
    os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "1"

    #! padded_vocab_size is required; try args first, else build a tokenizer.
    try_get_vocabsize(args, list_args)

    # * Validate/derive the remaining args for the memory check.
    # args = arguments.validate_args(args)
    args = parser_edit.validate_args(args)

    # args.tensor_model_parallel_size = 4
    # args.pipeline_model_parallel_size = 8
    # args.expert_model_parallel_size = 4
    # args.hidden_size = 1024
    # args.ffn_hidden_size = args.hidden_size
    # * Method 1: megatron's built-in theoretical report.
    report_theoretical_memory(args, verbose=True)
    # * Method 2: experimental YOCO-aware calculator.
    # # todo hard-coded for now
    if dev:
        from optimize_tools.memory import YOCO_Calculator

        print("=" * 50 + "\n\n\n")
        print("实验性功能——开发中")
        # Split total layers into self-attention vs cross layers for YOCO.
        n_self = args.num_self_attn_layers
        n_cross = args.num_layers - args.num_self_attn_layers
        YOCO_Calculator(args, num_layer_self=n_self, num_layer_cross=n_cross).report_theoretical_memory(verbose=True)


def try_get_vocabsize(args, list_args):
    """Ensure ``args.padded_vocab_size`` is populated (mutates ``args`` in place).

    Resolution order:
      1. keep an existing non-None ``args.padded_vocab_size``;
      2. fall back to ``args.vocab_size``;
      3. otherwise build the tokenizer, which derives the padded size on ``args``.

    Args:
        args: Parsed argument namespace.
        list_args: Raw argv tokens; consulted for ``--tokenizer-type`` /
            ``--tokenizer-model`` when newer megatron no longer parses them.
            Raises ValueError (from ``list.index``) if those flags are absent
            and the tokenizer fallback is reached.
    """
    # Guard clauses replace the original if/pass/else nesting;
    # getattr(..., None) covers both "attribute missing" and "attribute is None".
    if getattr(args, "padded_vocab_size", None) is not None:
        return

    if getattr(args, "vocab_size", None) is not None:
        args.padded_vocab_size = args.vocab_size
        return

    # * Last resort: obtain the size from a freshly built tokenizer.
    print("由于不存在 args.padded_vocab_size, 需要调用 megatron.training.tokenizer.tokenizer\n并通过构建 tokenizer 获取对应的数据")
    from megatron.training.tokenizer.tokenizer import build_tokenizer

    if not hasattr(args, "tokenizer_type"):  #! newer megatron no longer parses --tokenizer-type
        # Pull the values straight out of the raw argv tokens.
        args.tokenizer_type = list_args[list_args.index("--tokenizer-type") + 1]
        args.tokenizer_model = list_args[list_args.index("--tokenizer-model") + 1]
    build_tokenizer(args)
