import argparse
import os
import yaml
from rknn.api import RKNN


def parse_model_config(config_path):
    """Read a YAML model-conversion config file and return the parsed mapping.

    The raw YAML text is echoed to stdout before parsing so the user can see
    exactly which configuration is being applied.
    """
    with open(config_path, "r", encoding="utf-8") as cfg_file:
        raw_yaml = cfg_file.read()
        print(raw_yaml)
    # NOTE(review): FullLoader can construct arbitrary Python objects from
    # tagged YAML; prefer yaml.safe_load if configs may come from untrusted
    # sources.
    return yaml.load(raw_yaml, Loader=yaml.FullLoader)


def str2bool(v):
    """Parse a truthy/falsy command-line or config value.

    Accepts real booleans (returned unchanged), the usual yes/no spellings
    (case-insensitive), and the literal string "debug".

    Args:
        v: A bool or a string to interpret.

    Returns:
        True, False, or the string "debug".

    Raises:
        argparse.ArgumentTypeError: for any unrecognized value.
    """
    if isinstance(v, bool):
        return v
    value = v.lower()
    # "True"/"False" entries from the original tuples were dead: v.lower()
    # can never yield a capitalized spelling.
    if value in ("yes", "true", "t", "y", "1"):
        return True
    if value in ("no", "false", "f", "n", "0"):
        return False
    # BUG FIX: the original used `v.lower() in ("debug")` — parentheses
    # without a comma are not a tuple, so this was a substring test against
    # the string "debug" and values like "e", "b" or "bug" matched too.
    if value == "debug":
        return "debug"
    raise argparse.ArgumentTypeError("Boolean value expected.")


def convert_model(
    Config_path,
    Target_platform,
    Output_dir,
    Eval_perf_memory=False,
    Accuracy_analysis=False,
    Verbose=False,
    Device_id=None,
):
    """Convert an ONNX model to RKNN format driven by a YAML config file.

    Values supplied on the command line take priority over the config file
    ("configuration priority: command-line input > file config").

    Args:
        Config_path: Path to the YAML config file (must exist).
        Target_platform: Target chip (e.g. "rk3588"); when None, falls back
            to the config file's "target_platform" entry.
        Output_dir: Directory for the exported .rknn file; when None, falls
            back to the config file's "output_dir" entry.
        Eval_perf_memory: When truthy, run on-device performance and memory
            evaluation after export (requires a connected board).
        Accuracy_analysis: Path to an accuracy-analysis dataset file, or
            None. When not None, accuracy analysis is forced on.
            NOTE(review): the default is False, but the code below tests
            `accuracy_analysis is None`; with the default value the else
            branch runs and `os.path.exists(False)` is evaluated. The
            __main__ caller always passes None or a string — confirm the
            default should be None.
        Verbose: Forwarded to RKNN(verbose=...) for detailed logging.
        Device_id: adb device id for multi-device setups, or None.

    Returns:
        None on success, -1 on any validation/conversion error.
        NOTE(review): some on-device failure paths call exit(ret) instead of
        returning, which terminates the whole process.
    """

    config_path = Config_path
    target_platform = Target_platform
    output_dir = Output_dir
    eval_perf_memory = Eval_perf_memory
    accuracy_analysis = Accuracy_analysis
    verbose = Verbose
    device_id = Device_id

    # Check that the config file exists
    if not (os.path.exists(config_path)):
        print("model config %s not exists!" % config_path)
        return -1

    config_dir = os.path.abspath(os.path.dirname(config_path))

    # Load the detailed model-conversion configuration from the YAML file
    print("=============模型转换配置================")
    model_configs = parse_model_config(config_path)

    model = model_configs["models"]

    # Make sure the config actually contains a model section
    if model is None:
        print("Error: No valid model config in %s !\n" % config_path)
        return -1

    # Completeness checks; priority: command-line input > file config
    if target_platform is None:
        target_platform = model.get("target_platform", None)
        if target_platform is None:
            print("Error: No specified target platform in %s !\n" % config_path)
            return -1

    if output_dir is None:
        output_dir = model.get("output_dir", None)
        if output_dir is None:
            print("Error: No specified output dir in %s !\n" % config_path)
            return -1
    # NOTE(review): os.mkdir fails when intermediate directories are
    # missing — os.makedirs(output_dir, exist_ok=True) may be intended.
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    model_file_path = model.get("model_file_path", None)
    if model_file_path is None:
        print("Error: No specified origin model file path  in %s !\n" % config_path)
        return -1

    # Source framework of the original model; only ONNX is supported.
    platform = model.get("platform", None)
    if platform is None:
        print("Error: No specified origin platform in %s !\n" % config_path)
        return -1
    elif not platform == "onnx":
        print("Error: Only onnx to rknn supported !")
        return -1

    # Check that a model name is given; it is used to name the output files
    model_name = model.get("model_name", None)
    if model_name is None:
        print("Error: No specified model name in %s !\n" % config_path)
        return -1

    # Print a summary of the effective configuration
    print("=========================================")
    print("convert_model:")
    print("  config_path=%s" % config_path)
    print("  config_dir=%s" % str(config_dir))
    print("  out_dir=%s" % output_dir)
    print("  target_platform=%s" % str(target_platform))
    print("=========================================")

    # Create the RKNN instance
    rknn = RKNN(verbose=verbose)

    # Configure the RKNN instance.
    # Collect only the non-default parameters into a dict so rknn.config()
    # keeps its own defaults for everything unspecified.
    # NOTE(review): assumes model["configs"] exists — a config file without
    # a "configs" mapping raises KeyError here.
    rknn_config_params = {}

    # For each option, pass it through only when it differs from the default
    mean_values = model["configs"].get("mean_values", None)
    if mean_values is not None:
        rknn_config_params["mean_values"] = mean_values

    std_values = model["configs"].get("std_values", None)
    if std_values is not None:
        rknn_config_params["std_values"] = std_values

    quantized_dtype = model["configs"].get("quantized_dtype", "w8a8")
    if quantized_dtype != "w8a8":
        rknn_config_params["quantized_dtype"] = quantized_dtype

    quantized_algorithm = model["configs"].get("quantized_algorithm", "normal")
    if quantized_algorithm != "normal":
        rknn_config_params["quantized_algorithm"] = quantized_algorithm

    quantized_method = model["configs"].get("quantized_method", "channel")
    if quantized_method != "channel":
        rknn_config_params["quantized_method"] = quantized_method

    rgb2bgr = str2bool(model["configs"].get("quant_img_RGB2BGR", False))
    if rgb2bgr:
        rknn_config_params["quant_img_RGB2BGR"] = rgb2bgr

    float_dtype = model["configs"].get("float_dtype", "float16")
    if float_dtype != "float16":
        rknn_config_params["float_dtype"] = float_dtype

    optimization_level = model["configs"].get("optimization_level", 3)
    if optimization_level != 3:
        rknn_config_params["optimization_level"] = optimization_level

    custom_string = model["configs"].get("custom_string", None)
    if custom_string is not None:
        rknn_config_params["custom_string"] = custom_string

    remove_weight = str2bool(model["configs"].get("remove_weight", False))
    if remove_weight:
        rknn_config_params["remove_weight"] = remove_weight

    compress_weight = str2bool(model["configs"].get("compress_weight", False))
    if compress_weight:
        rknn_config_params["compress_weight"] = compress_weight

    single_core_mode = str2bool(model["configs"].get("single_core_mode", False))
    if single_core_mode:
        rknn_config_params["single_core_mode"] = single_core_mode

    dynamic_input = model["configs"].get("dynamic_input", None)
    if dynamic_input is not None:
        rknn_config_params["dynamic_input"] = dynamic_input

    model_pruning = str2bool(model["configs"].get("model_pruning", False))
    if model_pruning:
        rknn_config_params["model_pruning"] = model_pruning

    op_target = model["configs"].get("op_target", None)
    if op_target is not None:
        rknn_config_params["op_target"] = op_target

    quantize_weight = str2bool(model["configs"].get("quantize_weight", False))
    if quantize_weight:
        rknn_config_params["quantize_weight"] = quantize_weight

    remove_reshape = str2bool(model["configs"].get("remove_reshape", False))
    if remove_reshape:
        rknn_config_params["remove_reshape"] = remove_reshape

    sparse_infer = str2bool(model["configs"].get("sparse_infer", False))
    if sparse_infer:
        rknn_config_params["sparse_infer"] = sparse_infer

    enable_flash_attention = str2bool(
        model["configs"].get("enable_flash_attention", False)
    )
    if enable_flash_attention:
        rknn_config_params["enable_flash_attention"] = enable_flash_attention

    # Call rknn.config with the dict containing only non-default values
    rknn.config(
        target_platform=target_platform,
        **rknn_config_params  # unpack the non-default parameters
    )

    # Optional "subgraphs" section: explicit input/output tensor names and
    # input shapes (comma-separated; 3-element shapes get a batch dim of 1).
    # NOTE(review): `inputs` and `outputs` are only bound inside this branch;
    # if "subgraphs" is absent, rknn.load_onnx below raises NameError.
    input_size_list = None
    if "subgraphs" in model:
        input_size_list_str = model["subgraphs"].get("input_size_list", None)
        if input_size_list_str:
            input_size_list = []
            for input_size_str in input_size_list_str:
                input_size = list(map(int, input_size_str.split(",")))
                if len(input_size) == 3:
                    input_size.insert(0, 1)
                input_size_list.append(input_size)

        inputs = model["subgraphs"].get("inputs", None)
        outputs = model["subgraphs"].get("outputs", None)

    # Load the original ONNX model
    # NOTE(review): the return code of load_onnx is not checked, unlike
    # build/export below — a load failure only surfaces later.
    rknn.load_onnx(
        model=model_file_path,
        inputs=inputs,
        outputs=outputs,
        input_size_list=input_size_list,
    )

    # Load the quantization-dataset settings
    do_quantization = model.get("do_quantize", False)
    quantize_dataset_path = model.get("quantize_dataset", None)

    # Build the RKNN model and perform quantization
    ret = rknn.build(do_quantization=do_quantization, dataset=quantize_dataset_path)
    if ret != 0:
        print("Error: rknn build fail " + str(ret))
        return -1

    # Compose the model output path (strip a trailing ".rknn" if present)
    if model_name[-5:] == ".rknn":
        model_name = model_name[:-5]
    export_rknn_model_path = "%s.rknn" % (os.path.join(output_dir, model_name))

    # Export the RKNN model
    ret = rknn.export_rknn(export_path=export_rknn_model_path)
    if ret != 0:
        print("Error: rknn build fail " + str(ret))
        return -1
    print("output rknn path: " + export_rknn_model_path)

    # If a board is connected, run performance / memory evaluation
    if eval_perf_memory:
        # NOTE(review): device_id is a string or None here (argparse uses
        # type=str), so `device_id == True` looks like legacy handling of a
        # boolean -d flag — confirm it is still needed.
        if device_id == True:
            device_id = None
        ret = rknn.init_runtime(
            target_platform, perf_debug=False, eval_mem=False, device_id=device_id
        )
        if ret != 0:
            print("Init runtime failed.")
            exit(ret)
        rknn.eval_perf()
        ret = rknn.init_runtime(
            target_platform, perf_debug=False, eval_mem=True, device_id=device_id
        )
        if ret != 0:
            print("Init runtime failed.")
            exit(ret)
        rknn.eval_memory()

    # Accuracy analysis: -a on the command line forces it on; otherwise the
    # config file decides via "do_accuracy_analysis".
    if accuracy_analysis is None:
        do_accuracy_analysis = model.get("do_accuracy_analysis", False)
        accuracy_analysis_dataset_path = model.get("accuracy_analysis_dataset", None)
    else:
        do_accuracy_analysis = True
        accuracy_analysis_dataset_path = model.get("accuracy_analysis_dataset", None)
        # The -a argument may itself be the dataset path
        if os.path.exists(accuracy_analysis):
            accuracy_analysis_dataset_path = accuracy_analysis

    if do_accuracy_analysis:
        accuracy_analysis_result_dir = os.path.join(
            output_dir, model_name + "_accuracy_analysis"
        )
        accuracy_analysis_dataset_dir = os.path.dirname(accuracy_analysis_dataset_path)

        # Dataset file lists input paths (whitespace separated), resolved
        # relative to the dataset file's own directory.
        with open(accuracy_analysis_dataset_path, "r", encoding="utf-8") as f:
            accuracy_analysis_dataset = [
                os.path.join(accuracy_analysis_dataset_dir, y.strip())
                for x in f.readlines()
                for y in x.split()
            ]

        # With a device id, analyze on the target board; otherwise simulate
        if device_id != None:
            if device_id == True:
                device_id = None
            ret = rknn.accuracy_analysis(
                inputs=accuracy_analysis_dataset,
                target=target_platform,
                device_id=device_id,
                output_dir=accuracy_analysis_result_dir,
            )
        else:
            ret = rknn.accuracy_analysis(
                inputs=accuracy_analysis_dataset,
                output_dir=accuracy_analysis_result_dir,
            )
        if ret != 0:
            print("accuracy_analysis failed.")
            exit(ret)
        # Copy the error-analysis report next to the exported model
        # NOTE(review): `cp` via os.system is POSIX-only; shutil.copy would
        # be portable and avoid shell interpolation of the paths.
        cp_command = "cp {}/error_analysis.txt {}_error_analysis.txt".format(
            accuracy_analysis_result_dir,
            os.path.join(output_dir, model_name),
        )
        os.system(cp_command)

    # No result payload is produced on success; callers check for -1.
    results = None

    return results


if __name__ == "__main__":
    # Command-line entry point for the ONNX -> RKNN conversion tool.
    # (Removed a large block of commented-out ASCII-banner dead code.)

    parser = argparse.ArgumentParser(description="ONNX2RKNN tool")

    parser.add_argument(
        "-i",
        "--input_yml",
        help="yml config file path",
        required=True,
    )

    parser.add_argument("-o", "--output_dir", help="output dir", default=None, type=str)

    parser.add_argument(
        "-t",
        "--target_platform",
        help="target_platform, support rv1103/rv1103b/rv1106/rv1106b/rk2118/rk3562/rk3566/rk3568/rk3576/rk3588",
        choices=[
            "rv1103",
            "rv1103b",
            "rv1106",
            "rv1106b",
            "rk2118",
            "rk3562",
            "rk3566",
            "rk3568",
            "rk3576",
            "rk3588",
        ],
        type=str,
    )

    parser.add_argument(
        "-a",
        "--accuracy_analysis",
        help='Usage: -a "dataset.txt" Simulator accuracy_analysis, if want to turn on board accuracy_analysis, please use -d',
        type=str,
        default=None,
    )

    parser.add_argument(
        "-e",
        "--eval_perf_memory",
        help="eval model perf and memory, board debugging is required, multi adb device use -d, default=false",
        action="store_true",
    )

    parser.add_argument(
        "-v",
        "--verbose",
        help="whether to print detailed log information on the screen",
        action="store_true",
    )

    parser.add_argument(
        "-d",
        "--device_id",
        help="Single adb device usage: -d. Multi adb device usage: -d device_id",
        type=str,
        default=None,
    )

    args = parser.parse_args()

    result = convert_model(
        Config_path=args.input_yml,
        Output_dir=args.output_dir,
        Target_platform=args.target_platform,
        Accuracy_analysis=args.accuracy_analysis,
        Eval_perf_memory=args.eval_perf_memory,
        Verbose=args.verbose,
        Device_id=args.device_id,
    )
    # BUG FIX: the original discarded convert_model's return value, so the
    # process exited with status 0 even on failure; propagate -1 to the
    # shell as a non-zero exit code so scripts/CI can detect errors.
    if result == -1:
        raise SystemExit(1)
