import ast
import json
import os
import re
import subprocess
from pathlib import Path

import numpy as np
import onnx
import onnxdumper
import onnxruntime

from config import logger

class MindAccModel:
    """Singleton that drives ONNX -> MindSpore Lite accuracy-comparison runs.

    Typical call order:
      * ``load``             -- open the ONNX model and cache its metadata
      * ``input_generate``   -- build reproducible random input data
      * ``run_ms_converter`` -- convert the ONNX model to a ``.ms`` model
      * ``run_onnx_dump``    -- run ONNX inference with per-node dumping
      * ``run_ms_dump``      -- run the MindSpore Lite benchmark with dumping,
                                locally or on an HDC-connected device

    NOTE: ``__init__`` runs on every ``MindAccModel()`` call and therefore
    resets the shared singleton state.
    """

    _instance = None  # the single shared instance

    def __new__(cls, *args, **kwargs):
        # Lazily create the singleton.  Fix: do NOT forward *args/**kwargs to
        # object.__new__ -- it does not accept extra arguments.
        if not cls._instance:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        self.path: Path = None            # path of the loaded ONNX model
        self.name: str = None             # model name (file stem)
        self.onnx_session = None          # onnxdumper.InferenceSession
        self.input_nodes: list = None     # onnxruntime NodeArg inputs
        self.output_nodes: list = None    # onnxruntime NodeArg outputs
        self.precision: np.dtype = None   # numpy dtype of the first input
        self.onnx_input: dict = None      # {input_name: ndarray} fed to ONNX
        self.onnx_output_path: str = "onnx_dumpinrun.npz"  # ONNX dump file
        self.mslite_input: Path = None    # raw binary input for the benchmark
        self.ms_output_path = None        # directory holding MindSpore dumps
        self.ms_model_path: str = None    # converted .ms model path

    def load(self, model_file: str):
        """Load *model_file* and cache its session, I/O nodes and input dtype."""
        self.path = Path(model_file)
        self.name = self.path.stem
        self.onnx_session = onnxdumper.InferenceSession(self.path)
        self.input_nodes = self.onnx_session.get_inputs()
        self.output_nodes = self.onnx_session.get_outputs()
        # Input precision: map the ONNX elem_type of the first graph input to
        # the corresponding numpy dtype.
        tensor_type = onnx.load(self.path).graph.input[0].type.tensor_type
        self.precision = onnx.helper.tensor_dtype_to_np_dtype(tensor_type.elem_type)

    def input_generate(self, seed=0) -> dict:
        """Create deterministic random data for the model's first input.

        Side effects: writes ``input/mslite_input.bin`` (NHWC layout for 4-D
        input) and stores the ONNX feed dict in ``self.onnx_input``.

        Returns the feed dict ``{input_name: ndarray}``.
        """
        np.random.seed(seed)
        input_node = self.input_nodes[0]
        # Dynamic dimensions are reported as strings -- pin them to 1.
        shape = [1 if isinstance(dim, str) else dim for dim in input_node.shape]
        input_data = np.random.random(shape).astype(self.precision)

        # MindSpore Lite consumes NHWC, so transpose 4-D NCHW input.
        if input_data.ndim == 4:
            ms_input_data = np.transpose(input_data, (0, 2, 3, 1))
        else:
            ms_input_data = input_data
        # Fix: make sure the target directory exists before writing.
        os.makedirs("input", exist_ok=True)
        ms_input_data.tofile("input/mslite_input.bin")
        self.mslite_input = Path("input/mslite_input.bin")
        self.onnx_input = {input_node.name: input_data}
        return self.onnx_input

    def convert_model(self, os_platform: str, package_root_path: Path, model_path: str):
        """Convert the ONNX model at *model_path* with converter_lite.

        Returns the converted model path (``model_path + ".ms"``).
        """
        if os_platform == "linux":
            converter = package_root_path / "tools/converter/converter/converter_lite"
            os.environ["LD_LIBRARY_PATH"] = (
                str(package_root_path / "runtime/lib")
                + ":"
                + str(package_root_path / "tools/converter/lib")
            )
            cmd = f"{converter} --fmk=ONNX --modelFile={model_path} --outputFile={model_path}"
            logger.info("Run convert cmd:{}".format(cmd))
            os.system(cmd)
            logger.info("Convert done, output model:{}".format(self.path))
        else:
            # Windows: DLL lookup goes through PATH and env changes only live
            # inside one shell session, so everything is bundled in a .bat.
            converterRuntimeLib = f'set PATH={package_root_path}\\tools\\converter\\lib;%PATH%'
            benchmarkRuntimeLib = f"set PATH={package_root_path}\\runtime\\lib;%PATH%"
            converter = package_root_path / "tools/converter/converter/converter_lite.exe"
            cmd = f"call {converter} --fmk=ONNX --modelFile={model_path} --outputFile={model_path}"
            with open('convert.bat', 'w', encoding='utf-8') as fw:
                fw.write(converterRuntimeLib + "\n" + benchmarkRuntimeLib + "\n" + cmd)
            subprocess.run("convert.bat")
        return model_path + ".ms"

    def input_load(self, input_data: str) -> None:
        """Read a raw binary input file using the model's input dtype.

        NOTE(review): the loaded array is currently discarded -- nothing is
        assigned to ``self``. This looks unfinished; confirm the intended
        behaviour before relying on this method.
        """
        ms_input_data = np.fromfile(input_data, dtype=self.precision)

    def run_onnx_dump(self, dump_path="onnx_dumpinrun.npz") -> dict:
        """Run ONNX inference, dumping per-node outputs to *dump_path*."""
        self.onnx_output_path = dump_path
        return self.onnx_session.run(
            [node.name for node in self.output_nodes], self.onnx_input, dump_path=dump_path
        )

    def check_os_system(self):
        """Return "linux" on POSIX hosts, otherwise "windows"."""
        return "linux" if os.name == "posix" else "windows"

    def run_ms_converter(self, package_root_path: Path, optimize="") -> Path:
        """Convert the loaded model to MindSpore Lite; return the .ms path.

        *optimize* is accepted for interface compatibility but currently unused.
        """
        self.ms_model_path = self.convert_model(
            os_platform=self.check_os_system(),
            package_root_path=package_root_path,
            model_path=str(self.path),
        )
        return Path(self.ms_model_path)

    def run_ms_dump_in_local(self, enable_fp16, input_shape, os_platform, ms_model_path: str, msLitePath: Path) -> Path:
        """Run the MindSpore Lite benchmark locally with node dumping enabled.

        Returns the directory the benchmark reports as its dump location;
        exits the process if no dump path is found in the benchmark output.
        """
        # Fix: dropped the unused (and never-assigned) `global last_dump_dirs`.
        shapes = self.get_input_shape(input_shape)
        if os_platform == "linux":
            benchmark = msLitePath / "tools/benchmark/benchmark"
            os.environ["LD_LIBRARY_PATH"] = (
                str(msLitePath / "runtime/lib") + ":" + str(msLitePath / "tools/converter/lib")
            )
            mslite_benchmark_config = {
                "common_dump_settings": {
                "dump_mode": 0,
                "path": f"{os.getcwd()}/output",
                "net_name": self.name,
                "input_output": 2,
                "kernels": [],
                },
            }
            with open('dump_config.json', 'w') as f:
                json.dump(mslite_benchmark_config, f)
            os.environ["MINDSPORE_DUMP_CONFIG"] = f"{os.getcwd()}/dump_config.json"
            command = [
                benchmark.resolve(),
                f"--modelFile={str(self.path) + '.ms'}",
                f"--inputShapes={','.join(shapes)}",
                f"--enableFp16={str(enable_fp16).lower()}",
                "--inDataFile=input/mslite_input.bin",
            ]
            # Fix: log the command actually executed (the old log omitted
            # --inputShapes and --enableFp16).
            cmd = " ".join(str(part) for part in command)
            logger.info("Run benchmark cmd:{}".format(cmd))
            result = subprocess.run(command, text=True, capture_output=True)
            logger.info("Benchmark done")
            match = re.search("Dumped file is saved to : (.+)", result.stdout)
            if not match:
                logger.error("No dump path found in benchmark output")
                exit()
            outputPath = match.group(1)
        else:
            benchmarkPath = msLitePath / "tools/benchmark/benchmark.exe"
            mslite_benchmark_config = {
                "common_dump_settings": {
                    "dump_mode": 0,
                    "path": "output",
                    "net_name": self.name,
                    "input_output": 2,
                    "kernels": [],
                },
            }
            # Write the dump configuration file.
            with open('dump_config.json', 'w') as fw:
                json.dump(mslite_benchmark_config, fw)
            dump_cmd = "set MINDSPORE_DUMP_CONFIG=dump_config.json"
            cmd = f"call {benchmarkPath.resolve()} --modelFile={str(self.path) + '.ms'} --inputShapes={','.join(shapes)} --enableFp16={str(enable_fp16).lower()} --inDataFile=input/mslite_input.bin"
            converterRuntimeLib = f'set PATH={msLitePath}\\tools\\converter\\lib;%PATH%'
            benchmarkRuntimeLib = f"set PATH={msLitePath}\\runtime\\lib;%PATH%"
            with open('benchmark.bat', 'w', encoding='utf-8') as fw:
                fw.write(dump_cmd + "\n" + converterRuntimeLib + "\n" + benchmarkRuntimeLib + "\n" + cmd)
            logger.info("Run benchmark cmd:{}".format(cmd))
            result = subprocess.run("benchmark.bat", text=True, capture_output=True)
            logger.info("Benchmark done")
            match = re.search("Dumped file is saved to : (.+)", result.stdout)
            if not match:
                logger.error("No dump path found in benchmark output")
                exit()
            outputPath = match.group(1)
        return Path(outputPath)

    def run_ms_dump_in_hdc_devices(self, enable_fp16, input_shape, ms_model_path: str, benchmark_bin: str):
        """Run the benchmark with dumping on an HDC-connected device.

        Pushes model, input, benchmark binary and dump config to
        ``/data/local/tmp/<name>``, runs the benchmark there, pulls the dump
        directory back and returns its local relative path.

        NOTE(review): the string-form subprocess calls here presume a Windows
        host (POSIX would need shell=True) -- confirm against the deployment.
        """
        target_dir = f"/data/local/tmp/{self.name}"
        shapes = ','.join(self.get_input_shape(input_shape))
        # Fix: the probe command had unbalanced quotes and the result was
        # compared against the misspelled literal 'exits', so the mkdir ran
        # unconditionally.
        result = subprocess.run(f'hdc shell "[ -d {target_dir} ] && echo exists"', capture_output=True)
        if result.stdout.decode().strip() != 'exists':
            subprocess.run(f'hdc shell "mkdir {target_dir}"')
        subprocess.run(f'hdc shell "mkdir /data/local/tmp/{self.name}/output"')
        mslite_benchmark_config = {
            "common_dump_settings": {
                "dump_mode": 0,
                "path": f"/data/local/tmp/{self.name}/output",
                "net_name": self.name,
                "input_output": 2,
                "kernels": [],
            },
        }
        # Write the dump configuration file.
        with open('dump_config.json', 'w') as fw:
            json.dump(mslite_benchmark_config, fw)

        # Push every artifact the on-device benchmark needs.
        for file in [ms_model_path, self.mslite_input, benchmark_bin, "dump_config.json"]:
            subprocess.run(f'hdc file send {file} {target_dir}')

        target_config_dir = target_dir + "/dump_config.json"
        target_benchmark_bin = target_dir + "/benchmark_bin"
        target_mslite_model = target_dir + "/" + self.path.name + ".ms"
        target_inDataFile = target_dir + "/" + self.mslite_input.name
        subprocess.run(f'hdc shell "chmod +x {target_benchmark_bin}"')
        cmd = f'hdc shell "export MINDSPORE_DUMP_CONFIG={target_config_dir} && {target_benchmark_bin} --modelFile={target_mslite_model} --inputShapes={shapes} --enableFp16={str(enable_fp16).lower()} --inDataFile={target_inDataFile}"'
        result = subprocess.run(cmd, capture_output=True, text=True)
        match = re.search("Dumped file is saved to : (.+)", result.stdout)
        if not match:
            logger.error("No dump path found in on-device benchmark output")
            exit()
        outputPath = match.group(1)
        # Keep only the last three path components as the local destination.
        target_output_path = Path(*(Path(outputPath).parts[-3:]))

        subprocess.run(f"hdc file recv {outputPath} {target_output_path}")
        return target_output_path

    def get_input_shape(self, input_shape):
        """Parse a shape string like "[1, 3, 224, 224]" into a list of string
        dimensions; dynamic (string) dimensions are pinned to '1'.
        """
        # Security fix: ast.literal_eval instead of eval -- *input_shape*
        # comes from outside and must not be able to execute code.
        shapes = []
        for dim in ast.literal_eval(input_shape):
            shapes.append('1' if isinstance(dim, str) else str(dim))
        return shapes

    def run_ms_dump(self, device: str, enable_fp16: bool, input_shape: str, ms_model_path: Path, msLitePath: Path):
        """Dispatch the MindSpore Lite dump run to the local host or a device.

        Any *device* other than "local" is treated as an HDC device target.
        """
        if device != "local":
            self.ms_output_path = self.run_ms_dump_in_hdc_devices(enable_fp16, input_shape, ms_model_path, Path('benchmark_bin'))
        else:
            os_platform = self.check_os_system()
            self.ms_output_path = self.run_ms_dump_in_local(enable_fp16, input_shape, os_platform, ms_model_path, msLitePath)
        return self.ms_output_path
