# -*- coding:utf-8 -*-
import functools
import numpy as np
import os
import sys
import json
import acl
import argparse
from aclError import acl_error
from dataloader import save_data, load_data
from constant import ACL_MEMCPY_HOST_TO_DEVICE, \
    ACL_MEM_MALLOC_HUGE_ONLY, ACL_FORMAT_ND, ACL_FORMAT_NCHW, ACL_FORMAT_NHWC, ACL_FORMAT_NC1HWC0,\
    ACL_FORMAT_FRACTAL_Z, ACL_FORMAT_FRACTAL_NZ, acl_dtype, ACL_ERROR_CODE, ACL_MEMCPY_DEVICE_TO_HOST, \
    ACL_FORMAT_HWCN, ACL_FORMAT_NCDHW, ACL_FORMAT_NDC1HWC0, ACL_FORMAT_NDHWC

# Engine selector passed to acl.op.create_kernel.
# NOTE(review): presumably selects the AI Core engine — confirm against the
# CANN docs for the acl.op.create_kernel engine_type parameter.
ACL_ENGINE = 1

# Dispatch table: operator-attribute "type" string -> ACL attribute setter.
# Used by AclOp._gen_attr to apply user-declared attrs to the op handle.
attr_funcs = {"bool": acl.op.set_attr_bool, "int": acl.op.set_attr_int, "float": acl.op.set_attr_float,
              "string": acl.op.set_attr_string, "list_bool": acl.op.set_attr_list_bool,
              "list_int": acl.op.set_attr_list_int, "list_float": acl.op.set_attr_list_float,
              "list_string": acl.op.set_attr_list_string, "list_list_int": acl.op.set_attr_list_list_int}

# Human-readable tensor format name -> ACL format enum value (from constant).
format_dict = {"ND": ACL_FORMAT_ND, "NCHW": ACL_FORMAT_NCHW, "NHWC": ACL_FORMAT_NHWC, "NC1HWC0": ACL_FORMAT_NC1HWC0,
               "FRACTAL_Z": ACL_FORMAT_FRACTAL_Z, "FRACTAL_NZ": ACL_FORMAT_FRACTAL_NZ, "HWCN": ACL_FORMAT_HWCN,
               "NCDHW": ACL_FORMAT_NCDHW, "NDC1HWC0": ACL_FORMAT_NDC1HWC0, "NDHWC": ACL_FORMAT_NDHWC}

# Bit flags OR-ed together for acl.prof.create_config (profiling data types).
ACL_PROF_ACL_API = 0x0001
ACL_PROF_TASK_TIME = 0x0002
ACL_PROF_AICORE_METRICS = 0x0004
ACL_PROF_AICPU_TRACE = 0x0008

def check_ret(message, ret):
    """Validate an ACL return code; on failure print diagnostics and abort.

    When ``ret`` differs from ``ACL_ERROR_CODE`` (success), prints the mapped
    ACL error description (if known) plus troubleshooting hints (in Chinese),
    then terminates the process with exit status 1.

    :param message: label of the ACL call that produced ``ret``
    :param ret: return code from the ACL API call
    """
    if ret != ACL_ERROR_CODE:
        # Membership test directly on the dict (idiomatic; avoids the
        # needless .keys() view construction).
        if ret in acl_error:
            print("\033[31m[> 错误 <]\033[0m" + acl_error[ret])
        print("\033[31m[> 错误 <]\033[0m" + "message:", message)
        print("\033[31m[> 错误 <]\033[0m" + "1.自检脚本是否写错:")
        print("\033[31m[> 错误 <]\033[0m" + " > 输入数据与构造的dict的shape、dtype等是否一致?")
        print("\033[31m[> 错误 <]\033[0m" + " > 输出的shape是否是真实输出的shape?")
        print("\033[31m[> 错误 <]\033[0m" + "2.日志排查:source set_log.sh")
        print("\033[31m[> 错误 <]\033[0m" + "3.在当前路径下,编译生成以算子IR命名的文件夹,有om图,尝试打开看下算子的输入输出参数")
        print("\033[31m[> 错误 <]\033[0m" + " > 在线打开om图:http://hi-lake.rnd.huawei.com/#/netron")
        print("\033[31m[> 错误 <]\033[0m" + "4.你上一步修改的是否合理？尝试回退一下？")
        sys.exit(1)


class AclOp():
    """Runs a single compiled operator through the ACL single-op API.

    Lifecycle, driven by ``run()``: acquire device/context/stream, stage
    numpy inputs into device memory, load the kernel (either a pre-built
    om model directory or a ``./kernel_meta/<name>.o`` binary plus its
    JSON descriptor), execute on a stream, copy outputs back to host,
    and finally free every ACL resource.
    """

    def __init__(self):
        # ACL runtime handles, populated by init_resource().
        self.context = None
        self.stream = None
        self.op_attr = None

        # Per-input resources kept in parallel lists (index i describes
        # input i); released in lock-step by release().
        self.inputs_desc = []
        self.inputs_device = []
        self.inputs_device_buffer = []
        self.inputs_host_buffer = []

        # Per-output resources, same parallel-list convention.
        self.outputs_desc = []
        self.outputs_device = []
        self.outputs_device_buffer = []
        self.outputs_host = []
        # Kernel launch metadata read from kernel_meta/<name>.json
        # (see get_block_dim); run_info may override some of it.
        self.block_dim = None
        self.workspace_sizes = None
        self.workspace_num = 0
        self.kernel_name = None
        self.run_info = None
        self.is_dynamic = False
        # Maps output index -> input index for outputs that reuse (alias)
        # an input's device buffer instead of owning their own.
        self.inplace_input_index = {}

    def _check_data_info(self, inputs_data, inputs_info, is_dynamic=False):
        """Validate numpy inputs against the user-supplied info dicts.

        Checks count, dtype and shape consistency; prints a diagnostic
        (in Chinese) and exits the process on any mismatch.
        """
        len_data = len(inputs_data)
        len_info = len(inputs_info)
        if len_data != len_info:
            print("\033[31m[> 错误 <]\033[0m" + "输入numpy数据的个数与inputs_info构造的个数不一致")
            print("\033[31m[> 错误 <]\033[0m" + "输入numpy数据的个数 = %d" % int(len_data))
            print("\033[31m[> 错误 <]\033[0m" + "inputs_info构造的个数 = %d" % int(len_info))
            sys.exit(1)
        for i in range(len_data):
            data = inputs_data[i]
            info = inputs_info[i]

            # numpy reports the bfloat16 extension dtype as the raw-void
            # string '|V2'; importing bfloat16ext registers the real dtype.
            dtype = str(data.dtype)
            if dtype == '|V2':
                import bfloat16ext
                dtype = 'bfloat16'

            if dtype != info["type"]:
                # A declared type of "double" is tolerated regardless of
                # the actual numpy dtype.
                if info["type"] == "double":
                    continue
                print("\033[31m[> 错误 <]\033[0m" + "第%s个输入dict中的类型是%s, 而真实传入的是%s" % (i, info["type"], dtype))
                sys.exit(1)

            if is_dynamic:
                # Dynamic shape: only the rank must match, and a -2 entry
                # in the declared shape (unknown rank) skips the check.
                if len(data.shape) != len(info["shape"]) and -2 not in info["shape"]:
                    print("\033[31m[> error <]\033[0m" + 
                          "第%s个输入dict中的shape长度是%s, 而真实传入的是%s" % (i, len(info["shape"]), len(data.shape)))
                    sys.exit(1)
            else:
                # Static shape: the full shape must match exactly.
                if list(data.shape) != list(info["shape"]):
                    print("\033[31m[> error <]\033[0m" + 
                          "第%s个输入dict中的shape是%s, 而真实传入的是%s" % (i, list(info["shape"]), list(data.shape)))
                    sys.exit(1)

    def get_block_dim(self, kernel_name):
        """Load launch metadata from ``./kernel_meta/<kernel_name>.json``.

        Fills ``block_dim`` (overridden by ``run_info["block_dim"]`` when
        run_info is set), ``workspace_sizes``/``workspace_num`` and
        remembers ``kernel_name`` for later kernel-id construction.
        """
        jsonfile = "./kernel_meta/" + kernel_name + ".json"
        with open(jsonfile, "r") as json_file:
            params = json.load(json_file)
            if not self.run_info:
                self.block_dim = params.get("blockDim", 1)
            else:
                self.block_dim = self.run_info.get("block_dim", 1)
            self.workspace_sizes = params.get("workspace", {}).get('size', [])
            self.workspace_num = params.get("workspace", {}).get('num', 0)
            self.kernel_name = kernel_name

    def release(self, device_id):
        """Free every ACL resource acquired during a run, then finalize ACL.

        Pops the parallel resource lists in lock-step so each iteration
        frees the buffers/descriptor belonging to one tensor.
        """
        while self.inputs_desc:
            ret = acl.destroy_data_buffer(self.inputs_device_buffer.pop())
            check_ret("acl.destroy_data_buffer_device", ret)
            ret = acl.destroy_data_buffer(self.inputs_host_buffer.pop())
            check_ret("acl.destroy_data_buffer_host", ret)
            ret = acl.rt.free(self.inputs_device.pop())
            check_ret("acl.rt.free", ret)
            acl.destroy_tensor_desc(self.inputs_desc.pop())

        if self.inplace_input_index:
            # Popping from the end, so pop_idx tracks the index of the
            # output currently being released.
            pop_idx = len(self.outputs_desc) - 1
            while self.outputs_desc:
                if pop_idx not in self.inplace_input_index.keys():
                    ret = acl.destroy_data_buffer(self.outputs_device_buffer.pop())
                    check_ret("acl.destroy_data_buffer", ret)
                    ret = acl.rt.free(self.outputs_device.pop())
                    check_ret("acl.rt.free", ret)
                else:
                    # Inplace output: its device memory aliases an input
                    # buffer that the loop above already freed — only
                    # drop the references, never double-free.
                    self.outputs_device_buffer.pop()
                    self.outputs_device.pop()
                ret = acl.rt.free_host(self.outputs_host.pop())
                check_ret("acl.rt.free_host", ret)
                acl.destroy_tensor_desc(self.outputs_desc.pop())
                pop_idx -= 1
        else:
            while self.outputs_desc:
                ret = acl.destroy_data_buffer(self.outputs_device_buffer.pop())
                check_ret("acl.destroy_data_buffer", ret)
                ret = acl.rt.free(self.outputs_device.pop())
                check_ret("acl.rt.free", ret)
                ret = acl.rt.free_host(self.outputs_host.pop())
                check_ret("acl.rt.free_host", ret)
                acl.destroy_tensor_desc(self.outputs_desc.pop())

        if self.op_attr:
            acl.op.destroy_attr(self.op_attr)
            self.op_attr = None

        if self.stream:
            ret = acl.rt.destroy_stream(self.stream)
            check_ret("acl.rt.destroy_stream", ret)
            self.stream = None

        if self.context:
            ret = acl.rt.destroy_context(self.context)
            check_ret("acl.rt.destroy_context", ret)
            self.context = None

        ret = acl.rt.reset_device(device_id)
        check_ret("acl.rt.reset_device", ret)
        # NOTE(review): tiling_args is only ever assigned here and never
        # initialized in __init__ or read elsewhere in this file — confirm
        # whether it is still needed.
        self.tiling_args = []
        ret = acl.finalize()
        check_ret("acl.finalize", ret)

    def init_resource(self, device_id):
        """Initialize ACL and create the device context, stream and op attr."""
        ret = acl.init()
        check_ret("acl.init", ret)

        ret = acl.rt.set_device(device_id)
        check_ret("acl.rt.set_device", ret)

        self.context, ret = acl.rt.create_context(device_id)
        check_ret("acl.rt.create_context", ret)

        self.stream, ret = acl.rt.create_stream()
        check_ret("acl.rt.create_stream", ret)

        self.op_attr = acl.op.create_attr()

    def _gen_input_tensor(self, inputs_data, inputs_info):
        """Create input descriptors and copy each numpy array to the device.

        Returns the list of raw ``bytes`` objects backing the host-side
        pointers. NOTE(review): run() discards this return value — confirm
        the host pointers (used by create_data_buffer/set_tensor_const)
        are not dereferenced by ACL after the bytes objects are collected.
        """
        bytes_list = []

        self._check_data_info(inputs_data, inputs_info, self.is_dynamic)
        length = len(inputs_info)
        for i in range(length):
            data_info = inputs_info[i]
            is_const = data_info.get("is_const", False)
            data = inputs_data[i]

            # Translate numpy dtype names to the keys used by acl_dtype.
            dtype = str(data.dtype)
            if dtype == "float32":
                dtype = "float"
            elif dtype == '|V2':
                import bfloat16ext
                dtype = 'bfloat16'
            shape = list(data.shape)
            format = data_info["format"]
            format_type = format_dict[format]

            input_desc = acl.create_tensor_desc(acl_dtype[dtype], shape, format_type)
            input_size = acl.get_tensor_desc_size(input_desc)
            input_device, ret = acl.rt.malloc(input_size, ACL_MEM_MALLOC_HUGE_ONLY)
            check_ret("acl.rt.malloc", ret)
            bytes_data = data.tobytes()
            bytes_list.append(bytes_data)
            input_ptr = acl.util.bytes_to_ptr(bytes_data)
            ret = acl.rt.memcpy(input_device, input_size, input_ptr, input_size, ACL_MEMCPY_HOST_TO_DEVICE)

            check_ret("acl.rt.memcpy", ret)
            input_buffer = acl.create_data_buffer(input_device, input_size)
            host_data_buf = acl.create_data_buffer(input_ptr, input_size)

            if is_const:
                # Const inputs get their host data attached to the desc so
                # the op sees them as compile-time constants.
                ret = acl.set_tensor_const(input_desc, input_ptr, input_size)
                check_ret("acl.set_tensor_const", ret)

            self.inputs_device.append(input_device)
            self.inputs_device_buffer.append(input_buffer)
            self.inputs_host_buffer.append(host_data_buf)
            self.inputs_desc.append(input_desc)
        return bytes_list

    def _gen_output_tensor(self, outputs_info):
        """Create output descriptors and allocate device/host output memory.

        Outputs listed in ``inplace_input_index`` reuse the corresponding
        input's device buffer instead of allocating a new one. Rejects
        dynamic (-1) dims: the caller must supply the real output shape.
        """
        operator_output = []
        for info in outputs_info:
            dtype = info["type"]
            if dtype == "float32":
                dtype = "float"

            shape = list(info["shape"])
            if -1 in shape:
                print("\033[31m[> error <]\033[0m" + "工具的输出dict要给真实的输出shape!")
                sys.exit(1)
            format = info["format"]
            format_type = format_dict[format]
            output_desc = acl.create_tensor_desc(acl_dtype[dtype], shape, format_type)
            operator_output.append(output_desc)

        for idx, desc in enumerate(operator_output):
            output_size = acl.get_tensor_desc_size(desc)
            if idx in self.inplace_input_index.keys():
                # Alias the input's device memory (inplace op output).
                self.outputs_device.append(self.inputs_device[self.inplace_input_index[idx]])
                self.outputs_device_buffer.append(self.inputs_device_buffer[self.inplace_input_index[idx]])
            else:
                output_device, ret = acl.rt.malloc(output_size, ACL_MEM_MALLOC_HUGE_ONLY)
                check_ret("acl.rt.malloc", ret)
                self.outputs_device.append(output_device)
                self.outputs_device_buffer.append(acl.create_data_buffer(output_device, output_size))
            self.outputs_host.append(acl.rt.malloc_host(output_size)[0])
            self.outputs_desc.append(desc)

    def _process_attr(self, dtype, value):
        """Normalize an attribute value to what the ACL setters expect.

        Bools become 0/1 ints, and list attributes are coerced element-wise
        to the declared element type. Other dtypes pass through unchanged.
        """
        result = value
        if dtype == "bool":
            if value:
                result = 1
            else:
                result = 0
        if dtype == "list_bool":
            result = []
            for v in value:
                if v:
                    result.append(1)
                else:
                    result.append(0)

        def int_list(inner_v):
            # Coerce one inner list of a list_list_int attribute to ints.
            return [int(j) for j in inner_v]

        if dtype == "list_int":
            result = [int(i) for i in value]
        if dtype == "list_float":
            result = [float(i) for i in value]
        if dtype == "list_list_int":
            result = [int_list(i) for i in value]

        return result

    def _gen_attr(self, attrs):
        """Apply each attr dict (name/type/value) to self.op_attr via ACL."""
        for attr in attrs:
            name = attr["name"]
            dtype = attr["type"]
            value = self._process_attr(dtype, attr["value"])
            attr_func = attr_funcs[dtype]
            ret = attr_func(self.op_attr, name, value)
            check_ret("acl.set_attr_%s" % dtype, ret)

    def load_op(self, kernel_name, op_model_path=None):
        """Load the operator: from an om model dir if given, else from
        the raw kernel binary under ./kernel_meta."""
        if op_model_path:
            ret = acl.op.set_model_dir(op_model_path)
            check_ret("acl.op.set_model_dir", ret)
        else:
            self.load_kernel(kernel_name)

    def load_kernel(self, kernel_name):
        """Register and create a kernel from ``./kernel_meta/<name>.o``.

        Also reads launch metadata (get_block_dim) and registers
        ``select_kernel`` as the compile callback for this op.
        """
        kernel_id = kernel_name + "__kernel0"
        kernel_path = "./kernel_meta/" + kernel_name + ".o"
        np_kernel = np.fromfile(kernel_path, dtype=np.byte)
        kernel_size = np_kernel.itemsize * np_kernel.size
        self.get_block_dim(kernel_name)

        ret = acl.op.register_compile_func(kernel_name, self.select_kernel)
        check_ret("acl.op.register_compile_func", ret)
        bytes_kernel = np_kernel.tobytes()
        ptr_kernel = acl.util.bytes_to_ptr(bytes_kernel)
        ret = acl.op.create_kernel(kernel_name, kernel_id, kernel_id, ptr_kernel, kernel_size, ACL_ENGINE, 0)
        check_ret("acl.op.create_kernel", ret)

    def select_kernel(self, in_num, in_desc, out_num, out_desc, op_attr, op_kernel_desc):
        """Compile callback registered via acl.op.register_compile_func.

        Sets the kernel launch args (int32 tiling data from run_info) and
        workspace sizes on the kernel descriptor. The unused parameters
        are part of the callback signature ACL requires.
        """
        tiling_data = []
        if self.run_info:
            tiling_data = self.run_info.get("tiling_data", [])

        args = np.array(tiling_data, dtype=np.int32)
        bytes_args = args.tobytes()
        args_ptr = acl.util.bytes_to_ptr(bytes_args)
        size = args.itemsize * args.size
        kernel_id = self.kernel_name + "__kernel0"
        ret = acl.op.set_kernel_args(op_kernel_desc, kernel_id, self.block_dim, args_ptr, size)
        check_ret("acl.op.set_kernel_args", ret)

        workspace_sizes = np.array(self.workspace_sizes, dtype=np.uint32)
        bytes_workspace = workspace_sizes.tobytes()
        workspace_sizes_ptr = acl.util.bytes_to_ptr(bytes_workspace)
        ret = acl.op.set_kernel_workspace_sizes(op_kernel_desc, self.workspace_num, workspace_sizes_ptr)
        check_ret("acl.op.set_kernel_workspace_sizes", ret)

    def run(self, op_type, inputs_data, inputs_info, outputs_info, attrs=None, device_id=0, op_model_path=None, inplace_input_index={}, run_profiling_mode="acl"):
        """End-to-end single-op execution; returns host numpy outputs.

        When an om model path is given and profiling mode is "acl", the
        execution is wrapped in an acl.prof start/stop capture, and the
        outputs are additionally saved via save_data.
        NOTE(review): ``inplace_input_index={}`` is a mutable default
        argument — it is only read here, not mutated, but a None sentinel
        would be safer.
        """
        self.inplace_input_index = inplace_input_index
        self.init_resource(device_id)
        if inputs_data:
            inputs_data = load_data(inputs_data)
            self._gen_input_tensor(inputs_data, inputs_info)
        self.load_op(op_type, op_model_path)
        # run_info is cleared before outputs are built; select_kernel then
        # falls back to the JSON metadata values.
        self.run_info = None
        self._gen_output_tensor(outputs_info)
        if op_model_path and run_profiling_mode == "acl":
            self._gen_attr(attrs)
            ret = acl.prof.init(op_model_path)
            check_ret("acl.prof.init", ret)
            config = acl.prof.create_config([device_id], 1, 0, ACL_PROF_ACL_API | ACL_PROF_TASK_TIME |
                                            ACL_PROF_AICPU_TRACE | ACL_PROF_AICORE_METRICS)
            if config == 0:
                print("\033[31m[> 错误 <]\033[0m" + "失败的配置采集算子执行耗时 接口: acl.prof.create_config")
            acl.prof.start(config)
            self._forward(op_type, op_model_path)
            acl.prof.stop(config)
            ret = acl.prof.finalize()
            check_ret("acl.prof.finalize", ret)
        else:
            self._gen_attr(attrs)
            self._forward(op_type, op_model_path)
            
        result = self._get_operator_result(outputs_info)
        self.release(device_id)
        if op_model_path:
            result = save_data(op_model_path, result, start_name="output")
        return result

    def _forward(self, op_type, op_model_path):
        """Execute the op on the stream and synchronize.

        The om-model path retries execute_v2 once on failure before
        letting check_ret abort; the kernel path first pushes the current
        descs/attrs via update_params.
        """
        if op_model_path:
            ret = acl.op.execute_v2(op_type, self.inputs_desc, self.inputs_device_buffer, self.outputs_desc,
                                    self.outputs_device_buffer, self.op_attr, self.stream)
            if ret != ACL_ERROR_CODE:
                print("执行算子第一次失败，尝试执行第二次中..............")
                ret = acl.op.execute_v2(op_type, self.inputs_desc, self.inputs_device_buffer, self.outputs_desc,
                                        self.outputs_device_buffer, self.op_attr, self.stream)
        else:
            ret = acl.op.update_params(op_type, self.inputs_desc, self.outputs_desc, self.op_attr)
            check_ret("acl.op.update_params", ret)
            ret = acl.op.execute_v2(op_type, self.inputs_desc, self.inputs_device_buffer, self.outputs_desc,
                                    self.outputs_device_buffer, self.op_attr, self.stream)

        check_ret("acl.op.execute_v2", ret)
        ret = acl.rt.synchronize_stream(self.stream)
        check_ret("acl.rt.synchronize_stream", ret)

    def _get_operator_result(self, outputs_info):
        """Copy each output device->host and rebuild numpy arrays.

        Shapes/dtypes come from outputs_info, not from the tensor descs.
        NOTE(review): functools.reduce over an empty shape (scalar output)
        would raise — this assumes every output has rank >= 1.
        """
        result = []
        for index in range(len(self.outputs_desc)):
            factor = self.outputs_desc[index]
            info = outputs_info[index]
            factor_size = acl.get_tensor_desc_size(factor)
            ret = acl.rt.memcpy(self.outputs_host[index], factor_size, self.outputs_device[index], factor_size,
                                ACL_MEMCPY_DEVICE_TO_HOST)
            check_ret("acl.rt.memcpy", ret)

            data_shape = info["shape"]
            data_type = info["type"]
            np_dtype = np.dtype(data_type)
            data_len = functools.reduce(lambda x, y: x * y, data_shape)
            size = data_len * np_dtype.itemsize
            byte_data = acl.util.ptr_to_bytes(self.outputs_host[index], size)

            # bytearray() makes a writable copy so frombuffer does not hold
            # a read-only view into the raw host buffer bytes.
            np_arr = np.frombuffer(bytearray(byte_data[:data_len * np_dtype.itemsize]), dtype=np_dtype, count=data_len)
            np_arr = np_arr.reshape(data_shape)
            result.append(np_arr)
        return result

# Module-level runner instance used by the CLI entry point below.
acl_op = AclOp()

if __name__ == "__main__":
    # All flags are plain strings; structured ones are eval'd below.
    arg_parser = argparse.ArgumentParser(description="tik tools")
    for flag in ("--op_type", "--inputs_path", "--inputs_info",
                 "--outputs_info", "--attrs", "--device_id",
                 "--op_model_path", "--is_dynamic",
                 "--inplace_input_index", "--run_profiling_mode"):
        arg_parser.add_argument(flag, type=str)

    cli = arg_parser.parse_args()

    # NOTE(review): eval() executes arbitrary code from the command line —
    # only safe when this script is driven by a trusted tool.
    op_kind = cli.op_type
    data_paths = eval(cli.inputs_path)
    in_meta = eval(cli.inputs_info)
    out_meta = eval(cli.outputs_info)
    op_attrs = eval(cli.attrs)
    dev_id = int(cli.device_id)
    model_dir = cli.op_model_path
    dyn_flag = int(cli.is_dynamic)
    inplace_map = eval(cli.inplace_input_index)
    prof_mode = cli.run_profiling_mode

    acl_op.is_dynamic = dyn_flag
    res = acl_op.run(op_kind, data_paths, in_meta, out_meta, op_attrs, dev_id, model_dir, inplace_map, prof_mode)