import acl
import time
import torch
# import logging
import numpy as np

ACL_SUCCESS                 = 0
ACL_DEVICE, ACL_HOST        = 0, 1

#! NPU device-memory allocation policies
ACL_MEM_MALLOC_HUGE_FIRST   = 0
ACL_MEM_MALLOC_HUGE_ONLY    = 1
ACL_MEM_MALLOC_NORMAL_ONLY  = 2

#! Memory-copy directions
ACL_MEMCPY_HOST_TO_HOST     = 0
ACL_MEMCPY_HOST_TO_DEVICE   = 1
ACL_MEMCPY_DEVICE_TO_HOST   = 2
ACL_MEMCPY_DEVICE_TO_DEVICE = 3

#! ACL data-type codes
# https://www.hiascend.com/document/detail/zh/canncommercial/700/inferapplicationdev/aclpythondevg/aclpythondevg_01_0902.html
ACL_DT_UNDEFINED            = -1
ACL_FLOAT                   = 0
ACL_FLOAT16                 = 1
ACL_INT8                    = 2
ACL_INT32                   = 3
ACL_UINT8                   = 4
ACL_INT16                   = 6
ACL_UINT16                  = 7
ACL_UINT32                  = 8
ACL_INT64                   = 9
ACL_UINT64                  = 10
ACL_DOUBLE                  = 11
ACL_BOOL                    = 12
ACL_STRING                  = 13
ACL_COMPLEX64               = 16
ACL_COMPLEX128              = 17
ACL_BF16                    = 27
ACL_COMPLEX32               = 33

#! Tensor-format codes for model inputs/outputs
# https://www.hiascend.com/document/detail/zh/canncommercial/700/inferapplicationdev/aclpythondevg/aclpythondevg_01_0904.html
ACL_FORMAT_UNDEFINED        = -1
ACL_FORMAT_NCHW             = 0
ACL_FORMAT_NHWC             = 1
ACL_FORMAT_ND               = 2
ACL_FORMAT_NC1HWC0          = 3
ACL_FORMAT_FRACTAL_Z        = 4     #! formats up to here are the common ones; the rest are rarely used
ACL_FORMAT_NC1HWC0_C04      = 12
ACL_FORMAT_HWCN             = 16
ACL_FORMAT_NDHWC            = 27
ACL_FORMAT_FRACTAL_NZ       = 29
ACL_FORMAT_NCDHW            = 30
ACL_FORMAT_NDC1HWC0         = 32
ACL_FRACTAL_Z_3D            = 33

#! Tensor-format code -> printable name.
#  Keyed on the constants above so the table cannot drift from the codes.
#  (Fixes a bug where HWCN was keyed as 15 instead of ACL_FORMAT_HWCN == 16.)
ACL_FORMAT_TO_STR           = { ACL_FORMAT_UNDEFINED   : "ACL_FORMAT_UNDEFINED",
                                ACL_FORMAT_NCHW        : "ACL_FORMAT_NCHW",
                                ACL_FORMAT_NHWC        : "ACL_FORMAT_NHWC",
                                ACL_FORMAT_ND          : "ACL_FORMAT_ND",
                                ACL_FORMAT_NC1HWC0     : "ACL_FORMAT_NC1HWC0",
                                ACL_FORMAT_FRACTAL_Z   : "ACL_FORMAT_FRACTAL_Z",
                                ACL_FORMAT_NC1HWC0_C04 : "ACL_FORMAT_NC1HWC0_C04",
                                ACL_FORMAT_HWCN        : "ACL_FORMAT_HWCN",
                                ACL_FORMAT_NDHWC       : "ACL_FORMAT_NDHWC",
                                ACL_FORMAT_FRACTAL_NZ  : "ACL_FORMAT_FRACTAL_NZ",
                                ACL_FORMAT_NCDHW       : "ACL_FORMAT_NCDHW",
                                ACL_FORMAT_NDC1HWC0    : "ACL_FORMAT_NDC1HWC0",
                                ACL_FRACTAL_Z_3D       : "ACL_FRACTAL_Z_3D",
                            }




#! ACL data-type code -> torch dtype
ACL_DTYPE_TO_TORCH          = { ACL_DT_UNDEFINED  : None,
                                ACL_FLOAT         : torch.float32,
                                ACL_FLOAT16       : torch.float16,
                                ACL_INT8          : torch.int8,
                                ACL_INT32         : torch.int32,
                                ACL_UINT8         : torch.uint8,
                                ACL_INT16         : torch.int16,
                                # ACL_UINT16      : torch.uint16,       # torch has no such dtype
                                # ACL_UINT32      : torch.uint32,       # torch has no such dtype
                                ACL_INT64         : torch.int64,
                                # ACL_UINT64      : torch.uint64,       # torch has no such dtype
                                ACL_DOUBLE        : torch.float64,
                                ACL_BOOL          : torch.bool,
                                ACL_STRING        : str,
                                ACL_COMPLEX64     : torch.complex64,
                                ACL_COMPLEX128    : torch.complex128,
                                ACL_BF16          : torch.bfloat16,
                                ACL_COMPLEX32     : torch.complex32,
                            }

#! torch dtype -> ACL data-type code (inverse of the map above)
TORCH_DTYPE_TO_ACL          = { None              : ACL_DT_UNDEFINED,
                                torch.float32     : ACL_FLOAT,
                                torch.float16     : ACL_FLOAT16,
                                torch.int8        : ACL_INT8,
                                torch.int32       : ACL_INT32,
                                torch.uint8       : ACL_UINT8,
                                torch.int16       : ACL_INT16,
                                # torch.uint16    : ACL_UINT16,         # torch has no such dtype
                                # torch.uint32    : ACL_UINT32,         # torch has no such dtype
                                torch.int64       : ACL_INT64,
                                # torch.uint64    : ACL_UINT64,         # torch has no such dtype
                                torch.float64     : ACL_DOUBLE,
                                torch.bool        : ACL_BOOL,
                                str               : ACL_STRING,
                                torch.complex64   : ACL_COMPLEX64,
                                torch.complex128  : ACL_COMPLEX128,
                                torch.bfloat16    : ACL_BF16,
                                torch.complex32   : ACL_COMPLEX32,
                            }

#! ACL data-type code -> printable name
ACL_DTYPE_TO_STR            = { ACL_DT_UNDEFINED  : "ACL_DT_UNDEFINED",
                                ACL_FLOAT         : "ACL_FLOAT",
                                ACL_FLOAT16       : "ACL_FLOAT16",
                                ACL_INT8          : "ACL_INT8",
                                ACL_INT32         : "ACL_INT32",
                                ACL_UINT8         : "ACL_UINT8",
                                ACL_INT16         : "ACL_INT16",
                                ACL_UINT16        : "ACL_UINT16",
                                ACL_UINT32        : "ACL_UINT32",
                                ACL_INT64         : "ACL_INT64",
                                ACL_UINT64        : "ACL_UINT64",
                                ACL_DOUBLE        : "ACL_DOUBLE",
                                ACL_BOOL          : "ACL_BOOL",
                                ACL_STRING        : "ACL_STRING",
                                ACL_COMPLEX64     : "ACL_COMPLEX64",
                                ACL_COMPLEX128    : "ACL_COMPLEX128",
                                ACL_BF16          : "ACL_BF16",
                                ACL_COMPLEX32     : "ACL_COMPLEX32",
                            }

try:
    FileNotFoundError     # FileNotFoundError does not exist on very old interpreters (Python 2)
except NameError:
    FileNotFoundError = IOError


def checkStatus(status, msg):
    """Return True when *status* is ACL_SUCCESS; otherwise print *msg* with the code and return False."""
    if status == ACL_SUCCESS:
        return True
    print(f">>> {msg}, error code is {status}")
    return False

def GiB(val):
    """Convert *val* gibibytes to bytes (val * 2**30)."""
    return val * (1 << 30)

class InputOutputDesc(object):
    """Description of one input or output tensor of an ACL model."""

    def __init__(self, io : int = 0, index : int=0, dtype : int  = ACL_FLOAT,
            ndim:int = 0, dims:list = None, format:int = ACL_FORMAT_UNDEFINED):
        self.io     = io        # 0 -> input tensor, 1 -> output tensor
        self.index  = index     # position among the model's inputs or outputs
        self.dtype  = dtype     # ACL data-type code of the tensor
        self.ndim   = ndim      # number of dimensions
        self.dims   = dims      # list of per-dimension sizes
        self.format = format    # ACL tensor-format code

    def __str__(self):
        # Render as a plain dict with human-readable dtype/format names.
        return str({
            "IO"       : "O" if self.io else "I",
            "Index"    : self.index,
            "DataType" : ACL_DTYPE_TO_STR[self.dtype],
            "NDim"     : self.ndim,
            "Dims"     : self.dims,
            "Format"   : ACL_FORMAT_TO_STR[self.format],
        })

    def __repr__(self):
        return self.__str__()
    

class AclInfer(object):
    """Inference wrapper for an offline-compiled (.om) model on an Ascend NPU
    via pyACL, designed to coexist with torch_npu (no private context, ACL is
    initialized through the PyTorch adaptor).

    Typical usage::

        model = AclInfer("model.om", device_id=0)
        model.init()
        out = model(x)        # x: torch.Tensor input(s)
        model.deinit()
    """

    def __init__(self, model_path:str, device_id:int=0):
        """Record configuration; no device resources are acquired here.

        Args:
            model_path (str):           path to the .om model file
            device_id  (int, optional): NPU device index, defaults to 0
        """
        self.model_path    = model_path
        self.device_id     = device_id
        self.run_model, _  = acl.rt.get_run_mode()                               # ACL_DEVICE or ACL_HOST
        #! Pick the memcpy-direction codes matching the run mode.
        # NOTE(review): the original bound these to locals that were discarded;
        # they are kept as attributes so the intent (reusable direction codes)
        # is preserved.
        if self.run_model == ACL_DEVICE:
            self.memcpy_h2d = ACL_MEMCPY_DEVICE_TO_DEVICE
            self.memcpy_d2h = ACL_MEMCPY_DEVICE_TO_DEVICE
        else:
            self.memcpy_h2d = ACL_MEMCPY_HOST_TO_DEVICE
            self.memcpy_d2h = ACL_MEMCPY_DEVICE_TO_HOST

        # self.context      = None
        # logger            = logging.getLogger(__name__)
        self.stream         = None
        self.model_desc     = None                                               # model description handle (long int)
        self.model_id       = None
        self.num_input      = 0
        self.num_output     = 0

        self.input_dataset  = None
        self.output_dataset = None
        self.input_buffer   = None                                               # one acl data buffer per model input
        self.output_buffer  = None                                               # one acl data buffer per model output

        self._init_tensor   = None                                               # dummy npu tensor that triggers ACL init
        self._initialized   = False

    def init(self):
        r"""Acquire device resources, load the model, and prepare datasets.

        Returns:
            success (bool): True once initialized (idempotent on repeat calls).
        """
        if self._initialized:
            return True
        #! Let torch_npu initialize ACL to avoid double-initialization with the
        #! PyTorch adaptor (so we never call acl.init() ourselves).
        self._init_tensor = torch.randn(2, 2, dtype=torch.float32).npu()
        # if not checkStatus(acl.init(), "acl init failed"):
        #     return False

        checkStatus(acl.rt.set_device(self.device_id), "set device failed")      # select the NPU (default device_id=0)
        #! When mixing with torch_npu we must NOT create our own context.
        # self.context, ret = acl.rt.create_context(self.device_id)
        # checkStatus(ret, "create context failed")

        self.stream, ret = acl.rt.create_stream()
        checkStatus(ret, "create stream failed")

        self.model_id, ret = acl.mdl.load_from_file(self.model_path)             # load the .om file
        checkStatus(ret, "load model failed")

        self.model_desc = acl.mdl.create_desc()                                  # create + fill the model description
        ret = acl.mdl.get_desc(self.model_desc, self.model_id)
        checkStatus(ret, "get model desc failed")

        self.input_dataset   = acl.mdl.create_dataset()
        self.output_dataset  = acl.mdl.create_dataset()

        self.num_input       = self.getNumInputs()
        self.num_output      = self.getNumOutputs()

        #! Create empty (0-sized) buffers now; real device pointers are patched
        #! in on every __call__ via acl.update_data_buffer.
        self.input_buffer    = [acl.create_data_buffer(0, 0) for _ in range(self.num_input)]
        self.output_buffer   = [acl.create_data_buffer(0, 0) for _ in range(self.num_output)]

        for i in range(self.num_input):
            self.input_dataset, _ = acl.mdl.add_dataset_buffer(self.input_dataset,  self.input_buffer[i])

        for i in range(self.num_output):
            self.output_dataset, _ = acl.mdl.add_dataset_buffer(self.output_dataset, self.output_buffer[i])

        self._initialized = True
        return True

    def __call__(self, *inputs, **kwargs):
        """Run forward inference, similar to torch.nn.Module's forward.
        @ref      https://www.hiascend.com/document/detail/zh/canncommercial/700/inferapplicationdev/aclpythondevg/aclpythondevg_01_0137.html
        @version  v1:   ret = acl.mdl.execute(model_id, input, output)     input/output datasets
                  v2:   ret = acl.mdl.execute_v2(model_id, input, output, stream, handle)
                  v3:   ret = acl.mdl.execute_async(model_id, input, output, stream)

        Args:
            *inputs: torch.Tensor inputs, one per model input
            **kwargs:
                check_inputs (bool, default True):  validate dtype/shape of each input
                exec_async   (bool, default False): use execute_async on our stream
                loops        (int,  default 1):     >1 enables a simple timing benchmark
                warmup       (int,  default 20):    warmup iterations when loops > 1

        Returns:
            outputs: device-side torch.Tensor (single output) or list of tensors
        """
        if kwargs.get('check_inputs', True):
            assert self.num_input == len(inputs), "Number of inputs should be the same of loaded model!"
            for index, tensor in enumerate(inputs):
                self._dataCheck(tensor, index=index)                # validate dtype, ndim, shape

        #! Move inputs to the NPU and keep references to the *contiguous*
        #! tensors: the original took data_ptr() of a temporary returned by
        #! contiguous(), which could be freed before execution.
        # TODO: handle dynamic-shape models
        inputs = [t.to(f"npu:{self.device_id}").contiguous() for t in inputs]
        for i in range(self.num_input):
            ret = acl.update_data_buffer(self.input_buffer[i], inputs[i].data_ptr(), inputs[i].nbytes)
            checkStatus(ret, f"update input buffer[{i}] faild")

        # Allocate device output tensors and bind their pointers.
        outputs = []
        for i in range(self.num_output):
            output_desc = self.getOutputDescByIndex(i)
            outputs.append(torch.empty(size=tuple(output_desc.dims),
                                       dtype=ACL_DTYPE_TO_TORCH[output_desc.dtype],
                                       device=torch.device(f'npu:{self.device_id}')))
        for i in range(self.num_output):
            # torch.empty tensors are contiguous, so data_ptr() is safe here
            ret = acl.update_data_buffer(self.output_buffer[i], outputs[i].data_ptr(), outputs[i].nbytes)
            checkStatus(ret, f"update output buffer[{i}] faild")

        exec_async = kwargs.get('exec_async', False)
        loops      = kwargs.get('loops',  1)
        warmup     = kwargs.get('warmup', 20)
        if exec_async:
            if loops > 1:
                for _ in range(warmup):           #! warmup
                    ret = acl.mdl.execute_async(self.model_id, self.input_dataset, self.output_dataset, self.stream)

                start = time.time()
                for _ in range(loops):
                    ret = acl.mdl.execute_async(self.model_id, self.input_dataset, self.output_dataset, self.stream)
                ret = acl.rt.synchronize_stream(self.stream)
                print(f"model cost {(time.time() - start)/loops*1000}ms per iter in {loops} iterations!")
            else:
                ret = acl.mdl.execute_async(self.model_id, self.input_dataset, self.output_dataset, self.stream)
        else:
            if loops > 1:
                for _ in range(warmup):           #! warmup
                    ret = acl.mdl.execute(self.model_id, self.input_dataset, self.output_dataset)

                start = time.time()
                for _ in range(loops):
                    ret = acl.mdl.execute(self.model_id, self.input_dataset, self.output_dataset)
                print(f"model cost {(time.time() - start)/loops*1000}ms per iter in {loops} iterations!")
            else:
                ret = acl.mdl.execute(self.model_id, self.input_dataset, self.output_dataset)
        checkStatus(ret, "model inference failed")                  # only the last ret is checked, as before

        return outputs[0] if self.num_output == 1 else outputs

    def _dataCheck(self, data:torch.Tensor, index:int = 0, io:int = 0):
        """Assert that *data* matches the model's declared input at *index*.

        Args:
            data  (torch.Tensor):  tensor to validate
            index (int, optional): input/output index. Defaults to 0.
            io    (int, optional): 0 for input, 1 for output (currently only
                                   inputs are checked)
        Returns:
            None
        Raises:
            AssertionError: on any dtype / ndim / shape mismatch
        """
        assert isinstance(data, torch.Tensor),                               "Input shoule be torch.Tensor"
        input_desc = self.getInputDescByIndex(index)
        assert input_desc.dtype == TORCH_DTYPE_TO_ACL.get(data.dtype, None), f"Datatype of inputs[{index}] should be the same with the model! "
        assert input_desc.ndim  == data.ndim,                                f"Ndim  of inputs[{index}] should be the same with the model!"
        assert input_desc.dims  == list(data.shape),                         f"Shape of inputs[{index}] should be the same with the model!"

    def getOpAttr(self, op_name:str, attr_name:str):
        """Read an operator attribute (e.g. a conv's stride/padding) from the model.

        Args:
            op_name   (str): operator name
            attr_name (str): attribute name

        Returns:
            whatever acl.mdl.get_op_attr returns for that attribute
        """
        return acl.mdl.get_op_attr(self.model_desc, op_name, attr_name)

    def getInputDescByIndex(self, index):
        """Describe the model input at *index* (dtype, dims, format).

        Args:
            index (int): input tensor index (models may have several inputs)
        Returns:
            in_desc (InputOutputDesc): description of that input
        """
        #! get_input_dims_v2 returns the same result as get_input_dims
        dims_info, _ = acl.mdl.get_input_dims(self.model_desc, index)        # e.g. {"name":"input", "dimCount":4, "dims":[1, 3, 224, 224]}
        fmt          = acl.mdl.get_input_format(self.model_desc, index)      # int format code
        dtype        = acl.mdl.get_input_data_type(self.model_desc, index)   # int dtype code
        return InputOutputDesc(io = 0, index = index, dtype = dtype,
                ndim=dims_info['dimCount'], dims=dims_info['dims'], format=fmt)

    def getOutputDescByIndex(self, index : int = 0):
        """Describe the model output at *index* (dtype, dims, format).

        Args:
            index (int): output tensor index (models may have several outputs)
        Returns:
            out_desc (InputOutputDesc): description of that output
        """
        #! there is no get_output_dims_v2 interface for outputs
        dims_info, _ = acl.mdl.get_output_dims(self.model_desc, index)
        fmt          = acl.mdl.get_output_format(self.model_desc, index)     # int format code
        dtype        = acl.mdl.get_output_data_type(self.model_desc, index)  # int dtype code
        return InputOutputDesc(io = 1, index = index, dtype = dtype,
                ndim=dims_info['dimCount'], dims=dims_info['dims'], format=fmt)

    def getNumInputs(self):
        """Return the number of model inputs (int)."""
        return acl.mdl.get_num_inputs(self.model_desc)

    def getNumOutputs(self):
        """Return the number of model outputs (int)."""
        return acl.mdl.get_num_outputs(self.model_desc)

    def getInputBytes(self, index:int=0):
        """Return the size in bytes of the input tensor at *index*.

        Args:
            index (int): input tensor index
        Returns:
            n_bytes (int): memory footprint of that input
        """
        return acl.mdl.get_input_size_by_index(self.model_desc, index)

    def getOutputBytes(self, index:int=0):
        """Return the size in bytes of the output tensor at *index*.

        Args:
            index (int): output tensor index
        Returns:
            n_bytes (int): memory footprint of that output
        """
        return acl.mdl.get_output_size_by_index(self.model_desc, index)

    def getInputName(self, index:int=0):
        """Return the name of the input tensor at *index*.

        Args:
            index (int): input tensor index
        Returns:
            input_name (str): name of that input
        """
        return acl.mdl.get_input_name_by_index(self.model_desc, index)

    def getInputNames(self):
        """Return the list of all input tensor names.

        Returns:
            input_names (list[str]): names of all model inputs
        """
        # BUGFIX: the original built this list but never returned it.
        input_num   = acl.mdl.get_num_inputs(self.model_desc)
        input_names = []
        for idx in range(input_num):
            input_names.append(acl.mdl.get_input_name_by_index(self.model_desc, idx))
        return input_names

    def getOutputName(self, index:int=0):
        """Return the name of the output tensor at *index*.

        Args:
            index (int): output tensor index
        Returns:
            output_name (str): name of that output
        """
        # BUGFIX: the original queried get_input_name_by_index here.
        return acl.mdl.get_output_name_by_index(self.model_desc, index)

    def getOutputNames(self):
        """Return the list of all output tensor names.

        Returns:
            output_names (list[str]): names of all model outputs
        """
        # BUGFIX: the original built this list but never returned it.
        output_num   = acl.mdl.get_num_outputs(self.model_desc)
        output_names = []
        for idx in range(output_num):
            output_names.append(acl.mdl.get_output_name_by_index(self.model_desc, idx))
        return output_names

    def deinit(self):
        r"""Release the model and all device resources (inverse of init).

        Safe to call multiple times; a no-op unless initialized.
        """
        if not self._initialized:
            return

        if self.input_buffer:
            for buffer in self.input_buffer:
                acl.destroy_data_buffer(buffer)
            self.input_buffer = None

        if self.input_dataset:
            acl.mdl.destroy_dataset(self.input_dataset)
            self.input_dataset = None

        if self.output_buffer:
            for buffer in self.output_buffer:
                acl.destroy_data_buffer(buffer)
            self.output_buffer = None

        if self.output_dataset:
            acl.mdl.destroy_dataset(self.output_dataset)
            self.output_dataset = None

        if self.model_id:
            ret = acl.mdl.unload(self.model_id)
            checkStatus(ret, "unload model error")
            self.model_id = None

        if self.model_desc:
            acl.mdl.destroy_desc(self.model_desc)
            self.model_desc = None

        if self.stream:
            ret = acl.rt.destroy_stream(self.stream)
            checkStatus(ret, "destroy stream error")
            self.stream = None

        # if self.context:
        #     ret = acl.rt.destroy_context(self.context)
        #     checkStatus(ret, "destroy context error")
        #     self.context = None

        ret = acl.rt.reset_device(self.device_id)
        checkStatus(ret, "reset device error")

        # ret = acl.finalize()
        # checkStatus(ret, "finalize error")
        self._initialized = False