import os
import onnx
import copy
import numpy as np
import logging
import onnxruntime
import sys, getopt
import json
import subprocess

from collections import OrderedDict
from onnx import shape_inference
from onnx import numpy_helper, helper

from detect import (DecodeBox, non_max_suppression, yolo_correct_boxes)

logging.basicConfig(level=logging.INFO, filename='./inference.log', filemode='w')

from onnx import shape_inference, TensorProto, version_converter, numpy_helper

logger = logging.getLogger("[INFERENCE]")

'''
  --------------------ONNX Data Type-----------------
  enum DataType {
    UNDEFINED = 0;
    // Basic types.
    FLOAT = 1;   // float
    UINT8 = 2;   // uint8_t
    INT8 = 3;    // int8_t
    UINT16 = 4;  // uint16_t
    INT16 = 5;   // int16_t
    INT32 = 6;   // int32_t
    INT64 = 7;   // int64_t
    STRING = 8;  // string
    BOOL = 9;    // bool

    // IEEE754 half-precision floating-point format (16 bits wide).
    // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
    FLOAT16 = 10;

    DOUBLE = 11;
    UINT32 = 12;
    UINT64 = 13;
    COMPLEX64 = 14;     // complex with float32 real and imaginary components
    COMPLEX128 = 15;    // complex with float64 real and imaginary components

    // Non-IEEE floating-point format based on IEEE754 single-precision
    // floating-point number truncated to 16 bits.
    // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
    BFLOAT16 = 16;

    // Future extensions go here.
  }
'''

def convert_ort_type_2_np(ort_data_type):
    """Map an ONNX TensorProto element-type code to a numpy dtype.

    Args:
        ort_data_type: integer element type from the ONNX DataType enum
            (see the table at the top of this file).

    Returns:
        The matching numpy type object, "" for types with no numpy
        equivalent here (STRING=8, BFLOAT16=16), or None for unknown codes.
    """
    types = {
        1: np.float32,
        2: np.uint8,
        3: np.int8,
        4: np.uint16,
        5: np.int16,
        6: np.int32,
        7: np.int64,
        8: "",  # string -- no numpy equivalent used here
        9: np.bool_,
        10: np.float16,
        11: np.float64,
        12: np.uint32,
        13: np.uint64,
        14: np.complex64,
        # BUG FIX: np.complex_ was removed in NumPy 2.0; np.complex128 is the
        # same dtype and works on all NumPy versions.
        15: np.complex128,
        16: ""  # bfloat16 -- no native numpy dtype
    }

    return types.get(ort_data_type, None)


def get_tensor_type_by_data_type(dtype):
    """Map a numpy dtype to its ONNX TensorProto element type.

    Args:
        dtype: a numpy dtype object (its .name attribute is used).

    Returns:
        The TensorProto.* constant for the dtype, or None when the dtype
        name has no ONNX counterpart in the table below.
    """
    print('get_tensor_type_by_data_type: ', dtype.name)

    name_to_proto = {
        'float16': TensorProto.FLOAT16,
        'float32': TensorProto.FLOAT,
        'float64': TensorProto.DOUBLE,
        'int8': TensorProto.INT8,
        'int16': TensorProto.INT16,
        'int32': TensorProto.INT32,
        'int64': TensorProto.INT64,
        'uint8': TensorProto.UINT8,
        'uint16': TensorProto.UINT16,
        'uint32': TensorProto.UINT32,
        'uint64': TensorProto.UINT64,
    }

    return name_to_proto.get(dtype.name, None)

def get_output(command):
    """Run `command` (argv list, no shell) and return its stripped stdout.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    p = subprocess.run(command, check=True, stdout=subprocess.PIPE)
    # BUG FIX: decode as UTF-8 instead of ASCII -- `find` output containing
    # any non-ASCII path would raise UnicodeDecodeError under "ascii".
    output = p.stdout.decode("utf-8").strip()
    return output

def split_and_sort_output(string_list):
    """Split a newline-separated string into a sorted list of lines."""
    return sorted(string_list.split("\n"))

def load_onnx_test_data(path, all_inputs_shape, data_type="fp32"):
    """Load protobuf test inputs from every test_data* directory below `path`.

    Args:
        path: root directory; searched with `find` for "test_data*" dirs.
        all_inputs_shape: list mutated in place -- input shapes are appended
            only when it arrives empty (i.e. on the first call).
        data_type: "fp16" down-casts float32 tensors to float16; any other
            value leaves tensors untouched.

    Returns:
        A list with one entry per test_data* directory; each entry is the
        list of numpy arrays parsed from that directory's input* files.
    """
    logger.info("Parsing test data in {} ...".format(path))
    output = get_output(["find", path, "-name", "test_data*", "-type", "d"])
    test_data_set_dir = split_and_sort_output(output)
    logger.info(test_data_set_dir)

    inputs = []

    shape_flag = False
    # if not empty means input shape has been parsed before.
    if len(all_inputs_shape) > 0:
        shape_flag = True

    # find test data path
    for test_data_dir in test_data_set_dir:
        # NOTE(review): the cwd is changed and only restored at the end of
        # the loop body -- an exception mid-loop leaves the process chdir'd.
        pwd = os.getcwd()
        os.chdir(test_data_dir)

        # load inputs
        output = get_output(["find", ".", "-name", "input*"])
        input_data = split_and_sort_output(output)
        logger.info(input_data)

        input_data_pb = []
        for data in input_data:
            tensor = onnx.TensorProto()
            with open(data, 'rb') as f:
                print('begin read ', data)
                tensor.ParseFromString(f.read())
                tensor_to_array = numpy_helper.to_array(tensor)
                # Down-cast fp32 tensors when the caller asked for fp16 data.
                if data_type == "fp16" and tensor_to_array.dtype == np.dtype(np.float32):
                    tensor_to_array = tensor_to_array.astype(np.float16)
                input_data_pb.append(tensor_to_array)
                if not shape_flag:
                    all_inputs_shape.append(input_data_pb[-1].shape)
                logger.info(all_inputs_shape[-1])
        inputs.append(input_data_pb)
        logger.info('Loaded {} inputs successfully.'.format(len(inputs)))

        os.chdir(pwd)

    return inputs

def get_ort_session_inputs(session, ort_input):
    """Map positional input arrays to the session's named inputs.

    Args:
        session: an onnxruntime.InferenceSession (anything whose
            get_inputs() returns objects with a .name attribute works).
        ort_input: sequence of input arrays in the same order as
            session.get_inputs().

    Returns:
        dict mapping input name -> corresponding array from ort_input.
    """
    # Cleanup: the dict was initialized twice and session.get_inputs() was
    # re-queried three times per loop iteration in the original.
    sess_inputs = {}
    for i, input_meta in enumerate(session.get_inputs()):
        print('get_ort_session_inputs, name', input_meta.name)
        sess_inputs[input_meta.name] = ort_input[i]

    return sess_inputs

def get_cosine(gpu_array, cpu_array):
    """Cosine *distance* (1 - cosine similarity) between two flat arrays.

    Returns a value near 0.0 for matching outputs and up to ~1.0 for
    orthogonal ones. Epsilons guard against zero-norm inputs.
    """
    x = np.sqrt(np.sum(np.square(gpu_array)))
    y = np.sqrt(np.sum(np.square(cpu_array)))

    # Use np.sum (the original used builtin sum, which is slow on ndarrays).
    z = np.sum(gpu_array * cpu_array)

    print('x y z:', x, y, z)

    cosine_sim = (z + 1e-7) / ((x * y) + 1e-7)  # eps

    # BUG FIX: the original used max(cosine_sim, 1.0), which pins the
    # similarity at 1.0 (Cauchy-Schwarz bounds it above by ~1), so the
    # function always returned 0.0. min() clamps float noise above 1.0
    # while preserving the real similarity.
    cosine_sim = min(cosine_sim, 1.0)

    cosine = np.mean(cosine_sim)

    print('-----cosine:', cosine)

    cosine = 1.0 - cosine

    print('+++++cosine:', cosine)

    return cosine

def get_mse(gpu_array, cpu_array):
    """Mean squared error between the CPU reference and the GPU output."""
    err = np.subtract(cpu_array, gpu_array)
    mse = np.mean(err ** 2)

    print('mse:', mse)

    return mse

def get_snr(gpu_array, cpu_array):
    """Noise-to-signal power ratio of gpu_array relative to cpu_array.

    0.0 means identical outputs; larger values mean more deviation. The
    epsilon in the denominator guards against an all-zero reference.
    """
    noise_power = np.sum(np.square(np.subtract(cpu_array, gpu_array)))
    signal_power = np.sum(np.square(cpu_array))

    snr = np.mean(noise_power / (signal_power + 1e-7))

    print('snr:', snr)

    return snr
    
# Dispatch table: metric name -> comparison function. Each function takes
# (gpu_array, cpu_array) and returns a scalar where smaller is better.
precision_cmp_method = {
    "mse": get_mse,
    "cosine": get_cosine,
    "snr": get_snr
}

# Metric selected for comparisons and its pass/fail threshold.
precision_cmp_str = 'snr'
precision_threshold = 0.1 

# NOTE(review): `math` does not appear to be used below -- confirm before
# removing. Convention would place this import at the top of the file.
import math 

def compare_result(ort_outs_onnx, ort_outs_origin):
    """Compare two output arrays using the configured precision metric.

    Args:
        ort_outs_onnx: output array produced by the model under test.
        ort_outs_origin: reference output array.

    Returns:
        True when the metric value is within precision_threshold; False
        (after printing a warning banner) otherwise.
    """
    v1 = ort_outs_onnx.flatten()
    v2 = ort_outs_origin.flatten()

    # Consistency fix: use the module-level metric dispatch and threshold
    # instead of hard-coding get_snr and 0.1 (the previous hard-coded 0.1
    # happened to equal precision_threshold, so behavior is unchanged for
    # the default configuration). Also dropped the unused match/seq locals.
    cmp_fn = precision_cmp_method[precision_cmp_str]
    cmp_value = cmp_fn(v1, v2)
    if cmp_value > precision_threshold:
        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
        print('WARNING: output is abnormal, please check it~~')
        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
        return False

    return True

def generate_onnx_model_random_input(model, input_npy=''):
    """Build a feed dict for every real (non-initializer) graph input.

    Args:
        model: a loaded onnx.ModelProto.
        input_npy: optional path to a .npy file; when non-empty its contents
            replace the random data for every input (matching the original
            behavior of overriding each entry with the same array).

    Returns:
        dict mapping input name -> numpy array.
    """
    ort_inputs = {}

    # Initializer names are weights/constants, not inputs the caller feeds.
    # A set makes the membership test O(1) instead of O(n) per input.
    initializer_names = {init.name for init in model.graph.initializer}

    for graph_input in model.graph.input:  # renamed: `input` shadowed the builtin
        if graph_input.name in initializer_names:
            continue

        dims = graph_input.type.tensor_type.shape.dim
        elem_type = graph_input.type.tensor_type.elem_type  # renamed: `type` shadowed the builtin

        print('-----input name is', graph_input.name)

        # Replace unknown/dynamic dims (dim_value of 0 or -1) with 1, as the
        # original did. (The original also drew an unused random int in the
        # -1 branch; that dead draw is removed here.)
        data_shape = [d.dim_value if d.dim_value > 0 else 1 for d in dims]

        data_array = np.array(np.random.random(data_shape),
                              dtype=convert_ort_type_2_np(elem_type))
        ort_inputs[graph_input.name] = data_array

        if input_npy != '':
            ort_inputs[graph_input.name] = np.load(input_npy)

    return ort_inputs

def run_onnx_model(model_path, input_npy='', output_npy='', test_case=''):
    """Load an onnx model, run it on CPU, and optionally check its output.

    Args:
        model_path: path to the .onnx file.
        input_npy: optional .npy file used as input data (random data is
            generated when empty).
        output_npy: optional .npy reference; when given, the first model
            output is compared against it and that comparison's result is
            returned.
        test_case: "darknet2onnx" routes to the darknet-specific runner.

    Returns:
        True on success (or when the comparison passes), False on any
        load/session/run failure or a failed comparison.
    """

    if test_case == 'darknet2onnx':
        return run_onnx_model_for_darknet(model_path, input_npy, output_npy)

    # NOTE(review): BaseException also catches KeyboardInterrupt/SystemExit;
    # Exception would be the narrower choice -- confirm before changing.
    try:
        model = onnx.load(model_path)
    except BaseException as e:
        print('The model cannot be load for: %s' % e)
        return False
    else:
        print('Load mode success')

    EP_list = ['CPUExecutionProvider']

    try:
        ort_session = onnxruntime.InferenceSession(model.SerializeToString(), providers=EP_list)
    except BaseException as e:
        print('Create InferenceSession Failed: %s' % e)
        return False
    else:
        print('Create InferenceSession success')

    ort_inputs = generate_onnx_model_random_input(model, input_npy)

    print('ort_inputs:', ort_inputs)

    outputs = [x.name for x in ort_session.get_outputs()]

    print('output list:')
    print(outputs)

    print('begin run cpu......')

    try:
        ort_outs = ort_session.run(outputs, ort_inputs)
    except BaseException as e:
        print('Cannot run model for: %s' % e)
        return False
    else:
        print('Run model success')
    
    # Pair output names with their arrays, preserving run order.
    ort_outs = OrderedDict(zip(outputs, ort_outs))

    print('+++++++++++++++ort_outs:', ort_outs[outputs[0]].shape)

    #sys.exit()

    #np.set_printoptions(threshold=sys.maxsize)
    #np.set_printoptions(threshold=10)

    # Only the FIRST output is compared against the reference file.
    if output_npy != '':  
        ort_outs = ort_outs[outputs[0]]
        return compare_result(ort_outs, np.load(output_npy, allow_pickle=True))

    return True    

def run_onnx_model_for_darknet(model_path, input_npy, output_npy):
    """Run a darknet-converted YOLO onnx model and verify its detections.

    Exposes three intermediate convolution outputs, runs the model on CPU,
    decodes the three YOLO heads, applies NMS and box correction, and (when
    output_npy is given) compares the final detection array against the
    saved reference.

    Args:
        model_path: path to the .onnx file.
        input_npy: optional .npy input file (random data when empty).
        output_npy: optional .npy reference of the expected detections.

    Returns:
        True on success / passing comparison, False on any failure.
    """
    import torch
    
    try:
        model = onnx.load(model_path)
    except BaseException as e:
        print('The model cannot be load for: %s' % e)
        return False
    else:
        print('Load mode success')

    ori_outputs = [x.name for x in model.graph.output]
    ori_outputs_backup=model.graph.output[:]

    # Drop the declared outputs and instead expose the three raw YOLO head
    # convolutions so they can be decoded manually below.
    del model.graph.output[:]

    intermedia_list = ['106_convolutional', '094_convolutional', '082_convolutional']
    for node in model.graph.node:
        for output in node.output:
            if output not in ori_outputs and output in intermedia_list:
                print('set intermedia output', output)
                model.graph.output.extend([onnx.ValueInfoProto(name=output)])
                
    EP_list = ['CPUExecutionProvider']

    try:
        ort_session = onnxruntime.InferenceSession(model.SerializeToString(), providers=EP_list)
    except BaseException as e:
        print('Create InferenceSession Failed: %s' % e)
        return False
    else:
        print('Create InferenceSession success')

    ort_inputs = generate_onnx_model_random_input(model, input_npy)

    print('ort_inputs:', ort_inputs)

    outputs = [x.name for x in ort_session.get_outputs()]

    print('output list:')
    print(outputs)

    print('begin run cpu......')

    try:
        ort_outs = ort_session.run(outputs, ort_inputs)
    except BaseException as e:
        print('Cannot run model for: %s' % e)
        return False
    else:
        print('Run model success')
    
    ort_outs = OrderedDict(zip(outputs, ort_outs))

    print('+++++++++++++++ort_outs:', ort_outs[outputs[0]].shape)

    if output_npy != '':
        ort_outs_0 = ort_outs[outputs[0]]
        ort_outs_1 = ort_outs[outputs[1]]
        ort_outs_2 = ort_outs[outputs[2]]

        output_list = []

        # Decode each YOLO head with its anchor set (80 classes, 608x608
        # network input). Per-head grid sizes are 76x76 / 38x38 / 19x19.
        anchors = [[10, 13], [16, 30], [33, 23]]
        db = DecodeBox(anchors, 80, [608, 608])
        ort_outs_2 = torch.from_numpy(ort_outs_2)
        output_76x76 = db.detect(ort_outs_2)
        output_list.append(output_76x76)

        anchors = [[30, 61], [62, 45], [59, 119]]
        db = DecodeBox(anchors, 80, [608, 608])
        ort_outs_1 = torch.from_numpy(ort_outs_1)
        output_38x38 = db.detect(ort_outs_1)
        output_list.append(output_38x38)

        anchors = [[116, 90], [156, 198], [373, 326]]
        db = DecodeBox(anchors, 80, [608, 608])
        ort_outs_0 = torch.from_numpy(ort_outs_0)
        output_19x19 = db.detect(ort_outs_0)
        output_list.append(output_19x19)

        detect_output = torch.cat(output_list, 1)
        batch_detections = non_max_suppression(detect_output, 80,
                                                    conf_thres=0.5,
                                                    nms_thres=0.45)

        # Only the first image of the batch is post-processed.
        batch_detections = batch_detections[0].numpy()

        #---------------------------------------------------------#
        #   Filter predicted boxes by score
        #   (objectness * class confidence must exceed 0.5).
        #---------------------------------------------------------#
        top_index = batch_detections[:,4] * batch_detections[:,5] > 0.5
        top_conf = batch_detections[top_index,4]*batch_detections[top_index,5]
        top_label = np.array(batch_detections[top_index,-1],np.int32)
        top_bboxes = np.array(batch_detections[top_index,:4])
        top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(top_bboxes[:,0],-1),np.expand_dims(top_bboxes[:,1],-1),np.expand_dims(top_bboxes[:,2],-1),np.expand_dims(top_bboxes[:,3],-1)

        #-----------------------------------------------------------------#
        #   Before the image is fed to the network, letterbox_image pads it
        #   with gray bars, so top_bboxes is relative to the padded image.
        #   Correct the boxes here to remove the gray-bar offset.
        #   (608x608 is the network input; 576x768 is presumably the
        #   original image size -- TODO confirm against the caller.)
        #-----------------------------------------------------------------#
        boxes = yolo_correct_boxes(top_ymin,top_xmin,top_ymax,top_xmax,np.array([608, 608]), np.array([576, 768]))
        
        # Convert (ymin, xmin, ymax, xmax) corners to center/size layout.
        box_corner = np.ones_like(boxes)
        box_corner[:, 0] = boxes[:, 1] + (boxes[:, 3] - boxes[:, 1]) / 2
        box_corner[:, 1] = boxes[:, 0] + (boxes[:, 2] - boxes[:, 0]) / 2
        box_corner[:, 2] = boxes[:, 3] - boxes[:, 1]
        box_corner[:, 3] = boxes[:, 2] - boxes[:, 0]
        #boxes[:, :, :4] = box_corner[:, :, :4]

        # Two extra columns per detection: score and class label.
        extra = np.empty([box_corner.shape[0], 2]) 

        for i, c in enumerate(top_label):
            score = top_conf[i]

            # NOTE(review): top/left/bottom/right are unpacked but unused;
            # only `extra` is filled in this loop.
            top, left, bottom, right = box_corner[i]

            extra[i] = np.array([score, c])

        # Final layout per row: [cx, cy, w, h, score, class].
        result = np.concatenate((box_corner, extra), axis=1)

        print('detect result:', result)

        return compare_result(result, np.load(output_npy, allow_pickle=True))

    #logger.info('Inference success!')

    return True       