import os
import onnx
import copy
import numpy as np
import logging
import onnxruntime
import sys, getopt
import json

from collections import OrderedDict
from onnx import shape_inference

logging.basicConfig(level=logging.INFO)

from onnx import shape_inference, TensorProto, version_converter, numpy_helper

logger = logging.getLogger("[ONNXOPTIMIZER]")

# Module-level configuration, overwritten from the command line in main():
#   onnxfile   - path of the ONNX model to check (required, -i/--onnx)
#   nodefile   - optional path to dump the node list (-n/--nfile)
#   outputfile - optional path to dump the CPU-run outputs (-o/--ofile)
onnxfile = ''
nodefile = ''
outputfile = ''

'''
  --------------------ONNX Data Type-----------------
  enum DataType {
    UNDEFINED = 0;
    // Basic types.
    FLOAT = 1;   // float
    UINT8 = 2;   // uint8_t
    INT8 = 3;    // int8_t
    UINT16 = 4;  // uint16_t
    INT16 = 5;   // int16_t
    INT32 = 6;   // int32_t
    INT64 = 7;   // int64_t
    STRING = 8;  // string
    BOOL = 9;    // bool

    // IEEE754 half-precision floating-point format (16 bits wide).
    // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
    FLOAT16 = 10;

    DOUBLE = 11;
    UINT32 = 12;
    UINT64 = 13;
    COMPLEX64 = 14;     // complex with float32 real and imaginary components
    COMPLEX128 = 15;    // complex with float64 real and imaginary components

    // Non-IEEE floating-point format based on IEEE754 single-precision
    // floating-point number truncated to 16 bits.
    // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
    BFLOAT16 = 16;

    // Future extensions go here.
  }
'''

def convert_ort_type_2_np(ort_data_type):
    """Map an ONNX TensorProto elem_type code to the matching numpy dtype.

    Args:
        ort_data_type: integer ONNX data-type code (see the DataType enum
            comment above, e.g. 1 -> float32, 7 -> int64).

    Returns:
        The numpy scalar type for the code, "" for types with no numpy
        equivalent (STRING, BFLOAT16), or None for unknown codes.
    """
    # Looked up inline (same logger instance as the module-level `logger`)
    # so this helper also works when imported standalone.
    logging.getLogger("[ONNXOPTIMIZER]").info("convert_ort_type_2_np")

    types = {
        1: np.float32,
        2: np.uint8,
        3: np.int8,
        4: np.uint16,
        5: np.int16,
        6: np.int32,
        7: np.int64,
        8: "",            # STRING: no numpy scalar type
        9: np.bool_,
        10: np.float16,
        11: np.float64,
        12: np.uint32,
        13: np.uint64,
        14: np.complex64,
        # BUGFIX: np.complex_ was removed in NumPy 2.0; np.complex128 is the
        # exact same type under the non-deprecated name.
        15: np.complex128,
        16: "",           # BFLOAT16: no numpy equivalent
    }

    return types.get(ort_data_type, None)

def get_cosine(gpu_array, cpu_array):
    """Return the cosine *distance* (1 - cosine similarity) between two
    flattened output tensors.

    Args:
        gpu_array: 1-D numpy array from the device-under-test run.
        cpu_array: 1-D numpy array from the reference CPU run.

    Returns:
        float >= 0; 0 means identical direction, larger means worse match
        (compared against precision_threshold by the caller).
    """
    x = np.sqrt(np.sum(np.square(gpu_array)))
    y = np.sqrt(np.sum(np.square(cpu_array)))

    # BUGFIX: was the builtin sum(); np.sum is correct for any ndarray and
    # avoids a slow Python-level loop.
    z = np.sum(gpu_array * cpu_array)

    print('x y z:', x, y, z)

    # eps keeps the division defined for all-zero outputs.
    cosine_sim = (z + 1e-7) / ((x * y) + 1e-7)

    # BUGFIX: was max(cosine_sim, 1.0). Real-vector cosine similarity is
    # <= 1, so max() forced the similarity to exactly 1.0, the distance to
    # <= 0, and mismatches could never be reported. Clamp numerical
    # overshoot DOWN to 1.0 instead.
    cosine_sim = min(cosine_sim, 1.0)

    print('-----cosine:', cosine_sim)

    cosine = 1.0 - cosine_sim

    print('+++++cosine:', cosine)

    return cosine

def get_mse(gpu_array, cpu_array):
    """Mean squared error between the reference and device output arrays."""
    error = cpu_array - gpu_array
    mse = np.mean(error ** 2)

    print('mse:', mse)

    return mse

def get_snr(gpu_array, cpu_array):
    """Noise-to-signal power ratio: sum((cpu - gpu)^2) / sum(cpu^2).

    Smaller is better; 0 means the two runs match exactly.
    """
    noise_power = np.sum((cpu_array - gpu_array) ** 2)
    signal_power = np.sum(cpu_array ** 2)

    # eps in the denominator guards against an all-zero reference output.
    snr = np.mean(noise_power / (signal_power + 1e-7))

    print('snr:', snr)

    return snr
    
# Dispatch table: comparison-method name -> scoring function. All three
# return "smaller is better" scores that compare_result() checks against
# precision_threshold.
precision_cmp_method = {
    "mse": get_mse,
    "cosine": get_cosine,
    "snr": get_snr
}

# Active comparison method and its pass/fail threshold; both are
# overridden from the command line in main().
precision_cmp_str = 'snr'
precision_threshold = 0.1 

def compare_result(ort_outs_cpu, ort_outs_gpu, node_list): 
    """Compare per-output tensors of the reference (CPU) run against the
    second-provider run and report the first output that diverges.

    Args:
        ort_outs_cpu: OrderedDict of output name -> np.ndarray (reference run).
        ort_outs_gpu: mapping of output name -> np.ndarray (run under test);
            must contain every key of ort_outs_cpu.
        node_list: list of {"name", "input", "output"} dicts used to map a
            diverging tensor back to the node that produced it.

    Side effects: prints the verdict; returns None.
    """
    match = True
    banner = 'X' * 65
    for name, cpu_value in ort_outs_cpu.items():
        assert cpu_value.__class__ == np.ndarray
        cpu_flat = cpu_value.flatten()
        print('cpu: ', cpu_flat)
        assert name in ort_outs_gpu
        gpu_flat = ort_outs_gpu[name].flatten()
        print('gpu: ', gpu_flat)

        # Score with the configured metric; smaller is better.
        cmp_value = precision_cmp_method[precision_cmp_str](cpu_flat, gpu_flat)
        if cmp_value > precision_threshold:
            match = False
            print(banner)
            print(banner)
            print('WARNING: output ', name, ' is abnormal, please check it~~')
            # Map the bad tensor back to the node that produced it.
            for node in node_list:
                if name in node['output']:
                    # BUGFIX: message typo "Dismatch" -> "Mismatch".
                    print('Mismatch node name: ', node['name'], ', input:', node['input'])
                    break
            print(banner)
            print(banner)
            break  # stop at the first diverging output

    if match:
        print('===================================================')
        print('===================================================')
        print('Congratulations, it works well as expected')

def test_model_by_onnxruntime(model):
#def test_model_by_onnxruntime():
    """Run *model* through onnxruntime twice -- a reference run on the
    default provider and a second run on EP_list -- with every intermediate
    node output promoted to a graph output, then compare the two runs.

    Args:
        model: loaded onnx.ModelProto. NOTE: model.graph.output is mutated
            in place (intermediate outputs are appended and kept).

    Returns:
        OrderedDict mapping output name -> np.ndarray from the first run.

    Side effects: writes node_list to the module-level `nodefile` path and
    the reference outputs to `outputfile` when those are non-empty; prints
    progress and the final comparison verdict.
    """
    logger.info("Test model by onnxruntime")

    #model=onnx.load(ort_model)

    input_shape = model.graph.input[0].type.tensor_type.shape.dim
    
    # NOTE(review): shadows the `type` builtin; holds the ONNX elem_type code.
    type = model.graph.input[0].type.tensor_type.elem_type
    
    print('input type is', type)
    #attribute[0]

    # Concretize unknown dims: 0 -> 1, -1 -> random in 1..4; keep the rest.
    image_shape = [x.dim_value for x in input_shape]
    image_shape_new = []
    for x in image_shape:
        if x == 0:
            image_shape_new.append(1)
        elif x == -1:
            n = np.random.randint(1,5)  
            image_shape_new.append(n) 
        else:
            image_shape_new.append(x)
    
    image_shape = image_shape_new
    # Random input tensor with the model's declared element type.
    img_array = np.array(np.random.random(image_shape), dtype = convert_ort_type_2_np(type))
    img = img_array

    ori_outputs = [x.name for x in model.graph.output]
    ori_outputs_backup=model.graph.output[:]
    #print('ori:', ori_outputs)
    
    node_list=[]
    
    del model.graph.output[:]

    # Expose every node's outputs as graph outputs so per-layer values can
    # be fetched from the session, then re-append the original outputs.
    for node in model.graph.node:
        # NOTE(review): shadows the `dict` builtin.
        dict={"name":node.name, "input":node.input, "output":node.output}
        node_list.append(dict)
        for output in node.output:
            if output not in ori_outputs:
                model.graph.output.extend([onnx.ValueInfoProto(name=output)])
                
    model.graph.output.extend(ori_outputs_backup)
                
    #for i, val in enumerate(node_list):
    #    print('num:', i, ', node:', val) 

    if nodefile != '' :    
      with open(nodefile, 'w') as f:
         f.write(str(node_list))

    ort_session = onnxruntime.InferenceSession(model.SerializeToString())

    ort_inputs = {}

    # Feed the same random tensor to every input of the model.
    for i, input_ele in enumerate(ort_session.get_inputs()):
        ort_inputs[input_ele.name] = img

    outputs = [x.name for x in ort_session.get_outputs()]

    print('output list:')
    print(outputs)

    print('begin run cpu......')

    ort_outs = ort_session.run(outputs, ort_inputs)
    
    #print(ort_outs)

    ort_outs = OrderedDict(zip(outputs, ort_outs))

    #np.set_printoptions(threshold=sys.maxsize)
    np.set_printoptions(threshold=10)

    out_list=[]
    out_dict={}

    # Serialize the reference outputs (nested lists for arrays, raw otherwise).
    for k,v in ort_outs.items():
       #print(k, ':', v)
       #print('v.type:', v.__class__)
       if True == hasattr(v, 'tolist') :       
           dict={"output":k, "value":v.tolist()}
       else:
           dict={"output":k, "value":v} 
           
       out_list.append(dict)
       
    #print('list:', out_list)
    
    if outputfile != '' : 
      with open(outputfile, 'w') as f:
         f.write(str(out_list))

    print('-----------------------------------------------------------')
    print('-----------------------------------------------------------')
    
    print('begin run gpu......')
    
    # just for test: to force a mismatch report, uncomment the following lines
    #img_array = np.array(np.random.random(image_shape), dtype = convert_ort_type_2_np(type))
    #img = img_array
    #for i, input_ele in enumerate(ort_session.get_inputs()):
    #    ort_inputs[input_ele.name] = img
    
    # just for test, you should use MacavxExecutionProvider
    EP_list = ['CPUExecutionProvider']
    #EP_list = ['MacavxExecutionProvider']
    
    ort_session_gpu = onnxruntime.InferenceSession(model.SerializeToString(), providers=EP_list)
    
    ort_outs_gpu = ort_session_gpu.run(outputs, ort_inputs)
    
    ort_outs_gpu = OrderedDict(zip(outputs, ort_outs_gpu))

    out_list_gpu=[]

    for k,v in ort_outs_gpu.items():
       #print(k, ':', v)
       #print('v.type:', v.__class__)
       if True == hasattr(v, 'tolist') :       
           dict={"output":k, "value":v.tolist()}
       else:
           dict={"output":k, "value":v} 
           
       out_list_gpu.append(dict)
    
    #print('list:', out_list)
    
    logger.info("Test model by onnxruntime finish")
    
    # Compare the two runs tensor-by-tensor and report the first mismatch.
    compare_result(ort_outs, ort_outs_gpu, node_list)

    #del model.graph.output[:]

    #model.graph.output.extend(ori_output)

    return ort_outs
    
def usage():
    """Print command-line usage for this script."""
    print('python onnx_debug.py -i <onnxfile>')
    print('or')
    note = '({})'.format('Not yet support for now')
    print('python onnx_debug.py -i <onnxfile> -n <nodefile> -o <outputfile>', note)

def main(argv):
    """Parse command-line options, select the comparison method and its
    threshold, then run the layer-by-layer precision check on the model.

    Args:
        argv: argument list, normally sys.argv[1:].
    """
    global onnxfile
    global nodefile
    global outputfile
    global precision_cmp_str
    global precision_threshold

    try:
        # BUGFIX: "compare_method=" was missing from the long-option list,
        # so --compare_method always raised GetoptError even though it is
        # handled in the loop below.
        opts, args = getopt.getopt(
            argv, "hi:n:o:c:",
            ["onnx=", "nfile=", "ofile=", "compare_method="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-i", "--onnx"):
            onnxfile = arg
        elif opt in ("-n", "--nfile"):
            nodefile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
        elif opt in ("-c", "--compare_method"):
            precision_cmp_str = arg

    # The model path is the only mandatory argument.
    if onnxfile == '':
        usage()
        sys.exit()

    print('model file: ', onnxfile)
    print('node file: ', nodefile)
    print('output file: ', outputfile)
    print('compare method: ', precision_cmp_str)

    # Each metric has its own pass/fail threshold scale.
    if precision_cmp_str == 'snr':
        precision_threshold = 0.1
    elif precision_cmp_str == 'mse':
        precision_threshold = 0.0
    elif precision_cmp_str == 'cosine':
        precision_threshold = 0.03
    else:
        print('precision_cmp_str can only be one of [\'snr\', \'mse\', \'cosine\']')
        sys.exit()

    onnx_model = onnx.load(onnxfile)

    test_model_by_onnxruntime(onnx_model)

if __name__ == "__main__":
   main(sys.argv[1:])
