import os
import onnx
import copy
import numpy as np
import logging
import onnxruntime
import sys, getopt
import json
import subprocess

from collections import OrderedDict
from onnx import shape_inference
from onnx import numpy_helper, helper

logging.basicConfig(level=logging.INFO)

from onnx import shape_inference, TensorProto, version_converter, numpy_helper

logger = logging.getLogger("[ONNXOPTIMIZER]")

# Command-line-configurable paths; all are filled in by main().
onnxfile = ''        # path to the .onnx model under test
nodefile = ''        # optional dump file for the parsed node list
outputfile = ''      # optional dump file for the reference outputs
test_data_folder=''  # folder holding model.onnx plus test_data_set_* dirs

'''
  --------------------ONNX Data Type-----------------
  enum DataType {
    UNDEFINED = 0;
    // Basic types.
    FLOAT = 1;   // float
    UINT8 = 2;   // uint8_t
    INT8 = 3;    // int8_t
    UINT16 = 4;  // uint16_t
    INT16 = 5;   // int16_t
    INT32 = 6;   // int32_t
    INT64 = 7;   // int64_t
    STRING = 8;  // string
    BOOL = 9;    // bool

    // IEEE754 half-precision floating-point format (16 bits wide).
    // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
    FLOAT16 = 10;

    DOUBLE = 11;
    UINT32 = 12;
    UINT64 = 13;
    COMPLEX64 = 14;     // complex with float32 real and imaginary components
    COMPLEX128 = 15;    // complex with float64 real and imaginary components

    // Non-IEEE floating-point format based on IEEE754 single-precision
    // floating-point number truncated to 16 bits.
    // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
    BFLOAT16 = 16;

    // Future extensions go here.
  }
'''

def convert_ort_type_2_np(ort_data_type):
    """Map an ONNX TensorProto DataType code to the matching numpy type.

    Args:
        ort_data_type: integer ONNX DataType enum value (see the table above).

    Returns:
        The numpy scalar type, "" for types with no numpy equivalent
        (STRING, BFLOAT16), or None for unknown codes.
    """
    types = {
        1 : np.float32,
        2 : np.uint8,
        3 : np.int8,
        4 : np.uint16,
        5 : np.int16,
        6 : np.int32,
        7 : np.int64,
        8 : "",  # STRING has no numpy dtype
        9 : np.bool_,
        10 : np.float16,
        11 : np.float64,
        12 : np.uint32,
        13 : np.uint64,
        14 : np.complex64,
        # BUG FIX: np.complex_ was removed in NumPy 2.0; np.complex128 is
        # the canonical name for the same type on all NumPy versions.
        15 : np.complex128,
        16 : ""  # BFLOAT16 has no native numpy dtype
    }

    return types.get(ort_data_type, None)


def get_tensor_type_by_data_type(dtype):
    """Map a numpy dtype to the matching onnx TensorProto element type.

    Args:
        dtype: a numpy dtype object; the lookup keys off its ``.name``
            (e.g. 'float32'), which is why plain numpy scalar types are
            not used as keys.

    Returns:
        The TensorProto type constant, or None for unsupported dtypes
        (e.g. bool or string dtypes).
    """
    print('get_tensor_type_by_data_type: ', dtype.name)

    name_to_tensor_type = {
        'float16' : TensorProto.FLOAT16,
        'float32' : TensorProto.FLOAT,
        'int8' : TensorProto.INT8,
        'int16' : TensorProto.INT16,
        'int32' : TensorProto.INT32,
        'int64' : TensorProto.INT64,
        'uint8' : TensorProto.UINT8,
        'uint16' : TensorProto.UINT16,
        'uint32' : TensorProto.UINT32,
        'uint64' : TensorProto.UINT64,
        'float64' : TensorProto.DOUBLE
    }

    return name_to_tensor_type.get(dtype.name, None)


def make_test_model(op_type, input_list, output_dict, attributes, init_list):
    """Build a single-node ONNX model and save it as ./test_<op_type>/model.onnx.

    Used to reproduce a mismatching node in isolation: the generated model
    contains exactly one node of type *op_type* wired to the described
    inputs and output.

    Args:
        op_type: ONNX operator type for the node (e.g. 'Conv').
        input_list: list of dicts with keys 'name', 'dim', 'shape', 'dtype'.
        output_dict: dict with the same keys describing the single output.
        attributes: mapping of node attribute name -> value (may be empty).
        init_list: list of TensorProto initializers to embed in the graph.
    """
    print('make_test_model, op_type:', op_type)

    input_tensor = []
    output_tensor = []
    input_name = []

    for input in input_list:
        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('make_test_model, input name: ', input['name'])
        print('make_test_model, input dim: ', input['dim'])
        print('make_test_model, input shape: ', input['shape'])
        print('make_test_model, input dtype: ', input['dtype'], type(input['dtype']).__name__)

        input_name.append(input['name'])

        # 'dtype' is either already a TensorProto enum value or a numpy
        # dtype that still has to be translated.
        t = input['dtype']
        if type(input['dtype']).__name__ == 'dtype':
            t = get_tensor_type_by_data_type(input['dtype'])

        input_tensor.append(helper.make_tensor_value_info(input['name'], t, input['shape']))

    print('-------------------------------------------------------------')
    print('make_test_model, output name: ', output_dict['name'])
    print('make_test_model, output dim: ', output_dict['dim'])
    print('make_test_model, output shape: ', output_dict['shape'])
    print('make_test_model, output dtype: ', output_dict['dtype'], type(output_dict['dtype']))

    t = output_dict['dtype']
    if type(output_dict['dtype']).__name__ == 'dtype':
        t = get_tensor_type_by_data_type(output_dict['dtype'])

    output_tensor.append(helper.make_tensor_value_info(output_dict['name'], t, output_dict['shape']))

    for k, v in attributes.items():
        print('attribute:', k, ', value:', v)

    # BUG FIX: the node used to be created inside the attribute loop above,
    # so a node with no attributes left node_def undefined (NameError at
    # make_graph) and a node with several attributes rebuilt it per attribute.
    if len(attributes) > 0:
        node_def = helper.make_node(
                                op_type,                # node op type
                                input_name,             # inputs
                                [output_dict['name']],  # outputs
                                **attributes
                                )
    else:
        node_def = helper.make_node(
                                op_type,                # node op type
                                input_name,             # inputs
                                [output_dict['name']],  # outputs
                                )

    graph_def = helper.make_graph(
                                [node_def],
                                'test_model',
                                input_tensor, # graph inputs
                                output_tensor, # graph outputs
                                initializer=init_list,
                                )

    mode_def = helper.make_model(graph_def, producer_name='onnx-example', opset_imports=[helper.make_opsetid('', 11)])
    #onnx.checker.check_model(mode_def)

    # Make sure the target folder exists even when compare_result() has not
    # already created it for the test data dump.
    save_dir = './test_' + op_type
    os.makedirs(save_dir, exist_ok=True)
    onnx.save(mode_def, os.path.join(save_dir, 'model.onnx'))

def get_output(command):
    """Run *command* (a list of argv strings) and return its stripped stdout.

    Raises CalledProcessError if the command exits non-zero.
    """
    completed = subprocess.run(command, check=True, stdout=subprocess.PIPE)
    return completed.stdout.decode("ascii").strip()

def split_and_sort_output(string_list):
    """Split a newline-separated string into a sorted list of lines."""
    return sorted(string_list.split("\n"))

def load_onnx_test_data(path, all_inputs_shape, data_type="fp32"):
    """Load serialized input tensors from test_data_set_* dirs under *path*.

    Args:
        path: folder searched recursively (via the `find` command) for
            directories named test_data*.
        all_inputs_shape: list of input shapes, mutated in place on the
            first call; left untouched when already non-empty.
        data_type: "fp16" converts float32 tensors to float16 while loading.

    Returns:
        A list with one entry per test_data_set directory, each entry being
        the list of numpy arrays parsed from its input_*.pb files (sorted
        by file name).
    """
    logger.info("Parsing test data in {} ...".format(path))
    output = get_output(["find", path, "-name", "test_data*", "-type", "d"])
    test_data_set_dir = split_and_sort_output(output)
    logger.info(test_data_set_dir)

    inputs = []

    shape_flag = False
    # if not empty means input shape has been parsed before.
    if len(all_inputs_shape) > 0:
        shape_flag = True

    # find test data path
    for test_data_dir in test_data_set_dir:
        # chdir into the data set so the relative `find .` below only sees
        # this directory's files; restored before the next iteration.
        pwd = os.getcwd()
        os.chdir(test_data_dir)

        # load inputs
        output = get_output(["find", ".", "-name", "input*"])
        input_data = split_and_sort_output(output)
        logger.info(input_data)

        input_data_pb = []
        for data in input_data:
            tensor = onnx.TensorProto()
            with open(data, 'rb') as f:
                print('begin read ', data)
                tensor.ParseFromString(f.read())
                tensor_to_array = numpy_helper.to_array(tensor)
                # fp16 runs may still ship fp32 .pb files; convert on load.
                if data_type == "fp16" and tensor_to_array.dtype == np.dtype(np.float32):
                    tensor_to_array = tensor_to_array.astype(np.float16)
                input_data_pb.append(tensor_to_array)
                if not shape_flag:
                    all_inputs_shape.append(input_data_pb[-1].shape)
                logger.info(all_inputs_shape[-1])
        inputs.append(input_data_pb)
        logger.info('Loaded {} inputs successfully.'.format(len(inputs)))

        os.chdir(pwd)

    return inputs

def get_ort_session_inputs(session, ort_input):
    """Pair positional input arrays with the session's input names.

    Args:
        session: an onnxruntime.InferenceSession (anything exposing
            get_inputs() whose items have a .name attribute).
        ort_input: sequence of arrays in the same order as the session inputs.

    Returns:
        dict mapping input name -> array, suitable for session.run().
    """
    # The original initialized sess_inputs twice and re-called get_inputs()
    # on every loop iteration; enumerate over a single call instead.
    sess_inputs = {}
    for i, sess_input in enumerate(session.get_inputs()):
        print('get_ort_session_inputs, name', sess_input.name)
        sess_inputs[sess_input.name] = ort_input[i]

    return sess_inputs

def prepare_dir(path):
    """Create *path* (including parents) if it does not already exist.

    os.makedirs(..., exist_ok=True) replaces the previous exists-then-create
    pair, which was racy and failed if two dumps targeted the same folder
    concurrently.
    """
    os.makedirs(path, exist_ok=True)

def get_cosine(gpu_array, cpu_array):
    """Return the cosine *distance* (1 - cosine similarity) of two arrays.

    0.0 means the vectors point in the same direction; larger values mean a
    bigger mismatch.  Callers pass flattened 1-D arrays.
    """
    x = np.sqrt(np.sum(np.square(gpu_array)))
    y = np.sqrt(np.sum(np.square(cpu_array)))
    z = np.sum(gpu_array * cpu_array)

    print('x y z:', x, y, z)

    # eps guards against division by zero for all-zero tensors.
    cosine_sim = (z + 1e-7) / ((x * y) + 1e-7)

    # BUG FIX: this was max(cosine_sim, 1.0), which forced every similarity
    # UP to at least 1.0 and therefore every returned distance DOWN to <= 0,
    # so the cosine comparison could never flag a mismatch.  Clamp downward
    # instead: float error can push the ratio slightly above 1.
    cosine_sim = min(cosine_sim, 1.0)

    print('-----cosine:', cosine_sim)

    cosine = 1.0 - cosine_sim

    print('+++++cosine:', cosine)

    return cosine

def get_mse(gpu_array, cpu_array):
    """Return the mean squared error between the two arrays (0.0 = identical)."""
    err = cpu_array - gpu_array
    mse = np.mean(err ** 2)

    print('mse:', mse)

    return mse

def get_snr(gpu_array, cpu_array):
    """Return the noise-to-signal power ratio of the gpu output vs the cpu one.

    Sum of squared differences divided by the (eps-guarded) sum of squared
    reference values; 0.0 means a perfect match.
    """
    noise_power = np.sum(np.square(np.subtract(cpu_array, gpu_array)))
    signal_power = np.sum(np.square(cpu_array))

    snr = np.mean(noise_power / (signal_power + 1e-7))

    print('snr:', snr)

    return snr
    
# Dispatch table: metric name -> comparison function.  Each function takes
# two flattened arrays and returns a scalar score where larger means a
# bigger mismatch.
precision_cmp_method = {
    "mse": get_mse,
    "cosine": get_cosine,
    "snr": get_snr
}

# Active metric and its mismatch threshold; both are overridden in main()
# based on the -c command-line option.
precision_cmp_str = 'snr'
precision_threshold = 0.1

def compare_result(ort_outs_cpu, ort_outs_gpu, node_list, initializer):
    """Compare the two runs' outputs and dump a repro model on the first mismatch.

    Walks every tensor in ``ort_outs_gpu``, compares it against the tensor of
    the same name in ``ort_outs_cpu`` using the globally selected metric
    (``precision_cmp_str`` / ``precision_threshold``).  On the first mismatch
    the producing node is looked up in ``node_list``, its inputs and output
    are serialized as .pb files under ./test_<op_type>/test_data_set_0/, and
    a single-node model is generated via make_test_model() so the operator
    can be debugged in isolation.

    Args:
        ort_outs_cpu: OrderedDict name -> np.ndarray from the reference run.
        ort_outs_gpu: OrderedDict name -> np.ndarray from the run under test.
        node_list: list of per-node dicts with keys
            name/input/output/op_type/attribute (built by the caller).
        initializer: the model's graph.initializer list, used for node inputs
            that are weights rather than upstream outputs.
    """
    match = True
    data_set_dir = './'

    node_abnormal = {}
    node_abnormal_attributes = {}
    output_abnormal = {}

    for k, v in ort_outs_gpu.items():
        assert v.__class__ == np.ndarray

        c = ort_outs_cpu[k].flatten()
        print('cpu: ', c)
        g = v.flatten()
        print('gpu: ', g)
        assert g.dtype == c.dtype

        cmp_value = precision_cmp_method[precision_cmp_str](c, g)
        if cmp_value <= precision_threshold:
            continue

        # First mismatching tensor: record the node that produced it.
        match = False
        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
        print('WARNING: output ', k, ' is abnormal, please check it~~')

        for node in node_list:
            if k not in node['output']:
                continue

            node_abnormal = node
            print('Dismatch node name: ', node['name'], ', input:', node['input'])

            save_path = './test_' + node['op_type']
            prepare_dir(save_path)

            data_set_dir = os.path.join(save_path, 'test_data_set_0')
            prepare_dir(data_set_dir)

            with open(os.path.join(data_set_dir, 'output_0.pb'), 'wb') as f:
                f.write(numpy_helper.from_array(v, k).SerializeToString())

            # BUG FIX: these fields used to be filled in only inside the
            # attribute loop below, so attribute-less nodes reached
            # make_test_model() with an empty output dict (KeyError).
            output_abnormal['dim'] = v.ndim
            output_abnormal['shape'] = v.shape
            output_abnormal['dtype'] = v.dtype
            output_abnormal['name'] = k

            if len(node['attribute']) > 0:
                print('+++++++++++++++++++ got attribute:')
                for attr in node['attribute']:
                    value = helper.get_attribute_value(attr)
                    print('parse attr: ', attr.name, attr.type, value)
                    node_abnormal_attributes[attr.name] = value

            break

        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')

        # Only the first abnormal output is investigated.
        break

    if match:
        print('===================================================')
        print('===================================================')
        print('Congratulations, it works well as expected')
        return

    # NOTE(review): if the abnormal output is not found in node_list,
    # node_abnormal stays empty and the lookups below raise KeyError — the
    # original code behaved the same way; confirm whether that can happen.
    abnormal_input_counts = len(node_abnormal['input'])

    for k, v in node_abnormal_attributes.items():
        print('node_abnormal_attributes: ', k, ', value: ', v)

    input_abnormal = []

    # Dump every node input that is itself another node's output.
    j = 0
    for k, v in ort_outs_cpu.items():
        if k in node_abnormal['input']:
            print('find ', k, 'in', node_abnormal['input'])
            with open(os.path.join(data_set_dir, 'input_{}.pb'.format(j)), 'wb') as f:
                f.write(numpy_helper.from_array(v, k).SerializeToString())
            j = j + 1

            d = {}
            d['dim'] = v.ndim
            d['shape'] = v.shape
            d['dtype'] = v.dtype
            d['name'] = k
            input_abnormal.append(d)

    init_list = []

    # Inputs not covered above must be weights: pull them from the graph
    # initializers.
    if j != abnormal_input_counts:
        for input in node_abnormal['input']:
            for init in initializer:
                if init.name == input:
                    print('got node from initializer for ', init.name, ', data_type: ', init.data_type, ', dims: ', init.dims)
                    array = numpy_helper.to_array(init)
                    with open(os.path.join(data_set_dir, 'input_{}.pb'.format(j)), 'wb') as f:
                        f.write(numpy_helper.from_array(array, input).SerializeToString())
                    j = j + 1

                    d = {}
                    d['dim'] = len(init.dims)
                    d['shape'] = array.shape
                    d['dtype'] = init.data_type
                    d['name'] = init.name
                    input_abnormal.append(d)
                    init_list.append(init)

    make_test_model(node_abnormal['op_type'], input_abnormal, output_abnormal, node_abnormal_attributes, init_list)

def generate_onnx_model_random_input(model):
    """Create random numpy inputs for every non-initializer graph input.

    Unknown (0) dims become 1; -1 dims become a random size in [1, 5).

    Args:
        model: a loaded onnx.ModelProto.

    Returns:
        dict mapping input name -> randomly filled np.ndarray.
    """
    ort_inputs = {}

    # Names of weights, which appear in graph.input but are not runtime inputs.
    init_names = []
    for init in model.graph.initializer:
        print('got init: ', init.name)
        init_names.append(init.name)

    for graph_input in model.graph.input:
        if graph_input.name in init_names:
            continue

        dims = graph_input.type.tensor_type.shape.dim
        elem_type = graph_input.type.tensor_type.elem_type

        print('-----input name is', graph_input.name)

        shape = []
        for d in dims:
            if d.dim_value == 0:
                shape.append(1)
            elif d.dim_value == -1:
                shape.append(np.random.randint(1, 5))
            else:
                shape.append(d.dim_value)

        ort_inputs[graph_input.name] = np.array(
            np.random.random(shape), dtype=convert_ort_type_2_np(elem_type))

    return ort_inputs

def test_model_by_onnxruntime(model):
    """Run *model* on two execution providers and compare every node output.

    Every intermediate tensor is promoted to a graph output, the model is
    run once per provider, and compare_result() checks each tensor pair.
    NOTE(review): both sessions currently use CUDAExecutionProvider, so the
    runs are identical until one EP_list is swapped for the provider under
    test (see the commented MacavxExecutionProvider line below).

    Args:
        model: a loaded onnx.ModelProto; its graph outputs are modified in
            place (every node output is appended).

    Returns:
        OrderedDict mapping output name -> np.ndarray from the first run.
    """

    ori_outputs = [x.name for x in model.graph.output]
    ori_outputs_backup=model.graph.output[:]
    #print('ori:', ori_outputs)
    
    node_list=[]
    
    del model.graph.output[:]

    # Record every node and expose each of its outputs as a graph output so
    # both runs return all intermediate tensors.
    for node in model.graph.node:
        dict={"name":node.name, "input":node.input, "output":node.output, "op_type": node.op_type, "attribute":node.attribute}
        node_list.append(dict)
        for output in node.output:
            if output not in ori_outputs:
                model.graph.output.extend([onnx.ValueInfoProto(name=output)])
                
    model.graph.output.extend(ori_outputs_backup)
                
    #for i, val in enumerate(node_list):
    #    print('num:', i, ', node:', val) 

    # Optional dump of the parsed node list (-n option).
    if nodefile != '' :
      with open(nodefile, 'w') as f:
         f.write(str(node_list))

    EP_list = ['CUDAExecutionProvider']
    ort_session = onnxruntime.InferenceSession(model.SerializeToString(), providers=EP_list)

    ort_inputs = generate_onnx_model_random_input(model)

    outputs = [x.name for x in ort_session.get_outputs()]

    print('output list:')
    print(outputs)

    # Prefer recorded test data over random inputs when a folder was given
    # (-f option); only the first data set is used.
    if test_data_folder != '':
        inputs = load_onnx_test_data(test_data_folder, [])
        for input in inputs:
            ort_inputs = get_ort_session_inputs(ort_session, input)
            break #only once

    print('begin run cpu......')

    ort_outs = ort_session.run(outputs, ort_inputs)
    
    #print(ort_outs)

    ort_outs = OrderedDict(zip(outputs, ort_outs))

    #np.set_printoptions(threshold=sys.maxsize)
    np.set_printoptions(threshold=10)

    out_list=[]
    out_dict={}

    for k,v in ort_outs.items():
       #print('cpu---- ', k, ':', v)
       #print('v.type:', v.__class__)
       if True == hasattr(v, 'tolist') :
           dict={"output":k, "value":v.tolist()}
       else:
           dict={"output":k, "value":v} 
           
       out_list.append(dict)
       
    #print('list:', out_list)
    
    # Optional dump of the reference outputs (-o option).
    if outputfile != '' :
      with open(outputfile, 'w') as f:
         f.write(str(out_list))

    print('-----------------------------------------------------------')
    print('-----------------------------------------------------------')
    
    print('begin run gpu......')
    
    #just for test,if you want to generate dismatch msg, uncomment belowing lines
    #img_array = np.array(np.random.random(image_shape), dtype = convert_ort_type_2_np(type))
    #img = img_array
    #for i, input_ele in enumerate(ort_session.get_inputs()):
    #    ort_inputs[input_ele.name] = img
    
    #just for test, you should use MacavxExecutionProvider
    EP_list = ['CUDAExecutionProvider']
    #EP_list = ['MacavxExecutionProvider']
    
    ort_session_gpu = onnxruntime.InferenceSession(model.SerializeToString(), providers=EP_list)
    
    ort_outs_gpu = ort_session_gpu.run(outputs, ort_inputs)
    
    ort_outs_gpu = OrderedDict(zip(outputs, ort_outs_gpu))

    out_list_gpu=[]

    for k,v in ort_outs_gpu.items():
       #print('gpu--- ', k, ':', v)
       #print('v.type:', v.__class__)
       if True == hasattr(v, 'tolist') :
           dict={"output":k, "value":v.tolist()}
       else:
           dict={"output":k, "value":v} 
           
       out_list_gpu.append(dict)
    
    #print('list:', out_list)
    
    logger.info("Test model by onnxruntime finish")
    
    compare_result(ort_outs, ort_outs_gpu, node_list, model.graph.initializer)

    #del model.graph.output[:]

    #model.graph.output.extend(ori_output)

    return ort_outs
    
def usage():
    """Print command-line help for this script."""
    for line in ('python onnx_debug.py -i <onnxfile>',
                 'or',
                 'python onnx_debug.py -f <test_data_folder>'):
        print(line)

def main(argv):
    """Parse command-line options and run the layer-by-layer comparison.

    Options:
        -h                   show usage and exit
        -i/--onnx            path to an .onnx model (random inputs generated)
        -n/--nfile           dump file for the parsed node list
        -o/--ofile           dump file for the reference outputs
        -f/--folder          folder with model.onnx + test_data_set_* dirs
        -c/--compare_method  one of 'snr', 'mse', 'cosine'

    Exits via sys.exit() on invalid/missing options or paths.
    """
    global onnxfile
    global nodefile
    global outputfile
    global test_data_folder
    global precision_cmp_str
    global precision_threshold

    try:
        # BUG FIX: "compare_method=" was missing from the long-option list,
        # so --compare_method always raised GetoptError even though the
        # handler below expects it.
        opts, args = getopt.getopt(
            argv, "hi:n:o:f:c:",
            ["onnx=", "nfile=", "ofile=", "folder=", "compare_method="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-i", "--onnx"):
            onnxfile = arg
        elif opt in ("-n", "--nfile"):
            nodefile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
        elif opt in ("-f", "--folder"):
            test_data_folder = arg
        elif opt in ("-c", "--compare_method"):
            precision_cmp_str = arg

    print('onnx file path: ', onnxruntime.__file__)

    if onnxfile == '' and test_data_folder == '':
        print('Warning: you should specify onnxfile(-i) or test_data_folder(-f)')
        usage()
        sys.exit()

    if onnxfile != '' and test_data_folder != '':
        print('Warning: you should not specify both onnxfile and test_data_folder(use -i or -f only)')
        sys.exit()

    print('model file: ', onnxfile)
    print('node file: ', nodefile)
    print('output file: ', outputfile)
    print('test_data_folder: ', test_data_folder)
    print('compare method: ', precision_cmp_str)

    # Each metric has its own mismatch threshold.
    if precision_cmp_str == 'snr':
        precision_threshold = 0.1
    elif precision_cmp_str == 'mse':
        precision_threshold = 0.0
    elif precision_cmp_str == 'cosine':
        precision_threshold = 0.03
    else:
        print('precision_cmp_str can only be one of [\'snr\', \'mse\', \'cosine\']')
        sys.exit()

    if test_data_folder != '':
        if not os.path.exists(test_data_folder) or os.path.isfile(test_data_folder):
            print('ERROR: ', test_data_folder, ' is not exist or is not a folder')
            sys.exit()

        # The test-data layout puts model.onnx next to the test_data_set_*
        # dirs; os.path.join handles a trailing slash either way.
        onnxfile = os.path.join(test_data_folder, 'model.onnx')

        if not os.path.exists(onnxfile):
            print('ERROR: onnx file: ', onnxfile, ' is not exist')
            sys.exit()

    onnx_model = onnx.load(onnxfile)

    test_model_by_onnxruntime(onnx_model)

# Script entry point: forward everything after the program name to main().
if __name__ == "__main__":
   main(sys.argv[1:])
