import os
import onnx
import copy
import numpy as np
import logging
import onnxruntime
import sys, getopt
import json
import subprocess
import pickle

from collections import OrderedDict
from onnx import shape_inference
from onnx import numpy_helper, helper

logging.basicConfig(level=logging.INFO)

from onnx import shape_inference, TensorProto, version_converter, numpy_helper

logger = logging.getLogger("[ONNXOPTIMIZER]")

# Fixed RNG seed so randomly generated model inputs are reproducible run-to-run.
np.random.seed(5) 


# CLI-configurable globals, filled in by main() from command-line options.
onnxfile = ''
input_npy = ''
outputfile = ''
test_data_folder=''

'''
  --------------------ONNX Data Type-----------------
  enum DataType {
    UNDEFINED = 0;
    // Basic types.
    FLOAT = 1;   // float
    UINT8 = 2;   // uint8_t
    INT8 = 3;    // int8_t
    UINT16 = 4;  // uint16_t
    INT16 = 5;   // int16_t
    INT32 = 6;   // int32_t
    INT64 = 7;   // int64_t
    STRING = 8;  // string
    BOOL = 9;    // bool

    // IEEE754 half-precision floating-point format (16 bits wide).
    // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
    FLOAT16 = 10;

    DOUBLE = 11;
    UINT32 = 12;
    UINT64 = 13;
    COMPLEX64 = 14;     // complex with float32 real and imaginary components
    COMPLEX128 = 15;    // complex with float64 real and imaginary components

    // Non-IEEE floating-point format based on IEEE754 single-precision
    // floating-point number truncated to 16 bits.
    // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
    BFLOAT16 = 16;

    // Future extensions go here.
  }
'''

def convert_ort_type_2_np(ort_data_type):
    """Map an ONNX TensorProto element-type code to its numpy dtype.

    Args:
        ort_data_type: integer ONNX DataType enum value (see the enum table
            in the module-level comment above).

    Returns:
        The numpy scalar type, '' for types with no numpy equivalent
        (STRING=8, BFLOAT16=16), or None for unknown codes.
    """
    types = {
        1: np.float32,
        2: np.uint8,
        3: np.int8,
        4: np.uint16,
        5: np.int16,
        6: np.int32,
        7: np.int64,
        8: "",  # string: no numpy scalar type
        9: np.bool_,
        10: np.float16,
        11: np.float64,
        12: np.uint32,
        13: np.uint64,
        14: np.complex64,
        # bug fix: np.complex_ was removed in NumPy 2.0; np.complex128 is the
        # exact same type under NumPy 1.x, so behavior is unchanged there.
        15: np.complex128,
        16: "",  # bfloat16: no numpy equivalent
    }

    return types.get(ort_data_type, None)


def get_tensor_type_by_data_type(dtype):
    """Map a numpy dtype to the corresponding onnx TensorProto element type.

    Args:
        dtype: a numpy dtype object; its ``.name`` attribute ('float32',
            'int64', ...) is used as the lookup key.

    Returns:
        The matching TensorProto.* enum value, or None for unsupported dtypes
        (e.g. bool, complex, string).
    """
    print('get_tensor_type_by_data_type: ', dtype.name)

    # Keyed by dtype *name* strings rather than numpy scalar types, because
    # numpy scalar classes and dtype objects do not reliably hash/compare
    # equal as dict keys.
    np_name_to_tensor_type = {
        'float16': TensorProto.FLOAT16,
        'float32': TensorProto.FLOAT,
        'int8': TensorProto.INT8,
        'int16': TensorProto.INT16,
        'int32': TensorProto.INT32,
        'int64': TensorProto.INT64,
        'uint8': TensorProto.UINT8,
        'uint16': TensorProto.UINT16,
        'uint32': TensorProto.UINT32,
        'uint64': TensorProto.UINT64,
        'float64': TensorProto.DOUBLE,
    }

    return np_name_to_tensor_type.get(dtype.name, None)

def get_output(command):
    """Run *command* (an argv list) and return its stdout, ASCII-decoded
    and stripped of surrounding whitespace.

    Raises subprocess.CalledProcessError when the command exits non-zero.
    """
    completed = subprocess.run(command, check=True, stdout=subprocess.PIPE)
    return completed.stdout.decode("ascii").strip()

def generate_onnx_model_random_input(model, input_npy=''):
    """Build a feed dict of random tensors for every non-initializer input.

    Args:
        model: a loaded onnx.ModelProto.
        input_npy: optional path to a .npy file; when non-empty, its contents
            replace the random data for each free graph input.

    Returns:
        dict mapping input name -> numpy array, suitable for
        onnxruntime.InferenceSession.run().
    """
    ort_inputs = {}

    # Graph "inputs" that are also initializers are weights, not runtime
    # inputs; collect their names (set => O(1) membership below).
    initializer_names = set()
    for init in model.graph.initializer:
        print('got init: ', init.name)
        initializer_names.add(init.name)

    for graph_input in model.graph.input:
        if graph_input.name not in initializer_names:
            dims = graph_input.type.tensor_type.shape.dim
            elem_type = graph_input.type.tensor_type.elem_type

            print('-----input name is', graph_input.name)

            data_shape = []
            for d in dims:
                x = d.dim_value
                if x == 0:
                    # Unset/symbolic dimension: assume size 1.
                    data_shape.append(1)
                elif x == -1:
                    # Dynamic dimension: pick a small random size.
                    # bug fix: the random value was previously computed but
                    # never used (1 was appended instead).
                    data_shape.append(int(np.random.randint(1, 5)))
                else:
                    data_shape.append(x)

            data_array = np.array(np.random.random(data_shape),
                                  dtype=convert_ort_type_2_np(elem_type))

            ort_inputs[graph_input.name] = data_array

            # An explicit .npy file overrides the random tensor.
            # NOTE(review): the same file is loaded for every free input, so
            # this is only meaningful for single-input models — confirm.
            if input_npy != '':
                ort_inputs[graph_input.name] = np.load(input_npy)

    return ort_inputs

def test_model_by_onnxruntime(model, input_npy=''):
    """Run *model* through onnxruntime on CPU with random (or .npy) inputs.

    All model outputs are fetched, printed, pickled to '1.pkl' in the current
    directory, and returned.

    Args:
        model: a loaded onnx.ModelProto.
        input_npy: optional .npy path forwarded to
            generate_onnx_model_random_input().

    Returns:
        OrderedDict mapping output name -> numpy array.
    """
    # Truncate printed arrays so large tensors stay readable in the log.
    np.set_printoptions(threshold=10)

    EP_list = ['CPUExecutionProvider']
    ort_session = onnxruntime.InferenceSession(model.SerializeToString(),
                                               providers=EP_list)

    ort_inputs = generate_onnx_model_random_input(model, input_npy)

    outputs = [x.name for x in ort_session.get_outputs()]
    print('output list:')
    print(outputs)

    print('begin run cpu......')
    ort_outs = ort_session.run(outputs, ort_inputs)
    ort_outs = OrderedDict(zip(outputs, ort_outs))
    print('ort_outs', ort_outs)

    # Model-specific debug dump for a QA-style model with a 'start_prob:0'
    # output. bug fix: the lookup was previously unconditional and raised
    # KeyError for every model without that output name.
    if 'start_prob:0' in ort_outs:
        out = ort_outs['start_prob:0']
        preds_top5 = np.argsort(-out.reshape(-1, 128)[0:1], axis=1)[:, :128]
        ll = preds_top5.flatten().tolist()
        print('preds_top5:', ll)

    # Persist outputs for offline comparison.
    with open("1.pkl", "wb") as tf:
        pickle.dump(ort_outs, tf)

    return ort_outs
    
def usage():
    """Print command-line help for this script."""
    for line in (
        'python onnx_debug.py -i <onnxfile>',
        'or',
        'python onnx_debug.py -f <test_data_folder>',
    ):
        print(line)

def main(argv):
    """Parse CLI options, load the ONNX model, and run it via onnxruntime.

    Options:
        -h                      show help and exit
        -i / --onnx             path to the .onnx model file
        -n / --nfile            path to a .npy input-data file
        -o / --ofile            output file path (parsed but unused here)
        -f / --folder           test-data folder (mutually exclusive with -i)
        -c / --compare_method   precision comparison method string

    Exits with status 2 on bad options, and with status 0 after -h or when
    the -i / -f constraints are violated.
    """
    global onnxfile
    global input_npy
    global outputfile
    global test_data_folder
    global precision_cmp_str

    try:
        # bug fix: '--compare_method' was matched in the loop below but was
        # missing from the long-option list, so using the long form raised
        # GetoptError and printed usage instead of being accepted.
        opts, args = getopt.getopt(
            argv, "hi:n:o:f:c:",
            ["onnx=", "nfile=", "ofile=", "folder=", "compare_method="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-i", "--onnx"):
            onnxfile = arg
        elif opt in ("-n", "--nfile"):
            input_npy = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
        elif opt in ("-f", "--folder"):
            test_data_folder = arg
        elif opt in ("-c", "--compare_method"):
            precision_cmp_str = arg

    print('onnx file path: ', onnxruntime.__file__)

    # Exactly one of -i / -f must be given.
    if onnxfile == '' and test_data_folder == '':
        print('Warning: you should specify onnxfile(-i) or test_data_folder(-f)')
        usage()
        sys.exit()

    if onnxfile != '' and test_data_folder != '':
        print('Warning: you should not specify both onnxfile and test_data_folder(use -i or -f only)')
        sys.exit()

    print('model file: ', onnxfile)
    print('input_npy: ', input_npy)
    print('output file: ', outputfile)

    onnx_model = onnx.load(onnxfile)
    test_model_by_onnxruntime(onnx_model, input_npy)

# Script entry point: forward CLI arguments (minus the program name) to main().
if __name__ == "__main__":
   main(sys.argv[1:])
