# -*- coding: utf-8 -*-
import sys, getopt
import onnx
from onnx import defs, helper, GraphProto, TensorProto, shape_inference
import random
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import importlib
import importlib.util
import onnx.mapping
import logging
import os.path
import time

#from .test_op  import test_model_by_onnxruntime
import test_op
import op.MappingTypes as MappingTypes

# Configuration spaces used to sample random tensor ranks and dimension sizes.
cs_dim = CS.ConfigurationSpace()
cs_dim_value = CS.ConfigurationSpace()

# Upper bounds for sampled shapes: rank <= max_dim, each dim <= max_dim_value.
max_dim = 5
max_dim_value = 16#32 #4096

op_set_version = 11

# Number of models generated per data type
model_count_per_data_type = 10

# Number of test runs per model
test_count_per_model = 10

test_count_for_attributes = 20

test_count_for_initializer = 10

# Root logger for the whole tool.  (A named "[ONNXOPTIMIZER]" logger used to be
# created here and then immediately overwritten by the root logger; the dead
# assignment has been removed.)
logger = logging.getLogger()
logger.setLevel(logging.INFO)  # master switch for the log level

# Per-op lists of tensor types to skip when generating test models.
OpTypeFilter = {
    'Clip':['tensor(int32)', 'tensor(uint32)', 'tensor(int16)', 'tensor(uint16)'], #op: 14
    'BitShift':['tensor(uint16)'],
    'QuantizeLinear': ['tensor(int32)'],
    'DequantizeLinear': ['tensor(int32)'],
    'QLinearMatMul':['tensor(int8)'],
    'QLinearConv':['tensor(int8)'],
    'ConvInteger':['tensor(int8)'],
    'ConcatFromSequence':['seq(tensor(complex64))', 'seq(tensor(complex128))'],
    'Compress':['tensor(complex128)', 'tensor(complex64)'],
    'Add':['tensor(int16)', 'tensor(uint16)', 'tensor(uint64)', 'tensor(uint32)', 'tensor(uint8)', 'tensor(int8)'],
    'ArgMax': ['tensor(int16)', 'tensor(uint16)', 'tensor(int8)', 'tensor(uint8)', 'tensor(uint64)', 'tensor(uint32)', 'tensor(uint64)', 'tensor(int64)'],
    'ArgMin': ['tensor(int16)', 'tensor(uint16)', 'tensor(int8)', 'tensor(uint8)', 'tensor(uint64)', 'tensor(uint32)', 'tensor(uint64)', 'tensor(int64)']
}

# Ops whose inputs/outputs are sequence types rather than plain tensors.
InputTypeSeqList = ['ConcatFromSequence']
OutputTypeSeqList = []

def get_op_type_filter(op):
    """Return the list of tensor types to skip for operator *op* ([] if none)."""
    if op in OpTypeFilter:
        return OpTypeFilter[op]
    return []

def init_logger():
    """Attach a timestamped file handler (./Logs/YYYYmmddHHMM.log) to the logger.

    The handler logs at DEBUG level with a filename/lineno formatter.  The
    Logs directory is created on demand; previously logging.FileHandler would
    raise FileNotFoundError when it was missing.
    """
    rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
    log_path = os.getcwd() + '/Logs/'
    print('log path:', log_path)
    # Ensure the target directory exists before opening the handler.
    os.makedirs(log_path, exist_ok=True)
    log_name = log_path + rq + '.log'
    fh = logging.FileHandler(log_name, mode='w')
    fh.setLevel(logging.DEBUG)  # log-level switch for the file output

    # Define the output format for the handler.
    formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
    fh.setFormatter(formatter)
    logger.addHandler(fh)

def get_initializer_min_test_cnt(op):
    """Return the minimum initializer-test iteration count for *op* (0 if none)."""
    min_counts = {
        'QLinearMatMul': 4,
        'QLinearConv': 4,
        'QuantizeLinear': 4,
        'DequantizeLinear': 4,
        'BatchNormalization': 4,
    }
    return min_counts.get(op, 0)

def get_schema(op_type):
    """Look up and pretty-print the ONNX schema for *op_type*.

    Prints the schema's input/output arity, per-input and per-output type
    lists (with their mapped tensor-type codes) and its attributes.

    Returns:
        (schema, True) when the op is registered, (None, False) otherwise.
    """
    try:
        schema = defs.get_schema(op_type, '')
        print('----------------   op: ', op_type, '  ---------------------------')
        print('max_input:', schema.max_input, ', min_input:', schema.min_input)
        print('max_output:', schema.max_output, ', min_output:', schema.min_output)
        print('input_size: ',  len(schema.inputs))
        print('output_size: ',  len(schema.outputs))
        print('has_type_and_shape_inference_function: ', schema.has_type_and_shape_inference_function)

        print('******************************** inputs: ******************************')
        for input in schema.inputs:
            print('input name:', input.name)
            for in_type in input.types:
                t = MappingTypes.get_tensor_type(in_type)
                print('    input type:', in_type, t)

        print('')

        print('******************************** outputs: ******************************')
        for output in schema.outputs:
            print('output name:', output.name)
            for out_type in output.types:
                t = MappingTypes.get_tensor_type(out_type)
                print('    output type:', out_type, t)

        print('')

        print('******************************** attributes: ************************')
        for k, v in schema.attributes.items():
            print(k, ' : required: ', v.required, ', type: ', v.type)

        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')

        return schema, True
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate; onnx raises an Exception subclass for unknown ops.
    except Exception:
        print('No schema registered for ', op_type)
        return None, False

def get_random_shape():
    """Sample a random tensor shape.

    The rank is drawn from the `cs_dim` configuration space and each
    dimension size from `cs_dim_value`.
    """
    rank = cs_dim.sample_configuration().get_dictionary()['dim']

    shape = []
    for _ in range(rank):
        sampled = cs_dim_value.sample_configuration().get_dictionary()
        shape.append(sampled['dim_value'])
    return shape

def get_random_tensor(type):
    """Build a ValueInfoProto named 'Y' of *type* with a random shape.

    Fixed: the constructed tensor value info was previously discarded
    (the function implicitly returned None); it is now returned.
    """
    shape = get_random_shape()
    return helper.make_tensor_value_info('Y', type, shape)

def check_module(module_name):
    """Check whether *module_name* can be imported, without importing it.

    Returns the module spec when importable, otherwise None.
    """
    spec = importlib.util.find_spec(module_name)
    if spec is not None:
        print("Module: {} can be imported".format(module_name))
        return spec
    print("Module: {} not found".format(module_name))
    return None

def test_op_set(schema):
    """Generate randomized ONNX models for the operator in *schema* and test them.

    Five nested loops sweep: initializer variants x attribute combinations x
    optional-input counts x models-per-data-type x input data types; every
    generated model is then run test_count_per_model times through
    test_op.test_model_by_onnxruntime.  On the first mismatch the model is
    saved to '<op>_<index>.onnx' and the process exits.

    NOTE(review): generation optionally delegates to a helper class
    op.<OpName> (loaded dynamically); when that module is absent the code
    falls back to schema-only generation — confirm both paths are intended.
    """
    initializer_min_test_cnt = get_initializer_min_test_cnt(schema.name)
    print('initializer_min_test_cnt: ', initializer_min_test_cnt)
    print('test_count_per_model: ', test_count_per_model)

    # Try to load the per-op helper class op.<OpName> (e.g. op.MatMul).
    target_module = 'op.' + schema.name
    module_find = check_module(target_module)
    print('----get module: ', module_find)

    if module_find != None :
        module = importlib.import_module(target_module)
        cls = getattr(module, schema.name)
        obj = cls()

    # Running index used to name the generated .onnx files.
    index = 0

    out_type_index = 0

    attributes_len = len(schema.attributes)
    attributes_combination_len = 0

    if attributes_len > 0 :
        if module_find != None :
            attributes_combination_len = obj.get_max_attributes_combination()
            print('attributes_combination_len: ', attributes_combination_len)

    attributes_combination_test_times =  1
    if attributes_combination_len > 0:
        attributes_combination_test_times =  attributes_combination_len + test_count_for_attributes

    # real_inputs: schema inputs minus any that will be fed as initializers.
    real_inputs = []
    for input in schema.inputs:
        real_inputs.append(input)

    real_max_input = schema.max_input
    real_min_input = schema.min_input      

    initializer_name_list = []
    if initializer_min_test_cnt != 0:
        if module_find != None :
            initializer_name_list = obj.get_initializer_name_list()
            print('initializer_name_list: ', initializer_name_list)
            real_inputs = []
            for input in schema.inputs:
                if input.name not in initializer_name_list:
                    real_inputs.append(input)

            real_max_input = real_max_input - len(real_inputs)
            real_min_input = real_min_input - len(real_inputs)

    print('++++ real_max_input:', real_max_input, ', real_min_input:', real_min_input)
    # Cap variadic ops (e.g. max_input == INT_MAX) to a practical bound.
    if real_max_input > 32:
        real_max_input = 32

    initializer_test_count = 1
    if initializer_min_test_cnt != 0:
        initializer_test_count = test_count_for_initializer + initializer_min_test_cnt               

    # Loop level 1: iterate over initializer variants.
    # Without initializer fields this runs exactly once; with them it runs the
    # minimum count plus the predefined extra test count.
    for initializer_test_times in range(initializer_test_count):
        # Loop level 2: iterate over attribute combinations.
        # Without attributes this runs once; with attributes, first walk the
        # combination list in order, then draw random combinations so every
        # attribute combination can be sampled.
        for attributes_test_times in range(attributes_combination_test_times) :  
            input_possible_times = real_max_input - real_min_input + 1
            # Loop level 3: iterate optional inputs from the minimum to the
            # maximum input count; runs once when there are no optional inputs.
            for input_test_times in range(input_possible_times) : 
                print('input_possible_times:', input_possible_times, ', input_test_times:', input_test_times)
                # Loop level 4: generate model_count_per_data_type models per data type.
                for model_count in range(model_count_per_data_type) :
                    _inputs = []
                    _inputs_for_logging = []
                    _outputs = []
                    _outputs_for_logging = []
                    _attributes = []
                    _initializer = []

                    # For each data type (int64/float32/...), build a matching
                    # set of inputs/outputs/attributes.
                    for type in real_inputs[0].types:
                        if type == 'tensor(double)' or type == 'tensor(bfloat16)' or type == 'tensor(complex64)'or type == 'tensor(complex128)':
                            print('skip double or bfloat16 type for now~~~~')
                            continue

                        type_filter_list = get_op_type_filter(schema.name)

                        if type in type_filter_list:
                            print('skip {} for op {}'.format(type, schema.name))
                            continue

                        _input = []
                        _input_for_logging = []
                        _output = []
                        _output_for_logging = []

                        shapes = []

                        # One random shape per active input; the helper may
                        # then adjust them for op-specific constraints.
                        for i in range(len(real_inputs[:real_min_input + input_test_times])):
                            shape = get_random_shape()
                            shapes.append(shape)

                        if module_find != None :
                            obj.input_shape_correct(shapes) 

                        shape_out = []

                        # Pick attributes: by index while walking the known
                        # combination list, randomly (-1) afterwards.
                        if attributes_combination_test_times != 1 and attributes_test_times < attributes_combination_len :
                            if module_find != None :
                                attr, shape_out = obj.get_attributes_by_index_or_random(shapes, attributes_test_times)
                                _attributes.append(attr)
                        elif attributes_combination_test_times != 1 and attributes_test_times >= attributes_combination_len :
                            if module_find != None :
                                attr, shape_out = obj.get_attributes_by_index_or_random(shapes, -1)
                                _attributes.append(attr)          

                        # NOTE(review): this appends to real_inputs inside the
                        # data-type loop, so for Concat the list keeps growing
                        # across iterations — verify this accumulation is
                        # intentional.
                        if schema.name == 'Concat':
                            tmp_input = real_inputs[0]
                            for j in range(input_test_times):
                                real_inputs.append(tmp_input)

                        i = 0
                        for input in real_inputs[:real_min_input + input_test_times]:
                            input_type = type
                            input_name = input.name

                            t = MappingTypes.get_tensor_type(input_type)
                            # The helper may override the input type/name per slot.
                            if module_find != None :
                                get_input_type = getattr(obj, "get_input_type", None)
                                if callable(get_input_type):
                                    input_type = get_input_type(input.name, input_type)
                                    print('-----get input type:', input_type)
                                    t = MappingTypes.get_tensor_type(input_type)

                                get_input_name = getattr(obj, "get_input_name", None)
                                if callable(get_input_name):
                                    input_name = get_input_name(i)
                                    print('-----get input name:', input_name)

                            # Concat: all inputs share shapes[0]; sequence ops
                            # use sequence value infos; everything else gets a
                            # plain tensor value info.
                            if schema.name == 'Concat':
                                tvi = helper.make_tensor_value_info(input_name, t, shapes[0])
                            else: 
                                if schema.name in InputTypeSeqList:
                                    tvi = helper.make_tensor_sequence_value_info(input_name, t, shapes[i])
                                else:
                                    tvi = helper.make_tensor_value_info(input_name, t, shapes[i])

                            _input.append(tvi)

                            # Parallel plain-dict record used only for logging.
                            dict_logging_in = {}
                            dict_logging_in['name'] = input_name
                            dict_logging_in['type'] = input_type
                            dims = []
                            for dim in tvi.type.tensor_type.shape.dim:
                                dims.append(dim.dim_value)
                            dict_logging_in['shape'] = dims
                            _input_for_logging.append(dict_logging_in)

                            i = i + 1

                        _inputs.append(_input)
                        _inputs_for_logging.append(_input_for_logging)

                        # Default output shape: same as the first input.
                        if len(shape_out) == 0:
                            shape_out = shapes[0]

                        # When onnx cannot infer the shape, ask the helper.
                        if schema.has_type_and_shape_inference_function == False:
                            if module_find != None :
                                infer_output_shape = getattr(obj, "infer_output_shape", None)
                                if callable(infer_output_shape):
                                    shape_out, succ = infer_output_shape(shapes)  

                        for output in schema.outputs:
                            output_type = type
                            t = MappingTypes.get_tensor_type(output_type)

                            if module_find != None :
                                get_output_type = getattr(obj, "get_output_type", None)
                                if callable(get_output_type):
                                    output_type = get_output_type(out_type_index)
                                    print('-----get output type:', output_type)
                                    t = MappingTypes.get_tensor_type(output_type)

                            print('make_tensor_value_info, name:', output.name, 't:', t, 'shape_out:', shape_out)

                            tvi = helper.make_tensor_value_info(output.name, t, shape_out)
                            _output.append(tvi)

                            dict_logging_out = {}
                            dict_logging_out['name'] = output.name
                            dict_logging_out['type'] = output_type
                            dims = []
                            for dim in tvi.type.tensor_type.shape.dim:
                                dims.append(dim.dim_value)
                            dict_logging_out['shape'] = dims
                            _output_for_logging.append(dict_logging_out)

                        _outputs.append(_output)
                        _outputs_for_logging.append(_output_for_logging)

                        if initializer_min_test_cnt > 0 and module_find != None:
                            init = obj.generate_random_initializer(shapes, shape_out, type)
                            _initializer.append(init)

                        # NOTE(review): out_type_index grows monotonically
                        # across all loops and is never reset — confirm
                        # get_output_type expects a global counter.
                        out_type_index = out_type_index + 1    

                    assert(len(_inputs) == len(_outputs))

                    tensor_list = []

                    # Store each data type's inputs/outputs/attributes as one
                    # dict in tensor_list.
                    for i in range(len(_inputs)):
                        tensor_dict = {}
                        tensor_dict['input'] = _inputs[i]
                        tensor_dict['output'] = _outputs[i]

                        tensor_dict['input_for_logging'] = _inputs_for_logging[i]
                        tensor_dict['output_for_logging'] = _outputs_for_logging[i]

                        tensor_dict['attribute'] = {}

                        if len(_attributes) > 0:
                            tensor_dict['attribute'] = _attributes[i]

                        if len(_initializer) > 0:
                            tensor_dict['initializer'] = _initializer[i] 

                        tensor_list.append(tensor_dict)

                    # Walk the dict list: build one model per data type and test it.
                    for tensor in tensor_list:
                        input_name = []
                        output_name = []

                        for _in in tensor['input']:
                            input_name.append(_in.name)

                            input_shape = _in.type.tensor_type.shape.dim
                            data_shape = [x.dim_value for x in input_shape]

                        for _out in tensor['output']:
                            output_name.append(_out.name)

                        # Initializer-driven ops let the helper dictate the
                        # final node input/output name lists.
                        if initializer_min_test_cnt > 0 and module_find != None:
                            input_name = obj.get_input_name()
                            print('update input_name:', input_name)

                        if initializer_min_test_cnt > 0 and module_find != None:
                            get_output_name = getattr(obj, "get_output_name", None)
                            if callable(get_output_name):
                                output_name = obj.get_output_name(tensor['attribute'])
                                print('update output_name:', output_name)    

                        attr = {}
                        if len(_attributes) > 0:
                            attr = tensor['attribute']
                            node_def = helper.make_node(
                                        schema.name, # node name
                                        input_name,  #inputs
                                        output_name, # outputs
                                        # attributes
                                        **attr
                                        )  
                        else:
                            node_def = helper.make_node(
                                        schema.name, # node name
                                        input_name,  #inputs
                                        output_name, # outputs
                                        )

                        init = []
                        output_tensor_list = []
                        if schema.has_type_and_shape_inference_function == False:
                            output_tensor_list = tensor['output']

                        has_output_tensor_function = False

                        # The helper may supply explicit graph-output tensors.
                        if module_find != None:
                            get_output_tensor = getattr(obj, "get_output_tensor", None)
                            if callable(get_output_tensor):
                                has_output_tensor_function = True
                                output_tensor_list = get_output_tensor(tensor['input_for_logging'][0]['shape'], 
                                                                            tensor['input_for_logging'][0]['type'],
                                                                            attr)
                                print('-----get_output_tensor:', output_tensor_list)
                            
                        if len(_initializer) > 0:
                            init = tensor['initializer']
                            print('++++++++++++++++ get_output_tensor:', output_tensor_list)
                            graph_def = helper.make_graph(
                                    [node_def],
                                    'test_model',
                                    tensor['input'], # graph inputs
                                    output_tensor_list, #tensor['output'], # graph outputs
                                    initializer=init,
                                    )
                        else:    
                            graph_def = helper.make_graph(
                                    [node_def],
                                    'test_model',
                                    tensor['input'], # graph inputs
                                    output_tensor_list, #tensor['output'], # graph outputs
                                    )

                        mode_def_ = helper.make_model(graph_def, producer_name='onnx-example', opset_imports=[helper.make_opsetid('', op_set_version)])
                        # Run shape inference only when onnx can infer shapes
                        # and the helper did not hand-craft output tensors.
                        if schema.has_type_and_shape_inference_function == False or has_output_tensor_function == True:
                            mode_def = mode_def_
                        else:
                            mode_def = shape_inference.infer_shapes(mode_def_)
                        
                        onnxfile = schema.name + '_' + str(index) + '.onnx'
                        index = index + 1
                        mode_def.ir_version = 4

                        # Log the model's I/O signature before testing.
                        logging.info('test op:{}, onnxfile:{}'.format(schema.name, onnxfile))
                        logging.info('------------------input---------------------')
                        for in_ in tensor['input_for_logging']:
                            logging.info('name:{}, type:{}, shape:{}'.format(in_['name'], in_['type'], in_['shape']))

                        logging.info('------------------output---------------------')    

                        for out_ in tensor['output_for_logging']:
                            logging.info('name:{}, type:{}, shape:{}'.format(out_['name'], out_['type'], out_['shape'])) 

                        if len(attr) > 0:
                            logging.info('------------------attrbutes---------------------') 
                            logging.info('{}'.format(attr))

                        if len(init) > 0:
                            logging.info('------------------initializer---------------------') 
                            logging.info('{}'.format(init))    

                        record = False

                        # Loop level 5: test each model several times; on the
                        # first mismatch save the model and abort the process.
                        for test_count in range(test_count_per_model) : 
                            match = test_op.test_model_by_onnxruntime(mode_def)
                            if match == False and record == False:
                                onnx.save(mode_def, onnxfile)
                                record = True
                                if match == False :
                                    sys.exit() #test

def usage():
    """Print command-line usage and an example invocation."""
    for line in (
        'python ./make_op.py -o op_type -v op_set_version -n test_times',
        'ex:',
        'python ./make_op.py -o MatMul -v 11 -n 1000',
    ):
        print(line)

def init_cs():
    """Register the 'dim' and 'dim_value' hyperparameters on the module-level
    configuration spaces used by get_random_shape()."""
    cs_dim.add_hyperparameter(
        CSH.UniformIntegerHyperparameter(name='dim', lower=1, upper=max_dim))
    cs_dim_value.add_hyperparameter(
        CSH.UniformIntegerHyperparameter(name='dim_value', lower=1, upper=max_dim_value))
   
def main(argv):
    """Entry point: parse CLI options, look up the op schema, run the tests.

    Options: -o/--op_type (required), -v/--op_set_version, -n/--test_times.
    Exits with status 2 on bad or missing arguments.
    """
    init_logger()

    _all_schemas = defs.get_all_schemas()
    init_cs()

    global test_count_per_model
    global op_set_version

    op_type = ''

    try:
        opts, args = getopt.getopt(argv, "ho:v:n:", ["op_type=", "op_set_version=", "test_times="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-o", "--op_type"):
            op_type = arg
        elif opt in ("-v", "--op_set_version"):
            op_set_version = int(arg)
        elif opt in ("-n", "--test_times"):
            test_count_per_model = int(arg)

    if not op_type:
        usage()
        sys.exit(2)

    schema, got = get_schema(op_type)
    if got:
        test_op_set(schema)
    else:
        print('valid op list: ')
        for op in _all_schemas:
            print('---', op.name)

if __name__ == "__main__":
    main(sys.argv[1:])