import onnx
#from onnx.tools import update_model_dims
import numpy as np
import onnx.helper as helper
from onnx import shape_inference, TensorProto


def onnx_datatype_to_npType(data_type):
    """Map an ONNX TensorProto data-type code to the numpy dtype used to
    decode ``raw_data`` buffers.

    Fixes the original mapping, which decoded UINT16 (4) and INT16 (5) as
    uint8 and therefore misread their raw bytes. STRING (8) keeps the legacy
    byte-wise uint8 view; unknown codes fall back to float32, matching the
    original default.
    """
    dtype_by_code = {
        1: np.float32,   # FLOAT
        2: np.uint8,     # UINT8
        3: np.int8,      # INT8
        4: np.uint16,    # UINT16 (was wrongly uint8)
        5: np.int16,     # INT16  (was wrongly uint8)
        6: np.int32,     # INT32
        7: np.int64,     # INT64
        8: np.uint8,     # STRING: raw bytes viewed as uint8 (legacy behavior)
        9: np.bool_,     # BOOL (np.bool8 was removed in numpy 2.0)
        10: np.float16,  # FLOAT16
        11: np.float64,  # DOUBLE
        12: np.uint32,   # UINT32
        13: np.uint64,   # UINT64
    }
    return dtype_by_code.get(data_type, np.float32)


def get_raw_data(attr):
    """Decode the raw bytes of an attribute's embedded tensor (``attr.t``).

    Returns a flat 1-D numpy array; ``attr.t.dims`` is deliberately NOT
    applied, so callers receive the flattened values regardless of the
    tensor's declared shape. (The original also computed ``attr.t.dims``
    into an unused local, removed here.)
    """
    data_type = attr.t.data_type
    return np.frombuffer(attr.t.raw_data, dtype=onnx_datatype_to_npType(data_type))


def get_onnx_tensortype(data_type):
    """Translate a numeric data-type code into the matching
    ``onnx.TensorProto`` enum value.

    Code 0 is treated the same as 1 (FLOAT); any code outside the table
    yields 0 so callers can detect an unsupported type.
    """
    tensortype_by_code = {
        0: onnx.TensorProto.FLOAT,
        1: onnx.TensorProto.FLOAT,
        2: onnx.TensorProto.UINT8,
        3: onnx.TensorProto.INT8,
        4: onnx.TensorProto.UINT16,
        5: onnx.TensorProto.INT16,
        6: onnx.TensorProto.INT32,
        7: onnx.TensorProto.INT64,
        8: onnx.TensorProto.STRING,
        9: onnx.TensorProto.BOOL,
        10: onnx.TensorProto.FLOAT16,
        11: onnx.TensorProto.DOUBLE,
        12: onnx.TensorProto.UINT32,
        13: onnx.TensorProto.UINT64,
    }
    return tensortype_by_code.get(data_type, 0)


def get_init_data(init):
    """Return the typed-field payload of an initializer TensorProto.

    Checks the possible typed storage fields in a fixed priority order
    (double, float, int32, int64, string, uint64) and returns the first
    non-empty one.

    Raises AssertionError when every typed field is empty — e.g. when the
    tensor only carries ``raw_data``, which this helper does not decode.
    """
    for field in ('double_data', 'float_data', 'int32_data', 'int64_data',
                  'string_data', 'uint64_data'):
        data = getattr(init, field)
        if len(data) != 0:
            return data

    assert False, "Do not support init data type, %s" % (init.name)


# Reduce-style ops whose 'axes' attribute became an input at a newer opset,
# mapped to the opset version below which the rewrite is needed.
_AXES_TO_INPUT_OPS = {
    'ReduceMin': 18,
    'ReduceMean': 18,
    'ReduceMax': 18,
    'ReduceSum': 13,
    'ReduceL2': 18,
}


def _attr_value(attr, scalar_field):
    """Read one attribute value, preferring an embedded tensor's raw bytes.

    Some producers store scalar/list attributes inside ``attr.t.raw_data``;
    fall back to the plain proto field (``'i'``, ``'f'``, ``'ints'``, ...)
    otherwise.
    """
    if attr.t.raw_data:
        return get_raw_data(attr)
    return getattr(attr, scalar_field)


def _move_axes_to_input(graph, i, node, with_keepdims, force_axes_input=False):
    """Rebuild *node* with its 'axes' attribute converted to an input tensor.

    Implements the opset-13/18 upgrade shared by the Reduce* family, Squeeze
    and Unsqueeze: the 'axes' values become an INT64 initializer appended as
    an extra input, and the node is re-created without the attribute (keeping
    'keepdims' when *with_keepdims* is True). When *force_axes_input* is True
    an axes input is appended even if the attribute was empty (Unsqueeze
    requires the input to be present).
    """
    axes = []
    keepdims = 1
    for attr in node.attribute:
        if attr.name == 'axes':
            axes = _attr_value(attr, 'ints')
        if attr.name == 'keepdims':
            keepdims = _attr_value(attr, 'i')

    if force_axes_input or len(axes) != 0:
        new_axes = helper.make_tensor(node.name + '_axes', TensorProto.INT64,
                                      [len(axes)], axes)
        node.input.append(node.name + '_axes')
        graph.initializer.append(new_axes)

    kwargs = {'keepdims': keepdims} if with_keepdims else {}
    new_node = helper.make_node(
        node.op_type,
        name=node.name,
        inputs=node.input,
        outputs=node.output,
        **kwargs,
    )
    graph.node.remove(node)
    graph.node.insert(i, new_node)


def op_upgrade(model):
    """Upgrade legacy nodes in *model* in place so the graph is valid at opset 18.

    Rewrites ops whose attributes moved to inputs (Split, ReduceMin/Mean/Max/
    Sum/L2, Squeeze, Unsqueeze), trims removed training outputs
    (BatchNormalization), pins a default whose meaning changed (Softmax axis),
    patches empty optional inputs (Resize roi, LSTM sequence_lens,
    LayerNormalization bias), and promotes 0-D second-input initializers to
    1-D. Finally stamps ``opset_import[0]`` to version 18 and returns *model*.

    NOTE(review): assumes the default ONNX domain is ``opset_import[0]`` —
    confirm for models carrying multiple opset imports.
    """
    graph = model.graph
    opset_version = model.opset_import[0].version

    # Every rewrite below removes the node at index i and inserts its
    # replacement at the same index, so the node list keeps its length
    # while we iterate over it.
    for i, node in enumerate(graph.node):
        if node.op_type == 'Split' and opset_version < 13:
            # opset >= 13: 'split' moves from attribute to optional input.
            split = []
            axis = 0
            for attr in node.attribute:
                if attr.name == 'split':
                    split = _attr_value(attr, 'ints')
                if attr.name == 'axis':
                    axis = _attr_value(attr, 'i')

            kwargs = {'axis': axis}
            if len(split) != 0:
                new_split = helper.make_tensor(node.name + '_split',
                                               TensorProto.INT64,
                                               [len(split)], split)
                node.input.append(node.name + '_split')
                graph.initializer.append(new_split)
            else:
                # Opset 18 requires either the 'split' input or the
                # 'num_outputs' attribute; infer the latter from the
                # output count (fixes a previously invalid rewrite).
                kwargs['num_outputs'] = len(node.output)

            new_split_node = helper.make_node(
                "Split",
                name=node.name,
                inputs=node.input,
                outputs=node.output,
                **kwargs,
            )
            graph.node.remove(node)
            graph.node.insert(i, new_split_node)

        if node.op_type == 'BatchNormalization' and opset_version < 14:
            # opset >= 14 dropped the training outputs saved_mean/saved_var.
            if len(node.output) > 3:
                epsilon = 1e-5
                momentum = 0.9
                for attr in node.attribute:
                    if attr.name == 'epsilon':
                        epsilon = _attr_value(attr, 'f')
                    if attr.name == 'momentum':
                        momentum = _attr_value(attr, 'f')

                new_bn_node = helper.make_node(
                    "BatchNormalization",
                    name=node.name,
                    inputs=node.input,
                    outputs=node.output[:3],  # drop saved_mean / saved_var
                    epsilon=epsilon,
                    momentum=momentum,
                )
                graph.node.remove(node)
                graph.node.insert(i, new_bn_node)

        # ReduceMin/Mean/Max/L2 changed at opset 18, ReduceSum at 13.
        threshold = _AXES_TO_INPUT_OPS.get(node.op_type)
        if threshold is not None and opset_version < threshold:
            _move_axes_to_input(graph, i, node, with_keepdims=True)

        if node.op_type == 'Squeeze' and opset_version < 13:
            # opset >= 13: optional 'axes' attribute becomes optional input.
            _move_axes_to_input(graph, i, node, with_keepdims=False)

        if node.op_type == 'Unsqueeze' and opset_version < 13:
            # opset >= 13: 'axes' is a REQUIRED input, so always append it.
            _move_axes_to_input(graph, i, node, with_keepdims=False,
                                force_axes_input=True)

        # opset > 12: Softmax 'axis' default changed from 1 to -1, so make
        # the intended axis explicit on the node.
        if node.op_type == 'Softmax' and opset_version > 12:
            axis = -1
            for attr in node.attribute:
                if attr.name == 'axis':
                    axis = _attr_value(attr, 'i')

            new_softmax_node = helper.make_node(
                "Softmax",
                name=node.name,
                inputs=node.input,
                outputs=node.output,
                axis=axis,
            )
            graph.node.remove(node)
            graph.node.insert(i, new_softmax_node)

        if node.op_type == 'Resize' and opset_version > 12:
            # Replace an empty optional 'roi' input with an empty FLOAT
            # tensor (length guard added: 'roi' may be omitted entirely).
            if len(node.input) > 1 and node.input[1] == '':
                rois = helper.make_tensor(node.name + '_rois',
                                          TensorProto.FLOAT, [0], [])
                node.input[1] = node.name + '_rois'
                graph.initializer.append(rois)

        if node.op_type == 'LSTM':
            # Fill an empty optional 'sequence_lens' input (index 4) with the
            # sequence length read from the first input's inferred shape.
            # (The original inner loop shadowed the outer index `i` and the
            # builtin `input`; rewritten as a direct index check.)
            if len(node.input) > 4 and node.input[4] == '':
                seq_length = None
                for value_info in graph.value_info:
                    if value_info.name == node.input[0]:
                        seq_length = value_info.type.tensor_type.shape.dim[0].dim_value
                # NOTE(review): assumes shape inference populated value_info
                # for the LSTM input; make_tensor fails on None otherwise.
                temp_name = node.name + '_temp_input_4'
                temp_input = helper.make_tensor(temp_name,
                                                onnx.TensorProto.INT32,
                                                [1], [seq_length])
                graph.initializer.append(temp_input)
                node.input[4] = temp_name

        if node.op_type == 'LayerNormalization':
            # Drop a trailing empty optional bias input.
            if len(node.input) == 3 and node.input[2] == '':
                node.input.remove(node.input[2])

        # If an op's second input is a 0-D initializer, legalization/runtime
        # mis-computes its size, so promote it to an equivalent 1-D tensor.
        if node.op_type in ['Gather', 'GatherND', 'GatherElements', 'Mod',
                            'MatMul', 'Add', 'Pow', 'Sub', 'Mul', 'Div',
                            'TopK']:
            for j, init in enumerate(graph.initializer):
                if init.name == node.input[1] and len(init.dims) == 0:
                    data_type = init.data_type

                    if init.raw_data:
                        data = np.frombuffer(
                            init.raw_data,
                            dtype=onnx_datatype_to_npType(data_type)).tolist()
                    else:
                        data = get_init_data(init)

                    tensor_type = get_onnx_tensortype(data_type)
                    assert tensor_type != 0, \
                        "Unsupported constant data type. Node: %s." % node.name
                    # Replace with a new initializer whose dims are [1].
                    new_name = node.name + "_" + node.input[1]
                    new_init = helper.make_tensor(new_name, tensor_type,
                                                  dims=[1], vals=data,
                                                  raw=False)
                    node.input[1] = new_name
                    graph.initializer.insert(j, new_init)
                    # Stop scanning: the initializer list was just mutated.
                    break

    model.opset_import[0].version = 18
    return model