import numpy as np
import argparse
import json
import onnx
from onnx import mapping, helper, numpy_helper, TensorProto
from load_save_model_shm import *


def process_command(argv=None):
    """Parse command-line arguments for the QIR-stripping tool.

    Args:
        argv: Optional list of argument strings. Defaults to None, which
            makes argparse fall back to sys.argv[1:] — existing callers
            are unaffected; passing a list enables testing without
            touching sys.argv.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", "-i", type=str, required=True, help="Input model path.")
    parser.add_argument("--output", "-o", default="non_qir.onnx", help="Output model path.")
    parser.add_argument("--json", "-j", default="quan_info_bitinfo.json", help="The path of the JSON file that stores quantification information.")
    parser.add_argument("--non_dequan", "-d", action="store_true", help="Dequantize weight or not, default is dequan enable.")
    parser.add_argument("-s1", "--input_model_size", type = int, help="Set input model size")
    parser.add_argument("-f", "--no_file_op", type = int, choices=[0, 1], default = 0, help="Set no_file_op")
    parser.add_argument("-n", "--name", help="Set file name")
    parser.add_argument("-v", "--verbose", type = int, choices=[0, 1, 2, 3, 4], default = 2, help="Set verbose level to <number>, default is 2. (0: fatal, 1: error, 2: warning, 3: index, 4: user)")

    return parser.parse_args(argv)

def save_onnx_model(onnx_model, no_file_op, out_model_path, name, verbose):
    """Persist the model: to shared memory when no_file_op == 1, else to disk.

    Args:
        onnx_model: the ModelProto to save.
        no_file_op: 1 means "no file operations" — use the shared-memory saver.
        out_model_path: destination path for the on-disk case.
        name: shared-memory segment name (shared-memory case only).
        verbose: verbosity level forwarded to the shared-memory saver.
    """
    if no_file_op != 1:
        onnx.save(onnx_model, out_model_path)
    else:
        # Shared-memory path; the returned handle is not used by callers.
        onnx_model = saveonnxmodel_shm(onnx_model, name, verbose)

# Dequantize weight
def dequan(x, bitdepth, nfrac, name):
    """Convert fixed-point values back to real values by dividing by 2**nfrac.

    Args:
        x: quantized value(s) — a scalar or a numpy array.
        bitdepth: bit width of the quantized data (kept for interface; unused).
        nfrac: number of fractional bits; the dequantization scale is 2**nfrac.
        name: tensor name (kept for interface/diagnostics; unused).

    Returns:
        x scaled down by 2**nfrac (float or float array).
    """
    return x / (2 ** nfrac)

def compute_dequan(qir_node, init, bitdepth, init_data_dct, non_dequan):
    """Dequantize the initializer `init` in place, unless non_dequan is set.

    The fractional bit count is looked up via the QuantizeIR node's 4th input
    (qir_node.input[3]) in init_data_dct. Handles both storage layouts of a
    TensorProto: packed raw_data bytes and the float_data repeated field.
    """
    if non_dequan:
        return
    nfrac = init_data_dct[qir_node.input[3]]
    if init.HasField("raw_data"):
        # Decode the packed bytes, rescale, and re-encode with the same dtype.
        np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[init.data_type]
        values = np.frombuffer(init.raw_data, dtype=np_dtype)
        init.raw_data = dequan(values, bitdepth, nfrac, init.name).astype(np_dtype).tobytes()
    else:
        # Repeated-field storage: rescale element by element in place.
        for idx, value in enumerate(init.float_data):
            init.float_data[idx] = dequan(value, bitdepth, nfrac, init.name)

# Get values
def onnx_attribute_to_dict(onnx_attr):
    """Extract a (name, value) pair from an ONNX AttributeProto.

    Checks the value fields in order: tensor 't', then the scalar fields
    'f'/'i'/'s', then the repeated fields 'floats'/'ints'/'strings'.

    Returns:
        (name, value) where value is a numpy array for 't', a scalar for
        'f'/'i'/'s', or a list for the repeated fields. Returns None if no
        known value field is set (callers that unpack the result assume one
        always is).
    """
    # Fix: previously `name` was only assigned inside an `if`, so an
    # attribute without a name raised UnboundLocalError further down.
    name = onnx_attr.name if onnx_attr.HasField('name') else None
    if onnx_attr.HasField('t'):
        return name, numpy_helper.to_array(onnx_attr.t)
    for attr_type in ['f', 'i', 's']:
        if onnx_attr.HasField(attr_type):
            return name, getattr(onnx_attr, attr_type)
    for attr_type in ['floats', 'ints', 'strings']:
        if getattr(onnx_attr, attr_type):
            return name, list(getattr(onnx_attr, attr_type))
    # Preserve the original implicit-None behavior for unmatched attributes.
    return None

def create_leakyrelu_node(before_node, after_node, in_name):
    """Create a LeakyRelu node consuming `in_name` and rewire `after_node`.

    The new node is named after the two nodes it sits between. Every input
    of `after_node` that referenced `in_name` is redirected to the new
    LeakyRelu's output. The caller is responsible for inserting the returned
    node into the graph.
    """
    pair = "between_{}_and_{}".format(before_node.name, after_node.name)
    out_name = "LeakyRelu_output_" + pair
    leakyrelu_node = helper.make_node(
        "LeakyRelu",
        name="LeakyRelu_" + pair,
        inputs=[in_name],
        outputs=[out_name],
        alpha=1.0,
    )
    # Redirect after_node: anywhere it consumed in_name, consume the new output.
    for idx, candidate in enumerate(after_node.input):
        if candidate == in_name:
            after_node.input[idx] = out_name
    return leakyrelu_node

def get_node_by_in_name(nodes, in_name):
    """Find every consumer of a tensor.

    Returns a list of (node, node_index, input_index) triples for each node
    input equal to `in_name`; a node appears once per matching input.
    """
    return [
        (node, node_idx, input_idx)
        for node_idx, node in enumerate(nodes)
        for input_idx, candidate in enumerate(node.input)
        if candidate == in_name
    ]

def get_node_by_out_name(nodes, out_name):
    """Find every producer of a tensor.

    Returns a list of (node, node_index, output_index) triples for each node
    output equal to `out_name`.
    """
    return [
        (node, node_idx, output_idx)
        for node_idx, node in enumerate(nodes)
        for output_idx, candidate in enumerate(node.output)
        if candidate == out_name
    ]

def get_node_by_name(nodes, node_name):
    """Return (node, index) of the first node named `node_name`.

    Returns (None, None) when no node matches.
    """
    return next(
        ((node, idx) for idx, node in enumerate(nodes) if node.name == node_name),
        (None, None),
    )

def is_same_qir_info(qir_nodes, init_data_dct):
    """Check that all QuantizeIR nodes carry identical quantization info.

    Compares (bitdepth, sign, fractional length) of every node against the
    first one; the fractional length comes from the node's 4th input looked
    up in init_data_dct. Zero or one node is trivially "same".
    """
    if len(qir_nodes) <= 1:
        return True
    reference = (*get_qir_attrs(qir_nodes[0]), init_data_dct[qir_nodes[0].input[3]])
    for candidate in qir_nodes[1:]:
        current = (*get_qir_attrs(candidate), init_data_dct[candidate.input[3]])
        if current != reference:
            return False
    return True

# Insert LeakyRelu node between two QuantizeIR nodes: QuantizeIR -> New LeakyRelu -> QuantizeIR
# Insert LeakyRelu node in structure QuantizeIR->[Flatten, Reshape, Slice, Split, Concat]->QuantizeIR: QuantizeIR -> op(e.g. Flatten) -> New LeakyRelu -> QuantizeIR
def insert_leakyrelu_nodes(graph):
    """Insert LeakyRelu nodes between adjacent QuantizeIR regions.

    Patterns handled:
      * QuantizeIR -> QuantizeIR: insert a LeakyRelu between them.
      * QuantizeIR -> [Flatten/Reshape/Slice/Split/Concat] -> QuantizeIR:
        insert the LeakyRelu after the shaping op (or before a Concat).

    Returns a list of [leakyrelu_name, qir_node] pairs for inserted nodes
    whose quan_info must be taken from the recorded QuantizeIR (the Concat
    case, flag=True).

    NOTE(review): this appears to be the legacy implementation — the call in
    qir_transform is commented out in favor of insert_lrelu_nodes; confirm
    before removing.
    """
    create_params = []
    nodes = graph.node
    for prev_node in nodes:
        if prev_node.op_type != "QuantizeIR":
            continue
        prev_out_name = prev_node.output[0]
        cur_nodes_and_idxs = get_node_by_in_name(nodes, prev_out_name)
        for cur_node, _, _ in cur_nodes_and_idxs:
            if cur_node is None:
                continue
            if cur_node.op_type == "QuantizeIR":   # QuantizeIR -> QuantizeIR
                create_params.append((prev_node, cur_node, prev_out_name, False))  # This LeakyRelu must have quan_info
            elif cur_node.op_type in ["Flatten", "Reshape", "Slice", "Split", "Concat"]:    # QuantizeIR->op(e.g. Flatten)->QuantizeIR
                for cur_out_name in cur_node.output:
                    next_nodes_and_idxs = get_node_by_in_name(nodes, cur_out_name)
                    for next_node, _, _ in next_nodes_and_idxs:
                        if next_node is None:
                            continue
                        if next_node.op_type == "QuantizeIR":
                            if cur_node.op_type == "Concat":    # QuantizeIR->New LeakyRelu->Concat->QuantizeIR
                                create_params.append((prev_node, cur_node, prev_out_name, True))    # flag=True: record QIR for quan_info (op->Concat case)
                            else:   # QuantizeIR->op(Flatten,Reshape,Slice,Split)->New LeakyRelu->QuantizeIR
                                create_params.append((cur_node, next_node, cur_out_name, False))    # flag=False: no quan_info recording needed
    res = list()
    for before_node, after_node, in_name, flag in create_params:
        # Re-resolve the index each time: earlier inserts shift positions.
        _, before_idx = get_node_by_name(nodes, before_node.name)
        leakyrelu_node = create_leakyrelu_node(before_node, after_node, in_name)
        nodes.insert(before_idx+1, leakyrelu_node)
        # Record the pair of LeakyRelu's name and QuantizeIR(for its quan_info)
        if flag:
            res.append([leakyrelu_node.name, before_node])
    return res


def insert_lrelu_nodes(graph, init_data_dct):
    """Insert LeakyRelu nodes wherever two QuantizeIR regions with DIFFERENT
    quantization info (bitdepth/sign/fractional length) meet, so that the
    requantization boundary is materialized by an explicit node.

    Patterns handled:
      * QuantizeIR -> QuantizeIR
      * one producer fanning out to several QuantizeIR consumers
      * QuantizeIR -> Flatten/Reshape/Slice/Split -> QuantizeIR
      * QuantizeIR(s) -> Concat -> QuantizeIR(s)

    Returns a list of [leakyrelu_name, qir_node] pairs for inserted LeakyRelus
    that need their quan_info taken from a QuantizeIR node (the flag=True
    Concat cases).
    """
    special_ops = ["QuantizeIR", "Flatten", "Reshape", "Slice", "Split", "Concat"]
    # Each entry: (before_node, after_node, in_name, flag). flag=True means
    # "record [leakyrelu_name, before_node] so its quan_info can be emitted".
    create_params = []
    record_multi_use = []
    nodes = graph.node
    for cur_node in nodes:
        if cur_node.op_type not in special_ops:
            continue
        if cur_node.op_type == "QuantizeIR":
            """
            QuantizeIR -> QuantizeIR
            """
            prevs = get_node_by_out_name(nodes, cur_node.input[0])
            assert len(prevs) <= 1, f"Find two outputs have the same name: {cur_node.input[0]}"
            if len(prevs) == 0:
                continue
            if prevs[0][0].op_type == "QuantizeIR" and not is_same_qir_info([prevs[0][0], cur_node], init_data_dct):
                create_params.append((prevs[0][0], cur_node, prevs[0][0].output[0], False))
            elif prevs[0][0].op_type not in special_ops and prevs[0][0] not in record_multi_use:
                """
                prevs[0][0] 's output has been used twice
                             Node
                               |
                            /    \
            (cur_node)QuantizeIR  QuantizeIR
                """
                # Fix: remember the producer so the same fan-out is not
                # processed again (and duplicated) when the loop reaches its
                # other QuantizeIR consumers. Previously this list was checked
                # but never appended to.
                record_multi_use.append(prevs[0][0])
                same_in_nodes = get_node_by_in_name(nodes, cur_node.input[0])
                if len(same_in_nodes) == 1:
                    continue
                same_in_qirs = []
                for same_in_node, _, _ in same_in_nodes:
                    if same_in_node.op_type == "QuantizeIR":
                        same_in_qirs.append(same_in_node)
                if len(same_in_qirs) > 1 and not is_same_qir_info(same_in_qirs, init_data_dct):
                    for same_in_qir in same_in_qirs[1:]:
                        # Fix: this used to append a 3-tuple, which raised
                        # ValueError at the 4-way unpacking loop below. Insert
                        # a LeakyRelu between the shared producer and each
                        # extra QuantizeIR consumer; no quan_info recording.
                        create_params.append((prevs[0][0], same_in_qir, same_in_qir.input[0], False))
        if cur_node.op_type in ["Flatten", "Reshape", "Slice"]:
            """
            QuantizeIR -> Flatten -> QuantizeIR
                                |
                                  --> Node
            """
            prevs = get_node_by_out_name(nodes, cur_node.input[0])
            assert len(prevs) <= 1, f"Find two outputs have the same name: {cur_node.input[0]}"
            if len(prevs) == 0 or prevs[0][0].op_type != "QuantizeIR":
                continue
            nexts = get_node_by_in_name(nodes, cur_node.output[0])
            for next_node, _, _ in nexts:
                if next_node.op_type == "QuantizeIR" and not is_same_qir_info([prevs[0][0], next_node], init_data_dct):
                    create_params.append((cur_node, next_node, cur_node.output[0], False))
        elif cur_node.op_type == "Split":
            """
            QuantizeIR -> Split -> QuantizeIR
                              | 
                               ---> Node
            """
            prevs = get_node_by_out_name(nodes, cur_node.input[0])
            assert len(prevs) <= 1, f"Find two outputs have the same name: {cur_node.input[0]}"
            if len(prevs) == 0 or prevs[0][0].op_type != "QuantizeIR":
                continue
            for cur_out_idx, cur_out_name in enumerate(cur_node.output):
                nexts = get_node_by_in_name(nodes, cur_out_name)
                for next_node, _, _ in nexts:
                    if next_node.op_type == "QuantizeIR" and not is_same_qir_info([prevs[0][0], next_node], init_data_dct):
                        create_params.append((cur_node, next_node, cur_node.output[cur_out_idx], False))
        elif cur_node.op_type == "Concat":
            """
            QuantizeIR -> Concat -> QuantizeIR
                            |   |
            QuantizeIR ---->    --> Node
            """
            nexts = get_node_by_in_name(nodes, cur_node.output[0])
            prev_qirs, next_qirs = [], []
            for next_node, _, _ in nexts:
                if next_node.op_type == "QuantizeIR":
                    next_qirs.append(next_node)
            if len(next_qirs) == 0:
                continue
            for cur_in_idx, cur_in_name in enumerate(cur_node.input):
                prevs = get_node_by_out_name(nodes, cur_in_name)
                # Fix (message only): report the input actually being checked
                # instead of always input[0].
                assert len(prevs) <= 1, f"Find two outputs have the same name: {cur_in_name}"
                if len(prevs) == 0:
                    continue
                elif len(prevs) == 1 and prevs[0][0].op_type == "QuantizeIR":
                    prev_qirs.append(prevs[0][0])
            if len(prev_qirs) == 0:
                continue
            is_same_prev = is_same_qir_info(prev_qirs, init_data_dct)
            is_same_next = is_same_qir_info(next_qirs, init_data_dct)
            if is_same_prev and is_same_next:
                continue
            elif not is_same_prev:
                # insert between prev_qir and cur_node
                for prev_qir in prev_qirs:
                    create_params.append((prev_qir, cur_node, prev_qir.output[0], True))
            elif not is_same_next:
                # insert between cur_node and next_qir
                # NOTE(review): before/after look swapped here (before=next_qir,
                # after=cur_node): create_leakyrelu_node rewires after_node's
                # inputs matching in_name, but Concat never consumes its own
                # output, so nothing gets rewired. Kept as-is to preserve
                # behavior — confirm intent.
                for next_qir in next_qirs:
                    create_params.append((next_qir, cur_node, next_qir.input[0], True))
    res = list()
    for before_node, after_node, in_name, flag in create_params:
        # Re-resolve the index each time: earlier inserts shift positions.
        _, before_idx = get_node_by_name(nodes, before_node.name)
        leakyrelu_node = create_leakyrelu_node(before_node, after_node, in_name)
        nodes.insert(before_idx+1, leakyrelu_node)
        print(f"Warning: Insert LeakyRelu node between {before_node.name} and {after_node.name}.")
        # Record the pair of LeakyRelu's name and QuantizeIR(for its quan_info)
        if flag:
            res.append([leakyrelu_node.name, before_node])
    return res

def create_bn_node(graph, mul_node, add_node):
    """Build a BatchNormalization node equivalent to Mul (scale) then Add (bias).

    mean is fixed to zeros and var to ones (registered as new initializers),
    so BN reduces to y = scale * x + bias. Scalar scale/bias fed through a
    QuantizeIR are broadcast to a per-channel initializer, and the original
    scalar initializer is removed.

    NOTE(review): the channel count is hard-coded to 128 everywhere below
    (mean/var shapes and the broadcast) — presumably all models handled here
    have 128 channels at these nodes; confirm before reuse.

    Returns the new BatchNormalization node; the caller inserts it and removes
    the Mul/Add pair.
    """
    node_name = "bnscale_replace_" + mul_node.name + "_and_" + add_node.name
    x = mul_node.input[0]
    scale = mul_node.input[1]
    bias = add_node.input[1]
    mean = node_name + "_mean"
    var = node_name + "_var"
    mean_init = helper.make_tensor(mean, TensorProto.FLOAT, [128], np.full(128, 0, dtype=float))
    var_init = helper.make_tensor(var, TensorProto.FLOAT, [128], np.full(128, 1, dtype=float))
    y = add_node.output[0]
    bn_node = helper.make_node(
        "BatchNormalization",
        name=node_name,
        inputs=[x, scale, bias, mean, var],
        outputs=[y],
        epsilon=1e-10,  # tiny epsilon so BN ≈ scale*x + bias with var == 1
    )
    graph.initializer.append(mean_init)
    graph.initializer.append(var_init)
    # Broadcast scalar scale/bias initializers (behind QuantizeIR nodes) to
    # per-channel tensors, since BN expects 1-D per-channel inputs.
    for node in graph.node:
        if node.output[0] in [mul_node.input[1], add_node.input[1]] and node.op_type == "QuantizeIR":
            qir_init = get_init_by_name(graph, node.input[0])
            assert qir_init is not None, "Cannot find QuantizeIR({name})'s init {input0}".format(name=node.name, input0=node.input[0])
            new_init_value = qir_init.float_data[0]
            new_init_name = qir_init.name + "_broadcast"
            new_init = helper.make_tensor(new_init_name, TensorProto.FLOAT, [128], np.full(128, new_init_value, dtype=float))
            node.input[0] = new_init_name
            graph.initializer.append(new_init)
            graph.initializer.remove(qir_init)
    return bn_node

# find initializer in graph by in_name
def get_init_by_name(graph, in_name):
    """Return the graph initializer named `in_name`, or None when absent."""
    return next((init for init in graph.initializer if init.name == in_name), None)

# use BatchNormalization to replace Mul->Add
def rewrite_muladd2bn_nodes(graph):
    """Replace each Mul -> Add pair with a single BatchNormalization node.

    First pass scans for every Mul whose output feeds an Add and builds the
    replacement BN (create_bn_node already mutates graph initializers);
    second pass inserts the BN after the Add and removes the Mul/Add pair.

    NOTE(review): assumes each Mul output feeds at most one Add — if a Mul
    fed two Adds, the second nodes.remove(mul_node) would raise ValueError.
    Confirm this invariant holds for the input models.
    """
    nodes = graph.node
    rewrite_params = []
    for node_i in nodes:
        if node_i.op_type != "Mul":
            continue
        mul_node = node_i
        after_mul_nodes_and_idxs = get_node_by_in_name(nodes, mul_node.output[0])
        for after_mul_node, _, _ in after_mul_nodes_and_idxs:
            if after_mul_node.op_type !="Add":
                continue
            add_node = after_mul_node
            bn_node = create_bn_node(graph, mul_node, add_node)
            rewrite_params.append((mul_node, add_node, bn_node))
    # Apply the rewrites after scanning so the scan never sees its own edits.
    for mul_node, add_node, bn_node in rewrite_params:
        _, add_idx = get_node_by_name(nodes, add_node.name)
        nodes.insert(add_idx + 1, bn_node)
        nodes.remove(mul_node)
        nodes.remove(add_node)

def get_all_qir_nodes(graph):
    """Collect every QuantizeIR node in the graph, in graph order."""
    return [node for node in graph.node if node.op_type == "QuantizeIR"]

def remove_qir_nodes(graph, qir_nodes):
    """Splice every QuantizeIR node out of the graph.

    Consumers of a QIR's output are rewired straight to the QIR's first
    input; graph outputs produced by a QIR are renamed to that input. The
    nodes themselves are removed in a second pass.
    """
    for qir in qir_nodes:
        bypass_name = qir.input[0]
        # Rewire every consumer of the QIR output to the bypassed tensor.
        for consumer, _, input_idx in get_node_by_in_name(graph.node, qir.output[0]):
            consumer.input[input_idx] = bypass_name
        # Handle QuantizeIR feeding a graph output: rename the output itself.
        for graph_output in graph.output:
            if graph_output.name == qir.output[0]:
                graph_output.name = bypass_name
    for qir in qir_nodes:
        graph.node.remove(qir)

def get_init_data(graph):
    """Map every initializer name to its value(s) coerced to int.

    Returns a dict: scalar (0-d) and single-element tensors map to a plain
    int, everything else maps to a flat list of ints. setdefault keeps the
    first value if a name somehow repeats.

    Fix: the 0-d case previously used a plain `if` that fell through to the
    generic `else` branch, converting the tensor a second time (the second
    setdefault was a no-op); now an `elif` chain makes each case exclusive.
    """
    res = dict()
    for init in graph.initializer:
        if len(init.dims) == 0:
            # 0-d (scalar) tensor.
            res.setdefault(init.name, int(numpy_helper.to_array(init)))
        elif len(init.dims) == 1 and init.dims[0] == 1:
            # Shape-[1] tensor: unwrap to a plain int.
            res.setdefault(init.name, int(numpy_helper.to_array(init)[0]))
        else:
            lst = numpy_helper.to_array(init).astype(int).flatten().tolist()
            # Single-element tensors of any shape still unwrap to a scalar.
            res.setdefault(init.name, lst[0] if len(lst) == 1 else lst)
    return res

def get_qir_node_by_in0_name(qir_nodes, in0_name):
    """Return the first QuantizeIR node whose input[0] is `in0_name`, else None."""
    return next((qir for qir in qir_nodes if qir.input[0] == in0_name), None)

def check_input_type(input_type, sign_bit_num, bitdepth, qir_node):
    """Assert the declared input_type matches the parsed sign + bit width.

    input_type arrives as UTF-8 bytes (e.g. b"INT8"); the expected string is
    "INT<bitdepth>" for signed data (sign_bit_num == 1), "UINT<bitdepth>"
    otherwise. Raises AssertionError on mismatch.
    """
    decoded = input_type.decode('utf-8')
    prefix = 'INT' if sign_bit_num == 1 else 'UINT'
    expected = prefix + str(bitdepth)
    assert decoded == expected, (
        "input_type mismatch sign + num_bits, node name = %s, input_type = %s, sign = %s, num_bits = %s"
        % (qir_node.name, decoded, str(sign_bit_num), str(bitdepth))
    )
    
def get_qir_attrs(qir_node):
    """Return (bitdepth, sign_bit_num) parsed from a QuantizeIR node's attributes.

    Also validates the node's 'input_type' attribute against the parsed
    sign/bit width via check_input_type.

    NOTE(review): if the node carries no 'input_type' attribute, `input_type`
    is referenced before assignment below (NameError) — presumably every
    QuantizeIR node has one; confirm.
    """
    bitdepth, sign_bit_num = 0, 0
    for attr in qir_node.attribute:
        name, values = onnx_attribute_to_dict(attr)
        # Substring match, so any attribute name containing "num_bits" hits.
        if "num_bits" in name:
            bitdepth = values
        elif "sign" in name:
            sign_bit_num = values
        elif 'input_type' == name:
            input_type = values
    check_input_type(input_type, sign_bit_num, bitdepth, qir_node)

    return bitdepth, sign_bit_num

def parse_special_leakyrelu(leakyrelu):
    """Pop the custom 'fl' attribute off a LeakyRelu node.

    Returns the attribute's value after removing it from the node, or None
    when the node has no 'fl' attribute. Returning immediately after the
    removal keeps the remove-during-iteration safe.
    """
    for attr in leakyrelu.attribute:
        attr_name, attr_value = onnx_attribute_to_dict(attr)
        if attr_name == "fl":
            leakyrelu.attribute.remove(attr)
            return attr_value
    return None

def get_all_input_names(graph):
    """Return the names of all graph-level inputs, in declaration order."""
    return [graph_input.name for graph_input in graph.input]

def is_after_input(graph, node):
    """Return True when any of `node`'s inputs is a graph-level input tensor."""
    graph_input_names = get_all_input_names(graph)
    return any(in_name in graph_input_names for in_name in node.input)

# For Conv, ConvTranspose, Gemm. They both have weight and bias
def get_weight_bias_quan_info(quan_info, graph, qir_nodes, init_data_dct, node, non_dequan=False):
    """Collect weight/bias quantization info for Conv, ConvTranspose and Gemm.

    For each node input fed through a QuantizeIR: input 0 (feature) records
    the fractional length; input 1 (weight) and input 2 (bias) record
    bitdepth/sign/fractional length into quan_info and are dequantized in
    place (unless non_dequan). Also normalizes a multi-dim bias shape to 1-D.
    Finally asserts the alpha/offset invariants the hardware expects.
    """
    quan_info.setdefault("layer_type", node.op_type)
    quan_info.setdefault("ori_graph_name", node.name)
    input_op_fl, weight_op_alpha, weight_op_offset, weight_op_fl, weight_op_alpha_fl = 0, 0, 0, 0, 0
    bias_op_alpha, bias_op_offset, bias_op_fl, bias_op_alpha_fl = 0, 0, 0, 0
    for in_idx, in_name in enumerate(node.input):
        qir_node = get_qir_node_by_in0_name(qir_nodes, in_name)
        if qir_node is None:
            continue
        if in_idx == 0: # feature
            input_op_fl = init_data_dct[qir_node.input[3]]
        else:   # weight or bias
            init = get_init_by_name(graph, qir_node.input[0])
            if init is None:
                continue
            bitdepth, sign_bit_num = get_qir_attrs(qir_node)
            if in_idx == 1: # weight
                # Fix: this format string uses positional {} placeholders but was
                # called with keyword args, raising IndexError whenever the
                # assert fired. Pass positional args as at the bias assert below.
                assert bitdepth in [8, 12] and sign_bit_num == 1, "Illegal weight node, node name: {}, bitdepth: {}, sign_bit_num: {}".format(qir_node.name, bitdepth, sign_bit_num)
                weight_op_alpha, weight_op_offset, weight_op_fl, weight_op_alpha_fl = [init_data_dct[qir_node.input[idx]] for idx in range(1, 5)]
                quan_info.setdefault("weight_bitdepth", bitdepth)
                quan_info.setdefault("weight_sign_bit_num", sign_bit_num)
                # NOTE(review): the +4 for 8-bit weights looks like a fixed
                # hardware convention — confirm against the consumer of the JSON.
                if bitdepth == 8:
                    quan_info.setdefault("weight_frac_bit_num", weight_op_fl + 4)
                elif bitdepth == 12:
                    quan_info.setdefault("weight_frac_bit_num", weight_op_fl)
            elif in_idx == 2:   # bias
                assert bitdepth == 12 and sign_bit_num == 1, "Illegal bias node, node name: {}, bitdepth: {}, sign_bit_num: {}".format(qir_node.name, bitdepth, sign_bit_num)
                bias_op_alpha, bias_op_offset, bias_op_fl, bias_op_alpha_fl = [init_data_dct[qir_node.input[idx]] for idx in range(1, 5)]
                quan_info.setdefault("bias_bitdepth", bitdepth)
                quan_info.setdefault("bias_sign_bit_num", sign_bit_num)
                quan_info.setdefault("bias_frac_bit_num", bias_op_fl)
                # fix wrong bias shape: collapse any multi-dim bias to 1-D
                bias_init = get_init_by_name(graph, qir_node.input[0])
                assert bias_init is not None, "Bias initializer {} is not exist!".format(qir_node.input[0])
                if len(bias_init.dims) != 1:
                    total_dim = 1
                    for dim in bias_init.dims:
                        total_dim *= dim
                    bias_init.dims[:] = [total_dim]
                # Keep the matching graph input's declared shape consistent.
                for input in graph.input:
                    if input.name == qir_node.input[0]:
                        if len(input.type.tensor_type.shape.dim) != 1:
                            total_dim = 1
                            for dim in input.type.tensor_type.shape.dim:
                                total_dim *= dim.dim_value
                            dim0 = input.type.tensor_type.shape.dim[0]
                            dim0.dim_value = total_dim
                            del input.type.tensor_type.shape.dim[:]
                            input.type.tensor_type.shape.dim.append(dim0)
            # Dequantize weight and bias
            compute_dequan(qir_node, init, bitdepth, init_data_dct, non_dequan)
    # Check
    if len(node.input) == 3:
        assert 0 <= input_op_fl+weight_op_fl-bias_op_fl <= 20, "Illegal node, node name: {}, input.op.fl: {}, weight.op.fl: {}, bias.op.fl: {}".format(node.name, input_op_fl, weight_op_fl, bias_op_fl)
        assert weight_op_alpha==1 and bias_op_alpha==1, "Illegal node, node name: {}, weight.op.alpha: {}, bias.op.alpha: {}".format(node.name, weight_op_alpha, bias_op_alpha)
        assert weight_op_offset==0 and bias_op_offset==0, "Illegal node, node name: {}, weight.op.offset: {}, bias.op.offset: {}".format(node.name, weight_op_offset, bias_op_offset)
        assert weight_op_alpha_fl==0 and bias_op_alpha_fl==0, "Illegal node, node name: {}, weight.op.alpha.fl: {}, bias.op.alpha.fl: {}".format(node.name, weight_op_alpha_fl, bias_op_alpha_fl)

    else:
        assert weight_op_alpha == 1, "Illegal node, node name: {}, weight.op.alpha: {}".format(node.name, weight_op_alpha)
        assert weight_op_offset == 0, "Illegal node, node name: {}, weight.op.offset: {}".format(node.name, weight_op_offset)
        assert weight_op_alpha_fl == 0, "Illegal node, node name: {}, weight.op.alpha.fl: {}".format(node.name, weight_op_alpha_fl)

# For eltwise, like Add, Mul
def get_eltwise_quan_info(quan_info, graph, qir_nodes, init_data_dct, node, non_dequan):
    """Collect quantization info for element-wise nodes (Add, Mul).

    For each node input fed by a QuantizeIR whose source is an initializer
    (i.e. a constant operand), records bitdepth/sign/fractional length into
    quan_info, dequantizes the initializer in place (unless non_dequan), and
    asserts the expected bit widths (Add: 12-bit, Mul: 24-bit, both signed)
    and alpha/offset invariants.

    NOTE(review): with several constant inputs, the weight_* locals are
    overwritten per input and setdefault keeps only the first input's values
    in quan_info — presumably at most one constant operand exists; confirm.
    """
    quan_info.setdefault("layer_type", node.op_type)
    quan_info.setdefault("ori_graph_name", node.name)
    weight_op_alpha, weight_op_offset, weight_op_fl, weight_op_alpha_fl = 0, 0, 0, 0
    for in_name in node.input:
        qir_node = get_qir_node_by_in0_name(qir_nodes, in_name)
        if qir_node is None:
            continue
        # Only constant (initializer-backed) operands carry weight quan info.
        init = get_init_by_name(graph, qir_node.input[0])
        if init is None:
            continue
        bitdepth, sign_bit_num = get_qir_attrs(qir_node)
        weight_op_alpha, weight_op_offset, weight_op_fl, weight_op_alpha_fl = [init_data_dct[qir_node.input[idx]] for idx in range(1, 5)]
        quan_info.setdefault("weight_bitdepth", bitdepth)
        quan_info.setdefault("weight_sign_bit_num", sign_bit_num)
        quan_info.setdefault("weight_frac_bit_num", weight_op_fl)
        # Dequantize weight
        compute_dequan(qir_node, init, bitdepth, init_data_dct, non_dequan)
        # Check
        if node.op_type == "Add":
            assert bitdepth == 12 and sign_bit_num == 1, "Illegal node, node name: {}, bitdepth: {}, sign_bit_num: {}".format(node.name, bitdepth, sign_bit_num)
        elif node.op_type == "Mul":
            assert bitdepth == 24 and sign_bit_num == 1, "Illegal node, node name: {}, bitdepth: {}, sign_bit_num: {}".format(node.name, bitdepth, sign_bit_num)
        assert weight_op_alpha == 1, "Illegal node, node name: {}, weight.op.alpha: {}".format(node.name, weight_op_alpha)
        assert weight_op_offset == 0, "Illegal node, node name: {}, weight.op.offset: {}".format(node.name, weight_op_offset)
        assert weight_op_alpha_fl == 0, "Illegal node, node name: {}, weight.op.alpha.fl: {}".format(node.name, weight_op_alpha_fl)

# For Batchnormalization
def get_bn_quan_info(quan_info, graph, qir_nodes, init_data_dct, node, non_dequan=False):
    """Collect quantization info for a BatchNormalization node.

    Input 1 (scale) and input 2 (bias), when fed by a QuantizeIR backed by an
    initializer, contribute alpha_*/beta_* entries to quan_info and are
    dequantized in place (unless non_dequan). Scale must be 24-bit signed,
    bias 12-bit signed; alpha/offset invariants are asserted at the end.

    Note: layer_type/ori_graph_name are filled in by the caller, not here.
    """
    scale_op_alpha, scale_op_offset, scale_op_fl, scale_op_alpha_fl = 0, 0, 0, 0
    bias_op_alpha, bias_op_offset, bias_op_fl, bias_op_alpha_fl = 0, 0, 0, 0
    for in_idx, in_name in enumerate(node.input):
        qir_node = get_qir_node_by_in0_name(qir_nodes, in_name)
        if qir_node is None:
            continue
        init = get_init_by_name(graph, qir_node.input[0])
        if init is None:
            continue
        bitdepth, sign_bit_num = get_qir_attrs(qir_node)
        if in_idx == 1: # scale
            assert bitdepth==24 and sign_bit_num==1, "Illegal node, node name: {}, bitdepth: {}, sign_bit_num: {}".format(node.name, bitdepth, sign_bit_num)
            scale_op_alpha, scale_op_offset, scale_op_fl, scale_op_alpha_fl = [init_data_dct[qir_node.input[idx]] for idx in range(1, 5)]
            quan_info.setdefault("alpha_bitdepth", bitdepth)
            quan_info.setdefault("alpha_sign_bit_num", sign_bit_num)
            quan_info.setdefault("alpha_frac_bit_num", scale_op_fl)
        elif in_idx == 2:   # bias
            assert bitdepth==12 and sign_bit_num==1, "Illegal node, node name: {}, bitdepth: {}, sign_bit_num: {}".format(node.name, bitdepth, sign_bit_num)
            bias_op_alpha, bias_op_offset, bias_op_fl, bias_op_alpha_fl = [init_data_dct[qir_node.input[idx]] for idx in range(1, 5)]
            quan_info.setdefault("beta_bitdepth", bitdepth)
            quan_info.setdefault("beta_sign_bit_num", sign_bit_num)
            quan_info.setdefault("beta_frac_bit_num", bias_op_fl)
        # Dequantize scale and bias
        compute_dequan(qir_node, init, bitdepth, init_data_dct, non_dequan)
    # Check
    assert scale_op_alpha==1 and bias_op_alpha==1, "Illegal node, node name: {}, scale.op.alpha: {}, bias.op.alpha: {}".format(node.name, scale_op_alpha, bias_op_alpha)
    assert scale_op_offset==0 and bias_op_offset==0, "Illegal node, node name: {}, scale.op.offset: {}, bias.op.offset: {}".format(node.name, scale_op_offset, bias_op_offset)
    assert scale_op_alpha_fl==0 and bias_op_alpha_fl==0, "Illegal node, node name: {}, scale.op.alpha.fl: {}, bias.op.alpha.fl: {}".format(node.name, scale_op_alpha_fl, bias_op_alpha_fl)


# For LayerNormalization
def get_ln_quan_info(quan_info, graph, qir_nodes, init_data_dct, node, non_dequan=False):
    """Collect quantization info for a LayerNormalization node.

    Input 1 (scale) and input 2 (bias), when fed by a QuantizeIR backed by an
    initializer, contribute scale_*/bias_* entries to quan_info and are
    dequantized in place (unless non_dequan). Unlike the BN variant, no bit
    width or alpha/offset invariants are asserted here.

    Note: layer_type/ori_graph_name are filled in by the caller, not here.
    """
    scale_op_alpha, scale_op_offset, scale_op_fl, scale_op_alpha_fl = 0, 0, 0, 0
    bias_op_alpha, bias_op_offset, bias_op_fl, bias_op_alpha_fl = 0, 0, 0, 0
    for in_idx, in_name in enumerate(node.input):
        qir_node = get_qir_node_by_in0_name(qir_nodes, in_name)
        if qir_node is None:
            continue
        init = get_init_by_name(graph, qir_node.input[0])
        if init is None:
            continue
        bitdepth, sign_bit_num = get_qir_attrs(qir_node)
        if in_idx == 1: # scale
            # Only the fractional length is used; alpha/offset are ignored here.
            _, _, scale_op_fl, _ = [init_data_dct[qir_node.input[idx]] for idx in range(1, 5)]
            quan_info.setdefault("scale_bitdepth", bitdepth)
            quan_info.setdefault("scale_sign_bit_num", sign_bit_num)
            quan_info.setdefault("scale_frac_bit_num", scale_op_fl)
        elif in_idx == 2:   # bias
            _, _, bias_op_fl, _ = [init_data_dct[qir_node.input[idx]] for idx in range(1, 5)]
            quan_info.setdefault("bias_bitdepth", bitdepth)
            quan_info.setdefault("bias_sign_bit_num", sign_bit_num)
            quan_info.setdefault("bias_frac_bit_num", bias_op_fl)
        # Dequantize scale and bias
        compute_dequan(qir_node, init, bitdepth, init_data_dct, non_dequan)


# For outputs
def get_output_quan_info(quan_info, qir_nodes, init_data_dct, node):
    """Record per-output bit info from the QuantizeIR consuming each output.

    For every output of `node` that feeds a QuantizeIR, adds
    output<i>_bitdepth / _sign_bit_num / _frac_bit_num entries to quan_info.
    Outputs without a QuantizeIR consumer are skipped.
    """
    for out_idx, out_name in enumerate(node.output):
        qir_node = get_qir_node_by_in0_name(qir_nodes, out_name)
        if qir_node is None:
            continue
        bitdepth, sign_bit_num = get_qir_attrs(qir_node)
        key_prefix = "output" + str(out_idx)
        quan_info.setdefault(key_prefix + "_bitdepth", bitdepth)
        quan_info.setdefault(key_prefix + "_sign_bit_num", sign_bit_num)
        quan_info.setdefault(key_prefix + "_frac_bit_num", init_data_dct[qir_node.input[3]])

def get_leakyrelu_quan_info(quan_info, init_data_dct, node, leakyrelu_info):
    """Record quan info for an inserted LeakyRelu (e.g. one before a Concat).

    Pops the custom 'fl' attribute into "leaky_shift" if present, then looks
    the node up in leakyrelu_info ([name, qir_node] pairs produced at insert
    time); if found, the recorded QuantizeIR supplies the output0_* entries.
    """
    fl = parse_special_leakyrelu(node)
    if fl is not None:
        quan_info.setdefault("leaky_shift", fl)
    qir_node = None
    # No early break: the last matching record wins, as in the original scan.
    for recorded_name, recorded_qir in leakyrelu_info:
        if recorded_name == node.name:
            qir_node = recorded_qir
    if qir_node is None:
        return
    bitdepth, sign_bit_num = get_qir_attrs(qir_node)
    frac_bit_num = init_data_dct[qir_node.input[3]]

    quan_info.setdefault("output0_bitdepth", bitdepth)
    quan_info.setdefault("output0_sign_bit_num", sign_bit_num)
    quan_info.setdefault("output0_frac_bit_num", frac_bit_num)

def get_first_in_quan_info(graph, quan_info, qir_nodes, init_data_dct, node):
    """Record bit info for node inputs that are graph-level inputs.

    For each input of `node` that is also a graph input and feeds a
    QuantizeIR, adds input<i>_bitdepth / _sign_bit_num / _frac_bit_num
    entries to quan_info.
    """
    graph_input_names = {graph_input.name for graph_input in graph.input}
    for in_idx, in_name in enumerate(node.input):
        if in_name not in graph_input_names:
            continue
        qir_node = get_qir_node_by_in0_name(qir_nodes, in_name)
        if qir_node is None:
            continue
        bitdepth, sign_bit_num = get_qir_attrs(qir_node)
        key_prefix = "input" + str(in_idx)
        quan_info.setdefault(key_prefix + "_bitdepth", bitdepth)
        quan_info.setdefault(key_prefix + "_sign_bit_num", sign_bit_num)
        quan_info.setdefault(key_prefix + "_frac_bit_num", init_data_dct[qir_node.input[3]])

def qir_transform(model, non_dequan):
    """Strip QuantizeIR nodes from the model and collect their quantization info.

    Pipeline:
      1. Reset the opset domain to the default ONNX domain.
      2. Insert LeakyRelu nodes at boundaries between differing QIR regions.
      3. Fuse Mul->Add pairs into BatchNormalization.
      4. Remove all QuantizeIR nodes (rewiring their consumers).
      5. Walk the remaining nodes and build per-node quan_info dicts
         (inputs, weights/biases, outputs) keyed by "<op_type>_<output0>_Y".

    Args:
        model: the loaded ONNX ModelProto (mutated in place).
        non_dequan: when True, weights/biases are left quantized.

    Returns:
        (model, quan_infoes) — the transformed model and the info dict that
        is serialized to JSON by the caller.
    """
    model.opset_import[0].domain = ""
    graph = model.graph
    init_data_dct = get_init_data(graph)
    # leakyrelu_info = insert_leakyrelu_nodes(graph, init_data_dct)    # Legacy variant kept for reference; superseded by insert_lrelu_nodes below.
    leakyrelu_info = insert_lrelu_nodes(graph, init_data_dct)
    rewrite_muladd2bn_nodes(graph)
    qir_nodes = get_all_qir_nodes(graph)
    remove_qir_nodes(graph, qir_nodes)
    quan_infoes = dict()
    for node in graph.node:
        # Defensive: QIR nodes were already removed from graph.node above.
        if node in qir_nodes:
            continue
        quan_info = dict()
        quan_info.setdefault("layer_type", node.op_type)
        quan_info.setdefault("ori_graph_name", node.name)
        if is_after_input(graph, node): # Record input's bitinfo in first Node's in
            get_first_in_quan_info(graph, quan_info, qir_nodes, init_data_dct, node)
        if node.op_type in ["Conv", "ConvTranspose", "Gemm"]:
            get_weight_bias_quan_info(quan_info, graph, qir_nodes, init_data_dct, node, non_dequan)
        elif node.op_type in ["Add", "Mul"]:
            get_eltwise_quan_info(quan_info, graph, qir_nodes, init_data_dct, node, non_dequan)
        elif node.op_type == "BatchNormalization":
            get_bn_quan_info(quan_info, graph, qir_nodes, init_data_dct, node, non_dequan)
        elif node.op_type == "LayerNormalization":
            get_ln_quan_info(quan_info, graph, qir_nodes, init_data_dct, node, non_dequan)
        elif node.op_type == "LeakyRelu": # for LeakyRelu before Concat
            get_leakyrelu_quan_info(quan_info, init_data_dct, node, leakyrelu_info)
        get_output_quan_info(quan_info, qir_nodes, init_data_dct, node)
        # Only emit entries that gained info beyond layer_type/ori_graph_name.
        if len(quan_info) > 2:
            out_name = node.output[0]
            op_name = node.op_type + "_" + out_name + "_Y"
            quan_infoes.setdefault(op_name, quan_info)
    return model, quan_infoes


if __name__ == "__main__":
    args = process_command()
    # no_file_op == 1: the model lives in shared memory instead of on disk.
    if args.no_file_op == 1:
        model = loadonnxmodel_shm(args.input_model_size, args.name, args.verbose)
    else:
        model = onnx.load(args.input)
    non_qir_model, quan_infoes = qir_transform(model, args.non_dequan)
    # Emit the collected quantization info as tab-indented JSON.
    with open(args.json, "w") as f:
        json.dump(quan_infoes, f, indent="\t")
    # onnx.checker.check_model(non_qir_model)
    save_onnx_model(non_qir_model, args.no_file_op, args.output, args.name, args.verbose)
