import os
import logging
import onnx
import numpy
import struct
import copy

# Name of the log file for this script.
# NOTE(review): basicConfig below does not reference it — pass
# filename=log_file if file logging is actually intended; TODO confirm.
log_file = "conformer_encoder_onnx_modification.log"
logging.basicConfig(level=logging.INFO)

# Default location / filename of the shape-inferred source encoder model.
onnx_dir = "/public/ai_platform/models/zte/full/"
src_onnx = "encoder_shapeinfer.onnx"

def Swish_fuse(op_name, inputs, outputs, op_type="Swish"):
    """Create a fused Swish node: output = input * Sigmoid(input).

    The node's embedded ``doc_string`` is kept verbatim for traceability
    in the exported graph.
    """
    doc_string ="""output = input * Sigmoid(input)
                    也就是 Mul(input, Sigmoid(input))
                """

    swish_node = onnx.helper.make_node(
        op_type=op_type,
        name=op_name,
        inputs=inputs,
        outputs=outputs,
        doc_string=doc_string,
    )
    return swish_node

def GLU_fuse(op_name, inputs, outputs, op_type="Glu"):
    """Create a fused GLU node with ``axis=1``.

    GLU splits the input into two halves a, b along the axis and computes
    a * Sigmoid(b). The embedded ``doc_string`` is kept verbatim.
    """
    doc_string ="""input 沿着 dim 轴切分成 2 个矩阵: a, b
                   GLU(a, b) = a * Sigmoid(b)
                """
    # Passing axis=1 as a kwarg lets make_node build the attribute directly,
    # equivalent to inserting a make_attribute("axis", 1) by hand.
    return onnx.helper.make_node(
        op_type=op_type,
        name=op_name,
        inputs=inputs,
        outputs=outputs,
        doc_string=doc_string,
        axis=1,
    )

def MatrixbdCompute_fuse(op_name, inputs, outputs, op_type=None):
    """Create a fused MatrixbdCompute node (relative-position matrix_bd).

    The ``op_type`` argument is accepted for signature symmetry with the
    other *_fuse helpers but is always overridden to "MatrixbdCompute".
    The embedded ``doc_string`` is kept verbatim.
    """
    doc_string ="""相对位置编码中 matrixbd 计算
                    把 matrixbd 内部的计算变成矩阵乘法, 用 GPU 加速
                """
    return onnx.helper.make_node(
        op_type="MatrixbdCompute",
        name=op_name,
        inputs=inputs,
        outputs=outputs,
        doc_string=doc_string,
    )


def fuse_glu_swish_matrixbd(onnx_path, save_onnx):
    """Fuse Swish / GLU / MatrixbdCompute sub-graphs of the conformer encoder.

    Loads the model at ``onnx_path``, pattern-matches known node chains and
    replaces each with a single fused custom op:

      * Sigmoid + Mul (not fed by a Split)          -> Swish
      * Split + Sigmoid + Mul                       -> Glu   (branch disabled)
      * relative-position-encoding "matrix_bd" chain -> MatrixbdCompute

    Shape inference is re-run and the result is saved to ``save_onnx``.

    NOTE(review): branches guarded with ``and 0`` are intentionally disabled
    toggles. The loop mutates ``graph.node`` while iterating over it, which
    relies on a fused pattern never overlapping the current index — TODO
    confirm this holds for all target models.
    """
    onnx_model = onnx.load(onnx_path)
    onnx.checker.check_model(onnx_model)

    graph = onnx_model.graph
    nodes = graph.node

    # Constant output_0 values feeding Where inputs, keyed by the Constant's
    # output name (filled only by the disabled branch at the loop bottom).
    constant_ouput0_where = {}
    for i, node in enumerate(nodes):

        # MatrixbdCompute fuse for a graph already processed by the ONNX
        # optimizer / onnxsim (compact 24-node pattern).
        if node.op_type == "MatMul" and nodes[i+1].op_type == "Shape" \
                                    and nodes[i+2].op_type == "Gather" \
                                    and nodes[i+3].op_type == "Gather" \
                                    and nodes[i+4].op_type == "Gather" \
                                    and nodes[i+5].op_type == "Unsqueeze" \
                                    and nodes[i+6].op_type == "Unsqueeze" \
                                    and nodes[i+7].op_type == "Unsqueeze" \
                                    and nodes[i+8].op_type == "Concat" \
                                    and nodes[i+9].op_type == "ConstantOfShape"\
                                    and nodes[i+10].op_type == "Concat" \
                                    and nodes[i+11].op_type == "Gather" \
                                    and nodes[i+12].op_type == "Add" \
                                    and nodes[i+13].op_type == "Unsqueeze" \
                                    and nodes[i+14].op_type == "Concat" \
                                    and nodes[i+15].op_type == "Reshape" \
                                    and nodes[i+16].op_type == "Slice" \
                                    and nodes[i+17].op_type == "Reshape" \
                                    and nodes[i+18].op_type == "Slice" \
                                    and nodes[i+19].op_type == "Squeeze" \
                                    and nodes[i+20].op_type == "Div" \
                                    and nodes[i+21].op_type == "Add" \
                                    and nodes[i+22].op_type == "Unsqueeze" \
                                    and nodes[i+23].op_type == "Slice":
            # create fused-op MatrixbdCompute
            matrixbd_name = nodes[i+1].name.replace("Shape", "MatrixbdCompute")
            # BUGFIX: MatrixbdCompute_fuse takes (op_name, inputs, outputs);
            # the model object was wrongly passed as the first argument,
            # which raised a TypeError whenever this pattern matched.
            node_matrixbd = MatrixbdCompute_fuse(matrixbd_name, nodes[i+1].input, nodes[i+23].output)

            for n in range(1, 24):
                graph.node.remove(nodes[i+24-n])
            graph.node.insert(i+1, node_matrixbd)

        # MatrixbdCompute fuse for a raw ONNX model exported with opset 17
        # (sparser 61-node pattern; only anchor nodes are checked).
        if node.op_type == "MatMul" and nodes[i+1].op_type == "Shape"   \
                                    and nodes[i+17].op_type == "Concat" \
                                    and nodes[i+18].op_type == "ConstantOfShape"\
                                    and nodes[i+19].op_type == "Concat" \
                                    and nodes[i+24].op_type == "Add" \
                                    and nodes[i+33].op_type == "Concat" \
                                    and nodes[i+34].op_type == "Reshape" \
                                    and nodes[i+39].op_type == "Slice" \
                                    and nodes[i+41].op_type == "Reshape" \
                                    and nodes[i+46].op_type == "Slice" \
                                    and nodes[i+48].op_type == "Squeeze" \
                                    and nodes[i+50].op_type == "Div" \
                                    and nodes[i+54].op_type == "Add" \
                                    and nodes[i+57].op_type == "Unsqueeze" \
                                    and nodes[i+60].op_type == "Slice":
            # create fused-op MatrixbdCompute
            matrixbd_name = nodes[i+1].name.replace("Shape", "MatrixbdCompute") + "_{}".format(i)

            # Add a Constant node holding the (hard-coded) feature length as
            # the second MatrixbdCompute input.
            length = 130
            feat_len = f"feat_len_{i}"
            len_np = numpy.array([length], dtype = numpy.int64)
            tensor_len = onnx.helper.make_tensor(name = feat_len, data_type=onnx.TensorProto.INT64, dims=len_np.shape, vals=len_np)
            node_len = onnx.helper.make_node(op_type="Constant", name = f"len_{i}", inputs=[], outputs=[feat_len], value=tensor_len)
            onnx_model.graph.node.append(node_len)

            node_matrixbd = MatrixbdCompute_fuse(matrixbd_name, inputs=[node.output[0], feat_len], outputs=nodes[i+60].output)

            for n in range(1, 61):
                graph.node.remove(nodes[i+61-n])
            graph.node.insert(i+1, node_matrixbd)

        # GLU fuse (disabled via "and 0": Swish fusion alone is used).
        if node.op_type == "Sigmoid" and nodes[i+1].op_type == "Mul" \
                                    and nodes[i-1].op_type == "Split" and 0:
            # create fused-op Glu
            glu_name = node.name.replace("Sigmoid", "Glu")
            glu_input = nodes[i-1].input
            glu_output = nodes[i+1].output

            node_glu = GLU_fuse(glu_name, glu_input, glu_output)

            # remove Split/Sigmoid/Mul and insert the fused op in their place
            for n in range(3): # 0, 1, 2
                graph.node.remove(nodes[i+1-n])
            graph.node.insert(i-1, node_glu)

        # Swish fuse: Sigmoid followed by Mul, not part of a GLU pattern.
        if node.op_type == "Sigmoid" and nodes[i+1].op_type == "Mul" \
                                    and nodes[i-1].op_type != "Split":
            # create fused-op Swish
            swish_name = node.name.replace("Sigmoid", "Swish")
            swish_input = nodes[i].input
            swish_output = nodes[i+1].output[0].replace("Mul", "Swish")

            node_swish = Swish_fuse(swish_name, swish_input, [swish_output])

            # reconnect the downstream consumer to the fused output
            nodes[i+2].input[0] = node_swish.output[0]
            # remove the single Sigmoid and Mul ops, insert fused Swish
            for n in range(2): # 0, 1
                graph.node.remove(nodes[i+1-n])
            graph.node.insert(i, node_swish)

        # Record the Constant output_0 feeding each Where input
        # (disabled via "and 0").
        if node.op_type == "Where" and nodes[i-1].op_type == "Constant" and 0:
            # BUGFIX: read the tensor from the preceding Constant node
            # (nodes[i-1]); the old code indexed node[i-1] on a NodeProto and
            # read attributes off the Where node, neither of which works.
            np_dtype = onnx.helper.tensor_dtype_to_np_dtype(nodes[i-1].attribute[0].t.data_type)
            const_value = numpy.frombuffer(nodes[i-1].attribute[0].t.raw_data, dtype=np_dtype)
            # Key by the Constant's output name so later passes can look up
            # the value consumed by the Where.
            constant_ouput0_where[nodes[i-1].output[0]] = const_value

        # replace Where with Add or Mul (not implemented yet)

    onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
    onnx.save(onnx_model, save_onnx)


def remove_pattern_and_make_static(onnx_path, save_onnx):
    """Remove dynamic-shape sub-graphs and freeze the encoder to static shapes.

    Rewrites Reshape shape inputs to hard-coded constants (seq len 511,
    4 heads x 64 dims, 256 channels), cuts the second Where in each
    self-attention block and feeds a new graph input ``input_mask_0``
    instead, folds the pos-embedding MatMuls using precomputed data from
    ``Slice_673.npy``, and finally drops all collected nodes.

    NOTE(review): this function is tightly bound to one specific exported
    graph — node names ("/embed/model/Reshape", "Reshape_new_4", ...),
    relative offsets (i + 91, i + 5, ...) and absolute node indices
    (0:17, 22:34, 252, 1689:1692) are all hard-coded; any change to the
    export or to the preceding fusion pass invalidates them.
    """
    onnx_model = onnx.load(onnx_path)
    graph = onnx_model.graph
    nodes = onnx_model.graph.node
    
    # New static mask input consumed by the rewired Softmax inputs below.
    input_mask_node_0 = onnx.helper.make_tensor_value_info(name='input_mask_0', elem_type=1, shape=[4, 511, 511])
    graph.input.append(input_mask_node_0)
    
    rm_nodes = []          # nodes collected for deletion at the end
    reshape_dim3_64 = []   # Reshapes to be frozen to [-1, 4, 64]
    reshape_dim3_256 = []  # Reshapes to be frozen to [1, -1, 256]
    
    # encoders/self_attn QK: remove the shape-gather-unsqueeze-concat chain
    for i, node in enumerate(nodes):
        if node.op_type == "Concat" and nodes[i-4].op_type == "Unsqueeze":
            rm_nodes.extend(nodes[i-5: i+1]) 
            for j, node_next in enumerate(nodes):
                if node_next.op_type == "Gather" and \
                                    node_next.output[0] == nodes[i-4].input[0]:
                    rm_nodes.extend(nodes[j-2: j+1])

    # encoders/self_attn QKV and linear_pos (1, -1, 4, 64): concat_reshape
    # encoders/self_attn afterV_view (1, -1, 256): concat_reshape
    # Classify each self_attn Reshape by the arity of its shape-producing
    # Concat: 4 inputs -> per-head view, 3 inputs -> merged-head view.
    for i, node0 in enumerate(nodes):
        if node0.op_type == "Reshape" and 'self_attn/Concat' in node0.input[1]:
            for j, node1 in enumerate(nodes):
                if node1.output[0] == node0.input[1]:
                    if len(node1.input) == 4:
                        # print(" ### node1 :", node1)
                        # print(" ### node1 :", len(node1.input))
                        reshape_dim3_64.append(node0)
                    if len(node1.input) == 3:
                        # print(" ### node0 :", node0.op_type, node1.op_type)
                        # print(" ### node0 :", len(node0.input))
                        # the embedding Reshape is handled separately below
                        if nodes[i + 1].name == '/embed/model/Reshape':
                            continue
                        reshape_dim3_256.append(node0)
    print(" *** reshape_dim3_256: ", len(reshape_dim3_256))
    # QKV and linear_pos: freeze shape to [-1, 4, 64]
    reshape_consts = []
    for i, reshape_node in enumerate(reshape_dim3_64):
        reshape_to = numpy.array([-1, 4, 64]).astype(numpy.int64)
        reshape_shape_tensor = onnx.helper.make_tensor(name=f'Reshape_shape_{i}', \
                                                        data_type=onnx.TensorProto.INT64, \
                                                        dims=reshape_to.shape, \
                                                        vals=reshape_to)
        const_i = onnx.helper.make_node(name=f'Reshape_new_{i}',op_type='Constant', \
                                        inputs=[], outputs=[f'Reshape_out_{i}'],    \
                                        value=reshape_shape_tensor)
        onnx_model.graph.node.append(const_i)
        reshape_node.input[1] = const_i.output[0]
        reshape_consts.append(f'Reshape_new_{i}')
    print(" *** Add [-1, 4, 64]: ", reshape_consts)
    # afterV_view: freeze shape to [1, -1, 256]
    len64 = len(reshape_dim3_64)
    for i, reshape_node in enumerate(reshape_dim3_256):
        reshape_to = numpy.array([1, -1, 256]).astype(numpy.int64)
        value = onnx.helper.make_tensor(name=f'Reshape_shape_{len64 + i}', data_type=onnx.TensorProto.INT64, dims=reshape_to.shape, vals=reshape_to)
        const_i = onnx.helper.make_node(name=f'Reshape_new_{len64 + i}', op_type='Constant', inputs=[], outputs=[f'Reshape_out_{len64 + i}'], value=value)
        onnx_model.graph.node.append(const_i)
        reshape_node.input[1] = const_i.output[0]
    
    # embedding reshape: freeze to [1, 511, 4864]
    for i, node0 in enumerate(nodes):
        if node0.name == '/embed/model/Reshape':
            reshape_to = numpy.array([1, 511, 4864]).astype(numpy.int64)
            value = onnx.helper.make_tensor(name='Reshape_511_4864', data_type=onnx.TensorProto.INT64, dims=reshape_to.shape, vals=reshape_to)
            const_i = onnx.helper.make_node(name='Reshape_511_4864', op_type='Constant', inputs=[], outputs=['Reshape_511_4864_out'], value=value)
            nodes.append(const_i)
            node0.input[1] = const_i.output[0]
    
    # embedding transpose_reshape: change Transpose perms from 4 dims to 3
    # dims by dropping the leading axis and shifting the rest down by one.
    for i, node0 in enumerate(nodes):
        if node0.op_type == "Transpose":
            orig_attr = node0.attribute[0].ints
            if node0.name == "/embed/model/Transpose" or len(orig_attr) == 3: continue
            new_attr = onnx.helper.make_attribute('perm', numpy.array(orig_attr[1:]) - 1)
            del node0.attribute[0]
            node0.attribute.insert(0, new_attr)

    # # mul and div input[1]: scalar to vec
    # for i, node0 in enumerate(nodes):
    #     if node0.op_type == "Mul" or node0.op_type == "Div":
    #         for j, node1 in enumerate(nodes):
    #             if node1.op_type == "Constant" and node0.input[1] == node1.output[0]:
    #                 raw_data = node1.attribute[0].t.raw_data
    #                 data = struct.unpack('f', raw_data)
    #                 data = list(data)[0]
    #                 if node0.op_type == "Mul":
    #                     data_array = numpy.full([256],fill_value=data)
    #                 elif node0.op_type == "Div":
    #                     data_array = numpy.full([511],fill_value=data)
    #                 value = onnx.helper.make_tensor(name='value', data_type=onnx.TensorProto.FLOAT, dims=data_array.shape, vals=data_array)
    #                 new_scale_node = onnx.helper.make_node(op_type='Constant', inputs=[], outputs=[node1.output[0]], value=value)
    #                 nodes.remove(node1)
    #                 nodes.insert(j, new_scale_node)     
    
    # drop the second original graph input (superseded by input_mask_0)
    del graph.input[1]
    
    # encoders/self_attn: connect the Where-Softmax output directly to the
    # MatMul that follows the second Where, cutting the second Where out.
    for i, node0 in enumerate(nodes):
        if node0.op_type == 'Where' and nodes[i + 1].op_type == 'Softmax':
            nodes[i + 5].input[0] = nodes[i + 1].output[0]
            if 'encoders.0' in node0.name:
                rm_nodes.extend(nodes[i - 9 : i + 1]) # remove the ops on the source chain of the disconnected second Where
            else:
                rm_nodes.extend(nodes[i - 5 : i + 1])
            rm_nodes.extend(nodes[i + 2 : i + 5])  # remove ops on the Reshape input chain below (reusable)
            rm_nodes.extend(nodes[i + 7 : i + 12]) # remove ops on the Reshape input chain below (reusable)
            # replace the masking Where with Add(attn_bias, input_mask_0)
            add_node = onnx.helper.make_node(name=f'Add_new_{i}', op_type='Add', inputs=[node0.input[2], 'input_mask_0'], \
                                                outputs=[f'Add_new_{i}_out'])
            nodes[i + 1].input[0] = add_node.output[0]
            nodes.append(add_node)

    # encoders/self_attn: remove the Unsqueeze-Equal chain
    for i, node0 in enumerate(nodes):
        if node0.name in ['/encoders/encoders.0/self_attn/Unsqueeze_26']:
            rm_nodes.extend(nodes[i - 1: i + 4])

    # feed a fixed axes constant to the network-input Unsqueeze op
    for i, node0 in enumerate(nodes):
        if node0.name == '/embed/model/Unsqueeze':
            val = numpy.array([1]).astype(numpy.int64)
            value = onnx.helper.make_tensor(name="/embed/model/Constant_output_0", data_type=onnx.TensorProto.INT64, dims=val.shape, vals=val)
            const = onnx.helper.make_node(name="/embed/model/Constant_output_0", op_type='Constant', inputs=[], outputs=[node0.input[1]], value=value)
            nodes.append(const)

    # encoders/self_attn: drop the Reshape shape constants created above for
    # the Reshapes adjacent to the removed Where nodes (every 5th constant).
    for i, node0 in enumerate(nodes):
        if node0.name in ['Reshape_new_4', 'Reshape_new_9', 'Reshape_new_14', 'Reshape_new_19', \
            'Reshape_new_24', 'Reshape_new_29', 'Reshape_new_34', 'Reshape_new_39',\
            'Reshape_new_44', 'Reshape_new_49', 'Reshape_new_54', 'Reshape_new_59']:
            rm_nodes.append(node0)

    # pos_embed from extend_pe: take the values directly, drop the MatMul
    matmul_list = []    
    for i, node0 in enumerate(nodes):
        if node0.op_type == "MatMul" and node0.input[0] == "/embed/model/out/out.1/Slice_output_0":
            matmul_list.append(node0)

    # Slice input data previously dumped from the ONNX graph.
    # NOTE(review): 'Slice_673.npy' must exist in the working directory.
    # slice_data = np.load('slice546.npy')
    slice_data = numpy.load('Slice_673.npy')
    matmul_input0_data = slice_data[:, :511, :]

    matmul_input1 = {}
    for i, matmul_node in enumerate(matmul_list):
        for t in graph.initializer:
            # fetch the MatMul weight initializer
            if t.name == matmul_node.input[1]:
                matmul_input1[t.name] = onnx.numpy_helper.to_array(t)
            
    # Precompute each pos-embed MatMul offline and feed the product to the
    # downstream Reshape as a Constant.
    for i, node0 in enumerate(nodes):
        if node0 in matmul_list:
            matmul_input1_data = matmul_input1[node0.input[1]]
            rm_nodes.append(node0)
            if node0.name == '/encoders/encoders.0/self_attn/linear_pos/MatMul':
                reshape_node = nodes[i + 91]  # position of the Reshape in the matmul+reshape pair
            else:
                reshape_node = nodes[i + 1]
            value_matmul = numpy.matmul(matmul_input0_data, matmul_input1_data).astype(numpy.float32)
            name = f'{reshape_node.name}_input0'
            value = onnx.helper.make_tensor(name=name, data_type=onnx.TensorProto.FLOAT, dims=value_matmul.shape, vals=value_matmul)
            const_i = onnx.helper.make_node(name=name, op_type='Constant', inputs=[], outputs=[name], value=value)
            nodes.append(const_i)
            reshape_node.input[0] = const_i.output[0]

    # Hard-coded index ranges of leftover dynamic-shape nodes to delete.
    rm_nodes.extend(nodes[0 : 17])
    rm_nodes.extend(nodes[22 : 34])
    rm_nodes.extend(nodes[35 : 43])
    rm_nodes.extend(nodes[48 : 75])
    rm_nodes.append(nodes[252])
    # rm_nodes.extend(nodes[1659 : 1662]) # if GLU fused
    rm_nodes.extend(nodes[1689 : 1692]) # if GLU not fused
            
    # Rebuild the node list without the collected nodes and drop the
    # now-unused second graph output.
    nodes_all = copy.deepcopy(nodes)
    del graph.node[:]
    del graph.output[1]
    for node in nodes_all:
        if node not in rm_nodes:
            graph.node.append(node)
            
    onnx.save(onnx_model, save_onnx)


def reverse_mode(onnx_path):
    """Dump the node list (index, op_type, name) of an ONNX model to a text file.

    The output file shares the model's basename with a ``.txt`` extension and
    is written to the current working directory, replacing any previous dump.
    """
    onnx_model = onnx.load(onnx_path)
    nodes = onnx_model.graph.node

    txt_file = os.path.splitext(os.path.basename(onnx_path))[0] + ".txt"

    # Mode "w" truncates an existing file, replacing the old
    # remove-then-open-for-append sequence with a single call.
    with open(txt_file, "w", encoding="utf-8") as txt:
        for i, node in enumerate(nodes):
            txt.write("## {:<6} {:<22} {} \n".format(i, node.op_type, node.name))

# extrace all pointwise conv weight and bias
# extract a pointwise conv's weight and bias as MatMul-ready Constant nodes
def extrat_conv_tensors(onnx_model, wts_name, bias_name, rm_wts=None):
    """Turn a pointwise Conv's weight/bias initializers into Constant nodes.

    The weight is squeezed from (C_out, C_in, 1, 1) and transposed to
    (C_in, C_out) so it can feed a MatMul. Both tensors are appended to the
    graph as Constant nodes whose outputs reuse the original initializer
    names, and the pair of new nodes is returned.

    Args:
        onnx_model: the loaded ONNX ModelProto to modify in place.
        wts_name: initializer name of the Conv weight.
        bias_name: initializer name of the Conv bias.
        rm_wts: optional list; when given, the weight initializer name is
            appended so the caller can delete it later. BUGFIX: previously a
            required parameter, which broke the 3-argument call site in
            conv2matmul_add_input_mask.

    Raises:
        ValueError: if either initializer is not found in the graph.
    """
    wts_value = None
    bias_value = None
    for t in onnx_model.graph.initializer:
        if t.name == wts_name:
            wts_value = onnx.numpy_helper.to_array(t)
            if rm_wts is not None:
                rm_wts.append(t.name)
        if t.name == bias_name:
            bias_value = onnx.numpy_helper.to_array(t)

    # Fail loudly instead of the old AttributeError on None.squeeze().
    if wts_value is None or bias_value is None:
        raise ValueError(f"initializer not found: wts={wts_name!r}, bias={bias_name!r}")

    # Conv weight (C_out, C_in, 1, 1) -> MatMul weight (C_in, C_out).
    wts_value = wts_value.squeeze().transpose(1, 0)

    wts_const_tensor = onnx.helper.make_tensor(name=f'{wts_name}_new', data_type=onnx.TensorProto.FLOAT, dims=wts_value.shape, vals=wts_value)
    bias_const_tensor = onnx.helper.make_tensor(name=f'{bias_name}_new', data_type=onnx.TensorProto.FLOAT, dims=bias_value.shape, vals=bias_value)
    node_wts_const = onnx.helper.make_node(op_type='Constant', inputs=[], outputs=[wts_name], value=wts_const_tensor, name=f'node_{wts_name}')
    node_bias_const = onnx.helper.make_node(op_type='Constant', inputs=[], outputs=[bias_name], value=bias_const_tensor, name=f'node_{bias_name}')

    onnx_model.graph.node.append(node_wts_const)
    onnx_model.graph.node.append(node_bias_const)
    return node_wts_const, node_bias_const

# convert PointwiseConvs in conv_modules to Matmul+AddBias
def conv2matmul_add_input_mask(onnx_path, save_onnx):
    # extrace all pointwise conv weight and bias
    onnx_model = onnx.load(onnx_path)
    graph = onnx_model.graph
    nodes = onnx_model.graph.node
    
    for i, node in enumerate(nodes):
        wts_name, bias_name = None, None
        if node.op_type == "Conv" and "encoders" in node.name and "conv_module" in node.name \
                                    and "pointwise_conv" in node.name:
            wts_name = node.input[1]
            bias_name = node.input[2]
            node_wts_const, node_bias_const = extrat_conv_tensors(onnx_model, wts_name, bias_name)
            node_matmul = onnx.helper.make_node('MatMul', inputs=['A', 'B'], outputs=['Y'], name=node.name + "_Matmul_PWC")
            node_add = onnx.helper.make_node('Add', inputs=['A', 'B'], outputs=['C'], name=node.name + "_Add_PWC")
            
            # add matmul(,wts) + add(,bias) to replace pointwise_conv
            node_matmul.input[1] = node_wts_const.output[0]
            node_matmul.output[0] = node.output[0] + "_Matmul_PWC"
            
            node_add.input[0] = node_matmul.output[0]
            node_add.input[1] = node_bias_const.output[0]
            node_add.output[0] = node.output[0] + "_Add_PWC"
            
            # [Add_Transpose]_Conv changed to [Add_Matmul_Add_Transpose]
            if nodes[i-1].op_type == "Transpose" and nodes[i-2].op_type == "Add":
                node_matmul.input[0] = nodes[i-2].output[0]
                nodes[i-1].input[0] = node_add.output[0]
                # shortcut PointwiseConv
                nodes[i-1].output[0] = nodes[i+1].input[0]
                # remove PointwiseConv
                nodes.remove(node)
            # Conv_[Transpose_Mul] changed to [Transpose_Matmul_Add_Mul]
            if nodes[i+1].op_type == "Transpose" and nodes[i+2].op_type == "Mul":
                node_matmul.input[0] = nodes[i+1].output[0]
                nodes[i+2].input[0] = node_add.output[0]
                # shortcut PointwiseConv
                nodes[i+1].input[0] = node.input[0]
                # remove PointwiseConv
                nodes.remove(node)
    
    # input_mask_1 for padding embedding conv feature masking
    input_mask_node = onnx.helper.make_tensor_value_info(name='input_mask_1', elem_type=1, shape=[1, 256, 511, 19])
    graph.input.append(input_mask_node)

    input_mask_node_2 = onnx.helper.make_tensor_value_info(name='input_mask_2', elem_type=1, shape=[1, 256, 511])
    graph.input.append(input_mask_node_2)

    input_mask_node_3 = onnx.helper.make_tensor_value_info(name='input_mask_3', elem_type=1, shape=[1, 511, 256])
    graph.input.append(input_mask_node_3)
    
    # embedding: insert only input_mask_1 with Mul after embedding CBR
    for i, node in enumerate(nodes):        
        if node.name == "/embed/model/conv/conv.3/Relu":
            node_mul = onnx.helper.make_node(name='Cbr_mask', op_type='Mul', inputs=[node.output[0], 'input_mask_1'], \
                                            outputs=['Cbr_mask_out'])
            nodes.append(node_mul)
            for j, node_next in enumerate(nodes):
                if len(node_next.input) > 0 and node.output[0] == node_next.input[0]:
                    if node_next.name == "Cbr_mask": continue
                    node_next.input[0] = node_mul.output[0]
    
        # insert input_mask_2 with Mul before DWC in all 15 Conv Modules
        if (node.op_type == "Mul" and node.name != "Cbr_mask" and "conv_module/Mul" in node.name) \
            or (node.op_type == "Glu" and "conv_module/Glu" in node.name):
            # This situation is used in seperate GLU (split_sigmoid_mul) nodes
            node_mul_2 = onnx.helper.make_node(name=f'Mul_new_mask_2_{i}', op_type='Mul', \
                                            inputs=[node.output[0], 'input_mask_2'], \
                                            outputs=[f'Mul_new_mask_2_out{i}'])
            nodes.append(node_mul_2)
            for j, node_next in enumerate(nodes):
                if node_next.op_type == "Conv" and node.output[0] == node_next.input[0] and "depthwise_conv" in node_next.name:
                    if node.name == f"Mul_new_mask_2_{i}": continue
                    node_next.input[0] == node.output[0]
        
        # insert input_mask_3 with Mul after DWC in
        # depends on before PointwiseConv modification
        if node.op_type == "Add" and "_Add_PWC" in node.name and "pointwise_conv2" in node.name:
            node_mul_3 = onnx.helper.make_node(name=f'Mul_new_mask_3_{i}', op_type="Mul",   \
                                                inputs=[node.output[0], 'input_mask_3'],   \
                                                outputs=[f'Mul_new_mask_3_out{i}'])
            nodes.append(node_mul_3)
            for j, node_next in enumerate(nodes):
                if node_next.op_type == "Mul" and node_next.input[0] == node.output[0]:
                    if node_next.name == "Mul_new_mask_3_{i}": continue
                    node_next.input[0] = node.output[0]
    
    # onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
    onnx.save(onnx_model, save_onnx)                   


def add_shape_to_Reshape():
    """Freeze self_attn Reshape/Reshape_1/Reshape_2 shapes to [4, 64, -1].

    For each matching Reshape, the Constant feeding its shape input is
    replaced by a new Constant holding the static shape.

    NOTE(review): relies on a module-global ``onnx_path`` being bound before
    the call (the __main__ block imports it from onnx_files), and modifies
    the model only in memory — nothing is saved here.
    """
    onnx_model = onnx.load(onnx_path)
    graph = onnx_model.graph

    for node0 in list(graph.node):
        if node0.op_type == "Reshape" and (node0.name.endswith("self_attn/Reshape") or node0.name.endswith("self_attn/Reshape_1") \
            or node0.name.endswith("self_attn/Reshape_2")):
            # Snapshot the node list: we remove/append while scanning.
            for node1 in list(graph.node):
                if node1.op_type == "Constant" and node1.output[0] == node0.input[1]:
                    # BUGFIX: "np" was never imported (the file uses "numpy"),
                    # so this function raised NameError on first match.
                    data_array = numpy.array([4, 64, -1]).astype(numpy.int64)
                    value = onnx.helper.make_tensor(name='value', data_type=onnx.TensorProto.INT64, dims=data_array.shape, vals=data_array)
                    new_scale_node = onnx.helper.make_node(op_type='Constant', inputs=[], outputs=[node1.output[0]], value=value)
                    graph.node.remove(node1)
                    graph.node.append(new_scale_node)


if __name__ == '__main__':
    # Model file paths are centralized in onnx_files.py.
    from onnx_files import onnx_path, fused_onnx, static_onnx, inputmask_onnx, const_onnx

    # Pipeline overview (only step 1 is active; the rest are run manually):
    #   1. fuse_glu_swish_matrixbd(onnx_path, fused_onnx)
    #      fuse Swish / MatrixbdCompute patterns into single custom ops
    #   2. remove_pattern_and_make_static(fused_onnx, static_onnx)
    #      freeze dynamic shapes and cut Where sub-graphs
    #   3. cut_modified_nodes_new.py
    #      conv2matmul_add_input_mask: static_onnx -> inputmask_onnx
    #   4. encoder_optimize.py
    #      add Constants to Reshape; TransposeMatmulAdd
    #   reverse_mode(<model>) dumps a node-index listing for inspection.
    fuse_glu_swish_matrixbd(onnx_path, fused_onnx)