import onnx.numpy_helper as numpy_helper
import onnx
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import onnxruntime
import struct

# Path of the shape-inferred, operator-fused ONNX encoder to be rewritten.
# model_file = "/public/ai_platform/wangc/full/xformer_encoder.onnx"
fused_onnx = "encoder_shapeinfer_fused.onnx"

model = onnx.load(fused_onnx)
graph = model.graph

# Register a new float32 graph input carrying the attention mask with a
# fixed static shape (elem_type=1 is TensorProto.FLOAT). It is consumed by
# the Add nodes created in the Where/Softmax rewiring pass below.
input_mask_node_0 = onnx.helper.make_tensor_value_info(name='input_mask_0', elem_type=1, shape=[4, 511, 511])
graph.input.append(input_mask_node_0)

rmv_list = []          # nodes scheduled for removal during the final rebuild
reshape_list_64 = []   # Reshape nodes that will get a static [-1, 4, 64] shape
reshape_list_256 = []  # Reshape nodes that will get a static [1, -1, 256] shape

# Give every Split node an explicit 'split' attribute of two 256-wide chunks
# so downstream tooling does not have to infer the partition sizes.
for split_node in graph.node:
    if split_node.op_type != 'Split':
        continue
    split_node.attribute.insert(1, onnx.helper.make_attribute('split', [256, 256]))

# encoders/self_attn: eliminate the shape-gather-unsqueeze-concat chains
# that dynamically build Reshape shape tensors (they are replaced by static
# shape Constants further down), including the Identity feeding the
# [1, -1, 4, 64] reshape.
for i, node0 in enumerate(graph.node):
    if node0.op_type == "Concat" and graph.node[i - 4].op_type == "Unsqueeze":
        # NOTE(review): relies on the fixed node layout of this export --
        # the six nodes ending at the Concat (i-5 .. i) are assumed to form
        # one shape-building chain; confirm against the actual topology.
        rmv_list.extend(graph.node[i - 5: i + 1])
        for j, node1 in enumerate(graph.node):
            # Also remove the Gather (plus its two predecessors) whose
            # output feeds the Unsqueeze four nodes before the Concat.
            if node1.op_type == "Gather" and node1.output[0] == graph.node[i - 4].input[0]:
                rmv_list.extend(graph.node[j - 2: j + 1])

# Classify each Reshape fed by a dynamically-built shape tensor
# ('.../self_attn/Concat') by how many pieces the producing Concat joins,
# so a matching static shape Constant can be substituted later:
#   - 4-input Concat -> target shape [-1, 4, 64]  (split into heads)
#   - 3-input Concat -> target shape [1, -1, 256] (merged heads)
for i, node0 in enumerate(graph.node):
    if node0.op_type == "Reshape" and 'self_attn/Concat' in node0.input[1]:
        for j, node1 in enumerate(graph.node):
            if node1.output[0] == node0.input[1]:
                if len(node1.input) == 4:
                    reshape_list_64.append(node0)
                elif len(node1.input) == 3:
                    # BUG FIX: the original tested len(node0.input) == 3, but
                    # an ONNX Reshape always has exactly 2 inputs (data,
                    # shape), so that branch was dead and reshape_list_256
                    # stayed empty. The producing Concat (node1) is the node
                    # with 3 inputs here.
                    # NOTE(review): the guard below inspects the node *after*
                    # this Reshape (i + 1); confirm it should not be
                    # node0.name == '/embed/model/Reshape' instead.
                    if graph.node[i + 1].name == '/embed/model/Reshape':
                        continue
                    reshape_list_256.append(node0)
# Rewire each [-1, 4, 64] Reshape onto a freshly-created static shape
# Constant; remember the Constant node names for later bookkeeping.
reshape_new_list = []
shape_64 = np.array([-1, 4, 64]).astype(np.int64)
for idx, target in enumerate(reshape_list_64):
    shape_tensor = onnx.helper.make_tensor(
        name=f'Reshape_shape_{idx}',
        data_type=onnx.TensorProto.INT64,
        dims=shape_64.shape,
        vals=shape_64,
    )
    shape_const = onnx.helper.make_node(
        name=f'Reshape_new_{idx}',
        op_type='Constant',
        inputs=[],
        outputs=[f'Reshape_out_{idx}'],
        value=shape_tensor,
    )
    model.graph.node.append(shape_const)
    target.input[1] = shape_const.output[0]
    reshape_new_list.append(f'Reshape_new_{idx}')
print(reshape_new_list)
len64 = len(reshape_list_64)
print(len64)
# Same treatment for the [1, -1, 256] Reshapes; constant numbering
# continues after the 64-group so names stay unique.
shape_256 = np.array([1, -1, 256]).astype(np.int64)
for offset, target in enumerate(reshape_list_256):
    idx = len64 + offset
    shape_tensor = onnx.helper.make_tensor(
        name=f'Reshape_shape_{idx}',
        data_type=onnx.TensorProto.INT64,
        dims=shape_256.shape,
        vals=shape_256,
    )
    shape_const = onnx.helper.make_node(
        name=f'Reshape_new_{idx}',
        op_type='Constant',
        inputs=[],
        outputs=[f'Reshape_out_{idx}'],
        value=shape_tensor,
    )
    model.graph.node.append(shape_const)
    target.input[1] = shape_const.output[0]

# Pin the '/embed/model/Reshape' target shape to a static [1, 511, 4864].
# The Constant appended mid-iteration is visited later but fails the name
# check, so the loop terminates normally.
for embed_reshape in graph.node:
    if embed_reshape.name != '/embed/model/Reshape':
        continue
    shape_arr = np.array([1, 511, 4864]).astype(np.int64)
    shape_tensor = onnx.helper.make_tensor(
        name='Reshape_511_4864',
        data_type=onnx.TensorProto.INT64,
        dims=shape_arr.shape,
        vals=shape_arr,
    )
    shape_const = onnx.helper.make_node(
        name='Reshape_511_4864',
        op_type='Constant',
        inputs=[],
        outputs=['Reshape_511_4864_out'],
        value=shape_tensor,
    )
    model.graph.node.append(shape_const)
    embed_reshape.input[1] = shape_const.output[0]

# # matrixbd_compute
# for i, node0 in enumerate(graph.node):
#     if node0.op_type == "MatMul" and "self_attn/MatMul_1" in node0.output[0]:
#         graph.node[i + 42].input[1] = node0.output[0]
#         rmv_list.extend(graph.node[i + 1: i + 42])

# Collapse 4-D Transpose permutations to 3-D: drop the leading (batch)
# axis and shift the remaining indices down by one. The embed-model
# Transpose and already-3-D perms are left untouched.
for t_node in graph.node:
    if t_node.op_type != "Transpose":
        continue
    perm = t_node.attribute[0].ints
    if t_node.name == "/embed/model/Transpose" or len(perm) == 3:
        continue
    reduced = onnx.helper.make_attribute('perm', np.array(perm[1:]) - 1)
    del t_node.attribute[0]
    t_node.attribute.insert(0, reduced)

# The downstream shape-inference tool ("suinfer") does not support Mul/Div
# with a scalar operand, so broadcast each scalar Constant operand into a
# 1-D vector of the matching length.
for i, node0 in enumerate(graph.node):
    if node0.op_type == "Mul" or node0.op_type == "Div":
        for j, node1 in enumerate(graph.node):
            if node1.op_type == "Constant" and node0.input[1] == node1.output[0]:
                # Scalar payload read as 4 raw little-endian bytes; assumes
                # the Constant holds a single float32 -- TODO confirm.
                raw_data = node1.attribute[0].t.raw_data
                data = struct.unpack('f', raw_data)
                data = list(data)[0]
                if node0.op_type == "Mul":
                    # presumably the 256-wide feature dimension -- verify
                    data_array = np.full([256],fill_value=data)
                elif node0.op_type == "Div":
                    # presumably the 511-long sequence dimension -- verify
                    data_array = np.full([511],fill_value=data)
                # NOTE(review): every replacement tensor is named 'value',
                # creating duplicate tensor names in the graph; confirm the
                # consuming tooling tolerates that.
                value = onnx.helper.make_tensor(name='value', data_type=onnx.TensorProto.FLOAT, dims=data_array.shape, vals=data_array)
                new_scale_node = onnx.helper.make_node(op_type='Constant', inputs=[], outputs=[node1.output[0]], value=value)
                # remove + insert at the same index keeps graph.node the same
                # length, so both enumerate loops remain index-consistent.
                graph.node.remove(node1)
                graph.node.insert(j, new_scale_node) 

# Drop the second graph input.
# NOTE(review): positional delete -- confirm input[1] is the one made
# redundant by the edits above.
del graph.input[1]

# Rewire the attention masking: connect the Softmax output directly to the
# MatMul that followed the *second* Where, cutting that Where out, and apply
# the mask additively through the new 'input_mask_0' graph input instead.
for i, node0 in enumerate(graph.node):
    if node0.op_type == 'Where' and graph.node[i + 1].op_type == 'Softmax':
        # graph.node[i + 1].input[0] = node0.input[2]
        # Bypass the second Where: feed the Softmax result to the MatMul 5
        # positions ahead. NOTE(review): every offset here (i+5, i-9, i-5,
        # i+2..i+12) assumes this exact exported node ordering.
        graph.node[i + 5].input[0] = graph.node[i + 1].output[0]
        if 'encoders.0' in node0.name:
            rmv_list.extend(graph.node[i - 9 : i + 1]) # remove the producer chain of the disconnected second Where (longer chain in the first encoder layer)
        else:
            rmv_list.extend(graph.node[i - 5 : i + 1])
        rmv_list.extend(graph.node[i + 2 : i + 5])  # remove ops on the downstream Reshape input chain (reusable)
        rmv_list.extend(graph.node[i + 7 : i + 12]) # remove ops on the downstream Reshape input chain (reusable)
        # Mask is applied additively: Softmax input becomes scores + mask.
        add_node = onnx.helper.make_node(name=f'Add_new_{i}', op_type='Add', inputs=[node0.input[2], 'input_mask_0'], \
                                            outputs=[f'Add_new_{i}_out'])
        graph.node[i + 1].input[0] = add_node.output[0]
        model.graph.node.append(add_node)

# Remove the unsqueeze-equal chain anchored at Unsqueeze_26: the node
# immediately before it through the three nodes after it (five in total).
for idx, candidate in enumerate(graph.node):
    if candidate.name == '/encoders/encoders.0/self_attn/Unsqueeze_26':
        rmv_list.extend(graph.node[idx - 1: idx + 4])

# Supply a static axes constant ([1]) for the Unsqueeze that sits on the
# network input. The Constant appended mid-iteration fails the name check
# when visited, so the loop terminates normally.
for candidate in graph.node:
    if candidate.name == '/embed/model/Unsqueeze':
        axes = np.array([1]).astype(np.int64)
        axes_tensor = onnx.helper.make_tensor(
            name="/embed/model/Constant_output_0",
            data_type=onnx.TensorProto.INT64,
            dims=axes.shape,
            vals=axes,
        )
        axes_const = onnx.helper.make_node(
            name="/embed/model/Constant_output_0",
            op_type='Constant',
            inputs=[],
            outputs=[candidate.input[1]],
            value=axes_tensor,
        )
        model.graph.node.append(axes_const)

# Schedule for deletion the shape Constants created for the Reshapes next
# to the Where nodes (every 5th 'Reshape_new_*' constant, hard-coded here).
_where_reshape_consts = {
    'Reshape_new_4', 'Reshape_new_9', 'Reshape_new_14', 'Reshape_new_19',
    'Reshape_new_24', 'Reshape_new_29', 'Reshape_new_34', 'Reshape_new_39',
    'Reshape_new_44', 'Reshape_new_49', 'Reshape_new_54', 'Reshape_new_59',
}
for candidate in graph.node:
    if candidate.name in _where_reshape_consts:
        rmv_list.append(candidate)

# The pos_embed produced by extend_pe is constant, so the MatMuls consuming
# the Slice output can be folded offline: collect them here, fold below.
matmul_list = []    
for i, node0 in enumerate(graph.node):
    if node0.op_type == "MatMul" and node0.input[0] == "/embed/model/out/out.1/Slice_output_0":
        matmul_list.append(node0)

# The Slice node's input data, saved separately from the ONNX graph.
# slice_data = np.load('slice546.npy')
slice_data = np.load('Slice_673.npy')
# Truncate to the fixed 511-step sequence length used throughout the script.
# NOTE(review): assumes slice_data is rank-3 with the sequence on axis 1.
matmul_input0_data = slice_data[:, :511, :]

# Pull each collected MatMul's weight out of the graph initializers,
# keyed by initializer name.
matmul_input1 = {}
for i, matmul_node in enumerate(matmul_list):
    for t in model.graph.initializer:
        # fetch the MatMul weight tensor as a NumPy array
        if t.name == matmul_node.input[1]:
            matmul_input1[t.name] = numpy_helper.to_array(t)
        
# Fold each constant MatMul offline: compute pos_data @ weight with NumPy,
# inject the product as a Constant node, and rewire the following Reshape
# to consume it; the original MatMul is scheduled for removal.
for i, node0 in enumerate(graph.node):
    if node0 in matmul_list:
        matmul_input1_data = matmul_input1[node0.input[1]]
        rmv_list.append(node0)
        if node0.name == '/encoders/encoders.0/self_attn/linear_pos/MatMul':
            # reshape_node = graph.node[i + 73]
            # NOTE(review): hard offset -- the matmul+reshape pair's Reshape
            # sits 91 nodes ahead only in this exact export; re-verify after
            # any upstream graph change.
            reshape_node = graph.node[i + 91]  # position of the Reshape in the matmul+reshape pair
        else:
            reshape_node = graph.node[i + 1]
        value_matmul = np.matmul(matmul_input0_data, matmul_input1_data).astype(np.float32)
        name = f'{reshape_node.name}_input0'
        value = onnx.helper.make_tensor(name=name, data_type=onnx.TensorProto.FLOAT, dims=value_matmul.shape, vals=value_matmul)
        const_i = onnx.helper.make_node(name=name, op_type='Constant', inputs=[], outputs=[name], value=value)
        model.graph.node.append(const_i)
        reshape_node.input[0] = const_i.output[0]

# Final sweep: schedule fixed index ranges of the (already edited) node
# list for removal. NOTE(review): these positions are valid only for this
# exact model export; re-derive them if the upstream graph changes.
rmv_list.extend(graph.node[0 : 17])
rmv_list.extend(graph.node[22 : 34])
rmv_list.extend(graph.node[35 : 43])
# rmv_list.extend(graph.node[48 : 68])
# rmv_list.append(graph.node[298])
# rmv_list.extend(graph.node[2487 : 2490])
rmv_list.extend(graph.node[48 : 75])
rmv_list.append(graph.node[252])
rmv_list.extend(graph.node[1659 : 1662])
        
# Rebuild the graph without the scheduled nodes. Membership ('not in')
# uses protobuf value equality, so the deep copies compare equal to the
# originals held in rmv_list; any node whose content equals a scheduled
# one is dropped as well.
nodes = copy.deepcopy(graph.node)
del graph.node[:]
# Drop the second graph output.
# NOTE(review): positional delete -- confirm output[1] is no longer
# produced after the edits above.
del graph.output[1]
for node in nodes:
    if node not in rmv_list:
        graph.node.append(node)
        
# onnx.save(model, "ce_modify_210.onnx")
# Output path is provided by the project-local onnx_files module.
from onnx_files import static_onnx
onnx.save(model, static_onnx)