import os
import onnx
import numpy

from configure import logger, level, msg_fmt, __LINE__, __FUNC__
from utils import rmnodes_saveonnx

# Configure the shared project logger so records from this module are
# tagged with this file's name at the globally configured level.
file_name = os.path.basename(__file__)
logger.set_level(level=level, name=file_name)

class OpModify:
    """In-place graph surgery for a 15-layer conformer encoder ONNX model.

    The target op library only supports 3-dim MatMul compute (with matrix B
    treated as a weight), so the 4-dim self-attention sub-graphs are
    rewritten to 3 dims, Where ops are replaced by Add (or removed), and
    Split ops receive an explicit "split" attribute.  All methods mutate
    ``self.model_buffer`` (an ``onnx.ModelProto``) directly; progress is
    reported through the project helper ``rmnodes_saveonnx``.
    """

    def __init__(self, onnx_model, opt_sim=False, logger=logger, level=level):
        # onnx_model: a loaded onnx.ModelProto, modified in place.
        self.model_buffer = onnx_model
        self.graph = self.model_buffer.graph
        self.opt_sim = opt_sim
        self.logger = logger

    def add_input_mask3_for_pwc2(self):
        """Append graph input "input_mask_3" and insert a Mul node
        (Transpose_1 output * mask) after each conv_module pointwise-Conv2.

        NOTE(review): the final rewiring of Add_2's second input onto the
        new Mul output is still commented out below, so this pass only adds
        nodes; it is also disabled in ``__call__``.
        """
        input_mask_3 = onnx.helper.make_tensor_value_info(
            name="input_mask_3",
            elem_type=onnx.TensorProto.FLOAT,
            shape=[1, 511, 256],
        )
        self.model_buffer.graph.input.append(input_mask_3)

        transpose_1_ops = [f"/encoders/encoders.{i}/conv_module/Transpose_12" for i in range(15)]
        add_2_ops = [f"/encoders/encoders.{i}/Add_2" for i in range(15)]
        print(add_2_ops)
        transpose_1_nodes = {}
        add_2_nodes = {}
        # BUG FIX: the original used "for nd.name in <list>", which *assigns*
        # every list entry to nd.name (renaming nodes in the graph) instead
        # of testing membership.
        for nd in self.model_buffer.graph.node:
            if nd.name in transpose_1_ops:
                transpose_1_nodes[nd.name] = nd
            if nd.name in add_2_ops:
                add_2_nodes[nd.name] = nd

        for i in range(15):
            transpose_1_output = f"/encoders/encoders.{i}/conv_module/Transpose_1_output_0"

            mul_name = f"/encoders/encoders.{i}/conv_module/Mul_mask_pwc_2"
            mul_output = f"/encoders/encoders.{i}/conv_module/Mul_mask_pwc_2_output_0"
            node_mul = onnx.helper.make_node(
                name=mul_name,
                op_type="Mul",
                inputs=[transpose_1_output, "input_mask_3"],
                outputs=[mul_output],
            )
            self.model_buffer.graph.node.append(node_mul)

            # Rewiring Add_2.input[1] onto the Mul output is intentionally
            # left disabled (debug print only), matching the original.
            # (Renamed the loop variable: the original reused "i" here,
            # shadowing the encoder-layer index of the outer loop.)
            for nd in self.model_buffer.graph.node:
                if nd.name in add_2_ops:
                    print(nd.name)
                    # nd.input[1] = mul_output

    def split_add_attr(self):
        """Give every Split node an explicit "split" attribute of
        [256, 256] (two equal halves), for runtimes that do not infer it."""
        for nt in self.model_buffer.graph.node:
            if nt.op_type == "Split":
                new_attr = onnx.helper.make_attribute("split", [256, 256])
                nt.attribute.insert(1, new_attr)

    def matmul_dim4_to_dim3(self):
        """Rewrite the self-attention MatMul neighbourhood from 4 dims to 3.

        The op library only supports 3-dim MatMul, so for each of the 15
        encoder layers: Reshape targets become [-1, 4, 64], Transpose perms
        drop the leading axis, and the constant matrix-B initializers of
        MatMul_1 are squeezed from [1, 4, 64, 1021] to [4, 64, 1021].
        """
        reshape_ops = ["/encoders/encoders.{}/self_attn/Reshape".format(i) for i in range(15)]
        reshape_1_ops = ["/encoders/encoders.{}/self_attn/Reshape_1".format(i) for i in range(15)]
        reshape_2_ops = ["/encoders/encoders.{}/self_attn/Reshape_2".format(i) for i in range(15)]

        transpose_ops = ["/encoders/encoders.{}/self_attn/Transpose".format(i) for i in range(15)]
        transpose_1_ops = ["/encoders/encoders.{}/self_attn/Transpose_1".format(i) for i in range(15)]
        transpose_2_ops = ["/encoders/encoders.{}/self_attn/Transpose_2".format(i) for i in range(15)]
        transpose_3_ops = ["/encoders/encoders.{}/self_attn/Transpose_3".format(i) for i in range(15)]

        transpose_5_ops = ["/encoders/encoders.{}/self_attn/Transpose_5".format(i) for i in range(15)]

        matrixb_consts = ["/encoders/encoders.{}/self_attn/Transpose_4_output_0".format(i) for i in range(15)]

        # Only modifies Reshape input_1 (the shape initializer tensor);
        # matched forcibly by op name, independent of any other op.
        def reshape_shape_to_3dims(reshape_ops_list):
            reshape_to = numpy.array([-1, 4, 64]).astype(numpy.int64)
            name = "/encoders/encoders.0/self_attn/Concat_output_0_3Dims"
            # BUG FIX: this helper is invoked three times; the original
            # inserted an identically named initializer on every call,
            # producing duplicate initializer names. Insert it only once.
            if all(init.name != name for init in self.model_buffer.graph.initializer):
                shape_tensor = onnx.numpy_helper.from_array(reshape_to, name=name)
                self.model_buffer.graph.initializer.insert(-1, shape_tensor)

            for node in self.model_buffer.graph.node:
                if node.op_type == "Reshape" and node.name in reshape_ops_list:
                    node.input[1] = name

        # Only modifies Transpose attribute[0] ("perm");
        # matched forcibly by op name, independent of any other op.
        def transpose_perm_to_3dims(transpose_ops_list):
            for j, node_upper in enumerate(self.model_buffer.graph.node):
                if node_upper.op_type == "Transpose" and node_upper.name in transpose_ops_list:
                    # Drop the leading axis: e.g. perm [0, 2, 1, 3] -> [1, 0, 2].
                    perm_4dim = node_upper.attribute[0].ints
                    attr_perm_3dim = onnx.helper.make_attribute(
                        "perm", numpy.array(perm_4dim[1:], dtype=numpy.int64) - 1)
                    # Replace attribute[0] in place: insert new at 0, delete old at 1.
                    self.model_buffer.graph.node[j].attribute.insert(0, attr_perm_3dim)
                    del self.model_buffer.graph.node[j].attribute[1]

        # Only modifies the initializer value of MatMul_1's matrix B;
        # matched forcibly by initializer name, independent of any other op.
        def matmul_b_init_to_3dims(matrixb_consts):
            for i, wt_4dims in enumerate(self.model_buffer.graph.initializer):
                if wt_4dims.name in matrixb_consts:
                    wt_4dims_npdata = onnx.numpy_helper.to_array(wt_4dims)
                    # squeeze drops the size-1 leading dim: [1,4,64,1021] -> [4,64,1021].
                    wt_3dims_npdata = numpy.squeeze(wt_4dims_npdata)
                    wt_3dims = onnx.numpy_helper.from_array(wt_3dims_npdata, name=wt_4dims.name)
                    self.model_buffer.graph.initializer[i].CopyFrom(wt_3dims)

        # The op lib only supports 3-dim MatMul; matrix B defaults to weight.
        # Only the self_attn modules contain 4-dim MatMul computations:
        # 1. self_attn/MatMul: /encoders/encoders.0/self_attn/MatMul
        #           A name: /encoders/encoders.0/self_attn/Transpose_1_output_0
        #           tensor: float32[1,4,511,64]
        #           B name: /encoders/encoders.0/self_attn/Transpose_3_output_0
        #           tensor: float32[1,4,64,511]
        # 2. self_attn/MatMul_1: /encoders/encoders.0/self_attn/MatMul_1
        #           B name: /encoders/encoders.0/self_attn/Transpose_4_output_0
        #           category: Initializer
        #           tensor: float32[1,4,64,1021]
        # 3. self_attn/MatMul_2 : /encoders/encoders.0/self_attn/MatMul_2
        #           A name: /encoders/encoders.0/self_attn/Where_1_output_0
        #           B name: /encoders/encoders.0/self_attn/Transpose_output_0
        #           tensor: float32[1,4,511,64]
        # (The original iterated all nodes here matching the three MatMul
        # name patterns, but every branch was "pass"; the no-op loop has
        # been removed and its documentation kept above.)

        reshape_shape_to_3dims(reshape_ops)
        transpose_perm_to_3dims(transpose_1_ops)
        transpose_perm_to_3dims(transpose_2_ops)

        reshape_shape_to_3dims(reshape_1_ops)
        transpose_perm_to_3dims(transpose_3_ops)
        matmul_b_init_to_3dims(matrixb_consts)
        reshape_shape_to_3dims(reshape_2_ops)
        transpose_perm_to_3dims(transpose_ops)
        transpose_perm_to_3dims(transpose_5_ops)

    def Where_to_Add_or_Mul(self):
        """Replace each self_attn Where with an Add and drop each Where_1.

        Collected obsolete nodes go into ``self.rm_nodes`` for later removal
        by ``rmnodes_saveonnx`` (see ``__call__``).
        NOTE(review): the Add consumes graph input "input_mask_1", which is
        assumed to be added elsewhere — confirm against the full pipeline.
        """
        self.rm_nodes = []

        where_ops = [f"/encoders/encoders.{i}/self_attn/Where" for i in range(15)]
        where_1_ops = [f"/encoders/encoders.{i}/self_attn/Where_1" for i in range(15)]
        softmax_ops = [f"/encoders/encoders.{i}/self_attn/Softmax" for i in range(15)]
        matmul_2_ops = [f"/encoders/encoders.{i}/self_attn/MatMul_2" for i in range(15)]

        where_nodes = {}
        where_1_nodes = {}
        softmax_nodes = {}
        matmul_2_nodes = {}

        for node in self.model_buffer.graph.node:
            # First Where: can be replaced by an Add op [-inf, input data].
            if node.op_type == "Where" and node.name in where_ops:
                where_nodes[node.name] = node
            if node.op_type == "Where" and node.name in where_1_ops:
                where_1_nodes[node.name] = node
            if node.op_type == "Softmax" and node.name in softmax_ops:
                softmax_nodes[node.name] = node
            if node.op_type == "MatMul" and node.name in matmul_2_ops:
                matmul_2_nodes[node.name] = node

        for i in range(15):
            where_name = f"/encoders/encoders.{i}/self_attn/Where"
            where_1_name = f"/encoders/encoders.{i}/self_attn/Where_1"
            softmax_name = f"/encoders/encoders.{i}/self_attn/Softmax"
            matmul_2_name = f"/encoders/encoders.{i}/self_attn/MatMul_2"

            add_name = f"/encoders/encoders.{i}/self_attn/Add_replace_Where"

            node_where = where_nodes[where_name]
            # The Add reuses Where's data input and output names, so no
            # downstream consumer needs rewiring for this substitution.
            node_add = onnx.helper.make_node(
                name=add_name,
                op_type="Add",
                inputs=[node_where.input[2], "input_mask_1"],
                outputs=node_where.output,
            )
            self.model_buffer.graph.node.append(node_add)
            self.rm_nodes.append(node_where)

            # Second Where: can be deleted outright [0, input data]; its
            # input comes from the first Where -> Softmax chain.
            # softmax([-inf, x]) = [0, softmax(x)]; passing through the
            # second Where still yields [0, softmax(x)], so it is redundant.
            # Bypass it by feeding Softmax's output straight into MatMul_2.
            self.rm_nodes.append(where_1_nodes[where_1_name])
            matmul_2_nodes[matmul_2_name].input[0] = softmax_nodes[softmax_name].output[0]

    def squeeze_initializer_dim_1(self, initia_name):
        """Squeeze axis 0 (which must have size 1) off the initializer
        named ``initia_name``, e.g. [1, A, B] -> [A, B]."""
        for i, wt_4dims in enumerate(self.model_buffer.graph.initializer):
            if wt_4dims.name == initia_name:
                wt_4dims_npdata = onnx.numpy_helper.to_array(wt_4dims)
                wt_3dims_npdata = numpy.squeeze(wt_4dims_npdata, axis=0)
                wt_3dims = onnx.numpy_helper.from_array(wt_3dims_npdata, name=wt_4dims.name)
                self.model_buffer.graph.initializer[i].CopyFrom(wt_3dims)
        rmnodes_saveonnx(None, f"initializer: {initia_name}, 4 dims to 3 dims")

    def reshape_shape_to_3dims(self, reshape_ops_list, shape_dims_list, new_inita_name):
        """Add a new shape initializer ``new_inita_name`` holding
        ``shape_dims_list`` (int64) and point every listed Reshape's shape
        input (input[1]) at it."""
        reshape_to = numpy.array(shape_dims_list).astype(numpy.int64)
        shape_tensor = onnx.numpy_helper.from_array(reshape_to, name=new_inita_name)
        self.model_buffer.graph.initializer.insert(-1, shape_tensor)

        for node in self.model_buffer.graph.node:
            if node.op_type == "Reshape" and node.name in reshape_ops_list:
                node.input[1] = new_inita_name
        rmnodes_saveonnx(None, f"initializer: {new_inita_name}, 4 dims to 3 dims")

    def modify_attr(self, ops, pos, attr_name, att_value_list, replace=True):
        """For each node named in ``ops``, insert attribute ``attr_name`` =
        ``att_value_list`` at index ``pos + 1``; with ``replace=True`` the
        attribute originally at ``pos`` is deleted, so the new attribute
        ends up occupying index ``pos``."""
        for nt in self.model_buffer.graph.node:
            if nt.name in ops:
                new_attr = onnx.helper.make_attribute(attr_name, att_value_list)
                nt.attribute.insert(pos + 1, new_attr)
                if replace:
                    del nt.attribute[pos]

    def decrease_initializer_value(self, initia_name, delta=1):
        """Subtract ``delta`` element-wise from the initializer named
        ``initia_name``."""
        for i, wt in enumerate(self.model_buffer.graph.initializer):
            if wt.name == initia_name:
                wt_npdata = onnx.numpy_helper.to_array(wt)
                # BUG FIX: the original hard-coded "- 1", silently ignoring
                # the delta parameter.
                wt_do_delta = wt_npdata - delta
                wt_new = onnx.numpy_helper.from_array(wt_do_delta, name=wt.name)
                self.model_buffer.graph.initializer[i].CopyFrom(wt_new)
        rmnodes_saveonnx(None, f"initializer: {initia_name}, decrease {delta}")

    def rel_shift_3dims(self):
        """Adapt the relative-shift sub-graph (ConstantOfShape / Reshape_4 /
        Reshape_5 / Slice constants) to the 3-dim layout."""
        # Concat_5 input_0 initializer: 4 dims -> 3 dims.
        concat_5_input0_init = "/encoders/encoders.0/self_attn/ConstantOfShape_output_0"
        self.squeeze_initializer_dim_1(concat_5_input0_init)

        reshape_4_input1_shapeinit = "/encoders/encoders.0/self_attn/Concat_6_output_0" + "_3dims"
        reshape_4_ops = [f"/encoders/encoders.{i}/self_attn/Reshape_4" for i in range(15)]
        shape_dims = [4, 1022, 511]
        self.reshape_shape_to_3dims(reshape_4_ops, shape_dims, reshape_4_input1_shapeinit)

        reshape_5_input1_shapeinit = "2586" + "_3dims"
        reshape_5_ops = [f"/encoders/encoders.{i}/self_attn/Reshape_5" for i in range(15)]
        shape_dims = [4, 511, 1021]
        self.reshape_shape_to_3dims(reshape_5_ops, shape_dims, reshape_5_input1_shapeinit)

        # These constants feed the per-layer Slice / Slice_2 nodes; since
        # the leading batch axis is gone, each axis index drops by one.
        slice_inita = "/embed/Constant_output_0"
        self.decrease_initializer_value(slice_inita)

        slice_2_inita = "/encoders/encoders.0/self_attn/Constant_74_output_0"
        self.decrease_initializer_value(slice_2_inita)

    def clean_and_re_shape_inference(self):
        """Clear every cached value_info type and re-run ONNX shape
        inference so recorded shapes reflect the 3-dim rewrite."""
        for value_info in self.model_buffer.graph.value_info:
            value_info.ClearField("type")
        self.model_buffer = onnx.shape_inference.infer_shapes(self.model_buffer)
        # BUG FIX: infer_shapes returns a *new* ModelProto; refresh
        # self.graph so it does not point at the stale pre-inference graph.
        self.graph = self.model_buffer.graph
        rmnodes_saveonnx(None, "clean and re_shape_inference")

    def __call__(self, save_onnx):
        """Run the whole pipeline and save the result to ``save_onnx``."""
        # self.add_input_mask3_for_pwc2()
        msg = "Add Mul and input_mask_3 after pointwise_Conv2"
        rmnodes_saveonnx(self.model_buffer, msg)

        self.split_add_attr()
        msg = "Add split attribute to Split Op"
        rmnodes_saveonnx(self.model_buffer, msg)

        self.matmul_dim4_to_dim3()
        msg = "change Matmul matrix B to 3Dims"
        # save_tmp = "encoder_matrixB_wt3Dims.onnx"
        save_tmp = None
        rmnodes_saveonnx(self.model_buffer, msg, save_onnx=save_tmp)

        self.Where_to_Add_or_Mul()
        self.rel_shift_3dims()
        self.clean_and_re_shape_inference()
        msg = "Add_op replace Where & remove Where_1"
        rmnodes_saveonnx(self.model_buffer, msg, rm_nodes=self.rm_nodes, save_onnx=save_onnx)


if __name__ == '__main__':
    
    # Input model: the simplified conformer encoder exported to ONNX.
    # onnx_path = "../xformer_encoder_infer_new_split.onnx"
    onnx_path = "xformer_encoder_sim.onnx"
    user_name = "gyf"
    # onnx_path = "xformer_encoder.onnx"    
    # os.system(f"sudo cp /public/ai_platform/dsq/conformer_new_version/{onnx_path} {onnx_path}")
    # os.system(f"sudo chown {user_name}:{user_name} ./{onnx_path}")
    # os.system(f"chmod 777 ./{onnx_path}")
    
    onnx_model = onnx.load(onnx_path)
    # for i, node in enumerate(onnx_model.graph.node):
    #     if node.name == "/encoders/encoders.0/self_attn/Reshape":
    #         for j, vi in enumerate(onnx_model.graph.value_info):
    #             if vi.name == node.output[0]:
    #                 print(vi.type.tensor_type.shape.Dimension)

            # print(node.output[0])
            # print(onnx_model.graph.value_info.name == node.output[0])
    
    # Output names: save_onnx is the locally saved rewritten model;
    # target_onnx is the final deployment name used on the shared drive.
    # save_onnx = "/home/gyf/pkg/conformer_new/asr_conformer/pytorch_espnet/wo_decoder_onnx_modify/encoder_sim_3dims_nowhere.onnx"
    save_onnx = "encoder_sim_3dims_nowhere.onnx"
    target_onnx = "encoder.onnx"

    # Run the full OpModify pipeline; the rewritten model is written to save_onnx.
    OpModify(onnx_model)(save_onnx)
    
    # Publish the result to the shared deployment directories.
    # NOTE(review): shells out with sudo; requires passwordless sudo and the
    # mounted /public share to be present — fails silently otherwise.
    os.system(f"sudo cp {save_onnx} /public/ai_platform/yfguo/conformer_onnx_modify/{save_onnx}")
    os.system(f"sudo cp /public/ai_platform/yfguo/conformer_onnx_modify/{save_onnx} \
                        /public/ai_platform/yfguo/conformer_onnx_modify/update/conformer_onnx/{target_onnx}")
