import os
from typing import Any
import onnx
import numpy
import copy

from op_remove import RemoveWhere
from configure import logger, level, msg_fmt, __LINE__, __FUNC__
from utils import rmnodes_saveonnx

# Configure the project-wide logger so records from this module are tagged
# with this file's name (logger/level come from the shared configure module).
file_name = os.path.basename(__file__)
logger.set_level(level=level, name=file_name)


class RMFeatsLength2EncoderOutLen:
    """Strip the feats_length -> encoder_out_len subgraph from an exported
    encoder ONNX model, then fold the positional-embedding MatMuls that read
    a constant Slice output into precomputed Constant nodes.

    NOTE(review): every node range and tensor name below (nodes[0:17],
    nodes[252], the i+91 offset, 'Slice_673.npy', ...) is hard-coded for one
    specific exported graph — verify against the target model before reuse.
    """

    def __init__(self, encoder_model, logger=logger, level=level) -> None:
        # encoder_model: loaded onnx ModelProto; mutated in place by __call__.
        self.encoder_model = encoder_model
        self.graph = self.encoder_model.graph
        # self.nodes = encoder_graph.node   
        self.logger = logger
        # Nodes scheduled for removal; filled by __call__ and
        # extend_pe_RM_MatmulSliceConst, consumed by rmnodes_saveonnx.
        self.rm_nodes = []
        # self.rmequal = RMUnsqueezeEqual(encoder_model=encoder_model,
        #                                 logger=logger, level=level)
    
    def extend_pe_RM_MatmulSliceConst(self):
        """Fold MatMuls whose first input is the constant Slice output.

        For each MatMul fed by "/embed/model/out/out.1/Slice_output_0", the
        product Slice[:, :511, :] @ weight is computed on the host with numpy
        and injected as a Constant node feeding the downstream Reshape; the
        original MatMul is appended to self.rm_nodes for later removal.

        NOTE(review): reads 'Slice_673.npy' from the working directory — the
        file must hold the dumped Slice output; confirm its provenance.
        """
        matmul_nodes = []    
        for i, node in enumerate(self.graph.node):
            if node.op_type == "MatMul" and \
                node.input[0] == "/embed/model/out/out.1/Slice_output_0": 
                # slice output a const to Matmul
                matmul_nodes.append(node)
        
        slice_data = numpy.load('Slice_673.npy')
        # Truncate to the first 511 positions — presumably the maximum
        # sequence length used downstream; verify against the model config.
        matmul_input0_data = slice_data[:, :511, :]        
        matmul_input1 = {}
        for i, matmul_node in enumerate(matmul_nodes):
            for t in self.encoder_model.graph.initializer:
                # fetch the MatMul weight initializer as a numpy array
                if t.name == matmul_node.input[1]:
                    matmul_input1[t.name] = onnx.numpy_helper.to_array(t)
                
        for i, node in enumerate(self.graph.node):
            if node in matmul_nodes:
                matmul_input1_data = matmul_input1[node.input[1]]
                self.rm_nodes.append(node)
                if node.name == '/encoders/encoders.0/self_attn/linear_pos/MatMul':
                    reshape_node = self.graph.node[i + 91]  # position of the Reshape paired with this MatMul
                else:
                    reshape_node = self.graph.node[i + 1]
                # Precompute MatMul(slice_const, weight) on the host.
                value_matmul = numpy.matmul(matmul_input0_data, matmul_input1_data)\
                                                            .astype(numpy.float32)
                name = f'{reshape_node.name}_input0'
                value = onnx.helper.make_tensor(name=name, \
                                                data_type=onnx.TensorProto.FLOAT, \
                                                dims=value_matmul.shape, vals=value_matmul)
                const_i = onnx.helper.make_node(name=name, op_type='Constant', \
                                                inputs=[], outputs=[name], value=value)
                self.encoder_model.graph.node.append(const_i)
                # Rewire the Reshape to consume the precomputed constant.
                reshape_node.input[0] = const_i.output[0]
    
    
    def __call__(self, save_onnx="encoder_fused_rmlength.onnx"):
        """Remove the feats_length input and its dependent nodes, optionally
        save the intermediate model, then run RemoveWhere and the MatMul
        folding pass; the final model is always written to
        "encoder_fused_rmlength_where.onnx" via rmnodes_saveonnx.

        save_onnx: intermediate save path; skipped unless it is a non-empty
            string ending with ".onnx".
        """
        # drop graph input 1 (feats_length)
        del self.graph.input[1]
        # remove the chain from input_1 (feat_length) to encoder_len_out
        nodes = self.graph.node
        rm_nodes = []
        rm_nodes.extend(nodes[0 : 17])
        # rm_nodes.extend(nodes[22 : 34]) # right branch of the Relu at the network head
        # rm_nodes.extend(nodes[35 : 43]) # right branch of the Relu at the network head
        rm_nodes.extend(nodes[48 : 75]) # right branch of the Mul
        rm_nodes.append(nodes[252])
        # rm_nodes.extend(nodes[1659 : 1662]) # if GLU fused
        rm_nodes.extend(nodes[1689 : 1692]) # if GLU not fused
        
        self.rm_nodes = rm_nodes       
                
        # Snapshot the node list, clear the graph, and rebuild it without the
        # removed nodes (membership test compares protobuf messages by value).
        nodes_all = copy.deepcopy(nodes)
        del self.graph.node[:]
        del self.graph.output[1]
        for node in nodes_all:
            if node not in rm_nodes:
                self.graph.node.append(node)
        
        msg = "remove feats_length : encoder_out_len, Unsqueeze_Equal_Cast"
        self.logger.log(msg_fmt.format(__LINE__, __FUNC__, msg), level=level)
        
        if save_onnx is not None:
            if len(save_onnx) and save_onnx.lower().endswith(".onnx"):
                onnx.save(self.encoder_model, save_onnx)
                msg = "save {}".format(save_onnx)
                self.logger.log(msg_fmt.format(__LINE__, __FUNC__, msg), level=level)
        
        # self.rmequal(save_onnx=save_onnx)
        RemoveWhere(self.encoder_model)()        
        self.extend_pe_RM_MatmulSliceConst()
        msg = "remove Where and matmul"
        save_onnx = "encoder_fused_rmlength_where.onnx"
        rmnodes_saveonnx(self.encoder_model, self.rm_nodes, msg, save_onnx)


class OpFuse:
    def __init__(self, encoder_graph, opt_sim=False, logger=logger, level=level) -> None:
        self.graph = encoder_graph
        # self.nodes = encoder_graph.node
        self.opt_sim = opt_sim      
        self.logger = logger

    def Swish_node(self, op_name, inputs, outputs, op_type="Swish"):
        doc_string ="""output = input * Sigmoid(input)
                        也就是 Mul(input, Sigmoid(input))
                    """
        
        return onnx.helper.make_node(name=op_name, 
                                        inputs=inputs, 
                                        outputs=outputs, 
                                        op_type=op_type, 
                                        doc_string=doc_string
                                        )

    def GLU_node(self, op_name, inputs, outputs, op_type="Glu"):
        doc_string ="""input 沿着 dim 轴切分成 2 个矩阵: a, b
                    GLU(a, b) = a * Sigmoid(b)
                    """
        node_glu = onnx.helper.make_node(name=op_name, 
                                        inputs=inputs, 
                                        outputs=outputs, 
                                        op_type=op_type, 
                                        doc_string=doc_string,
                                        
                                        )
        attrs_glu = onnx.helper.make_attribute("axis", 1)
        node_glu.attribute.insert(0, attrs_glu)
        return node_glu

    def MatrixbdCompute_node(self, op_name, inputs, outputs, op_type=None):
        doc_string ="""相对位置编码中 matrixbd 计算
                        把 matrixbd 内部的计算变成矩阵乘法, 用 GPU 加速
                    """
        op_type = "MatrixbdCompute"
        return onnx.helper.make_node(name=op_name, 
                                        inputs=inputs, 
                                        outputs=outputs, 
                                        op_type=op_type, 
                                        doc_string=doc_string,                             
                                        )

    # Swish fuse
    def swish_fuse(self):
        graph = self.graph
        nodes = graph.node
        for i, node in enumerate(nodes):  
        # Swish fuse: used in specifical pattern with ONNX op_type
            if node.op_type == "Sigmoid" and nodes[i+1].op_type == "Mul" \
                                        and nodes[i-1].op_type != "Split":
                # create fused-op Swish
                swish_name = node.name.replace("Sigmoid", "Swish") #+ "_{}".format(i)
                swish_input = nodes[i].input #[0].replace("Sigmoid", "Swish")
                swish_output = nodes[i+1].output[0].replace("Mul", "Swish")
                node_swish = self.Swish_node(swish_name, swish_input, [swish_output])                
                # set input/output connection
                nodes[i+2].input[0] = node_swish.output[0]            
                # remove single op of Sigmoid and Mul
                # insert fused-op Swish
                for n in range(2): # 0, 1
                    graph.node.remove(nodes[i+1-n])
                graph.node.insert(i, node_swish)
        self.graph = graph
        msg = "Finish Swish"
        self.logger.log(msg_fmt.format(__LINE__, __FUNC__, msg), level=level)

    # GLU fuse
    def glu_fuse(self):
        graph = self.graph
        nodes = graph.node
        for i, node in enumerate(nodes):  
        # GLU_fuse: used in specifical pattern with ONNX op_type
            if node.op_type == "Sigmoid" and nodes[i+1].op_type == "Mul" \
                                        and nodes[i-1].op_type == "Split":
                # create fused-op Glu       
                glu_name = node.name.replace("Sigmoid", "Glu") #+ "_{}".format(i)
                glu_input = nodes[i-1].input  #[0].replace("Split", "Glu")
                glu_output = nodes[i+1].output  # [0].replace("Mul", "Glu")
                node_glu = self.GLU_node(glu_name, glu_input, glu_output)
                # remove useless nodes and insert new fused-op
                for n in range(3): # 0, 1, 2
                    graph.node.remove(nodes[i+1-n])
                graph.node.insert(i-1, node_glu)
        self.graph = graph
        msg = "Finish GLU"
        self.logger.log(msg_fmt.format(__LINE__, __FUNC__, msg), level=level)

    def matrixbdcompute_fuse(self):
        graph = self.graph
        opt_sim = self.opt_sim
                        
        def opt_sim_pattern(nodes, i):
            return nodes[i].op_type == "MatMul" and nodes[i+1].op_type == "Shape" \
                                    and nodes[i+2].op_type == "Gather" \
                                    and nodes[i+3].op_type == "Gather" \
                                    and nodes[i+4].op_type == "Gather" \
                                    and nodes[i+5].op_type == "Unsqueeze" \
                                    and nodes[i+6].op_type == "Unsqueeze" \
                                    and nodes[i+7].op_type == "Unsqueeze" \
                                    and nodes[i+8].op_type == "Concat" \
                                    and nodes[i+9].op_type == "ConstantOfShape"\
                                    and nodes[i+10].op_type == "Concat" \
                                    and nodes[i+11].op_type == "Gather" \
                                    and nodes[i+12].op_type == "Add" \
                                    and nodes[i+13].op_type == "Unsqueeze" \
                                    and nodes[i+14].op_type == "Concat" \
                                    and nodes[i+15].op_type == "Reshape" \
                                    and nodes[i+16].op_type == "Slice" \
                                    and nodes[i+17].op_type == "Reshape" \
                                    and nodes[i+18].op_type == "Slice" \
                                    and nodes[i+19].op_type == "Squeeze" \
                                    and nodes[i+20].op_type == "Div" \
                                    and nodes[i+21].op_type == "Add" \
                                    and nodes[i+22].op_type == "Unsqueeze" \
                                    and nodes[i+23].op_type == "Slice"
        
        def normal_pattern_opset17(nodes, i):
            return nodes[i].op_type == "MatMul" and nodes[i+1].op_type == "Shape"   \
                                    and nodes[i+17].op_type == "Concat" \
                                    and nodes[i+18].op_type == "ConstantOfShape"\
                                    and nodes[i+19].op_type == "Concat" \
                                    and nodes[i+24].op_type == "Add" \
                                    and nodes[i+33].op_type == "Concat" \
                                    and nodes[i+34].op_type == "Reshape" \
                                    and nodes[i+39].op_type == "Slice" \
                                    and nodes[i+41].op_type == "Reshape" \
                                    and nodes[i+46].op_type == "Slice" \
                                    and nodes[i+48].op_type == "Squeeze" \
                                    and nodes[i+50].op_type == "Div" \
                                    and nodes[i+54].op_type == "Add" \
                                    and nodes[i+57].op_type == "Unsqueeze" \
                                    and nodes[i+60].op_type == "Slice"
        
        nodes = graph.node
        for i, node in enumerate(nodes):
            norm_fuse = normal_pattern_opset17(nodes, i)
            opt_sim_fuse = opt_sim_pattern(nodes, i)
            if norm_fuse or opt_sim_fuse:
                # create fused-op MatrixbdCompute
                matrixbd_name = nodes[i+1].name.replace("Shape", "matrixdbcompute") \
                                                                    + "_{}".format(i)                
                # add Constant node for MatrixbdCompute input
                length = 130
                feat_len = f"feat_len_{i}"
                len_np = numpy.array([length], dtype = numpy.int64)
                tensor_len = onnx.helper.make_tensor(name = feat_len, \
                                                        data_type=onnx.TensorProto.INT64, \
                                                        dims=len_np.shape, \
                                                        vals=len_np)
                node_len = onnx.helper.make_node(op_type="Constant", \
                                                    name = f"len_{i}", \
                                                    inputs=[], \
                                                    outputs=[feat_len], \
                                                    value=tensor_len)    
                nodes.append(node_len)
                # insert Matrixbd node to graph           
                node_matrixbd = self.MatrixbdCompute_node(matrixbd_name, \
                                                            inputs=[node.output[0], \
                                                            feat_len], \
                                                            outputs=nodes[i+60].output)
                ops_cnt = 61
                if opt_sim_fuse: ops_cnt = 24
                for n in range(1, ops_cnt):
                    graph.node.remove(nodes[i+ops_cnt-n])
                graph.node.insert(i+1, node_matrixbd)
        self.graph = graph
        msg = "Finish MatrixbdCompute"
        self.logger.log(msg_fmt.format(__LINE__, __FUNC__, msg), level=level)

class OpFusePipeline:
    """Run the operator-fusion passes on an encoder ONNX model, save the
    fused model, then strip the feats_length subgraph via
    RMFeatsLength2EncoderOutLen.
    """

    def __init__(self, encoder_model, logger=logger, level=level) -> None:
        # encoder_model: loaded onnx ModelProto; mutated in place.
        self.encoder_model = encoder_model
        self.graph = self.encoder_model.graph
        # OpFuse mutates self.graph in place, so encoder_model stays in sync.
        self.fuseop_obj = OpFuse(self.graph, logger=logger, level=level)
        self.logger = logger

        self.rmlength = RMFeatsLength2EncoderOutLen(encoder_model=encoder_model,
                                                    logger=logger, level=level)

    def __call__(self, fuse_pattern=("swish", "matrixbdcompute"),
                 save_onnx="encoder_fused.onnx",
                 save_rmlen=None):
        """Apply the requested fusion passes, optionally save the result,
        then remove the feats_length path.

        fuse_pattern: list/tuple of pass names (case-insensitive); each name
            N dispatches to method ``N_fuse`` on OpFuse. (Default is a tuple
            now — the old mutable list default was an anti-pattern.)
        save_onnx: path for the fused model; falsy value skips saving.
        save_rmlen: forwarded to RMFeatsLength2EncoderOutLen.__call__.

        Raises:
            TypeError: if fuse_pattern is not a list or tuple.
        """
        if not isinstance(fuse_pattern, (list, tuple)):
            msg = "fuse_pattern should be fuse_op with string list or tuple"
            self.logger.log(msg_fmt.format(__LINE__, __FUNC__, msg), level="error")
            # BUG FIX: execution previously continued past the error log and
            # crashed (or iterated a string char-by-char); fail fast instead.
            raise TypeError(msg)

        for n, fuse_name in enumerate(item.lower() for item in fuse_pattern):
            fuse_name += "_fuse"
            msg = "No.{} do {} fusing".format(n + 1, fuse_name)
            self.logger.log(msg_fmt.format(__LINE__, __FUNC__, msg), level=level)
            fuseop = getattr(self.fuseop_obj, fuse_name, None)
            if fuseop is None:
                # BUG FIX: an unknown pass name used to raise
                # TypeError("'NoneType' object is not callable"); log and
                # skip it instead.
                msg = "unknown fuse pattern, skip: {}".format(fuse_name)
                self.logger.log(msg_fmt.format(__LINE__, __FUNC__, msg), level="error")
                continue
            fuseop()

        if save_onnx:
            onnx.save(self.encoder_model, save_onnx)
            msg = "save {}".format(save_onnx)
            self.logger.log(msg_fmt.format(__LINE__, __FUNC__, msg), level=level)

        self.rmlength(save_onnx=save_rmlen)

    def save_onnx(self, onnx_file):
        """Serialize the (possibly modified) model to onnx_file."""
        onnx.save(self.encoder_model, onnx_file)



if __name__ == '__main__':
    # Load the shape-inferred encoder model and run the full fusion pipeline:
    # fuse ops, save the fused model, then strip the feats_length subgraph.
    model_path = "D:\\vbox\\encoder_shapeinfer.onnx"
    model = onnx.load(model_path)

    pipeline = OpFusePipeline(model)
    pipeline(save_onnx="encoder_fused.onnx",
             save_rmlen="encoder_fused_rmlength.onnx")