import os
import copy
import onnx

from configure import logger, level, msg_fmt, __LINE__, __FUNC__
from utils import rmnodes_saveonnx

# Tag this module's log records with the file's basename so the shared
# project logger can distinguish message origins.
file_name = os.path.basename(__file__)
logger.set_level(level=level, name=file_name)


class RMUnsqueezeEqual:
    """Remove the Unsqueeze/Equal subgraph around a known self-attention node.

    Finds the hard-coded node '/encoders/encoders.0/self_attn/Unsqueeze_26'
    in the encoder graph and marks it for removal together with its
    neighbours (one node before it, three after it), then delegates the
    actual pruning and saving to ``rmnodes_saveonnx``.

    NOTE(review): the i-1 .. i+3 window assumes the exporter's fixed
    topological node order — verify against the target ONNX file.
    """

    def __init__(self, encoder_model, logger=logger, level=level) -> None:
        # encoder_model: a loaded onnx.ModelProto; its graph is edited in place.
        self.encoder_model = encoder_model
        self.graph = self.encoder_model.graph
        self.logger = logger
        # Stored for consistency with RemoveWhere (was previously dropped).
        self.level = level

    def __call__(self, save_onnx="encoder_fused_rmlength_equal.onnx"):
        """Prune the Unsqueeze/Equal nodes and save the model.

        :param save_onnx: output filename; pass None to skip saving
            (handled downstream by ``rmnodes_saveonnx``).

        Bug fix: the original body unconditionally overwrote a
        caller-supplied ``save_onnx`` with the default filename whenever it
        was not None, making the parameter useless; the caller's value is
        now respected.
        """
        rm_nodes = []
        for i, node in enumerate(self.graph.node):
            if node.name in ['/encoders/encoders.0/self_attn/Unsqueeze_26']:
                # Anchor node plus one predecessor and three successors.
                rm_nodes.extend(self.graph.node[i - 1: i + 4])
        msg = "remove Unsqueeze_Equal"
        rmnodes_saveonnx(self.encoder_model, rm_nodes, msg, save_onnx)

class RemoveWhere:
    """Replace masked-Where attention subgraphs with an additive mask input.

    For every Where -> Softmax pattern in the encoder graph: the first Where
    is replaced by an Add node that combines the attention scores with a new
    external mask input ('input_mask_0'), the second Where downstream is
    bypassed, and the now-dangling mask-building nodes are removed.

    NOTE(review): the positional offsets below (i-9, i+5, i+12, ...) assume
    the exact topological node order of one specific export of this encoder
    model — confirm against the target ONNX file before reusing elsewhere.
    """

    def __init__(self, encoder_model, logger=logger, level=level) -> None:
        # encoder_model: a loaded onnx.ModelProto; its graph is edited in place.
        self.encoder_model = encoder_model
        self.graph = self.encoder_model.graph 
        self.logger = logger
        self.level = level
    
    def __call__(self, save_onnx="encoder_fused_rmlength_where.onnx"):
        """Rewrite the graph, optionally save the model, and return the graph.

        :param save_onnx: output filename; saving only happens when it is a
            non-empty string ending in '.onnx'. Pass None to skip saving.
        :return: the rewritten onnx GraphProto.
        """
        graph = self.graph
        # New graph input: float32 (elem_type=1) mask of shape [4, 511, 511],
        # fed as the second operand of each replacement Add node.
        # NOTE(review): the shape is hard-coded for one batch/sequence config.
        input_mask_node_0 = onnx.helper.make_tensor_value_info(name='input_mask_0', elem_type=1, shape=[4, 511, 511])
        graph.input.append(input_mask_node_0)

        rm_nodes = []
        # Wire the Softmax output straight to the MatMul after the 2nd Where,
        # cutting the 2nd Where out of the data path.
        # NOTE(review): Add nodes are appended to graph.node while it is being
        # enumerated; the new nodes are visited too, but their op_type 'Add'
        # never matches the 'Where' condition, so the loop still terminates.
        for i, node in enumerate(graph.node):
            if node.op_type == 'Where' and graph.node[i + 1].op_type == 'Softmax':
                # MatMul after the 2nd Where: rewire its input 0 (previously
                # the Where output) to the Softmax's output 0.
                # This disconnects the 2nd Where's input and output.
                graph.node[i + 5].input[0] = graph.node[i + 1].output[0]
                if 'encoders.0' in node.name:
                    # Remove the 1st Where's input chain:
                    #   where -> cast2 -> equal -> unsqueeze
                    # plus the 2 ops on the other output of the transpose that
                    # feeds input 1 of the MatMul after the 2nd Where.
                    rm_nodes.extend(graph.node[i - 9 : i + 1])
                else:
                    # Remove the 1st Where's input chain: where -> cast2,
                    # plus the 2 ops on the other output of the transpose that
                    # feeds input 1 of the MatMul after the 2nd Where.
                    rm_nodes.extend(graph.node[i - 5 : i + 1])
                # Remove the 2nd where -> cast3.
                rm_nodes.extend(graph.node[i + 2 : i + 5])
                # Remove the unsqueeze/concat hanging off the other output of
                # the transpose feeding input 1 of the post-Where MatMul.
                rm_nodes.extend(graph.node[i + 7 : i + 12])
                # Add node replaces the first Where: attention scores
                # (node.input[2]) + external mask 'input_mask_0'.
                add_node = onnx.helper.make_node(name=f'Where_Add_{i}', op_type='Add', \
                                                    inputs=[node.input[2], 'input_mask_0'], \
                                                    outputs=[f'Where_Add_{i}_out'])
                graph.node[i + 1].input[0] = add_node.output[0]
                graph.node.append(add_node)
        
        # Rebuild graph.node keeping only nodes not marked for removal.
        # Membership relies on protobuf value equality, which is why the
        # deep-copied nodes still match the originals collected in rm_nodes.
        nodes_all = copy.deepcopy(graph.node)
        del graph.node[:]
        for node in nodes_all:
            if node not in rm_nodes:
                graph.node.append(node)
        
        msg = "remove Where"
        # NOTE(review): uses the module-level `level`, not `self.level` —
        # likely an oversight; confirm intended behavior.
        self.logger.log(msg_fmt.format(__LINE__, __FUNC__, msg), level=level)
        
        self.graph = graph
        if save_onnx is not None:
            if len(save_onnx) and save_onnx.lower().endswith(".onnx"):
                onnx.save(self.encoder_model, save_onnx)
        
        return self.graph


if __name__ == '__main__':
    # Load the previously fused encoder model and strip its Where subgraphs.
    source_path = "encoder_fused_rmlength.onnx"
    model = onnx.load(source_path)
    remover = RemoveWhere(model)
    remover()