import os
import sys
import shutil
import onnx
import numpy
import numpy as np
import torch

from configure import logger, level, msg_fmt, __LINE__, __FUNC__
from utils import rmnodes_saveonnx

# Configure the shared project logger (from `configure`) to tag messages
# with this file's name at the globally configured level.
file_name = os.path.basename(__file__)
logger.set_level(level=level, name=file_name)


from MM_dump import *

class GenData:
    """Generate binary encoder inputs (zero-padded features and attention
    masks) from a saved feature ``.npy`` array.

    Output files are written under *out_dir* (default ``enc_input``),
    preserving the original hard-coded file names:
    ``feats.bin``, ``input_mask_1.bin``, ``input_mask_2.bin``.
    """

    def __init__(self, out_dir='enc_input'):
        # Directory the .bin files are written into. The default keeps
        # the original zero-argument construction working unchanged.
        self.out_dir = out_dir

    def __call__(self, feat_npy):
        """Load features from *feat_npy* and dump feature + mask binaries.

        Assumes the loaded array is shaped (1, T, 80) -- TODO confirm
        against the producer of ``feats.npy``.
        """
        feats = np.load(feat_npy)
        feat_len = feats.shape[1]
        length2 = self.calculate_len(feat_len)
        print(length2)
        self.feats_gen(feat_len, feats)
        self.mask_gen(length2)

    def calculate_len(self, feat_len):
        """Return the sequence length after two downsampling stages.

        Each stage maps L -> (L - 3) // 2 + 1 (kernel 3, stride 2,
        no padding), applied twice.
        """
        length = (feat_len - 3) // 2 + 1
        length2 = (length - 3) // 2 + 1
        return length2

    def feats_gen(self, length, feats):
        """Zero-pad *feats* to (1, 2048, 80) float32 and dump to disk.

        Raises if length > 2048 -- no truncation is attempted.
        """
        feats_new = np.zeros((1, 2048, 80)).astype(np.float32)
        feats_new[:, :length, :] = feats
        feats_new.tofile(os.path.join(self.out_dir, 'feats.bin'))

    def mask_gen(self, length2):
        """Dump attention masks for a valid (downsampled) length *length2*.

        ``input_mask_1.bin``: (4, 511, 511) additive mask -- 0 on valid
        key positions, -inf on padding.
        ``input_mask_2.bin``: (1, 256, 511) multiplicative mask -- 1 on
        valid positions, 0 on padding.
        """
        # -np.inf rather than np.NINF: the NINF alias was removed in
        # NumPy 2.0.
        input_mask_0 = np.full((4, 511, 511), -np.inf).astype(np.float32)
        input_mask_0[:, :, :length2] = 0.0
        input_mask_0.tofile(os.path.join(self.out_dir, 'input_mask_1.bin'))

        input_mask_2 = np.zeros((1, 256, 511)).astype(np.float32)
        input_mask_2[:, :, :length2] = 1.0
        input_mask_2.tofile(os.path.join(self.out_dir, 'input_mask_2.bin'))


class MatMul():
    """ONNX test-case generator for the MatMul operator."""

    @staticmethod
    def export() -> None:
        """Emit MatMul conformance cases for 2-D, 3-D and 4-D operands."""
        node = onnx.helper.make_node(
            "MatMul",
            inputs=["a", "b"],
            outputs=["c"],
        )

        # One case per rank: (lhs shape, rhs shape, test name).
        cases = [
            ((3, 4), (4, 3), "test_matmul_2d"),
            ((2, 3, 4), (2, 4, 3), "test_matmul_3d"),
            ((1, 2, 3, 4), (1, 2, 4, 3), "test_matmul_4d"),
        ]
        for lhs_shape, rhs_shape, case_name in cases:
            lhs = numpy.random.randn(*lhs_shape).astype(numpy.float32)
            rhs = numpy.random.randn(*rhs_shape).astype(numpy.float32)
            product = numpy.matmul(lhs, rhs)
            expect(node, inputs=[lhs, rhs], outputs=[product], name=case_name)

class OpGenerate:
    """Re-create selected nodes of a loaded ONNX encoder model as
    standalone single-node ONNX models, for per-operator testing."""

    def __init__(self, onnx_model, logger=logger, level=level) -> None:
        """Store the loaded ONNX model and its graph.

        *level* is accepted for backward compatibility; it is unused.
        """
        self.model_buffer = onnx_model
        self.graph = self.model_buffer.graph
        self.logger = logger

    def matmul(self):
        """Export each ``/encoders/encoders.{i}/self_attn/MatMul`` node
        (i in 0..14) present in the model as a standalone two-input
        MatMul ONNX model under ``matmul_onnx/``, together with sample
        input binaries ``a_1.bin`` / ``b_1.bin``.
        """
        matmul_dir = "matmul_onnx"
        # Start from a clean output directory on every run.
        if os.path.exists(matmul_dir):
            shutil.rmtree(matmul_dir)
        os.mkdir(matmul_dir)

        # Related MatMul node names, kept for reference (not exported here):
        #   "/embed/model/out/out.0/MatMul"
        #   "/encoders/encoders.{i}/self_attn/MatMul_1" and "MatMul_2"
        # For the exported nodes, both A and B are model inputs.
        target_index = {
            f"/encoders/encoders.{i}/self_attn/MatMul": i for i in range(15)
        }

        for graph_node in self.model_buffer.graph.node:
            j = target_index.get(graph_node.name)
            if j is None:
                continue

            # Fixed shapes taken from the encoder attention:
            # A is (heads=4, 511, 64), B is (4, 64, 511), C = A @ B.
            A = onnx.helper.make_tensor_value_info(
                "A", onnx.TensorProto.FLOAT, [4, 511, 64])
            B = onnx.helper.make_tensor_value_info(
                "B", onnx.TensorProto.FLOAT, [4, 64, 511])
            C = onnx.helper.make_tensor_value_info(
                "C", onnx.TensorProto.FLOAT, [4, 511, 511])

            matmul_onnx = f"encoders_{j}_self_attn_MatMul.onnx"
            # Distinct name: the original code rebound the loop variable
            # `node` here, shadowing the graph node being matched.
            new_node = onnx.helper.make_node(
                "MatMul",
                inputs=["A", "B"],
                outputs=["C"],
                name=graph_node.name,
            )
            graph = onnx.helper.make_graph(
                [new_node],
                "MatMul_4x3_2inputs",
                [A, B],
                [C],
            )
            model = onnx.helper.make_model(graph=graph, producer_name="taylor_made")
            print(f"\nNo.{j} model created:\n{model}")
            onnx.checker.check_model(model)
            print(f"model {new_node.name} is checked!")
            onnx.save(model, matmul_dir + "/" + matmul_onnx)

            # Sample inputs, filled with ones (zero-init then full-range
            # overwrite, originally "aligned to a multiple of 8").
            # NOTE(review): these use batch 1 while the model above
            # declares batch 4 -- confirm the mismatch is intended.
            a = numpy.zeros([1, 511, 64]).astype(numpy.float32)
            b = numpy.zeros([1, 64, 511]).astype(numpy.float32)
            a[:, :511, :] = 1
            b[:, :, :511] = 1
            c = numpy.matmul(a, b)
            print(c[:, :3, :3])
            a.tofile(matmul_dir + "/" + "a_1.bin")
            b.tofile(matmul_dir + "/" + "b_1.bin")

if __name__ == '__main__':
    
    # Earlier model path, kept for reference; currently unused.
    onnx_path = "xformer_encoder_infer_new_split.onnx"

    # Model is loaded unconditionally but only consumed by the
    # commented-out OpGenerate call below.
    save_onnx = "encoder_sim_3dims_nowhere.onnx"
    model = onnx.load(save_onnx)

    # OpGenerate(model).matmul()
    # os.system(f"sudo cp -r matmul_onnx /public/ai_platform/yfguo/conformer_onnx_modify/")
    
    # The local relative path deliberately overrides the absolute one above.
    feat_npy = "/home/gyf/pkg/conformer_new/asr_conformer/tensorrt/feats.npy"
    feat_npy = "feats.npy"
    GenData()(feat_npy)