from lib import *

def MK2K1MK0_DDR2L1(matrix_mk, tensor_m, tensor_k, start_m, start_k, tile_m, tile_k):
    """Repack a (tile_m, tile_k) window of matrix_mk into (K1, tile_m, K0) layout.

    The K axis is split into K1 = ceil(tile_k / K0) groups of width K0; the
    last group is zero padded when tile_k is not a multiple of K0.
    tensor_m / tensor_k are unused here but kept for interface compatibility.
    """
    K1 = ceil_div(tile_k, K0)
    matrix_k1mk0 = np.zeros((K1, tile_m, K0))
    # Slice the source window once, then copy one K0 group per iteration
    # instead of the original element-wise triple loop (same result,
    # far fewer Python-level operations).
    window = matrix_mk[start_m:start_m + tile_m, start_k:start_k + tile_k]
    for k1 in range(K1):
        width = min(K0, tile_k - k1 * K0)  # last group may be partial
        matrix_k1mk0[k1, :, :width] = window[:, k1 * K0:k1 * K0 + width]
    return matrix_k1mk0

def matmul_m1k1m0k0_n1k1n0k0(m1n1m0n0, m1k1m0k0, n1k1n0k0, bias_n1n0, deq_n1n0, M1, N1, K1, bias_en, psum_en, deq_en):
    """SLICE-level matmul on fractal layouts.

    For each (m1, n1) fractal block computes sum over k1 of
    m1k1m0k0[m1, k1] @ n1k1n0k0[n1, k1]^T, then optionally adds bias
    (bias_en), accumulates into m1n1m0n0 (psum_en, otherwise overwrites)
    and applies dequantization scaling (deq_en).

    m1k1m0k0: left operand, shape (M1, K1, M0, K0)
    n1k1n0k0: right operand, shape (N1, K1, N0, K0)
    bias_n1n0 / deq_n1n0: per-output-column bias and scale, shape (N1, N0)
    Returns m1n1m0n0 (also modified in place).
    """
    assert(m1k1m0k0.shape[1] == n1k1n0k0.shape[1])  # matching K1
    assert(m1k1m0k0.shape[3] == n1k1n0k0.shape[3])  # matching K0
    # Derive the fractal block sizes from the operands instead of relying on
    # the module-level M0/N0 globals (equivalent when shapes agree, and
    # removes a hidden global dependency).
    m0_size = m1k1m0k0.shape[2]
    n0_size = n1k1n0k0.shape[2]
    for m1 in range(M1):
        for n1 in range(N1):
            temp = np.zeros((m0_size, n0_size))
            for k1 in range(K1):
                temp = temp + np.matmul(m1k1m0k0[m1][k1], n1k1n0k0[n1][k1].transpose())
            if bias_en:
                # Broadcast the bias row across all m0 rows (replaces the
                # original element-wise double loop).
                temp = temp + bias_n1n0[n1]
            if psum_en:
                m1n1m0n0[m1][n1] += temp
            else:
                m1n1m0n0[m1][n1] = temp
    if deq_en:
        # Scale each output column by its dequantization factor; broadcasting
        # deq_n1n0[n1] over the m0 axis replaces the original quadruple loop.
        for m1 in range(M1):
            for n1 in range(N1):
                m1n1m0n0[m1][n1] = m1n1m0n0[m1][n1] * deq_n1n0[n1]
    return m1n1m0n0

def test_matmul():
    """Randomized end-to-end check of the tiled fractal matmul pipeline.

    Draws random tensor / tile / slice sizes, runs the DDR->L1->L0 tiling
    simulation, and compares the result against the matmul_mk_kn golden
    reference via compare().
    """
    # Overall tensor dimensions.
    TENSOR_M = np.random.randint(3, 100)
    TENSOR_N = np.random.randint(3, 100)
    TENSOR_K = np.random.randint(3, 100)
    # TILE sizes need not divide the tensor dimensions evenly.
    TILE_M = np.random.randint(2, TENSOR_M)
    TILE_N = np.random.randint(2, TENSOR_N)
    TILE_K = np.random.randint(2, TENSOR_K)
    # SLICE sizes must be multiples of M0 / N0 / K0.
    # BUG FIX: SLICE_N and SLICE_K were drawn from TILE_M (copy-paste error);
    # draw each from its own tile dimension instead.
    SLICE_M = ceil_align(np.random.randint(1, TILE_M), M0)
    SLICE_N = ceil_align(np.random.randint(1, TILE_N), N0)
    SLICE_K = ceil_align(np.random.randint(1, TILE_K), K0)
    SLICE_M1 = ceil_div(SLICE_M, M0)
    SLICE_N1 = ceil_div(SLICE_N, N0)

    # Log the problem size for this run.
    layer = {
        'M': TENSOR_M,
        'N': TENSOR_N,
        'K': TENSOR_K
    }
    print(layer)
    # Random input operands.
    left = np.random.randint(-128, 127, size=(TENSOR_M, TENSOR_K))
    right = np.random.randint(-128, 127, size=(TENSOR_K, TENSOR_N))
    bias = np.random.randint(-128, 127, size=(TENSOR_N))
    deq = np.random.rand(TENSOR_N)
    right_nk = right.transpose()
    # Output accumulator (no zero padding at the tensor level).
    result_mn = np.zeros((TENSOR_M, TENSOR_N))
    # Walk the output in tiles; range() with a step still covers the whole
    # tensor when the tile size does not divide it evenly (edges clamped below).
    for tile_n_start_in_tensor in range(0, TENSOR_N, TILE_N):

        n_size_tile = min(TILE_N, TENSOR_N - tile_n_start_in_tensor)
        bias_n_tile = bias[tile_n_start_in_tensor:tile_n_start_in_tensor+n_size_tile]
        deq_n_tile = deq[tile_n_start_in_tensor:tile_n_start_in_tensor+n_size_tile]

        for tile_m_start_in_tensor in range(0, TENSOR_M, TILE_M):

            m_size_tile = min(TILE_M, TENSOR_M - tile_m_start_in_tensor)
            # Partial-sum buffer for one TILE: one (SLICE_M1, SLICE_N1, M0, N0)
            # entry per SLICE block inside the tile (already zero padded).
            result_m2n2m1n1m0n0_psb = np.zeros((ceil_div(m_size_tile, SLICE_M)*ceil_div(n_size_tile, SLICE_N), SLICE_M1, SLICE_N1, M0, N0))
            for tile_k_start_in_tensor in range(0, TENSOR_K, TILE_K):

                k_size_tile = min(TILE_K, TENSOR_K - tile_k_start_in_tensor)
                # Pad each TILE_K up to a multiple of K0 (inline ceil_align).
                k_size_tile_align_k0 = (k_size_tile + K0 - 1) // K0 * K0
                # DDR -> L1: repack the tile into K1/M/K0 layout (K0-padded
                # with zeros, split along K in groups of K0).
                left_k1mk0_tile = MK2K1MK0_DDR2L1(left, TENSOR_M, TENSOR_K, tile_m_start_in_tensor, tile_k_start_in_tensor, m_size_tile, k_size_tile)
                right_k1nk0_tile = MK2K1MK0_DDR2L1(right_nk, TENSOR_N, TENSOR_K, tile_n_start_in_tensor, tile_k_start_in_tensor, n_size_tile, k_size_tile)
                # Split the L1 tile into L0 SLICE blocks.
                for slice_n_start_in_tile in range(0, n_size_tile, SLICE_N):
                    # Slice-local bias and dequantization data for this tile.
                    n_size_slice = min(SLICE_N, n_size_tile - slice_n_start_in_tile)
                    bias_n1n0_pmb = N2N1N0_L12PMB(bias_n_tile, slice_n_start_in_tile, n_size_slice)
                    deq_n1n0_pmb = N2N1N0_L12PMB(deq_n_tile, slice_n_start_in_tile, n_size_slice)
                    for slice_m_start_in_tile in range(0, m_size_tile, SLICE_M):
                        # A tile need not be a multiple of the slice size, so
                        # slices are clamped the same way tiles are.
                        m_size_slice = min(SLICE_M, m_size_tile - slice_m_start_in_tile)
                        for slice_k_start_in_tile in range(0, k_size_tile_align_k0, SLICE_K):
                            k_size_slice = min(SLICE_K, k_size_tile_align_k0 - slice_k_start_in_tile)
                            # SLICE blocks must be exact multiples of K0.
                            assert(k_size_slice % K0 == 0)
                            k1_size_slice = k_size_slice // K0
                            # Slice position in K1 units.
                            slice_k1_start_in_tile = slice_k_start_in_tile // K0
                            # L1 -> L0: on top of the K1/M/K0 layout, split the
                            # M (resp. N) axis; this stage zero-pads to M0/N0
                            # the same way the previous stage padded to K0.
                            left_m1k1m0k0_lmb = K1MK02M1K1M0K0_L12LMB(left_k1mk0_tile, m_size_tile, slice_m_start_in_tile, slice_k1_start_in_tile, m_size_slice, k1_size_slice)
                            right_n1k1n0k0_rmb = K1NK02N1K1N0K0_L12RMB(right_k1nk0_tile, n_size_tile, slice_n_start_in_tile, slice_k1_start_in_tile, n_size_slice, k1_size_slice)
                            # bias/deq are enabled only on the last K SLICE of
                            # the last K TILE, i.e. when the full K reduction
                            # is complete for this output block.
                            bias_en = ((tile_k_start_in_tensor+TILE_K) >= TENSOR_K) and ((slice_k_start_in_tile + SLICE_K) >= k_size_tile_align_k0)
                            psum_en = 1  # always accumulate: K is reduced in pieces
                            deq_en = ((tile_k_start_in_tensor+ TILE_K) >= TENSOR_K) and ((slice_k_start_in_tile + SLICE_K) >= k_size_tile_align_k0)
                            # Row-major address of this SLICE block in the PSB.
                            psb_addr = (slice_m_start_in_tile//SLICE_M * ceil_div(n_size_tile, SLICE_N) + slice_n_start_in_tile//SLICE_N)
                            # One SLICE-level matmul per iteration.
                            result_m2n2m1n1m0n0_psb[psb_addr] = matmul_m1k1m0k0_n1k1n0k0(\
                            result_m2n2m1n1m0n0_psb[psb_addr], left_m1k1m0k0_lmb, right_n1k1n0k0_rmb, bias_n1n0_pmb, deq_n1n0_pmb, left_m1k1m0k0_lmb.shape[0], right_n1k1n0k0_rmb.shape[0], left_m1k1m0k0_lmb.shape[1], bias_en, psum_en, deq_en)
                            if(deq_en):
                                # Reduction finished: write this SLICE back
                                # from the PSB to the DDR result.
                                result_mn = M1N1M0N02MN_PSB2DDR(result_mn, result_m2n2m1n1m0n0_psb[psb_addr], TENSOR_N, tile_m_start_in_tensor+slice_m_start_in_tile, tile_n_start_in_tensor+slice_n_start_in_tile, m_size_slice, n_size_slice)
    golden_mn = matmul_mk_kn(left, right, bias, deq)
    compare(result_mn, golden_mn)
    return True

# Run the randomized end-to-end check several times with fresh random sizes.
for _ in range(10):
    test_matmul()