from lib import *
# The imports here are known-good; copied verbatim from step2.
def BHWC2C1BHWC0_DDR2L1(ifm, batch, tensor_h, tensor_w, tensor_c, start_tensor_h, start_tensor_w, start_tensor_c, tile_h, tile_w, tile_c):
    """Copy a TILE out of a BHWC tensor (DDR) into C1BHWC0 layout (L1).

    The channel axis is split into C1 groups of C0 channels each; the last
    group is left zero-padded when tile_c is not a multiple of C0.

    Args:
        ifm: source tensor, shape (batch, tensor_h, tensor_w, tensor_c).
        batch: batch size.
        tensor_h, tensor_w, tensor_c: source tensor extents (shape-checked).
        start_tensor_h, start_tensor_w, start_tensor_c: tile origin in the tensor.
        tile_h, tile_w, tile_c: tile extents; the tile is assumed to fit inside
            the tensor starting from the origin (callers guarantee this).

    Returns:
        np.ndarray of shape (C1, batch, tile_h, tile_w, C0), where
        C1 = ceil_div(tile_c, C0); channel positions >= tile_c stay zero.
    """
    assert(ifm.shape[1] == tensor_h)
    assert(ifm.shape[2] == tensor_w)
    assert(ifm.shape[3] == tensor_c)
    C1 = ceil_div(tile_c, C0)
    ifm_C1BHWC0 = np.zeros((C1, batch, tile_h, tile_w, C0))
    for c1 in range(C1):
        c_lo = c1 * C0
        # Clip the last channel group so we never read past tile_c; the
        # remaining c0 positions keep their zero initialization (padding).
        valid_c = min(C0, tile_c - c_lo)
        ifm_C1BHWC0[c1, :, :, :, :valid_c] = ifm[
            :batch,
            start_tensor_h:start_tensor_h + tile_h,
            start_tensor_w:start_tensor_w + tile_w,
            start_tensor_c + c_lo:start_tensor_c + c_lo + valid_c,
        ]
    # The array is already (C1, batch, tile_h, tile_w, C0); the original
    # trailing reshape to the same shape was a no-op and has been removed.
    return ifm_C1BHWC0
# Known-good; copied verbatim from step2.
def matmul_m1k1m0k0_n1k1n0k0(m1n1m0n0, m1k1m0k0, n1k1n0k0, bias_n1n0, deq_n1n0, M1, N1, K1, bias_en, psum_en, deq_en):
    """Blocked GEMM: (M1,K1,M0,K0) x (N1,K1,N0,K0) -> (M1,N1,M0,N0).

    For each (m1, n1) output block, accumulates sum over k1 of
    A[m1,k1] @ B[n1,k1].T, optionally adds a per-output-channel bias,
    optionally accumulates into the existing partial sum, and optionally
    applies per-output-channel dequant scaling at the end.

    Args:
        m1n1m0n0: output / partial-sum buffer, shape (M1, N1, M0, N0);
            mutated in place and also returned.
        m1k1m0k0: left operand, shape (M1, K1, M0, K0).
        n1k1n0k0: right operand, shape (N1, K1, N0, K0); each (N0, K0) block
            is transposed before the multiply.
        bias_n1n0: bias, shape (N1, N0); read only when bias_en.
        deq_n1n0: dequant scales, shape (N1, N0); read only when deq_en.
        M1, N1, K1: block counts to iterate over.
        bias_en: add the bias to each output block.
        psum_en: accumulate into the existing contents of m1n1m0n0
            instead of overwriting.
        deq_en: scale the finished result by deq_n1n0 (only meaningful once
            the K dimension has been fully reduced).

    Returns:
        m1n1m0n0 (same array object, for call-site convenience).
    """
    assert(m1k1m0k0.shape[1] == n1k1n0k0.shape[1])  # K1 must match
    assert(m1k1m0k0.shape[3] == n1k1n0k0.shape[3])  # K0 must match
    for m1 in range(M1):
        for n1 in range(N1):
            # Size the accumulator from the operands rather than the global
            # M0/N0 constants, so the kernel works for any block granularity.
            temp = np.zeros((m1k1m0k0.shape[2], n1k1n0k0.shape[2]))
            for k1 in range(K1):
                temp += np.matmul(m1k1m0k0[m1][k1], n1k1n0k0[n1][k1].T)
            if bias_en:
                # Bias is per output channel (n0): broadcast the (N0,) row
                # over all m0 rows — equivalent to the original element loop.
                temp += bias_n1n0[n1]
            if psum_en:
                m1n1m0n0[m1][n1] += temp
            else:
                m1n1m0n0[m1][n1] = temp
    if deq_en:
        # Dequant is also per output channel: broadcast over the m0 rows.
        for m1 in range(M1):
            for n1 in range(N1):
                m1n1m0n0[m1][n1] *= deq_n1n0[n1]
    return m1n1m0n0
# Carve SLICE data out of an output-feature-map TILE; SLICE dimensions must be integer multiples of M0 / N0 / K0.
def C1BHWC02M1K1M0K0_L12L0(ifm, batch, slice_oh, slice_ow, slice_c1, start_tile_h, start_tile_w, start_tile_c1, kernel_h, kernel_w, stride_h, stride_w, pad_t, pad_b, pad_l, pad_r, dilation_h, dilation_w, padding_value=0):
    """im2col a SLICE of a C1BHWC0 TILE (L1) into M1K1M0K0 layout (L0).

    Walks every output position (b, oh, ow) and kernel tap (ic1, kh, kw) of
    the slice, reverse-maps it to an input coordinate inside the tile, and
    copies the whole C0(=K0) channel group into the (M1, K1, M0, K0) matrix
    consumed by the cube unit. Taps falling in the slice's zero-padding border
    are written with padding_value instead.

    Args:
        ifm: input TILE in C1BHWC0 layout (as produced by BHWC2C1BHWC0_DDR2L1).
        batch: batch size.
        slice_oh, slice_ow: output height/width of this slice.
        slice_c1: number of K0-channel groups (C1 extent) this slice covers.
        start_tile_h, start_tile_w, start_tile_c1: slice origin inside the tile.
        kernel_h, kernel_w, stride_h, stride_w, dilation_h, dilation_w:
            convolution parameters.
        pad_t, pad_b, pad_l, pad_r: zero-padding rows/cols on each edge of the
            slice's input region (the tile itself holds no padding).
        padding_value: value for out-of-range taps (default 0).

    Returns:
        np.ndarray of shape (M1, K1, M0, K0) with M = batch*slice_oh*slice_ow
        rounded up to a multiple of M0 (tail rows stay zero) and
        K1 = slice_c1 * kernel_h * kernel_w.
    """
    # Input extent needed by the slice, from the inverse conv-size formula;
    # the pads are subtracted because the padded border is synthesized here
    # rather than stored in the tile.
    slice_ih = (slice_oh - 1) * stride_h + dilation_h * (kernel_h - 1) - pad_t - pad_b + 1
    slice_iw = (slice_ow - 1) * stride_w + dilation_w * (kernel_w - 1) - pad_l - pad_r + 1
    C1 = slice_c1 # channel groups: each group is already K0 channels
    M = batch * slice_oh * slice_ow # BHW rows of the im2col matrix
    M1 = ceil_div(M, M0) # rounded up — the tail rows remain zero padding
    K1 = C1*kernel_h*kernel_w # one K1 step per (channel group, kernel tap)
    ifm_m1k1m0k0 = np.zeros((M1, K1, M0, K0))
    for ic1 in range(C1):
        for kh in range(kernel_h):
            for kw in range(kernel_w):
                for b in range(batch):
                    for oh in range(slice_oh):
                        for ow in range(slice_ow): # loop order mirrors the hardware data layout
                            # Reverse-map the output pixel + kernel tap to an
                            # input coordinate; subtracting pad_t/pad_l shifts
                            # border taps into the synthesized zero padding.
                            ih = oh * stride_h + dilation_h * kh  - pad_t 
                            iw = ow * stride_w + dilation_w * kw  - pad_l
                            m_index = b * slice_oh * slice_ow + oh * slice_ow + ow # row index: batch-major over (oh, ow)
                            k1_idx = ic1 * kernel_h * kernel_w + kh * kernel_w +kw # K1 block index for this (group, tap)
                            m1_idx = m_index // M0
                            m0_idx = m_index % M0
                            if (0 <= ih < slice_ih) and (0 <= iw < slice_iw): # tap lands on real tile data (K0 == C0, so copy the whole group)
                                for k0_idx in range(K0):# ifm here is ifm_C1BHWC0_tile
                                    ifm_m1k1m0k0[m1_idx][k1_idx][m0_idx][k0_idx] = ifm[start_tile_c1 + ic1][b][start_tile_h + ih][start_tile_w + iw][k0_idx]
                            else:
                                # Tap falls in the zero-padding border.
                                for k0_idx in range(K0):
                                    ifm_m1k1m0k0[m1_idx][k1_idx][m0_idx][k0_idx] = padding_value
    return ifm_m1k1m0k0

def test_convolution():
    """Randomized end-to-end test of the tiled convolution dataflow.

    Builds a random conv layer, runs the Tensor -> TILE -> SLICE pipeline
    (DDR -> L1 -> L0 -> PSB -> DDR) with im2col + blocked GEMM, then checks
    the result against torch.nn.functional.conv2d followed by a per-channel
    dequant scale.

    Returns:
        The result of compare(...) (presumably truthy on match — defined in
        lib), or False when the random configuration is skipped because the
        output feature map is too small.
    """
    # batch: number of feature maps processed together
    batch = np.random.randint(1, 4)
    # Corresponds to the OpenNPU tiling parameters H_L2, W_L2, IC_L2, OC_L2 (Tensor level)
    in_height = np.random.randint(3, 100)
    in_width = np.random.randint(3, 100)
    in_channel = np.random.randint(3, 100)
    out_channel = np.random.randint(3, 100)
    # Convolution parameters: kernel / stride / pad / dilation
    kernel_h = np.random.randint(1,7)
    kernel_w = np.random.randint(1,7)
    stride_h = np.random.randint(1,7)
    stride_w = np.random.randint(1,7)
    pad_h = np.random.randint(0, kernel_h)
    pad_w = np.random.randint(0, kernel_w)
    dilation_h = np.random.randint(1,7)
    dilation_w = np.random.randint(1,7)
    # Output size from the standard convolution formula
    out_height = (in_height + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) // stride_h + 1
    out_width = (in_width + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) // stride_w + 1
    # Skip configurations whose output map is too small to be meaningful
    if(out_height <= 2 or out_width <= 2):
        return False
    # Split the Tensor feature map into TILEs
    H_L1 = np.random.randint(2, out_height)
    W_L1 = np.random.randint(2, out_width)
    CI_L1 = np.random.randint(2, in_channel)
    CO_L1 = ceil_align(np.random.randint(1, out_channel), N0)
    # Split each TILE into SLICEs
    H_L0 = np.random.randint(1, H_L1)
    W_L0 = np.random.randint(1, W_L1)
    CI1_L0 = ceil_div(np.random.randint(1, CI_L1), K0)
    CO_L0 = ceil_align(np.random.randint(1, CO_L1), N0)
    # Collect the layer parameters (for logging)
    layer = {
    'batch': batch,
    'in_height': in_height,
    'in_width': in_width,
    'out_height': out_height,
    'out_width': out_width,
    'in_channel': in_channel,
    'out_channel': out_channel,
    'kernel_h': kernel_h,
    'kernel_w': kernel_w,
    'stride_h': stride_h,
    'stride_w': stride_w,
    'pad_h': pad_h,
    'pad_w': pad_w,
    'dilation_h': dilation_h,
    'dilation_w': dilation_w,
    'CO_L1': CO_L1,
    'H_L1': H_L1,
    'W_L1': W_L1,
    'CI_L1': CI_L1,
    'CO_L0': CO_L0,
    'H_L0': H_L0,
    'W_L0': W_L0,
    'CI1_L0': CI1_L0
    }
    print(layer)

    # Create the input data
    ifm_BCHW = np.random.randn(batch, in_channel, in_height, in_width)
    weight = np.random.randn(out_channel, in_channel, kernel_h, kernel_w)
    bias = np.random.randn(out_channel)
    deq = np.random.rand(out_channel)
    # The final output accumulates here
    ofm_BHWC = np.zeros((batch, out_height, out_width, out_channel))
    # Convert to BHWC first; im2col and GEMM are applied per tile below
    ifm_BHWC = ifm_BCHW.transpose((0, 2, 3, 1))
    for tile_oc_start_in_tensor in range(0, out_channel, CO_L1):
        for tile_oh_start_in_tensor in range(0, out_height, H_L1):
            for tile_ow_start_in_tensor in range(0, out_width, W_L1):
                # Tile the output into CO_L1 x H_L1 x W_L1 blocks. Tile sizes
                # need not divide the tensor evenly: edge tiles are clipped by
                # the min() below, and bias/dequant enables (see further down)
                # fire only when the reduction genuinely completes.
                oc_size_tile = min(CO_L1, out_channel - tile_oc_start_in_tensor)
                oh_size_tile = min(H_L1, out_height - tile_oh_start_in_tensor)
                ow_size_tile = min(W_L1, out_width - tile_ow_start_in_tensor)
                # After im2col the matrix dims are: M=Ho*Wo, K=Ci*Kh*Kw, N=Co.
                # Reverse-mapping before im2col: M=Hi*Wi, K=Ci, N=Co.
                # Matrix layout in L1 is K1MK0; feature-map layout in L1 is Ci1 (HiWi) Ci0.
                # The whole batch is processed together, so M includes the batch factor.
                M = batch * H_L0 * W_L0
                N = oc_size_tile
                # M0/N0/K0 are the cube unit's minimum matmul granularity; M1/N1/K1
                # index matrix blocks (e.g. M1=0, N1=0 is the first output block).
                # ceil_div implies zero-padding M and N up to multiples of M0/N0.
                M1 = ceil_div(M, M0)
                N1 = ceil_div(N, N0)
                # psb: partial-sum buffer (the CU writes partial sums here and
                # reloads them; the VU reads from here to write back to GM).
                # The leading axis linearizes the slice indices (oh, ow, oc)
                # within the tile; results leave the PSB for ofm_BHWC only
                # once dequant fires.
                # NOTE(review): the leading axis includes a factor of batch,
                # but psb_addr below never uses batch (it is folded into M1),
                # so the buffer looks over-allocated by batch x — harmless
                # but wasteful; confirm intent.
                result_m2n2m1n1m0n0_psb = np.zeros((batch*ceil_div(oh_size_tile, H_L0)*ceil_div(ow_size_tile, W_L0)*ceil_div(oc_size_tile, CO_L0), M1, N1, M0, N0))
                for tile_ic_start_in_tensor in range(0, in_channel, CI_L1):
                    # Coordinates (inclusive, hence the -1) of this tile's last
                    # output row/col in the Tensor
                    tile_oh_end_in_tensor = min(tile_oh_start_in_tensor + H_L1, out_height) - 1
                    tile_ow_end_in_tensor = min(tile_ow_start_in_tensor + W_L1, out_width) - 1
                    # Reverse-map the output tile's start/end coordinates to the
                    # input region the convolution needs
                    tile_ih_start_in_tensor = max(tile_oh_start_in_tensor * stride_h - pad_h, 0)
                    tile_iw_start_in_tensor = max(tile_ow_start_in_tensor * stride_w - pad_w, 0)
                    # Last input row/col touched by the tile's receptive field,
                    # clamped to the tensor bounds
                    tile_ih_end_in_tensor = min(tile_oh_end_in_tensor * stride_h - pad_h + dilation_h * (kernel_h - 1), in_height - 1)
                    tile_iw_end_in_tensor = min(tile_ow_end_in_tensor * stride_w - pad_w + dilation_w * (kernel_w - 1), in_width - 1)
                    tile_ic_end_in_tensor = min(tile_ic_start_in_tensor + CI_L1, in_channel) - 1
                    # Size of the input region this L1 pass needs
                    ih_size_tile = max(tile_ih_end_in_tensor - tile_ih_start_in_tensor + 1, 0)
                    iw_size_tile = max(tile_iw_end_in_tensor - tile_iw_start_in_tensor + 1, 0)
                    ic_size_tile = max(tile_ic_end_in_tensor - tile_ic_start_in_tensor + 1, 0)
                    # ci1_size_tile: C1 extent after the DDR->L1 move repacks the
                    # input into C1BHWC0 (channels grouped K0 at a time);
                    # in the matrix view the M of K1MK0 becomes (BHW)
                    ci1_size_tile = ceil_div(ic_size_tile, K0)
                    # BHWC input -> C1 BHW C0 (extract this TILE from the Tensor)
                    ifm_C1BHWC0_tile = BHWC2C1BHWC0_DDR2L1(ifm_BHWC, batch, in_height, in_width, in_channel, tile_ih_start_in_tensor, tile_iw_start_in_tensor, tile_ic_start_in_tensor, ih_size_tile, iw_size_tile, ic_size_tile)
                    # Weights: OC IC Kh Kw -> IC1 Kh Kw OC IC0
                    # (what the cube unit's right-hand operand expects)
                    weight_k1nk0_tile = OCICKhKw2IC1KhKwOIC0(weight, out_channel, in_channel, kernel_h, kernel_w, tile_oc_start_in_tensor, tile_ic_start_in_tensor, oc_size_tile, ic_size_tile)
                    for slice_oc_start_in_tile in range(0, oc_size_tile, CO_L0):
                        for slice_oh_start_in_tile in range(0, oh_size_tile, H_L0):
                            for slice_ow_start_in_tile in range(0, ow_size_tile, W_L0):
                                # The three loops above select which PSB entry is produced
                                for slice_ci1_start_in_tile in range(0, ci1_size_tile, CI1_L0):
                                    # Innermost loop reduces over the input-channel groups.
                                    # Slice origin expressed in Tensor coordinates:
                                    slice_oc_start_in_tensor = tile_oc_start_in_tensor + slice_oc_start_in_tile
                                    slice_oh_start_in_tensor = tile_oh_start_in_tensor + slice_oh_start_in_tile
                                    slice_ow_start_in_tensor = tile_ow_start_in_tensor + slice_ow_start_in_tile
                                    # Slice end (inclusive) in Tensor coordinates; used below to
                                    # detect bottom/right borders that need zero padding
                                    # (the hard limits are out_height / out_width).
                                    slice_oh_end_in_tensor = min(slice_oh_start_in_tensor + H_L0, tile_oh_start_in_tensor + oh_size_tile) - 1
                                    slice_ow_end_in_tensor = min(slice_ow_start_in_tensor + W_L0, tile_ow_start_in_tensor + ow_size_tile) - 1
                                    # Reverse-map the slice's first output pixel to input coords
                                    slice_ih_start_in_tensor = max(slice_oh_start_in_tensor * stride_h - pad_h, 0)
                                    slice_iw_start_in_tensor = max(slice_ow_start_in_tensor * stride_w - pad_w, 0)
                                    # ...and express it relative to the tile held in L1
                                    slice_ih_start_in_tile = slice_ih_start_in_tensor - tile_ih_start_in_tensor
                                    slice_iw_start_in_tile = slice_iw_start_in_tensor - tile_iw_start_in_tensor
                                    # Output extents of this slice
                                    slice_ow_size = slice_ow_end_in_tensor - slice_ow_start_in_tensor + 1 # ow extent of the current output slice
                                    slice_oh_size = slice_oh_end_in_tensor - slice_oh_start_in_tensor + 1 # oh extent of the current output slice
                                    slice_ci1_size = min(ci1_size_tile - slice_ci1_start_in_tile, CI1_L0) # number of input-channel groups the kernel slides over
                                    slice_oc_size = min(oc_size_tile - slice_oc_start_in_tile, CO_L0) # oc extent of the current output slice
                                    # slice_pad_t/b/l/r: rows/cols of zero padding on each edge.
                                    # A slice needs padding only where its receptive field crosses
                                    # the input-tensor border; same reverse-map formula as for the
                                    # tile above (start*stride - pad, end*stride - pad + dilation*(k-1)).
                                    if(slice_oh_start_in_tensor * stride_h - pad_h < 0):
                                        slice_pad_t = pad_h - slice_oh_start_in_tensor * stride_h
                                    else:
                                        slice_pad_t = 0
                                    if(slice_oh_end_in_tensor * stride_h - pad_h + dilation_h * (kernel_h - 1) +1 > in_height): # receptive field crosses the bottom border
                                        slice_pad_b = slice_oh_end_in_tensor * stride_h - pad_h + dilation_h * (kernel_h - 1) - in_height+1 
                                    else:
                                        slice_pad_b = 0
                                    if(slice_ow_start_in_tensor * stride_w - pad_w < 0):
                                        slice_pad_l = pad_w - slice_ow_start_in_tensor * stride_w
                                    else:
                                        slice_pad_l = 0
                                    if(slice_ow_end_in_tensor  * stride_w - pad_w + dilation_w * (kernel_w - 1) +1 > in_width): # receptive field crosses the right border
                                        slice_pad_r = slice_ow_end_in_tensor  * stride_w - pad_w + dilation_w * (kernel_w - 1) - in_width+1 
                                    else:
                                        slice_pad_r = 0
                                    # Bias is added only when the K (in_channel) reduction
                                    # completes: the last IC TILE and, within it, the last
                                    # channel-group SLICE.
                                    bias_en = (tile_ic_end_in_tensor == (in_channel -1)) and (slice_ci1_start_in_tile + CI1_L0 >=  ci1_size_tile)
                                    # Partial sums accumulate until the K reduction completes.
                                    # NOTE(review): both comparisons are always true given the
                                    # loop ranges, so psum_en is effectively constant True;
                                    # correct only because the PSB starts zeroed — confirm intent.
                                    psum_en = (slice_ci1_start_in_tile < ci1_size_tile) and (tile_ic_start_in_tensor < in_channel)
                                    # Convolution is never split along Kh/Kw; dequant may only
                                    # run once the full K (IC*Kh*Kw) reduction is done — same
                                    # condition as bias_en.
                                    deq_en = (tile_ic_end_in_tensor == (in_channel -1)) and (slice_ci1_start_in_tile + CI1_L0 >=  ci1_size_tile)
                                    
                                    # im2col: C1BHWC0 (TILE) -> M1K1M0K0 (SLICE) for the cube unit
                                    ifm_M1K1M0K0 = C1BHWC02M1K1M0K0_L12L0(ifm_C1BHWC0_tile, batch, slice_oh_size, slice_ow_size, slice_ci1_size, slice_ih_start_in_tile, slice_iw_start_in_tile, slice_ci1_start_in_tile, kernel_h, kernel_w, stride_h, stride_w, slice_pad_t, slice_pad_b, slice_pad_l, slice_pad_r, dilation_h, dilation_w)
                                    weight_N1K1N0K0_slice = K1NK02N1K1N0K0_L12RMB(weight_k1nk0_tile, oc_size_tile, slice_oc_start_in_tile, slice_ci1_start_in_tile*kernel_h*kernel_w, slice_oc_size, slice_ci1_size*kernel_h*kernel_w)
                                    bias_N1N0 = N2N1N0_L12PMB(bias, slice_oc_start_in_tensor, slice_oc_size)
                                    deq_N1N0 = N2N1N0_L12PMB(deq, slice_oc_start_in_tensor, slice_oc_size)
                                    # Linearized PSB address of this slice's partial-sum block:
                                    # (oh_block * n_ow_blocks + ow_block) * n_oc_blocks + oc_block
                                    psb_addr = ((slice_oh_start_in_tile//H_L0)*ceil_div(ow_size_tile, W_L0) + slice_ow_start_in_tile//W_L0)*ceil_div(oc_size_tile, CO_L0) + slice_oc_start_in_tile//CO_L0
                                    result_m2n2m1n1m0n0_psb[psb_addr] = matmul_m1k1m0k0_n1k1n0k0(\
                                    result_m2n2m1n1m0n0_psb[psb_addr], ifm_M1K1M0K0, weight_N1K1N0K0_slice, bias_N1N0, deq_N1N0, ifm_M1K1M0K0.shape[0], weight_N1K1N0K0_slice.shape[0], ifm_M1K1M0K0.shape[1], bias_en, psum_en, deq_en)
                                    # Once dequant has fired the block is final: write it back to DDR
                                    if(deq_en):
                                        ofm_BHWC = M1N1M0N02BHWC_PSB2DDR(ofm_BHWC, result_m2n2m1n1m0n0_psb[psb_addr], batch, out_height, out_width, out_channel, slice_oh_start_in_tensor, slice_ow_start_in_tensor, slice_oc_start_in_tensor, slice_oh_size, slice_ow_size, slice_oc_size)
    
    # Verify against torch
    ifm_torch = torch.tensor(ifm_BCHW, dtype=torch.float32)
    weight_torch = torch.tensor(weight, dtype=torch.float32)
    bias_torch = torch.tensor(bias, dtype=torch.float32)
    deq_torch = torch.tensor(deq, dtype=torch.float32)
    ofm_torch = nn.functional.conv2d(ifm_torch, weight_torch, bias_torch, stride=(stride_h, stride_w), padding=(pad_h, pad_w), dilation=(dilation_h, dilation_w))
    ofm_torch = ofm_torch * deq_torch.view(1, -1, 1, 1)
    ofm_BCHW = ofm_BHWC.reshape((batch, out_height, out_width, out_channel)).transpose((0, 3, 1, 2))
    test_pass = compare(ofm_BCHW.flatten(), ofm_torch.numpy().flatten())
    return test_pass

# Run a batch of randomized tests and report the outcome — the original loop
# discarded test_convolution()'s return value, so failures went unnoticed.
# NOTE: a False result can also mean the random config was skipped
# (output map too small), not only a mismatch.
non_passes = 0
for i in range(20):
    if not test_convolution():
        non_passes += 1
print(f"{20 - non_passes}/20 runs passed ({non_passes} failed or skipped)")