import numpy as np

# Hardware tile atoms: one MAC tile multiplies an (M0 x K0) left operand by a
# (K0 x N0) right operand.  `c` is the channel-group width used by the CHWc
# layouts; it is defined equal to K0 here (several loaders rely on that).
M0 = 4
N0 = 4
K0 = c = 4

# Problem sizes at the L1 tiling level (note: not multiples of the tile
# atoms, so the zero-padding paths are exercised).
M_L1 = 24
N_L1 = 25
K_L1 = 17

# Number of L0 tiles per L1 tile along each dimension.
M1_L0 = 2
N1_L0 = 2
K1_L0 = 2

# L0 tile sizes in elements.
M_L0 = M1_L0 * M0
N_L0 = N1_L0 * N0
K_L0 = K1_L0 * K0

def ceil_div(x, y):
    """Integer ceiling division: smallest q with q * y >= x (for positive y)."""
    return (x + y - 1) // y

def MK2K1MK0_DDR2L1(matrix_mk, tensor_m, tensor_k, start_m, start_k, tile_m, tile_k):
    """Load a (tile_m, tile_k) window of an (M, K) DDR matrix, zero-padding any
    part of the window that falls outside (tensor_m, tensor_k), and retile it
    to the K1-M-K0 L1 layout."""
    window = np.zeros((tile_m, tile_k))
    # Extent of the window that is actually inside the logical tensor.
    m_valid = min(tile_m, tensor_m - start_m)
    k_valid = min(tile_k, tensor_k - start_k)
    if m_valid > 0 and k_valid > 0:
        window[:m_valid, :k_valid] = matrix_mk[start_m:start_m + m_valid,
                                               start_k:start_k + k_valid]
    return MK2K1MK0(window)

def KN2N1KN0_DDR2L1(matrix_kn, start_k, start_n, tile_k, tile_n):
    """Load a (tile_k, tile_n) window of a (K, N) DDR matrix, zero-padding the
    part of the window past matrix_kn's edges, and retile it to N1-K-N0."""
    window = np.zeros((tile_k, tile_n))
    k_valid = min(tile_k, matrix_kn.shape[0] - start_k)
    n_valid = min(tile_n, matrix_kn.shape[1] - start_n)
    if k_valid > 0 and n_valid > 0:
        window[:k_valid, :n_valid] = matrix_kn[start_k:start_k + k_valid,
                                               start_n:start_n + n_valid]
    return KN2N1KN0(window)

def KN2K1NK0_DDR2L1(matrix_kn, start_k, start_n, tile_k, tile_n):
    """Load a (tile_k, tile_n) window of a (K, N) DDR matrix and retile it to
    the K1-N-K0 L1 layout.

    Fix: the previous implementation passed the raw slice straight through, so
    a window that ran past the edge of matrix_kn was silently truncated and
    produced a smaller tile than requested.  The sibling loaders
    (MK2K1MK0_DDR2L1 / KN2N1KN0_DDR2L1) zero-pad to the full tile size; do the
    same here for consistency.
    """
    window = np.zeros((tile_k, tile_n))
    valid = matrix_kn[start_k:start_k + tile_k, start_n:start_n + tile_n]
    window[:valid.shape[0], :valid.shape[1]] = valid
    return KN2K1NK0(window)

def K1MK02M1K1M0K0_L12LMB(matrix_k1mk0, start_m, start_k1, tile_m, tile_k1):
    """Slice a (tile_k1, tile_m, K0) window out of a K1-M-K0 tensor and retile
    it to the M1-K1-M0-K0 LMB layout."""
    window = matrix_k1mk0[start_k1:start_k1 + tile_k1, start_m:start_m + tile_m]
    return K1MK02M1K1M0K0(window)

def K1NK02N1K1N0K0_L12RMB(matrix_k1nk0, start_n, start_k1, tile_n, tile_k1):
    """Slice a (tile_k1, tile_n, K0) window out of a K1-N-K0 tensor and retile
    it to N1-K1-N0-K0, zero-padding n up to a multiple of N0.  Each assignment
    copies a whole length-K0 vector (the trailing axis)."""
    n1_cnt = ceil_div(tile_n, N0)
    out = np.zeros((n1_cnt, tile_k1, N0, K0))
    for n in range(tile_n):
        n1, n0 = divmod(n, N0)
        for k1 in range(tile_k1):
            out[n1][k1][n0] = matrix_k1nk0[start_k1 + k1][start_n + n]
    return out

def M1N1M0N02MN_PSB2DDR(matrix_mn, matrix_m1n1m0n0, m_start, n_start, m_size, n_size):
    """Flatten a tiled (M1, N1, M0, N0) PSB result to (m_size, n_size) and
    write it into matrix_mn at (m_start, n_start), mutating matrix_mn in place."""
    matrix_mn[m_start:m_start+m_size, n_start:n_start+n_size] = M1N1M0N02MN(matrix_m1n1m0n0, m_size, n_size)

def N2N1N0_L12MMB(vector_n, start_n, tile_n):
    """Load a length-tile_n window of a vector starting at start_n (zero-padded
    past the end of vector_n) and retile it to (N1, N0).

    NOTE(review): this name is redefined later in this file with a different
    implementation; at import time the later definition wins, so this version
    is dead code.  Unlike the later one, this version bounds-checks reads
    against len(vector_n).
    """
    tiled_vector_n = np.zeros(tile_n)
    for n in range(tile_n):
        n_ = start_n + n
        if(n_ < vector_n.shape[0]):
            tiled_vector_n[n] = vector_n[n_]
    tiled_vector_n1n0 = N2N1N0(tiled_vector_n)
    return tiled_vector_n1n0


def MK2K1MK0(matrix_mk):
    """Retile an (M, K) matrix to (K1, M, K0), zero-padding K up to a multiple
    of the global K0."""
    rows, cols = matrix_mk.shape
    k1_cnt = ceil_div(cols, K0)
    padded = np.zeros((rows, k1_cnt * K0))
    padded[:, :cols] = matrix_mk
    # (M, K1*K0) -> (M, K1, K0) -> (K1, M, K0)
    return padded.reshape(rows, k1_cnt, K0).transpose(1, 0, 2)

def KN2N1KN0(matrix_kn):
    """Retile a (K, N) matrix to (N1, K, N0), zero-padding N up to a multiple
    of the global N0."""
    rows, cols = matrix_kn.shape
    n1_cnt = ceil_div(cols, N0)
    padded = np.zeros((rows, n1_cnt * N0))
    padded[:, :cols] = matrix_kn
    # (K, N1*N0) -> (K, N1, N0) -> (N1, K, N0)
    return padded.reshape(rows, n1_cnt, N0).transpose(1, 0, 2)

def KN2K1NK0(matrix_kn):
    """Retile a (K, N) matrix to (K1, N, K0), zero-padding K up to a multiple
    of the global K0."""
    rows, cols = matrix_kn.shape
    k1_cnt = ceil_div(rows, K0)
    padded = np.zeros((k1_cnt * K0, cols))
    padded[:rows, :] = matrix_kn
    # (K1*K0, N) -> (K1, K0, N) -> (K1, N, K0)
    return padded.reshape(k1_cnt, K0, cols).transpose(0, 2, 1)

def K1MK02M1K1M0K0(matrix_k1mk0):
    """Retile (K1, M, K0) to (M1, K1, M0, K0), zero-padding M up to a multiple
    of the global M0.  Each assignment copies a whole length-K0 vector."""
    k1_cnt, m_total, k0_cnt = matrix_k1mk0.shape
    m1_cnt = ceil_div(m_total, M0)
    out = np.zeros((m1_cnt, k1_cnt, M0, k0_cnt))
    for m in range(m_total):
        m1, m0 = divmod(m, M0)
        for k1 in range(k1_cnt):
            out[m1][k1][m0] = matrix_k1mk0[k1][m]
    return out

def N1KN02K1N1K0N0(matrix_n1kn0):
    """Retile (N1, K, N0) to (K1, N1, K0, N0), zero-padding K up to a multiple
    of the global K0.  Each assignment copies a whole length-N0 vector."""
    n1_cnt, k_total, n0_cnt = matrix_n1kn0.shape
    k1_cnt = ceil_div(k_total, K0)
    out = np.zeros((k1_cnt, n1_cnt, K0, n0_cnt))
    for k in range(k_total):
        k1, k0 = divmod(k, K0)
        for n1 in range(n1_cnt):
            out[k1][n1][k0] = matrix_n1kn0[n1][k]
    return out

def K1NK02N1K1N0K0(matrix_k1nk0):
    """Retile (K1, N, K0) to (N1, K1, N0, K0), zero-padding N up to a multiple
    of the global N0.  Each assignment copies a whole length-K0 vector."""
    k1_cnt, n_total, k0_cnt = matrix_k1nk0.shape
    n1_cnt = ceil_div(n_total, N0)
    out = np.zeros((n1_cnt, k1_cnt, N0, k0_cnt))
    for n in range(n_total):
        n1, n0 = divmod(n, N0)
        for k1 in range(k1_cnt):
            out[n1][k1][n0] = matrix_k1nk0[k1][n]
    return out

def M1N1M0N02MN(matrix_m1n1m0n0, M, N):
    """Flatten a tiled (M1, N1, M0, N0) matrix back to (M, N), discarding any
    tile elements that fall past M or N (the pad region)."""
    m1_cnt, n1_cnt, m0_cnt, n0_cnt = matrix_m1n1m0n0.shape
    out = np.zeros((M, N))
    for m1 in range(m1_cnt):
        for m0 in range(m0_cnt):
            m = m1 * m0_cnt + m0
            if m >= M:
                continue
            for n1 in range(n1_cnt):
                for n0 in range(n0_cnt):
                    n = n1 * n0_cnt + n0
                    if n < N:
                        out[m][n] = matrix_m1n1m0n0[m1][n1][m0][n0]
    return out

def CONV_M1N1M0N02MN(ifm):
    """Flatten a fully-populated tiled (M1, N1, M0, N0) tensor to a dense
    (M1*M0, N1*N0) matrix (no pad region to discard)."""
    m1_cnt, n1_cnt, m0_cnt, n0_cnt = ifm.shape
    ofm = np.zeros((m1_cnt * m0_cnt, n1_cnt * n0_cnt))
    for m1 in range(m1_cnt):
        for m0 in range(m0_cnt):
            row = m1 * m0_cnt + m0
            for n1 in range(n1_cnt):
                # Copy one N0-wide run of the row at a time.
                ofm[row, n1 * n0_cnt:(n1 + 1) * n0_cnt] = ifm[m1][n1][m0]
    return ofm

def MK2M1K1M0K0(matrix_mk):
    """Retile an (M, K) matrix to (M1, K1, M0, K0), zero-padding both
    dimensions up to multiples of the global M0 / K0."""
    rows, cols = matrix_mk.shape
    m1_cnt = ceil_div(rows, M0)
    k1_cnt = ceil_div(cols, K0)
    out = np.zeros((m1_cnt, k1_cnt, M0, K0))
    for m in range(rows):
        m1, m0 = divmod(m, M0)
        for k in range(cols):
            k1, k0 = divmod(k, K0)
            out[m1][k1][m0][k0] = matrix_mk[m][k]
    return out

def KN2K1N1K0N0(matrix_kn):
    """Retile a (K, N) matrix to (K1, N1, K0, N0), zero-padding both
    dimensions up to multiples of the global K0 / N0."""
    rows, cols = matrix_kn.shape
    k1_cnt = ceil_div(rows, K0)
    n1_cnt = ceil_div(cols, N0)
    out = np.zeros((k1_cnt, n1_cnt, K0, N0))
    for k in range(rows):
        k1, k0 = divmod(k, K0)
        for n in range(cols):
            n1, n0 = divmod(n, N0)
            out[k1][n1][k0][n0] = matrix_kn[k][n]
    return out

def N2N1N0(bias):
    """Retile a length-N vector to (N1, N0), zero-padding the tail of the
    last N0 group."""
    n_total = bias.shape[0]
    out = np.zeros((ceil_div(n_total, N0), N0))
    for n in range(n_total):
        out[n // N0][n % N0] = bias[n]
    return out

def N2N1N0_L12MMB(bias, start_n, tile_n):
    """Load a length-tile_n bias window starting at start_n and retile it to
    (N1, N0), zero-padding the tail of the last N0 group.

    NOTE(review): this redefines N2N1N0_L12MMB from earlier in the file, so
    this is the definition in effect at import time.  Unlike the earlier
    version it does NOT guard start_n + n against len(bias) — assumes the
    caller keeps the window in range; confirm with callers.
    """
    N1 = ceil_div(tile_n, N0)
    bias_N1N0 = np.zeros((N1, N0))
    for n1 in range(N1):
        for n0 in range(N0):
            n = n1 * N0 + n0
            if(n < tile_n):
                bias_N1N0[n1][n0] = bias[start_n+n]
    return bias_N1N0

def matmul_m1k1m0k0_k1n1k0n0(matrix_m1k1m0k0, matrix_k1n1k0n0, bias_n1n0):
    """Tiled matmul: out[m1][n1] = sum_k1 A[m1][k1] @ B[k1][n1] + bias[n1].

    A is (M1, K1, M0, K0), B is (K1, N1, K0, N0), bias is (N1, N0) with one
    value per output column; returns (M1, N1, M0, N0).

    Improvement: the per-n0 Python loop adding the bias is replaced by a
    numpy broadcast (bias row broadcasts across the M0 rows of each tile).
    NOTE(review): this name is redefined later in this file with a different
    signature; at import time the later definition shadows this one.
    """
    M1, K1, M0_, _ = matrix_m1k1m0k0.shape
    N1, N0_ = matrix_k1n1k0n0.shape[1], matrix_k1n1k0n0.shape[3]
    matrix_m1n1m0n0 = np.zeros((M1, N1, M0_, N0_))
    for m1 in range(M1):
        for n1 in range(N1):
            temp = np.zeros((M0_, N0_))
            for k1 in range(K1):
                temp += np.matmul(matrix_m1k1m0k0[m1][k1], matrix_k1n1k0n0[k1][n1])
            # Broadcast the (N0,) bias row over every M0 row of the tile.
            matrix_m1n1m0n0[m1][n1] = temp + bias_n1n0[n1]
    return matrix_m1n1m0n0

def matmul_mk_kn(matrix_mk, matrix_kn, bias, deq):
    """Reference matmul with per-column bias and dequant scale:
    out[m][n] = (A @ B)[m][n] + bias[n], then * deq[n], in float32.

    bias and deq must have length N (= matrix_kn.shape[1]).
    Improvement: the O(M*N) Python element loop is replaced by numpy
    broadcasting; in-place ops keep the float32 dtype as before.
    """
    matrix_mn = np.matmul(matrix_mk, matrix_kn).astype(np.float32)
    matrix_mn += bias   # broadcast over rows, same as the old per-element add
    matrix_mn *= deq    # per-column dequant scale
    return matrix_mn

def convolution(ifm, weight, bias, layer):
    """Direct convolution reference over flat buffers.

    ifm is a flat CHW buffer, weight a flat OC-IC-Kh-Kw buffer, bias one
    value per output channel; layer supplies all geometry.  Returns a dense
    (out_channel, out_height, out_width) array.  Padding taps contribute 0.
    """
    oh_n, ow_n = layer['out_height'], layer['out_width']
    ih_n, iw_n = layer['in_height'], layer['in_width']
    ic_n, oc_n = layer['in_channel'], layer['out_channel']
    kh_n, kw_n = layer['kernel_h'], layer['kernel_w']
    sh, sw = layer['stride_h'], layer['stride_w']
    ph, pw = layer['pad_h'], layer['pad_w']
    dh, dw = layer['dilation_h'], layer['dilation_w']
    ofm = np.zeros((oc_n, oh_n, ow_n))
    for oh in range(oh_n):
        for ow in range(ow_n):
            for oc in range(oc_n):
                acc = 0
                for kh in range(kh_n):
                    ih = oh * sh - ph + kh * dh
                    if not (0 <= ih < ih_n):
                        continue  # vertical padding tap
                    for kw in range(kw_n):
                        iw = ow * sw - pw + kw * dw
                        if not (0 <= iw < iw_n):
                            continue  # horizontal padding tap
                        for ic in range(ic_n):
                            x = ifm[ic * ih_n * iw_n + ih * iw_n + iw]
                            w = weight[((oc * ic_n + ic) * kh_n + kh) * kw_n + kw]
                            acc += x * w
                ofm[oc][oh][ow] = acc + bias[oc]
    return ofm

def CHW2CHWc(ifm, in_channel, in_height, in_width):
    """Retile a CHW feature map to C-H-W-c, zero-padding the channel count up
    to a multiple of the global group width c."""
    group_cnt = ceil_div(in_channel, c)
    padded = np.zeros((in_height, in_width, group_cnt * c))
    # Move channels last, then copy the real channels into the padded buffer.
    padded[:, :, :in_channel] = ifm.transpose((1, 2, 0))[:, :, :in_channel]
    # (H, W, C*c) -> (H, W, C, c) -> (C, H, W, c)
    return padded.reshape((in_height, in_width, group_cnt, c)).transpose((2, 0, 1, 3))

def HWC2CHWc_DDR2L1(ifm, ic_start, ih_start, iw_start, ic_size, ih_size, iw_size):
    """Load an (ih_size, iw_size, ic_size) window of an HWC DDR tensor and
    retile it to C-H-W-c, zero-padding the channel count up to a multiple of
    the group width c.

    Fixes: the channel-group count was computed as (ic_size + c - 1) // K0,
    mixing the `c` and `K0` constants — only correct while they happen to be
    equal; now uses `c` consistently.  Also removes a dead np.zeros allocation
    that was immediately overwritten by the slice.
    """
    window = ifm[ih_start:ih_start + ih_size,
                 iw_start:iw_start + iw_size,
                 ic_start:ic_start + ic_size]
    group_cnt = ceil_div(ic_size, c)
    padded = np.zeros((ih_size, iw_size, group_cnt * c))
    padded[:, :, :ic_size] = window
    # (H, W, C*c) -> (H, W, C, c) -> (C, H, W, c)
    return padded.reshape((ih_size, iw_size, group_cnt, c)).transpose((2, 0, 1, 3))
                   
def CHWc2M1K1M0K0_L12L0(ifm, ih_size, iw_size, iC_size, ih_start, iw_start, iC_start, kernel_h, kernel_w, stride_h, stride_w, pad_t, pad_b, pad_l, pad_r, dilation_h, dilation_w):
    """Im2col a C-H-W-c input window straight into the tiled (M1, K1, M0, K0)
    layout for the MAC array.

    M indexes output pixels (oh * ow_size + ow) and is padded up to a multiple
    of M0; K1 indexes (channel-group, kh, kw) triples.  Each assigned element
    is a whole length-K0 channel-group vector (last axis of ifm copied as one).
    Taps that fall into the padding region are left as zeros.

    NOTE(review): ifm is indexed at [iC_start + C_idx][ih_start + ih]
    [iw_start + iw] while ih/iw are only checked against ih_size/iw_size —
    assumes the window plus the start offsets stays inside ifm; confirm with
    callers.
    """
    # Standard conv output-size formulas for the window.
    oh_size = (ih_size + pad_t + pad_b - dilation_h * (kernel_h - 1) - 1) // stride_h + 1
    ow_size = (iw_size + pad_l + pad_r - dilation_w * (kernel_w - 1) - 1) // stride_w + 1
    M = oh_size * ow_size
    M1 = ceil_div(M, M0)
    K1 = iC_size * kernel_h * kernel_w
    ofm = np.zeros((M1, K1, M0, K0))
    for m1 in range(M1):
        for k1 in range(K1):
            for m0 in range(M0):
                m = m1 * M0 + m0
                # Decode the output pixel and the (group, kh, kw) tap from the
                # flat M / K1 indices.
                oh = m // ow_size
                ow = m % ow_size
                C_idx = k1 // (kernel_h * kernel_w)
                kh = (k1 // kernel_w) % kernel_h
                kw = k1 % kernel_w
                ih = oh * stride_h - pad_t + kh * dilation_h
                iw = ow * stride_w - pad_l + kw * dilation_w
                if (0 <= ih < ih_size) and (0 <= iw < iw_size):
                    ofm[m1][k1][m0] = ifm[iC_start + C_idx][ih_start + ih][iw_start + iw]
    return ofm

def OCICKhKw2OCCKhKwc_DDR2L1(weight, oc_start, ic_start, oc_size, ic_size):
    """Load an (oc_size, ic_size) window of OIHW weights and retile it to
    OC-C-Kh-Kw-c, zero-padding the input channels up to a multiple of c."""
    kh_n, kw_n = weight.shape[2], weight.shape[3]
    group_cnt = ceil_div(ic_size, c)
    # Window with input channels moved last: (OC, Kh, Kw, IC).
    window = weight[oc_start:oc_start + oc_size,
                    ic_start:ic_start + ic_size].transpose((0, 2, 3, 1))
    padded = np.zeros((oc_size, kh_n, kw_n, group_cnt * c))
    padded[:, :, :, :ic_size] = window
    # (OC, Kh, Kw, C*c) -> (OC, Kh, Kw, C, c) -> (OC, C, Kh, Kw, c)
    return padded.reshape((oc_size, kh_n, kw_n, group_cnt, c)).transpose((0, 3, 1, 2, 4))

def OCCKhKwc2K1N1K0N0(weight):
    """Flatten OC-C-Kh-Kw-c weights into a tiled (K1, N1, K0, N0) right
    operand.  N is the output-channel count; K = C * Kh * Kw * c must divide
    evenly by K0; N is zero-padded up to a multiple of N0."""
    n_total = weight.shape[0]
    k_total = weight.shape[1] * weight.shape[2] * weight.shape[3] * c
    assert( k_total % K0 == 0)
    k1_cnt = k_total // K0
    n1_cnt = ceil_div(n_total, N0)

    # (N, K) with K contiguous over (C, Kh, Kw, c), then transposed to (K, N).
    weight_kn = weight.reshape((n_total, k_total)).transpose()
    out = np.zeros((k1_cnt, n1_cnt, K0, N0))
    for k in range(k_total):
        k1, k0 = divmod(k, K0)
        for n in range(n_total):
            out[k1][n1_cnt and n // N0][k0][n % N0] = weight_kn[k][n]
    return out

def OCCKhKwc2N1K1N0K0(weight, oc_size, iC_size, oc_start, iC_start):
    """Slice an (oc_size, iC_size) window of OC-C-Kh-Kw-c weights, view it as
    (K1, N, K0) with K0 the trailing group width, and retile via the helper.

    NOTE(review): the name says N1-K1-N0-K0 but N1KN02K1N1K0N0 returns a
    (K1, N1, K0, N0)-shaped tensor for its nominal input layout — the layout
    naming here looks inconsistent; confirm against consumers.
    """
    window = weight[oc_start:oc_start + oc_size, iC_start:iC_start + iC_size]
    # (N, K1*K0) grouped into (N, K1, K0), then K1 moved out front.
    view_k1nk0 = window.reshape((oc_size, -1, K0)).transpose((1, 0, 2))
    return N1KN02K1N1K0N0(view_k1nk0)

def matmul_m1k1m0k0_k1n1k0n0(psum, ifm, weight, bias, bias_en, psum_en):
    """Tiled matmul into a partial-sum buffer.

    psum[m1][n1] (+)= sum_k1 ifm[m1][k1] @ weight[k1][n1], with an optional
    per-column bias.  ifm is (M1, K1, M0, K0), weight is (K1, N1, K0, N0),
    bias is (N1, N0) and is only read when bias_en.  psum_en accumulates into
    psum; otherwise psum tiles are overwritten.  Returns psum (also mutated
    in place).

    Improvements: the per-n0 bias loop is replaced by a numpy broadcast and a
    commented-out debug print is removed.
    NOTE(review): this redefines the 3-argument matmul_m1k1m0k0_k1n1k0n0
    earlier in the file; this definition is the one in effect at import time.
    """
    assert(ifm.shape[1] == weight.shape[0])
    assert(ifm.shape[3] == weight.shape[2])
    M1_, K1_, M0_, _ = ifm.shape
    N1_, N0_ = weight.shape[1], weight.shape[3]
    for m1 in range(M1_):
        for n1 in range(N1_):
            temp = np.zeros((M0_, N0_))
            for k1 in range(K1_):
                temp += np.matmul(ifm[m1][k1], weight[k1][n1])
            if(bias_en):
                # Broadcast the (N0,) bias row over every M0 row of the tile.
                temp += bias[n1]
            if(psum_en):
                psum[m1][n1] += temp
            else:
                psum[m1][n1] = temp
    return psum

def matmul_m1k1m0k0_n1k1n0k0(m1n1m0n0, m1k1m0k0, n1k1n0k0, bias_n1n0, deq_n1n0, M1, N1, K1, bias_en, psum_en, deq_en):
    """Tiled matmul where the right operand is stored N-major (N1, K1, N0, K0),
    so each (N0, K0) tile is transposed before the multiply.

    Writes into m1n1m0n0 in place (accumulating when psum_en), optionally adds
    a per-(n1, n0) bias, then optionally scales each output column by
    deq_n1n0.  M1/N1/K1 are passed explicitly and may cover only a leading
    sub-range of the operand arrays; elements outside those ranges are left
    untouched.  The (M0, N0) accumulator tile shape comes from the module
    globals.  Returns m1n1m0n0.
    """
    assert(m1k1m0k0.shape[1] == n1k1n0k0.shape[1])
    assert(m1k1m0k0.shape[3] == n1k1n0k0.shape[3])
    for m1 in range(M1):
        for n1 in range(N1):
            temp = np.zeros((M0, N0))
            for k1 in range(K1):
                # (M0, K0) @ (N0, K0)^T -> (M0, N0)
                temp += np.matmul(m1k1m0k0[m1][k1], n1k1n0k0[n1][k1].transpose())
            if(bias_en):
                for n0 in range(N0):
                    temp[:, n0] += bias_n1n0[n1][n0]
            if(psum_en):
                m1n1m0n0[m1][n1] += temp
            else:
                m1n1m0n0[m1][n1] = temp
    if(deq_en):
        # Per-output-column dequant scale, applied after all accumulation.
        for n1 in range(N1):
            for n0 in range(N0):
                for m1 in range(M1):
                    for m0 in range(M0):
                        m1n1m0n0[m1][n1][m0][n0] = m1n1m0n0[m1][n1][m0][n0] * deq_n1n0[n1][n0]
    return m1n1m0n0

def MN2CHW(fm, out_channel, out_height, out_width):
    """Reinterpret an (M, N) = (H*W, C) matrix as a (C, H, W) volume:
    row m holds pixel (m // W, m % W), column n holds channel n."""
    ofm_CHW = np.zeros((out_channel, out_height, out_width))
    for oc in range(out_channel):
        for m in range(out_height * out_width):
            ofm_CHW[oc][m // out_width][m % out_width] = fm[m][oc]
    return ofm_CHW

def CHW2MK(ifm, layer):
    """Im2col: lower a CHW feature map to an (M, K) matrix with M = out_h*out_w
    output pixels and K = in_channel*kernel_h*kernel_w taps per pixel.
    Padding taps are left as zeros."""
    oh_n, ow_n = layer['out_height'], layer['out_width']
    ih_n, iw_n = layer['in_height'], layer['in_width']
    ic_n = layer['in_channel']
    kh_n, kw_n = layer['kernel_h'], layer['kernel_w']
    sh, sw = layer['stride_h'], layer['stride_w']
    ph, pw = layer['pad_h'], layer['pad_w']
    dh, dw = layer['dilation_h'], layer['dilation_w']
    M = oh_n * ow_n
    K = ic_n * kh_n * kw_n

    ofm = np.zeros((M, K))
    for m in range(M):
        oh, ow = divmod(m, ow_n)
        for kh in range(kh_n):
            ih = oh * sh - ph + kh * dh
            if not (0 <= ih < ih_n):
                continue  # vertical padding tap
            for kw in range(kw_n):
                iw = ow * sw - pw + kw * dw
                if not (0 <= iw < iw_n):
                    continue  # horizontal padding tap
                for ic in range(ic_n):
                    ofm[m][ic * kh_n * kw_n + kh * kw_n + kw] = ifm[ic][ih][iw]
    return ofm

def Conv2d(ifm, weight, bias, deq, layer):
    """Reference convolution via im2col + matmul.

    Lowers ifm to (M, K), flattens the weights to (N, K), multiplies, then
    applies a per-output-channel bias and dequant scale.  Returns an (N, M) =
    (out_channel, out_h*out_w) matrix."""
    oc_n = layer['out_channel']
    k_len = layer['in_channel'] * layer['kernel_h'] * layer['kernel_w']
    weight_nk = weight.reshape((oc_n, k_len))
    # (N, K) @ (K, M) == ((M, K) @ (K, N))^T
    ofm_nm = np.matmul(weight_nk, CHW2MK(ifm, layer).transpose())
    for ch in range(oc_n):
        ofm_nm[ch] = (ofm_nm[ch] + bias[ch]) * deq[ch]
    return ofm_nm

def QConv2dAsym(ifm, weight, bias, layer, n = 8):
    """Asymmetric-activation quantized conv reference.

    Quantizes the activation with one unsigned n-bit affine scale (over its
    min..max range) and the weights with a signed per-output-channel scale,
    runs the conv in the quantized domain, folds the activation offset and
    the float bias into a per-channel correction, then dequantizes.  Returns
    an (out_channel, out_h*out_w) matrix.

    Bug fix: Conv2d takes (ifm, weight, bias, deq, layer); the original
    calls omitted the deq argument and raised a TypeError.  A unit dequant
    vector is passed so the raw quantized accumulations come back unscaled.
    """
    in_channel = layer['in_channel']
    out_channel = layer['out_channel']
    kernel_h = layer['kernel_h']
    kernel_w = layer['kernel_w']
    weight_nk = weight.reshape((out_channel, in_channel*kernel_h*kernel_w))
    # Activation: affine (asymmetric) quantization over the whole tensor.
    scale_ifm = (2**n - 1) / (np.max(ifm) - np.min(ifm))
    quant_ifm = np.round(scale_ifm*(ifm - np.min(ifm)))
    # Constant tensor carrying the activation zero-point contribution.
    offset = np.ones((ifm.shape))*np.min(ifm)

    # Weights: symmetric per-output-channel quantization.
    quant_weight = np.zeros(weight_nk.shape, dtype=np.float32)
    scale_weight = np.zeros(out_channel, dtype=np.float32)
    for i in range(out_channel):
        scale_weight[i] = (2**(n-1) - 1) / np.max(np.abs(weight_nk[i]))
        quant_weight[i] = np.round(scale_weight[i] * weight_nk[i])
    zero_bias = np.zeros(bias.shape, dtype=np.float32)
    unit_deq = np.ones(out_channel, dtype=np.float32)  # no rescaling inside Conv2d
    quant_ofm_nm = Conv2d(quant_ifm, quant_weight, zero_bias, unit_deq, layer)
    quant_offset_nm = Conv2d(offset, quant_weight, zero_bias, unit_deq, layer)
    # Fold the offset term and the float bias into the quantized domain.
    quant_bias = np.zeros(quant_ofm_nm.shape, dtype=np.float32)
    for i in range(out_channel):
        quant_bias[i] = quant_offset_nm[i] * scale_ifm + bias[i] * scale_ifm * scale_weight[i]
    # Dequantize back to float.
    ofm_nm = np.zeros(quant_ofm_nm.shape, dtype=np.float32)
    for i in range(out_channel):
        ofm_nm[i] = (quant_ofm_nm[i] + quant_bias[i]) / (scale_ifm * scale_weight[i])
    return ofm_nm

def QConv2dSym(ifm, weight, bias, layer, n = 8):
    """Symmetric quantized conv reference.

    Quantizes the activation with one signed n-bit scale and the weights with
    a signed per-output-channel scale, runs the conv in the quantized domain,
    folds the float bias into the quantized domain, then dequantizes.
    Returns an (out_channel, out_h*out_w) matrix.

    Bug fix: Conv2d takes (ifm, weight, bias, deq, layer); the original call
    omitted the deq argument and raised a TypeError.  A unit dequant vector
    is passed so the raw quantized accumulations come back unscaled.
    """
    in_channel = layer['in_channel']
    out_channel = layer['out_channel']
    kernel_h = layer['kernel_h']
    kernel_w = layer['kernel_w']
    weight_nk = weight.reshape((out_channel, in_channel*kernel_h*kernel_w))
    # Activation: symmetric quantization over the whole tensor.
    scale_ifm = (2**(n-1) - 1) / np.max(np.abs(ifm))
    quant_ifm = np.round(scale_ifm*ifm)

    # Weights: symmetric per-output-channel quantization.
    quant_weight = np.zeros(weight_nk.shape, dtype=np.float32)
    scale_weight = np.zeros(out_channel, dtype=np.float32)
    for i in range(out_channel):
        scale_weight[i] = (2**(n-1) - 1) / np.max(np.abs(weight_nk[i]))
        quant_weight[i] = np.round(scale_weight[i] * weight_nk[i])
    zero_bias = np.zeros(bias.shape, dtype=np.float32)
    unit_deq = np.ones(out_channel, dtype=np.float32)  # no rescaling inside Conv2d
    quant_ofm_nm = Conv2d(quant_ifm, quant_weight, zero_bias, unit_deq, layer)
    # Fold the float bias into the quantized domain.
    quant_bias = np.zeros(quant_ofm_nm.shape, dtype=np.float32)
    for i in range(out_channel):
        quant_bias[i] = bias[i] * scale_ifm * scale_weight[i]
    # Dequantize back to float.
    ofm_nm = np.zeros(quant_ofm_nm.shape, dtype=np.float32)
    for i in range(out_channel):
        ofm_nm[i] = (quant_ofm_nm[i] + quant_bias[i]) / (scale_ifm * scale_weight[i])
    return ofm_nm

def QuantSingle(ifm, weight, bias, layer, n = 8):
    """Compute per-output-channel quantization parameters.

    Returns (scale, quant_bias) where scale[i] = scale_ifm * scale_weight[i]
    (the combined activation x weight scale for channel i) and quant_bias[i]
    = bias[i] * scale[i] (the float bias folded into the quantized domain).

    Bug fix: scale_ifm is a scalar (one symmetric scale for the whole
    activation tensor), but the original indexed it as scale_ifm[i], which
    raises on a 0-d value.
    """
    in_channel = layer['in_channel']
    out_channel = layer['out_channel']
    kernel_h = layer['kernel_h']
    kernel_w = layer['kernel_w']
    weight_nk = weight.reshape((out_channel, in_channel*kernel_h*kernel_w))
    # One symmetric n-bit scale for the whole activation tensor.
    scale_ifm = (2**(n-1) - 1) / np.max(np.abs(ifm))
    scale_weight = np.zeros(out_channel, dtype=np.float32)
    scale = np.zeros(out_channel, dtype=np.float32)
    quant_bias = np.zeros(out_channel, dtype=np.float32)
    for i in range(out_channel):
        scale_weight[i] = (2**(n-1) - 1) / np.max(np.abs(weight_nk[i]))
        scale[i] = scale_ifm * scale_weight[i]
        quant_bias[i] = bias[i] * scale[i]
    return scale, quant_bias

