#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

// 6x64 int8o32 kernel
void lpgemm(const size_t m0,
            const size_t n0,
            const size_t k0,
            const int8_t* a,
            const size_t rs_a,
            const size_t cs_a,
            const size_t ps_a,
            const int8_t* b,
            const size_t rs_b,
            const size_t cs_b,
            int32_t* c,
            const size_t rs_c,
            const size_t cs_c)
{

	size_t MR = 6;
	size_t NR = 64;

	size_t m_full_pieces = m0 / MR;
	size_t m_full_pieces_loop_limit = m_full_pieces * MR;
	size_t m_partial_pieces = m0 % MR;

	size_t k_full_pieces = k0 / 4;
	size_t k_partial_pieces = k0 % 4;

	if ( n0 < NR )
	{
		size_t n0_rem = n0 % 16;

		// Split into multiple smaller fringe kernels, so as to maximize
		// vectorization. Any n0 < NR(64) can be expressed as n0 = 48 + n`
		// or n0 = 32 + n` or n0 = 16 + n`, where n` < 16.
		size_t n0_48 = n0 / 48;
		size_t n0_32 = n0 / 32;
		size_t n0_16 = n0 / 16;

		// KC when not multiple of 4 will have padding to make it multiple of
		// 4 in packed buffer. Also the k0 cannot be passed as the updated
		// value since A matrix is not packed and requires original k0.
		size_t k0_updated = k0;
		if ( k_partial_pieces > 0 )
		{
			printf("dont support, please set the K dimension to be multiple of 4\n");
      return;
		}
		if ( n0_48 == 1 | n0_32 ==1 | n0_16==1 | n0_rem > 0 )
    {
      printf("dont support, please set the N dimension to be multiple of 64\n");
      return;
    }
	}

	// B matrix storage.
	__m512i b0 = _mm512_setzero_epi32();
	__m512i b1 = _mm512_setzero_epi32();
	__m512i b2 = _mm512_setzero_epi32();
	__m512i b3 = _mm512_setzero_epi32();

	// A matrix storage.
	__m512i a_int32_0 = _mm512_setzero_epi32();
	__m512i a_int32_1 = _mm512_setzero_epi32();

	uint8_t cvt_uint8 = 128;
	__m512i vec_uint8 = _mm512_set1_epi8 (cvt_uint8);

	for ( size_t ir = 0; ir < m_full_pieces_loop_limit; ir += MR )
	{
		__m512 acc_00, acc_01, acc_02, acc_03;
		__m512 acc_10, acc_11, acc_12, acc_13;
		__m512 acc_20, acc_21, acc_22, acc_23;
		__m512 acc_30, acc_31, acc_32, acc_33;
		__m512 acc_40, acc_41, acc_42, acc_43;
		__m512 acc_50, acc_51, acc_52, acc_53;

		// Registers to use for accumulating C.
		__m512i c_int32_0p0 = _mm512_setzero_epi32();
		__m512i c_int32_0p1 = _mm512_setzero_epi32();
		__m512i c_int32_0p2 = _mm512_setzero_epi32();
		__m512i c_int32_0p3 = _mm512_setzero_epi32();

		__m512i c_int32_1p0 = _mm512_setzero_epi32();
		__m512i c_int32_1p1 = _mm512_setzero_epi32();
		__m512i c_int32_1p2 = _mm512_setzero_epi32();
		__m512i c_int32_1p3 = _mm512_setzero_epi32();

		__m512i c_int32_2p0 = _mm512_setzero_epi32();
		__m512i c_int32_2p1 = _mm512_setzero_epi32();
		__m512i c_int32_2p2 = _mm512_setzero_epi32();
		__m512i c_int32_2p3 = _mm512_setzero_epi32();

		__m512i c_int32_3p0 = _mm512_setzero_epi32();
		__m512i c_int32_3p1 = _mm512_setzero_epi32();
		__m512i c_int32_3p2 = _mm512_setzero_epi32();
		__m512i c_int32_3p3 = _mm512_setzero_epi32();

		__m512i c_int32_4p0 = _mm512_setzero_epi32();
		__m512i c_int32_4p1 = _mm512_setzero_epi32();
		__m512i c_int32_4p2 = _mm512_setzero_epi32();
		__m512i c_int32_4p3 = _mm512_setzero_epi32();

		__m512i c_int32_5p0 = _mm512_setzero_epi32();
		__m512i c_int32_5p1 = _mm512_setzero_epi32();
		__m512i c_int32_5p2 = _mm512_setzero_epi32();
		__m512i c_int32_5p3 = _mm512_setzero_epi32();

		for ( size_t kr = 0; kr < k_full_pieces; kr += 1 )
		{
			// The instructions are arranged in a mixed way to reduce data
			// chain dependencies.

			// Load 4 rows with 64 elements each from B to 4 ZMM registers. It
			// is to be noted that the B matrix is packed for use in vnni
			// instructions and each load to ZMM register will have 4 elements
			// along k direction and 16 elements across n directions, so 4x16
			// elements to a ZMM register.
			b0 = _mm512_loadu_si512( b + ( rs_b * kr ) + ( cs_b * 0 ) );

			// Broadcast a[0,kr:kr+4].
			a_int32_0 = _mm512_set1_epi32( *( int32_t* )( a + ( rs_a * 0 ) + ( cs_a * kr ) ) );

			//convert signed int8 to uint8 for VNNI
			// a_int32_0 = _mm512_add_epi8( a_int32_0, vec_uint8 );

			b1 = _mm512_loadu_si512( b + ( rs_b * kr ) + ( cs_b * 1 ) );
			b2 = _mm512_loadu_si512( b + ( rs_b * kr ) + ( cs_b * 2 ) );
			b3 = _mm512_loadu_si512( b + ( rs_b * kr ) + ( cs_b * 3 ) );

			// Perform column direction mat-mul with k = 4.
			// c[0,0-63] = a[0,kr:kr+4]*b[kr:kr+4,0-63]
			c_int32_0p0 = _mm512_dpbusd_epi32( c_int32_0p0, a_int32_0, b0 );

			// Broadcast a[1,kr:kr+4].
			a_int32_1 = _mm512_set1_epi32( *( int32_t* )( a + ( rs_a * 1 ) + ( cs_a * kr ) ) );

			//convert signed int8 to uint8 for VNNI
			// a_int32_1 = _mm512_add_epi8 (a_int32_1, vec_uint8);

			c_int32_0p1 = _mm512_dpbusd_epi32( c_int32_0p1, a_int32_0, b1 );
			c_int32_0p2 = _mm512_dpbusd_epi32( c_int32_0p2, a_int32_0, b2 );
			c_int32_0p3 = _mm512_dpbusd_epi32( c_int32_0p3, a_int32_0, b3 );

			// Perform column direction mat-mul with k = 4.
			// c[1,0-63] = a[1,kr:kr+4]*b[kr:kr+4,0-63]
			c_int32_1p0 = _mm512_dpbusd_epi32( c_int32_1p0, a_int32_1, b0 );

			// Broadcast a[2,kr:kr+4].
			a_int32_0 = _mm512_set1_epi32( *( int32_t* )( a + ( rs_a * 2 ) + ( cs_a * kr ) ) );

			//convert signed int8 to uint8 for VNNI
			// a_int32_0 = _mm512_add_epi8( a_int32_0, vec_uint8 );

			c_int32_1p1 = _mm512_dpbusd_epi32( c_int32_1p1, a_int32_1, b1 );
			c_int32_1p2 = _mm512_dpbusd_epi32( c_int32_1p2, a_int32_1, b2 );
			c_int32_1p3 = _mm512_dpbusd_epi32( c_int32_1p3, a_int32_1, b3 );

			// Perform column direction mat-mul with k = 4.
			// c[2,0-63] = a[2,kr:kr+4]*b[kr:kr+4,0-63]
			c_int32_2p0 = _mm512_dpbusd_epi32( c_int32_2p0, a_int32_0, b0 );

			// Broadcast a[3,kr:kr+4].
			a_int32_1 = _mm512_set1_epi32( *( int32_t* )( a + ( rs_a * 3 ) + ( cs_a * kr ) ) );

			//convert signed int8 to uint8 for VNNI
			// a_int32_1 = _mm512_add_epi8 (a_int32_1, vec_uint8);

			c_int32_2p1 = _mm512_dpbusd_epi32( c_int32_2p1, a_int32_0, b1 );
			c_int32_2p2 = _mm512_dpbusd_epi32( c_int32_2p2, a_int32_0, b2 );
			c_int32_2p3 = _mm512_dpbusd_epi32( c_int32_2p3, a_int32_0, b3 );

			// Perform column direction mat-mul with k = 4.
			// c[3,0-63] = a[3,kr:kr+4]*b[kr:kr+4,0-63]
			c_int32_3p0 = _mm512_dpbusd_epi32( c_int32_3p0, a_int32_1, b0 );

			// Broadcast a[4,kr:kr+4].
			a_int32_0 = _mm512_set1_epi32( *( int32_t* )( a + ( rs_a * 4 ) + ( cs_a * kr ) ) );

			//convert signed int8 to uint8 for VNNI
			// a_int32_0 = _mm512_add_epi8( a_int32_0, vec_uint8 );

			c_int32_3p1 = _mm512_dpbusd_epi32( c_int32_3p1, a_int32_1, b1 );
			c_int32_3p2 = _mm512_dpbusd_epi32( c_int32_3p2, a_int32_1, b2 );
			c_int32_3p3 = _mm512_dpbusd_epi32( c_int32_3p3, a_int32_1, b3 );

			// Perform column direction mat-mul with k = 4.
			// c[4,0-63] = a[4,kr:kr+4]*b[kr:kr+4,0-63]
			c_int32_4p0 = _mm512_dpbusd_epi32( c_int32_4p0, a_int32_0, b0 );

			// Broadcast a[5,kr:kr+4].
			a_int32_1 = _mm512_set1_epi32( *( int32_t* )( a + ( rs_a * 5 ) + ( cs_a * kr ) ) );

			//convert signed int8 to uint8 for VNNI
			// a_int32_1 = _mm512_add_epi8 (a_int32_1, vec_uint8);

			c_int32_4p1 = _mm512_dpbusd_epi32( c_int32_4p1, a_int32_0, b1 );
			c_int32_4p2 = _mm512_dpbusd_epi32( c_int32_4p2, a_int32_0, b2 );
			c_int32_4p3 = _mm512_dpbusd_epi32( c_int32_4p3, a_int32_0, b3 );

			// Perform column direction mat-mul with k = 4.
			// c[5,0-63] = a[5,kr:kr+4]*b[kr:kr+4,0-63]
			c_int32_5p0 = _mm512_dpbusd_epi32( c_int32_5p0, a_int32_1, b0 );
			c_int32_5p1 = _mm512_dpbusd_epi32( c_int32_5p1, a_int32_1, b1 );
			c_int32_5p2 = _mm512_dpbusd_epi32( c_int32_5p2, a_int32_1, b2 );
			c_int32_5p3 = _mm512_dpbusd_epi32( c_int32_5p3, a_int32_1, b3 );
		}

    //Subtract B matrix sum column values to compensate 
    //for addition of 128 to A matrix elements
    /*
    int32_t* bsumptr = post_ops_attr.b_col_sum_vec;

    b0 = _mm512_loadu_si512( bsumptr );
    c_int32_0p0 = _mm512_sub_epi32( c_int32_0p0 , b0 );
    c_int32_1p0 = _mm512_sub_epi32( c_int32_1p0 , b0 );
    c_int32_2p0 = _mm512_sub_epi32( c_int32_2p0 , b0 );
    c_int32_3p0 = _mm512_sub_epi32( c_int32_3p0 , b0 );
    c_int32_4p0 = _mm512_sub_epi32( c_int32_4p0 , b0 );
    c_int32_5p0 = _mm512_sub_epi32( c_int32_5p0 , b0 );

    b0 = _mm512_loadu_si512( bsumptr + 16 );
    c_int32_0p1 = _mm512_sub_epi32( c_int32_0p1 , b0 );
    c_int32_1p1 = _mm512_sub_epi32( c_int32_1p1 , b0 );
    c_int32_2p1 = _mm512_sub_epi32( c_int32_2p1 , b0 );
    c_int32_3p1 = _mm512_sub_epi32( c_int32_3p1 , b0 );
    c_int32_4p1 = _mm512_sub_epi32( c_int32_4p1 , b0 );
    c_int32_5p1 = _mm512_sub_epi32( c_int32_5p1 , b0 );

    b0 = _mm512_loadu_si512( bsumptr + 32 );
    c_int32_0p2 = _mm512_sub_epi32( c_int32_0p2 , b0 );
    c_int32_1p2 = _mm512_sub_epi32( c_int32_1p2 , b0 );
    c_int32_2p2 = _mm512_sub_epi32( c_int32_2p2 , b0 );
    c_int32_3p2 = _mm512_sub_epi32( c_int32_3p2 , b0 );
    c_int32_4p2 = _mm512_sub_epi32( c_int32_4p2 , b0 );
    c_int32_5p2 = _mm512_sub_epi32( c_int32_5p2 , b0 );

    b0 = _mm512_loadu_si512( bsumptr + 48 );
    c_int32_0p3 = _mm512_sub_epi32( c_int32_0p3 , b0 );
    c_int32_1p3 = _mm512_sub_epi32( c_int32_1p3 , b0 );
    c_int32_2p3 = _mm512_sub_epi32( c_int32_2p3 , b0 );
    c_int32_3p3 = _mm512_sub_epi32( c_int32_3p3 , b0 );
    c_int32_4p3 = _mm512_sub_epi32( c_int32_4p3 , b0 );
    c_int32_5p3 = _mm512_sub_epi32( c_int32_5p3 , b0 );
		*/

    // Store the results directly from c_int32_x registers.
    // c[0,0-15]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 0 ) ) + ( 0*16 ), c_int32_0p0 );
    // c[0,16-31]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 0 ) ) + ( 1*16 ), c_int32_0p1 );
    // c[0,32-47]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 0 ) ) + ( 2*16 ), c_int32_0p2 );
    // c[0,48-63]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 0 ) ) + ( 3*16 ), c_int32_0p3 );
    // c[1,0-15]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 1 ) ) + ( 0*16 ), c_int32_1p0 );
    // c[1,16-31]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 1 ) ) + ( 1*16 ), c_int32_1p1 );
    // c[1,32-47]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 1 ) ) + ( 2*16 ), c_int32_1p2 );
    // c[1,48-63]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 1 ) ) + ( 3*16 ), c_int32_1p3 );
    // c[2,0-15]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 2 ) ) + ( 0*16 ), c_int32_2p0 );
    // c[2,16-31]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 2 ) ) + ( 1*16 ), c_int32_2p1 );
    // c[2,32-47]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 2 ) ) + ( 2*16 ), c_int32_2p2 );
    // c[2,48-63]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 2 ) ) + ( 3*16 ), c_int32_2p3 );
    // c[3,0-15]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 3 ) ) + ( 0*16 ), c_int32_3p0 );
    // c[3,16-31]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 3 ) ) + ( 1*16 ), c_int32_3p1 );
    // c[3,32-47]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 3 ) ) + ( 2*16 ), c_int32_3p2 );
    // c[3,48-63]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 3 ) ) + ( 3*16 ), c_int32_3p3 );
    // c[4,0-15]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 4 ) ) + ( 0*16 ), c_int32_4p0 );
    // c[4,16-31]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 4 ) ) + ( 1*16 ), c_int32_4p1 );
    // c[4,32-47]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 4 ) ) + ( 2*16 ), c_int32_4p2 );
    // c[4,48-63]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 4 ) ) + ( 3*16 ), c_int32_4p3 );
    // c[5,0-15]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 5 ) ) + ( 0*16 ), c_int32_5p0 );
    // c[5,16-31]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 5 ) ) + ( 1*16 ), c_int32_5p1 );
    // c[5,32-47]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 5 ) ) + ( 2*16 ), c_int32_5p2 );
    // c[5,48-63]
    _mm512_storeu_si512( c + ( rs_c * ( ir + 5 ) ) + ( 3*16 ), c_int32_5p3 );

		a = a + ( MR * ps_a );
	}

	if ( m_partial_pieces > 0 )
	{
		if ( m_partial_pieces != 0 )
		{
			// In cases where A matrix is packed cs_a is set to 24, since the
			// next column in a given row is accessed after 4*6 elements, where
			// 6 is MR and 4 elements are broadcasted each time from A (vnni).
			// In fringe case, where m < MR, the next column will be after m'*4
			// elements, and subsequently following adjustment of cs_a is
			// required before calling m fringe kernels.
			size_t cs_a_use = ( cs_a == 4 ) ? 4 : ( ( cs_a / 6 ) * 5 );
      printf("dont support, please set the K dimension to be multiple of 4\n");
		}
	}
}

// Benchmark wrapper: run the 6x64 VNNI kernel `niters` times with the
// standard stride configuration (unpacked A, VNNI-packed B, row-major C).
// Always returns 0.
int vnni_avx512_zmm(int niters, int8_t *A, int8_t *B, int32_t *C, size_t M, size_t N, size_t K) {
    int remaining = niters;
    while (remaining-- > 0) {
        lpgemm(M, N, K,
               A, /*rs_a=*/K, /*cs_a=*/4, /*ps_a=*/K,
               B, /*rs_b=*/N * 4, /*cs_b=*/64,
               C, /*rs_c=*/N, /*cs_c=*/1);
    }

    return 0;
}

#include <stdlib.h>
#include <stdint.h>
#include <immintrin.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include <assert.h>

// Naive triple-loop reference GEMM: C = A * B with row-major int8 inputs and
// an int32 output, used to validate the vectorized kernel.
void reference_gemm(int M, int N, int K, const int8_t* A, const int8_t* B, int32_t* C)
{
    for (int row = 0; row < M; row++) {
        const int8_t* a_row = A + (size_t)row * K;
        int32_t* c_row = C + (size_t)row * N;
        for (int col = 0; col < N; col++) {
            int32_t acc = 0;
            for (int kk = 0; kk < K; kk++) {
                acc += (int32_t)a_row[kk] * (int32_t)B[(size_t)kk * N + col];
            }
            c_row[col] = acc;
        }
    }
}

// Repack row-major B (K x N) into the VNNI layout consumed by lpgemm: each
// 64-byte group holds a 4 (k) x 16 (n) tile stored column-by-column with the
// 4 k-values of one column contiguous.  Requires K % 4 == 0 and N % 16 == 0.
void pack_B_vnni_k4n16(const int8_t* B, int8_t* Bp, size_t K, size_t N)
{
    assert(K % 4 == 0 && N % 16 == 0);

    for (size_t kb = 0; kb < K; kb += 4)            // group of 4 source rows
    {
        for (size_t nb = 0; nb < N; nb += 16)       // group of 16 columns
        {
            int8_t* tile = Bp + kb * N + nb * 4;    // start of this 4x16 tile
            for (size_t col = 0; col < 16; ++col)
            {
                for (size_t row = 0; row < 4; ++row)
                {
                    // tile layout: [column][k-within-group]
                    tile[col * 4 + row] = B[(kb + row) * N + (nb + col)];
                }
            }
        }
    }
}

// Correction pass for the signed->unsigned A trick: after computing
// (A + 128) * B with unsigned-A VNNI accumulation, subtract
// 128 * colsum(B) from every C element to recover the signed-A product.
//
//   C         : M x N int32 result matrix (row-major), updated in place
//   B_col_sum : length-N array of per-column element sums of B
//   M, N      : matrix dimensions
void post_process_int8_to_uint8_correction(
    int32_t* C,
    const int32_t* B_col_sum,
    int M,
    int N
) {
    for (int row = 0; row < M; row++) {
        int32_t* c_row = C + (size_t)row * N;
        for (int col = 0; col < N; col++) {
            // Remove the 128 * sum(B[:, col]) bias introduced per element.
            c_row[col] -= 128 * B_col_sum[col];
        }
    }
}

// Dump an int8 matrix (rows x cols, leading dimension ld) to a text file,
// one matrix row per line.  On open failure the error is reported via
// perror and nothing is written.
void print_matrix_i8(const char* filename, const int8_t* mat, size_t rows, size_t cols, size_t ld)
{
    FILE* out = fopen(filename, "w");
    if (out == NULL) {
        perror("Failed to open file");
        return;
    }

    for (size_t r = 0; r < rows; ++r)
    {
        const int8_t* row = mat + r * ld;
        for (size_t c = 0; c < cols; ++c)
        {
            fprintf(out, "%4d ", row[c]);
        }
        fprintf(out, "\n");
    }

    fclose(out);
}

// Dump an int32 matrix (rows x cols, leading dimension ld) to a text file,
// one matrix row per line.  On open failure the error is reported via
// perror and nothing is written.
void print_matrix_i32(const char* filename, const int32_t* mat, size_t rows, size_t cols, size_t ld)
{
    FILE* out = fopen(filename, "w");
    if (out == NULL) {
        perror("Failed to open file");
        return;
    }

    for (size_t r = 0; r < rows; ++r)
    {
        const int32_t* row = mat + r * ld;
        for (size_t c = 0; c < cols; ++c)
        {
            fprintf(out, "%8d ", row[c]);
        }
        fprintf(out, "\n");
    }

    fclose(out);
}

// Round n up to the next multiple of 64; used to size 64-byte-aligned
// allocations.
size_t roundup64(size_t n) {
    return (n + 63) & ~(size_t)63;
}

// int main(int argc, char** argv)
// {
//     if (argc != 4) {
//       printf("Usage: %s <M> <N> <K>\n", argv[0]);
//       return -1;
//     }

//     const int M = atoi(argv[1]);   // Must be multiple of 6
//     const int N = atoi(argv[2]);   // Must be multiple of 64
//     const int K = atoi(argv[3]);   // Must be multiple of 4

//     if (M % 6 != 0 || N % 64 != 0 || K % 4 != 0) {
//       printf("Error: M must be multiple of 6, N must be multiple of 64, and K must be multiple of 4.\n");
//       return -1;
//     }

//     size_t A_size = roundup64(M * K);
//     size_t B_size = roundup64(K * N);
//     size_t C_size = roundup64(M * N * sizeof(int32_t));

//     // Allocate and zero
//     int8_t* A = (int8_t*) aligned_alloc(64, A_size);
//     int8_t* B = (int8_t*) aligned_alloc(64, B_size);
//     int8_t* B_pack = (int8_t*) aligned_alloc(64, B_size);
//     int32_t* B_col_sum = (int32_t*) aligned_alloc(64, roundup64(N * sizeof(int32_t)));
//     int32_t* C = (int32_t*) aligned_alloc(64, C_size);
//     int32_t* C_ref = (int32_t*) aligned_alloc(64, C_size);

//     memset(C, 0, M * N * sizeof(int32_t));
//     memset(C_ref, 0, M * N * sizeof(int32_t));

//     // Init A/B
//     // lpgemm kernel 期待A(u8)B(s8)，若A为s8则需要在kernel内外做处理。目前代码实现为A(s8)但初始化为正数，结果正确。
//     for (int i = 0; i < M * K; i++) A[i] = (int8_t)(rand() % 10);
//     for (int i = 0; i < K * N; i++) B[i] = (int8_t)(rand() % 10);

//     // Pack B
//     print_matrix_i8("A.txt", A, M, K, K);
//     print_matrix_i8("B.txt", B, K, N, N);
//     pack_B_vnni_k4n16(B, B_pack, K, N);
//     print_matrix_i8("B_pack.txt", B_pack, K, N, 64); // 打包后每列间隔64字节

//     // ---------------------------正确性验证-----------------------------
//     lpgemm(M, N, K,
//     A, K, 4, K,
//     B_pack, N * 4, 64,  // packed B layout
//     C, N, 1);
//     // 结果后处理（若启用加128补偿）
//     // post_process_int8_to_uint8_correction(C, B_col_sum, M, N);
//     print_matrix_i32("C.txt", C, M, N, N);

//     // 验证参考实现
//     reference_gemm(M, N, K, A, B, C_ref);
//     print_matrix_i32("C_ref.txt", C_ref, M, N, N);
//     int error_count = 0;
//     for (int i = 0; i < M * N; i++) {
//         if (C[i] != C_ref[i]) {
//             if (error_count < 10)
//                 printf("Mismatch at %d: got %d, expected %d\n", i, C[i], C_ref[i]);
//             error_count++;
//         }
//     }
//     if (error_count == 0)
//         printf("✅ Results match reference\n");
//     else
//         printf("❌ %d mismatches found\n", error_count);

//     // ---------------------------测试-----------------------------
//     // Warmup
//     for (int i = 0; i < 100000000; i++) {
//       lpgemm(M, N, K,
//           A, K, 4, K,
//           B_pack, N * 4, 64,  // packed B layout
//           C, N, 1);
//     }

//     // 时间测量
//     struct timespec t1, t2;
//     int iter_num = 100;
//     clock_gettime(CLOCK_MONOTONIC, &t1);

//     for(int i=0; i<iter_num; i++){
//       lpgemm(M, N, K,
//             A, K, 4, K,
//             B_pack, N * 4, 64,  // packed B layout
//             C, N, 1);
//     }
//     clock_gettime(CLOCK_MONOTONIC, &t2);
//     double time_ms = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_nsec - t1.tv_nsec) / 1e6;
//     printf("Kernel execution time: %.6f ms\n", time_ms/iter_num);
//     // Calculate equivalent FLOPS
//     double flops = 2.0 * M * N * K; // 2 operations (multiply and add) per element
//     double gflops = (flops / ((time_ms/iter_num)/ 1000.0)) / 1e9; // Convert to GFLOPS
//     printf("Equivalent GFLOPS: %.3f\n", gflops);
//     const double peak_gflops = 2.0 * 64.0 * 2.0 * 3.1; // 64 ops per cycle, 2 FLOPs per op, 3.1 GHz clock
//     printf("Peak theoretical GFLOPS: %.3f\n", peak_gflops);
//     double utilization = (gflops / peak_gflops) * 100.0;
//     printf("VNNI compute unit utilization: %.2f%%\n", utilization);

//     free(A); free(B); free(B_pack); free(B_col_sum); free(C); free(C_ref);
//     return 0;
// }