#include <stdio.h>
#include <immintrin.h>
#include <string.h>
#include <time.h>
#include <stdint.h>
#include "argtable3.h"

#include <unistd.h>
#include <sys/syscall.h>
#include <omp.h>

// argtable3 command-line option descriptors.
// NOTE(review): none of these (nor num_threads/dbg in main) is referenced in
// this translation unit — the option parsing presumably lives elsewhere or was
// removed; confirm before deleting.  Also note __ARGTABLE uses a reserved
// identifier (leading double underscore) — rename if it is project-owned.
#define __ARGTABLE
struct arg_lit  *help;     // -h / --help
struct arg_lit  *version;  // --version
struct arg_int  *smt;      // SMT / thread-count option (unused here)
struct arg_lit  *dbg;      // debug flag (unused here)
struct arg_end  *argend;   // argtable terminator / error collector

// Print an integer array in red, preceded by `title` (or by the array's own
// name when title is NULL/0).  When `flag` is non-zero a newline is emitted
// after every `flag` elements.
//
// Fixes over the previous version:
//  - wrapped in do { ... } while (0) so the macro behaves as one statement
//    inside un-braced if/else bodies (the bare `{...}` form broke there);
//  - every macro parameter use is parenthesized, so expression arguments
//    (e.g. flag = 2+1) no longer mis-bind to % and <;
//  - putchar('\n') instead of the magic number 10.
#define DisplayIntegerMatrix(matrix, size, title, flag) do {\
    if (title) \
    { \
        printf("\n\033[0;31m%s\033[0m", (title)); \
    } \
    else \
    { \
        printf("\033[0;31m%s\033[0m", #matrix": "); \
    } \
    for (int i = 0; i < (size); i++) \
    { \
        printf("%10d", (matrix)[i]); \
        if ((flag) && ((i + 1) % (flag) == 0)) \
        { \
            putchar('\n'); \
        } \
    } \
    putchar('\n'); \
} while (0)

#define XFEATURE_XTILECFG	17
#define XFEATURE_XTILEDATA	18
#define XFEATURE_MASK_XTILECFG	(1 << XFEATURE_XTILECFG)
#define XFEATURE_MASK_XTILEDATA	(1 << XFEATURE_XTILEDATA)
#define XFEATURE_MASK_XTILE	(XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA)

#define ARCH_GET_XCOMP_PERM	0x1022
#define ARCH_REQ_XCOMP_PERM	0x1023

// Ask the kernel for permission to use the AMX XTILEDATA state component.
// Without this, executing tile instructions raises SIGILL on Linux.
// The request can legitimately fail (old kernel, permission policy), so the
// result is no longer silently discarded: callers keep the void interface,
// but a diagnostic is printed so a subsequent tile fault is explainable.
void req_xtiledata_permission(void)
{
	if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA) != 0)
		perror("arch_prctl(ARCH_REQ_XCOMP_PERM, XTILEDATA)");
}

// In-memory layout of the 64-byte AMX tile-configuration area consumed by
// LDTILECFG (_tile_loadconfig).  Instances are declared with AMX_ALIGN(64)
// at their points of use.
// NOTE(review): the architectural format describes 16 tiles; here only
// TMM0..TMM7 are named and the remaining slots are presumably carried by the
// reserved_* fields — confirm against the Intel SDM before describing more
// than 8 tiles.
struct TileConfig
{
	uint8_t  palette_id;     // tile palette selector (1 = standard palette)
	uint8_t  start_row;      // restart row for interrupted tile loads/stores
	uint8_t  reserved_0[14]; // must be zero
	uint16_t colsb[8];       // bytes per row for TMM0..TMM7
	uint16_t reserved_1[8];  // presumably colsb slots of tiles 8..15 (unused)
	uint8_t  rows[8];        // row count for TMM0..TMM7
	uint8_t  reserved_2[8];  // presumably rows slots of tiles 8..15 (unused)
};

#define AMX_ALIGN(x)        __attribute__((aligned(x)))
#define AMX_ALIGN_STRUCT(x) struct __attribute__((aligned(x)))
#define AMX_ALIGN_CLASS(x)  class  __attribute__((aligned(x)))

#define TMM0    0
#define TMM1    1
#define TMM2    2
#define TMM3    3
#define TMM4    4
#define TMM5    5
#define TMM6    6
#define TMM7    7

// AMX tile configuration and matrix multiplication intrinsics
#define TILE_M 16
#define TILE_K 64
#define TILE_N 16
#define PALETTE_ID 1
#define M_ACC 2
#define K_ACC 2
#define N_ACC 2
#define KPACK 4

#define Verify  (0)

#if Verify==1
#define MTX_M   (1024)
#define MTX_K   (5120)
#define MTX_N   (1280) 
#define NUM_ITERATIONS 1
#define M_CACHE (TILE_M*8)
#define K_CACHE (TILE_K*8)
#define N_CACHE (TILE_N*8)
#else
#define MTX_XXX (4)
#define MTX_M   (1024*MTX_XXX)
#define MTX_K   (5120*MTX_XXX)
#define MTX_N   (1280*MTX_XXX*2)
#define NUM_ITERATIONS 20
#define M_CACHE (TILE_M*32)
#define K_CACHE (TILE_K*32)
#define N_CACHE (TILE_N*32)
#endif

// #define COLSB_0 (TILE_K)   //colsb[A]
// #define COLSB_1 (TILE_N*4) //colsb[B]
// #define COLSB_2 (TILE_N*sizeof(uint32_t)) //colsb[C]
// #define ROWS_0  (TILE_M)   //row[A]
// #define ROWS_1  (TILE_K/4) //row[B]
// #define ROWS_2  (TILE_M)   //row[C]

// Function to configure AMX tiles
// Program the AMX tile-configuration register for this thread:
// palette 1, all eight tiles shaped 16 rows x 64 bytes — i.e. TILE_M x TILE_K
// for int8 operands, and TILE_M x (TILE_N * 4 bytes) for the int32
// accumulator tiles.  LDTILECFG also zeroes the tile data registers.
// (Prototype tightened to (void); initializer made designated so each field
// is named and the zeroed reserved fields are implicit.)
void configure_tiles(void) {

    struct TileConfig tcfg_ld AMX_ALIGN(64) =
    {
        .palette_id = PALETTE_ID,
        .start_row  = 0,
        .colsb      = {64, 64, 64, 64, 64, 64, 64, 64},
        .rows       = {16, 16, 16, 16, 16, 16, 16, 16},
    };

    // Load tile configuration
    _tile_loadconfig(&tcfg_ld);
}

#if 0
// Function to initialize matrices with simple values
void init_matrices(int8_t *A, int8_t *B, int32_t *C) 
{
    for (int mb = 0; mb < MTX_M; mb += M_CACHE) {
        for (int nb = 0; nb < MTX_K; nb += K_CACHE) {
            for (int m = mb; m < mb + M_CACHE; m += M_ACC*TILE_M) {
                for (int n = nb; n < nb + K_CACHE; n += K_ACC*TILE_K) {
                    for (int j = n; j < n + K_ACC*TILE_K; j++) {
                        for(int i = m; i < m + M_ACC*TILE_M; i++) {
                            A[i*MTX_K+j] = 1;
                            // A[i] = (int8_t)(i % 127 + 1);
                        }
                    }
                }
            }
        }
    }
        
    for (int mb = 0; mb < MTX_K; mb += K_CACHE) {
        for (int nb = 0; nb < MTX_N; nb += N_CACHE) {
            for (int m = mb; m < mb + K_CACHE; m += K_ACC*TILE_K) {
                for (int n = nb; n < nb + N_CACHE; n += N_ACC*TILE_N) {
                    for(int i = m; i < m + K_ACC*TILE_K; i++) {
                        for (int j = n; j < n + N_ACC*TILE_N; j++) {
                            B[i*MTX_N+j] = 1;
                            // B[i] = (int8_t)(i % 127 + 1);
                        }
                    }
                }
            }
        }
    }

    for (int mb = 0; mb < MTX_M; mb += M_CACHE) {
        for (int nb = 0; nb < MTX_N; nb += N_CACHE) {
            for (int m = mb; m < mb + M_CACHE; m += M_ACC*TILE_M) {
                for (int n = nb; n < nb + N_CACHE; n += N_ACC*TILE_N) {
                    for(int i = m; i < m + M_ACC*TILE_M; i++) {
                        for (int j = n; j < n + N_ACC*TILE_N; j++) {
                            C[i*MTX_N+j] = 1;
                            // B[i] = (int8_t)(i % 127 + 1);
                        }
                    }
                }
            }
        }
    }
}
#else
// Fill the benchmark operands: A and B receive a repeating 1..127 byte
// pattern, while the int32 accumulator C is cleared to zero.
void init_matrices(int8_t *A, int8_t *B, int32_t *C) 
{
    const int a_elems = MTX_M * MTX_K;
    const int b_elems = MTX_K * MTX_N;

    for (int idx = 0; idx < a_elems; idx++)
        A[idx] = (int8_t)(idx % 127 + 1);

    for (int idx = 0; idx < b_elems; idx++)
        B[idx] = (int8_t)(idx % 127 + 1);

    // All-zero bytes are all-zero int32 values, so memset is equivalent
    // to the element-by-element zeroing loop.
    memset(C, 0, (size_t)MTX_M * MTX_N * sizeof(int32_t));
}
#endif

/*
 * Scalar reference for the AMX TDPBUUD kernels: C = A * B with
 * uint8 x uint8 -> uint32 dot products.
 *
 * c_buf: M x N row-major output, fully overwritten.
 * a_buf: M x K row-major input.
 * b_buf: K x N row-major input.
 *
 * Fix: loop indices are now uint32_t — the old `int` indices were compared
 * against the uint32_t dimensions (implicit signed/unsigned conversion) and
 * would misbehave for dimensions above INT_MAX.  The dot product is also
 * accumulated in a local before the single store to c_buf.
 */
void matmul_dpbuud_cpu(uint32_t *c_buf, uint8_t *a_buf, uint8_t *b_buf, 
    uint32_t M, uint32_t K, uint32_t N)
{
    for (uint32_t m = 0; m < M; m++)
    {
        for (uint32_t n = 0; n < N; n++)
        {
            uint32_t acc = 0;
            for (uint32_t k = 0; k < K; k++)
            {
                acc += ((uint32_t) a_buf[m * K + k]) *
                       ((uint32_t) b_buf[k * N + n]);
            }
            c_buf[m * N + n] = acc;
        }
    }
}

// Compare two uint32 buffers of `dim` elements.  Returns 1 when they are
// identical; otherwise reports the first mismatching index and the two
// values, and returns 0.
uint32_t matrix_is_equal_i32(uint32_t *a_buf, uint32_t *b_buf, uint32_t dim)
{
    // Fast path: a single byte-wise comparison of both buffers.
    if (memcmp(a_buf, b_buf, dim * sizeof(uint32_t)) == 0)
        return 1;

    // Slow path: locate and report the first differing element.
    for (uint32_t idx = 0; idx < dim; ++idx)
    {
        if (a_buf[idx] != b_buf[idx])
        {
            printf("elem[%u] found bad! a=0x%x, b=0x%x\n", idx, a_buf[idx], b_buf[idx]);
            return 0;
        }
    }
    return 1;
}

/*
 * Repack a row-major B matrix (rows_src x cols_src bytes) into the AMX
 * KPACK=4 "VNNI" layout: each group of 4 consecutive source rows is
 * interleaved column-by-column into one destination row of cols_src*4
 * bytes, so dst[k][i*4 + j] = src[4k + j][i].
 *
 * rows_src is assumed to be a multiple of 4 (KPACK) — TODO confirm at the
 * call sites; a trailing remainder of rows is silently dropped.
 *
 * Fix: loop indices are now uint32_t, removing the implicit signed/unsigned
 * comparisons against the uint32_t dimensions.
 */
static void amx_b_layout_transform(uint8_t *src, uint8_t *dst, uint32_t rows_src, uint32_t cols_src)
{
    for (uint32_t k = 0; k < rows_src / 4; k++)
    {
        for (uint32_t i = 0; i < cols_src; i++)
        {
            for (uint32_t j = 0; j < 4; j++)
            {
                dst[k*cols_src*4 + i*4 + j] = src[k*cols_src*4 + j*cols_src + i];
            }
        }
    }
}

// Simplest AMX GEMM: C[MTX_M x MTX_N] = A * B with one A tile, one B tile
// and one C accumulator tile (TMM2) per inner step.
// A is row-major int8 with a row stride of MTX_K bytes.  B is assumed to be
// pre-packed in the KPACK=4 interleaved layout (4 consecutive K rows folded
// into one (MTX_N*4)-byte row — see amx_b_layout_transform); TODO confirm
// callers pack B before invoking this.
void amx_bench_1(int8_t *A, int8_t *B, int32_t *C)
{
    for(int mi=0; mi<(MTX_M/TILE_M); ++mi)
    {
        for(int ni=0; ni<(MTX_N/TILE_N); ++ni)
        {
            // _tile_loadd(2, C, TILE_N * 4); // Load C
            // Fresh accumulator for this 16x16 C tile.
            _tile_zero(TMM2);
            for(int ki=0; ki<(MTX_K/TILE_K); ++ki)
            {
                _tile_loadd(TMM0, A+mi*TILE_M*MTX_K+ki*TILE_K, MTX_K); // Load A
                _tile_loadd(TMM1, B+ki*(TILE_K/4)*(MTX_N*4)+ni*TILE_N*4, MTX_N*4); // Load B
                _tile_dpbuud(TMM2, TMM0, TMM1); // C += A * B
            }
            _tile_stored(TMM2, (int8_t *)C+mi*TILE_M*MTX_N*sizeof(int32_t)+ni*TILE_N*sizeof(int32_t), MTX_N*sizeof(int32_t)); // Store C
        }
    }
}

// AMX GEMM with 2x2 tile register blocking: each iteration computes a
// 32x32 block of C using two A tiles (TMM0/1), two B tiles (TMM2/3) and
// four accumulators (TMM4..7), amortizing each tile load over two TDPBUUD
// instructions.  Same data-layout assumptions as amx_bench_1 (row-major A,
// KPACK=4 pre-packed B — TODO confirm at call sites).
void amx_bench_2(int8_t *A, int8_t *B, int32_t *C)
{
    for(int mi=0; mi<(MTX_M/TILE_M); mi+=2)
    {
        for(int ni=0; ni<(MTX_N/TILE_N); ni+=2)
        {
            // Fresh accumulators for the four C tiles of this 2x2 block.
            _tile_zero(TMM4);
            _tile_zero(TMM5);
            _tile_zero(TMM6);
            _tile_zero(TMM7);
            for(int ki=0; ki<(MTX_K/TILE_K); ++ki)
            {
                _tile_loadd(TMM0, A+mi*TILE_M*MTX_K+ki*TILE_K, MTX_K); // Load A
                _tile_loadd(TMM1, A+(mi+1)*TILE_M*MTX_K+ki*TILE_K, MTX_K); // Load A+1
                _tile_loadd(TMM2, B+ki*(TILE_K/4)*(MTX_N*4)+ni*TILE_N*4, MTX_N*4); // Load B
                _tile_loadd(TMM3, B+ki*(TILE_K/4)*(MTX_N*4)+(ni+1)*TILE_N*4, MTX_N*4); // Load B+1
                _tile_dpbuud(TMM4, TMM0, TMM2); // C += A * B
                _tile_dpbuud(TMM5, TMM0, TMM3); // C += A * B
                _tile_dpbuud(TMM6, TMM1, TMM2); // C += A * B
                _tile_dpbuud(TMM7, TMM1, TMM3); // C += A * B
            }
            _tile_stored(TMM4, (int8_t *)C+mi*TILE_M*MTX_N*sizeof(int32_t)+ni*TILE_N*sizeof(int32_t), MTX_N*sizeof(int32_t)); // Store C00
            _tile_stored(TMM5, (int8_t *)C+mi*TILE_M*MTX_N*sizeof(int32_t)+(ni+1)*TILE_N*sizeof(int32_t), MTX_N*sizeof(int32_t)); // Store C01
            _tile_stored(TMM6, (int8_t *)C+(mi+1)*TILE_M*MTX_N*sizeof(int32_t)+ni*TILE_N*sizeof(int32_t), MTX_N*sizeof(int32_t)); // Store C10
            _tile_stored(TMM7, (int8_t *)C+(mi+1)*TILE_M*MTX_N*sizeof(int32_t)+(ni+1)*TILE_N*sizeof(int32_t), MTX_N*sizeof(int32_t)); // Store C11
        }
    }
}

// Cache-blocked GEMM with 2x2 AMX tile register blocking, parallelized with
// OpenMP over the (nb, mb) cache blocks.  For each K cache block, a strip of
// A and a block of B are first repacked (tile load + tile store) into the
// contiguous scratch buffers Apack/Bpack so the hot loop streams from linear
// memory.  B must already be in the KPACK=4 interleaved layout (4 consecutive
// K rows folded into one (MTX_N*4)-byte row) — see amx_b_layout_transform.
//
// Fixes in this revision:
//  - B-packing loads now use the packed-B row stride MTX_N*4 bytes instead
//    of MTX_K (the two only coincide in the Verify configuration);
//  - partial-C reloads now use byte offset m*MTX_N*4, matching the stores
//    below (the old code used m*MTX_K, again only correct under Verify).
//
// NOTE(review): Apack and Bpack are written by every OpenMP thread, so with
// more than one thread the packing scratch races and results are wrong.
// They must become per-thread (e.g. one slice per omp_get_thread_num())
// before parallel results can be trusted — flagged, not silently changed,
// because the buffer sizes are owned by the caller.
void amx_bench_3(int8_t *A, int8_t *B, int32_t *C, int8_t *Apack, int8_t *Bpack)
{
    #pragma omp parallel for collapse(2)
    for (int nb = 0; nb < MTX_N; nb += N_CACHE) {
        for (int mb = 0; mb < MTX_M; mb += M_CACHE) {
            for (int kb = 0; kb < MTX_K; kb += K_CACHE) {

                // Pack the (kb, nb) block of B into contiguous Bpack.
                for (int n = nb; n < nb + N_CACHE; n += N_ACC*TILE_N) {
                    int np=(n-nb)/(N_ACC*TILE_N);
                    for (int k = kb; k < kb + K_CACHE; k += TILE_K) {
                        _tile_loadd(TMM0,  B+(k/4)*MTX_N*sizeof(uint32_t)+n*4,          MTX_N*sizeof(uint32_t)); // Load B
                        _tile_loadd(TMM1,  B+(k/4)*MTX_N*sizeof(uint32_t)+(n+TILE_N)*4, MTX_N*sizeof(uint32_t)); // Load B+1
                        int kp=(k-kb)/TILE_K;
                        _tile_stored(TMM0, Bpack+np*(K_CACHE/TILE_K)*TILE_N*4*2*TILE_M+kp*TILE_K*2,        (K_CACHE/TILE_K)*TILE_N*4*2); // Store packed B
                        _tile_stored(TMM1, Bpack+np*(K_CACHE/TILE_K)*TILE_N*4*2*TILE_M+kp*TILE_K*2+TILE_K, (K_CACHE/TILE_K)*TILE_N*4*2); // Store packed B+1
                    }
                }

                for (int m = mb; m < mb + M_CACHE; m += M_ACC*TILE_M) {
                    // Pack the (m, kb) strip of A into contiguous Apack.
                    for (int k = kb; k < kb + K_CACHE; k += TILE_K) {
                        _tile_loadd(TMM0, A+m*MTX_K+k,          MTX_K); // Load A
                        _tile_loadd(TMM1, A+(m+TILE_M)*MTX_K+k, MTX_K); // Load A+1
                        int kp=(k-kb)/TILE_K;
                        _tile_stored(TMM0, Apack+kp*TILE_K*2,        (K_CACHE/TILE_K)*TILE_K*2); // Store packed A
                        _tile_stored(TMM1, Apack+kp*TILE_K*2+TILE_K, (K_CACHE/TILE_K)*TILE_K*2); // Store packed A+1
                    }

                    for (int n = nb; n < nb + N_CACHE; n += N_ACC*TILE_N) {

                        if (kb == 0) {
                            // First K block: start from zero accumulators.
                            _tile_zero(TMM4);
                            _tile_zero(TMM5);
                            _tile_zero(TMM6);
                            _tile_zero(TMM7);
                        }
                        else {
                            // Reload partial C tiles from the same addresses
                            // the stores below write to.
                            _tile_loadd(TMM4, (int8_t *)C+m*MTX_N*4+n*4,                   MTX_N*sizeof(uint32_t)); // Load C0
                            _tile_loadd(TMM5, (int8_t *)C+m*MTX_N*4+(n+TILE_N)*4,          MTX_N*sizeof(uint32_t)); // Load C1
                            _tile_loadd(TMM6, (int8_t *)C+(m+TILE_M)*MTX_N*4+n*4,          MTX_N*sizeof(uint32_t)); // Load C2
                            _tile_loadd(TMM7, (int8_t *)C+(m+TILE_M)*MTX_N*4+(n+TILE_N)*4, MTX_N*sizeof(uint32_t)); // Load C3
                        }

                        int np=(n-nb)/(N_ACC*TILE_N);
                        for (int k = kb; k < kb + K_CACHE; k += TILE_K) {
                            int kp=(k-kb)/TILE_K;

                            _tile_loadd(TMM0, Apack+kp*TILE_K*2,        (K_CACHE/TILE_K)*TILE_K*2); // Load packed A
                            _tile_loadd(TMM1, Apack+kp*TILE_K*2+TILE_K, (K_CACHE/TILE_K)*TILE_K*2); // Load packed A+1

                            // Stream loads: each packed B tile is consumed
                            // once per (m, n) pass, so hint non-temporal use.
                            _tile_stream_loadd(TMM2, Bpack+np*(K_CACHE/TILE_K)*TILE_N*4*2*TILE_M+kp*TILE_K*2,         (K_CACHE/TILE_K)*TILE_N*4*2); // Load B
                            _tile_stream_loadd(TMM3, Bpack+np*(K_CACHE/TILE_K)*TILE_N*4*2*TILE_M+kp*TILE_K*2+TILE_K,  (K_CACHE/TILE_K)*TILE_N*4*2); // Load B+1

                            _tile_dpbuud(TMM4, TMM0, TMM2); // C00 += A0 * B0
                            _tile_dpbuud(TMM5, TMM0, TMM3); // C01 += A0 * B1
                            _tile_dpbuud(TMM6, TMM1, TMM2); // C10 += A1 * B0
                            _tile_dpbuud(TMM7, TMM1, TMM3); // C11 += A1 * B1
                        }
                        _tile_stored(TMM4, (int8_t *)C+m*MTX_N*4+n*4,                   MTX_N*sizeof(int32_t)); // Store C00
                        _tile_stored(TMM5, (int8_t *)C+m*MTX_N*4+(n+TILE_N)*4,          MTX_N*sizeof(int32_t)); // Store C01
                        _tile_stored(TMM6, (int8_t *)C+(m+TILE_M)*MTX_N*4+n*4,          MTX_N*sizeof(int32_t)); // Store C10
                        _tile_stored(TMM7, (int8_t *)C+(m+TILE_M)*MTX_N*4+(n+TILE_N)*4, MTX_N*sizeof(int32_t)); // Store C11
                    }
                }

            }
        }
    }
}

// #include <stdio.h>
// #include <fcntl.h>
// #include <unistd.h>
// #include <sys/ioctl.h>

// #define MSR_IA32_MISC_ENABLE 0x1A4

// void write_msr(int cpu, unsigned long value) {
//     char msr_path[32];
//     sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
//     int fd = open(msr_path, O_WRONLY);
//     if (fd < 0) {
//         perror("Failed to open MSR");
//         return;
//     }
//     pwrite(fd, &value, sizeof(value), MSR_IA32_MISC_ENABLE);
//     close(fd);
// }

#define DIM_M   (16)
#define DIM_K   (64)
#define DIM_N   (16)

#define TILE    (1000)
#define STRIDE  (100)

// Statically allocated, 64-byte-aligned benchmark buffers: A and B are
// DIM_M*DIM_K*TILE*STRIDE bytes (~100 MB) each, sized so main()'s inner loop
// can load each of the TILE iterations from a distinct offset.
uint8_t A[DIM_M*DIM_K*TILE*STRIDE]         AMX_ALIGN(64) = {0};
uint8_t B[DIM_M*DIM_K*TILE*STRIDE]         AMX_ALIGN(64) = {0};
// uint8_t B[(DIM_K/4)*(DIM_N*4)] AMX_ALIGN(64) = {0};
// uint32_t C[DIM_M*DIM_N]        AMX_ALIGN(64) = {0};
// NOTE(review): C is only referenced from commented-out code in main().
uint32_t C[DIM_M*DIM_N]     AMX_ALIGN(64) = {0};

// Entry point: checks for AMX support, requests XTILEDATA permission,
// fills the operand buffers, programs the tile config, and runs a
// tile-load/TDPBUUD throughput loop.
// Cleanup vs the previous version: removed unused locals (timespec start/end,
// num_threads, dbg_enable, a duplicate local tcfg_ld, stride_a/b/c,
// loop_cnt) and the commented-out duplicate instructions; the executed
// instruction sequence is unchanged.
int main(int argc, char *argv[])
{
    (void)argc; // command-line parsing (argtable) is not wired up in this TU
    (void)argv;

    // Bail out early on CPUs without AMX tile/int8 support.
    if (!(__builtin_cpu_supports("amx-int8") && __builtin_cpu_supports("amx-tile"))) {
        printf("AMX not supported on this CPU.\n");
        return 1;
    }
    // Ask the kernel for permission to use XTILEDATA state.
    req_xtiledata_permission();

    // Fill both operand buffers with a repeating 1..127 byte pattern.
    for(int i=0;i<DIM_M*DIM_K*TILE*STRIDE;i++)
    {
        A[i] = (int8_t)(i % 127 + 1);
        B[i] = (int8_t)(i % 127 + 1);
    }

    configure_tiles();

    // Load/compute micro-benchmark.  Each ni iteration loads A and B tiles
    // from a fresh DIM_K*STRIDE*DIM_M-byte region.  NOTE(review): TMM2 is
    // never written after configure_tiles() (LDTILECFG zeroes the tiles per
    // the Intel SDM — confirm), so the TDPBUUD accumulates a zero product;
    // this loop measures load/issue throughput rather than arithmetic.
    for(int iter=0; iter<100000; iter++)
    {
        for(int ni=0; ni<(TILE); ni+=1)
        {
            _tile_loadd(TMM0, A+DIM_K*STRIDE*DIM_M*ni, DIM_K*STRIDE); // Load A tile
            _tile_loadd(TMM1, B+DIM_K*STRIDE*DIM_M*ni, DIM_K*STRIDE); // Load B tile
            _tile_dpbuud(TMM1, TMM0, TMM2); // TMM1 += TMM0 * TMM2
        }
        // _tile_stored(TMM2, (int32_t *)C, 16*sizeof(int32_t)); // Store C11
    }

    return 0;
}
