extern "C" {
#include <immintrin.h>
}

#define A(i, j) A[(i) + (j) * (LDA)]
#define B(i, j) B[(i) + (j) * (LDB)]
#define C(i, j) C[(i) + (j) * (LDC)]

// Scalar fallback for edge tiles that do not fit the 8x4 SIMD kernel.
// Computes C += A * B on an M x N x K sub-problem (column-major, with
// leading dimensions LDA/LDB/LDC encoded in the A/B/C indexing macros).
static void mm_v8_corner(int M, int N, int K, double *A, int LDA, double *B,
                         int LDB, double *C, int LDC) {
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < N; j++) {
            // With cache blocking over K, each call contributes a partial
            // sum, so accumulate on top of the value already in C.
            double acc = C(i, j);
            for (int k = 0; k < K; k++) {
                acc += A(i, k) * B(k, j);
            }
            C(i, j) = acc;
        }
    }
}

// One rank-1 update of the 8x4 accumulator tile: load an 8-element column
// slice of A (two YMM registers), broadcast four scalars from row k of B,
// and FMA them into the c00..c13 accumulators. Expects i, j, k and the
// eight __m256d accumulators to be in scope at the expansion site.
//
// Fix: no semicolon after `while (0)` -- the callers write `KERNEL_K1;`,
// and the do/while(0) idiom only makes the macro behave as a single
// statement (e.g. inside an unbraced if/else) when the semicolon is
// supplied by the caller, not baked into the macro.
#define KERNEL_K1                                                              \
    do {                                                                       \
        __m256d a0 = _mm256_loadu_pd(&A(i, k));                                \
        __m256d a1 = _mm256_loadu_pd(&A(i + 4, k));                            \
        __m256d b0 = _mm256_broadcast_sd(&B(k, j));                            \
        __m256d b1 = _mm256_broadcast_sd(&B(k, j + 1));                        \
        __m256d b2 = _mm256_broadcast_sd(&B(k, j + 2));                        \
        __m256d b3 = _mm256_broadcast_sd(&B(k, j + 3));                        \
        c00 = _mm256_fmadd_pd(a0, b0, c00);                                    \
        c01 = _mm256_fmadd_pd(a0, b1, c01);                                    \
        c02 = _mm256_fmadd_pd(a0, b2, c02);                                    \
        c03 = _mm256_fmadd_pd(a0, b3, c03);                                    \
        c10 = _mm256_fmadd_pd(a1, b0, c10);                                    \
        c11 = _mm256_fmadd_pd(a1, b1, c11);                                    \
        c12 = _mm256_fmadd_pd(a1, b2, c12);                                    \
        c13 = _mm256_fmadd_pd(a1, b3, c13);                                    \
    } while (0)

// AVX2 8x4 register-blocked GEMM micro-kernel with scalar clean-up.
// A:[M, K] B:[K, N] C:[M, N], column-major (see the A/B/C indexing macros);
// performs C += A * B so it composes with cache blocking over K.
static void macro_kernel_mm_v8(int M, int N, int K, double *A, int LDA,
                               double *B, int LDB, double *C, int LDC) {
    int m8 = M & -8; // largest multiple of 8 <= M (rows handled by SIMD)
    int n4 = N & -4; // largest multiple of 4 <= N (cols handled by SIMD)
    int k4 = K & -4; // largest multiple of 4 <= K (unrolled K iterations)
    for (int i = 0; i < m8; i += 8) {
        for (int j = 0; j < n4; j += 4) {
            // 8x4 tile of C kept in eight YMM accumulators: cR? holds the
            // upper (R=0) or lower (R=1) 4-double half of column j+?.
            // Zeroed here; the prior contents of C are added back below.
            __m256d c00 = _mm256_setzero_pd();
            __m256d c01 = _mm256_setzero_pd();
            __m256d c02 = _mm256_setzero_pd();
            __m256d c03 = _mm256_setzero_pd();
            __m256d c10 = _mm256_setzero_pd();
            __m256d c11 = _mm256_setzero_pd();
            __m256d c12 = _mm256_setzero_pd();
            __m256d c13 = _mm256_setzero_pd();
            // K loop manually unrolled by 4; each KERNEL_K1 expansion does
            // one rank-1 update of the tile for the current k.
            for (int k = 0; k < k4;) {
                KERNEL_K1;
                k++;
                KERNEL_K1;
                k++;
                KERNEL_K1;
                k++;
                KERNEL_K1;
                k++;
            }
            // Remaining K iterations not covered by the unrolled loop.
            for (int k = k4; k < K;) {
                KERNEL_K1;
                k++;
            }
            // With cache blocking over K, each call is a partial product,
            // so accumulate onto the values already stored in C.
            _mm256_storeu_pd(&C(i, j),
                             _mm256_add_pd(c00, _mm256_loadu_pd(&C(i, j))));
            _mm256_storeu_pd(&C(i, j + 1),
                             _mm256_add_pd(c01, _mm256_loadu_pd(&C(i, j + 1))));
            _mm256_storeu_pd(&C(i, j + 2),
                             _mm256_add_pd(c02, _mm256_loadu_pd(&C(i, j + 2))));
            _mm256_storeu_pd(&C(i, j + 3),
                             _mm256_add_pd(c03, _mm256_loadu_pd(&C(i, j + 3))));
            _mm256_storeu_pd(&C(i + 4, j),
                             _mm256_add_pd(c10, _mm256_loadu_pd(&C(i + 4, j))));
            _mm256_storeu_pd(
                &C(i + 4, j + 1),
                _mm256_add_pd(c11, _mm256_loadu_pd(&C(i + 4, j + 1))));
            _mm256_storeu_pd(
                &C(i + 4, j + 2),
                _mm256_add_pd(c12, _mm256_loadu_pd(&C(i + 4, j + 2))));
            _mm256_storeu_pd(
                &C(i + 4, j + 3),
                _mm256_add_pd(c13, _mm256_loadu_pd(&C(i + 4, j + 3))));
        }
    }
    if (M == m8 && N == n4) {
        return;
    }
    // Scalar clean-up for the edges: first rows m8..M across ALL columns,
    // then the remaining columns n4..N for the first m8 rows only (so the
    // two corner calls never overlap).
    if (M != m8) {
        mm_v8_corner(M - m8, N, K, &A(m8, 0), LDA, &B(0, 0), LDB, &C(m8, 0),
                     LDC);
    }
    if (N != n4) {
        mm_v8_corner(m8, N - n4, K, &A(0, 0), LDA, &B(0, n4), LDB, &C(0, n4),
                     LDC);
    }
}
#undef KERNEL_K1

// Cache-blocked driver: partitions the M x N x K product into tiles sized
// for the cache hierarchy and hands each tile to macro_kernel_mm_v8.
// Computes C += A * B (column-major; LDA/LDB/LDC are leading dimensions),
// since the macro kernel accumulates onto C across the K-dimension tiles.
inline void mm_v8(int M, int N, int K, double *A, int LDA, double *B, int LDB,
                  double *C, int LDC) {
    const int mBlockSize = 2048;
    const int nBlockSize = 384;
    const int kBlockSize = 192;
    for (int i0 = 0; i0 < M;) {
        // Clamp each tile extent at the matrix boundary.
        const int mLen = (M - i0 < mBlockSize) ? (M - i0) : mBlockSize;
        for (int j0 = 0; j0 < N;) {
            const int nLen = (N - j0 < nBlockSize) ? (N - j0) : nBlockSize;
            for (int p0 = 0; p0 < K;) {
                const int kLen = (K - p0 < kBlockSize) ? (K - p0) : kBlockSize;
                macro_kernel_mm_v8(mLen, nLen, kLen, &A(i0, p0), LDA,
                                   &B(p0, j0), LDB, &C(i0, j0), LDC);
                p0 += kLen;
            }
            j0 += nLen;
        }
        i0 += mLen;
    }
}
#undef A
#undef B
#undef C
