#include <x86intrin.h>
#include <stddef.h>

#define UNROLL 4
#define BLOCKSIZE 32

/*
 * Compute one BLOCKSIZE x BLOCKSIZE x BLOCKSIZE sub-problem of C += A * B
 * for row-major n x n matrices of double, starting at row si, column sj,
 * inner (k) offset sk.
 *
 * Strategy: for each row i of the tile, keep UNROLL AVX accumulators
 * (UNROLL * 4 = 16 consecutive doubles of the C row) live across the whole
 * k loop, broadcasting one element of A per k iteration.  The u loops have
 * a compile-time constant trip count, so the compiler fully unrolls them;
 * unlike the hand-unrolled form, this stays correct if UNROLL is retuned.
 *
 * Preconditions: BLOCKSIZE divides n, and 4 * UNROLL divides BLOCKSIZE
 * (no fringe handling).  C must not alias A or B.
 *
 * Unaligned loads/stores are used deliberately: plain malloc guarantees
 * only 16-byte alignment on common ABIs, and _mm256_load_pd on a
 * 16-byte-aligned pointer faults.  loadu/storeu cost nothing extra on
 * aligned data with modern cores.
 */
static inline void do_block(size_t n, size_t si, size_t sj, size_t sk,
			const double *restrict A, const double *restrict B,
			double *restrict C)
{
	__m256d acc[UNROLL];

	for (size_t i = si; i < si + BLOCKSIZE; ++i) {
		for (size_t j = sj; j < sj + BLOCKSIZE; j += 4 * UNROLL) {
			double *crow = C + i * n + j;

			/* Load the strip of C we will accumulate into. */
			for (int u = 0; u < UNROLL; u++)
				acc[u] = _mm256_loadu_pd(crow + 4 * u);

			for (size_t k = sk; k < sk + BLOCKSIZE; k++) {
				/* A[i][k] replicated into all four lanes. */
				__m256d a = _mm256_broadcast_sd(A + i * n + k);
				const double *brow = B + k * n + j;

				/* Separate mul+add (not FMA) preserves the
				 * original rounding and ISA requirement. */
				for (int u = 0; u < UNROLL; u++)
					acc[u] = _mm256_add_pd(acc[u],
						_mm256_mul_pd(
							_mm256_loadu_pd(brow + 4 * u),
							a));
			}

			for (int u = 0; u < UNROLL; u++)
				_mm256_storeu_pd(crow + 4 * u, acc[u]);
		}
	}
}

/*
 * C += A * B for row-major n x n matrices of double, blocked for cache
 * reuse: each BLOCKSIZE x BLOCKSIZE tile of C is updated against one
 * BLOCKSIZE-deep panel of A and B at a time.
 *
 * Preconditions: n must be a multiple of BLOCKSIZE (there is no cleanup
 * code for fringe tiles).  Note that C is accumulated into, not
 * overwritten; callers wanting C = A * B must zero C first.
 */
void dgemm_avx_unroll_blk(size_t n, double *A, double *B, double *C)
{
	/* size_t induction variables: the original `int si < n` compared a
	 * signed int against a size_t bound (sign/unsigned mismatch), which
	 * misbehaves for n > INT_MAX. */
	for (size_t si = 0; si < n; si += BLOCKSIZE) {
		for (size_t sj = 0; sj < n; sj += BLOCKSIZE) {
			for (size_t sk = 0; sk < n; sk += BLOCKSIZE) {
				do_block(n, si, sj, sk, A, B, C);
			}
		}
	}
}
