/* Human-readable description of this dgemm implementation. */
const char* dgemm_desc = "Simple blocked dgemm.";

#include <stdlib.h>
#include <x86intrin.h>
#include <memory.h>
#include <string.h>
#include <stdio.h>

/* Need to autotune block size */
#if !defined(BLOCK_SIZE)
#define BLOCK_SIZE 2 // was 41
#endif

#define min(a,b) (((a)<(b))?(a):(b))

/* This auxiliary subroutine performs a smaller dgemm operation
 *  C := C + A * B
 * where C is M-by-N, A is M-by-K, and B is K-by-N. */
static void do_block (int lda, int M, int N, int K, double* restrict A, double* restrict B, double* restrict C)
{
  /* Accumulate C(i,j) += sum_k A(i,k) * B(k,j); all matrices are
   * column-major with leading dimension lda. C is M-by-N, A is M-by-K,
   * B is K-by-N. */
  for (int j = 0; j < N; ++j)
  {
    for (int i = 0; i < M; ++i)
    {
      /* Keep the running dot product in a register-friendly local. */
      double acc = C[j*lda + i];
      for (int k = 0; k < K; ++k)
      {
        acc += A[k*lda + i] * B[j*lda + k];
      }
      C[j*lda + i] = acc;
    }
  }
}

/* This subroutine does 2x2 matrix multiplication using the example given in class */
/*static void do_block2x2 (int lda, double* A, double* B, double* restrict C)
{
__m128d c1 = _mm_loadu_pd(C+0*lda);
__m128d c2 = _mm_loadu_pd(C+1*lda);

__m128d a,b1,b2;

for( int i = 0; i < 2; i++)
{
    // load column i of A
    a = _mm_loadu_pd(A+i*lda);
    // load 1st and 2nd value of row i of B
    b1 = _mm_load1_pd(B+i+0*lda);
    b2 = _mm_load1_pd(B+i+1*lda);
    // multiply column i by b1 and then by b2
    c1 = _mm_add_pd(c1,_mm_mul_pd(a,b1));
    c2 = _mm_add_pd(c2,_mm_mul_pd(a,b2));
}

_mm_storeu_pd(C+0*lda,c1);
_mm_storeu_pd(C+1*lda,c2);
}*/

/* Attempt to extend 2x2 routine to nxn  where n is our BLOCK_SIZE.
 * where C is M-by-N, A is M-by-K, and B is K-by-N. */
static void do_blocknxn (int lda, int M, int N, int K, double* restrict A, double* restrict B, double* restrict C)
{
  /* SSE2 micro-kernel for one block of C += A*B (column-major, leading
   * dimension lda; C is M-by-N, A is M-by-K, B is K-by-N).
   *
   * The SIMD path loads/stores full 2-element columns, so it requires
   * M == 2 (and the original author also required K == 2).  Fringe blocks
   * fall back to a plain scalar triple loop.  The fallback check MUST run
   * before any _mm_loadu_pd: the old code loaded C+0*lda and C+1*lda
   * unconditionally, which can read past the end of the matrix when the
   * block is on the bottom or right edge. */
  if (M < 2 || K < 2)
  {
    for (int j = 0; j < N; ++j)
      for (int i = 0; i < M; ++i)
      {
        double cij = C[i + j*lda];
        for (int k = 0; k < K; ++k)
          cij += A[i + k*lda] * B[k + j*lda];
        C[i + j*lda] = cij;
      }
    return;
  }

  /* Here M == 2 and K == 2 (BLOCK_SIZE caps both at 2), so each column of
   * A and C holds exactly two doubles. */
  __m128d c1 = _mm_loadu_pd(C + 0*lda);
  /* Only touch column 1 of C when it exists (N == 2); loading it for an
   * N == 1 edge block would read out of bounds. */
  __m128d c2 = (N == 2) ? _mm_loadu_pd(C + 1*lda) : _mm_setzero_pd();

  for (int k = 0; k < K; ++k)
  {
    __m128d a  = _mm_loadu_pd(A + k*lda);        /* column k of A        */
    __m128d b1 = _mm_load1_pd(B + k + 0*lda);    /* broadcast B(k,0)     */
    c1 = _mm_add_pd(c1, _mm_mul_pd(a, b1));
    if (N == 2)
    {
      __m128d b2 = _mm_load1_pd(B + k + 1*lda);  /* broadcast B(k,1)     */
      c2 = _mm_add_pd(c2, _mm_mul_pd(a, b2));
    }
  }

  _mm_storeu_pd(C + 0*lda, c1);
  if (N == 2)
    _mm_storeu_pd(C + 1*lda, c2);
}



/* This routine performs a dgemm operation
 *  C := C + A * B
 * where A, B, and C are lda-by-lda matrices stored in column-major format. 
 * On exit, A and B maintain their input values. 
 * Added restrict to C */  
void square_dgemm (int lda, double* A, double* B, double* restrict C)
{
  /* Blocked C := C + A*B over lda-by-lda column-major matrices.
   *
   * The original code had a syntax error — posix_memalign(void ** Atempvoid,
   * ...) is not a valid call — and memcpy'd into the never-assigned pointers
   * Atemp/Btemp (undefined behavior).  Fixed: allocate through proper
   * posix_memalign out-parameters, and if either allocation fails, fall
   * back to reading the caller's A and B directly (the copies are only an
   * alignment optimization, not needed for correctness). */
  size_t bytes = sizeof(double) * (size_t)lda * (size_t)lda;

  void *Abuf = NULL;
  void *Bbuf = NULL;
  double *Atemp = A;
  double *Btemp = B;
  if (posix_memalign(&Abuf, 128, bytes) == 0 &&
      posix_memalign(&Bbuf, 128, bytes) == 0)
  {
    Atemp = memcpy(Abuf, A, bytes);
    Btemp = memcpy(Bbuf, B, bytes);
  }

  /* For each block-row of A */
  for (int i = 0; i < lda; i += BLOCK_SIZE)
    /* For each block-column of B */
    for (int j = 0; j < lda; j += BLOCK_SIZE)
      /* Accumulate block dgemms into block of C */
      for (int k = 0; k < lda; k += BLOCK_SIZE)
      {
	/* Correct block dimensions if block "goes off edge of" the matrix */
	int M = min (BLOCK_SIZE, lda-i);
	int N = min (BLOCK_SIZE, lda-j);
	int K = min (BLOCK_SIZE, lda-k);

	/* Perform individual block dgemm */
	do_blocknxn(lda, M, N, K, Atemp + i + k*lda, Btemp + k + j*lda, C + i + j*lda);
      }

  /* free(NULL) is a no-op, so both calls are safe on the fallback path. */
  free(Abuf);
  free(Bbuf);
}
