/*In case you're wondering, dgemm stands for Double-precision, GEneral Matrix-Matrix multiplication.*/

#include <emmintrin.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <float.h>

//Description string reported by the benchmark harness to identify this implementation.
const char* dgemm_desc = "Dgemm optimized for AMD Opteron, by Erin Carson and Matthieu Nahoum.";

//Cache-level block size (in every dimension); found by auto-tuning.
//120 is divisible by both the 4-wide and 12-deep register-tile steps used
//below, so partial tiles occur only at the edges of the whole matrix.
#define CACHE_BLOCK_SIZE 120

//Branchless minimum. NOTE: evaluates each argument twice — safe here
//because every call site passes side-effect-free expressions.
#define min(a,b) (((a)<(b))?(a):(b))

//SSE2 micro-kernel: one full 4 (rows) x 4 (cols) x 12 (depth) update of a
//C tile, C_tile += A_panel * B_panel, using 16-byte-aligned loads/stores.
//Only valid when lda is even (and the matrix bases are 16-byte aligned,
//as malloc provides on 64-bit platforms), so every address below is aligned.
static void rank12_update_aligned(int lda, const double *ablk, const double *bblk, double *cblk)
{
	//Load the 4x4 C tile: two doubles per register — rows {0,1} ("lo")
	//and rows {2,3} ("hi") of columns 0..3.
	__m128d c0_lo = _mm_load_pd(cblk);
	__m128d c1_lo = _mm_load_pd(cblk + lda);
	__m128d c0_hi = _mm_load_pd(cblk + 2);
	__m128d c1_hi = _mm_load_pd(cblk + 2 + lda);
	__m128d c2_lo = _mm_load_pd(cblk + 2 * lda);
	__m128d c3_lo = _mm_load_pd(cblk + 3 * lda);
	__m128d c2_hi = _mm_load_pd(cblk + 2 + 2 * lda);
	__m128d c3_hi = _mm_load_pd(cblk + 2 + 3 * lda);

	//Twelve rank-1 updates: column s of the A panel times row s of the
	//B panel. Left as a loop — unrolling added little under GCC.
	for (int s = 0; s < 12; s++)
	{
		__m128d a_lo = _mm_load_pd(ablk + s * lda);
		__m128d a_hi = _mm_load_pd(ablk + 2 + s * lda);
		__m128d b0 = _mm_load1_pd(bblk + s);
		__m128d b1 = _mm_load1_pd(bblk + s + lda);
		__m128d b2 = _mm_load1_pd(bblk + s + 2 * lda);
		__m128d b3 = _mm_load1_pd(bblk + s + 3 * lda);
		c0_lo = _mm_add_pd(c0_lo, _mm_mul_pd(a_lo, b0));
		c1_lo = _mm_add_pd(c1_lo, _mm_mul_pd(a_lo, b1));
		c0_hi = _mm_add_pd(c0_hi, _mm_mul_pd(a_hi, b0));
		c1_hi = _mm_add_pd(c1_hi, _mm_mul_pd(a_hi, b1));
		c2_lo = _mm_add_pd(c2_lo, _mm_mul_pd(a_lo, b2));
		c3_lo = _mm_add_pd(c3_lo, _mm_mul_pd(a_lo, b3));
		c2_hi = _mm_add_pd(c2_hi, _mm_mul_pd(a_hi, b2));
		c3_hi = _mm_add_pd(c3_hi, _mm_mul_pd(a_hi, b3));
	}

	//Write the accumulated tile back (aligned).
	_mm_store_pd(cblk, c0_lo);
	_mm_store_pd(cblk + lda, c1_lo);
	_mm_store_pd(cblk + 2, c0_hi);
	_mm_store_pd(cblk + 2 + lda, c1_hi);
	_mm_store_pd(cblk + 2 * lda, c2_lo);
	_mm_store_pd(cblk + 3 * lda, c3_lo);
	_mm_store_pd(cblk + 2 + 2 * lda, c2_hi);
	_mm_store_pd(cblk + 2 + 3 * lda, c3_hi);
}

//Same 4x4x12 micro-kernel, but with unaligned loads/stores for A and C.
//Used when lda is odd, since tile addresses then alternate alignment.
//(_mm_load1_pd on B has no alignment requirement in either variant.)
static void rank12_update_unaligned(int lda, const double *ablk, const double *bblk, double *cblk)
{
	//Unaligned load of the 4x4 C tile.
	__m128d c0_lo = _mm_loadu_pd(cblk);
	__m128d c1_lo = _mm_loadu_pd(cblk + lda);
	__m128d c0_hi = _mm_loadu_pd(cblk + 2);
	__m128d c1_hi = _mm_loadu_pd(cblk + 2 + lda);
	__m128d c2_lo = _mm_loadu_pd(cblk + 2 * lda);
	__m128d c3_lo = _mm_loadu_pd(cblk + 3 * lda);
	__m128d c2_hi = _mm_loadu_pd(cblk + 2 + 2 * lda);
	__m128d c3_hi = _mm_loadu_pd(cblk + 2 + 3 * lda);

	//Twelve rank-1 updates, identical arithmetic to the aligned variant.
	for (int s = 0; s < 12; s++)
	{
		__m128d a_lo = _mm_loadu_pd(ablk + s * lda);
		__m128d a_hi = _mm_loadu_pd(ablk + 2 + s * lda);
		__m128d b0 = _mm_load1_pd(bblk + s);
		__m128d b1 = _mm_load1_pd(bblk + s + lda);
		__m128d b2 = _mm_load1_pd(bblk + s + 2 * lda);
		__m128d b3 = _mm_load1_pd(bblk + s + 3 * lda);
		c0_lo = _mm_add_pd(c0_lo, _mm_mul_pd(a_lo, b0));
		c1_lo = _mm_add_pd(c1_lo, _mm_mul_pd(a_lo, b1));
		c0_hi = _mm_add_pd(c0_hi, _mm_mul_pd(a_hi, b0));
		c1_hi = _mm_add_pd(c1_hi, _mm_mul_pd(a_hi, b1));
		c2_lo = _mm_add_pd(c2_lo, _mm_mul_pd(a_lo, b2));
		c3_lo = _mm_add_pd(c3_lo, _mm_mul_pd(a_lo, b3));
		c2_hi = _mm_add_pd(c2_hi, _mm_mul_pd(a_hi, b2));
		c3_hi = _mm_add_pd(c3_hi, _mm_mul_pd(a_hi, b3));
	}

	//Unaligned store of the accumulated tile.
	_mm_storeu_pd(cblk, c0_lo);
	_mm_storeu_pd(cblk + lda, c1_lo);
	_mm_storeu_pd(cblk + 2, c0_hi);
	_mm_storeu_pd(cblk + 2 + lda, c1_hi);
	_mm_storeu_pd(cblk + 2 * lda, c2_lo);
	_mm_storeu_pd(cblk + 3 * lda, c3_lo);
	_mm_storeu_pd(cblk + 2 + 2 * lda, c2_hi);
	_mm_storeu_pd(cblk + 2 + 3 * lda, c3_hi);
}

//Scalar fallback for partial tiles (rows x cols x depth smaller than
//4x4x12): plain triple loop accumulating into C, column-major.
static void scalar_fringe(int lda, int rows, int cols, int depth,
                          const double *ablk, const double *bblk, double *cblk)
{
	for (int r = 0; r < rows; r++)
	{
		for (int c = 0; c < cols; c++)
		{
			double acc = cblk[r + c * lda];
			for (int d = 0; d < depth; d++)
			{
				acc += ablk[r + d * lda] * bblk[d + c * lda];
			}
			cblk[r + c * lda] = acc;
		}
	}
}

//Optimized matrix multiply: C += A * B for column-major lda x lda matrices
//(all three share the same leading dimension lda).
//Two levels of blocking: CACHE_BLOCK_SIZE cache blocks in all three loop
//dimensions, then tuned 4x4x12 register tiles handled by an SSE2
//micro-kernel; tiles clipped by the matrix edge use the scalar fallback.
void square_dgemm( int lda, double *A, double *B, double *C)
{
	//Even lda keeps every tile address 16-byte aligned (given aligned
	//bases), so the aligned kernel comes for free. Hoisted out of the
	//loops — it never changes.
	const int even_lda = (lda % 2 == 0);

	//Cache-level blocking over rows (bi), columns (bj), and depth (bk).
	for (int bi = 0; bi < lda; bi += CACHE_BLOCK_SIZE)
	{
		for (int bj = 0; bj < lda; bj += CACHE_BLOCK_SIZE)
		{
			for (int bk = 0; bk < lda; bk += CACHE_BLOCK_SIZE)
			{
				//Extent of this cache block; short at the matrix edge.
				const int M = min(CACHE_BLOCK_SIZE, lda - bi);
				const int N = min(CACHE_BLOCK_SIZE, lda - bj);
				const int K = min(CACHE_BLOCK_SIZE, lda - bk);

				//Register-level blocking, tuned to 4x4x12.
				for (int ri = 0; ri < M; ri += 4)
				{
					for (int rj = 0; rj < N; rj += 4)
					{
						for (int rk = 0; rk < K; rk += 12)
						{
							//Column-major base addresses of the current tiles.
							double *cblk = C + (bi + ri) + (bj + rj) * lda;
							double *ablk = A + (bi + ri) + (bk + rk) * lda;
							double *bblk = B + (bk + rk) + (bj + rj) * lda;

							//Clip the register tile against both the cache
							//block boundary and the matrix edge.
							int rows  = min(lda - bi - ri, min(4, CACHE_BLOCK_SIZE - ri));
							int cols  = min(lda - bj - rj, min(4, CACHE_BLOCK_SIZE - rj));
							int depth = min(lda - bk - rk, min(12, CACHE_BLOCK_SIZE - rk));

							if (rows == 4 && cols == 4 && depth == 12)
							{
								//Full tile: SIMD micro-kernel.
								if (even_lda)
								{
									rank12_update_aligned(lda, ablk, bblk, cblk);
								}
								else
								{
									rank12_update_unaligned(lda, ablk, bblk, cblk);
								}
							}
							else
							{
								//Partial tile at an edge: scalar loops.
								scalar_fringe(lda, rows, cols, depth, ablk, bblk, cblk);
							}
						}
					}
				}
			}
		}
	}
}
