#include <stdio.h>
#include <stdlib.h>	/* calloc / free / exit — previously missing; code relied on implicit declarations */
#include <string.h>
#include <emmintrin.h>
#include <omp.h>

#define ROWS_PROCESSED 48	// Each memory_vector processes 4 rows.  This corresponds to the number of ROWS, NOT VECTORS.  Update memory_vector array dimension.
#define COLS_PROCESSED 1
#define VECTOR_SIZE 4	// How many items can fit into a single 128-bit vector.

static void allocation_failed();
float* pad_and_copy_matrix(int m, int n, float *src);
/* NOTE: parameter order fixed to match the definition below — it is (dest, src),
 * not (src, dest) as the prototype previously (misleadingly) claimed. */
void copy_matrix(int m, int n, float *dest, float *src);

int padded_m, padded_n;

/**
 * allocation_failed() reports a failed memory allocation on stderr and
 * terminates the process with exit status 1.  It never returns.
 */
static void allocation_failed() {
	fputs("Out of memory.\n", stderr);
	exit(1);
}

/**
 * pad_and_copy_matrix() allocates a zero-filled padded_m x padded_n buffer and
 * copies the m x n column-major source matrix into it, one column at a time
 * (each column lands at a stride of padded_m in the destination).  The CALLER
 * owns, and must free(), the returned pointer.
 *
 * NOTE(review): relies on the file-scope globals padded_m / padded_n being set
 * (sgemm() sets them); calling it before sgemm() operates on zero dimensions.
 *
 * NOTICE: THIS FUNCTION APPEARS TO BE BUGGY.  DO NOT USE (OR FIX). // TODO
 *
 * Arguments:
 * m -- the number of rows in the source matrix
 * n -- the number of columns in the source matrix
 * src -- pointer to the source matrix from which to copy
 *
 * Returns:
 * dest -- pointer to the padded matrix
 */
float* pad_and_copy_matrix(int m, int n, float *src) {
	float *dest = (float*) calloc(padded_m * padded_n, sizeof(float));
	if (dest == NULL)
		allocation_failed();

	// Walk both matrices column by column; the destination advances by the
	// padded leading dimension, the source by the original one.
	float *to = dest;
	float *from = src;
	for (int col = 0; col < n; col++) {
		memcpy(to, from, m * sizeof(float));
		to += padded_m;
		from += m;
	}
	return dest;
}

/**
 * copy_matrix() copies each column of the padded source matrix back into the
 * tightly-packed destination matrix, i.e. it "unpads" the result.  No memory
 * is allocated or freed here.
 *
 * Arguments:
 * m -- the number of rows in the destination matrix  (Note: m <= padded_m)
 * n -- the number of columns in the source matrix   (Note: n <= padded_n)
 * dest -- pointer to the destination matrix to copy into
 * src -- pointer to the source matrix from which to copy
 */
void copy_matrix(int m, int n, float *dest, float *src) {
	for (int col = 0; col < n; col++) {
		// Source columns are padded_m apart; destination columns are m apart.
		float *dst_col = dest + col*m;
		float *src_col = src + col*padded_m;
		memcpy(dst_col, src_col, m * sizeof(float));
	}
}

/**
 * sgemm() computes C = C + A * A^T using 4-wide SSE vectors, parallelized
 * over columns of C with OpenMP.  Matrices are column-major: A is m x n with
 * leading dimension m, and C is m x m with leading dimension m.  Concretely,
 * each element C[i + j*m] accumulates sum over k of A[i + k*m] * A[j + k*m].
 *
 * The inputs are first copied into zero-padded working buffers whose leading
 * dimension (padded_m) is a multiple of VECTOR_SIZE, so every 4-wide
 * load/store stays inside the allocation.  This function allocates those
 * buffers and is responsible for freeing them before returning; the caller's
 * A is left unmodified and the result is written back into C.
 *
 * Arguments:
 * m -- number of rows in matrix A (and dimension of the square matrix C)
 * n -- number of columns in matrix A
 * A -- pointer to matrix A to be used in computation
 * C -- pointer to matrix C to store result of matrix multiply
 */
void sgemm( int m, int n, float *A, float *C ) {
	// Round padded_m up to a multiple of VECTOR_SIZE.  This formula always
	// adds at least one full vector of padding (even when m % 4 == 0), which
	// guarantees padded_m > m and padded_m > 0.
	padded_m = (m / VECTOR_SIZE + 1) * VECTOR_SIZE;
	padded_n = n;

	// Copy A into a zero-padded column-major buffer (leading dim padded_m).
	float *padded_A = (float*) calloc(padded_m * padded_n, sizeof(float));
	if (padded_A == NULL) {
		// BUG FIX: this path previously printed "padded_C failed." to stdout.
		fprintf(stderr, "padded_A failed.\n");
		allocation_failed();
	}
	for (int col = 0; col < n; col++) {
		memcpy(padded_A + col*padded_m, A + col*m, m * sizeof(float));
	}

	// Copy C the same way (C is m x m, so m columns of m rows).
	float *padded_C = (float*) calloc(padded_m * padded_m, sizeof(float));
	if (padded_C == NULL) {
		free(padded_A);
		fprintf(stderr, "padded_C failed.\n");
		allocation_failed();
	}
	for (int col = 0; col < m; col++) {
		memcpy(padded_C + col*padded_m, C + col*m, m * sizeof(float));
	}

	// Rows handled by the 48-row hand-unrolled main loop; the remainder
	// (always a multiple of VECTOR_SIZE, by construction of padded_m) is
	// handled 4 rows at a time by the fringe loop below.
	const int unrolled_rows = padded_m / ROWS_PROCESSED * ROWS_PROCESSED;

	// memory_vector caches ROWS_PROCESSED rows (12 vectors) of one column of
	// C in registers; pivot_vector is the scalar A(j, k) broadcast across all
	// four lanes.  Array size is derived from the macros per the note at the
	// ROWS_PROCESSED definition.
	__m128 memory_vector[ROWS_PROCESSED / VECTOR_SIZE], pivot_vector, column_vector, product_vector;
	float *memory_location, *col_offset_A, *pivot_offset_A;

#pragma omp parallel for private(memory_vector, pivot_vector, column_vector, product_vector, memory_location, col_offset_A, pivot_offset_A)
	for (int j = 0; j < padded_m; j++) {
		pivot_offset_A = padded_A + j;	// base of row j of padded_A

		for (int i = 0; i < unrolled_rows; i += ROWS_PROCESSED) {
			memory_location = padded_C + i + j*padded_m;

			// Load the twelve accumulators C(i .. i+47, j) into registers.
			memory_vector[0] = _mm_loadu_ps(memory_location);
			memory_vector[1] = _mm_loadu_ps(memory_location+4);
			memory_vector[2] = _mm_loadu_ps(memory_location+8);
			memory_vector[3] = _mm_loadu_ps(memory_location+12);
			memory_vector[4] = _mm_loadu_ps(memory_location+16);
			memory_vector[5] = _mm_loadu_ps(memory_location+20);
			memory_vector[6] = _mm_loadu_ps(memory_location+24);
			memory_vector[7] = _mm_loadu_ps(memory_location+28);
			memory_vector[8] = _mm_loadu_ps(memory_location+32);
			memory_vector[9] = _mm_loadu_ps(memory_location+36);
			memory_vector[10] = _mm_loadu_ps(memory_location+40);
			memory_vector[11] = _mm_loadu_ps(memory_location+44);

			for (int k = 0; k < padded_n; k++) {
				// Loop invariants for this k: the column base of A and the
				// broadcast pivot A(j, k).
				col_offset_A = padded_A + i + k*padded_m;
				pivot_vector = _mm_load1_ps(pivot_offset_A + k*padded_m);

				// C(i+r .. i+r+3, j) += A(i+r .. i+r+3, k) * A(j, k)
				// for r = 0, 4, ..., 44 (hand-unrolled for performance).
				column_vector = _mm_loadu_ps(col_offset_A);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[0] = _mm_add_ps(memory_vector[0], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 4);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[1] = _mm_add_ps(memory_vector[1], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 8);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[2] = _mm_add_ps(memory_vector[2], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 12);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[3] = _mm_add_ps(memory_vector[3], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 16);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[4] = _mm_add_ps(memory_vector[4], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 20);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[5] = _mm_add_ps(memory_vector[5], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 24);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[6] = _mm_add_ps(memory_vector[6], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 28);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[7] = _mm_add_ps(memory_vector[7], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 32);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[8] = _mm_add_ps(memory_vector[8], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 36);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[9] = _mm_add_ps(memory_vector[9], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 40);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[10] = _mm_add_ps(memory_vector[10], product_vector);

				column_vector = _mm_loadu_ps(col_offset_A + 44);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[11] = _mm_add_ps(memory_vector[11], product_vector);
			}

			// Store the accumulators back into C.
			_mm_storeu_ps(memory_location, memory_vector[0]);
			_mm_storeu_ps(memory_location+4, memory_vector[1]);
			_mm_storeu_ps(memory_location+8, memory_vector[2]);
			_mm_storeu_ps(memory_location+12, memory_vector[3]);
			_mm_storeu_ps(memory_location+16, memory_vector[4]);
			_mm_storeu_ps(memory_location+20, memory_vector[5]);
			_mm_storeu_ps(memory_location+24, memory_vector[6]);
			_mm_storeu_ps(memory_location+28, memory_vector[7]);
			_mm_storeu_ps(memory_location+32, memory_vector[8]);
			_mm_storeu_ps(memory_location+36, memory_vector[9]);
			_mm_storeu_ps(memory_location+40, memory_vector[10]);
			_mm_storeu_ps(memory_location+44, memory_vector[11]);
		}

		// Fringe: remaining rows, VECTOR_SIZE (4) at a time instead of 48.
		for (int i = unrolled_rows; i < padded_m; i += VECTOR_SIZE) {
			memory_location = padded_C + i + j*padded_m;
			memory_vector[0] = _mm_loadu_ps(memory_location);

			for (int k = 0; k < padded_n; k++) {
				col_offset_A = padded_A + i + k*padded_m;
				pivot_vector = _mm_load1_ps(pivot_offset_A + k*padded_m);

				// C(i .. i+3, j) += A(i .. i+3, k) * A(j, k)
				column_vector = _mm_loadu_ps(col_offset_A);
				product_vector = _mm_mul_ps(column_vector, pivot_vector);
				memory_vector[0] = _mm_add_ps(memory_vector[0], product_vector);
			}

			_mm_storeu_ps(memory_location, memory_vector[0]);
		}
	}

	// Unpad the result back into C, then release the working buffers.
	copy_matrix(m, m, C, padded_C);

	free(padded_A);
	free(padded_C);
}
