#include <stdio.h>
#include <emmintrin.h>
#include <omp.h>

#define ROWS_PROCESSED 48	// Rows of C processed per middle-loop iteration (i += ROWS_PROCESSED): 12 vectors x VECTOR_SIZE floats.  If this changes, update the memory_vector array dimension to ROWS_PROCESSED / VECTOR_SIZE.
#define COLS_PROCESSED 4	// Special case handling not required, since BLOCKSIZE_COLS is a multiple of this value.  Special case handling occurs in kb case.
#define VECTOR_SIZE 4	// How many items can fit into a single 128-bit vector.
#define BLOCKSIZE_COLS 8

void sgemm( int m, int n, float *A, float *C ) {
	__m128 memory_vector[12], pivot_vector[4], column_vector, product_vector;
	int NUM_EXTRA_ROWS = m % VECTOR_SIZE, NUM_EXTRA_COLS = n % BLOCKSIZE_COLS, l = n - NUM_EXTRA_COLS;
	float *memory_location, *col_offset_A, *pivot_offset_A;
	float pivot, *edge_location_A;

#pragma omp parallel for private (memory_vector, pivot_vector, column_vector, product_vector, memory_location, col_offset_A, pivot_offset_A, pivot, edge_location_A)
	for (int j = 0; j < m; j++) {	// TODO Outermost loop, general case.
		pivot_offset_A = A + j;
		float *col_offset_C = C + (m-NUM_EXTRA_ROWS) + j*m;
		for (int kb = 0; kb < n/BLOCKSIZE_COLS * BLOCKSIZE_COLS; kb+=BLOCKSIZE_COLS) {	// TODO Special case handling for columns if n not divisible by 4.
			for (int i = 0; i < m/ROWS_PROCESSED*ROWS_PROCESSED; i+=ROWS_PROCESSED) {	// TODO Middle loop, general case.
				memory_location = C + i + j*m;

				// Load memory_vectors from C into registers.
				*memory_vector = _mm_loadu_ps(memory_location);
				*(memory_vector+1) = _mm_loadu_ps(memory_location+4);
				*(memory_vector+2) = _mm_loadu_ps(memory_location+8);
				*(memory_vector+3) = _mm_loadu_ps(memory_location+12);
				*(memory_vector+4) = _mm_loadu_ps(memory_location+16);
				*(memory_vector+5) = _mm_loadu_ps(memory_location+20);
				*(memory_vector+6) = _mm_loadu_ps(memory_location+24);
				*(memory_vector+7) = _mm_loadu_ps(memory_location+28);
				*(memory_vector+8) = _mm_loadu_ps(memory_location+32);
				*(memory_vector+9) = _mm_loadu_ps(memory_location+36);
				*(memory_vector+10) = _mm_loadu_ps(memory_location+40);
				*(memory_vector+11) = _mm_loadu_ps(memory_location+44);

				for (int k = kb; k < (kb+BLOCKSIZE_COLS) / COLS_PROCESSED * COLS_PROCESSED; k+= COLS_PROCESSED) {	// TODO Inner loop, general case.
					// Constants for this loop: col_offset_A for selecting the column_vector, *pivot_vector used.
					col_offset_A = A + i + k*m;
					float* curr_col_offset = col_offset_A;
					float* curr_pivot_offset = pivot_offset_A + k*m;

					*pivot_vector = _mm_load1_ps(curr_pivot_offset);
					*(pivot_vector+1) = _mm_load1_ps(curr_pivot_offset + m);
					*(pivot_vector+2) = _mm_load1_ps(curr_pivot_offset + 2*m);
					*(pivot_vector+3) = _mm_load1_ps(curr_pivot_offset + 3*m);

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + i + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset + m);	// A + i + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset + 2*m);	// A + i + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset + 3*m);	// A + i + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+4) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+4) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+4) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+4) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+8) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+8) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+8) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+8) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+12) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+12) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+12) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+12) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+16) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+16) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+16) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+16) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+20) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+20) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+20) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+20) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+24) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+24) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+24) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+24) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+28) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+28) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+28) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+28) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+32) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+32) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+32) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+32) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+36) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+36) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+36) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+36) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+40) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+40) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+40) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+40) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

					curr_col_offset += 4;

					column_vector = _mm_loadu_ps(curr_col_offset);	// A + (i+44) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+m);	// A + (i+44) + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+2*m);	// A + (i+44) + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);
					column_vector = _mm_loadu_ps(curr_col_offset+3*m);	// A + (i+44) + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);


				}	// TODO End inner loop, general case.

				_mm_storeu_ps(memory_location, *memory_vector);
				_mm_storeu_ps(memory_location+4, *(memory_vector+1));
				_mm_storeu_ps(memory_location+8, *(memory_vector+2));
				_mm_storeu_ps(memory_location+12, *(memory_vector+3));
				_mm_storeu_ps(memory_location+16, *(memory_vector+4));
				_mm_storeu_ps(memory_location+20, *(memory_vector+5));
				_mm_storeu_ps(memory_location+24, *(memory_vector+6));
				_mm_storeu_ps(memory_location+28, *(memory_vector+7));
				_mm_storeu_ps(memory_location+32, *(memory_vector+8));
				_mm_storeu_ps(memory_location+36, *(memory_vector+9));
				_mm_storeu_ps(memory_location+40, *(memory_vector+10));
				_mm_storeu_ps(memory_location+44, *(memory_vector+11));

			}	// TODO End middle loop, general case.

			// Special case handling for rows.
			for (int i = m/ROWS_PROCESSED * ROWS_PROCESSED; i < m/VECTOR_SIZE * VECTOR_SIZE; i+= VECTOR_SIZE) {	// TODO Middle loop, columns special case.
				memory_location = C + i + j*m;
				*memory_vector = _mm_loadu_ps(memory_location);

				for (int k = kb; k < (kb + BLOCKSIZE_COLS) / COLS_PROCESSED * COLS_PROCESSED; k+= COLS_PROCESSED) {	// TODO Inner loop, general case.
					// Constants for this loop: col_offset_A for selecting the column_vector, *pivot_vector used.
					col_offset_A = A + i + k*m;
					float* curr_pivot_offset = pivot_offset_A + k*m;

					*pivot_vector = _mm_load1_ps(curr_pivot_offset);
					*(pivot_vector + 1) = _mm_load1_ps(curr_pivot_offset + m);
					*(pivot_vector + 2) = _mm_load1_ps(curr_pivot_offset + 2*m);
					*(pivot_vector + 3) = _mm_load1_ps(curr_pivot_offset + 3*m);

					column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);
					column_vector = _mm_loadu_ps(col_offset_A + m);	// A + i + (k+1)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+1));
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);
					column_vector = _mm_loadu_ps(col_offset_A + 2*m);	// A + i + (k+2)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+2));
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);
					column_vector = _mm_loadu_ps(col_offset_A + 3*m);	// A + i + (k+3)*m
					product_vector = _mm_mul_ps(column_vector, *(pivot_vector+3));
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);

				}	// TODO End inner loop, general case.
				_mm_storeu_ps(memory_location, *memory_vector);
			}	// TODO End middle loop, rows special case.

			switch (NUM_EXTRA_ROWS) {
			case 0:
				break;
			case 1:
				for (int k = kb; k < kb+BLOCKSIZE_COLS; k++) {
					pivot = *(pivot_offset_A + k*m);
					edge_location_A = A + (m-NUM_EXTRA_ROWS) + k*m;
					*(col_offset_C) += pivot * *edge_location_A;
				}
				break;
			case 2:
				for (int k = kb; k < kb+BLOCKSIZE_COLS; k++) {
					pivot = *(pivot_offset_A + k*m);
					edge_location_A = A + (m-NUM_EXTRA_ROWS) + k*m;
					*(col_offset_C) += pivot * *edge_location_A;
					*(col_offset_C + 1) += pivot * *(edge_location_A+1);
				}
				break;
			case 3:
				for (int k = kb; k < kb+BLOCKSIZE_COLS; k++) {
					pivot = *(pivot_offset_A + k*m);
					edge_location_A = A + (m-NUM_EXTRA_ROWS) + k*m;
					*(col_offset_C) += pivot * *edge_location_A;
					*(col_offset_C + 1) += pivot * *(edge_location_A+1);
					*(col_offset_C + 2) += pivot * *(edge_location_A+2);
				}
				break;
			default:
				printf("*** If you see this message, something is wrong with how row special cases are handled.");
				break;
			}
		}	// TODO End second loop, column-neighborhood bounds.

		// int l = n - NUM_EXTRA_COLS;
		float* pivot_offset_Al = pivot_offset_A + l*m;

		switch (NUM_EXTRA_COLS) {
		case 0:
			break;
		case 1:	// TODO
			for (int i = 0; i < m/ROWS_PROCESSED*ROWS_PROCESSED; i+=ROWS_PROCESSED) {	// TODO Middle loop, general case.
				memory_location = C + i + j*m;

				// Load memory_vectors from C into registers.
				*memory_vector = _mm_loadu_ps(memory_location);
				*(memory_vector+1) = _mm_loadu_ps(memory_location+4);
				*(memory_vector+2) = _mm_loadu_ps(memory_location+8);
				*(memory_vector+3) = _mm_loadu_ps(memory_location+12);
				*(memory_vector+4) = _mm_loadu_ps(memory_location+16);
				*(memory_vector+5) = _mm_loadu_ps(memory_location+20);
				*(memory_vector+6) = _mm_loadu_ps(memory_location+24);
				*(memory_vector+7) = _mm_loadu_ps(memory_location+28);
				*(memory_vector+8) = _mm_loadu_ps(memory_location+32);
				*(memory_vector+9) = _mm_loadu_ps(memory_location+36);
				*(memory_vector+10) = _mm_loadu_ps(memory_location+40);
				*(memory_vector+11) = _mm_loadu_ps(memory_location+44);

				// Begin unrolled k loop.
				col_offset_A = A + i + l*m;
				*pivot_vector = _mm_load1_ps(pivot_offset_Al);

				column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*memory_vector = _mm_add_ps(*memory_vector, product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

				column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);
				// End unrolled k loop.

				_mm_storeu_ps(memory_location, *memory_vector);
				_mm_storeu_ps(memory_location+4, *(memory_vector+1));
				_mm_storeu_ps(memory_location+8, *(memory_vector+2));
				_mm_storeu_ps(memory_location+12, *(memory_vector+3));
				_mm_storeu_ps(memory_location+16, *(memory_vector+4));
				_mm_storeu_ps(memory_location+20, *(memory_vector+5));
				_mm_storeu_ps(memory_location+24, *(memory_vector+6));
				_mm_storeu_ps(memory_location+28, *(memory_vector+7));
				_mm_storeu_ps(memory_location+32, *(memory_vector+8));
				_mm_storeu_ps(memory_location+36, *(memory_vector+9));
				_mm_storeu_ps(memory_location+40, *(memory_vector+10));
				_mm_storeu_ps(memory_location+44, *(memory_vector+11));

			}	// TODO End middle loop, general case.

			// Special case handling for rows.
			for (int i = m/ROWS_PROCESSED * ROWS_PROCESSED; i < m/VECTOR_SIZE * VECTOR_SIZE; i+= VECTOR_SIZE) {	// TODO Middle loop, columns special case.
				memory_location = C + i + j*m;
				*memory_vector = _mm_loadu_ps(memory_location);
				// Begin unrolled k loop.
				col_offset_A = A + i + l*m;
				*pivot_vector = _mm_load1_ps(pivot_offset_Al);

				column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
				product_vector = _mm_mul_ps(column_vector, *pivot_vector);
				*memory_vector = _mm_add_ps(*memory_vector, product_vector);
				// End unrolled k loop.

				_mm_storeu_ps(memory_location, *memory_vector);
			}	// TODO End middle loop, rows special case.

			switch (NUM_EXTRA_ROWS) {
			case 0:
				break;
			case 1:
				pivot = *(pivot_offset_Al);
				edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
				*(col_offset_C) += pivot * *edge_location_A;
				break;
			case 2:
				pivot = *(pivot_offset_Al);
				edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
				*(col_offset_C) += pivot * *edge_location_A;
				*(col_offset_C + 1) += pivot * *(edge_location_A+1);
				break;
			case 3:
				pivot = *(pivot_offset_Al);
				edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
				*(col_offset_C) += pivot * *edge_location_A;
				*(col_offset_C + 1) += pivot * *(edge_location_A+1);
				*(col_offset_C + 2) += pivot * *(edge_location_A+2);
				break;
			default:
				printf("*** If you see this message, something is wrong with how row special cases are handled.");
				break;
			}
			break;
			case 2:
				for (int i = 0; i < m/ROWS_PROCESSED*ROWS_PROCESSED; i+=ROWS_PROCESSED) {	// TODO Middle loop, general case.
					memory_location = C + i + j*m;

					// Load memory_vectors from C into registers.
					*memory_vector = _mm_loadu_ps(memory_location);
					*(memory_vector+1) = _mm_loadu_ps(memory_location+4);
					*(memory_vector+2) = _mm_loadu_ps(memory_location+8);
					*(memory_vector+3) = _mm_loadu_ps(memory_location+12);
					*(memory_vector+4) = _mm_loadu_ps(memory_location+16);
					*(memory_vector+5) = _mm_loadu_ps(memory_location+20);
					*(memory_vector+6) = _mm_loadu_ps(memory_location+24);
					*(memory_vector+7) = _mm_loadu_ps(memory_location+28);
					*(memory_vector+8) = _mm_loadu_ps(memory_location+32);
					*(memory_vector+9) = _mm_loadu_ps(memory_location+36);
					*(memory_vector+10) = _mm_loadu_ps(memory_location+40);
					*(memory_vector+11) = _mm_loadu_ps(memory_location+44);

					// Begin unrolled k loop.
					col_offset_A = A + i + l*m;
					*pivot_vector = _mm_load1_ps(pivot_offset_Al);

					column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

					col_offset_A += m; // col_offset_A = A + i + (l+1)*m;
					*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

					column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

					column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);
					// End unrolled k loop.

					_mm_storeu_ps(memory_location, *memory_vector);
					_mm_storeu_ps(memory_location+4, *(memory_vector+1));
					_mm_storeu_ps(memory_location+8, *(memory_vector+2));
					_mm_storeu_ps(memory_location+12, *(memory_vector+3));
					_mm_storeu_ps(memory_location+16, *(memory_vector+4));
					_mm_storeu_ps(memory_location+20, *(memory_vector+5));
					_mm_storeu_ps(memory_location+24, *(memory_vector+6));
					_mm_storeu_ps(memory_location+28, *(memory_vector+7));
					_mm_storeu_ps(memory_location+32, *(memory_vector+8));
					_mm_storeu_ps(memory_location+36, *(memory_vector+9));
					_mm_storeu_ps(memory_location+40, *(memory_vector+10));
					_mm_storeu_ps(memory_location+44, *(memory_vector+11));

				}	// TODO End middle loop, general case.

				// Special case handling for rows.
				for (int i = m/ROWS_PROCESSED * ROWS_PROCESSED; i < m/VECTOR_SIZE * VECTOR_SIZE; i+= VECTOR_SIZE) {	// TODO Middle loop, columns special case.
					memory_location = C + i + j*m;
					*memory_vector = _mm_loadu_ps(memory_location);

					// Unrolled k loop.
					col_offset_A = A + i + l*m;
					*pivot_vector = _mm_load1_ps(pivot_offset_Al);

					column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);

					col_offset_A += m; // col_offset_A = A + i + (l+1)*m;
					*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

					column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
					product_vector = _mm_mul_ps(column_vector, *pivot_vector);
					*memory_vector = _mm_add_ps(*memory_vector, product_vector);
					// End unrolled k loop.

					_mm_storeu_ps(memory_location, *memory_vector);
				}	// TODO End middle loop, rows special case.

				// Scalar cleanup for the final m % VECTOR_SIZE rows that don't fill a
				// vector; col_offset_C points at C + (m-NUM_EXTRA_ROWS) + j*m. Each case
				// adds pivot * A(row, k) for k = l and l+1 into the 1-3 trailing rows.
				switch (NUM_EXTRA_ROWS) {
				case 0:
					break;
				case 1:
					// One trailing row.
					pivot = *(pivot_offset_Al);
					edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
					*(col_offset_C) += pivot * *edge_location_A;

					pivot = *(pivot_offset_Al + m);
					edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
					*(col_offset_C) += pivot * *edge_location_A;
					break;
				case 2:
					// Two trailing rows.
					pivot = *(pivot_offset_Al);
					edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
					*(col_offset_C) += pivot * *edge_location_A;
					*(col_offset_C + 1) += pivot * *(edge_location_A+1);

					pivot = *(pivot_offset_Al + m);
					edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
					*(col_offset_C) += pivot * *edge_location_A;
					*(col_offset_C + 1) += pivot * *(edge_location_A+1);
					break;
				case 3:
					// Three trailing rows.
					pivot = *(pivot_offset_Al);
					edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
					*(col_offset_C) += pivot * *edge_location_A;
					*(col_offset_C + 1) += pivot * *(edge_location_A+1);
					*(col_offset_C + 2) += pivot * *(edge_location_A+2);

					pivot = *(pivot_offset_Al + m);
					edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
					*(col_offset_C) += pivot * *edge_location_A;
					*(col_offset_C + 1) += pivot * *(edge_location_A+1);
					*(col_offset_C + 2) += pivot * *(edge_location_A+2);
					break;
				default:
					// Unreachable: NUM_EXTRA_ROWS = m % VECTOR_SIZE is always 0..3.
					// NOTE(review): message lacks a trailing '\n', so it may not flush.
					printf("*** If you see this message, something is wrong with how row special cases are handled.");
					break;
				}
				// Ends the enclosing column-remainder case — NOTE(review): its label is
				// above this chunk; two columns (l, l+1) are processed, so presumably
				// NUM_EXTRA_COLS case 2. Confirm against the enclosing switch.
				break;
				case 3:
					for (int i = 0; i < m/ROWS_PROCESSED*ROWS_PROCESSED; i+=ROWS_PROCESSED) {	// TODO Middle loop, general case.
						memory_location = C + i + j*m;

						// Load memory_vectors from C into registers.
						*memory_vector = _mm_loadu_ps(memory_location);
						*(memory_vector+1) = _mm_loadu_ps(memory_location+4);
						*(memory_vector+2) = _mm_loadu_ps(memory_location+8);
						*(memory_vector+3) = _mm_loadu_ps(memory_location+12);
						*(memory_vector+4) = _mm_loadu_ps(memory_location+16);
						*(memory_vector+5) = _mm_loadu_ps(memory_location+20);
						*(memory_vector+6) = _mm_loadu_ps(memory_location+24);
						*(memory_vector+7) = _mm_loadu_ps(memory_location+28);
						*(memory_vector+8) = _mm_loadu_ps(memory_location+32);
						*(memory_vector+9) = _mm_loadu_ps(memory_location+36);
						*(memory_vector+10) = _mm_loadu_ps(memory_location+40);
						*(memory_vector+11) = _mm_loadu_ps(memory_location+44);

						// Begin unrolled k loop.
						col_offset_A = A + i + l*m;
						*pivot_vector = _mm_load1_ps(pivot_offset_Al);

						column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*memory_vector = _mm_add_ps(*memory_vector, product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

						col_offset_A += m; // col_offset_A = A + i + (l+1)*m;
						*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

						column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*memory_vector = _mm_add_ps(*memory_vector, product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

						col_offset_A += m; // col_offset_A = A + i + (l+2)*m;
						*pivot_vector = _mm_load1_ps(pivot_offset_Al + 2*m);

						column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*memory_vector = _mm_add_ps(*memory_vector, product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

						column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

						// End unrolled k loop.

						_mm_storeu_ps(memory_location, *memory_vector);
						_mm_storeu_ps(memory_location+4, *(memory_vector+1));
						_mm_storeu_ps(memory_location+8, *(memory_vector+2));
						_mm_storeu_ps(memory_location+12, *(memory_vector+3));
						_mm_storeu_ps(memory_location+16, *(memory_vector+4));
						_mm_storeu_ps(memory_location+20, *(memory_vector+5));
						_mm_storeu_ps(memory_location+24, *(memory_vector+6));
						_mm_storeu_ps(memory_location+28, *(memory_vector+7));
						_mm_storeu_ps(memory_location+32, *(memory_vector+8));
						_mm_storeu_ps(memory_location+36, *(memory_vector+9));
						_mm_storeu_ps(memory_location+40, *(memory_vector+10));
						_mm_storeu_ps(memory_location+44, *(memory_vector+11));

					}	// TODO End middle loop, general case.

					// Special case handling for rows.
					// Vectorized row cleanup for NUM_EXTRA_COLS case 3: leftover whole
					// 4-row vectors (i in [m/48*48, m/4*4)), accumulating three columns
					// (k = l..l+2) into C(i..i+3, j).
					// NOTE(review): pivot_offset_Al is declared above this chunk —
					// appears to be the pivot entry at column l; confirm.
					for (int i = m/ROWS_PROCESSED * ROWS_PROCESSED; i < m/VECTOR_SIZE * VECTOR_SIZE; i+= VECTOR_SIZE) {	// TODO Middle loop, columns special case.
						memory_location = C + i + j*m;	// C(i..i+3, j)
						*memory_vector = _mm_loadu_ps(memory_location);

						// Unrolled k loop (inner); special case.
						// k = l
						col_offset_A = A + i + l*m;
						*pivot_vector = _mm_load1_ps(pivot_offset_Al);

						column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*memory_vector = _mm_add_ps(*memory_vector, product_vector);

						// k = l+1
						col_offset_A += m; // col_offset_A = A + i + (l+1)*m;
						*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

						column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*memory_vector = _mm_add_ps(*memory_vector, product_vector);

						// k = l+2
						col_offset_A += m; // col_offset_A = A + i + (l+2)*m;
						*pivot_vector = _mm_load1_ps(pivot_offset_Al + 2*m);

						column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
						product_vector = _mm_mul_ps(column_vector, *pivot_vector);
						*memory_vector = _mm_add_ps(*memory_vector, product_vector);
						// End unrolled k loop (inner); special case.

						_mm_storeu_ps(memory_location, *memory_vector);
					}	// TODO End middle loop, rows special case.

					// Scalar cleanup for the final m % VECTOR_SIZE rows (case 3 of the
					// column-remainder switch): adds pivot * A(row, k) for k = l..l+2
					// into the 1-3 trailing rows at col_offset_C.
					switch (NUM_EXTRA_ROWS) {
					case 0:
						break;
					case 1:
						// One trailing row.
						pivot = *(pivot_offset_Al);
						edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
						*(col_offset_C) += pivot * *edge_location_A;

						pivot = *(pivot_offset_Al + m);
						edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
						*(col_offset_C) += pivot * *edge_location_A;

						pivot = *(pivot_offset_Al + 2*m);
						edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
						*(col_offset_C) += pivot * *edge_location_A;
						break;
					case 2:
						// Two trailing rows.
						pivot = *(pivot_offset_Al);
						edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
						*(col_offset_C) += pivot * *edge_location_A;
						*(col_offset_C + 1) += pivot * *(edge_location_A+1);

						pivot = *(pivot_offset_Al + m);
						edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
						*(col_offset_C) += pivot * *edge_location_A;
						*(col_offset_C + 1) += pivot * *(edge_location_A+1);

						pivot = *(pivot_offset_Al + 2*m);
						edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
						*(col_offset_C) += pivot * *edge_location_A;
						*(col_offset_C + 1) += pivot * *(edge_location_A+1);
						break;
					case 3:
						// Three trailing rows.
						pivot = *(pivot_offset_Al);
						edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
						*(col_offset_C) += pivot * *edge_location_A;
						*(col_offset_C + 1) += pivot * *(edge_location_A+1);
						*(col_offset_C + 2) += pivot * *(edge_location_A+2);

						pivot = *(pivot_offset_Al + m);
						edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
						*(col_offset_C) += pivot * *edge_location_A;
						*(col_offset_C + 1) += pivot * *(edge_location_A+1);
						*(col_offset_C + 2) += pivot * *(edge_location_A+2);

						pivot = *(pivot_offset_Al + 2*m);
						edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
						*(col_offset_C) += pivot * *edge_location_A;
						*(col_offset_C + 1) += pivot * *(edge_location_A+1);
						*(col_offset_C + 2) += pivot * *(edge_location_A+2);
						break;
					default:
						// Unreachable: NUM_EXTRA_ROWS = m % VECTOR_SIZE is always 0..3.
						// NOTE(review): message lacks a trailing '\n', so it may not flush.
						printf("*** If you see this message, something is wrong with how row special cases are handled.");
						break;
					}

					break;	// end of column-remainder case 3 (label at top of this region)
					case 4:
						for (int i = 0; i < m/ROWS_PROCESSED*ROWS_PROCESSED; i+=ROWS_PROCESSED) {	// TODO Middle loop, general case.
							memory_location = C + i + j*m;

							// Load memory_vectors from C into registers.
							*memory_vector = _mm_loadu_ps(memory_location);
							*(memory_vector+1) = _mm_loadu_ps(memory_location+4);
							*(memory_vector+2) = _mm_loadu_ps(memory_location+8);
							*(memory_vector+3) = _mm_loadu_ps(memory_location+12);
							*(memory_vector+4) = _mm_loadu_ps(memory_location+16);
							*(memory_vector+5) = _mm_loadu_ps(memory_location+20);
							*(memory_vector+6) = _mm_loadu_ps(memory_location+24);
							*(memory_vector+7) = _mm_loadu_ps(memory_location+28);
							*(memory_vector+8) = _mm_loadu_ps(memory_location+32);
							*(memory_vector+9) = _mm_loadu_ps(memory_location+36);
							*(memory_vector+10) = _mm_loadu_ps(memory_location+40);
							*(memory_vector+11) = _mm_loadu_ps(memory_location+44);

							// Begin unrolled k loop.
							col_offset_A = A + i + l*m;
							*pivot_vector = _mm_load1_ps(pivot_offset_Al);

							column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*memory_vector = _mm_add_ps(*memory_vector, product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

							col_offset_A += m; // col_offset_A = A + i + (l+1)*m;
							*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

							column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*memory_vector = _mm_add_ps(*memory_vector, product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

							col_offset_A += m; // col_offset_A = A + i + (l+2)*m;
							*pivot_vector = _mm_load1_ps(pivot_offset_Al + 2*m);

							column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*memory_vector = _mm_add_ps(*memory_vector, product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

							col_offset_A += m; // col_offset_A = A + i + (l+3)*m;
							*pivot_vector = _mm_load1_ps(pivot_offset_Al + 3*m);

							column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*memory_vector = _mm_add_ps(*memory_vector, product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

							column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

							// End unrolled k loop.

							_mm_storeu_ps(memory_location, *memory_vector);
							_mm_storeu_ps(memory_location+4, *(memory_vector+1));
							_mm_storeu_ps(memory_location+8, *(memory_vector+2));
							_mm_storeu_ps(memory_location+12, *(memory_vector+3));
							_mm_storeu_ps(memory_location+16, *(memory_vector+4));
							_mm_storeu_ps(memory_location+20, *(memory_vector+5));
							_mm_storeu_ps(memory_location+24, *(memory_vector+6));
							_mm_storeu_ps(memory_location+28, *(memory_vector+7));
							_mm_storeu_ps(memory_location+32, *(memory_vector+8));
							_mm_storeu_ps(memory_location+36, *(memory_vector+9));
							_mm_storeu_ps(memory_location+40, *(memory_vector+10));
							_mm_storeu_ps(memory_location+44, *(memory_vector+11));

						}	// TODO End middle loop, general case.

						// Special case handling for rows.
						// Vectorized row cleanup for NUM_EXTRA_COLS case 4: leftover whole
						// 4-row vectors (i in [m/48*48, m/4*4)), accumulating four columns
						// (k = l..l+3) into C(i..i+3, j).
						// NOTE(review): pivot_offset_Al is declared above this chunk —
						// appears to be the pivot entry at column l; confirm.
						for (int i = m/ROWS_PROCESSED * ROWS_PROCESSED; i < m/VECTOR_SIZE * VECTOR_SIZE; i+= VECTOR_SIZE) {	// TODO Middle loop, columns special case.
							memory_location = C + i + j*m;	// C(i..i+3, j)
							*memory_vector = _mm_loadu_ps(memory_location);

							// Unrolled k loop (inner); special case.
							// k = l
							col_offset_A = A + i + l*m;
							*pivot_vector = _mm_load1_ps(pivot_offset_Al);

							column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*memory_vector = _mm_add_ps(*memory_vector, product_vector);

							// k = l+1
							col_offset_A += m; // col_offset_A = A + i + (l+1)*m;
							*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

							column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*memory_vector = _mm_add_ps(*memory_vector, product_vector);

							// k = l+2
							col_offset_A += m; // col_offset_A = A + i + (l+2)*m;
							*pivot_vector = _mm_load1_ps(pivot_offset_Al +2*m);

							column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*memory_vector = _mm_add_ps(*memory_vector, product_vector);

							// k = l+3
							col_offset_A += m; // col_offset_A = A + i + (l+3)*m;
							*pivot_vector = _mm_load1_ps(pivot_offset_Al + 3*m);

							column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
							product_vector = _mm_mul_ps(column_vector, *pivot_vector);
							*memory_vector = _mm_add_ps(*memory_vector, product_vector);
							// End unrolled k loop (inner); special case.

							_mm_storeu_ps(memory_location, *memory_vector);
						}	// TODO End middle loop, rows special case.

						// Scalar cleanup for the final m % VECTOR_SIZE rows (case 4 of the
						// column-remainder switch): adds pivot * A(row, k) for k = l..l+3
						// into the 1-3 trailing rows at col_offset_C.
						switch (NUM_EXTRA_ROWS) {
						case 0:
							break;
						case 1:
							// One trailing row.
							pivot = *(pivot_offset_Al);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
							*(col_offset_C) += pivot * *edge_location_A;

							pivot = *(pivot_offset_Al + m);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
							*(col_offset_C) += pivot * *edge_location_A;

							pivot = *(pivot_offset_Al + 2*m);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
							*(col_offset_C) += pivot * *edge_location_A;

							pivot = *(pivot_offset_Al + 3*m);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
							*(col_offset_C) += pivot * *edge_location_A;
							break;
						case 2:
							// Two trailing rows.
							pivot = *(pivot_offset_Al);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
							*(col_offset_C) += pivot * *edge_location_A;
							*(col_offset_C + 1) += pivot * *(edge_location_A+1);

							pivot = *(pivot_offset_Al + m);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
							*(col_offset_C) += pivot * *edge_location_A;
							*(col_offset_C + 1) += pivot * *(edge_location_A+1);

							pivot = *(pivot_offset_Al + 2*m);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
							*(col_offset_C) += pivot * *edge_location_A;
							*(col_offset_C + 1) += pivot * *(edge_location_A+1);

							pivot = *(pivot_offset_Al + 3*m);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
							*(col_offset_C) += pivot * *edge_location_A;
							*(col_offset_C + 1) += pivot * *(edge_location_A+1);
							break;
						case 3:
							// Three trailing rows.
							pivot = *(pivot_offset_Al);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
							*(col_offset_C) += pivot * *edge_location_A;
							*(col_offset_C + 1) += pivot * *(edge_location_A+1);
							*(col_offset_C + 2) += pivot * *(edge_location_A+2);

							pivot = *(pivot_offset_Al + m);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
							*(col_offset_C) += pivot * *edge_location_A;
							*(col_offset_C + 1) += pivot * *(edge_location_A+1);
							*(col_offset_C + 2) += pivot * *(edge_location_A+2);

							pivot = *(pivot_offset_Al + 2*m);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
							*(col_offset_C) += pivot * *edge_location_A;
							*(col_offset_C + 1) += pivot * *(edge_location_A+1);
							*(col_offset_C + 2) += pivot * *(edge_location_A+2);

							pivot = *(pivot_offset_Al + 3*m);
							edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
							*(col_offset_C) += pivot * *edge_location_A;
							*(col_offset_C + 1) += pivot * *(edge_location_A+1);
							*(col_offset_C + 2) += pivot * *(edge_location_A+2);
							break;
						default:
							// Unreachable: NUM_EXTRA_ROWS = m % VECTOR_SIZE is always 0..3.
							// NOTE(review): message lacks a trailing '\n', so it may not flush.
							printf("*** If you see this message, something is wrong with how row special cases are handled.");
							break;
						}
						break;	// end of column-remainder case 4 (label at top of this region)
						case 5:
							for (int i = 0; i < m/ROWS_PROCESSED*ROWS_PROCESSED; i+=ROWS_PROCESSED) {	// TODO Middle loop, general case.
								memory_location = C + i + j*m;

								// Load memory_vectors from C into registers.
								*memory_vector = _mm_loadu_ps(memory_location);
								*(memory_vector+1) = _mm_loadu_ps(memory_location+4);
								*(memory_vector+2) = _mm_loadu_ps(memory_location+8);
								*(memory_vector+3) = _mm_loadu_ps(memory_location+12);
								*(memory_vector+4) = _mm_loadu_ps(memory_location+16);
								*(memory_vector+5) = _mm_loadu_ps(memory_location+20);
								*(memory_vector+6) = _mm_loadu_ps(memory_location+24);
								*(memory_vector+7) = _mm_loadu_ps(memory_location+28);
								*(memory_vector+8) = _mm_loadu_ps(memory_location+32);
								*(memory_vector+9) = _mm_loadu_ps(memory_location+36);
								*(memory_vector+10) = _mm_loadu_ps(memory_location+40);
								*(memory_vector+11) = _mm_loadu_ps(memory_location+44);

								// Begin unrolled k loop.
								col_offset_A = A + i + l*m;
								*pivot_vector = _mm_load1_ps(pivot_offset_Al);

								column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*memory_vector = _mm_add_ps(*memory_vector, product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

								col_offset_A += m; // col_offset_A = A + i + (l+1)*m;
								*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

								column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*memory_vector = _mm_add_ps(*memory_vector, product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

								col_offset_A += m; // col_offset_A = A + i + (l+2)*m;
								*pivot_vector = _mm_load1_ps(pivot_offset_Al + 2*m);

								column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*memory_vector = _mm_add_ps(*memory_vector, product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

								col_offset_A += m; // col_offset_A = A + i + (l+3)*m;
								*pivot_vector = _mm_load1_ps(pivot_offset_Al + 3*m);

								column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*memory_vector = _mm_add_ps(*memory_vector, product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

								col_offset_A += m; // col_offset_A = A + i + (l+4)*m;
								*pivot_vector = _mm_load1_ps(pivot_offset_Al + 4*m);

								column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*memory_vector = _mm_add_ps(*memory_vector, product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

								column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

								// End unrolled k loop.

								_mm_storeu_ps(memory_location, *memory_vector);
								_mm_storeu_ps(memory_location+4, *(memory_vector+1));
								_mm_storeu_ps(memory_location+8, *(memory_vector+2));
								_mm_storeu_ps(memory_location+12, *(memory_vector+3));
								_mm_storeu_ps(memory_location+16, *(memory_vector+4));
								_mm_storeu_ps(memory_location+20, *(memory_vector+5));
								_mm_storeu_ps(memory_location+24, *(memory_vector+6));
								_mm_storeu_ps(memory_location+28, *(memory_vector+7));
								_mm_storeu_ps(memory_location+32, *(memory_vector+8));
								_mm_storeu_ps(memory_location+36, *(memory_vector+9));
								_mm_storeu_ps(memory_location+40, *(memory_vector+10));
								_mm_storeu_ps(memory_location+44, *(memory_vector+11));

							}	// TODO End middle loop, general case.

							// Special case handling for rows.
							for (int i = m/ROWS_PROCESSED * ROWS_PROCESSED; i < m/VECTOR_SIZE * VECTOR_SIZE; i+= VECTOR_SIZE) {	// TODO Middle loop, rows special case (leftover rows in VECTOR_SIZE steps).
								memory_location = C + i + j*m;
								*memory_vector = _mm_loadu_ps(memory_location);

								// Unrolled k loop (inner); special case.
								col_offset_A = A + i + l*m;
								*pivot_vector = _mm_load1_ps(pivot_offset_Al);

								column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*memory_vector = _mm_add_ps(*memory_vector, product_vector);

								col_offset_A += m; // col_offset_A = A + i + (l+1)*m;
								*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

								column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*memory_vector = _mm_add_ps(*memory_vector, product_vector);

								col_offset_A += m; // col_offset_A = A + i + (l+2)*m;
								*pivot_vector = _mm_load1_ps(pivot_offset_Al +2*m);

								column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*memory_vector = _mm_add_ps(*memory_vector, product_vector);

								col_offset_A += m; // col_offset_A = A + i + (l+3)*m;
								*pivot_vector = _mm_load1_ps(pivot_offset_Al + 3*m);

								column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*memory_vector = _mm_add_ps(*memory_vector, product_vector);

								col_offset_A += m; // col_offset_A = A + i + (l+4)*m;
								*pivot_vector = _mm_load1_ps(pivot_offset_Al + 4*m);

								column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
								product_vector = _mm_mul_ps(column_vector, *pivot_vector);
								*memory_vector = _mm_add_ps(*memory_vector, product_vector);
								// End unrolled k loop (inner); special case.

								_mm_storeu_ps(memory_location, *memory_vector);
							}	// TODO End middle loop, rows special case.

							switch (NUM_EXTRA_ROWS) {
							case 0:
								break;
							case 1:
								pivot = *(pivot_offset_Al);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
								*(col_offset_C) += pivot * *edge_location_A;

								pivot = *(pivot_offset_Al + m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
								*(col_offset_C) += pivot * *edge_location_A;

								pivot = *(pivot_offset_Al + 2*m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
								*(col_offset_C) += pivot * *edge_location_A;

								pivot = *(pivot_offset_Al + 3*m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
								*(col_offset_C) += pivot * *edge_location_A;

								pivot = *(pivot_offset_Al + 4*m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+4)*m;
								*(col_offset_C) += pivot * *edge_location_A;
								break;
							case 2:
								pivot = *(pivot_offset_Al);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
								*(col_offset_C) += pivot * *edge_location_A;
								*(col_offset_C + 1) += pivot * *(edge_location_A+1);

								pivot = *(pivot_offset_Al + m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
								*(col_offset_C) += pivot * *edge_location_A;
								*(col_offset_C + 1) += pivot * *(edge_location_A+1);

								pivot = *(pivot_offset_Al + 2*m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
								*(col_offset_C) += pivot * *edge_location_A;
								*(col_offset_C + 1) += pivot * *(edge_location_A+1);

								pivot = *(pivot_offset_Al + 3*m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
								*(col_offset_C) += pivot * *edge_location_A;
								*(col_offset_C + 1) += pivot * *(edge_location_A+1);

								pivot = *(pivot_offset_Al + 4*m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+4)*m;
								*(col_offset_C) += pivot * *edge_location_A;
								*(col_offset_C + 1) += pivot * *(edge_location_A+1);
								break;
							case 3:
								pivot = *(pivot_offset_Al);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
								*(col_offset_C) += pivot * *edge_location_A;
								*(col_offset_C + 1) += pivot * *(edge_location_A+1);
								*(col_offset_C + 2) += pivot * *(edge_location_A+2);

								pivot = *(pivot_offset_Al + m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
								*(col_offset_C) += pivot * *edge_location_A;
								*(col_offset_C + 1) += pivot * *(edge_location_A+1);
								*(col_offset_C + 2) += pivot * *(edge_location_A+2);

								pivot = *(pivot_offset_Al + 2*m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
								*(col_offset_C) += pivot * *edge_location_A;
								*(col_offset_C + 1) += pivot * *(edge_location_A+1);
								*(col_offset_C + 2) += pivot * *(edge_location_A+2);

								pivot = *(pivot_offset_Al + 3*m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
								*(col_offset_C) += pivot * *edge_location_A;
								*(col_offset_C + 1) += pivot * *(edge_location_A+1);
								*(col_offset_C + 2) += pivot * *(edge_location_A+2);

								pivot = *(pivot_offset_Al + 4*m);
								edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+4)*m;
								*(col_offset_C) += pivot * *edge_location_A;
								*(col_offset_C + 1) += pivot * *(edge_location_A+1);
								*(col_offset_C + 2) += pivot * *(edge_location_A+2);
								break;
							default:
								printf("*** If you see this message, something is wrong with how row special cases are handled.");
								break;
							}
							break;
							case 6:
								for (int i = 0; i < m/ROWS_PROCESSED*ROWS_PROCESSED; i+=ROWS_PROCESSED) {	// TODO Middle loop, general case.
									memory_location = C + i + j*m;

									// Load memory_vectors from C into registers.
									*memory_vector = _mm_loadu_ps(memory_location);
									*(memory_vector+1) = _mm_loadu_ps(memory_location+4);
									*(memory_vector+2) = _mm_loadu_ps(memory_location+8);
									*(memory_vector+3) = _mm_loadu_ps(memory_location+12);
									*(memory_vector+4) = _mm_loadu_ps(memory_location+16);
									*(memory_vector+5) = _mm_loadu_ps(memory_location+20);
									*(memory_vector+6) = _mm_loadu_ps(memory_location+24);
									*(memory_vector+7) = _mm_loadu_ps(memory_location+28);
									*(memory_vector+8) = _mm_loadu_ps(memory_location+32);
									*(memory_vector+9) = _mm_loadu_ps(memory_location+36);
									*(memory_vector+10) = _mm_loadu_ps(memory_location+40);
									*(memory_vector+11) = _mm_loadu_ps(memory_location+44);

									// Begin unrolled k loop.
									col_offset_A = A + i + l*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

									col_offset_A += m;	//col_offset_A = A + i + (l+1)*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

									col_offset_A += m;	// col_offset_A = A + i + (l+2)*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al + 2*m);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									// Tail of the fully unrolled k loop for the general (48-row) middle loop:
									// each group of three statements below is one load/multiply/accumulate
									// step of C[i..i+47, j] += A[i..i+47, k] * pivot, with the 48 rows held in
									// memory_vector[0..11] (12 vectors x 4 floats).
									*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

									// Advance to the next column of A and broadcast its pivot element.
									// NOTE(review): 'pivot_offset_Al' is not among the declarations visible in the
									// function header; presumably a precomputed pointer A + j + l*m declared earlier
									// in the file — confirm, since pivot_offset_Al + t*m should address A[j + (l+t)*m].
									col_offset_A += m;	// col_offset_A = A + i + (l+3)*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al + 3*m);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

									// k = l+4: same 12-vector rank-1 update with the next pivot.
									col_offset_A += m;	// col_offset_A = A + i + (l+4)*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al + 4*m);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

									// k = l+5: final unrolled iteration of this k block.
									col_offset_A += m;	// col_offset_A = A + i + (l+5)*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al + 5*m);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

									column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

									// End unrolled k loop.

									// Write the 48 accumulated results (12 vectors) back to C[i..i+47, j].
									_mm_storeu_ps(memory_location, *memory_vector);
									_mm_storeu_ps(memory_location+4, *(memory_vector+1));
									_mm_storeu_ps(memory_location+8, *(memory_vector+2));
									_mm_storeu_ps(memory_location+12, *(memory_vector+3));
									_mm_storeu_ps(memory_location+16, *(memory_vector+4));
									_mm_storeu_ps(memory_location+20, *(memory_vector+5));
									_mm_storeu_ps(memory_location+24, *(memory_vector+6));
									_mm_storeu_ps(memory_location+28, *(memory_vector+7));
									_mm_storeu_ps(memory_location+32, *(memory_vector+8));
									_mm_storeu_ps(memory_location+36, *(memory_vector+9));
									_mm_storeu_ps(memory_location+40, *(memory_vector+10));
									_mm_storeu_ps(memory_location+44, *(memory_vector+11));

								}	// TODO End middle loop, general case.

								// Special case handling for rows.
								// Handles the rows left over after the 48-at-a-time loop, one 4-float
								// vector at a time (i runs from m/48*48 up to m/4*4). Same k unroll as
								// above (pivots at l, l+1, ..., l+5), but only a single memory_vector.
								for (int i = m/ROWS_PROCESSED * ROWS_PROCESSED; i < m/VECTOR_SIZE * VECTOR_SIZE; i+= VECTOR_SIZE) {	// TODO Middle loop, columns special case.
									memory_location = C + i + j*m;
									*memory_vector = _mm_loadu_ps(memory_location);

									// Unrolled k loop (inner); special case.
									// NOTE(review): 'pivot_offset_Al' is declared outside this view;
									// presumably A + j + l*m — confirm earlier in the file.
									col_offset_A = A + i + l*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									col_offset_A += m;	// col_offset_A = A + i + (l+1)*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									col_offset_A += m;	// col_offset_A = A + i + (l+2)*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al +2*m);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									col_offset_A += m;	// col_offset_A = A + i + (l+3)*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al + 3*m);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									col_offset_A += m;	// col_offset_A = A + i + (l+4)*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al + 4*m);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);

									col_offset_A += m;	// col_offset_A = A + i + (l+5)*m;
									*pivot_vector = _mm_load1_ps(pivot_offset_Al + 5*m);

									column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
									product_vector = _mm_mul_ps(column_vector, *pivot_vector);
									*memory_vector = _mm_add_ps(*memory_vector, product_vector);
									// End unrolled k loop (inner); special case.

									// Store the accumulated 4-row result back to C[i..i+3, j].
									_mm_storeu_ps(memory_location, *memory_vector);
								}	// TODO End middle loop, rows special case.

								// Scalar cleanup for the last m % 4 rows (those that don't fill a vector).
								// Each case repeats the same 6-deep k unroll (pivots l..l+5) as above,
								// updating 1, 2, or 3 trailing elements of column j of C.
								// col_offset_C points at C[m - NUM_EXTRA_ROWS, j] (set in the outer loop).
								switch (NUM_EXTRA_ROWS) {
								case 0:
									// m divisible by 4: nothing left over.
									break;
								case 1:
									// One leftover row: C[m-1, j] += A[j, k] * A[m-1, k] for k = l..l+5.
									pivot = *(pivot_offset_Al);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
									*(col_offset_C) += pivot * *edge_location_A;

									pivot = *(pivot_offset_Al + m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
									*(col_offset_C) += pivot * *edge_location_A;

									pivot = *(pivot_offset_Al + 2*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
									*(col_offset_C) += pivot * *edge_location_A;

									pivot = *(pivot_offset_Al + 3*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
									*(col_offset_C) += pivot * *edge_location_A;

									pivot = *(pivot_offset_Al + 4*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+4)*m;
									*(col_offset_C) += pivot * *edge_location_A;

									pivot = *(pivot_offset_Al + 5*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+5)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									break;
								case 2:
									// Two leftover rows: update C[m-2, j] and C[m-1, j] per pivot.
									pivot = *(pivot_offset_Al);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);

									pivot = *(pivot_offset_Al + m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);

									pivot = *(pivot_offset_Al + 2*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);

									pivot = *(pivot_offset_Al + 3*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);

									pivot = *(pivot_offset_Al + 4*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+4)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);

									pivot = *(pivot_offset_Al + 5*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+5)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);
									break;
								case 3:
									// Three leftover rows: update C[m-3..m-1, j] per pivot.
									pivot = *(pivot_offset_Al);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);
									*(col_offset_C + 2) += pivot * *(edge_location_A+2);

									pivot = *(pivot_offset_Al + m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);
									*(col_offset_C + 2) += pivot * *(edge_location_A+2);

									pivot = *(pivot_offset_Al + 2*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);
									*(col_offset_C + 2) += pivot * *(edge_location_A+2);

									pivot = *(pivot_offset_Al + 3*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);
									*(col_offset_C + 2) += pivot * *(edge_location_A+2);

									pivot = *(pivot_offset_Al + 4*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+4)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);
									*(col_offset_C + 2) += pivot * *(edge_location_A+2);

									pivot = *(pivot_offset_Al + 5*m);
									edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+5)*m;
									*(col_offset_C) += pivot * *edge_location_A;
									*(col_offset_C + 1) += pivot * *(edge_location_A+1);
									*(col_offset_C + 2) += pivot * *(edge_location_A+2);
									break;
								default:
									// Unreachable: NUM_EXTRA_ROWS = m % VECTOR_SIZE is always 0..3.
									printf("*** If you see this message, something is wrong with how row special cases are handled.");
									break;
								}
								// Ends the enclosing switch case (the switch's controlling expression is
								// above this view; from the structure it appears to select on the number
								// of leftover columns, with each case carrying its own k-unroll depth).
								break;
								case 7:
									// Seven leftover columns: same 48-row middle loop as the general case,
									// but the k loop is unrolled 7 deep (pivots l, l+1, ..., l+6).
									for (int i = 0; i < m/ROWS_PROCESSED*ROWS_PROCESSED; i+=ROWS_PROCESSED) {	// TODO Middle loop, general case.
										memory_location = C + i + j*m;

										// Load memory_vectors from C into registers.
										*memory_vector = _mm_loadu_ps(memory_location);
										*(memory_vector+1) = _mm_loadu_ps(memory_location+4);
										*(memory_vector+2) = _mm_loadu_ps(memory_location+8);
										*(memory_vector+3) = _mm_loadu_ps(memory_location+12);
										*(memory_vector+4) = _mm_loadu_ps(memory_location+16);
										*(memory_vector+5) = _mm_loadu_ps(memory_location+20);
										*(memory_vector+6) = _mm_loadu_ps(memory_location+24);
										*(memory_vector+7) = _mm_loadu_ps(memory_location+28);
										*(memory_vector+8) = _mm_loadu_ps(memory_location+32);
										*(memory_vector+9) = _mm_loadu_ps(memory_location+36);
										*(memory_vector+10) = _mm_loadu_ps(memory_location+40);
										*(memory_vector+11) = _mm_loadu_ps(memory_location+44);

										// Begin unrolled k loop.
										// k = l: each group of three statements is one load/multiply/accumulate
										// step of C[i..i+47, j] += A[i..i+47, k] * pivot.
										// NOTE(review): 'pivot_offset_Al' is declared outside this view;
										// presumably A + j + l*m — confirm earlier in the file.
										col_offset_A = A + i + l*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

										// k = l+1.
										col_offset_A += m;	// col_offset_A = A + i + (l+1)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

										// k = l+2.
										col_offset_A += m;	// col_offset_A = A + i + (l+2)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + 2*m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

										// k = l+3.
										col_offset_A += m;	// col_offset_A = A + i + (l+3)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + 3*m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

										// k = l+4.
										col_offset_A += m;	// col_offset_A = A + i + (l+4)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + 4*m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

										// k = l+5.
										col_offset_A += m;	// col_offset_A = A + i + (l+5)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + 5*m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

										// k = l+6: seventh and final pivot of this case (continues past this view).
										col_offset_A += m;	// col_offset_A = A + i + (l+6)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + 6*m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+4);	// A + (i+4) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+1) = _mm_add_ps(*(memory_vector+1), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+8);	// A + (i+8) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+2) = _mm_add_ps(*(memory_vector+2), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+12);	// A + (i+12) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+3) = _mm_add_ps(*(memory_vector+3), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+16);	// A + (i+16) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+4) = _mm_add_ps(*(memory_vector+4), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+20);	// A + (i+20) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+5) = _mm_add_ps(*(memory_vector+5), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+24);	// A + (i+24) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+6) = _mm_add_ps(*(memory_vector+6), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+28);	// A + (i+28) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+7) = _mm_add_ps(*(memory_vector+7), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+32);	// A + (i+32) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+8) = _mm_add_ps(*(memory_vector+8), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+36);	// A + (i+36) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+9) = _mm_add_ps(*(memory_vector+9), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+40);	// A + (i+40) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+10) = _mm_add_ps(*(memory_vector+10), product_vector);

										column_vector = _mm_loadu_ps(col_offset_A+44);	// A + (i+44) + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*(memory_vector+11) = _mm_add_ps(*(memory_vector+11), product_vector);

										// End unrolled k loop.

										_mm_storeu_ps(memory_location, *memory_vector);
										_mm_storeu_ps(memory_location+4, *(memory_vector+1));
										_mm_storeu_ps(memory_location+8, *(memory_vector+2));
										_mm_storeu_ps(memory_location+12, *(memory_vector+3));
										_mm_storeu_ps(memory_location+16, *(memory_vector+4));
										_mm_storeu_ps(memory_location+20, *(memory_vector+5));
										_mm_storeu_ps(memory_location+24, *(memory_vector+6));
										_mm_storeu_ps(memory_location+28, *(memory_vector+7));
										_mm_storeu_ps(memory_location+32, *(memory_vector+8));
										_mm_storeu_ps(memory_location+36, *(memory_vector+9));
										_mm_storeu_ps(memory_location+40, *(memory_vector+10));
										_mm_storeu_ps(memory_location+44, *(memory_vector+11));

									}	// TODO End middle loop, general case.

									// Special case handling for rows.
									for (int i = m/ROWS_PROCESSED * ROWS_PROCESSED; i < m/VECTOR_SIZE * VECTOR_SIZE; i+= VECTOR_SIZE) {	// TODO Middle loop, columns special case.
										memory_location = C + i + j*m;
										*memory_vector = _mm_loadu_ps(memory_location);

										// Unrolled k loop (inner); special case.
										col_offset_A = A + i + l*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										col_offset_A += m;	// col_offset_A = A + i + (l+1)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										col_offset_A += m;	// col_offset_A = A + i + (l+2)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + 2*m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										col_offset_A += m;	// col_offset_A = A + i + (l+3)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + 3*m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										col_offset_A += m;	// col_offset_A = A + i + (l+4)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + 4*m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										col_offset_A += m;	// col_offset_A = A + i + (l+5)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + 5*m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);

										col_offset_A += m;	// col_offset_A = A + i + (l+6)*m;
										*pivot_vector = _mm_load1_ps(pivot_offset_Al + 6*m);

										column_vector = _mm_loadu_ps(col_offset_A);	// A + i + k*m
										product_vector = _mm_mul_ps(column_vector, *pivot_vector);
										*memory_vector = _mm_add_ps(*memory_vector, product_vector);
										// End unrolled k loop (inner); special case.

										_mm_storeu_ps(memory_location, *memory_vector);
									}	// TODO End middle loop, rows special case.

									switch (NUM_EXTRA_ROWS) {
									case 0:
										break;
									case 1:
										pivot = *(pivot_offset_Al);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
										*(col_offset_C) += pivot * *edge_location_A;

										pivot = *(pivot_offset_Al + m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
										*(col_offset_C) += pivot * *edge_location_A;

										pivot = *(pivot_offset_Al + 2*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
										*(col_offset_C) += pivot * *edge_location_A;

										pivot = *(pivot_offset_Al + 3*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
										*(col_offset_C) += pivot * *edge_location_A;

										pivot = *(pivot_offset_Al + 4*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+4)*m;
										*(col_offset_C) += pivot * *edge_location_A;

										pivot = *(pivot_offset_Al + 5*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+5)*m;
										*(col_offset_C) += pivot * *edge_location_A;

										pivot = *(pivot_offset_Al + 6*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+6)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										break;
									case 2:
										pivot = *(pivot_offset_Al);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);

										pivot = *(pivot_offset_Al + m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);

										pivot = *(pivot_offset_Al + 2*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);

										pivot = *(pivot_offset_Al + 3*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);

										pivot = *(pivot_offset_Al + 4*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+4)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);

										pivot = *(pivot_offset_Al + 5*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+5)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);

										pivot = *(pivot_offset_Al + 6*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+6)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);
										break;
									case 3:
										pivot = *(pivot_offset_Al);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + l*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);
										*(col_offset_C + 2) += pivot * *(edge_location_A+2);

										pivot = *(pivot_offset_Al + m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+1)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);
										*(col_offset_C + 2) += pivot * *(edge_location_A+2);

										pivot = *(pivot_offset_Al + 2*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+2)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);
										*(col_offset_C + 2) += pivot * *(edge_location_A+2);

										pivot = *(pivot_offset_Al + 3*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+3)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);
										*(col_offset_C + 2) += pivot * *(edge_location_A+2);

										pivot = *(pivot_offset_Al + 4*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+4)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);
										*(col_offset_C + 2) += pivot * *(edge_location_A+2);

										pivot = *(pivot_offset_Al + 5*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+5)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);
										*(col_offset_C + 2) += pivot * *(edge_location_A+2);

										pivot = *(pivot_offset_Al + 6*m);
										edge_location_A = A + (m-NUM_EXTRA_ROWS) + (l+6)*m;
										*(col_offset_C) += pivot * *edge_location_A;
										*(col_offset_C + 1) += pivot * *(edge_location_A+1);
										*(col_offset_C + 2) += pivot * *(edge_location_A+2);
										break;
									default:
										printf("*** If you see this message, something is wrong with how row special cases are handled.");
										break;
									}
									break;
		}
		//}
	} // TODO End outer loop, general case.
}
