/*
 * Copyright 1993-2006 NVIDIA Corporation.  All rights reserved.
 *
 * NOTICE TO USER:
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and
 * international Copyright laws.
 *
 * This software and the information contained herein is PROPRIETARY and
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and
 * conditions of a Non-Disclosure Agreement.  Any reproduction or
 * disclosure to any third party without the express written consent of
 * NVIDIA is prohibited.
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
 * CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
 * IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
 * OR PERFORMANCE OF THIS SOURCE CODE.
 *
 * U.S. Government End Users.  This source code is a "commercial item" as
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting  of
 * "commercial computer software" and "commercial computer software
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
 * and is provided to the U.S. Government only as a commercial end item.
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
 * source code with only those rights set forth herein.
 */

/* Matrix multiplication: C = A * B.
 * Host code.
 */

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

// includes, project
#include <cutil.h>

// includes, kernels
#include "MN_cuda_kernel.cu"
#include "MN_cuda.h"


////////////////////////////////////////////////////////////////////////////////
// declarations, forward

// CPU reference implementation (gold) for result verification; defined in a
// separate translation unit, hence the C linkage.
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);

// Host-side helpers for the CUTIL-style Matrix struct (declared in MN_cuda.h).
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);

// Launches MN_cuda_Kernel on the inputs and writes the n resulting total
// rates into vt_h. n = number of reactions (columns of S), p = number of
// species (rows of S).
void ComputeOnDevice(float *S_h, float *kf_h, float *kr_h, float *m_h, float *vt_h, int n, int p, float epsilon, float tolerance, float t);

// Built-in test problem, used when no input files are supplied on the
// command line.
// Stoichiometry matrix S: p_test (6) species rows x n_test (8) reaction
// columns, stored row-major.
float S_test[48] = {
				1,-1,-1,0,0,0,0,0,
				0,0,2,-1,-1,0,0,0,
				0,1,0,0,-1,0,1,0,
				0,1,0,0,0,0,-1,-1,
				0,0,0,0,1,-1,0,0,
				0,0,0,1,0,-1,0,0
			};
			
// Forward and reverse rate constants, one per reaction (n_test entries).
float kf_test[8] = {1,1,1,1,1,1,1,1};
float kr_test[8] = {0,1,1,1,1,0,1,0};
// Initial species amounts, one per species (p_test entries).
float m_test[6] = {1,1,1,1,1,1};
// Solver parameters passed through to the kernel.
float epsilon_test = 0.01;
float tolerance_test = 0.00001;
float t_test = 0.001;
// Problem dimensions: n_test reactions, p_test species.
int n_test = 8;
int p_test = 6;

////////////////////////////////////////////////////////////////////////////////
// Program main
//
// Usage: prog <param_file> <S_file> <kf_file> <kr_file> <m_file>
//   param_file holds 5 integers: p (rows of S), n (cols of S), epsilon,
//   tolerance, t.  With any other argument count a built-in test problem
//   is run instead.
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {

	Matrix S;
	Matrix kf;
	Matrix kr;
	Matrix m;
	Matrix vt;

	// Seed once; only matters when AllocateMatrix is asked for random init.
	srand(2012);

	if(argc == 6)
	{
		// Allocate and read in matrices from disk.
		int* params = NULL;
		unsigned int data_read = 0;
		cutReadFilei(argv[1], &params, &data_read, true);
		if(data_read != 5){
			printf("Error reading parameter file\n");
			cutFree(params);
			return 1;
		}

		// Copy the scalar parameters out BEFORE freeing params.  The
		// original code read params[2..4] after cutFree(params) -- a
		// use-after-free.
		// NOTE(review): these are stored as ints in the parameter file but
		// consumed as floats -- confirm the intended file format.
		float epsilon   = (float)params[2];
		float tolerance = (float)params[3];
		float t         = (float)params[4];

		S  = AllocateMatrix(params[0], params[1], 0);  // p x n
		kf = AllocateMatrix(params[1], 1, 0);          // n forward rates
		kr = AllocateMatrix(params[1], 1, 0);          // n reverse rates
		m  = AllocateMatrix(params[0], 1, 0);          // p species amounts
		vt = AllocateMatrix(params[1], 1, 0);          // n output rates
		cutFree(params);

		(void)ReadFile(&S, argv[2]);
		(void)ReadFile(&kf, argv[3]);
		(void)ReadFile(&kr, argv[4]);
		(void)ReadFile(&m, argv[5]);

		// n = S.width (reactions), p = S.height (species).
		ComputeOnDevice(S.elements, kf.elements, kr.elements, m.elements, vt.elements, S.width, S.height, epsilon, tolerance, t);

		FreeMatrix(&S);
		FreeMatrix(&kf);
		FreeMatrix(&kr);
		FreeMatrix(&m);
		FreeMatrix(&vt);  // was leaked in the original
	}
	else // if we can't read the input, then run the built-in test
	{
		float *vt_buf = (float*) malloc(n_test*sizeof(float));
		if(vt_buf == NULL){
			printf("Host allocation failed\n");
			return 1;
		}
		printf("unable reading parameter file\nExecuting test\n");
		ComputeOnDevice(S_test, kf_test, kr_test, m_test, vt_buf, n_test, p_test, epsilon_test, tolerance_test, t_test);
		free(vt_buf);  // was leaked in the original
	}

	return 0;
}

////////////////////////////////////////////////////////////////////////////////
//! Run the MN kernel on the device and return the total rates in vt_h.
//!
//!  S_h   : stoichiometry matrix, p rows x n columns (n*p floats, row-major)
//!  kf_h  : forward rate constants (n floats)
//!  kr_h  : reverse rate constants (n floats)
//!  m_h   : species amounts (p floats)
//!  vt_h  : caller-owned OUTPUT buffer for n floats (caller frees it)
//!  n, p  : reaction / species counts; launches one block of n threads, so
//!          n must not exceed the device's max threads per block.
//!  epsilon, tolerance, t : solver parameters forwarded to the kernel.
////////////////////////////////////////////////////////////////////////////////
void ComputeOnDevice(float *S_h, float *kf_h, float *kr_h, float *m_h, float *vt_h, int n, int p, float epsilon, float tolerance, float t)
{
	// NOTE: the original function malloc'd a local buffer over vt_h and
	// freed it at the end, so results never reached the caller and the
	// caller's allocation leaked.  vt_h is now treated as a caller-owned
	// output buffer.

	size_t vec_bytes = (size_t)n * sizeof(float);
	size_t mat_bytes = (size_t)n * p * sizeof(float);
	// m/dm logically hold one value per species (p entries) but the original
	// sized them by n; keep the larger of the two so neither host copies nor
	// any kernel indexing scheme reads out of bounds.
	int m_len = (n > p) ? n : p;

	float *S_d  = NULL;
	float *kf_d = NULL;
	float *kr_d = NULL;
	float *vf_d = NULL;
	float *vr_d = NULL;
	float *vt_d = NULL;
	float *dm_d = NULL;
	float *m_d  = NULL;
	float *a_d  = NULL;

	cudaMalloc((void**)&S_d,  mat_bytes);
	cudaMalloc((void**)&kf_d, vec_bytes);
	cudaMalloc((void**)&kr_d, vec_bytes);
	cudaMalloc((void**)&vf_d, vec_bytes);
	cudaMalloc((void**)&vr_d, vec_bytes);
	cudaMalloc((void**)&vt_d, vec_bytes);
	cudaMalloc((void**)&dm_d, (size_t)m_len * sizeof(float));
	cudaMalloc((void**)&m_d,  (size_t)m_len * sizeof(float));
	cudaMalloc((void**)&a_d,  (size_t)p * sizeof(float));

	cudaMemcpy(S_d,  S_h,  mat_bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(kf_d, kf_h, vec_bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(kr_d, kr_h, vec_bytes, cudaMemcpyHostToDevice);
	// Only p species values exist in m_h; the original copied n floats,
	// reading past the end of the host array whenever n > p.  Zero-fill the
	// (possibly larger) device buffer, then copy the p real values.
	cudaMemset(m_d, 0, (size_t)m_len * sizeof(float));
	cudaMemcpy(m_d, m_h, (size_t)p * sizeof(float), cudaMemcpyHostToDevice);

	// Initialize a_d to 1.0f.  cudaMemset is byte-wise: the original
	// cudaMemset(a_d, 1, ...) set every byte to 0x01 (float ~2.4e-38),
	// not 1.0f.  Fill on the host and copy instead.
	float *a_h = (float*) malloc((size_t)p * sizeof(float));
	for(int i = 0; i < p; i++) a_h[i] = 1.0f;
	cudaMemcpy(a_d, a_h, (size_t)p * sizeof(float), cudaMemcpyHostToDevice);
	free(a_h);

	// One block of n threads; one thread per reaction.
	dim3 dim_grid(1, 1, 1);
	dim3 dim_block(n, 1, 1);

	MN_cuda_Kernel<<<dim_grid,dim_block>>>(n, p, vf_d, vr_d, vt_d, a_d, dm_d, m_d, kf_d, kr_d, S_d, epsilon, tolerance, t);

	// Catch launch-configuration errors (e.g. n > max threads per block),
	// then synchronize to surface asynchronous execution errors.
	cudaError_t cuda_ret = cudaGetLastError();
	if(cuda_ret != cudaSuccess)
		printf("Kernel launch failed: %s\n", cudaGetErrorString(cuda_ret));
	cuda_ret = cudaDeviceSynchronize();
	if(cuda_ret != cudaSuccess)
		printf("Unable to launch/execute kernel: %s\n", cudaGetErrorString(cuda_ret));

	// Copy the result into the caller's buffer.
	cudaMemcpy(vt_h, vt_d, vec_bytes, cudaMemcpyDeviceToHost);

	for(int i = 0; i < n; i++) {
		printf("vt[%d] = %f\n", i, vt_h[i]);
	}

	cudaFree(S_d);
	cudaFree(kf_d);
	cudaFree(kr_d);
	cudaFree(vf_d);
	cudaFree(vr_d);
	cudaFree(vt_d);
	cudaFree(dm_d);
	cudaFree(m_d);
	cudaFree(a_d);
}

// Create a device-resident matrix with the same dimensions as M.
// The returned struct copies M's metadata; only the elements pointer is
// replaced with freshly allocated device memory (uninitialized).
Matrix AllocateDeviceMatrix(const Matrix M)
{
    Matrix device_copy = M;
    int bytes = sizeof(float) * M.width * M.height;
    cudaMalloc((void**)&device_copy.elements, bytes);
    return device_copy;
}

// Allocate a host matrix of dimensions height*width.
//	If init == 0, initialize to all zeroes.
//	If init == 1, perform random initialization in [0, 3].
//	If init == 2, initialize matrix parameters, but do not allocate memory.
// On allocation failure, elements is left NULL.
Matrix AllocateMatrix(int height, int width, int init)
{
    Matrix M;
    M.width = M.pitch = width;
    M.height = height;
    size_t size = (size_t)M.width * M.height;
    M.elements = NULL;

    // don't allocate memory on option 2
    if(init == 2)
		return M;

	M.elements = (float*) malloc(size*sizeof(float));
	if(M.elements == NULL)
	{
		// Caller can detect failure via the NULL elements pointer; the
		// original dereferenced the NULL pointer unconditionally.
		printf("AllocateMatrix: host allocation failed\n");
		return M;
	}

	for(size_t i = 0; i < size; i++)
	{
		// Random value in [0, 3].  The original computed rand()*3 in int
		// arithmetic, which overflows (UB) when RAND_MAX == INT_MAX;
		// promote to float before scaling.
		M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
	}
    return M;
}

// Copy a host matrix's elements to an already-allocated device matrix.
// Note: Mdevice is passed BY VALUE, so the original code's assignments to
// Mdevice.height/width/pitch mutated a local copy and had no effect on the
// caller; those dead stores are removed.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
    size_t size = (size_t)Mhost.width * Mhost.height * sizeof(float);
    cudaMemcpy(Mdevice.elements, Mhost.elements, size,
					cudaMemcpyHostToDevice);
}

// Transfer a device matrix's elements back into the host matrix's buffer.
// Both matrices must already be allocated with matching dimensions.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
    int byte_count = Mdevice.width * Mdevice.height * sizeof(float);
    cudaMemcpy(Mhost.elements, Mdevice.elements, byte_count, cudaMemcpyDeviceToHost);
}

// Release a matrix's device allocation and clear the now-stale pointer so
// accidental reuse is detectable.
void FreeDeviceMatrix(Matrix* M)
{
    float* device_ptr = M->elements;
    M->elements = NULL;
    cudaFree(device_ptr);
}

// Release a matrix's host allocation and clear the now-stale pointer so
// accidental reuse is detectable.
void FreeMatrix(Matrix* M)
{
    float* host_ptr = M->elements;
    M->elements = NULL;
    free(host_ptr);
}

// Read a floating-point matrix in from file.
// Returns 0 when exactly M->height * M->width values were read from
// file_name, and 1 otherwise.
int ReadFile(Matrix* M, char* file_name)
{
	unsigned int expected = M->height * M->width;
	unsigned int count = expected;
	cutReadFilef(file_name, &(M->elements), &count, true);
	return (count == expected) ? 0 : 1;
}

// Write a floating-point matrix to file (epsilon 0.0001f controls CUTIL's
// float-to-text precision).
void WriteFile(Matrix M, char* file_name)
{
    unsigned int element_count = M.width * M.height;
    cutWriteFilef(file_name, M.elements, element_count, 0.0001f);
}

