/*
 * Copyright 1993-2006 NVIDIA Corporation.  All rights reserved.
 *
 * NOTICE TO USER:   
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and 
 * international Copyright laws.  
 *
 * This software and the information contained herein is PROPRIETARY and 
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and 
 * conditions of a Non-Disclosure Agreement.  Any reproduction or 
 * disclosure to any third party without the express written consent of 
 * NVIDIA is prohibited.     
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE 
 * CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR 
 * IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH 
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF 
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.   
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, 
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS 
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE 
 * OR PERFORMANCE OF THIS SOURCE CODE.  
 *
 * U.S. Government End Users.  This source code is a "commercial item" as 
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting  of 
 * "commercial computer software" and "commercial computer software 
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) 
 * and is provided to the U.S. Government only as a commercial end item.  
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through 
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the 
 * source code with only those rights set forth herein.
 */

/* Matrix multiplication: C = A * B.
 * Device code.
 */

#ifndef _MN_CUDA_KERNEL_H_
#define _MN_CUDA_KERNEL_H_

#include <stdio.h>
#include "MN_cuda.h"

/*
	Possible optimizations
		- Matrix compaction - (kernel 3)
		- Precompute Sf and Sr - (kernel 3)
		- kf, kr in constant memory - not implemented
		- assign kf, kr, dm to registers (kernel 2)
		- vf, vr, m, dm, a in shared memory (kernel 2)
		- __pow (kernel 2)
		- reduction max (kernel 2)
		- multiple concurrent time steps - (kernel 4)
		- analyze memory access patterns
		- multiple steps before a max operation (kernel 2)
*/

/*
 * Kernel 1: naive baseline — all state (vf, vr, m, dm, a) lives in global memory.
 *
 * Integrates dm/dt = S * (vf - vr) with an adaptive per-metabolite step factor
 * `a` until the reduced dm value drops below `tolerance` or `max_iterations`
 * steps have run, then writes the net flux per reaction into vt.
 *
 * Launch: a single block with blockDim.x >= max(n, p) threads.
 * Preconditions: dm must have at least p+2 elements (two padding slots are
 * zeroed when p is odd so the max-reduction pairing stays in bounds).
 */
__global__ void MN_cuda_Kernel(int max_iterations, int n, int p, float *vf, float *vr, float *vt, float *a, float *dm, float *m, float *kf, float *kr, float *S, float epsilon, float tolerance, float t)
{	
	int tx = threadIdx.x;
	int loop_count = 0;
	
	// Seed the convergence cell so the first while-test passes.
	if (tx == 0) dm[0] = tolerance + 1.0f;
	__syncthreads(); // fix: make the seed visible to every thread before the first test
	
	while (dm[0] > tolerance && loop_count < max_iterations)
	{
		if (tx < n) { // one thread per reaction: recompute forward/reverse fluxes
			vf[tx] = kf[tx];
			vr[tx] = kr[tx];
			
			for (int j = 0; j < p; j++) {
				// Negative stoichiometry: metabolite j is a substrate of the forward reaction.
				vf[tx] = vf[tx]*powf(m[j], (S[j*n+tx] < 0) ? -S[j*n+tx] : 0.0f);
				// Positive stoichiometry: metabolite j is a substrate of the reverse reaction.
				vr[tx] = vr[tx]*powf(m[j], (S[j*n+tx] > 0) ? S[j*n+tx] : 0.0f);
			}
		}
		__syncthreads();
		if (tx < p) // one thread per metabolite: Euler step on the concentration
		{
			dm[tx] = 0.0f;
			for (int j = 0; j < n; j++) {
				dm[tx] = dm[tx] + S[tx*n+j]*(vf[j]-vr[j]);
			}
			
			m[tx] = m[tx] + a[tx]*dm[tx]*t;
			m[tx] = (m[tx] > 0.0f) ? m[tx] : 0.00001f; // clamp: concentrations must stay positive
			// fix: fabsf — the original `abs` is the integer overload and truncates dm.
			a[tx] = epsilon + 1.0f/(fabsf(dm[tx]) + epsilon); // adaptive step factor
		}
		__syncthreads();
		loop_count++;
		
		// Max-reduction over dm[0..p). When p is odd the first stride rounds up,
		// so dm[p]/dm[p+1] are zero-padded to keep the pairing in bounds.
		// NOTE(review): this maximizes the *signed* dm (with zero padding), not
		// |dm| — confirm that is the intended convergence criterion.
		unsigned int stride;
		if (p % 2 != 0)
		{
			stride = p/2 + 1;
			if (tx == 0)
			{
				dm[p] = 0.0f;
				dm[p+1] = 0.0f;
			}
		} else {
			stride = p/2;
		}
		for (; stride > 0; stride >>= 1) {
			stride = (stride % 2 != 0 && stride != 1) ? stride + 1 : stride; // keep stride even so pairs line up
			__syncthreads(); 
			if (tx < stride)
				dm[tx] = (dm[tx] > dm[tx+stride]) ? dm[tx] : dm[tx+stride];
		}
		__syncthreads(); // fix: thread 0's final write to dm[0] must be visible before the next while-test
	}
	// fix: guard the tail write — for blockDim.x > n the original read past vf/vr
	// and wrote past vt.
	if (tx < n) vt[tx] = vf[tx]-vr[tx]; // net flux per reaction
}



/*
 * Kernel 2: same algorithm as kernel 1, optimized — vf, vr, m and dm are staged
 * in dynamic shared memory, kf/kr/a/dm live in per-thread registers, fluxes use
 * the fast __powf intrinsic, and LOOP_PER_MAX integration steps run between
 * each (expensive) convergence reduction.
 *
 * Launch: a single block with blockDim.x >= max(n, p) threads.
 * Dynamic shared memory: (2*n + p + (p+2)) floats — dm_s carries two padding
 * slots for the case where p is odd.
 */
__global__ void MN_cuda_Kernel2(int max_iterations, int n, int p, float *vt, float *m_i, float *kf_i, float *kr_i, float *S, float epsilon, float tolerance, float t)
{	
	extern __shared__ float shared[];
	
	float *vf = &shared[0];       // forward fluxes, n floats
	float *vr = &shared[n];       // reverse fluxes, n floats
	float *m = &shared[2*n];      // metabolite concentrations, p floats
	float *dm_s = &shared[2*n+p]; // dm staging for the reduction, p+2 floats (+2 in case p is odd)
	
	int tx = threadIdx.x;
	// fix: guard the loads and stores with tx < n — the original read kf_i/kr_i
	// out of bounds and wrote past the end of vf/vr (corrupting vr and m in
	// shared memory) whenever blockDim.x > n.
	float kf = 0.0f;
	float kr = 0.0f;
	if (tx < n) {
		kf = kf_i[tx];
		kr = kr_i[tx];
		vf[tx] = kf;
		vr[tx] = kr;
	}
	float a = 1.0f;  // adaptive step factor (valid for tx < p)
	float dm = 0.0f; // concentration derivative (valid for tx < p)
	int loop_count = 0;
	
	if (tx < p) m[tx] = m_i[tx];
	// Seed the convergence cell so the first while-test passes.
	if (tx == 0) dm_s[0] = tolerance + 1.0f;
	__syncthreads(); // fix: shared-memory init must be visible before the first test
	
	while (dm_s[0] > tolerance && loop_count < max_iterations)
	{
		for (int time = 0; time < LOOP_PER_MAX; time++) // several time steps per max operation
		{
			if (tx < n) { // one thread per reaction: recompute fluxes
				vf[tx] = kf;
				vr[tx] = kr;
				
				for (int j = 0; j < p; j++) {
					// Negative stoichiometry: substrate of the forward reaction.
					vf[tx] = vf[tx]*__powf(m[j], (S[j*n+tx] < 0) ? -S[j*n+tx] : 0.0f);
					// Positive stoichiometry: substrate of the reverse reaction.
					vr[tx] = vr[tx]*__powf(m[j], (S[j*n+tx] > 0) ? S[j*n+tx] : 0.0f);
				}
			}
			__syncthreads();
			if (tx < p) // one thread per metabolite: Euler step
			{
				dm = 0.0f;
				for (int j = 0; j < n; j++) {
					dm = dm + S[tx*n+j]*(vf[j]-vr[j]);
				}
				
				m[tx] = m[tx] + a*dm*t;
				m[tx] = (m[tx] > 0.0f) ? m[tx] : 0.00001f; // clamp: concentrations must stay positive
				// fix: fabsf — the original `abs` is the integer overload and truncates dm.
				a = epsilon + 1.0f/(fabsf(dm) + epsilon);
			}
			__syncthreads();
			loop_count++;
		}

		// Stage the register back to shared memory for the reduction.
		if (tx < p) dm_s[tx] = dm;
		__syncthreads();
		
		// Max-reduction over dm_s[0..p); zero-pad two slots when p is odd so the
		// rounded-up stride pairing stays in bounds.
		// NOTE(review): reduces *signed* dm, not |dm| — confirm intended criterion.
		unsigned int stride;
		if (p % 2 != 0)
		{
			stride = p/2 + 1;
			if (tx == 0)
			{
				dm_s[p] = 0.0f;
				dm_s[p+1] = 0.0f;
			}
		} else {
			stride = p/2;
		}
		for (; stride > 0; stride >>= 1) {
			stride = (stride % 2 != 0 && stride != 1) ? stride + 1 : stride; // keep stride even so pairs line up
			__syncthreads(); 
			if (tx < stride)
				dm_s[tx] = (dm_s[tx] > dm_s[tx+stride]) ? dm_s[tx] : dm_s[tx+stride];
		}
		__syncthreads(); // fix: reduced value in dm_s[0] must be visible before the next while-test
	}
	// fix: guard the tail write — for blockDim.x > n the original read/wrote out of bounds.
	if (tx < n) vt[tx] = vf[tx]-vr[tx]; // net flux per reaction
}



/*
 * Kernel 3: same algorithm as kernel 2, with the stoichiometry matrix S split
 * into compacted (CSR/CSC-style) forward/reverse halves so only non-zero
 * entries are touched:
 *   - *_cm (column-major / per-reaction) arrays drive the flux update,
 *   - *_rm (row-major / per-metabolite) arrays drive the dm accumulation.
 * ptr[] holds the start offsets (ptr[i+1]-ptr[i] = non-zeros in column/row i),
 * idx[] the row/column index of each entry, data[] the exponent/coefficient.
 *
 * Launch: a single block with blockDim.x >= max(n, p) threads.
 * Dynamic shared memory: (2*n + p + (p+2)) floats — dm_s carries two padding
 * slots for the case where p is odd.
 */
__global__ void MN_cuda_Kernel3(int max_iterations, int n, int p, 
								float *vt, float *m_i, float *kf_i,	float *kr_i, 
								float *Sf_ptr_rm, float *Sf_idx_rm, float *Sf_data_rm, 
								float *Sr_ptr_rm, float *Sr_idx_rm, float *Sr_data_rm, 
								float *Sf_ptr_cm, float *Sf_idx_cm, float *Sf_data_cm, 
								float *Sr_ptr_cm, float *Sr_idx_cm, float *Sr_data_cm, 
								float epsilon, float tolerance, float t)
{	
	extern __shared__ float shared[];
	
	float *vf = &shared[0];       // forward fluxes, n floats
	float *vr = &shared[n];       // reverse fluxes, n floats
	float *m = &shared[2*n];      // metabolite concentrations, p floats
	float *dm_s = &shared[2*n+p]; // dm staging for the reduction, p+2 floats (+2 in case p is odd)
	
	int tx = threadIdx.x;
	// fix: guard the loads and stores with tx < n — the original read kf_i/kr_i
	// out of bounds and wrote past the end of vf/vr (corrupting vr and m in
	// shared memory) whenever blockDim.x > n.
	float kf = 0.0f;
	float kr = 0.0f;
	if (tx < n) {
		kf = kf_i[tx];
		kr = kr_i[tx];
		vf[tx] = kf;
		vr[tx] = kr;
	}
	float a = 1.0f;  // adaptive step factor (valid for tx < p)
	float dm = 0.0f; // concentration derivative (valid for tx < p)
	int loop_count = 0;
	int f_start;
	int f_end;
	int r_start;
	int r_end;
	
	if (tx < p) m[tx] = m_i[tx];
	// Seed the convergence cell so the first while-test passes.
	if (tx == 0) dm_s[0] = tolerance + 1.0f;
	__syncthreads(); // fix for the race the original flagged: init must be visible before the first test
	
	while (dm_s[0] > tolerance && loop_count < max_iterations)
	{
		for (int time = 0; time < LOOP_PER_MAX; time++) // several time steps per max operation
		{
			if (tx < n) { // only n threads update the fluxes
				vf[tx] = kf;
				vr[tx] = kr;
				
				f_start = (int)Sf_ptr_cm[tx];   // offset of column tx in the compact data array
				f_end = (int)Sf_ptr_cm[tx+1];   // next offset; the difference is the column's non-zero count
				for (int j = f_start; j < f_end; j++) {
					// Sf_idx_cm[j] is the row (metabolite) of this entry in the original S.
					vf[tx] = vf[tx]*__powf(m[(int)Sf_idx_cm[j]], Sf_data_cm[j]);
				}
				// Same walk over the compacted reverse-reaction matrix.
				r_start = (int)Sr_ptr_cm[tx];
				r_end = (int)Sr_ptr_cm[tx+1];
				for (int j = r_start; j < r_end; j++) {
					vr[tx] = vr[tx]*__powf(m[(int)Sr_idx_cm[j]], Sr_data_cm[j]);
				}
				
			}
			__syncthreads();
			if (tx < p) // one thread per metabolite: Euler step
			{
				dm = 0.0f;
				// Same compact-matrix walk, but over the row-major layout.
				r_start = (int)Sr_ptr_rm[tx];
				r_end = (int)Sr_ptr_rm[tx+1];
				for (int j = r_start; j < r_end; j++) {
					dm = dm + Sr_data_rm[j]*(vf[(int)Sr_idx_rm[j]]-vr[(int)Sr_idx_rm[j]]);
				}
				f_start = (int)Sf_ptr_rm[tx];
				f_end = (int)Sf_ptr_rm[tx+1];
				for (int j = f_start; j < f_end; j++) {
					dm = dm - Sf_data_rm[j]*(vf[(int)Sf_idx_rm[j]]-vr[(int)Sf_idx_rm[j]]);
				}
				
				m[tx] = m[tx] + a*dm*t;
				m[tx] = (m[tx] > 0.0f) ? m[tx] : 0.00001f; // clamp: concentrations must stay positive
				// fix: fabsf — the original `abs` is the integer overload and truncates dm.
				a = epsilon + 1.0f/(fabsf(dm) + epsilon);
			}
			__syncthreads();
			loop_count++;
		}

		// Stage the register back to shared memory for the reduction.
		if (tx < p) dm_s[tx] = dm;
		__syncthreads();
		
		// Max-reduction over dm_s[0..p); zero-pad two slots when p is odd so the
		// rounded-up stride pairing stays in bounds.
		// NOTE(review): reduces *signed* dm, not |dm| — confirm intended criterion.
		unsigned int stride;
		if (p % 2 != 0)
		{
			stride = p/2 + 1;
			if (tx == 0)
			{
				dm_s[p] = 0.0f;
				dm_s[p+1] = 0.0f;
			}
		} else {
			stride = p/2;
		}
		for (; stride > 0; stride >>= 1) {
			stride = (stride % 2 != 0 && stride != 1) ? stride + 1 : stride; // keep stride even so pairs line up
			__syncthreads(); 
			if (tx < stride)
				dm_s[tx] = (dm_s[tx] > dm_s[tx+stride]) ? dm_s[tx] : dm_s[tx+stride];
		}
		__syncthreads(); // fix: reduced value in dm_s[0] must be visible before the next while-test
	}
	// fix: guard the tail write — for blockDim.x > n the original read/wrote out of bounds.
	if (tx < n) vt[tx] = vf[tx]-vr[tx]; // net flux per reaction
}

#endif // #ifndef _MN_CUDA_KERNEL_H_
