/*
 * Copyright 1993-2006 NVIDIA Corporation.  All rights reserved.
 *
 * NOTICE TO USER:   
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and 
 * international Copyright laws.  
 *
 * This software and the information contained herein is PROPRIETARY and 
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and 
 * conditions of a Non-Disclosure Agreement.  Any reproduction or 
 * disclosure to any third party without the express written consent of 
 * NVIDIA is prohibited.     
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE 
 * CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR 
 * IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH 
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF 
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.   
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, 
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS 
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE 
 * OR PERFORMANCE OF THIS SOURCE CODE.  
 *
 * U.S. Government End Users.  This source code is a "commercial item" as 
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting  of 
 * "commercial computer software" and "commercial computer software 
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) 
 * and is provided to the U.S. Government only as a commercial end item.  
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through 
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the 
 * source code with only those rights set forth herein.
 */

/* Mass-action reaction-network integration kernel (NOT matrix multiplication —
 * the "C = A * B" description was left over from the SDK template this file
 * was derived from).
 * Device code.
 */

#ifndef _MN_CUDA_KERNEL_H_
#define _MN_CUDA_KERNEL_H_

#include <stdio.h>
#include "MN_cuda.h"

/*
	Possible optimizations
		- Matrix compaction
		- kf, kr in constant memory
		- assign kf, kr, dm to registers
		- vf, vr, m, dm, a in shared memory
		- __pow
		- reduction max
		- successive approximation
		- analyze memory access patterns
		- multiple steps before a max operation
*/

/* Iteratively relaxes a mass-action reaction network: integrates
 * d m/dt = S^T-style coupling of net rates, where the forward/reverse rates
 * vf, vr are products of concentrations m raised to their stoichiometric
 * coefficients in S.
 *
 * Launch assumptions (NOTE(review) — confirm against the host launcher,
 * which is not visible here):
 *   - a SINGLE block: __syncthreads() only synchronizes one block, so the
 *     producer/consumer pattern on vf, vr and m is only safe intra-block;
 *   - blockDim.x == n with n >= p: every thread indexes vf/vr/kf/kr with an
 *     unguarded tx, and only threads tx < p update dm/m/a.
 *
 * Parameters:
 *   n          number of reactions (row width of S)
 *   p          number of species (rows of S)
 *   vf, vr     forward/reverse reaction rates, length n (scratch/output)
 *   vt         output: net rates vf - vr, length n
 *   a          per-species adaptive step scale, length p (updated in place)
 *   dm         per-species net production rate, length p (scratch)
 *   m          species concentrations, length p (updated in place)
 *   kf, kr     forward/reverse rate constants, length n
 *   S          stoichiometry matrix, p x n, row-major (S[j*n + i])
 *   epsilon    regularizer for the adaptive step scale
 *   tolerance  currently unused in this kernel
 *   t          integration time step
 */
__global__ void MN_cuda_Kernel(int n, int p, float *vf, float *vr, float *vt, float *a, float *dm, float *m, float *kf, float *kr, float *S, float epsilon, float tolerance, float t)
{
	int tx = threadIdx.x;

	/* Fixed iteration count; consider parameterizing it or using
	 * `tolerance` for an actual convergence test. */
	for(int time = 0; time < 1000; time++)
	{
		/* Mass-action rates: vf = kf * prod_j m_j^|S_ji| over reactants
		 * (S < 0); vr = kr * prod_j m_j^S_ji over products (S > 0).
		 * An exponent of 0 contributes a factor of 1. */
		vf[tx] = kf[tx];
		vr[tx] = kr[tx];

		for(int j = 0; j < p; j++) {
			float s = S[j*n + tx];
			/* powf keeps the whole computation in single precision; the
			 * original double-precision pow() forced a float->double->float
			 * round trip on every inner iteration. */
			vf[tx] = vf[tx] * powf(m[j], (s < 0.0f) ? -s : 0.0f);
			vr[tx] = vr[tx] * powf(m[j], (s > 0.0f) ? s : 0.0f);
		}
		__syncthreads();	/* all rates written before dm reads them */

		if(tx < p)
		{
			/* dm_tx = sum_j S[tx][j] * (vf_j - vr_j): net production rate. */
			dm[tx] = 0.0f;
			for(int j = 0; j < n; j++) {
				dm[tx] = dm[tx] + S[tx*n + j]*(vf[j] - vr[j]);
			}

			/* Explicit Euler step scaled by the per-species factor a[tx];
			 * clamp concentrations to a small positive floor. */
			m[tx] = m[tx] + a[tx]*dm[tx]*t;
			m[tx] = (m[tx] > 0.0f) ? m[tx] : 0.00001f;
			/* fabsf replaces abs(): plain abs() is the integer overload in C
			 * and would truncate dm[tx] toward zero before the division. */
			a[tx] = epsilon + 1.0f/(fabsf(dm[tx]) + epsilon);
		}
		__syncthreads();	/* m fully updated before the next iteration reads it */
	}
	/* Net rate output. (A dead store `vt[threadIdx.x] = threadIdx.x` at
	 * kernel entry was removed: it was unconditionally overwritten here.) */
	vt[tx] = vf[tx] - vr[tx];
}

#endif // #ifndef _MN_CUDA_KERNEL_H_
