//////////////////////////////////////////////
// GPU-BASED ARTIFICIAL NEURAL NETWORK
// BACKPROPAGATION
// developer : ERIC JANSEN
// e-mail : eric[at]jansen[dot]net
// http://www.ericjansen.net
// ONLY WORKING UNDER LINUX
//////////////////////////////////////////////
/*
Copyright (c) 2012, Computer Engineering and Telematics,
Dept. of Electrical Engineering, Institut Teknologi Sepuluh Nopember (ITS)
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
   This product includes software developed by Computer Engineering and
   Telematics, Dept. of Electrical Engineering, Institut Teknologi Sepuluh
   Nopember.
4. Neither the name of Institut Teknologi Sepuluh Nopember (ITS) nor the
   names of its contributors may be used to endorse or promote products
   derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY ITS ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL [ERIC JANSEN] BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "gpubackprop.cuh"

// Logistic activation: maps any real input into the open interval (0, 1).
__device__ float sigmoid(float fIn) {
  float fExp = expf(-fIn);
  return 1.0f / (1.0f + fExp);
}

// Forward pass: propagates dfIn through all numl layers, writing each
// layer's activations into dfOut[layer].
//   dfIn     - input vector
//   dfOut    - per-layer activation buffers; dfOut[i] holds duDim[i] floats
//   dfWeight - flattened weight array (layout per the index expression below)
//   duDim    - neurons per layer
//   numl     - number of layers
// NOTE(review): each thread adds at most ONE product into fSum (its own x
// column) plus the bias term, so fSum is not the full weighted sum over the
// previous layer unless duDim[i-1] == 1 -- confirm whether a reduction
// across x was intended here.
// NOTE(review): the input-copy loop below is bounded by numl, but the input
// layer has duDim[0] elements -- verify numl == duDim[0] in all callers,
// otherwise this reads/writes out of bounds.
// NOTE(review): layer i reads dfOut[i-1] written by other threads with no
// synchronization in between -- confirm this race is acceptable.
__device__ void ffwd(float* dfIn,float** dfOut,float* dfWeight,
    unsigned int* duDim,unsigned int numl) {
  float fSum;

  // Global 2D thread coordinates: x indexes the source (previous-layer)
  // neuron, y the destination (current-layer) neuron.
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

  // Layer 0 is simply the input vector.
  for (size_t i=0; i<numl; ++i)
    dfOut[0][i] = dfIn[i];

  for (size_t i=1; i<numl; ++i) {
    if (y < duDim[i]) {
      fSum = 0.0;
      if (x < duDim[i-1])
        fSum += dfOut[i-1][x]/*[(i-1)*duDim[i-1]+x]*/ * dfWeight[i*duDim[i-1]+y*duDim[i-1]+x];
      // Bias weight: stored after the duDim[i-1] regular weights of neuron y.
      fSum += dfWeight[i*duDim[i-1]+y*duDim[i-1]+duDim[i-1]];
      dfOut[i][y] = sigmoid(fSum);
    }
  }
}

// Prediction kernel: runs one forward pass of the network.
//   dfIn     - input vector
//   dfOut    - per-layer activation buffers (device pointer table)
//   dfWeight - flattened weight array
//   duDim    - neurons per layer
//   numl     - number of layers
// BUG FIX (maintainability): the body was a byte-for-byte duplicate of the
// __device__ helper ffwd(); delegate to it so the forward-pass logic lives
// in exactly one place and cannot drift between training and prediction.
__global__ void predictout(float* dfIn,float** dfOut,float* dfWeight,
    unsigned int* duDim,unsigned int numl) {
  ffwd(dfIn, dfOut, dfWeight, duDim, numl);
}

// Output-layer error terms for the sigmoid/MSE combination:
//   delta = o * (1 - o) * (target - o)
// written into the dfDelta slots belonging to the last layer.
__device__ void odelta(float** dfOut,float* dfDelta,float* dfTgt,
    unsigned int* duDim,unsigned int numl)
{
  const unsigned int uLast = numl - 1;
  for (int k = 0; k < duDim[uLast]; ++k) {
    const float fO = dfOut[uLast][k];
    dfDelta[uLast*duDim[uLast]+k] = fO * (1.0f - fO) * (dfTgt[k] - fO);
  }
}

// Hidden-layer error terms, computed back to front:
//   delta_i(y) = out_i(y) * (1 - out_i(y)) * sum_x delta_{i+1}(x) * w(x,y)
// NOTE(review): as in ffwd(), each thread adds only its own x term into
// fSum, so the sum over layer i+1 is incomplete unless duDim[i+1] == 1 --
// confirm whether a reduction across x was intended.
// NOTE(review): iteration i reads dfDelta entries of layer i+1 written by
// other threads with no synchronization -- confirm this race is acceptable.
__device__ void hdelta(float** dfOut,float* dfDelta,float* dfWeight,
    unsigned int* duDim,unsigned int numl)
{
  // x: next-layer (source of delta) neuron, y: this-layer neuron.
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

  // Layers numl-2 down to 1: layer 0 is the input (no delta) and layer
  // numl-1 is handled by odelta().
  for (int i=numl-2; i>0; --i) {
    if (y < duDim[i]) {
      float fSum = 0.0;
      if (x < duDim[i+1])
        fSum += dfDelta[(i+1)*duDim[i+1]+x] * 
          dfWeight[(i+1)*duDim[i]+x*duDim[i]+y];
      dfDelta[i*duDim[i]+y] = dfOut[i][y] * 
        (1-dfOut[i][y]) * fSum;
    }
  }
}

// Momentum term: w += alpha * previous-weight-change, for both the regular
// weights (x columns) and the bias slot stored at offset duDim[i-1].
// NOTE(review): nothing in this file calls momentum(), and bpgt() accepts
// fAlpha without using it -- confirm whether this was meant to be invoked
// from bpgt() as part of the weight update.
__device__ void momentum(float* dfWeight,float* dfPrevDwt,
    float fAlpha,unsigned int* duDim,unsigned int numl)
{
  // x: source-neuron index, y: destination-neuron index (2D launch).
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

  for (int i=1; i<numl; ++i) {
    if (y < duDim[i]) {
      if (x < duDim[i-1])
        dfWeight[i*duDim[i-1]+y*duDim[i-1]+x] += fAlpha *
          dfPrevDwt[i*duDim[i-1]+y*duDim[i-1]+x];
      // Bias update.
      // NOTE(review): this line is outside the x guard, so every x thread
      // (not just one) increments the same bias slot -- looks like an
      // unintended repeated update; verify.
      dfWeight[i*duDim[i-1]+y*duDim[i-1]+duDim[i-1]] += fAlpha *
        dfPrevDwt[i*duDim[i-1]+y*duDim[i-1]+duDim[i-1]];
    }
  }
}

// Half the sum of squared errors over the output layer:
//   0.5 * sum_i (tgt[i] - out[i])^2
//   tgt   - target vector (duDim[numl-1] entries)
//   dfOut - per-layer activations; dfOut[numl-1] is the output layer
// BUG FIX: the loop bound was `i < duDim[i-1]`, which reads duDim[-1] on
// the first iteration (out of bounds) and compares against a bound that
// changes with i.  The output layer has duDim[numl-1] entries.
__device__ float mse(float *tgt,float **dfOut,unsigned int* duDim,
    unsigned int numl) {
  float fmse = 0.0f;
  for (int i=0; i<duDim[numl-1]; ++i) {
    float fErr = tgt[i] - dfOut[numl-1][i];
    fmse += fErr * fErr;
  }
  return fmse/2;
}

// Returns activation i of the final (output) layer.
__device__ float out(int i,float **dfOut,unsigned int numl) {
  float* fLast = dfOut[numl-1];
  return fLast[i];
}

// One backpropagation training step for a single sample: forward pass,
// output-layer deltas, hidden-layer deltas, then the weight update
// w += fBeta * delta(dst) * out(src) (plus the bias slot at offset
// duDim[i-1]); the applied change is remembered in dfPrevDwt.
// NOTE(review): fAlpha (momentum coefficient) and uHeight are accepted but
// never used here, and momentum() is never called -- confirm whether the
// momentum term was meant to be applied after the update below.
// NOTE(review): every thread runs ffwd/odelta/hdelta and then updates
// shared weights with no synchronization between phases -- the deltas a
// thread reads may not yet have been written by the responsible thread.
__device__ void bpgt(float* dfIn,float* dfTgt,float** dfOut,
    float* dfWeight,float* dfPrevDwt,float* dfDelta,
    unsigned int* duDim,unsigned int numl,
    unsigned int uHeight,float fAlpha,float fBeta)
{
  // x: source-neuron index, y: destination-neuron index (2D launch).
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

  ffwd(dfIn,dfOut,dfWeight,duDim,numl);
  odelta(dfOut,dfDelta,dfTgt,duDim,numl);
  hdelta(dfOut,dfDelta,dfWeight,duDim,numl);

  for (int i=1; i<numl; ++i) {
    if (y < duDim[i]) {
      if (x < duDim[i-1]) {
        dfPrevDwt[i*duDim[i-1]+y*duDim[i-1]+x] = fBeta *
          dfDelta[i*duDim[i]+y] * dfOut[i-1][x];
        dfWeight[i*duDim[i-1]+y*duDim[i-1]+x] +=
          dfPrevDwt[i*duDim[i-1]+y*duDim[i-1]+x];
      }
      // Bias weight: no source activation factor (implicit input of 1).
      // NOTE(review): this is outside the x guard, so multiple x threads
      // update the same bias slot concurrently -- verify intent.
      dfPrevDwt[i*duDim[i-1]+y*duDim[i-1]+duDim[i-1]] = fBeta *
        dfDelta[i*duDim[i]+y];
      dfWeight[i*duDim[i-1]+y*duDim[i-1]+duDim[i-1]] +=
        dfPrevDwt[i*duDim[i-1]+y*duDim[i-1]+duDim[i-1]];
    }
  }
}

// Training kernel: every thread with index x in [0, it] performs one
// backpropagation pass (bpgt) over the same sample and shared weights.
// NOTE(review): threads run their passes with no inter-pass ordering or
// synchronization -- confirm this matches the intended iteration scheme.
// BUG FIX: the original body kept empty `if (...) ;` statements left over
// from commented-out host-side std::cout/exit calls (which cannot run in
// device code).  The progress check `x % (it/10) == 0` performed an integer
// division by zero whenever it < 10 -- undefined behavior on the device --
// and has been removed along with the other dead statements.
__global__ void backprop(float* dfIn,float* dfTgt,float** dfOut,
    float* dfWeight,float* dfPrevDwt,float* dfDelta,
    unsigned int* duDim,unsigned int numl,unsigned int uHeight,
    float fAlpha,float fBeta,float fThresh,unsigned long it)
{
  unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;

  if (x <= it) {
    bpgt(dfIn,dfTgt,dfOut,dfWeight,dfPrevDwt,dfDelta,duDim,numl,uHeight,
        fAlpha,fBeta);

    if (mse(dfTgt,dfOut,duDim,numl) < fThresh) {
      // TODO(review): signal convergence back to the host (e.g. via a
      // device flag the host polls) instead of the removed std::cout/exit.
    }
  }
}

// Host-side prediction driver: mirrors the network state onto the device,
// launches one forward pass (predictout) and copies the per-layer
// activations back into hfOut.
//   hfIn     - input vector (huDim[0] floats)
//   hfOut    - per-layer host output buffers; hfOut[i] holds huDim[i] floats
//   hfWeight - flattened host weight array
//   huDim    - neurons per layer; numl - layer count; uH - grid height hint
void predict(const float* hfIn,float** hfOut,float* hfWeight,
    unsigned int* huDim,unsigned int numl,unsigned int uH)
{
  float *dfIn,**dfOut,*dfWeight;
  unsigned int *duDim;
  unsigned int wSize = 0;

  // Total weight count.  BUG FIX: the loop previously ran i < numl and read
  // huDim[numl], one element past the end of the layer-size array, making
  // wSize depend on garbage memory.
  // NOTE(review): this fan_in*fan_out count does not include the bias slots
  // the kernels index (offset duDim[i-1]) -- confirm the intended layout.
  for (size_t i=0; i+1<numl; ++i)
    wSize += (huDim[i] * huDim[i+1]);

  cudaMalloc((void**)&dfIn,huDim[0] * sizeof(float));
  cudaMalloc((void**)&dfOut,numl * sizeof(float*));

  // Per-layer device buffers, staged in a host-side pointer array and then
  // copied as a table into dfOut.
  float **dhfOut = (float**) malloc(sizeof(float *)*numl);
  for (size_t i=0; i<numl; ++i)
    cudaMalloc((void**)&dhfOut[i],huDim[i] * sizeof(float));

  cudaMalloc((void**)&dfWeight,wSize * sizeof(float));
  cudaMalloc((void**)&duDim,numl * sizeof(unsigned int));

  cudaMemcpy(duDim,huDim,numl*sizeof(unsigned int),cudaMemcpyHostToDevice);
  cudaMemcpy(dfIn,hfIn,huDim[0]*sizeof(float),cudaMemcpyHostToDevice);

  // BUG FIX: this full pointer-table copy was previously repeated numl
  // times inside a loop; a single copy suffices.
  cudaMemcpy(dfOut,dhfOut,numl*sizeof(float*),cudaMemcpyHostToDevice);

  for (size_t i=0; i<numl; ++i)
    cudaMemcpy(dhfOut[i],hfOut[i],huDim[i]*sizeof(float),
        cudaMemcpyHostToDevice);
  cudaMemcpy(dfWeight,hfWeight,wSize*sizeof(float),cudaMemcpyHostToDevice);

  dim3 block(16,16);
  dim3 grid((huDim[0]+block.x-1)/block.x,(uH+block.y-1)/block.y);
  predictout<<<grid,block>>>(dfIn,dfOut,dfWeight,duDim,numl);

  // cudaMemcpy on the default stream synchronizes with the kernel, so the
  // copies below observe the finished results.
  for (size_t i=0; i<numl; ++i)
    cudaMemcpy(hfOut[i],dhfOut[i],huDim[i]*sizeof(float),
        cudaMemcpyDeviceToHost);

  cudaFree(dfIn);
  for (size_t i=0; i<numl; ++i)
    cudaFree(dhfOut[i]);
  cudaFree(dfOut);
  cudaFree(dfWeight);
  cudaFree(duDim);
  free(dhfOut);  // BUG FIX: the host staging array was previously leaked
}

// Host-side training driver: mirrors all network state onto the device,
// launches the backprop training kernel, then copies the activations and
// the updated weights back to the host.
//   hfIn/hfTgt        - input and target vectors
//   hfOut             - per-layer host activation buffers
//   hfWeight/hfPrevDwt- flattened weight and previous-change arrays
//   hfDelta           - per-neuron delta buffer (uSize floats)
//   huDim/numl        - layer sizes and layer count
//   fAlpha/fBeta      - momentum coefficient / learning rate
//   fThresh/it        - convergence threshold / iteration count
void callbp(const float* hfIn,float* hfTgt,float** hfOut,
    float* hfWeight,float* hfPrevDwt,float* hfDelta,
    unsigned int* huDim,unsigned int numl,
    unsigned int uHeight,
    float fAlpha,float fBeta,float fThresh,unsigned long it)
{
  float *dfIn,*dfTgt,**dfOut,*dfWeight,*dfPrevDwt,*dfDelta;
  unsigned int *duDim;
  unsigned int uSize = 0,wSize = 0;

  // Total neuron count (size of the delta buffer).
  for (size_t i=0; i<numl; ++i)
    uSize += huDim[i];

  // Total weight count.  BUG FIX: the loop previously ran i < numl and read
  // huDim[numl], one element past the end of the layer-size array.
  // NOTE(review): this fan_in*fan_out count does not include the bias slots
  // the kernels index (offset duDim[i-1]) -- confirm the intended layout.
  for (size_t i=0; i+1<numl; ++i)
    wSize += (huDim[i] * huDim[i+1]);

  cudaMalloc((void**)&dfIn,huDim[0] * sizeof(float));
  cudaMalloc((void**)&dfTgt,huDim[numl-1] * sizeof(float));
  cudaMalloc((void**)&dfOut,numl * sizeof(float*));

  // Per-layer device buffers, staged in a host-side pointer array and then
  // copied as a table into dfOut.
  float **dhfOut = (float**) malloc(sizeof(float *)*numl);
  for (size_t i=0; i<numl; ++i)
    cudaMalloc((void**)&dhfOut[i],huDim[i] * sizeof(float));

  cudaMalloc((void**)&dfDelta,uSize * sizeof(float));
  cudaMalloc((void**)&dfWeight,wSize * sizeof(float));
  cudaMalloc((void**)&dfPrevDwt,wSize * sizeof(float));
  cudaMalloc((void**)&duDim,numl * sizeof(unsigned int));

  cudaMemcpy(duDim,huDim,numl*sizeof(unsigned int),cudaMemcpyHostToDevice);
  cudaMemcpy(dfIn,hfIn,huDim[0]*sizeof(float),cudaMemcpyHostToDevice);
  cudaMemcpy(dfTgt,hfTgt,huDim[numl-1]*sizeof(float),cudaMemcpyHostToDevice);

  // BUG FIX: this full pointer-table copy was previously repeated numl
  // times inside a loop; a single copy suffices.
  cudaMemcpy(dfOut,dhfOut,numl*sizeof(float*),cudaMemcpyHostToDevice);

  for (size_t i=0; i<numl; ++i)
    cudaMemcpy(dhfOut[i],hfOut[i],huDim[i]*sizeof(float),
        cudaMemcpyHostToDevice);

  cudaMemcpy(dfDelta,hfDelta,uSize*sizeof(float),cudaMemcpyHostToDevice);
  cudaMemcpy(dfWeight,hfWeight,wSize*sizeof(float),cudaMemcpyHostToDevice);
  cudaMemcpy(dfPrevDwt,hfPrevDwt,wSize*sizeof(float),cudaMemcpyHostToDevice);

  dim3 block(16,16);
  dim3 grid((huDim[0]+block.x-1)/block.x,(1+block.y-1)/block.y);
  backprop<<<grid,block>>>(dfIn,dfTgt,dfOut,dfWeight,dfPrevDwt,dfDelta,
      duDim,numl,uHeight,fAlpha,fBeta,fThresh,it);

  // cudaMemcpy on the default stream synchronizes with the kernel, so the
  // copies below observe the finished results.
  for (size_t i=0; i<numl; ++i)
    cudaMemcpy(hfOut[i],dhfOut[i],huDim[i]*sizeof(float),
        cudaMemcpyDeviceToHost);
  cudaMemcpy(hfWeight,dfWeight,wSize*sizeof(float),cudaMemcpyDeviceToHost);

  cudaFree(dfIn);
  cudaFree(dfTgt);
  for (size_t i=0; i<numl; ++i)
    cudaFree(dhfOut[i]);
  cudaFree(dfOut);
  cudaFree(dfDelta);
  cudaFree(dfWeight);
  cudaFree(dfPrevDwt);
  cudaFree(duDim);
  free(dhfOut);  // BUG FIX: the host staging array was previously leaked
}

// Returns a pseudo-random float in approximately [-1, 1).
// BUG FIX: srand(time(NULL)) was previously called on EVERY invocation, so
// all calls within the same wall-clock second re-seeded identically and
// returned the SAME value -- every initial network weight was equal.
// Seed the generator exactly once instead.
float _rand() {
	static bool bSeeded = false;
	if (!bSeeded) {
		srand((unsigned)(time(NULL)));
		bSeeded = true;
	}
	return (float)(rand())/(RAND_MAX/2)-1;
}

// Allocates and initialises all host-side network state: a private copy of
// the layer sizes, per-layer activation buffers, the delta buffer, and the
// weight / previous-weight-change arrays.  Weights start as uniform random
// values in roughly [-1, 1); previous changes start at zero.
__gpuBackProp::__gpuBackProp(unsigned int numL,unsigned int uH,
    unsigned int* huDim,float fA,float fB,float fT,long lIt):
      m_numLayers(numL),m_Height(uH),
      m_fAlpha(fA),m_fBeta(fB) ,m_fThresh(fT),m_lIt(lIt)
{
  // Private copy of the caller's layer-size array.
  m_huDim = new unsigned int[m_numLayers];
  for (unsigned int i=0; i<m_numLayers; ++i)
     m_huDim[i] = huDim[i];

  // One delta slot per neuron across all layers.
  unsigned int uSize = 0;
  for (unsigned int i=0; i<m_numLayers; ++i)
    uSize += m_huDim[i];
  m_hfDelta = new float[uSize];

  // Per-layer activation buffers.
  m_hfOut = new float*[m_numLayers];
  for (unsigned int i=0; i<m_numLayers; ++i)
    m_hfOut[i] =  new float[m_huDim[i]];

  // Total weight count.  BUG FIX: the loop previously ran i < m_numLayers
  // and read m_huDim[m_numLayers], one element past the end of the array,
  // making wSize -- and hence both allocations below -- garbage-dependent.
  unsigned int wSize = 0;
  for (unsigned int i=0; i+1<m_numLayers; ++i)
    wSize += (m_huDim[i] * m_huDim[i+1]);
  std::cout << wSize << "\n";
  m_hfWeight = new float[wSize];
  m_hfPrevDwt = new float[wSize];

  // Random initial weights; zeroed momentum history.
  std::generate(m_hfWeight,m_hfWeight+wSize,_rand);
  std::fill(m_hfPrevDwt,m_hfPrevDwt+wSize,0.0f);
}

// Releases every host-side buffer allocated in the constructor.
// (delete[] on a null pointer is a no-op, so no explicit guards needed.)
__gpuBackProp::~__gpuBackProp() {
  for (int i=0; i<m_numLayers; ++i)
    delete [] m_hfOut[i];
  delete [] m_hfOut;
  delete [] m_huDim;
  delete [] m_hfPrevDwt;
  delete [] m_hfWeight;
  delete [] m_hfDelta;
}

// Trains the network on one (input, target) pair by forwarding all member
// state to the host-side backpropagation driver.
void __gpuBackProp::operator()(float* hfIn,float* hfTgt) {
  callbp(hfIn, hfTgt,
         m_hfOut, m_hfWeight, m_hfPrevDwt, m_hfDelta,
         m_huDim, m_numLayers, m_Height,
         m_fAlpha, m_fBeta, m_fThresh, m_lIt);
}

// Runs a forward pass on hfIn and prints each output value.
// BUG FIX: predict() -- a full GPU round-trip (allocate, copy, launch,
// copy back, free) -- was previously re-invoked INSIDE the print loop with
// identical arguments, repeating the same pass m_Height times.  One pass
// fills m_hfOut with all outputs, so it is hoisted out of the loop.
void __gpuBackProp::operator()(float* hfIn) {
  std::cout << "\nPredicting test data....\n\n";
  predict(hfIn,m_hfOut,m_hfWeight,m_huDim,m_numLayers,m_Height);
  for (int i=0; i<m_Height; ++i)
    std::cout << out(i) << std::endl;
}


