#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <cublas.h>
#include <R.h>
#include "cuda_utils.h"

#define DEBUG 1
#define VERBOSE 0
#define index(i,j,ld) (((j)*(ld))+(i))

// Computes the squared reconstruction error of one element of a against
// the current rank-projdim approximation u'v.
//   elem_a:  the element a[i][j]
//   row_u:   projdim-length column of u belonging to row i
//   row_v:   projdim-length column of v belonging to column j
//   projdim: length of both row_u and row_v (must be the same size)
//   beta:    regularization weight (currently unused in the error term)
// Returns (elem_a - dot(row_u, row_v))^2.
static __device__ __host__ float recon_error(float elem_a, float *row_u, float *row_v, int projdim, float beta) 
{
    int i;
    // BUG FIX: dotprod was read uninitialized before the first +=
    float dotprod = 0.0f;

    // dot product of the two projection columns
    for (i = 0; i < projdim; i++) {
        dotprod += row_u[i] * row_v[i];
    }

    // squared residual; plain float multiply avoids the double-precision
    // round trip of pow()
    float diff = elem_a - dotprod;
    return diff * diff;
}

// In-place parallel sum reduction over the shared-memory array val.
// Preconditions:
//   - size is a power of two, 2 <= size <= 1024
//   - blockDim.x >= size and ALL threads of the block call this together
//     (contains __syncthreads(), so it must not be called divergently)
//   - the caller must __syncthreads() after filling val, before calling
// Result: val[0] holds the sum of val[0..size-1]; val[1..] is clobbered.
__device__ void reduce_sum(float *val, int size) {
    int tid = threadIdx.x;
    // BUG FIX: the original topped out at 512, silently dropping half the
    // data for the 1024-thread blocks the host code launches
    if (size >= 1024) {if(tid<512){val[tid] += val[tid+512];} __syncthreads();}
    if (size >= 512) {if(tid<256){val[tid] += val[tid+256];} __syncthreads();}
    if (size >= 256) {if(tid<128){val[tid] += val[tid+128];} __syncthreads();}
    if (size >= 128) {if(tid<64){val[tid] += val[tid+64];} __syncthreads();}
    // final warp: Volta+ no longer guarantees lockstep execution, so each
    // step reads into a register and separates the read from the write
    // with __syncwarp() (threads 0..31 are one full warp)
    if (tid < 32) {
        float v;
        if (size >= 64) {v = val[tid+32]; __syncwarp(); val[tid] += v; __syncwarp();}
        if (size >= 32) {v = val[tid+16]; __syncwarp(); val[tid] += v; __syncwarp();}
        if (size >= 16) {v = val[tid+8];  __syncwarp(); val[tid] += v; __syncwarp();}
        if (size >= 8)  {v = val[tid+4];  __syncwarp(); val[tid] += v; __syncwarp();}
        if (size >= 4)  {v = val[tid+2];  __syncwarp(); val[tid] += v; __syncwarp();}
        if (size >= 2)  {v = val[tid+1];  __syncwarp(); val[tid] += v; __syncwarp();}
    }
}

/*
// sum reduction
__device__ void reduce_sum(float *val, int size) {
        int tid = threadIdx.x;
        if (size >= 512) {val[tid] += val[tid+256]; __syncthreads();}
        if (size >= 256) {val[tid] += val[tid+128]; __syncthreads();}
        if (size >= 128) {val[tid] += val[tid+64]; __syncthreads();}
        if (size >= 64) {val[tid] += val[tid+32];}
        if (size >= 32) {val[tid] += val[tid+16];}
        if (size >= 16) {val[tid] += val[tid+8];}
        if (size >= 8)  {val[tid] += val[tid+4];}
        if (size >= 4)  {val[tid] += val[tid+2];}
        if (size >= 2)  {val[tid] += val[tid+1];}
}
*/

// dynamically-sized shared memory, carved up by svdKernel into three
// rowsa-sized slices (col_a / err_a / err_sum); its byte size is the
// third kernel-launch argument supplied by the host code below
extern __shared__ float array[];

/*
 * CUDA approximate SVD kernel via steepest gradient descent:
 * performs one descent step toward a = u'v for the data matrix a,
 * updating u' and v in place and accumulating the total squared
 * reconstruction error into *gpu_err_total.
 *
 * Launch contract (see host code):
 *   - one block per column of a, one thread per row element
 *   - dynamic shared memory: at least 3*rowsa floats
 *   - *gpu_err_total must be zeroed by the host before each launch
 *   - only square matrices with power-of-2 sizes are supported,
 *     approximation rank no larger than 512
 *   - uses atomicAdd(float*): requires SM20+
 */
__global__ static void svdKernel(float *gpu_a, int rowsa, int colsa, float *gpu_u, float *gpu_v, float *gpu_err_total, int projdim, float alpha)
{

    // init vars
    int i;
    int u_ind, v_ind;
    const float beta = 0.01f; // regularization weight -- tuning TBD

    // bid indexes a column of a, tid a row element within that column
    const int bid = blockIdx.x; 
    const int tid = threadIdx.x;

    // carve the dynamic shared array into three rowsa-sized slices
    float *col_a = (float*)array;
    float *err_a = (float*)&col_a[rowsa];
    float *err_sum = (float*)&err_a[rowsa];

    // NOTE: the old in-kernel reset of *gpu_err_total (done by thread (0,0))
    // was removed -- it raced with other blocks accumulating into the total.
    // The host zeroes the total before every launch instead.

#if DEBUG
    #if VERBOSE
    if (tid==0 && blockIdx.x==0) {
        printf("value of gpu_a:\n");
        for (i=0; i<rowsa*colsa; i++) {
            printf("%0.3f\n", gpu_a[i]);
        }
        printf("\n");
    }
    #endif
#endif

    // stage this block's column of a in shared memory,
    // one element per thread
    col_a[tid] = gpu_a[index(tid,bid,rowsa)];

#if DEBUG
    #if VERBOSE
    if (bid==1) {
        if (tid==0) printf("value of col_a:\n");
        printf("%0.3f   ", col_a[tid]);
    }
    #endif
#endif
        // start of the relevant column of u (one per row of a)
        // and of v (one per column of a)
        u_ind = tid*projdim;
        v_ind = bid*projdim;     

        // per-element reconstruction error under the current u and v;
        // a second copy goes into err_sum, which the reduction destroys
        err_a[tid] = recon_error(col_a[tid], gpu_u+u_ind, gpu_v+v_ind, projdim, beta);
        err_sum[tid] = err_a[tid];

#if DEBUG
    #if VERBOSE
        printf("[%d,%d] val of err_a is: %0.3f\n", bid, tid, err_a[tid]);
    #endif
#endif

        // BUG FIX: every thread's write to err_sum must be visible before
        // the tree reduction reads across threads
        __syncthreads();

        // sum this block's errors into err_sum[0]
        reduce_sum(err_sum, colsa);

        // fold the block's total into the global error total.
        // BUG FIX: the original plain "+=" raced across blocks; atomicAdd
        // serializes the one-per-block accumulations correctly
        if (tid==0) {
            atomicAdd(gpu_err_total, err_sum[0]);
#if DEBUG
    #if VERBOSE
            printf("\n[%d,%d]: err_total = %f\n", bid, tid, *(gpu_err_total));
    #endif
#endif
        }

        // gradient update of u: each thread updates one column of u.
        // Can't use shared memory here -- each thread touches a different
        // column of u and v.
        // NOTE(review): different blocks update the same columns of gpu_u
        // concurrently (and all threads of a block update the block's gpu_v
        // column) without synchronization -- presumably tolerated as a
        // best-effort stochastic update; confirm this is intended.
        // TODO: random sampling -- this increases the performance of an
        //       ensemble algorithm (right now, all elements in U and V
        //       are updated deterministically)
        for (i=0; i<projdim; i++) {
            gpu_u[index(i,tid,projdim)] = gpu_u[index(i,tid,projdim)] + 2.0f*alpha*(err_a[tid]*gpu_v[index(i,bid,projdim)] - beta*gpu_u[index(i,tid,projdim)]);
        }

        // then update v using the freshly updated value of u
        for (i=0; i<projdim; i++) {
            gpu_v[index(i,bid,projdim)] = gpu_v[index(i,bid,projdim)] + 2.0f*alpha*(err_a[tid]*gpu_u[index(i,tid,projdim)] - beta*gpu_v[index(i,bid,projdim)]);
        }

#if DEBUG
    #if VERBOSE
        if (bid == 0 && tid == 0) {
            printf("[%d,%d] U matrix:\n",bid,tid);
            for (i=0; i<projdim*rowsa; i++) {
                printf("%0.3f  ",gpu_u[i]);
            }
            printf("\n");
        }
        if (bid == 0 && tid == 0) {
            printf("[%d,%d] V matrix:\n",bid,tid);
            for (i=0; i<projdim*colsa; i++) {
                printf("%0.3f  ",gpu_v[i]);
            }
            printf("\n");
        }
    #endif
#endif
}

// Host driver: iterates svdKernel until the total reconstruction error
// falls inside (-thresh, thresh), then copies the factors u and v back.
// note: a is the transposed original data (copy optimized for column major)
// When DEBUG is set this compiles as a standalone main() with fabricated
// data; otherwise it is the extern "C" entry point gpuSvd().
#if DEBUG
int main()
#else
extern "C" 
{
void gpuSvd(float *a, int rowsa, int colsa, float *u_approx, float *v_approx, float *u, float *v, int projdim, float thresh, float alpha)
#endif
{
    // device buffers plus a 1-element device scalar for the running error
    float *gpu_a, *gpu_u, *gpu_v, *gpu_err_total;
    float err_total = 1e11f; // sentinel above any plausible thresh so the loop runs

#if DEBUG
    // fabricate some data on the host
    // this is the largest we can do
    // error lower bound appears to be around 50
    int rowsa = 1024;
    int colsa = 1024;
    int projdim = 32;
    float *a = (float*)malloc(sizeof(float)*rowsa*colsa);
    float *u = (float*)malloc(sizeof(float)*projdim*rowsa);
    float *v = (float*)malloc(sizeof(float)*projdim*colsa);
    float *u_approx = (float*)malloc(sizeof(float)*projdim*rowsa);
    float *v_approx = (float*)malloc(sizeof(float)*projdim*colsa);
    for (int i=0; i<rowsa*colsa; i++)
        a[i] = 0.5;
    for (int i=0; i<projdim*rowsa; i++)
        u_approx[i] = 1;
    for (int i=0; i<projdim*colsa; i++)
        v_approx[i] = 1;

    // default learning and threshold parameters
    // NOTE(review): alpha is negative -- apparently required because the
    // kernel's update rule adds (rather than subtracts) the step; verify
    float alpha = -0.000001;
    float thresh = 25;
#endif

    // allocate device arrays via the legacy cuBLAS helpers
    cublasInit();
    cublasAlloc(rowsa*colsa, sizeof(float), (void**)&gpu_a);
    cublasAlloc(projdim*rowsa, sizeof(float), (void**)&gpu_u);
    cublasAlloc(projdim*colsa, sizeof(float), (void**)&gpu_v);
    cublasAlloc(1, sizeof(float), (void**)&gpu_err_total);

    // copy the data (column-major, as cuBLAS expects)
    cublasSetMatrix(rowsa, colsa, sizeof(float), a, rowsa, gpu_a, rowsa);
    cublasSetMatrix(projdim, rowsa, sizeof(float), u_approx, projdim, gpu_u, projdim);
    cublasSetMatrix(projdim, colsa, sizeof(float), v_approx, projdim, gpu_v, projdim);
    cublasSetMatrix(1,1,sizeof(float),&err_total,1,gpu_err_total,1);

    // one block per column, one thread per row element
    // (svdKernel assumes a square power-of-2 matrix)
    dim3 blocks (rowsa, 1, 1);
    dim3 threads(colsa, 1, 1);

    // the kernel needs 3*rowsa floats (col_a, err_a, err_sum);
    // 4*rowsa is allocated, leaving one array of slack
    size_t shared_mem = 4*rowsa*sizeof(float);
    int iter=0;
    // iterate until the accumulated error is within +/- thresh
    while (err_total > thresh || err_total < -(thresh))
    {
        printf("Iter %d, Total Error %f\n", iter, err_total);

        // zero the device-side error total before every descent step
        err_total=0;
        cublasSetMatrix(1,1,sizeof(float),&err_total,1,gpu_err_total,1);
        
        // one steepest-descent step; populates gpu_err_total
        svdKernel<<<blocks, threads, shared_mem>>>(gpu_a, rowsa, colsa, gpu_u, gpu_v, gpu_err_total, projdim, alpha);

        // blocking copy of the new total error back to the host
        cublasGetMatrix(1,1,sizeof(float),gpu_err_total, 1, &err_total, 1);
        iter++;
    
    }

    // retrieve the factor matrices into u and v
    cublasGetMatrix(projdim, rowsa, sizeof(float), gpu_u, projdim, u, projdim);
    cublasGetMatrix(projdim, colsa, sizeof(float), gpu_v, projdim, v, projdim);

    // print the heads of the result matrices
#if DEBUG
    int head = 10;
    printf("A matrix:\n");
    for (int i=0; i<head; i++) {
        printf("%0.3f  ", a[i]);
    }
    printf("\n------------------------\n");
    printf("U matrix:\n");
    for (int i=0; i<head; i++) {
        printf("%0.3f  ", u[i]);
    }
    printf("\n------------------------\n");
    printf("V matrix:\n");
    for (int i=0; i<head; i++) {
        printf("%0.3f   ", v[i]);
    }
    printf("\n");
#endif

    // free device memory
    // BUG FIX: gpu_err_total was previously leaked
    cublasFree(gpu_a);
    cublasFree(gpu_u);
    cublasFree(gpu_v);
    cublasFree(gpu_err_total);
    cublasShutdown();

#if DEBUG
    // BUG FIX: the fabricated host buffers were previously leaked,
    // and main() fell off the end without a return value
    free(a);
    free(u);
    free(v);
    free(u_approx);
    free(v_approx);
    return 0;
#endif
}

#if DEBUG==0
}
#endif

#if DEBUG==0
// R wrapper: R passes every argument by reference, so the scalar
// parameters arrive as pointers and are dereferenced here before
// forwarding to gpuSvd (matrix/vector arguments pass straight through)
extern "C" {
void RgpuSvd(float *a, int *rowsa, int *colsa, float *u_approx, float *v_approx, float *u, float *v, int *projdim, float *thresh, float *alpha) {
    gpuSvd(a, *rowsa, *colsa, u_approx, v_approx, u, v, *projdim, *thresh, *alpha);
}
}
#endif
