#include "autocorr_CUDA.cuh"


// Print a human-readable summary of a CUDA device's properties to stdout.
// Fix: totalGlobalMem, sharedMemPerBlock, memPitch, totalConstMem and
// textureAlignment are size_t fields; printing them with %d truncates or
// garbles values on 64-bit hosts, so they use %zu here.
void printCudaProp(const cudaDeviceProp& prop) {
    printf("Device Name : %s.\n", prop.name);
    printf("totalGlobalMem : %zu.\n", prop.totalGlobalMem);
    printf("sharedMemPerBlock : %zu.\n", prop.sharedMemPerBlock);
    printf("regsPerBlock : %d.\n", prop.regsPerBlock);
    printf("warpSize : %d.\n", prop.warpSize);
    printf("memPitch : %zu.\n", prop.memPitch);
    printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
    printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
    printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    printf("totalConstMem : %zu.\n", prop.totalConstMem);
    printf("major.minor : %d.%d.\n", prop.major, prop.minor);
    printf("clockRate : %d.\n", prop.clockRate);
    printf("textureAlignment : %zu.\n", prop.textureAlignment);
    printf("deviceOverlap : %d.\n", prop.deviceOverlap);
    printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
    printf("\n\n");
}

// Select the first CUDA device with compute capability >= 1.0 and make it
// the current device. Returns false when no usable device is found.
// Fix: the original pre-set count to 1 and ignored cudaGetDeviceCount's
// return status, so a failed runtime call (e.g. no driver) still reported
// one "available" device and proceeded.
bool Init_CUDA() {
    int count = 0;
    if (cudaGetDeviceCount(&count) != cudaSuccess || count == 0) {
        printf("There is no Device available\n");
        return false;
    }

    int dev = 0;
    for (; dev < count; dev++) {
        cudaDeviceProp prop;
        // Skip devices whose properties cannot be queried.
        if (cudaGetDeviceProperties(&prop, dev) == cudaSuccess && prop.major >= 1) {
            //printCudaProp(prop);
            break;
        }
    }

    if (dev == count) {
        printf("No device supporting CUDA 1.x\n");
        return false;
    }

    cudaSetDevice(dev);

    return true;
}


//autocorr
//autocorr
// Collapse each time step's 3 velocity components into one scalar:
// elem[s] = veloc[3s] + veloc[3s+1] + veloc[3s+2].
// Expects a single-block launch of THREAD_NUM threads; each thread
// processes time steps in a thread-strided pattern.
__global__ void autocorr_CUDA_1(double* veloc, double* elem, int nstep) {
    const int stride = THREAD_NUM;

    for (int step = threadIdx.x; step < nstep; step += stride) {
        const double* v = veloc + 3 * step;
        elem[step] = v[0] + v[1] + v[2];
    }
}

// Per-thread partial autocorrelation at lag Tau:
// acf[tid] = sum over this thread's strided pairs of elem[low]*elem[low+Tau].
// Expects a single-block launch of THREAD_NUM threads.
// Fix: the original accumulated with `acf[tid] +=` into a buffer every
// caller obtains from cudaMalloc (uninitialized) and never resets between
// calls, so results contained garbage plus leftovers from previous lags.
// Each call now writes its own partial sum.
__global__ void autocorr_CUDA_2(double *elem, int nstep, int Tau, double* acf) {
    int tid = threadIdx.x, gap = THREAD_NUM;
    double partial = 0.0;

    for (int low = tid, high = low + Tau; high < nstep; low += gap, high += gap)
        partial += elem[low] * elem[high];

    acf[tid] = partial;
}

//the number of threads should be set to "THREAD_NUM"
// Reduce the THREAD_NUM partial acf sums into result[seq], normalized by
// the number of (t, t+tau) sample pairs.
// Fix: the original `result[seq] += ...` was an unsynchronized
// read-modify-write performed by every thread in the block — a data race
// that loses updates. atomicAdd on double requires SM60+ (Pascal or newer).
__global__ void result_atom_sum_CUDA(double* result, double* acf, int nstep, int seq, int tau) {
    atomicAdd(&result[seq], acf[threadIdx.x] / (nstep - tau));
}

//the number of threads should be set to "Parameter_Natoms"
// Average the per-atom results into result_sys[tau].
// Fix: the original `result_sys[tau] += ...` raced across all
// Parameter_Natoms threads (unsynchronized read-modify-write on one
// address), losing contributions. atomicAdd on double requires SM60+.
__global__ void result_sys_sum_CUDA(double* result_sys, double* result_atom, int natoms, int tau) {
    atomicAdd(&result_sys[tau], result_atom[threadIdx.x] / natoms);
}

// Sum result_sys[0..nsteps) into *diff. Expects a single-block launch of
// THREAD_NUM threads; *diff must be zeroed by the caller beforehand.
// Fix: the original `*diff += ...` inside the loop was an unsynchronized
// read-modify-write by every thread — a data race. Each thread now folds
// its strided slice into a private partial and contributes it with a
// single atomicAdd (double atomicAdd requires SM60+).
__global__ void calculate_diff_CUDA(double* diff, double* result_sys, int nsteps) {
    int tid = threadIdx.x, gap = THREAD_NUM;
    double partial = 0.0;

    for (int i = tid; i < nsteps; i += gap)
        partial += result_sys[i];

    atomicAdd(diff, partial);
}

// Autocorrelation of the per-step velocity component sums for atom `seq`
// at lag `tau`, normalized by the number of (t, t+tau) pairs.
// Fixes: acf_gpu is zero-initialized before the accumulation kernel runs
// (cudaMalloc does not zero memory, so the previous `+=` read garbage),
// and the unused elem_cpu host buffer was removed.
double autocorr_CUDA(int tau, int seq) {
    double* veloc_gpu;
    double* acf_gpu;
    double* elem_gpu;

    double* veloc = (double*)malloc(sizeof(double) * 3 * Parameter_Nsteps);
    double* acf_cpu = (double*)malloc(sizeof(double) * THREAD_NUM);
    double result = 0;

    //transfer the vector into "C" array
    for (int i = 0; i < 3 * Parameter_Nsteps; i += 3) {
        for (int j = 0; j < 3; j++)
            veloc[i + j] = velocitiesOFeach[i / 3].mol[seq].veloc[j];
    }

    //some allocator will be used inside device
    cudaMalloc((void**)&veloc_gpu, sizeof(double) * 3 * Parameter_Nsteps);
    cudaMalloc((void**)&elem_gpu, sizeof(double) * Parameter_Nsteps);
    cudaMalloc((void**)&acf_gpu, sizeof(double) * THREAD_NUM);

    // acf_gpu is accumulated into by autocorr_CUDA_2; it must start at zero.
    cudaMemset(acf_gpu, 0, sizeof(double) * THREAD_NUM);

    //content copy to device
    cudaMemcpy(veloc_gpu, veloc, sizeof(double) * 3 * Parameter_Nsteps, cudaMemcpyHostToDevice);

    //calculate inside device
    autocorr_CUDA_1 << <1, THREAD_NUM, 0 >> > (veloc_gpu, elem_gpu, Parameter_Nsteps);
    cudaDeviceSynchronize();
    autocorr_CUDA_2 << <1, THREAD_NUM, 0 >> > (elem_gpu, Parameter_Nsteps, tau, acf_gpu);

    //blocking copy back to host; also synchronizes with the kernel above
    cudaMemcpy(acf_cpu, acf_gpu, sizeof(double) * THREAD_NUM, cudaMemcpyDeviceToHost);

    for (int i = 0; i < THREAD_NUM; i++)
        result += acf_cpu[i];

    result /= Parameter_Nsteps - tau;

    free(veloc);
    free(acf_cpu);

    cudaFree(veloc_gpu);
    cudaFree(acf_gpu);
    cudaFree(elem_gpu);

    return result;
}

// Compute the system diffusion-related sum: for every lag tau, average the
// velocity autocorrelation over all atoms into result_sys[tau], then sum
// result_sys over all lags on the device and return it.
// Fixes: all accumulation buffers (acf_gpu, result_atom_gpu,
// result_sys_gpu, diff_gpu) are now zeroed before being accumulated into —
// cudaMalloc returns uninitialized memory, so the previous code folded
// garbage and stale values from earlier iterations into every sum. The
// diff_cpu host buffer is also freed (it previously leaked).
double Diffusion_Coefficient_CUDA() {
    double* veloc_gpu;
    double* elem_gpu;
    double* acf_gpu;
    double* result_atom_gpu;
    double* result_sys_gpu;
    double* diff_gpu;

    double* veloc = (double*)malloc(sizeof(double) * 3 * Parameter_Nsteps);
    double* diff_cpu = (double*)malloc(sizeof(double));

    //some allocator will be used inside device
    cudaMalloc((void**)&veloc_gpu, sizeof(double) * 3 * Parameter_Nsteps);
    cudaMalloc((void**)&elem_gpu, sizeof(double) * Parameter_Nsteps);
    cudaMalloc((void**)&acf_gpu, sizeof(double) * THREAD_NUM);
    cudaMalloc((void**)&result_atom_gpu, sizeof(double) * Parameter_Natoms);
    cudaMalloc((void**)&result_sys_gpu, sizeof(double) * Parameter_Nsteps);
    cudaMalloc((void**)&diff_gpu, sizeof(double));

    // Accumulation targets must start at zero; cudaMalloc does not zero.
    cudaMemset(result_sys_gpu, 0, sizeof(double) * Parameter_Nsteps);
    cudaMemset(diff_gpu, 0, sizeof(double));

    for (int tau = 0; tau < Parameter_Nsteps; tau++) {
        // result_atom accumulates per atom within this tau; reset each lag.
        cudaMemset(result_atom_gpu, 0, sizeof(double) * Parameter_Natoms);

        for (int seq = 0; seq < Parameter_Natoms; seq++) {

            //transfer the vector into "C" array
            for (int i = 0; i < 3 * Parameter_Nsteps; i += 3) {
                veloc[i + 0] = velocitiesOFeach[i / 3].mol[seq].veloc[0];
                veloc[i + 1] = velocitiesOFeach[i / 3].mol[seq].veloc[1];
                veloc[i + 2] = velocitiesOFeach[i / 3].mol[seq].veloc[2];
            }

            //content copy to device
            cudaMemcpy(veloc_gpu, veloc, sizeof(double) * 3 * Parameter_Nsteps, cudaMemcpyHostToDevice);

            // Reset the per-call acf accumulator before reuse.
            cudaMemset(acf_gpu, 0, sizeof(double) * THREAD_NUM);

            //calculate inside device
            autocorr_CUDA_1 << <1, THREAD_NUM, 0 >> > (veloc_gpu, elem_gpu, Parameter_Nsteps);
            cudaDeviceSynchronize();
            autocorr_CUDA_2 << <1, THREAD_NUM, 0 >> > (elem_gpu, Parameter_Nsteps, tau, acf_gpu);
            cudaDeviceSynchronize();
            result_atom_sum_CUDA << <1, THREAD_NUM, 0 >> > (result_atom_gpu, acf_gpu, Parameter_Nsteps, seq, tau);
            cudaDeviceSynchronize();
        }

        result_sys_sum_CUDA<<<1, Parameter_Natoms, 0>>>(result_sys_gpu, result_atom_gpu, Parameter_Natoms, tau);
        cudaDeviceSynchronize();
    }

    calculate_diff_CUDA<<<1, THREAD_NUM, 0>>>(diff_gpu, result_sys_gpu, Parameter_Nsteps);

    //blocking copy; also synchronizes with the kernel above
    cudaMemcpy(diff_cpu, diff_gpu, sizeof(double), cudaMemcpyDeviceToHost);

    double diff = *diff_cpu;

    free(veloc);
    free(diff_cpu);   // was leaked before

    cudaFree(veloc_gpu);
    cudaFree(elem_gpu);
    cudaFree(acf_gpu);
    cudaFree(result_atom_gpu);
    cudaFree(result_sys_gpu);
    cudaFree(diff_gpu);

    return diff;
}

// Compute result_sys[tau] (atom-averaged autocorrelation) for every lag
// tau and return it as a heap-allocated host array of Parameter_Nsteps
// doubles; the caller owns and must free() it.
// Fixes: the final cudaMemcpy copied only sizeof(double) — a single
// element — instead of the whole array, leaving the returned buffer
// almost entirely uninitialized. Accumulation buffers are also zeroed
// before use (cudaMalloc does not zero memory).
double* autoccor_store_CUDA() {
    double* veloc_gpu;
    double* elem_gpu;
    double* acf_gpu;
    double* result_atom_gpu;
    double* result_sys_gpu;

    double* veloc = (double*)malloc(sizeof(double) * 3 * Parameter_Nsteps);
    double* result_sys_cpu = (double*)malloc(sizeof(double) * Parameter_Nsteps);

    //some allocator will be used inside device
    cudaMalloc((void**)&veloc_gpu, sizeof(double) * 3 * Parameter_Nsteps);
    cudaMalloc((void**)&elem_gpu, sizeof(double) * Parameter_Nsteps);
    cudaMalloc((void**)&acf_gpu, sizeof(double) * THREAD_NUM);
    cudaMalloc((void**)&result_atom_gpu, sizeof(double) * Parameter_Natoms);
    cudaMalloc((void**)&result_sys_gpu, sizeof(double) * Parameter_Nsteps);

    // Accumulation target must start at zero; cudaMalloc does not zero.
    cudaMemset(result_sys_gpu, 0, sizeof(double) * Parameter_Nsteps);

    for (int tau = 0; tau < Parameter_Nsteps; tau++) {
        // result_atom accumulates per atom within this tau; reset each lag.
        cudaMemset(result_atom_gpu, 0, sizeof(double) * Parameter_Natoms);

        for (int seq = 0; seq < Parameter_Natoms; seq++) {

            //transfer the vector into "C" array
            for (int i = 0; i < 3 * Parameter_Nsteps; i += 3) {
                veloc[i + 0] = velocitiesOFeach[i / 3].mol[seq].veloc[0];
                veloc[i + 1] = velocitiesOFeach[i / 3].mol[seq].veloc[1];
                veloc[i + 2] = velocitiesOFeach[i / 3].mol[seq].veloc[2];
            }

            //content copy to device
            cudaMemcpy(veloc_gpu, veloc, sizeof(double) * 3 * Parameter_Nsteps, cudaMemcpyHostToDevice);

            // Reset the per-call acf accumulator before reuse.
            cudaMemset(acf_gpu, 0, sizeof(double) * THREAD_NUM);

            //calculate inside device
            autocorr_CUDA_1 << <1, THREAD_NUM, 0 >> > (veloc_gpu, elem_gpu, Parameter_Nsteps);
            cudaDeviceSynchronize();
            autocorr_CUDA_2 << <1, THREAD_NUM, 0 >> > (elem_gpu, Parameter_Nsteps, tau, acf_gpu);
            cudaDeviceSynchronize();
            result_atom_sum_CUDA << <1, THREAD_NUM, 0 >> > (result_atom_gpu, acf_gpu, Parameter_Nsteps, seq, tau);
            cudaDeviceSynchronize();
        }

        result_sys_sum_CUDA << <1, Parameter_Natoms, 0 >> > (result_sys_gpu, result_atom_gpu, Parameter_Natoms, tau);
        cudaDeviceSynchronize();
    }

    // Copy the FULL array back, not a single element.
    cudaMemcpy(result_sys_cpu, result_sys_gpu, sizeof(double) * Parameter_Nsteps, cudaMemcpyDeviceToHost);

    free(veloc);

    cudaFree(veloc_gpu);
    cudaFree(elem_gpu);
    cudaFree(acf_gpu);
    cudaFree(result_atom_gpu);
    cudaFree(result_sys_gpu);

    return result_sys_cpu;
}



