// retrace.cu
//
// GPU-accelerated retrace step

#include <stdio.h>
#include <cuda.h>
#include <math.h>


// Device-resident buffers shared by cuda_init_(), retrace_() and
// cuda_destroy_(); allocated in cuda_init_() (sizes noted below are the
// element counts passed to cudaMalloc there) and freed in cuda_destroy_().
double *dev_log_micro_t, *dev_log_micro_s; // [nmat x nseg] per-segment microscopic data of the logged track
double *dev_log_sig_t, *dev_log_sig_s;     // [nseg] macroscopic totals of the logged track
double *dev_sig_t, *dev_sig_s; // Perturbed macroscopic, [nseg x nmodel], filled by mtm_kernel
double *dev_dens;              // [nmodel x nmat] transposed perturbed density matrix
double *dev_log_dtc;           // [nseg] -- presumably a track-length factor; used inside exp() in wgt_kernel (TODO confirm)
int *dev_log_loss;             // [nseg] per-segment event code (0 = no event; see wgt_kernel branches)
double *dev_wgts, *dev_tallies; // [nseg x nmodel] step weights; [nmodel x 3] tally accumulator

/*
    CUDA NOTES (Fermi and newer):
    Run multiples of 32 threads per block
    Run no more than 1024 threads in each dimension
    Up to 8 resident blocks per multiprocessor
    Up to 1536 resident threads per multiprocessor
*/

// Allocate the persistent device buffers and upload the perturbed density
// matrix (transposed) plus a cleared tally accumulator.
//
// Fortran-callable, hence the trailing underscore and pointer-to-int counts:
//   nseg_p   - number of track segments
//   nmodel_p - number of perturbed models
//   nmat_p   - number of materials
//   dens     - perturbed densities, [nmat x nmat-major] i.e. dens[imat + nmat*imodel]
extern "C" void cuda_init_(
    int * nseg_p,
    int * nmodel_p,
    int * nmat_p,
    double * dens)
{
    size_t nmat = *nmat_p;
    size_t nseg = *nseg_p;
    size_t nmodel = *nmodel_p;
    size_t sdouble = sizeof(double);
    //
    // Heap buffer instead of a variable-length array: VLAs are a compiler
    // extension in C++, and nmodel*nmat doubles can overflow the stack for
    // large problems.
    double * dens_trans = new double[nmodel * nmat];
    //
    // Transpose the perturbed density matrix to avoid strided access in
    // mtm_kernel: dens is material-major, dens_trans is model-major.
    for (size_t imodel = 0; imodel < nmodel; ++imodel)
    {
        for (size_t imat = 0; imat < nmat; ++imat)
        {
            dens_trans[imodel + nmodel * imat] =
                dens[imat + nmat * imodel];
        }
    }
    //
    cudaMalloc((void**) &dev_log_micro_t, nmat * nseg * sdouble);
    cudaMalloc((void**) &dev_log_micro_s, nmat * nseg * sdouble);
    cudaMalloc((void**) &dev_log_sig_t, nseg * sdouble);
    cudaMalloc((void**) &dev_log_sig_s, nseg * sdouble);
    cudaMalloc((void**) &dev_sig_t, nseg * nmodel * sdouble);
    cudaMalloc((void**) &dev_sig_s, nseg * nmodel * sdouble);
    cudaMalloc((void**) &dev_dens, nmat * nmodel * sdouble);
    cudaMalloc((void**) &dev_log_dtc, nseg * sdouble);
    cudaMalloc((void**) &dev_log_loss, nseg * sizeof(int));
    cudaMalloc((void**) &dev_wgts, nseg * nmodel * sdouble);
    cudaMalloc((void**) &dev_tallies, nmodel * 3 * sdouble);
    //
    cudaMemcpy(dev_dens, dens_trans, nmat*nmodel*sdouble, cudaMemcpyHostToDevice);
    // An all-zero byte pattern is IEEE-754 +0.0, so a byte-wise clear is a
    // valid way to zero the double tally buffer (replaces the host-side
    // zeros[] staging array and its extra H2D copy).
    cudaMemset(dev_tallies, 0, nmodel * 3 * sdouble);
    //
    delete [] dens_trans;
    //
    // Surface any allocation/copy failure instead of silently continuing
    // with invalid device pointers.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cuda_init_: CUDA error: %s\n", cudaGetErrorString(err));
    }
}

// Copy the accumulated tallies back to the host and release every device
// buffer allocated in cuda_init_().
//
//   nmodel_p        - number of perturbed models (Fortran pointer-to-int)
//   retrace_tallies - host output, nmodel x 3 doubles
extern "C" void cuda_destroy_(int * nmodel_p, double * retrace_tallies)
{
    int nmodel = *nmodel_p;
    // Blocking copy: this also synchronizes with the asynchronous kernel
    // launches issued by retrace_() before the tallies are read.
    cudaMemcpy(retrace_tallies, dev_tallies, nmodel*3*sizeof(double), cudaMemcpyDeviceToHost);
    //
    cudaFree(dev_log_micro_t);
    cudaFree(dev_log_micro_s);
    cudaFree(dev_log_sig_t);
    cudaFree(dev_log_sig_s);
    cudaFree(dev_sig_t);
    cudaFree(dev_sig_s);
    cudaFree(dev_dens);
    cudaFree(dev_log_dtc);  // BUG FIX: was never freed (device memory leak)
    cudaFree(dev_log_loss); // BUG FIX: was never freed (device memory leak)
    cudaFree(dev_wgts);
    cudaFree(dev_tallies);
}

// Matrix product: dev_sig[imodel + nmodel*iseg] =
//     sum_imat dev_log[imat + nmat*iseg] * dev_dens[imodel + nmodel*imat]
//
// One thread per (iseg, imodel) output entry. Expected launch: a 1-D grid
// covering at least nseg*nmodel threads; the tail is bounds-checked. With
// consecutive threads sharing iseg and varying imodel, the dev_dens and
// dev_sig accesses are contiguous within a warp.
__global__ void mtm_kernel(
    double * dev_log,
    double * dev_dens,
    double * dev_sig,
    int nseg,
    int nmodel,
    int nmat)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const int seg = gid / nmodel;
    if (seg >= nseg) return;  // grid tail guard
    const int model = gid % nmodel;

    double acc = 0.;
    for (int mat = 0; mat < nmat; ++mat)
    {
        acc += dev_log[mat + nmat * seg] * dev_dens[model + nmodel * mat];
    }
    dev_sig[model + nmodel * seg] = acc;
}

// Per-step weight multiplier for each (segment, model) pair.
//
// One thread per entry of wgts[nseg x nmodel]; expected launch is a 1-D
// grid covering at least nseg*nmodel threads (tail is bounds-checked).
// All event types share the exponential attenuation factor
//   exp(log_dtc * (log_sig_t - Sig_t));
// the event code in log_loss (0, 1..2, or >=3 -- presumably collision /
// loss categories, TODO confirm against the logging side) selects how the
// scattering/total cross sections scale it.
__global__ void wgt_kernel(
    double * Sig_t,
    double * Sig_s,
    double * log_sig_t,
    double * log_sig_s,
    double * log_dtc,
    int * log_loss,
    double * wgts,
    int nseg,
    int nmodel)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const int seg = gid / nmodel;
    if (seg >= nseg) return;  // grid tail guard

    // Attenuation factor common to every branch below (hoisted; same
    // expression, same value).
    const double atten = exp(log_dtc[seg] * (log_sig_t[seg] - Sig_t[gid]));
    const int event = log_loss[seg];

    double w;
    if (event == 0)
    {
        w = Sig_s[gid] / log_sig_s[seg] * atten;
    }
    else if (event < 3)
    {
        w = atten;
    }
    else
    {
        w = (Sig_t[gid] - Sig_s[gid]) /
            (log_sig_t[seg] - log_sig_s[seg]) * atten;
    }
    wgts[gid] = w;
}

// Accumulate tallies: one thread per model walks the segments in order,
// multiplying per-step weights into a running product; whenever a segment
// records an event (log_loss > 0) the product is deposited into that
// event's tally column and the running weight resets to 1.
//
// Expected launch: at least nmodel threads (the tail is now guarded); each
// thread owns one imodel row of retrace_tallies, so no atomics are needed.
// log_loss values are presumably 1..3 -- the tally buffer is 3 columns
// wide, so anything larger would index out of bounds (TODO confirm).
__global__ void tally_kernel(
    double * wgts,
    int * log_loss,
    double * retrace_tallies,
    int nseg,
    int nmodel)
{
    int imodel = threadIdx.x + blockIdx.x * blockDim.x;
    int iseg, iloss;
    double wgt = 1;

    // Grid-tail guard: previously missing, so any launch with more than
    // nmodel threads read and wrote out of bounds.
    if (imodel >= nmodel) return;

    for (iseg = 0; iseg < nseg; ++iseg)
    {
        iloss = log_loss[iseg];
        wgt *= wgts[imodel + nmodel * iseg];
        if (iloss > 0)
        {
            retrace_tallies[imodel + nmodel*(iloss-1)] += wgt;
            wgt = 1;
        }
    }
}

// Run one GPU retrace pass: upload the logged track data, rebuild the
// perturbed macroscopic cross sections, compute per-step weights, and
// accumulate them into the device-resident tallies.
//
// Fortran-callable. retrace_tallies accumulates on the device
// (dev_tallies) and is only copied back in cuda_destroy_(); the host
// argument is unused here but kept for the Fortran interface.
extern "C" void retrace_(
    int * nseg_p, int * nmodel_p, int * nmat_p,
    double * log_micro_t, double * log_micro_s,
    double * log_sig_t, double * log_sig_s, double * log_dtc,
    int * log_loss, double * retrace_tallies)
{
    (void) retrace_tallies;  // see header comment

    size_t nseg = *nseg_p;
    size_t nmodel = *nmodel_p;  // BUG FIX: previously read *nmat_p, so every
                                // launch and index was wrong when nmodel != nmat
    size_t nmat = *nmat_p;

    // Sig [nseg x nmodel] = log_micro[nmat x nseg]^T x per_dens[nmat x nmodel]
    // Sig[iseg, imodel] = sum_imat log_micro[imat, iseg] x per_dens[imat, imodel]
    cudaMemcpy(dev_log_micro_t, log_micro_t, nmat*nseg*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_log_micro_s, log_micro_s, nmat*nseg*sizeof(double), cudaMemcpyHostToDevice);
    //
    // Warp-aligned block size (previously nthreads = nmodel, which may be
    // neither a multiple of 32 nor <= 1024). Safe because mtm_kernel and
    // wgt_kernel both bounds-check the rounded-up grid tail.
    int nthreads = 128;
    int nblocks = (int)((nseg * nmodel + nthreads - 1) / nthreads);
    mtm_kernel <<< nblocks, nthreads >>> (
        dev_log_micro_t, dev_dens, dev_sig_t,
        nseg, nmodel, nmat);
    mtm_kernel <<< nblocks, nthreads >>> (
        dev_log_micro_s, dev_dens, dev_sig_s,
        nseg, nmodel, nmat);

    // Calculate weight multiplier at each step
    cudaMemcpy(dev_log_sig_t, log_sig_t, nseg*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_log_sig_s, log_sig_s, nseg*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_log_dtc, log_dtc, nseg*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_log_loss, log_loss, nseg*sizeof(int), cudaMemcpyHostToDevice);

    wgt_kernel <<< nblocks, nthreads >>> (
        dev_sig_t, dev_sig_s,
        dev_log_sig_t, dev_log_sig_s,
        dev_log_dtc, dev_log_loss, dev_wgts,
        nseg, nmodel);

    // Increment the tallies: one thread per model in a single block (the
    // kernel serializes over segments per model).
    tally_kernel <<< 1, nmodel >>> (dev_wgts, dev_log_loss, dev_tallies, nseg, nmodel);

    // Launches are asynchronous and report bad configurations only through
    // the error state; surface them instead of failing silently.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "retrace_: CUDA error: %s\n", cudaGetErrorString(err));
    }
}
