#include "GPULayout.h"

#include "oclUtil.h"

#include <ctime>
#include <queue>
#include <cmath>
#include <cstring>
#include <iostream>
#include <cstdlib>
using namespace std;
//using namespace GM3;

//Number of work-items per work-group. Also used as the KD-tree leaf-size
//threshold (initOnce) and to size the per-work-group local buffers
//(SHAREDPOS/SHAREDWT in initPerGraph), so all three must stay in sync.
unsigned int GPULayout::n = 128; //number of work-items per work-group

/*************************/
/*Kernel argument indices*/
enum{
    //Arguments shared by both kernels. The enumerator ORDER defines the
    //OpenCL argument indices passed to cluSetKernelArg, so it must match the
    //parameter order of stepKern/timestepKern in the .cl source — do not
    //reorder.
    NODEP_i,      //node position buffer (float2 per node)
    CSRN_i,       //CSR row-pointer array (per-node edge offsets)
    CSRE_i,       //CSR column array (edge targets)
    NODEW_i,      //per-node weights
    EDGEW_i,      //per-edge weights
    KDNODES_i,    //flattened KD-tree nodes
    KDOFFSET_i,   //index of the first leaf in the KD-node array
    ETA_i,        //layout step-size constant
    C_i,          //repulsion constant
    DESLENGTH_i,  //desired edge length for this level
    T_i,          //current temperature
    SHAREDPOS_i,  //local-memory scratch: positions
    SHAREDWT_i,   //local-memory scratch: weights
    
    //timestep kernel only
    CORRCONST_i,  //correlation constant pulling nodes toward the previous timestep
    NTM1_i,       //node positions from the previous timestep
    TMAP_i,       //index map from current to previous timestep
};
/*************************/

//Construct a GPU layout from a graph file. All OpenCL handles are
//zero-initialized so cleanupCL() is safe even if initialization fails partway.
GPULayout::GPULayout(const char *fn):
    GM3Layout(fn),
    graphTimeList(), trans(),
    context(), devices(NULL), commandQueue(), OCLProg(),
    stepKern(), timestepKern(),
    nodes_b(0), nodestm1_b(0), tmap_b(0), CSRN_b(0), CSRE_b(0), nodeW_b(0),
    edgeW_b(0), KDNodes_b(0),
    GPUTmp(), GPUKDNodes(), CSRNodes(), CSREdges()
{
    initOnce(); //load Multipole.cl here
}

//Construct a GPU layout from an existing graph. All OpenCL handles are
//zero-initialized so cleanupCL() is safe even if initialization fails partway.
//trans() is listed explicitly for consistency with the file-name constructor
//(it is default-constructed either way, so behavior is unchanged).
GPULayout::GPULayout(GM3Graph *g):
    GM3Layout(g),
    graphTimeList(), trans(),
    context(), devices(NULL), commandQueue(), OCLProg(),
    stepKern(), timestepKern(),
    nodes_b(0), nodestm1_b(0), tmap_b(0), CSRN_b(0), CSRE_b(0), nodeW_b(0),
    edgeW_b(0), KDNodes_b(0),
    GPUTmp(), GPUKDNodes(), CSRNodes(), CSREdges()
{
    initOnce(); //load Multipole.cl here
}

//Destructor: release all OpenCL objects (buffers, kernels, program, queue,
//context) and the device list.
GPULayout::~GPULayout(){
    cleanupCL();
}


void GPULayout::cleanupCL(){
    cleanupCLMem();
    if(stepKern) clReleaseKernel(stepKern);
    if(timestepKern) clReleaseKernel(timestepKern);
    if(OCLProg) clReleaseProgram(OCLProg);
    if(commandQueue) clReleaseCommandQueue(commandQueue);
    if(devices) free(devices);
    if(context) clReleaseContext(context);
}

//Release all GPU memory buffers. Handles are zeroed after release so this is
//safe to call more than once — initPerGraph calls it before re-allocating,
//and the destructor path calls it again via cleanupCL.
void GPULayout::cleanupCLMem(){
    if(nodes_b){ cluReleaseMemObject(nodes_b); nodes_b = 0; }
    if(nodestm1_b){ cluReleaseMemObject(nodestm1_b); nodestm1_b = 0; }
    if(tmap_b){ cluReleaseMemObject(tmap_b); tmap_b = 0; }
    if(CSRN_b){ cluReleaseMemObject(CSRN_b); CSRN_b = 0; }
    if(CSRE_b){ cluReleaseMemObject(CSRE_b); CSRE_b = 0; }
    if(nodeW_b){ cluReleaseMemObject(nodeW_b); nodeW_b = 0; }
    if(edgeW_b){ cluReleaseMemObject(edgeW_b); edgeW_b = 0; }
    if(KDNodes_b){ cluReleaseMemObject(KDNodes_b); KDNodes_b = 0; }
}

//OpenCL context error callback (pfn_notify for clCreateContextFromType).
//priv/cb carry implementation-defined binary data and are unused; usr is the
//user_data pointer, also unused. The original omitted the trailing newline,
//which left consecutive error reports glued together on one line; std::endl
//also flushes so the message is visible even if the process aborts.
void errNotify(const char *errstr, const void *priv, size_t cb, void *usr){
    (void)priv; (void)cb; (void)usr;
    std::cerr << "Error in this context: " << errstr << std::endl;
}


//Load a graph via the base class (which builds the coarsening hierarchy and
//optionally randomizes positions), then (re)allocate the GPU buffers and bind
//the kernel arguments for the new sizes.
void GPULayout::loadGraph(GM3Graph *g, unsigned int N, unsigned int E, bool ifRandomize){
    GM3Layout::loadGraph(g, N, E, ifRandomize);

    initPerGraph(N, E);
}


void GPULayout::initOnce(){
    //get the platform
    unsigned int numPlatforms;
    cluGetPlatformIDs(0, NULL, &numPlatforms);
    cl_platform_id platIDs[numPlatforms];
    cluGetPlatformIDs(numPlatforms, platIDs, NULL);

    //create the platform property list
    cl_context_properties clcxProps[] = 
    {
        CL_CONTEXT_PLATFORM, (cl_context_properties)platIDs[0], 0
    };

    //create an OpenCL context
    context = cluCreateContextFromType(clcxProps, CL_DEVICE_TYPE_GPU,
            errNotify, NULL);

    //set up the device and a command queue for it
    size_t ParmDataBytes;
    cluGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, 
            &ParmDataBytes);
    devices = (cl_device_id*)malloc(ParmDataBytes);
    cluGetContextInfo(context, CL_CONTEXT_DEVICES, ParmDataBytes, 
            devices, NULL);

    commandQueue = cluCreateCommandQueue(context, devices[0], NULL);
            //CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE);

    // create and compile the program
    //   this uses the xx.cl file under the directory /src/xxx.cl
    //char *src[1] = {cluLoadProg("./Multipole.cl")};

    //   this uses xxx.cl file under the directory "/src/layouts/GPULayout-lite_cus02/"


    string strCLsource = "./layouts/GPULayout-lite_cus02/multipole/Multipole_0425-1440_original.cl";
    //char *src[1] = {cluLoadProg("./layouts/GPULayout-lite_cus02/Multipole_0425-1440_original.cl")};

    char *src[1] = {cluLoadProg(strCLsource.c_str())};


    ////char *src[1] = {cluLoadProg("./src/layouts/GPULayout-lite_cus02/Multipole.cl")};

    if(src != NULL){
        cout << "[openCL] just loaded openCL source file at " << strCLsource << endl;
        OCLProg = cluCreateProgramWithSource(context, 1, 
                const_cast<const char**>(src), NULL);
        cluBuildProgram(OCLProg, 1, devices, NULL, NULL, NULL);
        delete src[0];
    }
    else{
        cout << "[openCL] error: cannot load openCL source file at " << strCLsource << endl;
    }

    //get the kernel handle
    stepKern = cluCreateKernel(OCLProg, "stepKern");
    timestepKern = cluCreateKernel(OCLProg, "timestepKern");

    KDTree::threshold(n);
}

//Per-graph setup: allocate GPU buffers sized for N nodes / E edges (falling
//back to the finest graph's sizes when 0 is passed), size the host staging
//buffers, and bind the buffer arguments on both kernels.
void GPULayout::initPerGraph(unsigned int Ni, unsigned int Ei){
    //clear out old buffers
    cleanupCLMem();

    unsigned int N = Ni==0 ? graphList[0].numNodes() : Ni;
    unsigned int E = Ei==0 ? graphList[0].numEdges() : Ei;

    //allocate space for the translation vector
    trans.resize(N);

    //allocate node and edge buffers for the GPU
    nodes_b = cluCreateBuffer(context, CL_MEM_READ_WRITE,
            sizeof(float)*2*N, NULL);
    nodestm1_b = cluCreateBuffer(context, CL_MEM_READ_WRITE,
            sizeof(float)*2*N, NULL);

    tmap_b = cluCreateBuffer(context, CL_MEM_READ_WRITE,
            sizeof(cl_int)*N, NULL);

    CSRN_b = cluCreateBuffer(context, CL_MEM_READ_ONLY, 
            sizeof(cl_uint)*(N+1), NULL);
    CSRE_b = cluCreateBuffer(context, CL_MEM_READ_ONLY,
            sizeof(cl_uint)*E, NULL);

    nodeW_b = cluCreateBuffer(context, CL_MEM_READ_ONLY, sizeof(float)*N, NULL);
    edgeW_b = cluCreateBuffer(context, CL_MEM_READ_ONLY, sizeof(float)*E, NULL);

    //KD tree is a complete binary tree over ceil(N/n) leaves
    unsigned int kdsize = 4*(unsigned int)(N/n) + 1;
    KDNodes_b = cluCreateBuffer(context, CL_MEM_READ_ONLY,
            sizeof(kdnode)*kdsize, NULL);

    //local buffers to store GPU-formatted data.
    //GPUTmp stages node positions (2*N floats), node weights (N floats) and
    //edge weights (E floats), so it must hold max(2*N, E) elements. The
    //original resized to max(N, E), which under-allocated by N floats for the
    //position transfers whenever 2*N > E (heap overflow in
    //convertGraphNodePosToGPU / the nodes_b read-back).
    GPUTmp.resize(2*N>E ? 2*N : E);
    GPUKDNodes.resize(kdsize);
    CSRNodes.reserve(N+1);
    CSREdges.reserve(E);

    cluFinish(commandQueue);

    //pointers to global memory
    cluSetKernelArg(stepKern, NODEP_i, sizeof(cl_mem), (void*)&nodes_b);
    cluSetKernelArg(stepKern, CSRN_i, sizeof(cl_mem), (void*)&CSRN_b);
    cluSetKernelArg(stepKern, CSRE_i, sizeof(cl_mem), (void*)&CSRE_b);
    cluSetKernelArg(stepKern, NODEW_i, sizeof(cl_mem), (void*)&nodeW_b);
    cluSetKernelArg(stepKern, EDGEW_i, sizeof(cl_mem), (void*)&edgeW_b);
    cluSetKernelArg(stepKern, KDNODES_i, sizeof(cl_mem), (void*)&KDNodes_b);

    //pointers to local memory (size-only args: NULL data means __local)
    cluSetKernelArg(stepKern, SHAREDPOS_i, sizeof(float)*2*n, NULL);
    cluSetKernelArg(stepKern, SHAREDWT_i, sizeof(float)*n, NULL);

    //pointers to global memory for timestepKern
    cluSetKernelArg(timestepKern, NODEP_i, sizeof(cl_mem), (void*)&nodes_b);
    cluSetKernelArg(timestepKern, CSRN_i, sizeof(cl_mem), (void*)&CSRN_b);
    cluSetKernelArg(timestepKern, CSRE_i, sizeof(cl_mem), (void*)&CSRE_b);
    cluSetKernelArg(timestepKern, NODEW_i, sizeof(cl_mem), (void*)&nodeW_b);
    cluSetKernelArg(timestepKern, EDGEW_i, sizeof(cl_mem), (void*)&edgeW_b);
    cluSetKernelArg(timestepKern, KDNODES_i, sizeof(cl_mem), (void*)&KDNodes_b);

    //pointers to local memory for timestepKern
    cluSetKernelArg(timestepKern, SHAREDPOS_i, sizeof(float)*2*n, NULL);
    cluSetKernelArg(timestepKern, SHAREDWT_i, sizeof(float)*n, NULL);

    //previous-timestep position buffer and timestep index map
    cluSetKernelArg(timestepKern, NTM1_i, sizeof(cl_mem), (void*)&nodestm1_b);
    cluSetKernelArg(timestepKern, TMAP_i, sizeof(cl_mem), (void*)&tmap_b);
}

//Per-layout setup: build the coarsening hierarchy, start at the coarsest
//graph, and bind the layout constants on the step kernel.
void GPULayout::initPerLayout(){
    buildReductions();

    //begin with the coarsest graph in the hierarchy
    currGraph = graphList.end() - 1;

    cluSetKernelArg(stepKern, ETA_i, sizeof(float), (void*)&eta);
    cluSetKernelArg(stepKern, C_i, sizeof(float), (void*)&repConst);
}

//Per-level setup for hierarchy level lvl (0 = finest). Binds the layout
//constants on the timestep kernel, computes the per-level temperature and
//desired edge length (both decay toward finer levels), and resets the running
//translation vector to the identity. Returns the number of layout steps to
//run at this level, interpolated between finestSteps and coarsestSteps.
unsigned int GPULayout::initPerLevel(unsigned int lvl){
    unsigned int g = lvl;
    unsigned int steps;
    float d;

    cluSetKernelArg(timestepKern, ETA_i, sizeof(float), (void*)&eta);
    cluSetKernelArg(timestepKern, C_i, sizeof(float), (void*)&repConst);

    //determine the number of steps. The original tested
    //`graphList.size()-1 <= 0` which, with an unsigned size(), wraps around
    //for an empty list and only matched size()==1; size() <= 1 is the intent
    //and also avoids a division by the wrapped (size()-1) below.
    if(graphList.size() <= 1) steps = (coarsestSteps+finestSteps)/2;
    else{
        steps = finestSteps + g*(coarsestSteps-finestSteps)/
            (graphList.size()-1);
    }

    //calculate initialT decay - less energy at lower levels
    currT = GM3Layout::initialT / pow(initialTDecay,(float)(graphList.size()-1-g));

    //calculate desired length decay - restrict cluster expansion
    d = GM3Layout::desLength / pow(desLengthDecay, (float)(graphList.size()-1-g));
    cluSetKernelArg(stepKern, DESLENGTH_i, sizeof(float),(void*)&d);
    cluSetKernelArg(timestepKern, DESLENGTH_i, sizeof(float),(void*)&d);

    //create an identity translation vector
    for(unsigned int i=0; i<currGraph->numNodes(); i++) trans[i] = i;

    return steps;
}

//Upload everything that may have changed after a KD-tree rebuild: node
//positions, CSR arrays, node/edge weights, and the flattened KD tree. All
//writes are blocking (CL_TRUE), so the staging buffers can be reused as soon
//as each call returns.
void GPULayout::initPerKDTree(){
    //rebuild the CSR (node order changed with the KD rebuild)
    buildCSR();

    KDTree *kd = currGraph->tree();
    unsigned int N = currGraph->numNodes();
    unsigned int E = currGraph->numEdges();

    //write the offset for the first leaf node
    cl_uint ofs = kd->size() - kd->leaves();
    cluSetKernelArg(stepKern, KDOFFSET_i, sizeof(cl_uint), (void*)&ofs);
    cluSetKernelArg(timestepKern, KDOFFSET_i, sizeof(cl_uint), (void*)&ofs);

    //write everything that might have changed
    convertGraphNodePosToGPU(currGraph->nodesRef(), GPUTmp);
    cluEnqueueWriteBuffer(commandQueue, nodes_b, CL_TRUE, 0, 
            sizeof(cl_float)*N*2, &(GPUTmp[0]), 0,NULL,NULL);

    cluEnqueueWriteBuffer(commandQueue, CSRN_b, CL_TRUE, 0,
            sizeof(cl_uint)*CSRNodes.size(), &(CSRNodes[0]), 0,NULL,NULL);

    convertGraphNodeWToGPU(currGraph->nodesRef(), GPUTmp);
    cluEnqueueWriteBuffer(commandQueue, nodeW_b, CL_TRUE, 0,
            sizeof(cl_float)*N, &(GPUTmp[0]), 0, NULL, NULL);

    //skip the edge buffers for edgeless graphs (CSREdges would be empty)
    if(E > 0){
        cluEnqueueWriteBuffer(commandQueue, CSRE_b, CL_TRUE, 0,
                sizeof(cl_uint)*CSREdges.size(), &(CSREdges[0]), 0,NULL,NULL);

        convertGraphEdgeWToGPU(currGraph->edgesRef(), GPUTmp);
        cluEnqueueWriteBuffer(commandQueue, edgeW_b, CL_TRUE, 0,
                sizeof(cl_float)*E, &(GPUTmp[0]), 0, NULL, NULL);
    }

    convertKDTree(currGraph->tree(), GPUKDNodes);
    cluEnqueueWriteBuffer(commandQueue, KDNodes_b, CL_TRUE, 0, 
            sizeof(kdnode)*GPUKDNodes.size(), &(GPUKDNodes[0]), 0,NULL,NULL);
}

//Per-step setup: push the current temperature to both kernels (it decays by
//lambda after every layout step).
void GPULayout::initPerStep(){
    cluSetKernelArg(stepKern, T_i, sizeof(cl_float), (void*)&currT);
    cluSetKernelArg(timestepKern, T_i, sizeof(cl_float), (void*)&currT);
}


//Run the full multilevel layout: lay out each hierarchy level from coarsest
//to finest, seeding each finer level from the one above. Returns the finest
//graph with its computed positions. Use this for the first timestep only;
//subsequent timesteps go through computeLayoutTS.
GM3Graph *GPULayout::computeLayout(){
    initPerLayout();

    for(unsigned int i=0; i<graphList.size(); i++){
        layoutLevel();

        //set up the next graph if it exists
        if(currGraph > graphList.begin()){
            currGraph--;
            //seed positions from the coarser level's maximal independent set
            currGraph->setLayoutFromMIS(&*(currGraph+1), initialT/10.f);
        }
    }

    //wait for everything in the queue to finish
   cluFinish(commandQueue);

    //rescale all levels
    //for(unsigned int i=0; i<graphList.size(); i++) graphList[i].rescale();

    return &(graphList[0]);
}


//Lay out the current hierarchy level: run `steps` iterations of the step
//kernel, periodically rebuilding the KD tree (which reorders nodes; the
//running `trans` vector tracks the permutation), then undo the reordering so
//nodes and edges end up back in their original order.
void GPULayout::layoutLevel(){
    if(currGraph->numNodes() <= 0) return;

    unsigned int steps = initPerLevel(currGraph - graphList.begin());


    unsigned int lastKDStep = 0;

    //Actually layout the level
    for(unsigned int i=0; i<steps; i++){
        if(KDRebuildStep(i, lastKDStep)){
            lastKDStep = i;
            buildKDTree();
        }

        layoutStep();

        //reduce temperature
        currT = currT * lambda;

        //finish operations
        cluFinish(commandQueue);

        //get ready to set up the next KD tree: read positions back so the
        //CPU-side KD rebuild sees the kernel's updates
        cluEnqueueReadBuffer(commandQueue, nodes_b, CL_TRUE, 0,
                sizeof(cl_float)*currGraph->numNodes()*2, &(GPUTmp[0]),
                0, NULL, NULL);
        convertGraphNodePosToCPU(GPUTmp, currGraph->nodesRef());
    }

    //translate nodes to their original location (undo the accumulated KD
    //reorderings recorded in trans)
    std::vector<GM3Graph::node> nodeCopy = currGraph->nodes;
    for(unsigned int i=0; i<currGraph->numNodes(); i++){
        currGraph->nodesRef()[i] = nodeCopy[trans[i]];
    }

    //write the nodes back to the previous timestep buffer (nodestm1_b is the
    //Tn-1 reference used by timestepKern on the next timestep)
    convertGraphNodePosToGPU(currGraph->nodesRef(), GPUTmp);
    cluEnqueueWriteBuffer(commandQueue, nodestm1_b, CL_TRUE, 0, 
            sizeof(cl_float)*currGraph->numNodes()*2, &(GPUTmp[0]),
            0,NULL,NULL);

    //reverse the translation vector for edges
    std::vector<unsigned int> rtrans(currGraph->numNodes());
    for(unsigned int i=0; i<currGraph->numNodes(); i++) rtrans[trans[i]] = i;

    //translate edges using the reversed translation vector - should now be the
    //same as the original edge list
    currGraph->translateEdges(rtrans);
}


//Enqueue one iteration of the force-directed step kernel. The local work size
//is the KD tree's leaf capacity, so each work-group processes exactly one
//leaf; the global size covers every leaf. (The local variable is named
//leafCap rather than `n` to avoid shadowing the static member GPULayout::n.)
void GPULayout::layoutStep(){
    initPerStep();

    const int leafCap = currGraph->tree()->maxLeaf();

    size_t lwork[1];
    size_t gwork[1];
    lwork[0] = (size_t)leafCap;
    gwork[0] = (size_t)(currGraph->tree()->leaves()*leafCap);

    cluEnqueueNDRangeKernel(commandQueue, stepKern, 1, NULL, gwork, lwork,
            0, NULL, NULL);
}


//Assumptions:
// * GTn is the graph for the current timestep
// * nodes_b already holds the position data from the previous timestep. This 
//    means that computeLayout should be called only for T0, and 
//    computeLayoutTimestep after that.
// * tmap[i] contains the index of Tn[i]. I.e. Tn[i] and Tn-1[tmap[i]] should be
//    the same node!
// * If a node in Tn-1 doesn't appear in Tn (i.e. it was removed), then tmap 
//    shouldn't contain any mapping for that node.
// * If a new node appears in Tn that was not in Tn-1, it should map to index 
//    -1, so the computation knows not to look for that node in Tn-1.
//Lay out the graph for a new timestep Tn. See the assumption list above:
//nodes_b must already hold Tn-1's positions (i.e. computeLayout ran for T0),
//and tmap[i] is the Tn-1 index of node i (or -1 for nodes new in Tn).
//Returns the laid-out copy of GTn owned by graphTimeList.
GM3Graph *GPULayout::computeLayoutTS(GM3Graph *GTn, std::vector<int> tmap){


    /*  0421 13:33 nick's fix for zero position assignment for newly added nodes.
      //set initial positions
    std::vector<GM3Graph::node> &n = GTn->nodesRef();
    for(unsigned int i=0; i<GTn->numNodes(); i++){
        if(tmap[i] >= 0){
            currGraph->getPos( tmap[i], &(n[i].p[0]) );
        }else{
            n[i].p = cvec();
        }
    }*/


    //work on a copy of GTn so the caller's graph is untouched until return
    if(graphTimeList.size() == 0) graphTimeList.resize(1);
    graphTimeList[0] = *GTn;
    currGraph = graphTimeList.begin();


    //cluEnqueueCopyBuffer(commandQueue, nodes_b, nodestm1_b, 0, 0, 
            //sizeof(cl_float)*currGraph->numNodes(), 0,NULL,NULL);
    //cluEnqueueWriteBuffer(commandQueue, nodes_b, CL_TRUE, 0,
            //sizeof(cl_float)*currGraph->numNodes(), &([0]), 0,NULL,NULL);

    layoutLevelTS(tmap);

    //wait for everything in the queue to finish
    cluFinish(commandQueue);

    //rescale all levels
    //currGraph->rescale();
    //for(unsigned int i=0; i<graphList.size(); i++) graphList[i].rescale();
    return &(graphTimeList[0]);
}


//Timestep variant of layoutLevel: runs the timestep kernel, which also pulls
//nodes toward their Tn-1 positions (via nodestm1_b and the tmap index map).
//Because KD rebuilds permute the node order, tmap must be re-permuted and
//re-uploaded after every rebuild so tmap2[current index] stays the Tn-1 index.
void GPULayout::layoutLevelTS(std::vector<int> &tmap){
    if(currGraph->numNodes() <= 0) return;

    unsigned int steps = initPerLevel(0);
    cluSetKernelArg(timestepKern, CORRCONST_i, sizeof(float),
            (void*)&correlatingConst);

    //tmap2 tracks the map in the current (permuted) node order; tmap1 is
    //scratch holding the pre-rebuild version during re-permutation
    std::vector<int> tmap1(tmap.size());
    std::vector<int> tmap2 = tmap;

    //write the timestep map to GPU
    cluEnqueueWriteBuffer(commandQueue, tmap_b, CL_TRUE, 0,
            sizeof(cl_int)*tmap.size(), &(tmap2[0]), 0,NULL,NULL);

    unsigned int lastKDStep = 0;

    //Actually layout the level
    for(unsigned int i=0; i<steps; i++){
        if(KDRebuildStep(i, lastKDStep)){
            lastKDStep = i;

            //std::vector<unsigned int> trans = currGraph->buildKDTree();
            //initPerKDTree();
            std::vector<unsigned int> trans = buildKDTree();

            //translate the time map to follow the rebuild's permutation
            tmap1 = tmap2;
            for(unsigned int j=0; j<trans.size(); j++){
                tmap2[trans[j]] = tmap1[j];
            }
            cluEnqueueWriteBuffer(commandQueue, tmap_b, CL_TRUE, 0,
                    sizeof(cl_int)*tmap.size(), &(tmap2[0]), 0,NULL,NULL);
        }

        layoutStepTS();

        //reduce temperature
        currT = currT * lambda;

        //finish operations
        cluFinish(commandQueue);

        //get ready to set up the next KD tree: read back kernel output
        cluEnqueueReadBuffer(commandQueue, nodes_b, CL_TRUE, 0,
                sizeof(cl_float)*currGraph->numNodes()*2, &(GPUTmp[0]),
                0, NULL, NULL);
        convertGraphNodePosToCPU(GPUTmp, currGraph->nodesRef());
    }

    //translate nodes to their original location (undo accumulated reorderings)
    std::vector<GM3Graph::node> nodeCopy = currGraph->nodes;
    for(unsigned int i=0; i<currGraph->numNodes(); i++){
        currGraph->nodesRef()[i] = nodeCopy[trans[i]];
    }

    //reverse the translation vector for edges
    std::vector<unsigned int> rtrans(currGraph->numNodes());
    for(unsigned int i=0; i<currGraph->numNodes(); i++) rtrans[trans[i]] = i;

    //translate edges using the reversed translation vector - should now be the
    //same as the original edge list
    currGraph->translateEdges(rtrans);
}

//Enqueue one iteration of the timestep kernel. Work sizes mirror layoutStep:
//one work-group per KD leaf, with leaf-capacity work-items each. (Local named
//leafCap to avoid shadowing the static member GPULayout::n.)
void GPULayout::layoutStepTS(){
    initPerStep();

    const int leafCap = currGraph->tree()->maxLeaf();

    size_t lwork[1];
    size_t gwork[1];
    lwork[0] = (size_t)leafCap;
    gwork[0] = (size_t)(currGraph->tree()->leaves()*leafCap);

    cluEnqueueNDRangeKernel(commandQueue, timestepKern, 1, NULL, gwork, lwork,
            0, NULL, NULL);
}


//Better function: the number of iterations between rebuilds increases with the
//logarithm(base 2/3*lambda) of the last rebuild step.
//Decide whether the KD tree should be rebuilt at step s, given the last
//rebuild step s0. The gap between rebuilds grows with log(s0) in base
//(2/3 * lambda). The first step is made an explicit rebuild: the original
//expression only triggered there because log(0.f) evaluates to -inf under
//IEEE semantics, which this no longer relies on.
bool GPULayout::KDRebuildStep(unsigned int s, unsigned int s0){
    if(s0 == 0) return true; //always rebuild on the first step of a level
    return (float)(s-s0) > -1.f * log((float)s0)/log(lambda*2.f/3.f);
}

//Rebuild the KD tree for the current graph (which permutes its nodes),
//compose the new permutation into the running translation vector `trans`,
//and push the refreshed data to the GPU. Returns the rebuild's permutation
//(newTrans[old index] appears to be the node's new index — the tmap update in
//layoutLevelTS is consistent with that reading; confirm against
//GM3Graph::buildKDTree).
std::vector<unsigned int> GPULayout::buildKDTree(){
    //GM3Layout::buildKDTree();

    //build the KD tree and get the translation vector
    std::vector<unsigned int> newTrans = currGraph->buildKDTree();

    //update parents
    //if(currGraph > graphList.begin()) (currGraph-1)->translateParents(newTrans);

    //update the running translation vector
    for(unsigned int i=0; i<newTrans.size(); i++) trans[i] = newTrans[trans[i]];

    //upload positions, CSR, weights and KD nodes in the new order
    initPerKDTree();

    return newTrans;
}


//Convert a single CPU-side KD-tree node into the flat GPU representation.
//Child links are left at -1; convertKDTree patches in the real array indices.
GPULayout::kdnode GPULayout::convertKDNode(KDTree::node *kn){
    kdnode out;

    //position
    out.x = kn->p.x();
    out.y = kn->p.y();

    //scalar payload copied straight across
    out.d = kn->d;
    out.w = kn->w;
    out.s = kn->s;
    out.t = kn->t;

    //children start unlinked
    out.l = out.r = -1;

    return out;
}

//Flattening algorithm (matches the numbered comments in the loop body):
// Start with the root node in the queue
// Until the queue is empty:
// 1. Pop a node from the queue
// 2. Place the converted node in the vector
// 3./4. For each child: enqueue interior children in level order (leaves go
//    to a separate queue, appended after all interior nodes) and compute the
//    child's final array index for the parent's l/r link
//Flatten the KD tree into `kdnodes` for the GPU: interior nodes are stored in
//level order at the front, leaves are collected separately and appended at
//offset loff (= size - leaves), and child fields become array indices.
//Finally rStacklessConv rewires the right links for stackless traversal.
//A child is classified as interior iff it has BOTH children (the tree is
//built so interior nodes always do).
void GPULayout::convertKDTree(KDTree *kd,
        std::vector<GPULayout::kdnode> &kdnodes)
{
    if(!kd) return; //don't process a NULL pointer

    kdnodes.clear(); //make sure the KD tree buffer is empty

    std::queue<KDTree::node*> levelOrderQ;
    std::queue<kdnode> leafQ;
    KDTree::node *n;
    //index where the first leaf will land in the output array
    unsigned int loff = kd->size() - kd->leaves();

    n = kd->kdroot();
    levelOrderQ.push(n); // Start with the root node in the queue
    while(!levelOrderQ.empty()){ // Until the queue is empty
        // 1. Pop node from the queue
        n = levelOrderQ.front();
        levelOrderQ.pop();

        // 2. Place converted node in vector
        kdnodes.push_back(convertKDNode(n));

        // 3. Handle left child
        if(n->l){
            if(n->l->l && n->l->r){ //interior
                // 3.1 Add left child to the queue
                levelOrderQ.push(n->l);

                // 3.2 Calculate new left pointer: in level order the child
                //     lands after everything emitted plus everything queued
                kdnodes.back().l = kdnodes.size() + levelOrderQ.size()-1;
            }else{ //leaf
                // 3.1 Add left child to leaf queue
                leafQ.push(convertKDNode(n->l));

                // 3.2 Calculate new left pointer into the leaf region
                kdnodes.back().l = loff + leafQ.size()-1;
            }
        }

        // 4. Handle right child
        if(n->r){
            if(n->r->l && n->r->r){ //interior
                // 4.1 Add right child to the queue
                levelOrderQ.push(n->r);

                // 4.2 Calculate new right pointer
                kdnodes.back().r = kdnodes.size()-1 + levelOrderQ.size();
            }else{ //leaf
                // 4.1 Add right child to leaf queue
                leafQ.push(convertKDNode(n->r));

                // 4.2 Calculate new right pointer into the leaf region
                kdnodes.back().r = loff + leafQ.size()-1;
            }
        }
    }

    // 5. Add the leaves at the end of the list
    while(!leafQ.empty()){
        kdnodes.push_back(leafQ.front());
        leafQ.pop();
    }

    //rewrite right links so traversal needs no stack
    rStacklessConv(kdnodes, 0, -1);
}

//Stackless traversal conversion
// Start at root
// A pointer for the right side, "R," is passed in
// 1. Call recursively on node->l with R = node->r
// 2. Call recursively on node->r with R = node->r
// 3. Set node->r = R
//Recursively rewrite right links for stackless traversal: after this, a
//node's r field points to the next node to visit when its subtree is done
//(the "escape" index), with -1 at the root meaning traversal is finished.
//The < t.size() bounds guard compares a (signed) child index against size_t;
//safe here because the > 0 guard already excludes negatives.
void GPULayout::rStacklessConv(std::vector<GPULayout::kdnode> &t, int c, int R){
    if(t[c].l > 0 && t[c].l<t.size() ) rStacklessConv(t, t[c].l, t[c].r); // YUHSUAN
    if(t[c].r > 0 && t[c].r<t.size() ) rStacklessConv(t, t[c].r, R); // YUHSUAN
    t[c].r = R;
}


//Rebuild the CSR (compressed sparse row) arrays for the current graph:
//CSRNodes[v] is the index of node v's first outgoing edge in CSREdges, with
//a trailing sentinel equal to the edge count.
void GPULayout::buildCSR(){
    //edges must be grouped by source id for the row scan below
    currGraph->sortEdges();

    CSRNodes.clear();
    CSREdges.clear();

    const unsigned int numE = currGraph->numEdges();

    unsigned int e = 0;
    for(unsigned int v=0; v<currGraph->numNodes(); v++){
        //row pointer: first outgoing edge of node v
        CSRNodes.push_back(e);

        //consume every edge whose source is v
        for(; e < numE && currGraph->edgeAt(e).id1 == v; e++){
            CSREdges.push_back(currGraph->edgeAt(e).id2);
        }
    }

    //sentinel row pointer marking the end of the edge list
    CSRNodes.push_back(numE);
}


//Flatten node positions into interleaved x,y float pairs for the GPU buffer.
//p must already hold at least 2*g.size() elements.
void GPULayout::convertGraphNodePosToGPU(std::vector<GM3Graph::node> &g,
        std::vector<float> &p){
    std::vector<float>::iterator out = p.begin();
    for(std::vector<GM3Graph::node>::iterator it = g.begin();
            it != g.end(); ++it){
        *out++ = it->p.x();
        *out++ = it->p.y();
    }
}

//Unpack interleaved x,y pairs read back from the GPU into the node list.
//p must hold at least 2*g.size() elements.
void GPULayout::convertGraphNodePosToCPU(std::vector<cl_float> &p,
        std::vector<GM3Graph::node> &g){
    unsigned int idx = 0;
    for(std::vector<GM3Graph::node>::iterator it = g.begin();
            it != g.end(); ++it){
        it->p.x(p[idx++]);
        it->p.y(p[idx++]);
    }
}

//Copy per-node weights into a flat float buffer for upload.
//w must hold at least g.size() elements.
void GPULayout::convertGraphNodeWToGPU(std::vector<GM3Graph::node> &g,
        std::vector<cl_float> &w){
    std::vector<cl_float>::iterator out = w.begin();
    for(std::vector<GM3Graph::node>::iterator it = g.begin();
            it != g.end(); ++it){
        *out++ = it->weight;
    }
}

//Copy per-edge weights into a flat float buffer for upload.
//w must hold at least g.size() elements.
void GPULayout::convertGraphEdgeWToGPU(std::vector<GM3Graph::edge> &g,
        std::vector<cl_float> &w){
    std::vector<cl_float>::iterator out = w.begin();
    for(std::vector<GM3Graph::edge>::iterator it = g.begin();
            it != g.end(); ++it){
        *out++ = it->weight;
    }
}
