#include "misc.h"
#include "matrix.h"
#include "util.h"

#define MAX(a,b)  (((a) > (b)) ? (a) : (b))
#define TOLERANCE   1.0e-6



/* Reads a binary file into a matrix via load_mat.
   A        - output matrix, filled by load_mat
   filename - path of the file to read
   Fix: the original passed the fopen result to load_mat unchecked, so a
   missing/unreadable file handed load_mat a NULL FILE* (crash). Fail loudly
   instead. */
void read_file(matrix *A, const char *filename) {
    FILE *fp = fopen(filename, "r");
    if (fp == NULL) {
        fprintf(stderr, "read_file: cannot open '%s'\n", filename);
        exit(EXIT_FAILURE);
    }
    load_mat(fp, A);
    fclose(fp);
}

/* Computes Q = M * HpInv * M^T.
   Q is allocated by the matrix routines; both intermediates are freed here. */
void calc_Q(matrix *M, matrix *HpInv, matrix*Q) {
    matrix Mtrans;     /* M^T */
    matrix MHpInv;     /* M * HpInv */

    trans_mat(M, &Mtrans);

    mult_mat(M, HpInv, &MHpInv);
    mult_mat(&MHpInv, &Mtrans, Q);

    dest_mat(&Mtrans);
    dest_mat(&MHpInv);
}

/* Builds one side of the stacked constraint vector (wlo or whi).
   For steps i = 1..N-1 it stores, three state entries per step,
       xXX - A^i*x0 - sum_{var=1..i} A^(i-var)*d(var-1)
   in the first 3*(N-1) slots, followed by N-1 copies of the scalar bound
   uXX, giving a 4*(N-1) x 1 column vector in wXX (allocated here).
   pnum shifts which entries of d are consumed: problem pnum reads
   d(pnum), d(pnum+1), ... from d flattened in row-major order.
   NOTE(review): assumes pnum+var-1 stays inside d's data — not checked. */
void calc_wXX(matrix *A, matrix *x0, matrix *xXX, matrix *d, int N, int pnum, double uXX, matrix *wXX) {

    matrix _A[N];     // _A[i] = A^i (A^0 is redundant, but useful in loops)

    /* We need A^i multiple times. So, let's pre-calculate it once */
    init_mat(&(_A[0]),A->row,A->col);  // Take the size from matrix A
    fill_ident(&(_A[0]));
    for (int i=1; i<N; i++) {
        mult_mat(&(_A[i-1]),A,&(_A[i]));
    }

    init_mat(wXX,4*(N-1),1);

    for (int i=1; i<=N-1; i++) {

        /* Calculating (xXX - A^i*x0 - sum(var=1..i)[A^(i-var)*d(var-1)] */
        /* d(var-1) is for first problem. It shifts by one for every problem */
        matrix temp1, temp2, temp3, temp4, temp5;

        /* 2nd term */
        mult_mat(&(_A[i]),x0,&temp1);

        /* 3rd term: accumulated into temp2, starting from zero */
        init_mat(&temp2,3,1);
        fill_zero(&temp2);

        for (int var=1;var<=i;var++) {

            /* set up d(var): 3-vector with the disturbance in the first
               component only */
            matrix tempdvar;
            init_mat(&tempdvar,3,1); 
            fill_zero(&tempdvar);
            MAT_ELEM(&tempdvar,0,0) = MAT_ELEM(d,((pnum+var-1)/d->col),((pnum+var-1)%d->col));            

            matrix tempmult;
            mult_mat(&(_A[i-var]),&tempdvar,&tempmult);
                   
            /* temp2 += tempmult, done via a copy — presumably add_mat
               cannot write into one of its own inputs (TODO confirm) */
            matrix temp__;
            copy_mat(&temp2,&temp__);
            dest_mat(&temp2); 
            add_mat(&temp__,&tempmult,&temp2);

            /* cleanup*/
            dest_mat(&tempdvar);
            dest_mat(&tempmult);
            dest_mat(&temp__);
        }

        /* temp1 is 2nd term, temp2 is 3rd term */
        add_mat(&temp1,&temp2,&temp3);

        /* temp5 = xXX - (temp1 + temp2) */
        mult_scalar(&temp3,-1.0,&temp4);
        add_mat(xXX, &temp4, &temp5);        

        /* scatter: state rows fill the first 3*(N-1) slots, the bound uXX
           lands at slot 3*(N-1)+(i-1) */
        for (int j=0; j<3; j++)
            MAT_ELEM(wXX,(3*(i-1)+j),0) = MAT_ELEM(&temp5,j,0);
        MAT_ELEM(wXX,3*(N-1)+(i-1),0) = uXX;

        /* Cleanup */
        dest_mat(&temp1);
        dest_mat(&temp2);
        dest_mat(&temp3);
        dest_mat(&temp4);
        dest_mat(&temp5);
    }
 
    for (int i=0; i<N; i++)
        dest_mat(&(_A[i]));

}

/* Builds the RHS vector b = [ whi ; -wlo ] (stacked vertically) for the
   dual problem, using calc_wXX for each of the two box-constraint sides. */
void calc_b(matrix *A, matrix *x0, matrix *xlo, matrix *xhi, matrix *d, int N, int pnum, double ulo, double uhi, matrix *b) {

    matrix wlo, whi, neg_wlo;

    calc_wXX(A, x0, xlo, d, N, pnum, ulo, &wlo);   /* lower-bound part */
    calc_wXX(A, x0, xhi, d, N, pnum, uhi, &whi);   /* upper-bound part */

    /* negate the lower-bound part and stack it under the upper-bound part */
    mult_scalar(&wlo, -1.0, &neg_wlo);
    concat_mat_ver(&whi, &neg_wlo, b);

    dest_mat(&wlo);
    dest_mat(&whi);
    dest_mat(&neg_wlo);
}

/* Equation 3 from the paper: F = 0.5 * lambda^T * Q * lambda + lambda^T * h.
   The matrix algebra collapses to a 1x1 matrix; its single entry is
   written to *F. Reference (slow) implementation using matrix routines. */
void calc_eq3(matrix *lambda, matrix *Q, matrix *h, double *F) {

    matrix lt;                    /* lambda^T */
    matrix ltQ, ltQl, quad;       /* pieces of the quadratic term */
    matrix lin;                   /* lambda^T * h */
    matrix result;

    trans_mat(lambda, &lt);

    /* quadratic term: 0.5 * lambda^T * Q * lambda */
    mult_mat(&lt, Q, &ltQ);
    mult_mat(&ltQ, lambda, &ltQl);
    mult_scalar(&ltQl, 0.5, &quad);

    /* linear term: lambda^T * h */
    mult_mat(&lt, h, &lin);

    add_mat(&quad, &lin, &result);

    /* the whole thing must have collapsed to a scalar */
    assert(result.row == 1 && result.col == 1);
    *F = MAT_ELEM(&result,0,0);

    dest_mat(&ltQ);
    dest_mat(&ltQl);
    dest_mat(&quad);
    dest_mat(&lin);
    dest_mat(&lt);
    dest_mat(&result);
}

/* Equation 3 from the paper, computed directly on the raw data arrays
   for performance: *F = 0.5 * y^T*Q*y + y^T*h, with y = lambda.
   Requires _Q to be stored row-major. */
void calc_eq3_fast(matrix *_lambda, matrix *_Q, matrix *_h, double *F) {

    int n = _lambda->row;

    double *y = _lambda->data;
    double *Q = _Q->data;
    double *h = _h->data;
    assert(_Q->major == ROWMAJOR);

    /* Qy[i] = (Q*y)_i */
    double *Qy = (double *)calloc(n, sizeof(double));

    for (int i = 0; i < n; i++) {
        const double *Qrow = Q + (i * n);
        double acc = 0.0;
        for (int j = 0; j < n; j++)
            acc += Qrow[j] * y[j];
        Qy[i] = acc;
    }

    double quad = 0.0;   /* y^T * (Q*y) */
    double lin  = 0.0;   /* y^T * h    */

    for (int i = 0; i < n; i++) {
        quad += y[i] * Qy[i];
        lin  += y[i] * h[i];
    }

    *F = 0.5*quad + lin;

    free(Qy);
}


/* Computes temp = Q * lambda, one thread per row of Q.
   All pointers are device memory; _Q is a flat n*n row-major block,
   lambda and temp are length-n vectors (raw data, not matrix structs).
   Rows whose lambda entry is tiny are short-circuited to 0 — valid here
   because temp is only ever dotted against lambda again downstream.
   NOTE(review): the test is lambda[i] < 1e-10 (no fabs), so it assumes
   lambda is non-negative — confirm against the update rule. */
__global__ void calc_eq3_qy(double *_Q, double *lambda, int n, double *temp) {

    int row = threadIdx.x + blockIdx.x * blockDim.x;

    /* grid may overhang n; excess threads do nothing */
    if (row >= n)
        return;

    if (lambda[row] < 1.0e-10) {
        temp[row] = 0.0;
        return;
    }

    const double *Qrow = _Q + (row * n);
    double acc = 0.0;

    for (int j = 0; j < n; j++)
        acc += Qrow[j] * lambda[j];

    temp[row] = acc;
}

/* Returns A.B in *f; A is 1xn, B is nx1 (raw device arrays).
   Deliberately serial: only global thread 0 does the work, every other
   thread exits immediately. */
__global__ void dot_product_gpu(double *A, double *B, int n, double *f) { 

    int i = threadIdx.x + blockIdx.x * blockDim.x;

    if (i != 0)
        return;

    double acc = 0.0;
    for (int j = 0; j < n; j++)
        acc += A[j]*B[j];
    *f = acc;
}

/* Returns 0.5*(A.B) + (A.C) in *f; A is 1xn, B and C are nx1
   (raw device arrays).  Deliberately serial: only global thread 0
   computes, everything else exits immediately. */
__global__ void fancy_dot_product_gpu(double *A, double *B, double *C, int n, double *f) { 

    int i = threadIdx.x + blockIdx.x * blockDim.x;

    if (i != 0)
        return;

    double ab = 0.0;   /* A.B */
    double ac = 0.0;   /* A.C */

    for (int j = 0; j < n; j++) {
        ab += A[j]*B[j];
        ac += A[j]*C[j];
    }

    *f = 0.5*ab + ac;
}
/* Equation 3 from the paper, on the GPU:
       *F = 0.5 * lambda^T * Q * lambda + lambda^T * h
   Host function; lambda->data, Q->data and h->data must already be device
   pointers (see move_to_dev), F is a host pointer.
   Fix: the first kernel used to be launched with n/8 blocks, which
   silently skips the last (n mod 8) rows whenever n is not a multiple
   of 8 (and launches nothing for n < 8). Ceiling division covers every
   row; calc_eq3_qy bounds-checks the overhang. */
void calc_eq3_gpu(matrix *lambda, matrix *Q, matrix *h, double *F) {

    int n = lambda->row;

    double *y = lambda->data;
    double *_h = h->data;
    assert(Q->major == ROWMAJOR);

    /* n doubles of scratch for Q*lambda, plus one slot for the result */
    double *temp;
    cudaMalloc((void**)&temp, (n+1)*sizeof(double));

    int threads = 8;
    int blocks = (n + threads - 1) / threads;   /* ceil(n / threads) */

    calc_eq3_qy <<< blocks, threads >>> (Q->data, lambda->data, n, temp);
    cudaThreadSynchronize();

    /* fold: temp+n <- 0.5*y.temp + y.h */
    fancy_dot_product_gpu <<< 1,1 >>> (y, temp, _h, n, temp+n);
    cudaThreadSynchronize();

    cudaMemcpy(F, temp+n, sizeof(double), cudaMemcpyDeviceToHost);

    cudaFree(temp);
}



/* This calculates r, used when splitting Q into Q- and Q+.
   The paper's formula r = max(-Q, 0) * ones was implemented and then
   disabled by the author; the active version simply returns the zero
   column vector of length Q->row. */
void calc_r(matrix *Q, matrix *r) {
    init_mat(r, Q->row, 1);
    fill_zero(r);
}

/* Calculate QXX for updating lambda:
     hi == 1 : QXX = max(Q, 0)  + diag(r)    (Qhi / Q+)
     hi == 0 : QXX = max(-Q, 0) + diag(r)    (Qlo / Q-)
   r is a column vector with one entry per row of Q; QXX is allocated here.
   Improvement: the original materialized a full r->row x r->row diagonal
   matrix only to read its entries back element-by-element — O(n^2) extra
   memory — and for non-square Q it indexed that matrix out of bounds
   (j can exceed r->row). The diagonal shift is now applied directly;
   results are identical for square Q (off-diagonal entries were zero). */
void calc_QXX(matrix *Q, matrix *r, int hi, matrix *QXX) {

    init_mat(QXX, Q->row, Q->col);

    for (int i = 0; i < (Q->row); i++)
        for (int j = 0; j < (Q->col); j++) {

            /* rectified part of Q */
            double q = MAT_ELEM(Q, i, j);
            double v = (hi == 1) ? MAX(0.0, q) : MAX(0.0, -1.0*q);

            /* add the diagonal contribution from r */
            if (i == j)
                v += MAT_ELEM(r, i, 0);

            MAT_ELEM(QXX, i, j) = v;
        }
}


/* Multiplicative lambda update (eq. 8 in the paper):
     lambda <- lambda .* (max(0,-h) + Qlo*lambda) ./ (max(0,h) + Qhi*lambda)
   applied element-wise, in place.  Entries whose denominator is (near)
   zero are clamped to 0. */
void update_lambda(matrix *h, matrix *Qlo, matrix *Qhi, matrix *lambda) {
    int rows = lambda->row;
    int cols = lambda->col;

    matrix Qlo_l, Qhi_l;

    mult_mat(Qlo, lambda, &Qlo_l);   /* Qlo * lambda */
    mult_mat(Qhi, lambda, &Qhi_l);   /* Qhi * lambda */

    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {

            double num = MAX(0.0, -1*MAT_ELEM(h,i,j)) + MAT_ELEM(&Qlo_l,i,j);
            double den = MAX(0.0,    MAT_ELEM(h,i,j)) + MAT_ELEM(&Qhi_l,i,j);

            if (fabs(den) > 0.000001)
                MAT_ELEM(lambda,i,j) = MAT_ELEM(lambda,i,j)*(num/den);
            else
                MAT_ELEM(lambda,i,j) = 0.0;
        }
    }

    dest_mat(&Qlo_l);
    dest_mat(&Qhi_l);
}

/* Same eq. 8 update as update_lambda, working on the raw arrays for
   speed.  Assumes column vectors and row-major Qlo/Qhi (the asserts
   for this were disabled by the author).  A snapshot of lambda is
   taken first so every entry is updated from the same previous
   iterate; entries that are already (near) zero are skipped — they
   stay zero under the multiplicative rule. */
void update_lambda_fast(matrix *h, matrix *Qlo, matrix *Qhi, matrix *lambda) {

    int n = lambda->row;

    double *qlo = Qlo->data;
    double *qhi = Qhi->data;
    double *hv  = h->data;

    /* read-only copy of the previous iterate */
    double *old = (double *)malloc(sizeof(double)*n);
    memcpy(old, lambda->data, sizeof(double)*n);

    /* the in-place target */
    double *cur = lambda->data;

    for (int i = 0; i < n; i++) {

        if (fabs(old[i]) < 1.0e-10) {
            cur[i] = 0.0;
            continue;
        }

        double num = 0.0;   /* numerator:   (Qlo*old)_i */
        double den = 0.0;   /* denominator: (Qhi*old)_i */

        for (int j = 0; j < n; j++) {
            num += qlo[(i*n)+j] * old[j];
            den += qhi[(i*n)+j] * old[j];
        }

        num += MAX(0.0, -1*hv[i]);
        den += MAX(0.0,  1*hv[i]);

        if (fabs(den) > 0.000001)
            cur[i] = cur[i]*(num/den);
        else
            cur[i] = 0.0;
    }

    free(old);
}

/* One GPU step of the multiplicative lambda update (eq. 8), one thread
   per entry of lambda:
     newlambda[i] = lambda[i] * (max(0,-h[i]) + max(-Q,0)_i . lambda)
                              / (max(0, h[i]) + max( Q,0)_i . lambda)
   Q-/Q+ are formed on the fly from row i of Q. lambda is staged into
   dynamic shared memory, so the launch must pass row*sizeof(double) of
   shared memory per block.
   Fix: the original returned early for i >= row BEFORE __syncthreads(),
   a divergent barrier whenever row is not a multiple of blockDim.x.
   Now every thread participates in the staging loop and the barrier,
   and the bounds check happens afterwards. */
__global__ void update_lambda_gpu_helper(double *h, double *Q, double *lambda, int row, double *newlambda) {

    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int tid = threadIdx.x;

    extern __shared__ double sharedLambda[];

    /* all threads help stage lambda, including any i >= row overhang */
    for (int j=tid;j<row;j+=blockDim.x)
        sharedLambda[j] = lambda[j];

    __syncthreads();

    /* overhang threads are done once the shared copy is complete */
    if (i>=row)
        return;

    // Process lambda[i]: zero entries stay zero
    if (fabs(sharedLambda[i])<1.0e-6) {
        newlambda[i] = 0.0;
        return;
    }

    double t1 = 0.0;    // numerator
    double t2 = 0.0;    // denominator

    for (int j=0; j<row; j++) { 
        double q = Q[(i*row)+j];
        t1 += MAX(-q,0.0)*sharedLambda[j];
        t2 += MAX(q,0.0)*sharedLambda[j];
    }

    t1 += MAX(0.0,-1*h[i]);
    t2 += MAX(0.0,1*h[i]);

    if (fabs(t2)>0.000001) 
        newlambda[i] = sharedLambda[i]*(t1/t2);
    else
        newlambda[i] = 0.0;
}


/* Launches one eq.-8 lambda update step on the GPU.
   h/Q/lambda/newlambda data pointers must already be device memory.
   Fix: the launch was <<<8, row/8>>>, which covers only 8*(row/8)
   entries — the last (row mod 8) entries were never updated, and for
   row < 8 no threads launched at all. Use a ceiling division so the
   grid covers every entry; the kernel bounds-checks the overhang.
   NOTE(review): threads-per-block = ceil(row/8) must stay within the
   device limit (1024), i.e. row <= 8192 — confirm for your sizes. */
void update_lambda_gpu(matrix *h, matrix *Q, matrix *lambda, matrix *newlambda) {

    int row = lambda->row;

    int blocks = 8;
    int threads = (row + blocks - 1) / blocks;   /* ceil(row/8) */

    cudaThreadSynchronize();
    update_lambda_gpu_helper <<< blocks, threads, sizeof(double)*row >>> (h->data, Q->data, lambda->data, row, newlambda->data);
    cudaThreadSynchronize();
}

/* Equation 7 from the paper: U = -(HpInv * M^T * lambda). */
void calc_U(matrix *HpInv, matrix *M, matrix *lambda, matrix *U) {

    matrix Mtrans;      /* M^T */
    matrix Mtl;         /* M^T * lambda */
    matrix HMtl;        /* HpInv * M^T * lambda */

    trans_mat(M, &Mtrans);
    mult_mat(&Mtrans, lambda, &Mtl);
    mult_mat(HpInv, &Mtl, &HMtl);

    /* final negation */
    mult_scalar(&HMtl, -1.0, U);

    dest_mat(&Mtl);
    dest_mat(&HMtl);
    dest_mat(&Mtrans);
}


/* State propagation: x1 = A*x0 + B*u(0) + d(0).
   u(0) is the first entry of U.  d(0) for problem pnum is element pnum of
   d read in flattened row-major order, and only the first state component
   receives a disturbance. */
void calc_x1(matrix *A, matrix *x0, matrix *B, matrix *U, matrix *d, int pnum, matrix *x1) {

    matrix Ax0, Bu, dvec, sum;

    /* A*x0 */
    mult_mat(A, x0, &Ax0);

    /* B*u(0) */
    mult_scalar(B, MAT_ELEM(U,0,0), &Bu);

    /* d(0): zero vector except for the first component */
    init_mat(&dvec,3,1);
    fill_zero(&dvec);
    MAT_ELEM(&dvec,0,0) = MAT_ELEM(d,(pnum/d->col),(pnum%d->col));

    add_mat(&Ax0, &Bu, &sum);
    add_mat(&sum, &dvec, x1);

    dest_mat(&Ax0);
    dest_mat(&Bu);
    dest_mat(&dvec);
    dest_mat(&sum);
}

/* Moves the DATA part of a matrix to the device.
   The host buffer is freed and A->data becomes a device pointer, so host
   code must not touch the data until move_to_host() is called. */
void move_to_dev(matrix *A) {

    size_t bytes = sizeof(double)*(A->row)*(A->col);
    double *devptr;

    cudaMalloc((void **)&devptr, bytes);
    cudaMemcpy(devptr, A->data, bytes, cudaMemcpyHostToDevice);

    free(A->data);
    A->data = devptr;
}

/* Moves the DATA part of a matrix back to the host.
   The device buffer is released and A->data becomes a host pointer again. */
void move_to_host(matrix *A) {

    size_t bytes = sizeof(double)*(A->row)*(A->col);
    double *hostptr = (double *)malloc(bytes);

    cudaMemcpy(hostptr, A->data, bytes, cudaMemcpyDeviceToHost);
    cudaFree(A->data);

    A->data = hostptr;
}

/* Calculates the sum in the shared memory array pointed to by scratch, of length n 
   This is the ith thread.  On return scratch[0] holds the total.
   Phase 1: each thread serially folds its chunk of b = n/totalthreads
   consecutive entries into the chunk's head slot scratch[i*b].
   Phase 2: pairwise tree reduction over the chunk heads, doubling the
   stride each round.
   PRECONDITIONS: n must be a multiple of totalthreads, and every thread
   of the block must call this together (it contains __syncthreads).
*/
__device__ void sum_reduction(double *scratch, int i, int n, int totalthreads) {


    /* We assume that n/totalthreads is an integer. */

    int b = n/totalthreads; 
    int j;
    /* phase 1: serial fold of this thread's own chunk */
    for (j=1; j<b; j++) {
        scratch[(i*b)] += scratch[(i*b)+j];
    }

    __syncthreads();
    /* Now we have TOTALTHREADS threads, and TOTALTHREADS items to add */

    int step = 2;
    
    /* phase 2: loop trip count depends only on totalthreads, so every
       thread iterates the same number of times and the barrier below is
       reached uniformly */
    while (step<(totalthreads*2)) {
        if (i%step == 0 && (i+(step/2))<totalthreads ) {  // all the even number threads add, rest stay dormant.
            scratch[i*b] += scratch[(i+(step/2))*b];
        }
        step *=2;
        __syncthreads();
    } 
    __syncthreads();

    // scratch[0] should be the total sum now.

}

/* Processes lamdba[i] according to strategy 0, and stores things in scratch[i].
   Reads the previous iterate y, writes the updated entry into _y[i], and
   leaves entry i's contribution to the total cost,
       0.5*y[i]*(Q*y)_i + y[i]*h[i],
   in scratch[i] for the later sum reduction.
   Q is the raw n*n row-major array.  Entries of y below 1e-6 in magnitude
   are treated as exact zeros: skipped inside the dot products and forced
   to zero output. */

__device__ void lambda_i_process(double *y, double *Q, double *h, double *_y, double *scratch, int i, int n) {


    if (fabs(y[i]) > 1.0e-6) {

        /* Calculate _y[i] */

        // calculating the dot product 
        // numerator   = row i of max(-Q,0) dotted with y
        // denominator = row i of max( Q,0) dotted with y
        double numerator = 0.0;
        double denominator = 0.0;
        for (int j=0; j<n; j++) {

            if(fabs(y[j])>1.0e-6) {
                double q = Q[i*n + j];
                numerator += MAX(-q,0.0)*y[j];
                denominator += MAX(q,0.0)*y[j];
            }
        }
        /* max(q,0) - max(-q,0) == q, so this difference is exactly (Q*y)_i.
           It must be taken HERE, before the h terms are folded in below. */
        double qy = denominator - numerator; 

        /* NOTE(review): the zero test is on the pre-h denominator, unlike
           update_lambda which tests the full denominator — confirm intended */
        if (fabs(denominator)>1.0e-6) {
            numerator += MAX(0.0,-1*h[i]);
            denominator += MAX(0.0,h[i]);
            _y[i] = y[i]*(numerator/denominator);
        }
        else
            _y[i] = 0.0;


        /* Calculate contribution to the total cost by this little guy 
           and store that in scratch
        */
        scratch[i] = (0.5*y[i]*qy) + (y[i]*h[i]);

    } else {

        scratch[i] = 0.0;
        _y[i] = 0.0;

    }


} 

/* Giant kernel function that calculates the cost and next lamba in one go.
   n is the size of the matrices, iter is the iteration, if we were to switch strategies 
   sometime when most of the lambda's are zeros
   Launch contract (see do_iteration): ONE block of n/4 threads and
   4*n doubles of dynamic shared memory, laid out as four length-n
   vectors: y (current lambda), _y (next lambda), h, and scratch (per-entry
   cost contributions).  Q stays in global memory.  On exit _newlambda
   holds _y and *F holds the reduced cost.
*/
__global__ void iteration_kernel(double *_lambda, double *Q, double *_h, double *_newlambda, int n, int iter, double *F) {


    /* Shared chunk */
    extern __shared__ double shared[];

    /* We definitely keep lambda=>y, newlambda=>_y, and h in the shared memory */
    double *y = (double *)shared; 
    double *_y = (double *)shared + n; 
    double *h = (double *)shared + 2*n; 
    // scratch keeps the individual contribution to the cost
    double *scratch = (double *)shared + 3*n;

    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int tid = threadIdx.x;

    //XXX XXX XXX
    /* tid == i only holds in block 0, so any extra blocks bail out here:
       the kernel is effectively single-block, matching the <<<1, n/4>>>
       launch in do_iteration */
    if(tid != i) 
        return;

    /* stage lambda and h into shared memory, blockDim.x entries at a time */
    for (int j=tid;j<n;j+=blockDim.x) {
        y[j] = _lambda[j];
        h[j] = _h[j];
        /* We will move _y to newlambda in the end */
    }

    __syncthreads();


    /* This thread processes n/blockDim.x (currently = 4)  values of lambda */
    /* NOTE(review): i+((j*n)/(n/blockDim.x)) simplifies to i + j*blockDim.x
       when blockDim.x divides n — i.e. a strided assignment of entries */
    for (int j=0; j<(n/blockDim.x);j++) {
        lambda_i_process(y, Q, h, _y, scratch, i+((j*n)/(n/blockDim.x)), n);
    }
    __syncthreads();

    // Let's try the sum reduction
    sum_reduction(scratch, i, n, blockDim.x);
    __syncthreads();

    /* We also update the newlambda */
    for (int j=tid;j<n;j+=blockDim.x){
        _newlambda[j] = _y[j];
        //F[j] = scratch[j];
    }

    /* scratch[0] holds the reduced total cost */
    if (tid == 0)
        *F = scratch[0];

    __syncthreads();
    return;

}


/* Runs a single cost + lambda-update step via iteration_kernel and
   returns the cost in *F.
   lambda/Q/h/newlambda data pointers must already be device memory.
   The cost buffer is n doubles even though only slot 0 is read into *F
   (the extra room matches the kernel's optional per-thread dump). */
void do_iteration(matrix *lambda, matrix *Q, matrix *h, matrix *newlambda, int iter, double *F) {

    int n = lambda->row;

    double *devF;
    double *hostF = (double *)malloc(sizeof(double)*n);
    cudaMalloc((void**)&devF,sizeof(double)*n);

    /* one block of n/4 threads; shared memory holds y, _y, h and scratch */
    iteration_kernel <<<1,n/4, sizeof(double)*4*n >>> 
        (lambda->data, Q->data, h->data, newlambda->data, n, iter, devF);

    cudaThreadSynchronize();

    cudaMemcpy(hostF, devF, sizeof(double)*n, cudaMemcpyDeviceToHost);

    cudaThreadSynchronize();

    *F = hostF[0];

    cudaFree(devF);
    free(hostF);
}

/* Runs the WHOLE fixed-point iteration inside one kernel launch:
   repeatedly applies lambda_i_process and a cost reduction until the
   cost change drops below TOLERANCE (at least 2 iterations).
   Launch contract (see iterate_on_gpu): ONE block of n/2 threads and
   4*n doubles of dynamic shared memory.  The first two length-n slices
   ping-pong between "current" (y) and "next" (_y) lambda each iteration;
   slice 3 holds h and slice 4 the reduction scratch.  On exit _lambda is
   overwritten with the final iterate, _F[0] gets the final cost and
   _F[1] the iteration count (stored as a double). */
__global__ void iteration_kernel_all_gpu(double *_lambda, double *Q, double *_h, int n, double *_F) {


    /* Shared chunk */
    extern __shared__ double shared[];

    /* We definitely keep lambda=>y, newlambda=>_y, and h in the shared memory */
    double *y,*_y;  // First two quarters of the shared memory, depending upon the iteration.
    double *h = (double *)shared + 2*n; 
    double *scratch = (double *)shared + 3*n;

    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int tid = threadIdx.x;

    /* tid == i only in block 0: effectively a single-block kernel */
    if(tid != i) 
        return;

    for (int j=tid;j<n;j+=blockDim.x) {
        shared[j] = _lambda[j]; // The initial lambda goes to the first quarter of the shared memory
        h[j] = _h[j];
    }

    __syncthreads();

    int iter = 0;
    double F = 0.0;
    double Flast = 0.0;

    do {

        __syncthreads();
        /* ping-pong: swap which quarter is "current" vs "next" each pass */
        if (iter%2 == 0) { 
            y = (double *)shared; 
            _y = (double *)shared + n; 
        } else {
            _y = (double *)shared; 
            y = (double *)shared + n; 
        }

        __syncthreads();
    
        /* This thread processes n/blockDim.x (currently = 4)  values of lambda */
        for (int j=0; j<(n/blockDim.x);j++) {
            lambda_i_process(y, Q, h, _y, scratch, i+((j*n)/(n/blockDim.x)), n);
        }
        __syncthreads();
    
        // Let's try the sum reduction
        sum_reduction(scratch, i, n, blockDim.x);
        __syncthreads();

        Flast = F;
        F = scratch[0];    
  
        iter++; 
        __syncthreads();

    } while ((iter<2) || (fabs(Flast-F)>TOLERANCE));
 
    /* publish cost and iteration count for the host */
    if (tid == 0) {
        _F[0] = scratch[0];
        _F[1] = (double)iter;
    }

    /* We also update the newlambda */
    for (int j=tid;j<n;j+=blockDim.x){
        _lambda[j] = _y[j];
    }

    __syncthreads();
    return;

}


/* Drives iteration_kernel_all_gpu: the whole fixed-point iteration runs
   in a single kernel launch, with lambda updated in place (device memory).
   Returns the final cost in *F and the iteration count in *iter.
   The kernel reports both through the result buffer: slot 0 is the cost,
   slot 1 the iteration count (as a double). */
void iterate_on_gpu(matrix *lambda, matrix *Q, matrix *h, int *iter, double *F) {

    int n = lambda->row;

    double *d_F;
    double *h_F = (double *)malloc(sizeof(double)*n);

    /* Author's note: cudaMalloc was significantly slower when asked for
       a single double, so n of them are allocated instead. */
    cudaMalloc((void**)&d_F,sizeof(double)*n);

    /* one block of n/2 threads; shared memory holds the two ping-pong
       lambda buffers, h, and the reduction scratch */
    iteration_kernel_all_gpu <<<1,n/2, sizeof(double)*4*n >>> 
        (lambda->data, Q->data, h->data, n, d_F);

    cudaThreadSynchronize();

    cudaMemcpy(h_F, d_F, sizeof(double)*n, cudaMemcpyDeviceToHost);

    cudaThreadSynchronize();

    *F = h_F[0];
    *iter = (int)h_F[1];

    cudaFree(d_F);
    free(h_F);
}


/* Driver: loads the problem matrices, builds Q and its Q+/Q- split, then
   solves 100 successive MPC time steps. Each step builds b, solves the
   dual problem entirely on the GPU (iterate_on_gpu), recovers the control
   u(0) and next state x1, and feeds x1 back in as x0.
   NOTE(review): n=1 appears to be a problem-count scale factor and N=180
   the horizon length (lambda has 8*n*(N-1) entries, matching the comment
   at the bottom of the file) — confirm against the data files. */
int main (int argc, char *argv[]) {

    int n=1, N=180;
    matrix M, HpInv, Q;     // for Q
    matrix A, B, d;
    double ulo=-10.0, uhi=10.0;
    matrix xlo, xhi;

    /* Load matrices from files */
    read_file(&M, "data/M.bin");
    read_file(&HpInv, "data/HpInv.bin");    
    read_file(&A, "data/A.bin");
    read_file(&B, "data/B.bin");
    read_file(&d, "data/d.bin");    

    /* Q = M * HpInv * M^T */
    calc_Q(&M,&HpInv,&Q);

    /* Initialize other matrices: box constraints on the state */
    init_mat(&xlo,3,1);
    for (int i=0; i<3; i++) MAT_ELEM(&xlo,i,0) = 20.0;
    init_mat(&xhi,3,1);
    for (int i=0; i<3; i++) MAT_ELEM(&xhi,i,0) = 23.0;

    matrix x0;          // This is what links different problems together    
 
    init_mat(&x0,3,1);
    MAT_ELEM(&x0,0,0) = 22.9;
    MAT_ELEM(&x0,1,0) = 22.8;
    MAT_ELEM(&x0,2,0) = 22.8; 

    /* Q+/Q- split used by the multiplicative update (r is currently all
       zeros — see calc_r) */
    matrix Qhi, Qlo;
    matrix r;

    calc_r(&Q,&r);
    calc_QXX(&Q,&r,1,&Qhi);
    calc_QXX(&Q,&r,0,&Qlo);

    /* these stay on the device for the whole run */
    move_to_dev(&Q);
    move_to_dev(&Qlo);
    move_to_dev(&Qhi);

    for (int pnum=0; pnum<100; pnum++) {

        double u0, _x0;
    
        matrix x1;   
        matrix b, lambda, U;
       
        /* RHS of the dual problem for this time step */
        calc_b(&A, &x0, &xlo, &xhi, &d, N, pnum, ulo, uhi, &b);
    
        /* Now since we have everything we need to solve the dual program, let's solve it */
        init_mat(&lambda,8*n*(N-1),1);
        for (int i=0; i<8*n*(N-1);i++)  
            MAT_ELEM(&lambda,i,0) = 1.0;   // Initialize lambda with 1's
    

        int iter=0;
        double F=0.0;
    
        move_to_dev(&b);
        move_to_dev(&lambda);

        double s = read_timer();
        /* earlier host-driven iteration loop, kept for reference: */
//        do {
//
//            Flast = F;
//            matrix *lambda, *newlambda;
//
//            if (iter%2 == 0) {
//                lambda = &lambda0;
//                newlambda = &lambda1;
//            }
//            else  {
//                lambda = &lambda1;
//                newlambda = &lambda0;
//            }   
//
//            //TIME_FUNC(calc_eq3_gpu(lambda, &Q, &b, &F));
//            //TIME_FUNC(update_lambda_gpu(&b, &Q, lambda, newlambda));
//            
//            //TIME_FUNC(do_iteration(lambda, &Q, &b, newlambda, iter, &F));
//            (do_iteration(lambda, &Q, &b, newlambda, iter, &F));
//
//            //printf(">>> %d: %lf\n", iter, F);
//            iter++;
// 
//        } while ((iter<2) || (fabs(Flast-F)>TOLERANCE));
        iterate_on_gpu(&lambda, &Q, &b, &iter, &F);

        double e = read_timer();
   
        move_to_host(&lambda);
        move_to_host(&b);

        // printf("F:%lf, Flast:%lf\n", F, Flast);
        /* recover the primal control from the dual solution (eq. 7) */
        calc_U(&HpInv,&M,&lambda,&U);

        u0 = MAT_ELEM(&U,0,0);    

        /* propagate the state: x1 = A*x0 + B*u(0) + d(0) */
        calc_x1(&A,&x0,&B,&U,&d,pnum,&x1);
        _x0 = MAT_ELEM(&x1,0,0);

        // printf(">>> %d: %f <<<\n", pnum, MAT_ELEM(&x0,0,0)); 

        /* sparsity statistics of the final lambda (z = near-zero entries) */
        int nz = 0;
        int z = 0;
        for (int i=0; i<lambda.row; i++) 
            if (fabs(MAT_ELEM(&lambda,i,0))<1.0e-6) z++;
            else nz++;
            

        /* Copy x1 to x0 */
        for (int i=0; i<x0.row; i++)
            MAT_ELEM(&x0,i,0) = MAT_ELEM(&x1,i,0);
 
        dest_mat(&lambda);
        dest_mat(&x1);
        dest_mat(&b);
        dest_mat(&U);


        printf("Time step %d: time: %lf, cost: %lf, iterations: %d, u(0): %lf, x0: %lf, ratio: %f\n", pnum, e-s, F, iter, u0, _x0, (float)z/(float)(z+nz));

    }


    /* bring the persistent device matrices back before freeing */
    move_to_host(&Q);
    move_to_host(&Qlo);
    move_to_host(&Qhi);
    
    /* Cleanup */
    dest_mat(&Qhi);
    dest_mat(&Qlo);
    dest_mat(&r);
    dest_mat(&x0);
    dest_mat(&M);
    dest_mat(&HpInv);
    dest_mat(&Q);
    dest_mat(&xlo);
    dest_mat(&xhi);
    dest_mat(&A);
    dest_mat(&d);
    dest_mat(&B);

    return 0;
}

/* 
 lambda's size is 8*n*(N-1)
*/
