#include "libmri.h"
#include "auxfns.h"

#include "cudebug_shm.h"

#include <stdlib.h>
#include <stdarg.h>
#include <cuda.h>
#include <cufft.h>
#include <iostream>
#include <omp.h>

using namespace std;

void joint_soft_threshold (cplxf *C, int nc, int nch, int nwa, float *absC, float *TabsC, float lambda, int batch);
void data_consistency_projection (cplxf *X, cplxf *X0, float *mask, int xs1, int xs2, int nch, int batch);
void make_rank1_spirit_op (cplxf *K, cplxf *vmap, float *emap, int *imsize, int qcols, int batch);

/* 2-D L1-SPIRiT POCS reconstruction, distributed over all visible GPUs
 * with one OpenMP thread per device.
 *
 * data   -- k-space, laid out data[x0 + xs0*(x1 + xs1*(x2 + xs2*ch))];
 *           reconstructed slices are written back in place
 * dsize  -- {xs0, xs1, xs2, nch}
 * k2d    -- per-x0-slice 2-D SPIRiT calibration kernels (ks1 x ks2 x nch*nch)
 * config -- solver configuration (iterations, thresholds, calibration mode,
 *           debugging hooks)
 */
void
pocs2d_l1spirit_cuda (cplxf *data, int *dsize, cplxf *k2d,
                      l1spirit_config const &config)
{
	int *ksize = config.ksize;
	int n_iters = config.l1spirit_iterations;
	float lambda_1 = config.l1_threshold_start,
	      lambda_n = config.l1_threshold_end;

	int xs0 = dsize[0], xs1 = dsize[1], xs2 = dsize[2], nch = dsize[3];
	int ks1 = ksize[1], ks2 = ksize[2];

	/* Wavelet filter banks (reconstruction/decomposition, low/high pass)
	 * and plan bookkeeping, all filled in by plan_wavelet() below. */
	float *lor, *hir, *lod, *hid;
	int nc, lf, L, *S;

	float *cmask;
	solver_mask (cmask, data, xs0, xs1, xs2);

	l1spirit_debug(config,mask,0,cmask,2,xs1,xs2);

	/* Again, the kludge! Plan the wavelet transform on one representative
	 * slice; the resulting plan (nc, lf, L, S, filters) is read-only and
	 * shared by every thread in the parallel region below. */
	{
		cplxf *X = new cplxf [xs1*xs2*nch];
		for (int i1 = 0; i1 < xs1*xs2*nch; ++i1)
			X [i1] = data [(xs0/2) + xs0*i1];

		plan_wavelet (nc, lf, L, S, lor, hir, lod, hid, X, xs1, xs2);

		delete [] X;
	}

	l1spirit_debug (config,wavplan,0,S,2,2,L+2);

	/* Size of the wavelet approximation band (excluded from thresholding). */
	int nwa = S[0]*S[1];

	int num_gpus;
	cuda (GetDeviceCount (&num_gpus));

	#pragma omp parallel num_threads(num_gpus)
	{
		double ts_tot = timestamp();

		int tid = omp_get_thread_num(), dev = tid;
		if (tid != 0)
			cuda(SetDevice(tid));

		cudaDeviceProp prop;
		cuda (GetDevice(&dev));
		cuda (GetDeviceProperties (&prop, dev));

		/* Each thread owns one contiguous chunk of x0-slices and processes
		 * them in batches of one slice per multiprocessor. */
		int chunksize = (xs0 + num_gpus-1) / num_gpus;
		int batchsize = prop.multiProcessorCount;

		/* Plan [0] covers full batches; plan [1] the within-chunk tail.
		 * A chunk truncated by xs0 (last thread) may produce a batch that
		 * matches neither -- that case falls back to plan [0], which is
		 * safe because all buffers hold batchsize slices and each 2-D FFT
		 * in a batch is independent (see ft selection below). */
		cufftHandle _K_plan[2], _X_plan[2];

		plan_cufft2c (_K_plan[0], xs1, xs2, nch*nch*batchsize);
		if (chunksize % batchsize)
			plan_cufft2c (_K_plan[1], xs1, xs2, nch*nch*(chunksize % batchsize));

		plan_cufft2c (_X_plan[0], xs1, xs2, nch*batchsize);

		if (chunksize % batchsize)
			plan_cufft2c (_X_plan[1], xs1, xs2, nch*(chunksize % batchsize));

		int nX = xs1 * xs2 * nch;

		/* Host staging buffers. */
		cplxf *cX = new cplxf [nX*batchsize];
		cplxf *cK = new cplxf [xs1*xs2*nch*nch*batchsize];

		/* Device buffers: image stack, data-consistency reference, SPIRiT
		 * output, SPIRiT operators, and wavelet-coefficient scratch. */
		cplxf *X;     cuda (Malloc (&X,  nX     *batchsize*sizeof(*X)));
		cplxf *X0;    cuda (Malloc (&X0, nX     *batchsize*sizeof(*X0)));
		cplxf *Y;     cuda (Malloc (&Y,  nX     *batchsize*sizeof(*Y)));
		cplxf *K;     cuda (Malloc (&K,  nX*nch *batchsize*sizeof(*K)));

		cplxf *C;     cuda (Malloc (&C,     nc*nch    *batchsize*sizeof(*C)));
		float *absC;  cuda (Malloc (&absC,  nc        *batchsize*sizeof(*absC)));
		float *TabsC; cuda (Malloc (&TabsC, nc        *batchsize*sizeof(*TabsC)));

		float *mask; cuda (Malloc (&mask, xs1*xs2*sizeof(*mask)));
		cuda (Memcpy (mask, cmask, xs1*xs2*sizeof(*mask), cudaMemcpyHostToDevice));


		bool do_orthiter = (config.calib_algorithm == l1spirit_config::cheev)
		                or (config.calib_algorithm == l1spirit_config::lanczos)
		                or (config.calib_algorithm == l1spirit_config::culanczos);

		cplxf *vmap = 0;
		float *emap = 0;
		int qcols = config.calib_pixel_n_eig;
		int qriters = config.calib_pixel_n_qrit;

		if (do_orthiter){
			cuda (Malloc (&vmap, xs1*xs2*nch*qcols*batchsize*sizeof(*vmap)));
			cuda (Malloc (&emap, xs1*xs2*qcols*batchsize*sizeof(*emap)));
		}

		#pragma omp critical
		{
			cout << "[" << tid << "] "
				<< "Batchsize = " << batchsize
				<< ", xs0 = " << xs0
				<< ", Chunksize = " << chunksize << endl;
		}

		#pragma omp for schedule(static)
		for (int ii0 = 0; ii0 < xs0; ii0 += chunksize)
		{
		  #pragma omp critical
		  {
			cout << "[" << tid << "] "
				<< "ii0 = " << ii0 << endl;
		  }

		  /* The last chunk may be cut short by xs0 as well as by chunksize;
		   * clamp the batch against the true end so we never index data[]
		   * with i0+b >= xs0. */
		  int iend = min (xs0, ii0+chunksize);

		  for (int i0 = ii0; i0 < iend; i0 += batchsize)
		  {
			int batch = batchsize, ft = 0;

			if (i0+batch > iend){
				batch = iend-i0;
				/* Use the tail plan only when it was planned for exactly
				 * this batch count; otherwise keep the full-size plan --
				 * transforming a few stale trailing slices is harmless
				 * since only the first 'batch' slices are copied out. */
				ft = (batch == chunksize % batchsize) ? 1 : 0;
			}

			#pragma omp critical
			{
				cout << "[" << tid << "] "
					<< "Initializing for " << i0 << " ... " << i0+batch-1 << endl;
			}
			double ts = timestamp();

			cufftHandle &X_plan = _X_plan[ft];
			cufftHandle &K_plan = _K_plan[ft];

			int k1_0 = pad_index (xs1, ks1);
			int k2_0 = pad_index (xs2, ks2);

			for (int b = 0; b < batch; ++b)
			{
				for (int i = 0; i < xs1*xs2*nch; ++i)
					cX [i + xs1*xs2*nch*b] = data [i0+b + xs0*i];

			// This is inefficient. Start here if you need to reduce
			// memcpy overhead: We should only copy out the central non-zero region

				for (int i = 0; i < xs1*xs2*nch*nch; ++i)
					cK [i + xs1*xs2*nch*nch*b] = 0.0f;

				/* Embed the (flipped) calibration kernel in the center of a
				 * zero-padded xs1 x xs2 image, scaled for the unnormalized
				 * inverse FFT below. */
				for (int ch = 0; ch < nch*nch; ++ch)
					for (int k2 = 0; k2 < ks2; ++k2)
						for (int k1 = 0; k1 < ks1; ++k1)
							cK [k1_0+k1 + xs1*(k2_0+k2 + xs2*(ch + nch*nch*b))]
							 = k2d [i0+b + xs0*(ks1-1-k1 + ks1*
						                   (ks2-1-k2 + ks2*ch))]
							   * sqrtf(xs1*xs2);
			}

			cuda (Memcpy (X0, cX, nX *batch*sizeof(*X), cudaMemcpyHostToDevice));
			cuda (Memcpy (X, X0, nX *batch*sizeof(*X), cudaMemcpyDeviceToDevice));

			if (i0 <= config.debug_slice && config.debug_slice < i0+batch){
				l1spirit_cudebug(config,l1s_x0,i0,X,4,xs1,xs2,nch,batch);
			}

			cuda (Memcpy (K, cK, nX*nch *batch*sizeof(*K), cudaMemcpyHostToDevice));

			cufft2c_exec (K_plan, CUFFT_INVERSE, K, xs1, xs2, nch*nch*batch);

			if (i0 <= config.debug_slice && config.debug_slice < i0+batch){
				cudebug_shm ("K",K,5,xs1,xs2,nch,nch,batch);
			}

			if (do_orthiter)
			{
				double ts = timestamp();
				int imsize[3] = {xs1, xs2, nch};

				calib2d_cuorthiter (vmap, emap, imsize, K, qcols, qriters, batch);

				if (i0 <= config.debug_slice && config.debug_slice < i0+batch){
					cudebug_shm ("vmap", vmap, 5, imsize[0],imsize[1],nch,qcols,batch);
					cudebug_shm ("emap", emap, 4, imsize[0],imsize[1],qcols,batch);
				}

				make_rank1_spirit_op (K, vmap, emap, imsize, qcols, batch);

				if (i0 <= config.debug_slice && config.debug_slice < i0+batch){
					cudebug_shm ("Kmap",K,5,imsize[0],imsize[1],nch,nch,batch);
				}


				ts = timestamp()-ts;

				if (config.verbose_info){
					#pragma omp critical
					cout << "[" << omp_get_thread_num() << ", " << i0 << "] "
					     << "orthiter: " << ts << " s "
					     << "(" << (ts/batch) << " s/slice)" << endl;
				}
			}


			if (i0 <= config.debug_slice && config.debug_slice < i0+batch){
				l1spirit_cudebug(config,l1s_kern,i0,K,5,xs1,xs2,nch,nch,batch);
			}

			cuda (ThreadSynchronize());
			ts = timestamp()-ts;
			#pragma omp critical
			{
				cout << "[" << tid << "]"
					<< "    Done: " << ts << " s (" << (ts/batch) << ")" << endl
				    << "    Iterating for " << i0 << " ... " << i0+batch-1 << endl;
			}

			ts = timestamp();

			/* POCS iteration: IFFT -> SPIRiT projection -> joint soft
			 * thresholding in the wavelet domain (if lambda > 0) -> FFT ->
			 * data-consistency projection onto the acquired samples. */
			for (int iter = 1; iter <= n_iters; ++iter)
			{
				bool do_debug_now = (iter==1)
				                and i0 <= config.debug_slice 
				                and config.debug_slice < i0+batch;

				cufft2c_exec (X_plan, CUFFT_INVERSE, X, xs1, xs2, nch*batch);

				if (do_debug_now){
					l1spirit_cudebug(config,l1s_ifft,0,X,4,xs1,xs2,nch,batch);
				}

				cuspirit (Y, false, K, xs1, xs2, nch, nch, X, xs1, xs2, nch, batch);

				//cuda(Memcpy (Y, X, xs1*xs2*nch*batch*sizeof(*X), 
				 //            cudaMemcpyDeviceToDevice));

				if (do_debug_now){
					l1spirit_cudebug(config,l1s_spir,0,Y,4,xs1,xs2,nch,batch);
				}

				/* Linear ramp of the threshold from lambda_1 to lambda_n. */
				float lambda = lambda_1 + (iter-1.0f)*(lambda_n-lambda_1)
			                                 / (n_iters-1.0f);
				if (lambda > 0)
				{
					cu_forward_wavelet (C, nc,
					                    Y, xs1, xs2,
					                    L, S, lod, hid, lf,
					                    nch * batch);

					if (do_debug_now){
						l1spirit_cudebug(config,l1s_fwav,0,C,4,1,nc,nch,batch);
					}

					joint_soft_threshold (C, nc, nch, nwa, absC, TabsC, lambda, batch);

					if (do_debug_now){
						l1spirit_cudebug(config,l1s_thr,0,C,4,1,nc,nch,batch);
					}

					cu_inverse_wavelet (X, xs1, xs2,
					                    C, nc, 
					                    L, S, lor, hir, lf,
					                    nch * batch);

					if (do_debug_now){
						l1spirit_cudebug(config,l1s_iwav,0,X,4,xs1,xs2,nch,batch);
					}
				}
				else{
					cuda (Memcpy (X, Y, xs1*xs2*nch*batch * sizeof(*X),
					                     cudaMemcpyDeviceToDevice));
				}

				cufft2c_exec (X_plan, CUFFT_FORWARD, X, xs1, xs2, nch*batch);

				if (do_debug_now){
					l1spirit_cudebug(config,l1s_fft,0,X,4,xs1,xs2,nch,batch);
				}

				data_consistency_projection (X, X0, mask, xs1, xs2, nch, batch);

				if (do_debug_now){
					l1spirit_cudebug(config,l1s_proj,0,X,4,xs1,xs2,nch,batch);
				}
			}

			cuda(ThreadSynchronize());
			ts = timestamp()-ts;

			#pragma omp critical
			{
				cout << "[" <<  tid <<  "]" << "    Done!! " << ts << " s (" << (ts/batch) << ")" << endl;
			}
			ts = timestamp();
		
			cuda (Memcpy (cX, X, nX * batch * sizeof(*X), cudaMemcpyDeviceToHost));

			for (int b = 0; b < batch; ++b){
				for (int i = 0; i < xs1*xs2*nch; ++i)
					data [i0+b + xs0*i] = cX [i + xs1*xs2*nch*b];
			}

			ts = timestamp()-ts;
			#pragma omp critical
			{
				cout << "[" << tid << "]"  << " Copied to shm " << ts << " s (" << (ts/batch) << ")" << endl;
			}
		  }
		}

		/* Release per-thread device resources. cudaFree(0) is a no-op, so
		 * vmap/emap are safe to free even when do_orthiter is false. */
		cuda (Free (vmap));
		cuda (Free (emap));
		cuda (Free (K));
		cuda (Free (X));
		cuda (Free (X0));
		cuda (Free (Y));
		cuda (Free (C));	/* was previously leaked */
		cuda (Free (mask));
		cuda (Free (absC));
		cuda (Free (TabsC));

		/* Destroy the cufft plans (previously leaked); plan [1] exists only
		 * when a tail batch was planned. */
		cufftDestroy (_K_plan[0]);
		cufftDestroy (_X_plan[0]);
		if (chunksize % batchsize){
			cufftDestroy (_K_plan[1]);
			cufftDestroy (_X_plan[1]);
		}

		delete [] cX;
		delete [] cK;
		cuda (ThreadExit());

		ts_tot = timestamp() - ts_tot;
		#pragma omp critical
		{
			cout << "[" << tid << "]"  << " Exiting after " << (ts_tot) << " s" <<endl;
		}
	}

	delete [] cmask;
	delete [] S;
	/* NOTE(review): lor/hir/lod/hid from plan_wavelet() are never released
	 * here -- TODO confirm who owns the filter arrays. */
}

/* Replace each pixel's nch x nch SPIRiT operator with the rank-1 outer
 * product V V^H of that pixel's eigenvector 0 from the calibration maps.
 *
 * Layouts (matching the host-side debug dumps):
 *   vmap : (im1, im2, nch, qcols, batch)
 *   emap : (im1, im2, qcols, batch) -- currently unused (selection disabled)
 *   K    : (im1, im2, nch*nch, batch)
 *
 * Launch: 1-D grid striding over im1*im2*batch pixels (one block per pixel
 * per pass); dynamic shared memory must hold at least nch cplxf values.
 */
__global__ void
make_rank1_spirit_op (cplxf *K, cplxf *vmap, float *emap,
                      int im1, int im2, int nch, int qcols, int batch)
{
	extern __shared__ cplxf _shmem_[]; cplxf *shm = _shmem_;

	cplxf *V = shm; shm += nch;

	for (int bid = blockIdx.x; bid < im1*im2*batch; bid += gridDim.x)
	{
		/* Decompose the flat pixel id into (ix, iy, b). */
		int ix = bid;
		int  b = ix / (im1*im2); 
		   ix -= b*im1*im2;
		int iy = ix / im1;
		   ix -= iy*im1;

		/* (disabled) select the eigenvector with the largest |eigenvalue|
		__shared__ int j_max;
		if (threadIdx.x == 0)
		{
			j_max = -1;
			float maxval = 1e9f; //HUGE_VALF;
			for (int j = 0; j < qcols; ++j){
				float d = fabsf (emap [ix + im1*(iy + im2*j)]);
				if (d > maxval ){
					j_max = j;
					maxval = d;
				}
			}
		}
		__syncthreads();
		*/

		/* Stage this pixel's eigenvector 0 in shared memory.  The batch
		 * offset was previously omitted, making every slice read and write
		 * slice 0 concurrently. */
		for (int i = threadIdx.x; i < nch; i += blockDim.x){
			V[i] = vmap [ix + im1*(iy + im2*(0 /*j_max*/ + qcols*(i + nch*b)))];
		}

		__syncthreads();

		for (int id = threadIdx.x; id < nch*nch; id += blockDim.x){
			int j = id / nch;
			int i = id - j*nch;

			K[ix + im1*(iy + im2*(id + nch*nch*b))] = V[i] * conj(V[j]);
		}

		/* Barrier before the next grid-stride iteration overwrites V[]:
		 * without it a fast thread could clobber V[] while a slower one is
		 * still reading it in the loop above. */
		__syncthreads();
	}
}


/* Host wrapper: launch make_rank1_spirit_op over all pixels of all batch
 * slices and wait for completion.  im = {im1, im2, nch}. */
void
make_rank1_spirit_op (cplxf *K, cplxf *vmap, float *emap,
                      int *im, int qcols, int batch)
{
	int dev;
	cudaDeviceProp prop;
	cuda(GetDevice(&dev));
	cuda(GetDeviceProperties(&prop,dev));

	/* The kernel stages one nch-vector in shared memory; the previous
	 * nch*nch sizing over-allocated and needlessly limited occupancy. */
	make_rank1_spirit_op
		<<< 4*prop.multiProcessorCount, 32, sizeof(cplxf)*im[2] >>>
		(K, vmap, emap, im[0], im[1], im[2], qcols, batch);

	cuda (GetLastError());	/* catch launch-configuration errors */
	cuda (ThreadSynchronize());
}

/* Joint (across channels) soft thresholding of wavelet coefficients.
 * One block per batch slice; coefficients [0, nwa) -- the approximation
 * band -- are left untouched.  absC/TabsC receive the per-coefficient
 * joint magnitude and its thresholded value as side outputs.
 * NOTE(review): abs2() is assumed to return |z|^2 -- confirm in auxfns. */
__global__ void
joint_soft_threshold (cplxf *C, int nc, int nch, int nwa,
                      float *absC, float *TabsC, float lambda)
{
	const int slice = blockIdx.x;

	C     += nc*nch*slice;
	absC  += nc*slice;
	TabsC += nc*slice;

	/* Start at the warp-aligned floor of nwa so global accesses stay
	 * coalesced; the guard below skips the approximation band exactly. */
	const int first = (nwa/warpSize) * warpSize;

	for (int idx = first + threadIdx.x; idx < nc; idx += blockDim.x)
	{
		if (idx < nwa)
			continue;

		/* Joint magnitude across all channels. */
		float mag = 0.0f;
		for (int ch = 0; ch < nch; ++ch)
			mag += abs2 (C[idx + nc*ch]);
		mag = sqrtf (mag);
		absC[idx] = mag;

		/* Soft-threshold the magnitude. */
		float thr = mag - lambda;
		if (thr < 0.0f)
			thr = 0.0f;
		TabsC[idx] = thr;

		/* Rescale every channel; epsilon avoids division by zero. */
		const float scale = thr / (mag + 1e-6f);
		for (int ch = 0; ch < nch; ++ch)
			C[idx + nc*ch] *= scale;
	}
}

/* Host wrapper: one block per batch slice, 64 threads each. */
void
joint_soft_threshold (cplxf *C, int nc, int nch, int nwa,
                      float *absC, float *TabsC, float lambda, int batch)
{
	const int nthreads = 64;

	joint_soft_threshold <<< batch, nthreads >>>
		(C, nc, nch, nwa, absC, TabsC, lambda);
	cuda (GetLastError());
}

/* Enforce data consistency: where mask[i] == 1 restore the acquired sample
 * from X0, where mask[i] == 0 keep the current estimate in X (fractional
 * mask values blend linearly).  One block per batch slice. */
__global__ void
data_consistency_projection (cplxf *X, cplxf *X0, float *mask,
                             int xs1, int xs2, int nch)
{
	const int npix = xs1*xs2;
	const int offset = npix*nch*blockIdx.x;

	cplxf *Xb  = X  + offset;
	cplxf *X0b = X0 + offset;

	for (int ch = 0; ch < nch; ++ch){
		for (int i = threadIdx.x; i < npix; i += blockDim.x){
			const float m = mask[i];
			const int j = i + npix*ch;

			Xb[j] = m*X0b[j] + (1.0f - m)*Xb[j];
		}
	}
}

/* Host wrapper: one block per batch slice, 64 threads striding the pixels. */
void
data_consistency_projection (cplxf *X, cplxf *X0, float *mask,
                             int xs1, int xs2, int nch, int batch)
{
	const int nthreads = 64;

	data_consistency_projection <<< batch, nthreads >>>
		(X, X0, mask, xs1, xs2, nch);
	cuda (GetLastError());
}

