#include "MatrixMath.h"

__global__ void cudaReduce_sum1C(REALN*,REALN*,UINTN,UINTN);
__global__ void cudaReduce_sum2C(REALN*,REALN*,REALN*,UINTN,UINTN,UINTN,UINTN);
__global__ void cudaSquareEl(REALN*,REALN*,REALN);
__global__ void cudaMultEl(REALN*,REALN*,REALN);
__global__ void cudaVar(REALN*,REALN*,REALN*,REALN*,REALN);
__global__ void cudaR(REALN*,REALN*,REALN*,REALN*,REALN*,REALN*,REALN);

extern __shared__ REALN sdata[];

/*
 * Matrix product m1 * m2.
 * CPU path: naive triple loop. Device path: legacy cuBLAS SGEMM.
 * Both operands must live in the same memory space and have compatible
 * dimensions (m1 cols == m2 rows); otherwise the process exits with code 3.
 * Returns a newly allocated Matrix owned by the caller.
 */
Matrix* MatrixMath::Mult(Matrix* m1,Matrix* m2){
	REALN t;
	UINTN i,j,k;
	UINTN max_k;
	Matrix* m_out;
	
	if(m1->getCol()!=m2->getRow()){
		cerr<<"Invalid matrix dimensions (Mult)"<<endl;
		exit(3);
	}
	if(m1->getMemorySpace()!=m2->getMemorySpace()){
		cerr<<"Memory spaces must be equal"<<endl;
		exit(3);
	}
	
	max_k=m1->getCol();	// shared inner dimension
	
	if(m1->getMemorySpace()==CPU_MEM && m2->getMemorySpace()==CPU_MEM){
		// Host path: straightforward O(row*col*k) accumulation.
		m_out = new Matrix(m1->getRow(),m2->getCol(),CPU_MEM);
		for(i=0;i<m_out->getRow();i++){
			for(j=0;j<m_out->getCol();j++){
				for(k=0,t=0.0;k<max_k;k++){
					t+=(m1->get(i,k)*m2->get(k,j));
				}
				m_out->set(t,i,j);
			}
		}
	}else if(m1->getMemorySpace()==DEVICE_MEM && m2->getMemorySpace()==DEVICE_MEM){
		// Device path: cuBLAS is column-major, so compute the product with the
		// operand order reversed (m2 first) into a (m2.cols x m1.rows) buffer,
		// then swapdim() relabels the result to the expected (m1.rows x m2.cols).
		// NOTE(review): getMode()/getLeading() presumably supply the cuBLAS
		// transpose flag and leading dimension per matrix — confirm in Matrix.
		m_out = new Matrix(m2->getCol(),m1->getRow(),DEVICE_MEM);
		
		cublasSgemm(m2->getMode(),m1->getMode(),m2->getCol(),m1->getRow(),m2->getRow(),1.0,
					m2->getData(),m2->getLeading(),m1->getData(),m1->getLeading(),
					0.0,m_out->getData(),m_out->getRow());
					
		m_out->swapdim();
		//TODO single/double precision dispatch (Xgemm); consider checking
		//cublasGetError() after the call.
	}else{
		// Previously m_out was returned uninitialized for any other (equal)
		// memory space value; fail loudly instead.
		cerr<<"Unsupported memory space (Mult)"<<endl;
		exit(3);
	}
	
	return m_out;
}
//----------------------------------------------------------
/*Matrix* MatrixMath::ReduceToRow(Matrix* m){
	REALN t;
	UINTN i,j;
	Matrix* m_out;
	Matrix* m_out_temp;

	if(m->getMemorySpace()==CPU_MEM){
		m_out = new Matrix(1,m->getCol(),CPU_MEM);
		for(j=0;j<m->getCol();j++){
			for(i=0,t=0.0;i<m->getRow();i++){
				t+=m->get(i,j);
			}
			m_out->set(t,0,j);
		}
	}else{
		dim3 nblock(m->getRow()/(REDUCE_BS1R*2),m->getCol());
		m_out_temp = new Matrix(nblock.x,m->getCol(),DEVICE_MEM);
		cout<<m->getRow()<<" "<<m->getCol()<<endl;
		cudaReduce_sum1R<<<nblock,REDUCE_BS1R,REDUCE_BS1R*sizeof(REALN)>>>
					(m->getData(),m_out_temp->getData(),
					m->getCol(),
					nblock.x*REDUCE_BS1R*2);
		cudaThreadSynchronize();
		m_out=m_out_temp;
		
		m_out=new Matrix(1,m->getCol(),DEVICE_MEM);
		//m_out->setUni(0.0);
		cudaReduce_sum2R<<<(m->getRow()/REDUCE_BS2R)+1,REDUCE_BS2R>>>
					(m_out_temp->getData(),
					m->getData(),
					m_out->getData(),
					m_out_temp->getCol(),
					REDUCE_BS1C*2*nblock.x,m->getCol(),
					m->getRow());
					//m->getCol());

		cudaThreadSynchronize();
		delete (m_out_temp);
	}
	return m_out;
}
__global__ void cudaReduce_sum1R(REALN* g_idata, REALN* g_odata,UINTN c, UINTN n){
  int tid = threadIdx.x;              
  int i = blockIdx.x*(REDUCE_BS1C*2) + tid;
  int gridSize = REDUCE_BS1C*2*gridDim.x;
  sdata[tid]=0;
  while (i < n) { 
	sdata[tid]  +=  g_idata[i*c+blockIdx.y] +
				g_idata[(i+REDUCE_BS1C)*c+blockIdx.y];
	i += gridSize;
  }
  __syncthreads();
  #if (REDUCE_BS1C >= 512)
  	if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); 
  #endif 
  #if (REDUCE_BS1C >= 256)
  	if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); 
  #endif
  #if (REDUCE_BS1C >= 128)
  	if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); 
  #endif
  if (tid < 32) {
  	  #if (REDUCE_BS1C >= 64)
     	 sdata[tid] += sdata[tid + 32];
     	 //__syncthreads(); 
      #endif
      #if (REDUCE_BS1C >= 32)
      	sdata[tid] += sdata[tid + 16];
      	//__syncthreads(); 
      #endif
      #if (REDUCE_BS1C >= 16)
      	sdata[tid] += sdata[tid + 8];
      	//__syncthreads(); 
      #endif
      #if (REDUCE_BS1C >= 8)
      	sdata[tid] += sdata[tid + 4];
      	//__syncthreads(); 
      #endif
      #if (REDUCE_BS1C >= 4)
      	sdata[tid] += sdata[tid + 2];
      	//__syncthreads(); 
      #endif
      #if (REDUCE_BS1C >= 2)
      	sdata[tid] += sdata[tid + 1];
      	//__syncthreads(); 
      #endif
  }
  if (tid == 0){
  	  g_odata[blockIdx.x+blockIdx.y*gridDim.x] = sdata[0];    
  } 
}
__global__ void cudaReduce_sum2R(REALN* idata1,REALN* idata2,
			 REALN* odata,UINTN c1,UINTN c2,UINTN c3,UINTN n){    
	int i = (blockIdx.x*REDUCE_BS2C+threadIdx.x);
	if(i>n){
		return;
	}
	int j=0;
	odata[i]=0;
	
	while(j<c1){
		odata[i] += idata1[i*c1+j];
		j++;
	}
	j=c2;
	while(j<c3){
		odata[i] += idata2[i*c3+j];
		j++;
	}
}*/
//----------------------------------------------------------
/*
 * Sums each row of m, producing a (rows x 1) column vector.
 * CPU path: plain accumulation loop. Device path: two-phase reduction —
 * phase 1 (cudaReduce_sum1C) produces nblock.x partial sums per row in
 * shared memory, phase 2 (cudaReduce_sum2C) folds those partials plus the
 * tail columns phase 1 did not cover.
 * Returns a newly allocated Matrix owned by the caller.
 */
Matrix* MatrixMath::ReduceToCol(Matrix* m){
	REALN t;
	UINTN i,j;
	Matrix* m_out;
	Matrix* m_out_temp;
	
	if(m->getMemorySpace()==CPU_MEM){
		m_out = new Matrix(m->getRow(),1,CPU_MEM);
		for(i=0;i<m->getRow();i++){
			for(j=0,t=0.0;j<m->getCol();j++){
				t+=m->get(i,j);
			}
			m_out->set(t,i,0);
		}
	}else{
		// Phase 1: one grid row per matrix row; each block of REDUCE_BS1C
		// threads reduces REDUCE_BS1C*2 columns, covering the first
		// nblock.x*REDUCE_BS1C*2 columns of every row.
		// NOTE(review): assumes getCol() >= REDUCE_BS1C*2 so nblock.x >= 1;
		// smaller matrices would launch a zero-block grid — confirm callers.
		dim3 nblock(m->getCol()/(REDUCE_BS1C*2),m->getRow());
		m_out_temp = new Matrix(m->getRow(),nblock.x,DEVICE_MEM);
		cudaReduce_sum1C<<<nblock,REDUCE_BS1C,REDUCE_BS1C*sizeof(REALN)>>>
					(m->getData(),m_out_temp->getData(),
					m->getCol(),
					nblock.x*REDUCE_BS1C*2);
		cudaThreadSynchronize();
		
		m_out=new Matrix(m->getRow(),1,DEVICE_MEM);
		// Phase 2: one thread per row folds the partial sums and the tail
		// columns [c2, getCol()).
		// BUGFIX: the tail start must be REDUCE_BS1C*2*nblock.x — exactly the
		// element count passed as n to phase 1 above. The previous code used
		// REDUCE_BS2C here, which double-counts or skips columns whenever
		// REDUCE_BS2C != REDUCE_BS1C.
		cudaReduce_sum2C<<<(m->getRow()/REDUCE_BS2C)+1,REDUCE_BS2C>>>
					(m_out_temp->getData(),
					m->getData(),
					m_out->getData(),
					m_out_temp->getCol(),
					REDUCE_BS1C*2*nblock.x,m->getCol(),
					m->getRow());
		
		cudaThreadSynchronize();
		delete (m_out_temp);
	}
	return m_out;
}
/*
 * Phase-1 row reduction kernel.
 * Grid: (blocks-per-row, number-of-rows); block: REDUCE_BS1C threads.
 * Dynamic shared memory: REDUCE_BS1C*sizeof(REALN) (file-scope sdata[]).
 * g_idata is row-major with row stride c; n must equal
 * gridDim.x*REDUCE_BS1C*2 (guaranteed by the caller), so the paired load
 * at i+REDUCE_BS1C never runs past the covered range.
 * Writes one partial sum per block: g_odata[blockIdx.x + blockIdx.y*gridDim.x].
 */
__global__ void cudaReduce_sum1C(REALN* g_idata, REALN* g_odata,UINTN c, UINTN n){
  int tid = threadIdx.x;              
  int i = blockIdx.x*(REDUCE_BS1C*2) + tid;
  int gridSize = REDUCE_BS1C*2*gridDim.x;
  sdata[tid]=0;
  // Each thread folds pairs of elements of its row into shared memory.
  while (i < n) { 
	sdata[tid]  +=  g_idata[i+blockIdx.y*c] +
				g_idata[i+REDUCE_BS1C+blockIdx.y*c];
	i += gridSize;
  }
  __syncthreads();
  // Tree reduction in shared memory; barrier after every halving step.
  #if (REDUCE_BS1C >= 512)
  	if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); 
  #endif 
  #if (REDUCE_BS1C >= 256)
  	if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); 
  #endif
  #if (REDUCE_BS1C >= 128)
  	if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); 
  #endif
  if (tid < 32) {
  	  // Last-warp reduction without __syncthreads(). BUGFIX: the volatile
  	  // alias forces the compiler to re-read shared memory at each step
  	  // instead of caching values in registers — required for correctness
  	  // of this warp-synchronous idiom (see the CUDA reduction sample).
  	  volatile REALN* vsm = sdata;
  	  #if (REDUCE_BS1C >= 64)
     	 vsm[tid] += vsm[tid + 32];
      #endif
      #if (REDUCE_BS1C >= 32)
      	vsm[tid] += vsm[tid + 16];
      #endif
      #if (REDUCE_BS1C >= 16)
      	vsm[tid] += vsm[tid + 8];
      #endif
      #if (REDUCE_BS1C >= 8)
      	vsm[tid] += vsm[tid + 4];
      #endif
      #if (REDUCE_BS1C >= 4)
      	vsm[tid] += vsm[tid + 2];
      #endif
      #if (REDUCE_BS1C >= 2)
      	vsm[tid] += vsm[tid + 1];
      #endif
  }
  // Thread 0 publishes this block's partial sum.
  if (tid == 0){
  	  g_odata[blockIdx.x+blockIdx.y*gridDim.x] = sdata[0];    
  } 
}
/*
 * Phase-2 row reduction kernel: one thread per row (n rows total).
 * idata1: (n x c1) phase-1 partial sums; idata2: the original (n x c3)
 * matrix, of which columns [c2, c3) are the tail phase 1 did not cover.
 * odata[i] receives the complete row sum.
 */
__global__ void cudaReduce_sum2C(REALN* idata1,REALN* idata2,
			 REALN* odata,UINTN c1,UINTN c2,UINTN c3,UINTN n){    
	int i = (blockIdx.x*REDUCE_BS2C+threadIdx.x);
	// Grid is rounded up, so trailing threads must bail out.
	if(i>=n){
		return;
	}
	// Accumulate in a register and store once, instead of a global-memory
	// read-modify-write of odata[i] on every iteration.
	REALN acc = 0;
	UINTN j;
	for(j=0;j<c1;j++){
		acc += idata1[i*c1+j];
	}
	for(j=c2;j<c3;j++){
		acc += idata2[i*c3+j];
	}
	odata[i] = acc;
}
//-----------------------------------------------------------------------------------
/*
 * Applies an element-wise unary operation to every entry of m.
 * operation: OP_POW2 (square each element, parameter ignored) or
 * OP_MULT (scale each element by parameter). Any other value exits
 * with code 3. Device path tiles the flat data array into grids of at
 * most MAX_GRID_DIM blocks, plus a single-block tail launch.
 * Returns a newly allocated Matrix owned by the caller.
 */
Matrix* MatrixMath::AllElemUnaryOp(Matrix* m, short operation,REALN parameter){
	UINTN i,j;
	Matrix* m_out;
	UINTN last=0;
	UINTN current=0;
	void (*funct)(REALN*,REALN*,REALN);
	UINTN bs;
	
	if(m->getMemorySpace()==CPU_MEM){
		m_out = new Matrix(m->getRow(),m->getCol(),CPU_MEM);
		for(i=0;i<m->getRow();i++){
			for(j=0;j<m->getCol();j++){
				switch(operation){
					case(OP_POW2):
						m_out->set(pow(m->get(i,j),2),i,j);
					break;
					case(OP_MULT):
						m_out->set(m->get(i,j)*parameter,i,j);
					break;
					default:
						// Previously unknown ops silently left m_out's
						// entries unset; fail loudly instead.
						cerr<<"Unknown operation (AllElemUnaryOp)"<<endl;
						exit(3);
				} 
			}
		}
	}else{
		switch(operation){
			case(OP_POW2):
				funct = &cudaSquareEl;
				bs = POW2_BS;
			break;
			case(OP_MULT):
				funct = &cudaMultEl;
				bs = MULT_BS;
			break;
			default:
				// BUGFIX: funct/bs were left uninitialized for unknown
				// ops, leading to an indirect launch through a garbage
				// pointer (undefined behavior).
				cerr<<"Unknown operation (AllElemUnaryOp)"<<endl;
				exit(3);
		} 
		
		m_out = new Matrix(m->getRow(),m->getCol(),DEVICE_MEM);
		if(m->getNData()<bs){
			// Small input: single block sized to the data.
			(*funct)<<<1,m->getNData()>>>
						(m->getData(),m_out->getData(),parameter);
		}else{
			// Chunks of MAX_GRID_DIM full blocks, then the remaining full
			// blocks, then a single-block launch for the sub-block tail.
			current=0;
			while((m->getNData()-current)/bs>MAX_GRID_DIM){
					(*funct)<<<MAX_GRID_DIM,bs>>>
								(m->getData()+current,
								m_out->getData()+current,parameter);
					current += MAX_GRID_DIM*bs;
			}
			(*funct)<<<(m->getNData()-current)/bs,bs>>>
						(m->getData()+current,
						m_out->getData()+current,parameter);
			last = current+((m->getNData()-current)/bs)*bs;
			// BUGFIX: skip the tail launch when the remainder is empty —
			// a <<<1,0>>> launch is an invalid configuration error.
			if(m->getNData()-last>0){
				(*funct)<<<1,m->getNData()-last>>>
							(m->getData()+last,m_out->getData()+last,parameter);
			}
			cudaThreadSynchronize();
		}
	}
	return m_out;
}
/*
 * Element-wise square: out[i] = in[i]^2.
 * One thread per element; the host sizes the launch exactly, so there is
 * no bounds guard. `parameter` is unused — it exists only so this kernel
 * shares a signature with cudaMultEl for dispatch via function pointer.
 */
__global__  void cudaSquareEl(REALN* in,REALN* out,REALN parameter){
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	REALN v = in[idx];
	out[idx] = v * v;
}
/*
 * Element-wise scale: out[i] = in[i] * parameter.
 * One thread per element; the host sizes the launch exactly, so there is
 * no bounds guard.
 */
__global__  void cudaMultEl(REALN* in,REALN* out,REALN parameter){
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	out[idx] = in[idx] * parameter;
}
//-----------------------------------------------------------------------------------
/*
 * Element-wise variance-style combination of three equally-sized vectors:
 *   t = sum2[j] + ntrack*mean[j]^2 - 2*mean[j]*sum[j]
 * with negative results (possible through floating-point cancellation)
 * clamped to CORRECTED_NEGATIVE_VARIANCE.
 * All three inputs must be row vectors or all column vectors of equal
 * length, in the same memory space; otherwise exits with code 3.
 * Returns a newly allocated (1 x max_dim) Matrix owned by the caller.
 */
Matrix* MatrixMath::Var(Matrix* sum, Matrix* sum2, Matrix* mean,REALN ntrack){
	UINTN j;
	REALN t;
	Matrix* m_out;
	UINTN last=0;
	UINTN current=0;
	UINTN bs=VAR_BS;
	UINTN max_dim;
	
	if(!(((sum->getRow()==1 && sum2->getRow()==1 && mean->getRow()==1 &&
		sum->getCol()==sum2->getCol()&&sum2->getCol()==mean->getCol()))||
		((sum->getCol()==1 && sum2->getCol()==1 && mean->getCol()==1 &&
		sum->getRow()==sum2->getRow()&&sum2->getRow()==mean->getRow())))){ 
		cerr<<"Invalid matrix dimensions (Var)"<<endl;
		exit(3);
	}
	if(!(sum->getMemorySpace()==sum2->getMemorySpace()&&
		sum2->getMemorySpace()==mean->getMemorySpace())){
		cerr<<"Memory spaces must be equal"<<endl;
		exit(3);
	}
	// Length of the vector regardless of row/column orientation.
	if(sum->getCol()>sum->getRow()){
		max_dim=sum->getCol();
	}else{
		max_dim=sum->getRow();
	}

	if(sum->getMemorySpace()==CPU_MEM){
		m_out = new Matrix(1,max_dim,CPU_MEM);
		for(j=0;j<max_dim;j++){
			t = sum2->getData()[j]+ntrack*mean->getData()[j]*mean->getData()[j] -
				((REALN)2.0)*mean->getData()[j]*sum->getData()[j];
			if(t<(REALN(0.0))){
				cout<<t<<endl;	// debug trace of the clamped negative value
				t=(REALN)CORRECTED_NEGATIVE_VARIANCE;
			}
			m_out->set(t,0,j);
		}
	}else{
		// Tiled launch scheme identical to AllElemUnaryOp: chunks of
		// MAX_GRID_DIM full blocks, remaining full blocks, then the tail.
		m_out = new Matrix(1,max_dim,DEVICE_MEM);
		if(sum->getNData()<bs){
			cudaVar<<<1,sum->getNData()>>>
						(sum->getData(),sum2->getData(),mean->getData(),m_out->getData(),ntrack);
		}else{
			current=0;
			while((sum->getNData()-current)/bs>MAX_GRID_DIM){
					cudaVar<<<MAX_GRID_DIM,bs>>>
								(sum->getData()+current,sum2->getData()+current,mean->getData()+current,
								m_out->getData()+current,ntrack);
					current += MAX_GRID_DIM*bs;
			}
			cudaVar<<<(sum->getNData()-current)/bs,bs>>>
						(sum->getData()+current,sum2->getData()+current,mean->getData()+current,
						m_out->getData()+current,ntrack);
			last = current+((sum->getNData()-current)/bs)*bs;
			// BUGFIX: skip the tail launch when the remainder is empty —
			// a <<<1,0>>> launch is an invalid configuration error.
			if(sum->getNData()-last>0){
				cudaVar<<<1,sum->getNData()-last>>>
							(sum->getData()+last,sum2->getData()+last,mean->getData()+last,
							m_out->getData()+last,ntrack);
			}
			cudaThreadSynchronize();
		}
	}
	return m_out;
}
/*
 * Device counterpart of MatrixMath::Var:
 *   out[i] = sum2[i] + ntrack*mean[i]^2 - 2*mean[i]*sum[i],
 * clamped to CORRECTED_NEGATIVE_VARIANCE when negative.
 * One thread per element; the host sizes the launch exactly, so there is
 * no bounds guard.
 */
__global__  void cudaVar(REALN* sum,REALN* sum2,REALN* mean, REALN* out,REALN ntrack){
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	REALN mu = mean[idx];
	REALN v = sum2[idx] + ntrack*mu*mu - ((REALN)2.0)*mu*sum[idx];
	// Floating-point cancellation can push the result below zero.
	out[idx] = (v < (REALN)0.0) ? (REALN)CORRECTED_NEGATIVE_VARIANCE : v;
}
//-----------------------------------------------------------------------------------
/*
 * Element-wise combination of five equally-shaped matrices:
 *   out[i][j] = (sct + t1*ntrack - t2 - t3) / var_conj   (per element).
 * All inputs must share dimensions and memory space; otherwise exits
 * with code 3. Device path tiles the flat data array exactly like
 * AllElemUnaryOp and Var.
 * Returns a newly allocated Matrix owned by the caller.
 */
Matrix* MatrixMath::R(Matrix* sct, Matrix* t1, Matrix* t2,Matrix* t3,Matrix* var_conj,REALN ntrack){
	UINTN i,j;
	REALN t;
	Matrix* m_out;
	UINTN last=0;
	UINTN current=0;
	UINTN bs=R_BS;
	
	if(!(
		(sct->getRow()==t1->getRow()&&t1->getRow()==t2->getRow()&&
		t2->getRow()==t3->getRow()&&t3->getRow()==var_conj->getRow())
		&&
		(sct->getCol()==t1->getCol()&&t1->getCol()==t2->getCol()&&
		t2->getCol()==t3->getCol()&&t3->getCol()==var_conj->getCol())
		)){
		cerr<<"Invalid matrix dimensions (R)"<<endl;
		exit(3);
	}
	if(!(sct->getMemorySpace()==t1->getMemorySpace()&&t1->getMemorySpace()==t2->getMemorySpace()&&
		t2->getMemorySpace()==t3->getMemorySpace()&&t3->getMemorySpace()==var_conj->getMemorySpace())){
		cerr<<"Memory spaces must be equal"<<endl;
		exit(3);
	}

	if(sct->getMemorySpace()==CPU_MEM){
		m_out = new Matrix(sct->getRow(),sct->getCol(),CPU_MEM);
		for(i=0;i<sct->getRow();i++){
			for(j=0;j<sct->getCol();j++){
				t = (sct->get(i,j)+t1->get(i,j)*ntrack-t2->get(i,j)-t3->get(i,j))/var_conj->get(i,j);
				m_out->set(t,i,j);
			}
		}
	}else{
		m_out = new Matrix(sct->getRow(),sct->getCol(),DEVICE_MEM);
		if(sct->getNData()<bs){
			cudaR<<<1,sct->getNData()>>>
						(sct->getData(),t1->getData(),t2->getData(),
						t3->getData(),var_conj->getData(),
						m_out->getData(),ntrack);
		}else{
			// Chunks of MAX_GRID_DIM full blocks, remaining full blocks,
			// then a single-block tail.
			current=0;
			while((sct->getNData()-current)/bs>MAX_GRID_DIM){
					cudaR<<<MAX_GRID_DIM,bs>>>
								(sct->getData()+current,t1->getData()+current,t2->getData()+current,
								t3->getData()+current,var_conj->getData()+current,
								m_out->getData()+current,ntrack);
					current += MAX_GRID_DIM*bs;
			}
			cudaR<<<(sct->getNData()-current)/bs,bs>>>
						(sct->getData()+current,t1->getData()+current,t2->getData()+current,
						t3->getData()+current,var_conj->getData()+current,
						m_out->getData()+current,ntrack);
			last = current+((sct->getNData()-current)/bs)*bs;
			// BUGFIX: skip the tail launch when the remainder is empty —
			// a <<<1,0>>> launch is an invalid configuration error.
			if(sct->getNData()-last>0){
				cudaR<<<1,sct->getNData()-last>>>
							(sct->getData()+last,t1->getData()+last,t2->getData()+last,
							t3->getData()+last,var_conj->getData()+last,
							m_out->getData()+last,ntrack);
			}
			cudaThreadSynchronize();
		}
	}
	return m_out;
}
/*
 * Device counterpart of MatrixMath::R:
 *   out[i] = (sct[i] + t1[i]*ntrack - t2[i] - t3[i]) / var_conj[i].
 * One thread per element; the host sizes the launch exactly, so there is
 * no bounds guard.
 */
__global__  void cudaR(REALN* sct,REALN* t1,REALN* t2,REALN* t3,REALN* var_conj,REALN* out,REALN ntrack){
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	// Same left-to-right evaluation order as the host path.
	REALN numer = sct[idx] + t1[idx]*ntrack - t2[idx] - t3[idx];
	out[idx] = numer / var_conj[idx];
}
//-----------------------------------------------------------------------------------
 /*
  * Prints the device's free and total memory (bytes) via the CUDA driver
  * API, in the form "*** <free> <total>". Debug/diagnostic helper.
  */
 void MatrixMath::ShowMem(){
	size_t freeBytes;
	size_t totalBytes;
	cuMemGetInfo(&freeBytes, &totalBytes);
	cout << "*** " << freeBytes << " " << totalBytes << endl;
}

/*void MatrixMath::init(){
	int i;
	UINTN size = MAX_MATRIX_DIM * sizeof(REALN);
	REALN temp[MAX_MATRIX_DIM];
	
	for(i=0;i<MAX_MATRIX_DIM;i++){
		temp[i]=1.0;
	}
	CUDA_SC(cudaMalloc((void**)&ones,size));
	CUDA_SC(cudaMemcpy(ones,temp,size,cudaMemcpyHostToDevice));
}*/

//TODO reduce to, transposed matrices, speedup fill 1s


