#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda.h>
#include <unistd.h>
// Integer ceiling division: number of size-`b` chunks needed to cover `a`.
// NOTE(review): the expansion is not fully parenthesized, so the trailing
// division can associate with a left-hand multiplicand. In particular
// `sizeof(double)*PADDED(w, SN)` below does NOT equal
// `sizeof(double) * (w rounded up to SN)` in general -- the allocation
// sizes in Matrix::alloc depend on today's expansion, so confirm all call
// sites before parenthesizing these macros.
#define CEIL(a, b) ((a)+(b-1))/(b)
// Rounds `val` up to the next multiple of `multi` (subject to the caveat above).
#define PADDED(val, multi) CEIL(val,multi)*(multi)


#include "config.hxx"
//add other header below, not above!
#include "gnu_timer.h"
#include "jacobi_mat.hxx"
#include "paramap.hxx"
#include "sort.hxx"
#include "spinlock.hxx"
#include "debug.hxx"


// Double-buffered device matrix. `in` is the buffer kernels read from and
// `out` the one they write to; swap()/host_swap() flip the two between
// ping-pong iterations. Pointers refer to pitched device allocations made
// by alloc().
struct Matrix{
  double *in, *out;

  // Device-side exchange of in/out. Each thread holds its own copy of the
  // struct (passed by value to the kernel), so every thread must call this
  // the same number of times to stay consistent.
  __device__ __forceinline__ void swap(){
    double* temp = out;
    out = in;
    in = temp;
  }

  // Host-side counterpart of swap(), used between kernel launches.
  inline void host_swap(){
    double* temp = out;
    out = in;
    in = temp;
  }

  // Allocates both pitched buffers; each row spans sizeof(double)*PADDED(width, SN)
  // bytes (row-major mapping, per the original note). On return *pitch is
  // converted from bytes to ELEMENTS (doubles). Aborts on allocation failure.
  // NOTE(review): the pitch of the second allocation overwrites the first --
  // assumed identical because the requested dimensions are identical.
  __host__ void alloc(int width, int height, size_t *pitch){
    cudaError_t err = cudaMallocPitch(&in, pitch, sizeof(double)*PADDED(width, SN), height);
    if(err != cudaSuccess){
      fprintf(stderr, "Matrix::alloc(in): %s\n", cudaGetErrorString(err));
      abort();
    }
    err = cudaMallocPitch(&out, pitch, sizeof(double)*PADDED(width, SN), height);
    if(err != cudaSuccess){
      fprintf(stderr, "Matrix::alloc(out): %s\n", cudaGetErrorString(err));
      abort();
    }
    *pitch /= sizeof(double);
  }
};


// Returns the sign of `number` as +/-1.0; zero maps to +1.0, matching the
// original branch ordering.
__device__ __forceinline__ double sign(double number) {
  return (number < 0.0) ? -1.0 : 1.0;
}


//assumed that all arrays to be reduced have NUM_THD_REDUCE eles
//assumed that all arrays to be reduced have NUM_THD_REDUCE eles
// Block-wide sum reduction of `temp` into *res (written by thread 0 only).
// Preconditions: all NUM_THD_REDUCE threads of the block call this, and the
// caller has issued __syncthreads() after filling `temp` (see vectorProduct).
// Postcondition: *res is valid for thread 0 immediately; other threads must
// synchronize before reading it -- there is no trailing barrier here.
template <typename T>
__device__ __forceinline__ void _block_reduce(T* temp, T* res){
  //this loop should be auto-unrolled if using -O3
  const int tid = threadIdx.x;
  // Classic tree reduction: halve the active width each round, with a
  // barrier between rounds so the next round reads completed sums. The loop
  // stops at width 2; the final pair is folded into *res below.
  for(int rd_width = NUM_THD_REDUCE >> 1; rd_width > 1; rd_width >>= 1){
    if(tid < rd_width)
      temp[tid]+=temp[tid + rd_width];
    __syncthreads();
  }
  if(tid == 0)
    *res = temp[0] + temp[1];

  /* safe reduce ... (serial reference implementation kept for debugging)
  if(tid==0){
    for(int i=0; i<NUM_THD_REDUCE; i++)
		*res += temp[i];
  }
  __syncthreads();
  */
}

//  column-major mapping version (one block processes one column)
// Dot product of device vectors a and b (length len); the scalar result is
// written to *res by a block-wide reduction. Must be called by every thread
// of a block of NUM_THD_REDUCE threads.
__device__ void vectorProduct(double *a, double *b, int len, double *res){
  __shared__ double partials[NUM_THD_REDUCE];

  // Each thread accumulates a strided slice of the element-wise products.
  double acc = 0.0;
  for(int i = threadIdx.x; i < len; i += blockDim.x)
    acc += a[i] * b[i];

  // Publish the per-thread partial, then reduce across the block.
  partials[threadIdx.x] = acc;
  __syncthreads();
  _block_reduce(partials, res);
}

// Computes the squared 2-norm of each column of A: block b writes
// norms[b] = dot(col_b, col_b) where col_b starts at A + pitch*b
// (column-major mapping, pitch in elements). Grid size = number of columns.
__global__ void get_norm(double *A, double* norms, int height, size_t pitch){
  //const int col = blockIdx.x;
  //double local = 0;
  double *col = A + pitch*blockIdx.x;
  vectorProduct(col, col, height, norms + blockIdx.x);
  // below: the row-major-mapping version, kept for reference
  /*for(int tid=threadIdx.x; tid<height; tid+=blockDim.x){
    local += A[pitch*tid + col]*A[pitch*tid + col];
  }
  temp[threadIdx.x]=local;
  __syncthreads();
  _block_reduce(temp, norms + col);*/
}


// Resets the per-block orthogonality flags, builds the identity index
// permutation, then sorts `norms` together with `idcs` via fast_sort.
// Launched as a single block of NUM_THD_SORT threads.
// NOTE(review): orth_flags is allocated with num_cta entries in main() but
// this kernel writes `width` entries -- looks like an out-of-bounds write
// whenever width > num_cta; confirm the intended flag-array size.
__global__ void sort_norm(double* norms, int* idcs, int width, bool *orth_flags){
  const int STRIDE = blockDim.x*gridDim.x;
  for(int tid=threadIdx.x; tid<width; tid+=STRIDE){
    orth_flags[tid] = 1;  // optimistically mark "already orthogonal"
    idcs[tid] = tid;      // identity permutation prior to sorting
  }
  __syncthreads();
  fast_sort(norms, idcs, width);
}
/** 
 * note: only use sorted_idx when access the content of matrix A in GM,
 *   norm is already sorted so it's ok to use raw global col_idx 
 
__global__ void grouped_rotate(struct Matrix A, struct Matrix V, double* norms, int* sorted_idx, 
                      int width, int height, int ngrps, size_t gm_pitch,
                      bool* orth_flags, struct spinlock_t lock){
  __shared__ struct jacobi_mat shared_mat;
  __shared__ double product_buffer[SN/2];
  __shared__ double product_parts[NUM_THD_ROT];

  lock.reset_spinlock();

  for(int gpair_iter=0; gpair_iter<ngrps-1; gpair_iter++){
    shared_mat.init();
    int grp_a, grp_b;
    query_group_pair(blockIdx.x, gpair_iter, &grp_a, &grp_b);
    for(int cpair_iter=0; cpair_iter<SN-1; cpair_iter++){
      if(threadIdx.x < SN/2) product_buffer[threadIdx.x] = 0;
      __syncthreads();
      //construt col of A and compute all innner product within a pair as a batch
      for(int scan_iter=0; scan_iter<SN/2; scan_iter++){
        int col_a_local;
        int col_b_local;
        query_col_pair_local(scan_iter, cpair_iter, &col_a_local, &col_b_local);
        
        int nr = bcnt(shared_mat.assigned[col_a_local]);
        for(int row=threadIdx.x; row<height; row+=blockDim.x){
          double rcol_a=0.0, rcol=0.0;
          product_parts[threadIdx.x] = 0.0;

          int local_ridx = shared_mat.next_idx(col_a_local, -1);
          for(int i=0; i<nr; i++){
            int global_ridx = query_col_global(grp_a, grp_b, local_ridx);
            rcol_a += A.in[sorted_idx[global_ridx]*gm_pitch + row] * shared_mat.val(local_ridx, col_a_local);
            local_ridx = shared_mat.next_idx(col_a_local, local_ridx);
          }

          local_ridx = shared_mat.next_idx(col_b_local, -1);
          for(int i=0; i<nr; i++){
            int global_ridx = query_col_global(grp_a, grp_b, local_ridx);
            rcol_b += A.in[sorted_idx[global_ridx]*gm_pitch + row] * shared_mat.val(local_ridx, col_b_local);
            local_ridx = shared_mat.next_idx(col_b_local, local_ridx);
          }

          product_parts[threadIdx.x] += rcol_a * rcol_b;
        }
        __syncthreads();
        _block_reduce(product_parts, product_buffer+scan_iter);
      }

      //calc all sinTheta & cosTheta and update jacobi matrix in a batch
      const int coop_num = blockDim.x/SN;
      const int coop_idx = threadIdx.x/SN;
      const int coid = threadIdx.x%SN;

      for(int update_iter=coop_idx; update_iter<SN/2; update_iter+=coop_num){
        int col_a_local;
        int col_b_local;
        query_col_pair_local(update_iter, cpair_iter, &col_a_local, &col_b_local);
        int col_a = query_col_global(grp_a, grp_b, col_a_local);
        int col_b = query_col_global(grp_a, grp_b, col_b_local);

        double ele = product_buffer[update_iter];
        double sinTheta = 0.0;
        double cosTheta = 1.0;
        if(fabs(ele) > THRESHOLD){
          double ele1 = norms[col_a];
          double ele2 = norms[col_b];
          double tao = (ele1 - ele2) / (2 * ele);
          double tan = sign(tao) / (fabs(tao) + sqrt(1 + tao*tao);
          cosTheta = 1 / sqrt(1 + tan*tan);
          sinTheta = cosTheta * tan;
          
          shared_mat.update_cached(sinTheta, cosTheta, col_a_local, col_b_local);
          if(coid == 0){
            norms[col_a] = ele1 + ele*tan;
            norms[col_b] = ele2 - ele*tan;
            orth_flags[blockIdx.x] = 0;
          }
        }
        __syncthreads();
      }
    }
    //update A & V
    for(int wb_iter=0; wb_iter<SN; wb_iter++){
      int nr = bcnt(shared_mat.assigned[wb_iter]);
      int col_update = query_col_global(grp_a, grp_b, wb_iter);
      //each thread update a row of col[wb_iter]
      for(int tid=threadIdx.x; tid<height; tid++){
        int local_ridx = shared_mat.next_idx(wb_iter, -1);
        double new_a = 0.0;
        double new_v = 0.0;
        //each relevant cols are multiplied by jacobi values to compose new value
        for(int row=0; row<nr; row++){
          int col_sorted = query_col_global(grp_a, grp_b, local_ridx);
          double *col_A = A.in + gm_pitch * sorted_idx[col_sorted];
          double *col_V = V.in + gm_pitch * sorted_idx[col_sorted];
          double jacobi_val = shared_mat.val(local_ridx, wb_iter);
          new_a += col_A[tid] * jacobi_val;
          new_v += col_V[tid] * jacobi_val;
        }
        A.out[gm_pitch*col_update + tid] = new_a;
        V.out[gm_pitch*col_update + tid] = new_v;
      }
    }

    //finally synchronize among blocks for safa ring shift
    lock.spinlock_wait(gpair_iter+1);
    A.swap();
    V.swap();
  }
}*/

// Fills jacobi_idx[0..SN-1] with the global column indices of group pair
// (grp_a, grp_b). Threads are mapped in reverse so the first SN slots are
// written by the highest-numbered threads of the block.
__device__ __forceinline__
void load_global_index(int grp_a, int grp_b, int* jacobi_idx){
  const int slot = blockDim.x - 1 - threadIdx.x;
  if(slot < SN)
    jacobi_idx[slot] = query_col_global(grp_a, grp_b, slot);
}

#define PITER 16 
#define OBLK 0
// First (initial) group-pair sweep of the one-sided Jacobi rotation.
// For each of the SN-1 column-pair steps it batch-computes the inner
// products of all SN/2 column pairs (reconstructing columns through the
// cached Jacobi matrix `shared_mat`), then derives sin/cos rotations for
// pairs whose off-diagonal element exceeds THRESHOLD, folding them back
// into `shared_mat`. `norms` is updated in place and *is_orth is cleared
// whenever any rotation fires. Must be called by all NUM_THD_ROT threads
// of the block; `sorted_idx` maps sorted column positions to storage
// columns of A (pitch gm_pitch, in elements).
__device__ __forceinline__ 
void initial_group_pair_iter(struct Matrix A, struct jacobi_mat& shared_mat, double* norms, int* sorted_idx, 
  int grp_a, int grp_b, int height, size_t gm_pitch, bool* is_orth){
    
  __shared__ double product_buffer[SN/2];       // one inner product per column pair
  __shared__ double product_parts[NUM_THD_ROT]; // per-thread partials for the reduction

  for(int cpair_iter=0; cpair_iter<SN-1; cpair_iter++){
    //if(threadIdx.x < SN/2) product_buffer[threadIdx.x] = 0;
    //__syncthreads();
    //construct cols of A and compute all inner products within a pair as a batch
    //if(cpair_iter < PITER+1) print_dev_float(shared_mat.data,SN,SN,SN+1,OBLK); 
    for(int scan_iter=0; scan_iter<SN/2; scan_iter++){
      int col_a_local;
      int col_b_local;
      int nr;
      query_col_pair_local_initial(scan_iter, cpair_iter, &col_a_local, &col_b_local);
      // shared-memory offsets of the two cached columns (OFFSET presumably
      // adds padding -- confirm against jacobi_mat.hxx)
      int col_a_soffset = col_a_local*SN + OFFSET(col_a_local*SN);
      int col_b_soffset = col_b_local*SN + OFFSET(col_b_local*SN);
      double local_parts = 0.0;
      product_parts[threadIdx.x] = 0.0;
      
      //if(scan_iter==15 && cpair_iter == 1) GLOG(0,0, "rows_a=%d, rows_b=%d\n", bcnt(shared_mat.assigned[col_a_local]), bcnt(shared_mat.assigned[col_b_local]));
      // Each thread walks a strided set of rows, rebuilding the rotated
      // value of both columns from the `nr` source columns recorded in the
      // bitmap/index_map, then accumulates their product.
      for(int row=threadIdx.x; row<height; row+=blockDim.x){
        double rcol_a=0.0, rcol_b=0.0;
        
        nr = bcnt(shared_mat.assigned[col_a_local]);
        for(int i=0; i<nr; i++){
          int global_ridx = shared_mat.global_idx[shared_mat.index_map[col_a_soffset+i]];
          double *A_col = A.in + gm_pitch*sorted_idx[global_ridx];
          //if(scan_iter == 0 && cpair_iter < PITER) GLOG_G(OBLK,"{%d=%d:%d=%d} rcol_a[%d]=%.6f*%.6f\n", col_a_local,shared_mat.global_idx[col_a_local], col_b_local, shared_mat.global_idx[col_b_local], row, A_col[row], shared_mat.data[col_a_soffset+i] );__syncthreads();
          rcol_a += A_col[row] * shared_mat.data[col_a_soffset+i];
        }
        
        nr = bcnt(shared_mat.assigned[col_b_local]);
        for(int i=0; i<nr; i++){
          int global_ridx = shared_mat.global_idx[shared_mat.index_map[col_b_soffset+i]];
          double *A_col = A.in + gm_pitch*sorted_idx[global_ridx];
          //if(scan_iter == 0 && cpair_iter < PITER) GLOG_G(OBLK,"{%d=%d,%d=%d} rcol_b[%d]=%.6f*%.6f\n",col_a_local,shared_mat.global_idx[col_a_local],col_b_local,shared_mat.global_idx[col_b_local], row, A_col[row], shared_mat.data[col_b_soffset+i] );__syncthreads();
          rcol_b += A_col[row] * shared_mat.data[col_b_soffset+i];
        }
        local_parts += rcol_a * rcol_b;
      }
      product_parts[threadIdx.x] = local_parts;
      __syncthreads();
      //__threadfence();
      _block_reduce(product_parts, product_buffer+scan_iter);
    }
    //__threadfence_block();
    
    //calc all sinTheta & cosTheta and update jacobi matrix in a batch
    // SN threads cooperate on each pair; `coid` 0 is the pair's leader.
    const int coop_num = blockDim.x/SN;
    const int coop_idx = threadIdx.x/SN;
    const int coid = threadIdx.x % SN;
    for(int update_iter=coop_idx; update_iter<SN/2; update_iter+=coop_num){
      int col_a_local;
      int col_b_local;
      query_col_pair_local_initial(update_iter, cpair_iter, &col_a_local, &col_b_local);
      int col_a = shared_mat.global_idx[col_a_local];//query_col_global(grp_a, grp_b, col_a_local);
      int col_b = shared_mat.global_idx[col_b_local];//query_col_global(grp_a, grp_b, col_b_local);

      double ele = product_buffer[update_iter];
      //if(coid==0) GLOG_G(0, "cp_iter=%d ele[%d,%d]=%.8f\n", cpair_iter, col_a, col_b, ele);
      double sinTheta = 0.0;
      double cosTheta = 1.0;
      // Rotate only when the off-diagonal element is non-negligible;
      // standard Jacobi angle formulas (tao = (a_pp - a_qq) / (2 a_pq)).
      if(fabs(ele) > THRESHOLD){
        double ele1 = norms[col_a];
        double ele2 = norms[col_b];
        double tao = (ele1 - ele2) / (2 * ele);
        double tan = sign(tao) / (fabs(tao) + sqrt(1 + tao*tao));
        cosTheta = 1 / sqrt(1 + tan*tan);
        sinTheta = cosTheta * tan;
        shared_mat.update_cached(sinTheta, cosTheta, col_a_local, col_b_local);
        if(coid == 0){
          //if(cpair_iter < PITER) GLOG_G(OBLK, "[%d,%d] ele=%.8f sin=%.8f cos=%.8f\n", col_a, col_b, ele, sinTheta, cosTheta);
          norms[col_a] = ele1 + ele*tan;
          norms[col_b] = ele2 - ele*tan;
          *is_orth = false;
          //shared_mat.update_cached_solo(sinTheta, cosTheta, col_a_local, col_b_local, cpair_iter < PITER);//update_bitmap(col_a_local, col_b_local);
          shared_mat.update_bitmap(col_a_local, col_b_local);
        }
      }
    }
    __syncthreads();
  }
}

// Subsequent (non-initial) group-pair sweep of the one-sided Jacobi
// rotation. Structure mirrors initial_group_pair_iter but runs SN/2
// column-pair steps and uses query_col_pair_local_wtf for the pairing
// schedule. Inner products of all SN/2 pairs are computed in a batch
// through the cached Jacobi matrix `shared_mat`; rotations above THRESHOLD
// update `shared_mat`, `norms`, and clear *is_orth. Must be called by all
// NUM_THD_ROT threads of the block.
__device__ __forceinline__ 
void trivial_group_pair_iter(struct Matrix A, struct jacobi_mat& shared_mat, double* norms, int* sorted_idx, 
  int grp_a, int grp_b, int height, size_t gm_pitch, bool* is_orth){
    
  __shared__ double product_buffer[SN/2];       // one inner product per column pair
  __shared__ double product_parts[NUM_THD_ROT]; // per-thread partials for the reduction

  for(int cpair_iter=0; cpair_iter<SN/2; cpair_iter++){
    //if(threadIdx.x < SN/2) product_buffer[threadIdx.x] = 0;
    //__syncthreads();
    //construct cols of A and compute all inner products within a pair as a batch
    //if(cpair_iter < PITER+1) print_dev_float(shared_mat.data,SN,SN,SN+1,OBLK); 
    for(int scan_iter=0; scan_iter<SN/2; scan_iter++){
      int col_a_local;
      int col_b_local;
      int nr;
      query_col_pair_local_wtf(scan_iter, cpair_iter, &col_a_local, &col_b_local);
      __syncthreads();
      // shared-memory offsets of the two cached columns (OFFSET presumably
      // adds padding -- confirm against jacobi_mat.hxx)
      int col_a_soffset = col_a_local*SN + OFFSET(col_a_local*SN);
      int col_b_soffset = col_b_local*SN + OFFSET(col_b_local*SN);
      double local_parts = 0.0;
      product_parts[threadIdx.x] = 0.0;
      
      //printf("local_a=%d, local_b=%d\n", col_a_local, col_b_local);
      // Each thread walks a strided set of rows, rebuilding the rotated
      // value of both columns from the `nr` source columns recorded in the
      // bitmap/index_map, then accumulates their product.
      for(int row=threadIdx.x; row<height; row+=blockDim.x){
        double rcol_a=0.0, rcol_b=0.0;
        
        nr = bcnt(shared_mat.assigned[col_a_local]);
        for(int i=0; i<nr; i++){
          int global_ridx = shared_mat.global_idx[shared_mat.index_map[col_a_soffset+i]];
          double *A_col = A.in + gm_pitch*sorted_idx[global_ridx];
          //if(global_ridx < 0 || global_ridx >= 512) GLOG_G(OBLK,"{%d=%d:%d=%d} rcol_a[%d]=%.6f*%.6f\n", col_a_local,shared_mat.global_idx[col_a_local], col_b_local, shared_mat.global_idx[col_b_local], row, A_col[row], shared_mat.data[col_a_soffset+i] );__syncthreads();
          rcol_a += A_col[row] * shared_mat.data[col_a_soffset+i];
        }
        
        nr = bcnt(shared_mat.assigned[col_b_local]);
        for(int i=0; i<nr; i++){
          int global_ridx = shared_mat.global_idx[shared_mat.index_map[col_b_soffset+i]];
          double *A_col = A.in + gm_pitch*sorted_idx[global_ridx];
          //if(global_ridx < 0 || global_ridx >= 512) GLOG_G(OBLK,"{%d=%d,%d=%d} rcol_b[%d]=%.6f*%.6f\n",col_a_local,shared_mat.global_idx[col_a_local],col_b_local,shared_mat.global_idx[col_b_local], row, A_col[row], shared_mat.data[col_b_soffset+i] );__syncthreads();
          rcol_b += A_col[row] * shared_mat.data[col_b_soffset+i];
        }
        local_parts += rcol_a * rcol_b;
      }
      product_parts[threadIdx.x] = local_parts;
      __syncthreads();
      //__threadfence_block();
      _block_reduce(product_parts, product_buffer+scan_iter);
    }
    //__threadfence_block();
    
    //calc all sinTheta & cosTheta and update jacobi matrix in a batch
    // SN threads cooperate on each pair; `coid` 0 is the pair's leader.
    const int coop_num = blockDim.x/SN;
    const int coop_idx = threadIdx.x/SN;
    const int coid = threadIdx.x % SN;
    for(int update_iter=coop_idx; update_iter<SN/2; update_iter+=coop_num){
      int col_a_local;
      int col_b_local;
      query_col_pair_local_wtf(update_iter, cpair_iter, &col_a_local, &col_b_local);
      int col_a = shared_mat.global_idx[col_a_local];//query_col_global(grp_a, grp_b, col_a_local);
      int col_b = shared_mat.global_idx[col_b_local];//query_col_global(grp_a, grp_b, col_b_local);

      double ele = product_buffer[update_iter];
      //if(coid==0) GLOG_G(0, "cp_iter=%d ele[%d,%d]=%.8f\n", cpair_iter, col_a, col_b, ele);
      double sinTheta = 0.0;
      double cosTheta = 1.0;
      // Rotate only when the off-diagonal element is non-negligible;
      // standard Jacobi angle formulas (tao = (a_pp - a_qq) / (2 a_pq)).
      if(fabs(ele) > THRESHOLD){
        double ele1 = norms[col_a];
        double ele2 = norms[col_b];
        double tao = (ele1 - ele2) / (2 * ele);
        double tan = sign(tao) / (fabs(tao) + sqrt(1 + tao*tao));
        cosTheta = 1 / sqrt(1 + tan*tan);
        sinTheta = cosTheta * tan;
        shared_mat.update_cached(sinTheta, cosTheta, col_a_local, col_b_local);
        if(coid == 0){
          //if(cpair_iter < PITER) GLOG_G(OBLK, "[%d,%d] ele=%.8f sin=%.8f cos=%.8f\n", col_a, col_b, ele, sinTheta, cosTheta);
          norms[col_a] = ele1 + ele*tan;
          norms[col_b] = ele2 - ele*tan;
          *is_orth = false;
          //shared_mat.update_cached_solo(sinTheta, cosTheta, col_a_local, col_b_local, cpair_iter < PITER);//update_bitmap(col_a_local, col_b_local);
          shared_mat.update_bitmap(col_a_local, col_b_local);
        }
      }
    }
    __syncthreads();
  }
}

// NOTE(review): unimplemented stub -- not called anywhere in this file.
// The original body was empty, so a bool-returning function fell off its
// end (undefined behavior in C++). Return false ("no rotation applied")
// explicitly until the dense path is implemented.
__device__ __forceinline__ 
bool dense_cpair_iter(struct Matrix A, struct Matrix V, double* norms, int* sorted_idx, 
                      int width, int height, int ngrps, size_t gm_pitch){
  (void)A; (void)V; (void)norms; (void)sorted_idx;
  (void)width; (void)height; (void)ngrps; (void)gm_pitch;
  return false;
}

// Main rotation kernel: each block processes one group pair per ring-shift
// round (ngrps-1 rounds total). Per round it (1) accumulates all column
// rotations of the pair into the shared Jacobi matrix via
// initial_/trivial_group_pair_iter, (2) writes the rotated columns of A and
// V from the `in` buffers to the `out` buffers, then (3) waits on a
// grid-wide spinlock barrier before ping-pong swapping in/out for the next
// round. Requires that all blocks are resident simultaneously (the spinlock
// barrier deadlocks otherwise).
__global__ void grouped_rotate(struct Matrix A, struct Matrix V, double* norms, int* sorted_idx, 
                      int width, int height, int ngrps, size_t gm_pitch,
                      bool* orth_flags, struct spinlock_t lock){
  __shared__ struct jacobi_mat shared_mat;
  
  lock.reset_spinlock();
  for(int gpair_iter=0; gpair_iter<ngrps-1; gpair_iter++){
    shared_mat.init();
    int grp_a, grp_b;
    query_group_pair(blockIdx.x, gpair_iter, &grp_a, &grp_b);
    load_global_index(grp_a, grp_b, shared_mat.global_idx);
    //GLOG(OBLK, 0, "blk%d: pairing %d:%d at iter:%d\n", blockIdx.x, grp_a, grp_b, gpair_iter); 
    // First round uses the "initial" pairing schedule; later rounds the
    // "trivial" one.
    if(gpair_iter){
      trivial_group_pair_iter(A, shared_mat, norms, sorted_idx, grp_a, grp_b, height, gm_pitch, orth_flags+blockIdx.x);
    } else {
      initial_group_pair_iter(A, shared_mat, norms, sorted_idx, grp_a, grp_b, height, gm_pitch, orth_flags+blockIdx.x);
    }
    //print_dev_float(shared_mat.data, SN, SN, SN+1, OBLK); 
    //update A & V: apply the accumulated Jacobi matrix column by column
    for(int wb_iter=0; wb_iter<SN; wb_iter++){
      int nr = bcnt(shared_mat.assigned[wb_iter]);
      int col_update = query_col_global(grp_a, grp_b, wb_iter);//shared_mat.global_idx[wb_iter];//query_col_global(grp_a, grp_b, wb_iter);
      int s_offset = OFFSET(SN*wb_iter) + SN*wb_iter;
      //each thread updates one strided set of rows of col[wb_iter]
      for(int row=threadIdx.x; row<height; row+=blockDim.x){
        double new_a = 0.0;
        double new_v = 0.0;
        //each relevant source column is scaled by its Jacobi value to compose the new value
        for(int col=0; col<nr; col++){
          int col_sorted = shared_mat.global_idx[shared_mat.index_map[s_offset+col]];//query_col_global(grp_a, grp_b, shared_mat.index_map[OFFSET(SN*wb_iter + col)]);
          double jacobi_val = shared_mat.data[s_offset + col];//val(local_ridx, wb_iter);
          double* col_A = A.in+gm_pitch*sorted_idx[col_sorted];
          double* col_V = V.in+gm_pitch*sorted_idx[col_sorted];
          new_a += col_A[row] * jacobi_val;
          new_v += col_V[row] * jacobi_val;
        }
        //if(sorted_idx[col_update] == 456) GLOG_G(0, "b%dt%d---iter %d [%d,%d] reading = %.8f writing= %.8f\n", blockIdx.x, threadIdx.x, gpair_iter, row, sorted_idx[col_update], A.in[gm_pitch*sorted_idx[col_update] + row], new_a);
        A.out[gm_pitch*sorted_idx[col_update] + row] = new_a;
        V.out[gm_pitch*sorted_idx[col_update] + row] = new_v;
        //printf("A_new[%d,%d]=%.8f]\n",row,sorted_idx[col_update], new_a);
      }
    }
    //finally synchronize among blocks for a safe ring shift
    lock.spinlock_wait(gpair_iter+1);
    //GLOG_B(0, "blk%d:iter:%d fin\n", blockIdx.x, gpair_iter);  
    A.swap();
    V.swap();
  }
}

// Entry point: reads a matrix from the file named in argv[1] (header:
// width height, then width*height doubles), then iterates the one-sided
// Jacobi SVD sweep on the GPU until all blocks report orthogonality or
// ITERATION sweeps have run.
int main(int argc, char* argv[]){
  int width, height;
  int num_groups, num_cta;
  size_t pitch;
  FILE *fp;
  double *host_A, *host_U, *host_S, *host_V;
  double *dev_U, *dev_S;
  double *dev_norm;
  int *dev_sorted_idx;
  bool *host_orth, *dev_orth;
  struct spinlock_t lock;
  struct Matrix dev_A, dev_V;

  if(argc < 2){
    fprintf(stderr, "usage: %s <matrix-file>\n", argv[0]);
    return 1;
  }

  //load matrix from file
  double t1 = mwtime();
  fp = fopen(argv[1], "r");
  if(fp == NULL){
    fprintf(stderr, "cannot open %s\n", argv[1]);
    return 1;
  }
  if(fscanf(fp, "%d", &width) != 1 || fscanf(fp, "%d", &height) != 1){
    fprintf(stderr, "malformed header in %s\n", argv[1]);
    fclose(fp);
    return 1;
  }

  host_A = new double[height*width];
  host_V = new double[height*height]();  // value-initialized: off-diagonal of V must be 0
                                         // (the original left the buffer uninitialized)
  host_U = new double[width*width];      // allocated but not otherwise used here
  host_S = new double[height];           // allocated but not otherwise used here

  // Row-major read. NOTE(review): the outer loop runs over `width` with a
  // row stride of `width`; this is only consistent when width == height --
  // confirm for non-square inputs.
  for(int i=0; i<width; i++)
    for(int j=0; j<height; j++)
      if(fscanf(fp, "%lf", &host_A[i*width+j]) != 1){
        fprintf(stderr, "malformed matrix data in %s\n", argv[1]);
        fclose(fp);
        return 1;
      }
  fclose(fp);

  // V starts as the identity. Fix: V is height x height, so the diagonal
  // stride is `height` (the original used `width`, which is wrong for
  // non-square matrices).
  for(int i=0; i<height; i++)
    host_V[i*height+i] = 1.0;
  double t2 = mwtime();
  printf("File IO time: %.3f ms\n", t2-t1);

  //Alloc device memory
  num_groups = CEIL(height, SN/2);
  num_cta = num_groups >> 1;
  dev_A.alloc(width, height, &pitch);
  dev_V.alloc(height, height, &pitch);
  // NOTE(review): sort_norm writes PADDED(width,SN) flags but only num_cta
  // are allocated here -- confirm the intended size of the flag array.
  cudaHostAlloc(&host_orth, sizeof(bool)*num_cta, cudaHostAllocWriteCombined | cudaHostAllocMapped);
  cudaHostGetDevicePointer(&dev_orth, host_orth, 0);
  cudaMalloc(&dev_S, sizeof(double)*height);
  cudaMalloc(&dev_U, sizeof(double)*width*width);
  // Fix: get_norm/sort_norm touch PADDED(width,SN) entries, so size the
  // norm and index arrays accordingly (the original allocated only `width`).
  cudaMalloc(&dev_norm, sizeof(double)*PADDED(width,SN));
  cudaMalloc(&dev_sorted_idx, sizeof(int)*PADDED(width,SN));
  cudaMemcpy2D(dev_A.in, sizeof(double)*pitch, host_A, sizeof(double)*height, sizeof(double)*height, width, cudaMemcpyHostToDevice);
  cudaMemcpy2D(dev_V.in, sizeof(double)*pitch, host_V, sizeof(double)*height, sizeof(double)*height, height, cudaMemcpyHostToDevice);
  lock.init_spinlock(num_cta);

  t1 = mwtime(); // algorithm starts
  generate_map(num_groups);
  bool converged = false;
  int iteration;
  for(iteration=0; iteration<ITERATION && !converged; iteration++){
    printf("----iter %d-----\n", iteration);
    // NOTE(review): PADDED(width,SN) is passed as the column-length
    // parameter of get_norm -- confirm this should not be `height`.
    get_norm<<<PADDED(width,SN),NUM_THD_REDUCE>>>(dev_A.in, dev_norm, PADDED(width,SN), pitch);
    cudaDeviceSynchronize();
    sort_norm<<<1,NUM_THD_SORT>>>(dev_norm, dev_sorted_idx, PADDED(width,SN), dev_orth);
    cudaDeviceSynchronize();
    grouped_rotate<<<num_cta,NUM_THD_ROT>>>(dev_A, dev_V, dev_norm, dev_sorted_idx, PADDED(width,SN), height, num_groups, pitch, dev_orth, lock);
    cudaDeviceSynchronize(); // also makes the mapped host_orth flags visible below

    converged = true;
    for(int i=0; i<num_cta; i++)
      if(!host_orth[i]){
        converged = false;
        break;
      }

    // grouped_rotate ping-pong swaps in/out ngrps-1 times inside the
    // kernel; when that count is odd (num_groups even) the host-side view
    // must be swapped once more so `in` names the freshest buffer.
    if(num_groups%2 == 0){
      dev_A.host_swap();
      dev_V.host_swap();
    }
  }
  t2 = mwtime();

  cudaError_t err = cudaGetLastError();
  if(err != cudaSuccess)
    printf("%s\n", cudaGetErrorString(err));
  printf("iters: %d, time: %.3f\n", iteration, t2-t1);

  // Release device and host resources (the original leaked everything).
  cudaFree(dev_A.in);
  cudaFree(dev_A.out);
  cudaFree(dev_V.in);
  cudaFree(dev_V.out);
  cudaFree(dev_S);
  cudaFree(dev_U);
  cudaFree(dev_norm);
  cudaFree(dev_sorted_idx);
  cudaFreeHost(host_orth);
  delete[] host_A;
  delete[] host_V;
  delete[] host_U;
  delete[] host_S;
  return 0;
}
