#include <string>
#include <vector>
#include <iostream>
#include <fstream>
#include <cuda_runtime.h>
#include "Complex.cu"
#include<cmath>
#include "../utils.cuh"
#include "../fft_gpu.cu"

#pragma once

// Radix sizes handled by the specialized butterfly passes below.
#define	R2	2		// 2-radix
#define	R4	4		// 4-radix
// Upper bound on threads per block used when launching the FFT kernels.
#define MAX_THREAD_PER_BLOCK 1024 

/* Abort with file/line context when a CUDA runtime call fails.
 * Fixed: the macro no longer carries a trailing ';' or a dangling
 * line-continuation, so `if (x) Check_Error(e); else ...` is now legal
 * (the old version expanded to an extra empty statement and glued the
 * following source line onto the macro). */
#define Check_Error( error ) do{\
  if((error)!=cudaSuccess) {\
      cout << "Check_Error " << " in line " <<  __LINE__  << " in file " << __FILE__ << " " << cudaGetErrorString(error) << endl;\
      exit(-1);\
  }\
} while(0)

/* Same idea with fprintf reporting.
 * Fixed: evaluate the argument exactly once (the old version evaluated it
 * in the comparison and then reported cudaGetLastError(), which may be a
 * different error), and drop the embedded trailing ';'. */
#define Check_Error_D( a ) do { \
  cudaError_t err_chk_ = (a); \
  if (cudaSuccess != err_chk_) { \
  fprintf(stderr, "Cuda runtime error in line %d of file %s : %s \n", \
          __LINE__, __FILE__, cudaGetErrorString(err_chk_) ); \
  exit(EXIT_FAILURE); \
  } \
  } while(0)


// Maximum number of factors a transform length may decompose into.
#define NFCT 25
// Allocate `num` elements of `type` from the heap (device heap in kernels).
#define RALLOC(type,num) \
  ((type *)malloc((num)*sizeof(type)))
// free() a pointer and null it to avoid dangling reuse (free(NULL) is a no-op).
#define DEALLOC(ptr) \
  do { free(ptr); (ptr)=NULL; } while(0)
// Exchange two values of the given type.
#define SWAP(a,b,type) \
  do { type tmp_=(a); (a)=(b); (b)=tmp_; } while(0)

// Complex helper macros shared by all radix passes (cmplx: .r real, .i imag).

// Butterfly: a = c+d ; b = c-d
#define PMC(a,b,c,d) { a.r=c.r+d.r; a.i=c.i+d.i; b.r=c.r-d.r; b.i=c.i-d.i; }
// a = b+c
#define ADDC(a,b,c) { a.r=b.r+c.r; a.i=b.i+c.i; }
// a *= b (real scalar)
#define SCALEC(a,b) { a.r*=b; a.i*=b; }
// a *= +i (rotate by +90 degrees)
#define ROT90(a) { float tmp_=a.r; a.r=-a.i; a.i=tmp_; }
// a *= -i (rotate by -90 degrees)
#define ROTM90(a) { float tmp_=-a.r; a.r=a.i; a.i=tmp_; }
// Strided accessors into the work arrays; ch/cc/wa, ido, l1 and cdim must be
// in scope at the expansion site.
#define CH(a,b,c) ch[(a)+ido*((b)+l1*(c))]
#define CC(a,b,c) cc[(a)+ido*((b)+cdim*(c))]
#define WA(x,i) wa[(i)-1+(x)*(ido-1)]
/* a = b*c */
#define A_EQ_B_MUL_C(a,b,c) { a.r=b.r*c.r-b.i*c.i; a.i=b.r*c.i+b.i*c.r; }
/* a = conj(b)*c*/
#define A_EQ_CB_MUL_C(a,b,c) { a.r=b.r*c.r+b.i*c.i; a.i=b.r*c.i-b.i*c.r; }

// Sign-dependent variants: `sign` (+1 backward, -1 forward) must be in scope.
#define PMSIGNC(a,b,c,d) { a.r=c.r+sign*d.r; a.i=c.i+sign*d.i; b.r=c.r-sign*d.r; b.i=c.i-sign*d.i; }
/* a = b*c (b conjugated when sign<0) */
#define MULPMSIGNC(a,b,c) { a.r=b.r*c.r-sign*b.i*c.i; a.i=b.r*c.i+sign*b.i*c.r; }
/* a *= b (b conjugated when sign<0) */
#define MULPMSIGNCEQ(a,b) { float xtmp=a.r; a.r=b.r*a.r-sign*b.i*a.i; a.i=b.r*a.i+sign*b.i*xtmp; }


// Radix-3 butterfly macros (pass3b / pass3f).
// PREP3: load CC(idx,{0,1,2},k), form t1 = c1+c2, t2 = c1-c2, and emit the
// DC output CH(idx,k,0) = t0+t1.  Declares t0,t1,t2 used by the PARTSTEPs.
#define PREP3(idx) \
        cmplx t0 = CC(idx,0,k), t1, t2; \
        PMC (t1,t2,CC(idx,1,k),CC(idx,2,k)) \
        CH(idx,k,0).r=t0.r+t1.r; \
        CH(idx,k,0).i=t0.i+t1.i;
// Radix-3 step for the i==0 column (no twiddle multiplication needed).
#define PARTSTEP3a(u1,u2,twr,twi) \
        { \
        cmplx ca,cb; \
        ca.r=t0.r+twr*t1.r; \
        ca.i=t0.i+twr*t1.i; \
        cb.i=twi*t2.r; \
        cb.r=-(twi*t2.i); \
        PMC(CH(0,k,u1),CH(0,k,u2),ca,cb) \
        }

// Radix-3 step with twiddle multiply — backward transform (plain product).
#define PARTSTEP3b(u1,u2,twr,twi) \
        { \
        cmplx ca,cb,da,db; \
        ca.r=t0.r+twr*t1.r; \
        ca.i=t0.i+twr*t1.i; \
        cb.i=twi*t2.r; \
        cb.r=-(twi*t2.i); \
        PMC(da,db,ca,cb) \
        A_EQ_B_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \
        A_EQ_B_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \
        }

// Radix-3 step with twiddle multiply — forward transform (conjugated product).
#define PARTSTEP3f(u1,u2,twr,twi) \
        { \
        cmplx ca,cb,da,db; \
        ca.r=t0.r+twr*t1.r; \
        ca.i=t0.i+twr*t1.i; \
        cb.i=twi*t2.r; \
        cb.r=-(twi*t2.i); \
        PMC(da,db,ca,cb) \
        A_EQ_CB_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \
        A_EQ_CB_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \
        }

// Radix-5 butterfly macros (pass5b / pass5f).
// NOTE: in the PARTSTEP5* macros there is deliberately NO operator between
// the `twai*...` and `twbi*...` products: callers pass the twiddle arguments
// with an explicit sign (e.g. +tw1i, -tw1i), so after macro expansion that
// sign becomes the '+'/'-' operator.  Do not "fix" this.
#define PREP5(idx) \
        cmplx t0 = CC(idx,0,k), t1, t2, t3, t4; \
        PMC (t1,t4,CC(idx,1,k),CC(idx,4,k)) \
        PMC (t2,t3,CC(idx,2,k),CC(idx,3,k)) \
        CH(idx,k,0).r=t0.r+t1.r+t2.r; \
        CH(idx,k,0).i=t0.i+t1.i+t2.i;

// Radix-5 step for the i==0 column (no twiddle multiply).
#define PARTSTEP5a(u1,u2,twar,twbr,twai,twbi) \
        { \
        cmplx ca,cb; \
        ca.r=t0.r+twar*t1.r+twbr*t2.r; \
        ca.i=t0.i+twar*t1.i+twbr*t2.i; \
        cb.i=twai*t4.r twbi*t3.r; \
        cb.r=-(twai*t4.i twbi*t3.i); \
        PMC(CH(0,k,u1),CH(0,k,u2),ca,cb) \
        }

// Radix-5 step with twiddle multiply — backward transform.
#define PARTSTEP5b(u1,u2,twar,twbr,twai,twbi) \
        { \
        cmplx ca,cb,da,db; \
        ca.r=t0.r+twar*t1.r+twbr*t2.r; \
        ca.i=t0.i+twar*t1.i+twbr*t2.i; \
        cb.i=twai*t4.r twbi*t3.r; \
        cb.r=-(twai*t4.i twbi*t3.i); \
        PMC(da,db,ca,cb) \
        A_EQ_B_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \
        A_EQ_B_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \
        }
// Radix-5 step with twiddle multiply — forward transform (conjugated).
#define PARTSTEP5f(u1,u2,twar,twbr,twai,twbi) \
        { \
        cmplx ca,cb,da,db; \
        ca.r=t0.r+twar*t1.r+twbr*t2.r; \
        ca.i=t0.i+twar*t1.i+twbr*t2.i; \
        cb.i=twai*t4.r twbi*t3.r; \
        cb.r=-(twai*t4.i twbi*t3.i); \
        PMC(da,db,ca,cb) \
        A_EQ_CB_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \
        A_EQ_CB_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \
        }
// Radix-7 butterfly macros (pass7).
// As with PARTSTEP5*, the juxtaposed products `y1*t7.r y2*t6.r ...` rely on
// sign-prefixed macro arguments (+twXi / -twXi) supplying the operator.

#define PREP7(idx) \
        cmplx t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7; \
        PMC (t2,t7,CC(idx,1,k),CC(idx,6,k)) \
        PMC (t3,t6,CC(idx,2,k),CC(idx,5,k)) \
        PMC (t4,t5,CC(idx,3,k),CC(idx,4,k)) \
        CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r; \
        CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i;

// Core radix-7 step writing the butterfly result into out1/out2.
#define PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,out1,out2) \
        { \
        cmplx ca,cb; \
        ca.r=t1.r+x1*t2.r+x2*t3.r+x3*t4.r; \
        ca.i=t1.i+x1*t2.i+x2*t3.i+x3*t4.i; \
        cb.i=y1*t7.r y2*t6.r y3*t5.r; \
        cb.r=-(y1*t7.i y2*t6.i y3*t5.i); \
        PMC(out1,out2,ca,cb) \
        }
// i==0 column: results go straight into CH, no twiddles.
#define PARTSTEP7a(u1,u2,x1,x2,x3,y1,y2,y3) \
        PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,CH(0,k,u1),CH(0,k,u2))
// General column: apply the sign-aware twiddle multiply afterwards.
#define PARTSTEP7(u1,u2,x1,x2,x3,y1,y2,y3) \
        { \
        cmplx da,db; \
        PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,da,db) \
        MULPMSIGNC (CH(i,k,u1),WA(u1-1,i),da) \
        MULPMSIGNC (CH(i,k,u2),WA(u2-1,i),db) \
        }
// Radix-11 butterfly macros (pass11).
// As with PARTSTEP5*/7*, the juxtaposed products rely on sign-prefixed macro
// arguments (+twXi / -twXi) supplying the '+'/'-' operator after expansion.

#define PREP11(idx) \
        cmplx t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7, t8, t9, t10, t11; \
        PMC (t2,t11,CC(idx,1,k),CC(idx,10,k)) \
        PMC (t3,t10,CC(idx,2,k),CC(idx, 9,k)) \
        PMC (t4,t9 ,CC(idx,3,k),CC(idx, 8,k)) \
        PMC (t5,t8 ,CC(idx,4,k),CC(idx, 7,k)) \
        PMC (t6,t7 ,CC(idx,5,k),CC(idx, 6,k)) \
        CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r+t5.r+t6.r; \
        CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i+t5.i+t6.i;

// Core radix-11 step writing the butterfly result into out1/out2.
#define PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,out1,out2) \
        { \
        cmplx ca,cb; \
        ca.r=t1.r+x1*t2.r+x2*t3.r+x3*t4.r+x4*t5.r+x5*t6.r; \
        ca.i=t1.i+x1*t2.i+x2*t3.i+x3*t4.i+x4*t5.i+x5*t6.i; \
        cb.i=y1*t11.r y2*t10.r y3*t9.r y4*t8.r y5*t7.r; \
        cb.r=-(y1*t11.i y2*t10.i y3*t9.i y4*t8.i y5*t7.i ); \
        PMC(out1,out2,ca,cb) \
        }
// i==0 column: results go straight into CH, no twiddles.
#define PARTSTEP11a(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \
        PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,CH(0,k,u1),CH(0,k,u2))
// General column: apply the sign-aware twiddle multiply afterwards.
#define PARTSTEP11(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \
        { \
        cmplx da,db; \
        PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,da,db) \
        MULPMSIGNC (CH(i,k,u1),WA(u1-1,i),da) \
        MULPMSIGNC (CH(i,k,u2),WA(u2-1,i),db) \
        }
// define for passg
// Accessors used by the generic-radix pass (passg); idl1 = ido*l1.
#define CX(a,b,c) cc[(a)+ido*((b)+l1*(c))]
#define CX2(a,b) cc[(a)+idl1*(b)]
#define CH2(a,b) ch[(a)+idl1*(b)]


// Global twiddle-table state.  NOTE(review): none of these are referenced in
// this chunk — presumably filled/consumed by code in fft_gpu.cu; verify there.
vector<string> vector_twiddle;
int is_twiddle_load = 0;
float *h_twiddle_element = 0; 
__device__ float *d_twiddle_element = 0; 


// Single-precision complex number: r = real part, i = imaginary part.
struct cmplx 
{
    float r,i;
};

// One factor of the transform length plus pointers into the plan's shared
// twiddle storage.  tws is only populated when fct > 11 (generic pass).
struct cfftp_fctdata
{
    size_t fct;
    cmplx *tw,*tws;
};




// Writes res[0] ~= cos(pi*a)-1 and res[1] ~= sin(pi*a) for a in [-0.25,0.25],
// using Horner/FMA polynomial approximations (hex-float coefficients).
__device__ void my_sincosm1pi (float a, float * res)
{
    float x2 = a * a;

    // cos(pi*a) - 1 polynomial in a^2
    float poly =            0x1.d9e000p-3f;
    poly = fmaf (poly, x2, -0x1.55c400p+0f);
    poly = fmaf (poly, x2,  0x1.03c1cep+2f);
    poly = fmaf (poly, x2, -0x1.3bd3ccp+2f);
    float cosm1 = poly * x2;

    // sin(pi*a) polynomial; final fmaf folds in the leading pi*a term
    poly =                 -0x1.310000p-1f;
    poly = fmaf (poly, x2,  0x1.46737ep+1f);
    poly = fmaf (poly, x2, -0x1.4abbfep+2f);
    poly = (a * x2) * poly;
    float sinval = fmaf (a, 0x1.921fb6p+1f, poly);

    res[0] = cosm1;   // cos(pi*a) - 1
    res[1] = sinval;  // sin(pi*a)
}

// Tabulate interleaved {cos, sin} of 2*pi*i/den for the first octant
// (i = 0 .. ceil(den/8)).  Evaluates my_sincosm1pi directly in blocks of
// ~sqrt(n) samples and fills each block with an angle-addition recurrence,
// which keeps rounding error from accumulating linearly.
__device__ void calc_first_octant(int den, float * res)
{
  int n = (den+4)>>3; // number of octant samples (rounded)
  if (n==0) return;
  res[0]=1.; res[1]=0.; // angle 0
  if (n==1) return;
  int l1=(int)sqrt((float)n); // block length ~ sqrt(n)  (NOT log2 — sqrtf)
  // directly evaluate the first l1 sample points
  for (int i=1; i<l1; ++i)
    my_sincosm1pi((2.*i)/den,&res[2*i]); // fills res[2*i], res[2*i+1]
  int start=l1; 


  while(start<n)
    {
    float cs[2];
    // anchor point for this block, evaluated directly
    my_sincosm1pi((2.*start)/den,cs);
    res[2*start] = cs[0]+1.; // cs[0] is cos(pi*x)-1, so +1 restores cos
    res[2*start+1] = cs[1];  // cs[1] is sin(pi*x)
    int end = l1;
    if (start+end>n) end = n-start; // clamp the last (partial) block
    // angle addition: (start+i) sample from the anchor and sample i
    for (int i=1; i<end; ++i)
      {
      float csx[2]={res[2*i], res[2*i+1]};
      res[2*(start+i)] = ((cs[0]*csx[0] - cs[1]*csx[1] + cs[0]) + csx[0]) + 1.;
      res[2*(start+i)+1] = (cs[0]*csx[1] + cs[1]*csx[0]) + cs[1] + csx[1];

      
      }
    start += l1;


    
    }

  // the first l1 entries still hold cos-1; restore them to cos
  for (int i=1; i<l1; ++i)
    res[2*i] += 1.;

}

// Tabulate {cos, sin} of 2*pi*i/n for the first quadrant (i < ~n/4).
// Computes the first octant of a length-2n table into scratch space at
// res+n, then mirrors it: the upper part of the quadrant is the octant
// with cos/sin swapped (cos(x) = sin(pi/2 - x)).
__device__ void calc_first_quadrant(int n, float * res)
{
  float * p = res+n; // scratch region inside the caller-provided buffer
  calc_first_octant(n<<1, p); // octant of the doubled denominator
  int ndone=(n+2)>>2;
  int i=0, idx1=0, idx2=2*ndone-2; // fill from both ends towards the middle
  for (; i+1<ndone; i+=2, idx1+=2, idx2-=2)
    {
    res[idx1]   = p[2*i];
    res[idx1+1] = p[2*i+1];
    res[idx2]   = p[2*i+3]; // mirrored entry: swap sin/cos
    res[idx2+1] = p[2*i+2];
    }
  if (i!=ndone) // odd count: one middle element left
    {
    res[idx1  ] = p[2*i];
    res[idx1+1] = p[2*i+1];
    }
}

// Tabulate {cos, sin} of 2*pi*i/n for the first half (i < (n+1)/2) when n is
// odd.  Computes the first octant of a length-4n table into scratch at
// res+n-1, then maps each target angle into that octant via the symmetries
// of sin/cos (one branch per octant of the original circle).
__device__ void calc_first_half(int n, float * res)
{
  int ndone=(n+1)>>1; // number of samples in the half
  float * p = res+n-1; // scratch region inside the caller-provided buffer
  calc_first_octant(n<<2, p); // octant of denominator 4n
  int i4=0, in=n, i=0; // i4 = 4*i tracks the quadrupled angle index
  for (; i4<=in-i4; ++i, i4+=4) // octant 0: copy directly
    {
    res[2*i] = p[2*i4]; res[2*i+1] = p[2*i4+1];
    }
  for (; i4-in <= 0; ++i, i4+=4) // octant 1: reflect, swap sin/cos
    {
    int xm = in-i4;
    res[2*i] = p[2*xm+1]; res[2*i+1] = p[2*xm];
    }
  for (; i4<=3*in-i4; ++i, i4+=4) // octant 2: cos negated, swapped
    {
    int xm = i4-in;
    res[2*i] = -p[2*xm+1]; res[2*i+1] = p[2*xm];
    }
  for (; i<ndone; ++i, i4+=4) // octant 3: cos negated
    {
    int xm = 2*in-i4;
    res[2*i] = -p[2*xm]; res[2*i+1] = p[2*xm+1];
    }
}

// Extend a first-octant table (already in res) to the full first quadrant
// by mirroring: cos(x) = sin(pi/2 - x), so entries swap sin/cos roles.
__device__ void fill_first_quadrant(int n, float * res)
{
  const float hsqt2 = 0.707106f; // ~ sqrt(2)/2, the sample at pi/4
  int quart = n>>2;
  if ((n&7)==0) // pi/4 lands exactly on a sample only when 8 | n
    res[quart] = res[quart+1] = hsqt2;
  int src = 2;
  int dst = 2*quart-2;
  while (src < quart)
    {
    res[dst  ] = res[src+1]; // mirrored entry swaps sin/cos
    res[dst+1] = res[src  ];
    src += 2;
    dst -= 2;
    }
}

// Extend a first-quadrant table (already in res) to the first half circle.
__device__ void fill_first_half(int n, float * res)
{
  int half = n>>1;
  if ((n&3)==0)
    {
    // 4 | n: the second quadrant is the first rotated by +90 degrees,
    // i.e. (re, im) -> (-im, re).
    for (int i=0; i<half; i+=2)
      {
      res[i+half]   = -res[i+1];
      res[i+half+1] =  res[i  ];
      }
    return;
    }
  // otherwise mirror the quadrant, negating the real part
  int src = 2;
  int dst = 2*half-2;
  while (src < half)
    {
    res[dst  ] = -res[src  ];
    res[dst+1] =  res[src+1];
    src += 2;
    dst -= 2;
    }
}

// Extend a half-circle table (already in res) to the full circle.
__device__ void fill_second_half(int n, float * res)
{
  if ((n&1)==0)
    {
    // even n: second half is the point-wise negation of the first half
    for (int i=0; i<n; ++i)
      res[i+n] = -res[i];
    return;
    }
  // odd n: mirror with conjugation (keep real part, negate imaginary part)
  int src = 2;
  int dst = 2*n-2;
  while (src < n)
    {
    res[dst  ] =  res[src  ];
    res[dst+1] = -res[src+1];
    src += 2;
    dst -= 2;
    }
}

// Fill res with interleaved {cos, sin} of 2*pi*k/n for k in [0, n/2),
// computing the smallest unique arc and mirroring the rest via symmetry.
__device__ void sincos_2pibyn_half(int n, float * res)
{

  if ((n&3)==0) // 4 | n: compute 1/8 of the circle, mirror to 1/4, then 1/2
    {
    calc_first_octant(n, res);
    fill_first_quadrant(n, res);
    fill_first_half(n, res);
    }
  else if ((n&1)==0) // even n: compute 1/4, mirror to 1/2
    {
    calc_first_quadrant(n, res);
    fill_first_half(n, res);
    }
  else
    calc_first_half(n, res); // odd n: compute the half directly

}

// Fill res (2*n floats) with interleaved {cos, sin} of 2*pi*k/n, k in [0, n):
// compute the first half, then mirror it into the second half.
__device__ void sincos_2pibyn(int n, float * res)
{
  sincos_2pibyn_half(n, res);
  fill_second_half(n, res);
}



// Complex FFT plan (pocketfft-style) living in device memory.
// Construction factorizes `length` and precomputes all twiddle tables into a
// single heap allocation `mem`; pass_all() then runs one radix pass per
// factor, ping-ponging between the caller's buffer and a scratch buffer.
struct cfftp_plan_gpu 
{
    size_t length, nfct;        // transform length and number of factors
    cmplx *mem;                 // single backing allocation for all twiddles
    cfftp_fctdata fct[NFCT];    // in-struct factor array (NOT heap-allocated)

    // Host-side debug dump of the plan header (per-factor dump left disabled).
    void to_info(){
      cout << "cfftp_plan_gpu info" << endl;
      cout << "length: " << length << endl;
      cout << "nfct: " << nfct  << endl;
      cout << "cfftp_fctdata infos: " << endl;
      
      // for (int i = 0 ; i < NFCT; i++)
      // {
      //   cout << "cfftp_fctdata info: "<< i << endl;
      //   cout << "cfftp_fctdata fct: " <<  fct[i].fct << endl;
      //   cout << "cfftp_fctdata tw: " <<  fct[i].tw[0].r << "," << fct[i].tw[0].i << endl;
      //   // cout << "cfftp_fctdata tws: " <<  fct[i].tws[0].r << "," << fct[i].tws[0].i<< endl;

      // }
    }

    cfftp_plan_gpu() = default; // trivially-constructible for host-side arrays

    // Device-side construction: factorize N (4s first, then 2, then odd
    // primes) and fill the twiddle tables.  On factor-count overflow or
    // allocation failure the plan is left with nfct possibly set but mem==0;
    // callers should treat that as "plan unusable".
    __device__ cfftp_plan_gpu(size_t N) : length(N){

        size_t length_tmp = N; // working copy; reduced as factors are peeled off
        nfct = 0;
        // start from a clean factor list (fct=0, tw/tws null)
        for (size_t i=0; i<NFCT; ++i)
            fct[i]=(cfftp_fctdata){0,0,0};
        mem=0;
        // ---- factorization (cfftp_factorize) ----
        if (length_tmp==1) return ;
        while ((length_tmp%4)==0)
          { if (nfct>=NFCT) return ; fct[nfct++].fct=4; length_tmp>>=2; }
        if ((length_tmp%2)==0)
           {
            length_tmp>>=1;
           // factor 2 should be at the front of the factor list
           if (nfct>=NFCT) return ;
           fct[nfct++].fct=2;
           SWAP(fct[0].fct, fct[nfct-1].fct,size_t);
           }
        size_t maxl=(size_t)(sqrt((float)length_tmp))+1;
        for (size_t divisor=3; (length_tmp>1)&&(divisor<maxl); divisor+=2)
            if ((length_tmp%divisor)==0)
            {
            while ((length_tmp%divisor)==0)
                {
                // BUGFIX: the original guard was an empty statement
                // (`if (nfct>=NFCT) ;`), so fct[] could be written out of
                // bounds; bail out like the guards above instead.
                if (nfct>=NFCT) return ;
                fct[nfct++].fct=divisor;
                length_tmp/=divisor;
                }
            maxl=(size_t)(sqrt((float)length_tmp))+1;
            }
        if (length_tmp>1)
            {
            if (nfct>=NFCT) return ; // same overflow guard for the residue
            fct[nfct++].fct=length_tmp;
            }
        
        // ---- twiddle-table allocation ----
        size_t tws=cfftp_twsize(N);
        mem=RALLOC(cmplx,tws);

        float *twid = RALLOC(float, 2*N);
        // device-heap malloc can fail; don't dereference null below
        if (!mem || !twid) { DEALLOC(mem); DEALLOC(twid); return ; }
        // tabulate {cos,sin} of 2*pi*k/N into twid
        sincos_2pibyn(N, twid);

        // carve per-factor twiddle tables out of `mem`
        size_t l1=1;
        size_t memofs=0;
        for (size_t k=0; k<nfct; ++k)
            {
            size_t ip=fct[k].fct, ido= length/(l1*ip);
            fct[k].tw=mem+memofs;
            memofs+=(ip-1)*(ido-1); // (ip-1)*(ido-1) twiddles per factor
            for (size_t j=1; j<ip; ++j)
            for (size_t i=1; i<ido; ++i)
                {
                fct[k].tw[(j-1)*(ido-1)+i-1].r = twid[2*j*l1*i];
                fct[k].tw[(j-1)*(ido-1)+i-1].i = twid[2*j*l1*i+1];
                }
            if (ip>11) // generic pass needs an extra length-ip table
            {
            fct[k].tws=mem+memofs;
            memofs+=ip;
            for (size_t j=0; j<ip; ++j)
                {
                fct[k].tws[j].r = twid[2*j*l1*ido];
                fct[k].tws[j].i = twid[2*j*l1*ido+1];
                }
            }
            l1*=ip;
            }

           DEALLOC(twid);
    }

    // Total number of cmplx twiddle slots the factorization of N requires.
    __device__ size_t cfftp_twsize (size_t N)
    {
      size_t twsize=0, l1=1;
      for (size_t k=0; k<nfct; ++k)
        {
        size_t ip=fct[k].fct, ido= N / (l1*ip);
        twsize+=(ip-1)*(ido-1);
        if (ip>11)
          twsize+=ip; // extra table used by passg
        l1*=ip;
        }
      return twsize;
    }

    // Release the twiddle storage.  BUGFIX: the original also called
    // free(fct), but fct is an in-struct array that was never heap-allocated
    // — freeing it is undefined behavior (device-heap corruption).
    __device__ void memory_free(){
      DEALLOC(mem);
    }

    // Run all radix passes over c (length `length`), scaling by `factor`.
    // sign>0 selects the backward passes, otherwise forward.
    // Returns 0 on success, -1 on allocation failure inside passg.
    __device__ int pass_all( cmplx c[], float factor, const int sign)
    
    {

        if (length==1) return 0;
        size_t len=length;
        size_t l1=1, nf=nfct;
        cmplx *ch = RALLOC(cmplx, len); // scratch buffer for ping-ponging
        cmplx *p1=c, *p2=ch;
        for(size_t k1=0; k1<nf; k1++)
        {

          size_t ip=fct[k1].fct;
          size_t l2=ip*l1;
          size_t ido = len/l2;

            if(ip==4)
                sign>0 ? pass4b (ido, l1, p1, p2, fct[k1].tw)
                    : pass4f (ido, l1, p1, p2, fct[k1].tw);
            else if(ip==2)
                sign>0 ? pass2b (ido, l1, p1, p2, fct[k1].tw)
                        : pass2f (ido, l1, p1, p2, fct[k1].tw);
            else if(ip==3)
                sign>0 ? pass3b (ido, l1, p1, p2, fct[k1].tw)
                        : pass3f (ido, l1, p1, p2, fct[k1].tw);
            else if(ip==5)
                sign>0 ? pass5b (ido, l1, p1, p2, fct[k1].tw)
                        : pass5f (ido, l1, p1, p2, fct[k1].tw);
            else if(ip==7)  pass7 (ido, l1, p1, p2, fct[k1].tw, sign);
            else if(ip==11) pass11(ido, l1, p1, p2, fct[k1].tw, sign);
            else
                {
                if (passg(ido, ip, l1, p1, p2, fct[k1].tw, fct[k1].tws, sign))
                    { DEALLOC(ch); return -1; }
                // passg leaves its result in p1, so swap twice (net no-op)
                SWAP(p1,p2,cmplx *);
                }

            SWAP(p1,p2,cmplx *);
            l1=l2;
        }

        // ensure the final result ends up in c, applying the scale factor
        if (p1!=c)
        {
            if (factor!=1.)
            for (size_t i=0; i<len; ++i)
                {
                c[i].r = ch[i].r*factor; // here p1==ch
                c[i].i = ch[i].i*factor;
                }
            else
            memcpy (c,p1,len*sizeof(cmplx));
        }
        else
            if (factor!=1.)
            for (size_t i=0; i<len; ++i)
                {
                c[i].r *= factor;
                c[i].i *= factor;
                }   
        
        
        DEALLOC(ch);
        return 0;
    }

    // Radix-2 backward pass: cc (input, stride layout via CC) -> ch (via CH).
    __device__ void pass2b (size_t ido, size_t l1, const cmplx *  cc,
        cmplx *  ch, const cmplx *  wa)
    {
        const size_t cdim=2;
      
        if (ido==1)
          for (size_t k=0; k<l1; ++k)
            PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k))
        else
          for (size_t k=0; k<l1; ++k)
            {
            PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k))
            for (size_t i=1; i<ido; ++i)
              {
              cmplx t;
              PMC (CH(i,k,0),t,CC(i,0,k),CC(i,1,k))
              A_EQ_B_MUL_C (CH(i,k,1),WA(0,i),t)
              }
            }
    }
    
    // Radix-2 forward pass (conjugated twiddle multiply).
    __device__ void pass2f (size_t ido, size_t l1, const cmplx *  cc,
        cmplx *  ch, const cmplx *  wa)
    {
        const size_t cdim=2;
      
        if (ido==1)
          for (size_t k=0; k<l1; ++k)
            PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k))
        else
          for (size_t k=0; k<l1; ++k)
            {
            PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k))
            for (size_t i=1; i<ido; ++i)
              {
              cmplx t;
              PMC (CH(i,k,0),t,CC(i,0,k),CC(i,1,k))
              A_EQ_CB_MUL_C (CH(i,k,1),WA(0,i),t)
              }
            }
    }

    // Radix-3 backward pass.  tw1 = exp(+2*pi*i/3) (truncated float).
    __device__  void pass3b (size_t ido, size_t l1, const cmplx *  cc,
        cmplx *  ch, const cmplx *  wa)
    {
        const size_t cdim=3;
        const float tw1r=-0.5f, tw1i= 0.8660254f;
      
        if (ido==1)
          for (size_t k=0; k<l1; ++k)
            {
            PREP3(0)
            PARTSTEP3a(1,2,tw1r,tw1i)
            }
        else
          for (size_t k=0; k<l1; ++k)
            {
            {
            PREP3(0)
            PARTSTEP3a(1,2,tw1r,tw1i)
            }
            for (size_t i=1; i<ido; ++i)
              {
              PREP3(i)
              PARTSTEP3b(1,2,tw1r,tw1i)
              }
            }
    }

    // Radix-3 forward pass.  tw1 = exp(-2*pi*i/3).
    __device__ void pass3f (size_t ido, size_t l1, const cmplx *  cc,
        cmplx *  ch, const cmplx *  wa)
    {
        const size_t cdim=3;
        const float tw1r=-0.5f, tw1i= -0.8660254f;
      
        if (ido==1)
          for (size_t k=0; k<l1; ++k)
            {
            PREP3(0)
            PARTSTEP3a(1,2,tw1r,tw1i)
            }
        else
          for (size_t k=0; k<l1; ++k)
            {
            {
            PREP3(0)
            PARTSTEP3a(1,2,tw1r,tw1i)
            }
            for (size_t i=1; i<ido; ++i)
              {
              PREP3(i)
              PARTSTEP3f(1,2,tw1r,tw1i)
              }
            }
    }

    // Radix-4 backward pass (rotation by +i on the odd branch).
    __device__  void pass4b (size_t ido, size_t l1, const cmplx *  cc,
        cmplx *  ch, const cmplx *  wa)
    {
        const size_t cdim=4;
      
        if (ido==1)
          for (size_t k=0; k<l1; ++k)
            {
            cmplx t1, t2, t3, t4;
            PMC(t2,t1,CC(0,0,k),CC(0,2,k))
            PMC(t3,t4,CC(0,1,k),CC(0,3,k))
            ROT90(t4)
            PMC(CH(0,k,0),CH(0,k,2),t2,t3)
            PMC(CH(0,k,1),CH(0,k,3),t1,t4)
            }
        else
          for (size_t k=0; k<l1; ++k)
            {
            {
            cmplx t1, t2, t3, t4;
            PMC(t2,t1,CC(0,0,k),CC(0,2,k))
            PMC(t3,t4,CC(0,1,k),CC(0,3,k))
            ROT90(t4)
            PMC(CH(0,k,0),CH(0,k,2),t2,t3)
            PMC(CH(0,k,1),CH(0,k,3),t1,t4)
            }
            for (size_t i=1; i<ido; ++i)
              {
              cmplx c2, c3, c4, t1, t2, t3, t4;
              cmplx cc0=CC(i,0,k), cc1=CC(i,1,k),cc2=CC(i,2,k),cc3=CC(i,3,k);
              PMC(t2,t1,cc0,cc2)
              PMC(t3,t4,cc1,cc3)
              ROT90(t4)
              cmplx wa0=WA(0,i), wa1=WA(1,i),wa2=WA(2,i);
              PMC(CH(i,k,0),c3,t2,t3)
              PMC(c2,c4,t1,t4)
              A_EQ_B_MUL_C (CH(i,k,1),wa0,c2)
              A_EQ_B_MUL_C (CH(i,k,2),wa1,c3)
              A_EQ_B_MUL_C (CH(i,k,3),wa2,c4)
              }
            }
    }

    // Radix-4 forward pass (rotation by -i, conjugated twiddles).
    __device__ void pass4f (size_t ido, size_t l1, const cmplx *  cc,
        cmplx *  ch, const cmplx *  wa)
    {
        const size_t cdim=4;
      
        if (ido==1)
          for (size_t k=0; k<l1; ++k)
            {
            cmplx t1, t2, t3, t4;
            PMC(t2,t1,CC(0,0,k),CC(0,2,k))
            PMC(t3,t4,CC(0,1,k),CC(0,3,k))
            ROTM90(t4)
            PMC(CH(0,k,0),CH(0,k,2),t2,t3)
            PMC(CH(0,k,1),CH(0,k,3),t1,t4)
            }
        else
          for (size_t k=0; k<l1; ++k)
            {
            {
            cmplx t1, t2, t3, t4;
            PMC(t2,t1,CC(0,0,k),CC(0,2,k))
            PMC(t3,t4,CC(0,1,k),CC(0,3,k))
            ROTM90(t4)
            PMC(CH(0,k,0),CH(0,k,2),t2,t3)
            PMC (CH(0,k,1),CH(0,k,3),t1,t4)
            }
            for (size_t i=1; i<ido; ++i)
              {
              cmplx c2, c3, c4, t1, t2, t3, t4;
              cmplx cc0=CC(i,0,k), cc1=CC(i,1,k),cc2=CC(i,2,k),cc3=CC(i,3,k);
              PMC(t2,t1,cc0,cc2)
              PMC(t3,t4,cc1,cc3)
              ROTM90(t4)
              cmplx wa0=WA(0,i), wa1=WA(1,i),wa2=WA(2,i);
              PMC(CH(i,k,0),c3,t2,t3)
              PMC(c2,c4,t1,t4)
              A_EQ_CB_MUL_C (CH(i,k,1),wa0,c2)
              A_EQ_CB_MUL_C (CH(i,k,2),wa1,c3)
              A_EQ_CB_MUL_C (CH(i,k,3),wa2,c4)
              }
            }
    }

    // Radix-5 backward pass.  twK = exp(+2*pi*i*K/5) (truncated floats).
    // Sign-prefixed +/- arguments feed the operator-less PARTSTEP5* macros.
    __device__  void pass5b (size_t ido, size_t l1, const cmplx *  cc,
        cmplx *  ch, const cmplx *  wa)
    {
        const size_t cdim=5;
        const float tw1r= 0.309017f,
                     tw1i= 0.9510565f,
                     tw2r= -0.809017f,
                     tw2i= 0.5877852f;
      
        if (ido==1)
          for (size_t k=0; k<l1; ++k)
            {
            PREP5(0)
            PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
            PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
            }
        else
          for (size_t k=0; k<l1; ++k)
            {
            {
            PREP5(0)
            PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
            PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
            }
            for (size_t i=1; i<ido; ++i)
              {
              PREP5(i)
              PARTSTEP5b(1,4,tw1r,tw2r,+tw1i,+tw2i)
              PARTSTEP5b(2,3,tw2r,tw1r,+tw2i,-tw1i)
              }
            }
    }

    // Radix-5 forward pass.  twK = exp(-2*pi*i*K/5).
    __device__ void pass5f (size_t ido, size_t l1, const cmplx *  cc,
        cmplx *  ch, const cmplx *  wa)
    {
        const size_t cdim=5;
        const float tw1r= 0.309017f,
                     tw1i= -0.9510565f,
                     tw2r= -0.809017f,
                     tw2i= -0.5877852f;
      
        if (ido==1)
          for (size_t k=0; k<l1; ++k)
            {
            PREP5(0)
            PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
            PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
            }
        else
          for (size_t k=0; k<l1; ++k)
            {
            {
            PREP5(0)
            PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
            PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
            }
            for (size_t i=1; i<ido; ++i)
              {
              PREP5(i)
              PARTSTEP5f(1,4,tw1r,tw2r,+tw1i,+tw2i)
              PARTSTEP5f(2,3,tw2r,tw1r,+tw2i,-tw1i)
              }
            }
    }
    // Radix-7 pass; `sign` folds the transform direction into the imaginary
    // twiddle parts, so one routine serves both directions.
    __device__ void pass7(size_t ido, size_t l1, const cmplx *  cc,
        cmplx *  ch, const cmplx *  wa, const int sign)
    {
        const size_t cdim=7;
        const float tw1r= 0.6234898f,
                     tw1i= sign * 0.7818314f,
                     tw2r= -0.2225209f,
                     tw2i= sign * 0.9749279f,
                     tw3r= -0.9009689f,
                     tw3i= sign * 0.4338837f;
      
        if (ido==1)
          for (size_t k=0; k<l1; ++k)
            {
            PREP7(0)
            PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
            PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
            PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
            }
        else
          for (size_t k=0; k<l1; ++k)
            {
            {
            PREP7(0)
            PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
            PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
            PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
            }
            for (size_t i=1; i<ido; ++i)
              {
              PREP7(i)
              PARTSTEP7(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
              PARTSTEP7(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
              PARTSTEP7(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
              }
            }
    }
    // Radix-11 pass; `sign` handled the same way as in pass7.
    __device__ void pass11 (size_t ido, size_t l1, const cmplx *  cc,
        cmplx *  ch, const cmplx *  wa, const int sign)
    {
        const size_t cdim=11;
        const float tw1r =        0.8412535f,
                     tw1i = sign * 0.5406408f,
                     tw2r =        0.415415f,
                     tw2i = sign * 0.909632f,
                     tw3r =       -0.1423148f,
                     tw3i = sign * 0.9898214f,
                     tw4r =       -0.6548607f,
                     tw4i = sign * 0.7557496f,
                     tw5r =       -0.959493f,
                     tw5i = sign * 0.2817326f;
      
        if (ido==1)
          for (size_t k=0; k<l1; ++k)
            {
            PREP11(0)
            PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
            PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
            PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
            PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
            PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
            }
        else
          for (size_t k=0; k<l1; ++k)
            {
            {
            PREP11(0)
            PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
            PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
            PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
            PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
            PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
            }
            for (size_t i=1; i<ido; ++i)
              {
              PREP11(i)
              PARTSTEP11(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
              PARTSTEP11(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
              PARTSTEP11(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
              PARTSTEP11(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
              PARTSTEP11(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
              }
            }
    }

    // Generic-radix pass for prime factors > 11.  csarr holds the length-ip
    // twiddle table built by the constructor (fct[k].tws).  Unlike the fixed
    // passes, the result ends up back in cc (hence the double SWAP in
    // pass_all).  Returns 0 on success, -1 on allocation failure.
    __device__ int passg (size_t ido, size_t ip, size_t l1,
        cmplx *  cc, cmplx *  ch, const cmplx *  wa,
        const cmplx *  csarr, const int sign)
    {
        const size_t cdim=ip;
        size_t ipph = (ip+1)/2;
        size_t idl1 = ido*l1;
      
        // sign-adjusted copy of the roots-of-unity table
        cmplx *  wal=RALLOC(cmplx,ip);
        if (!wal) return -1;
        wal[0]=(cmplx){1.,0.};
        for (size_t i=1; i<ip; ++i)
          wal[i]=(cmplx){csarr[i].r,sign*csarr[i].i};
      
        // symmetric pre-combination: CH(.,.,j)/CH(.,.,ip-j) = sum/diff pairs
        for (size_t k=0; k<l1; ++k)
          for (size_t i=0; i<ido; ++i)
            CH(i,k,0) = CC(i,0,k);
        for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)
          for (size_t k=0; k<l1; ++k)
            for (size_t i=0; i<ido; ++i)
              PMC(CH(i,k,j),CH(i,k,jc),CC(i,j,k),CC(i,jc,k))
        // DC output: plain sum of all symmetric parts
        for (size_t k=0; k<l1; ++k)
          for (size_t i=0; i<ido; ++i)
            {
            cmplx tmp = CH(i,k,0);
            for (size_t j=1; j<ipph; ++j)
              ADDC(tmp,tmp,CH(i,k,j))
            CX(i,k,0) = tmp;
            }
        // remaining outputs: weighted sums, two twiddle columns at a time
        for (size_t l=1, lc=ip-1; l<ipph; ++l, --lc)
          {
          // j=0,1,2 terms unrolled
          for (size_t ik=0; ik<idl1; ++ik)
            {
            CX2(ik,l).r = CH2(ik,0).r+wal[l].r*CH2(ik,1).r+wal[2*l].r*CH2(ik,2).r;
            CX2(ik,l).i = CH2(ik,0).i+wal[l].r*CH2(ik,1).i+wal[2*l].r*CH2(ik,2).i;
            CX2(ik,lc).r=-wal[l].i*CH2(ik,ip-1).i-wal[2*l].i*CH2(ik,ip-2).i;
            CX2(ik,lc).i=wal[l].i*CH2(ik,ip-1).r+wal[2*l].i*CH2(ik,ip-2).r;
            }
      
          size_t iwal=2*l; // running twiddle index, reduced mod ip
          size_t j=3, jc=ip-3;
          for (; j<ipph-1; j+=2, jc-=2) // two terms per iteration
            {
            iwal+=l; if (iwal>ip) iwal-=ip;
            cmplx xwal=wal[iwal];
            iwal+=l; if (iwal>ip) iwal-=ip;
            cmplx xwal2=wal[iwal];
            for (size_t ik=0; ik<idl1; ++ik)
              {
              CX2(ik,l).r += CH2(ik,j).r*xwal.r+CH2(ik,j+1).r*xwal2.r;
              CX2(ik,l).i += CH2(ik,j).i*xwal.r+CH2(ik,j+1).i*xwal2.r;
              CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i+CH2(ik,jc-1).i*xwal2.i;
              CX2(ik,lc).i += CH2(ik,jc).r*xwal.i+CH2(ik,jc-1).r*xwal2.i;
              }
            }
          for (; j<ipph; ++j, --jc) // possible leftover single term
            {
            iwal+=l; if (iwal>ip) iwal-=ip;
            cmplx xwal=wal[iwal];
            for (size_t ik=0; ik<idl1; ++ik)
              {
              CX2(ik,l).r += CH2(ik,j).r*xwal.r;
              CX2(ik,l).i += CH2(ik,j).i*xwal.r;
              CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i;
              CX2(ik,lc).i += CH2(ik,jc).r*xwal.i;
              }
            }
          }
        DEALLOC(wal);
      
        // shuffling and twiddling
        if (ido==1)
          for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)
            for (size_t ik=0; ik<idl1; ++ik)
              {
              cmplx t1=CX2(ik,j), t2=CX2(ik,jc);
              PMC(CX2(ik,j),CX2(ik,jc),t1,t2)
              }
        else
          {
          for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)
            for (size_t k=0; k<l1; ++k)
              {
              cmplx t1=CX(0,k,j), t2=CX(0,k,jc);
              PMC(CX(0,k,j),CX(0,k,jc),t1,t2)
              for (size_t i=1; i<ido; ++i)
                {
                cmplx x1, x2;
                PMC(x1,x2,CX(i,k,j),CX(i,k,jc))
                size_t idij=(j-1)*(ido-1)+i-1;
                MULPMSIGNC (CX(i,k,j),wa[idij],x1)
                idij=(jc-1)*(ido-1)+i-1;
                MULPMSIGNC (CX(i,k,jc),wa[idij],x2)
                }
              }
          }
        return 0;
    }

};



// Bluestein (chirp-z) FFT plan: computes an arbitrary-length-n FFT as a
// convolution of length n2 (a cheap composite size >= 2n-1) carried out by an
// inner cfftp plan.  bk holds the chirp factors b_k, bkf their zero-padded,
// normalised, forward-transformed counterpart used for frequency-domain
// multiplication.
struct fftblue_plan_gpu
{
    size_t n, n2;          // n: requested transform length; n2: padded convolution length
    cfftp_plan_gpu plan;   // inner FFT plan of length n2
    float *mem;            // single allocation backing both bk and bkf
    float *bk, *bkf;       // aliases into mem — never freed individually

    fftblue_plan_gpu() = default;

    // Builds the plan for length N (1 <= N <= 512, per the table sizes below).
    __device__ fftblue_plan_gpu(size_t N) 
    {
        // good_size[N-1] is the precomputed convolution length n2 for N
        // (0 entries correspond to lengths that never take the Bluestein path).
        int good_size[512] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,180,0,0,0,0,0,0,0,0,0,0,0,210,0,210,0,0,0,216,0,0,0,0,0,225,0,0,0,0,0,0,0,0,0,0,0,0,0,256,0,0,0,264,0,0,0,0,0,275,0,280,0,0,0,0,0,0,0,0,0,297,0,308,0,0,0,0,0,315,0,0,0,0,0,330,0,0,0,336,0,0,0,0,0,350,0,0,0,0,0,360,0,363,0,0,0,0,0,0,0,0,0,384,0,385,0,0,0,396,0,400,0,0,405,0,0,0,0,0,0,0,0,432,0,0,432,0,0,0,0,0,0,0,0,448,0,0,0,462,0,462,0,0,0,480,0,0,0,0,0,480,0,484,0,0,0,0,0,0,0,0,0,504,0,0,512,0,0,525,0,0,0,0,525,525,0,0,0,0,0,539,0,550,0,0,550,0,0,560,560,0,0,567,0,567,0,0,0,0,0,0,0,0,0,588,0,0,0,0,600,0,0,0,0,0,0,0,0,616,0,0,0,625,0,625,630,0,0,640,0,0,0,648,0,0,0,0,660,0,0,0,0,672,0,0,672,0,0,675,0,0,0,0,0,0,0,0,693,693,0,700,0,0,0,720,0,0,0,0,720,720,0,0,726,0,0,0,0,735,0,0,0,0,0,750,0,0,0,0,0,768,0,768,768,768,0,0,784,0,0,784,0,0,0,792,792,0,0,800,800,0,0,810,0,0,810,0,0,0,0,825,0,825,0,0,0,0,0,840,0,840,0,847,847,0,0,0,0,0,864,0,0,864,0,875,0,0,0,0,0,880,0,0,0,891,0,0,891,896,0,900,0,0,0,924,924,0,0,924,924,0,0,924,0,945,0,0,945,945,0,0,0,945,0,0,0,0,0,0,960,960,0,0,968,0,0,0,0,980,0,980,0,990,0,0,0,0,0,0,0,1000,0,1008,1008,1008,0,0,0,0,1024,1024,0,0,0};
        
        n = N;
        n2 = good_size[N - 1]; // query table for the padded convolution length
        mem = RALLOC(float, 2*n+2*n2);
        bk  = mem ;            // bk/bkf alias into mem; only mem owns heap storage
        bkf = bk+2*n;
        float *tmp = RALLOC(float,4*n);

        // Flattened table of offsets into d_twiddle_element, indexed by 2n-1.
        int twiddle_offset_array[1024] = {0,1,5,11,19,29,41,55,71,89,109,131,155,181,209,239,271,305,341,379,419,461,505,551,599,649,701,755,811,869,929,991,1055,1121,1189,1259,1331,1405,1481,1559,1639,1721,1805,1891,1979,2069,2161,2255,2351,2449,2549,2651,2755,2861,2969,3079,3191,3305,3421,3539,3659,3781,3905,4031,4159,4289,4421,4555,4691,4829,4969,5111,5255,5401,5549,5699,5851,6005,6161,6319,6479,6641,6805,6971,7139,7309,7481,7655,0,8191,8371,8553,8737,8923,9111,9301,9493,9687,9883,10081,0,10701,0,10905,11113,11323,0,11967,12183,12401,12621,12843,0,13517,13745,13975,14207,14441,14677,14915,15155,15397,15641,15887,16135,16385,0,17149,17405,17663,0,18451,18715,18981,19249,19519,0,20341,0,21177,21457,21739,22023,22309,22597,22887,23179,23473,0,24363,0,25279,25583,25889,26197,26507,0,27449,27765,28083,28403,28725,0,29709,30037,30367,0,31371,31707,32045,32385,32727,0,33771,34119,34469,34821,35175,0,7831,0,36977,37341,37707,38075,38445,38817,39191,39567,39945,0,41093,0,42247,42635,43025,0,44209,0,45405,45805,0,47017,47423,47831,48241,48653,49067,49483,10281,0,50765,51189,0,51615,11535,52045,52479,52915,53353,53793,54235,0,55575,13067,56023,0,57399,0,57855,58315,58777,0,60201,60669,61139,61611,62085,0,62561,0,64009,64493,64979,65467,65957,66449,66943,67439,67937,0,69445,69949,0,71479,16637,0,73039,73555,74073,74593,0,0,17923,75115,75645,76177,76711,0,78325,0,79965,80509,0,19791,81055,0,0,82727,20617,0,84419,0,84983,85551,86121,86693,87267,87843,88421,89001,89583,0,91343,91931,92521,23769,0,94313,94911,95511,96113,96717,97323,97931,98541,0,24663,100385,101003,0,102873,0,0,26819,104757,0,106669,107305,107943,0,109879,110523,111169,111817,0,113787,114441,115097,29049,0,117099,117763,0,118429,30699,0,120449,121125,121803,122483,123165,123849,124535,125223,0,0,127299,0,33071,129395,130097,0,132241,132949,133659,134371,0,0,35531,135085,0,36251,137259,137987,138717,0,140919,141655,142393,143133,143875,0,146119,146867,147617,148369,149123,0,151415,0,0,0,40325,41477,0,153743,154517,0,155293,156073,156855,0,0,159223,43417,0,0,161613,44605,0,164031,164835,0,46207,165641,166453,167267,0,169733,0,170553,171377,172203,173031,173861,0,176373,0,177209,0,0,179743,180589,181437,182287,183139,0,185721,186579,0,49901,0,189189,190057,190927,191799,192673,0,195309,196189,197071,0,199737,200625,0,0,54679,0,205107,206007,206909,0,0,209661,210571,0,0,211483,212401,0,56475,0,215211,216139,0,0,217069,218005,218943,0,219883,220827,221773,222721,223671,224623,0,0,59241,227497,0,230395,63041,231361,232331,0,235263,0,236239,0,239199,240183,241169,242157,243147,244139,245133,0,248129,0,0,0,68437,251145,252155,253167,0,0,256229,257249,70455,0,0,0,0,0,0,0,0,0,0,0,0,71989,0,0,0,0,0,0,0,0,0,0,0,0,0,77247,0,0,0,0,0,0,0,0,0,0,78865,0,0,0,0,0,0,0,0,0,81607,0,0,0,0,0,0,83285,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,90167,0,0,0,0,0,0,0,0,0,0,0,93113,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,99153,0,0,0,0,0,0,0,0,101623,0,0,0,0,103497,0,0,0,0,0,0,0,0,0,105389,0,0,0,0,0,0,0,108583,0,0,0,0,0,0,0,0,0,0,0,112467,0,0,0,0,0,0,0,0,0,0,0,115755,0,0,119099,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,125913,0,0,0,0,0,0,127995,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,130801,0,0,0,0,0,135807,0,0,0,0,0,0,0,0,139449,0,0,0,0,0,0,0,0,0,0,0,0,0,0,144619,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,149879,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,152175,0,0,0,0,0,0,0,157639,0,0,0,0,0,0,0,160013,0,0,0,0,0,0,0,0,0,162411,0,0,0,0,0,0,0,0,0,0,0,0,0,0,168083,0,0,0,0,0,0,0,0,0,0,0,0,0,0,174693,0,0,0,0,0,0,178049,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,183993,0,0,0,0,0,0,0,0,0,0,187439,0,0,0,0,193549,0,0,0,0,0,0,0,0,0,0,197955,0,0,0,0,201515,0,0,0,203307,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,207813,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,213321,0,0,0,0,0,0,0,0,0,0,0,0,0,0,225577,0,0,0,0,0,0,0,228459,0,0,0,0,0,0,0,0,0,0,0,233303,0,0,0,0,0,0,0,0,0,237219,0,0,0,0,0,0,0,0,0,246129,0,0,0,0,0,0,0,249129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,254181}; 

        int twiddle_offset = twiddle_offset_array[2*n - 1];
        for (int i = 0 ; i < 4 * n ; i++){
            tmp[i] = d_twiddle_element[twiddle_offset + i];
        }

        bk[0] = 1;
        bk[1] = 0;

        // Chirp factors b_k: k^2 mod 2n is accumulated incrementally
        // (coeff += 2m-1) and looked up in the precomputed twiddle table.
        size_t coeff=0;
        for (size_t m=1; m<n; ++m)
            {
            coeff+=2*m-1;
            if (coeff>=2*n) coeff-=2*n;
            bk[2*m  ] = tmp[2*coeff  ];
            bk[2*m+1] = tmp[2*coeff+1];
            }

        /* initialize the zero-padded, Fourier transformed b_k. Add normalisation. */
        float xn2 = 1.f/n2;  // float literal (was 1./n2: double division in device code)
        bkf[0] = bk[0]*xn2;
        bkf[1] = bk[1]*xn2;
        for (size_t m=2; m<2*n; m+=2)
            {
            bkf[m]   = bkf[2*n2-m]   = bk[m]   *xn2;
            bkf[m+1] = bkf[2*n2-m+1] = bk[m+1] *xn2;
            }
        for (size_t m=2*n;m<=(2*n2-2*n+1);++m)
            bkf[m]=0.;

        DEALLOC(tmp);  // fix: tmp was leaked in the original

        new(&plan) cfftp_plan_gpu(n2);

        // Fix: bkf must be forward-transformed once at plan-build time so that
        // the frequency-domain multiplication in fftblue_fft is valid
        // (the original never transformed bkf at all).
        plan.pass_all((cmplx *)bkf, 1., -1);
    }

    // Releases plan-owned memory.  bk and bkf alias into mem, so freeing all
    // three (as the original did) was a double/invalid free — only mem is freed.
    __device__ void memory_free(){
      plan.memory_free();
      DEALLOC(mem);
      bk = NULL;
      bkf = NULL;
    }

    // In-place Bluestein FFT of the n complex values in c (interleaved re/im).
    // isign selects the chirp sign (forward vs backward); fct scales the output.
    // Returns 0 on success, -1 on allocation or inner-FFT failure.
    __device__ int fftblue_fft(float c[], int isign, float fct)
    {
        // Fix: the original opened with `size_t n=n; size_t n2=n2;
        // float *bk=bk; float *bkf=bkf;` — self-initialized locals shadowing
        // the members with indeterminate values.  Use the members directly.
        float *akf = RALLOC(float, 2*n2);
        if (!akf) return -1;

        /* initialize a_k = c_k * b_k (or conj) and zero-pad to length n2 */
        if (isign>0)
            for (size_t m=0; m<2*n; m+=2)
            {
            akf[m]   = c[m]*bk[m]   - c[m+1]*bk[m+1];
            akf[m+1] = c[m]*bk[m+1] + c[m+1]*bk[m];
            }
        else
            for (size_t m=0; m<2*n; m+=2)
            {
            akf[m]   = c[m]*bk[m]   + c[m+1]*bk[m+1];
            akf[m+1] =-c[m]*bk[m+1] + c[m+1]*bk[m];
            }
        for (size_t m=2*n; m<2*n2; ++m)
            akf[m]=0;

        /* forward FFT of a_k (fix: the original transformed c here, leaving akf untouched) */
        if (plan.pass_all( (cmplx *)akf, 1., -1)!=0)
            { DEALLOC(akf); return -1; }

        /* pointwise multiply by the transformed b_k: convolution in frequency domain */
        if (isign>0)
            for (size_t m=0; m<2*n2; m+=2)
            {
            float im = -akf[m]*bkf[m+1] + akf[m+1]*bkf[m];
            akf[m  ]  =  akf[m]*bkf[m]   + akf[m+1]*bkf[m+1];
            akf[m+1]  = im;
            }
        else
            for (size_t m=0; m<2*n2; m+=2)
            {
            float im = akf[m]*bkf[m+1] + akf[m+1]*bkf[m];
            akf[m  ]  = akf[m]*bkf[m]   - akf[m+1]*bkf[m+1];
            akf[m+1]  = im;
            }

        /* inverse FFT (fix: operate on akf, not c) */
        if (plan.pass_all ( (cmplx *)akf,1.,1)!=0)
            { DEALLOC(akf); return -1; }

        /* multiply by b_k and apply the caller's scale factor
           (fix: fct — e.g. the backward 1/N normalisation — was ignored) */
        if (isign>0)
            for (size_t m=0; m<2*n; m+=2)
            {
            c[m]   = fct*(bk[m]  *akf[m] - bk[m+1]*akf[m+1]);
            c[m+1] = fct*(bk[m+1]*akf[m] + bk[m]  *akf[m+1]);
            }
        else
            for (size_t m=0; m<2*n; m+=2)
            {
            c[m]   = fct*( bk[m]  *akf[m] + bk[m+1]*akf[m+1]);
            c[m+1] = fct*(-bk[m+1]*akf[m] + bk[m]  *akf[m+1]);
            }
        DEALLOC(akf);
        return 0;
    }


};


// Lookup table indexed by (N - 1) for N in [1, 512]: a nonzero entry makes
// fft_plan(N) take the Bluestein path (fftblue_plan_gpu); zero selects the
// plain cfftp plan.  A local copy of the same table exists in fft_cuda_1d_gpu.
__device__ int judge[512] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,1,0,0,0,0,1,1,0,0,0,0,0,1,0,1,0,0,1,0,0,1,1,0,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,1,0,0,1,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,1,0,0,0,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,1,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,1,1,0,0,1,0,0,1,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,1,1,0,0,0,0,0,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,1,1,0,1,0,0,0,1,1,0,0,1,1,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0,1,0,1,1,1,0,0,0,0,1,1,0,0,0}; 

// Dispatching FFT plan: wraps either a plain mixed-radix plan (cfftp) or a
// Bluestein plan, chosen per length from the global `judge` table.
struct fft_plan
{
  int cfft_or_blue = 0 ;// 0 selects cfft_plan, 1 selects blue_plan
  cfftp_plan_gpu cfft_plan;
  fftblue_plan_gpu blue_plan;

  fft_plan() = default;// keep the implicit default constructor available

  // Builds the appropriate plan for transform length N.
  __device__ fft_plan(size_t N)
  {
      // Lengths flagged in the lookup table need the Bluestein algorithm.
      if (judge[N - 1] != 0)
      {
          cfft_or_blue = 1;
          blue_plan = fftblue_plan_gpu(N);
      }
      else
      {
          cfft_plan = cfftp_plan_gpu(N);
      }
  }

  // Releases whichever plan was constructed.
  __device__ void memory_free(){
      if (cfft_or_blue == 1)
          blue_plan.memory_free();
      else
          cfft_plan.memory_free();
  }

};


// In-place 1D FFT of `input` (length N) using a pre-built plan.
// sign > 0: forward transform; otherwise backward with 1/N normalisation.
// Capacity of the stack scratch buffer limits N to 300 complex values.
__device__ void fft_cuda_1d_gpu_with_plan(fft_plan plan,int sign, int N, Complex* input)
{
    // Interleaved (re,im) scratch copy of the input.
    Real data[600];
    if (N <= 0 || 2 * N > 600) return;  // fix: guard against overflowing data[600]
    for (int i = 0 ; i < N ; i++){
        data[2 * i] = input[i].real;
        data[2 * i + 1] = input[i].imag;
    }
    if (plan.cfft_or_blue==0)
    {
        if (sign > 0)
            plan.cfft_plan.pass_all((cmplx *)data, 1., -1);
        else
            plan.cfft_plan.pass_all((cmplx *)data, 1./N, 1);
    } 
    else
    {
        if (sign > 0)
            plan.blue_plan.fftblue_fft(data,-1,1.);
        else
            plan.blue_plan.fftblue_fft(data,1,1./N);
    }
    for (int i = 0 ; i < N ; i++){
      input[i]  = Complex(data[2 * i],data[2 * i + 1]);
    }
    // Fix: the original called free(data) here — `data` is a stack array, so
    // that free() corrupts the device heap.
}

// In-place 1D FFT of `input` (length N), building and destroying a plan
// internally.  sign > 0: forward; otherwise backward with 1/N normalisation.
// The stack scratch buffer limits N to 300; the judge table limits N to 512.
__device__ void fft_cuda_1d_gpu(int sign, int N, Complex* input)
{
    // Interleaved (re,im) scratch copy of the input.
    Real data[600];
    if (N <= 0 || 2 * N > 600) return;  // fix: guard against overflowing data[600]
    for (int i = 0 ; i < N ; i++){
        data[2 * i] = input[i].real;
        data[2 * i + 1] = input[i].imag;
    }
    // Per-length algorithm selector: nonzero entries take the Bluestein path.
    int judge[512] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,1,0,0,0,0,1,1,0,0,0,0,0,1,0,1,0,0,1,0,0,1,1,0,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,1,0,0,1,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,1,0,0,0,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,1,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,1,1,0,0,1,0,0,1,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,1,1,0,0,0,0,0,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,1,1,0,1,0,0,0,1,1,0,0,1,1,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0,1,0,1,1,1,0,0,0,0,1,1,0,0,0}; 
    if (judge[N - 1] == 0){ // plain mixed-radix plan
        cfftp_plan_gpu packplan = cfftp_plan_gpu(N);
        if (sign > 0)
            packplan.pass_all((cmplx *)data, 1., -1);
        else
            packplan.pass_all((cmplx *)data, 1./N, 1);
        packplan.memory_free();
    } else { // Bluestein plan for awkward lengths
        fftblue_plan_gpu blueplan = fftblue_plan_gpu(N);
        if (sign > 0)
            blueplan.fftblue_fft(data,-1,1.);
        else
            blueplan.fftblue_fft(data,1,1./N);
        blueplan.memory_free();
    }
    for (int i = 0 ; i < N ; i++){
        input[i]  = Complex(data[2 * i],data[2 * i + 1]);
    }
    // Fix: the original called free(data) here — `data` is a stack array, so
    // that free() corrupts the device heap.
}


/* In-place transpose: walks every permutation cycle of the transpose and
   rotates each cycle exactly once. */
// expanded by the function in utils.cuh
__device__ void transpose_gpu(Complex* mtx, int m, int n)
{
    // mtx is an m x n row-major matrix; after the call it holds the n x m transpose.
    for(int i=0; i<m*n; ++i)
    {
        int next = (i % n) * m + i / n;
        while(next > i) // follow successors; if one drops below i, this cycle was already rotated
            next = (next % n) * m + next / n;
        if(next == i)   // i is the smallest index of its cycle: rotate the cycle now
          {
            Complex temp = mtx[i];  // save the cycle head
            int cur = i;       // current write position
            int pre = ( cur % m ) * n + cur / m;  // predecessor: the element that belongs at cur
            while(pre != i)
            {
                mtx[cur] = mtx[pre];
                cur = pre;
                pre = ( cur % m ) * n + cur / m;
            }
            mtx[cur] = temp;
          }
    }
}


#define BLOCK_DIM 256 

// This kernel is optimized to ensure all global reads and writes are coalesced,
// and to avoid bank conflicts in shared memory.  This kernel is up to 11x faster
// than the naive kernel below.  Note that the shared memory array is sized to 
// (BLOCK_DIM+1)*BLOCK_DIM.  This pads each row of the 2D block in shared memory 
// so that bank conflicts do not occur when threads address the array column-wise.
// Tiled in-place transpose of a width x height matrix via shared memory.
// NOTE(review): this kernel looks unusable as written — confirm before use:
//  * the tile was shrunk from [BLOCK_DIM][BLOCK_DIM+1] to [BLOCK_DIM][1], but
//    it is still indexed block[threadIdx.y][threadIdx.x], which reads/writes out
//    of bounds whenever threadIdx.x > 0;
//  * BLOCK_DIM is 256, so the implied 2D block of 256x256 threads exceeds the
//    1024-threads-per-block hardware limit;
//  * the transpose writes back into idata in place, so tiles read by one block
//    may already have been overwritten by another (no inter-block ordering).
__global__ void transpose_gpu_v2(Complex *idata, int width, int height)
{
	//__shared__ Complex block[BLOCK_DIM][BLOCK_DIM+1];
	__shared__ Complex block[BLOCK_DIM][1];
	
	// read the matrix tile into shared memory
        // load one element per thread from device memory (idata) and store it
        // in transposed order in block[][]
	unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
	unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
	if((xIndex < width) && (yIndex < height))
	{
		unsigned int index_in = yIndex * width + xIndex;
		block[threadIdx.y][threadIdx.x] = idata[index_in];
	}

        // synchronise to ensure all writes to block[][] have completed
	__syncthreads();

	// write the transposed matrix tile to global memory (odata) in linear order
	xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
	yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
	if((xIndex < height) && (yIndex < width))
	{
		unsigned int index_out = yIndex * height + xIndex;
		idata[index_out] = block[threadIdx.x][threadIdx.y];
	}
}

// In-place exchange of two axes of a depth x height x width volume via a
// shared-memory tile (axes numbered 1=depth, 2=height, 3=width).
// NOTE(review): this kernel looks unusable as written — confirm before use:
//  * the tile was shrunk to block[BLOCK_DIM][1][1] but is still indexed with
//    threadIdx.x/y/z in all three dimensions, which goes out of bounds whenever
//    the last two indices are nonzero;
//  * BLOCK_DIM is 256, so a 3D block of 256^3 threads far exceeds the
//    1024-threads-per-block limit;
//  * index_out = yIndex * height + xIndex omits the depth (zIndex) term, so
//    different z-slices collide on the same output addresses;
//  * the write goes back into idata in place with no inter-block ordering.
// The out-of-place overload below appears to be the working variant.
__global__ void swap_axes(Complex *idata, int depth, int height, int width, int axes1, int axes2)
{
	// __shared__ Complex block[BLOCK_DIM][BLOCK_DIM+1][BLOCK_DIM+1];
	__shared__ Complex block[BLOCK_DIM][1][1];
	
	// read the matrix tile into shared memory
        // load one element per thread from device memory (idata) and store it
        // in transposed order in block[][]
	unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
	unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
	unsigned int zIndex = blockIdx.z * BLOCK_DIM + threadIdx.z;

	if((xIndex < width) && (yIndex < height) && (zIndex < depth))
	{
		unsigned int index_in = zIndex * height * width + yIndex * width + xIndex;
		if (axes1 == 1 && axes2 == 2  || axes1 == 2 && axes2 == 1)	
            	   block[threadIdx.y][threadIdx.z][threadIdx.x] = idata[index_in];
		if (axes1 == 1 && axes2 == 3  || axes1 == 3 && axes2 == 1)	
            	   block[threadIdx.x][threadIdx.y][threadIdx.z] = idata[index_in];
		if (axes1 == 2 && axes2 == 3  || axes1 == 3 && axes2 == 2)	
            	   block[threadIdx.z][threadIdx.y][threadIdx.x] = idata[index_in];
	}

        // synchronise to ensure all writes to block[][] have completed
	__syncthreads();
	// write the transposed matrix tile to global memory (odata) in linear order

	if (axes1 == 1 && axes2 == 2  || axes1 == 2 && axes2 == 1){	
	   zIndex = blockIdx.y * BLOCK_DIM + threadIdx.z;
	   yIndex = blockIdx.z * BLOCK_DIM + threadIdx.y;
	   xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
	} else if (axes1 == 1 && axes2 == 3  || axes1 == 3 && axes2 == 1) {	
	   zIndex = blockIdx.x * BLOCK_DIM + threadIdx.z;
	   yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
	   xIndex = blockIdx.z * BLOCK_DIM + threadIdx.x;
	} else if (axes1 == 2 && axes2 == 3  || axes1 == 3 && axes2 == 2) {	
	   zIndex = blockIdx.z * BLOCK_DIM + threadIdx.z;
	   yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
	   xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
	}

	if((xIndex < height) && (yIndex < width) && (zIndex < depth))
	{
		unsigned int index_out = yIndex * height + xIndex;
		idata[index_out] = block[threadIdx.z][threadIdx.x][threadIdx.y];
	}
}
// Out-of-place exchange of two axes of a depth x height x width volume:
// one thread per element copies idata[z][y][x] to odata with the requested
// axes swapped (axes numbered 1=depth, 2=height, 3=width).
__global__ void swap_axes(Complex *idata, Complex *odata, int depth, int height, int width, int axes1, int axes2)
{
	unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;
	if (j >= depth * height * width) return;

	// decompose the flat index into (z, y, x)
	unsigned int plane = height * width;
	unsigned int z = j / plane;
	unsigned int y = (j % plane) / width;
	unsigned int x = (j % plane) % width;

	bool swap_dh = (axes1 == 1 && axes2 == 2) || (axes1 == 2 && axes2 == 1);
	bool swap_dw = (axes1 == 1 && axes2 == 3) || (axes1 == 3 && axes2 == 1);
	bool swap_hw = (axes1 == 2 && axes2 == 3) || (axes1 == 3 && axes2 == 2);

	if (swap_dh) {          // (z,y,x) -> (y,z,x): output shape [height][depth][width]
		odata[y * depth * width + z * width + x] = idata[j];
	} else if (swap_dw) {   // (z,y,x) -> (x,y,z): output shape [width][height][depth]
		odata[x * height * depth + y * depth + z] = idata[j];
	} else if (swap_hw) {   // (z,y,x) -> (z,x,y): output shape [depth][width][height]
		odata[z * width * height + x * height + y] = idata[j];
	}
}

// work thread num is : in_n * k_n * k_c 
// FFT-based 2D convolution: one thread per (batch, out-channel, in-channel)
// triple zero-pads its input plane and kernel plane to tmp_out_h x tmp_out_w,
// 2D-FFTs both (row FFT, transpose, column FFT), multiplies in the frequency
// domain into dynamic shared memory, inverse-2D-FFTs the product, and the
// thread with tdx % k_c == 0 sums over the k_c channel products and writes the
// centre crop to output_Y.
// NOTE(review): several hazards to confirm before relying on this kernel:
//  * the __syncthreads() below sits inside `if (tdx < ...)` — a divergent
//    barrier if any thread of the block fails the predicate;
//  * knkcOutOutComp is per-block dynamic shared memory but is indexed by the
//    GLOBAL thread id tdx, so any launch with more than one block (or a shared
//    allocation smaller than the full in_n*k_n*k_c footprint) reads/writes out
//    of bounds; the channel-sum likewise assumes all k_c partner threads share
//    one block;
//  * the per-thread device malloc results are not checked for NULL.
__global__ void convfft_2d_kernel(Real* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  Real* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  int tmp_out_h, int tmp_out_w, int out_h, int out_w,
  
  Real* output_Y)
{
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx < in_n * k_n * k_c){


    extern __shared__ Complex knkcOutOutComp[];

      // offsets of this thread's input plane and kernel plane
      int x_offset = (k_c * (tdx / (k_c * k_n)) + tdx % k_c) * in_h * in_w ;
      int w_offset = (tdx % (k_n * k_c)) * k_h * k_w ;
      Complex*  inComp = (Complex*)malloc( tmp_out_h * tmp_out_w * sizeof(Complex));
      Complex*  keComp = (Complex*)malloc( tmp_out_h * tmp_out_w * sizeof(Complex));
      memset(inComp,0,tmp_out_h * tmp_out_w * sizeof(Complex));
      memset(keComp,0,tmp_out_h * tmp_out_w * sizeof(Complex));
      // zero-padded complex copies of the input and kernel planes
      for (int i = 0 ; i < in_h ; i++){
        for (int j = 0 ; j < in_w ; j++){
            inComp[i * tmp_out_w + j] = Complex((input_X + x_offset)[i * in_w + j],0.0);
        }
      }
      for (int i = 0 ; i < k_h ; i++){
        for (int j = 0 ; j < k_w ; j++){
            keComp[i * tmp_out_w + j] = Complex((input_W + w_offset)[i * k_w + j],0.0);
        }
      }

      fft_plan plan_w = fft_plan(tmp_out_w);
      fft_plan plan_h = fft_plan(tmp_out_h);

      // fft_cuda_1d_gpu_with_plan(plan_w, 1, tmp_out_w ,inComp + 0 * tmp_out_w);// test 

      // fft along w dimension
      for (int i = 0 ; i < tmp_out_h ; i++) {
        fft_cuda_1d_gpu_with_plan(plan_w, 1, tmp_out_w ,inComp + i * tmp_out_w);
        fft_cuda_1d_gpu_with_plan(plan_w, 1, tmp_out_w ,keComp + i * tmp_out_w);
      }
      // transpose the matrix so the h dimension becomes contiguous
      transpose_gpu(inComp,tmp_out_h, tmp_out_w);
      transpose_gpu(keComp,tmp_out_h, tmp_out_w);
      // fft along h dimension
      for (int i = 0 ; i < tmp_out_w ; i++) {
        fft_cuda_1d_gpu_with_plan(plan_h, 1, tmp_out_h ,inComp + i * tmp_out_h);
        fft_cuda_1d_gpu_with_plan(plan_h, 1, tmp_out_h ,keComp + i * tmp_out_h);
      }
      // transpose back to row-major h x w layout
      transpose_gpu(inComp,tmp_out_w,tmp_out_h);
      transpose_gpu(keComp,tmp_out_w,tmp_out_h);

      // NOTE(review): shared memory indexed by global tdx — see header comment.
      Complex * tmp_res = knkcOutOutComp + tdx * tmp_out_h * tmp_out_w;
      // multiply a and b to get c
      // use output to store the result
      for (int i = 0 ; i < tmp_out_h * tmp_out_w ; i++){
        tmp_res[i] = Complex(inComp[i].real * keComp[i].real - inComp[i].imag * keComp[i].imag,
                                                      inComp[i].imag * keComp[i].real + inComp[i].real * keComp[i].imag);
      }
      // NOTE(review): divergent barrier — inside `if (tdx < ...)`.
      __syncthreads();
      // inverse 2D FFT of the frequency-domain product
      // fft along w dimension
      for (int i = 0 ; i < tmp_out_h ; i++) {
          fft_cuda_1d_gpu_with_plan(plan_w, 0, tmp_out_w ,tmp_res + i * tmp_out_w);
      }
      // transpose the matrix
      transpose_gpu(tmp_res,tmp_out_h, tmp_out_w);

      // fft along h dimension
      for (int i = 0 ; i < tmp_out_w ; i++) {
          fft_cuda_1d_gpu_with_plan(plan_h, 0, tmp_out_h, tmp_res + i * tmp_out_h);
      }
      // transpose the matrix
      transpose_gpu(tmp_res,tmp_out_w,tmp_out_h);
      // the k_c leader thread reduces over the channel axis and writes the crop
      if (tdx % k_c == 0 )
      {
        for (int i = 1 ; i < k_c ; i++){
          for (int k = 0 ; k < tmp_out_h * tmp_out_w ; k++){
            tmp_res[k] += tmp_res[i * tmp_out_h * tmp_out_w + k];
          }
        }
        // centre crop of the full convolution result
        int start_row_index = (tmp_out_h - out_h) / 2;
        int start_col_index = (tmp_out_w - out_w) / 2;
        int y_offset = (tdx / k_c) * out_h * out_w;
        for (int i = 0 ; i < out_h ; i ++){
            for (int j = 0 ; j < out_w ; j ++){
                output_Y[y_offset + i * out_w + j] = tmp_res[(i + start_row_index) * tmp_out_w + j + start_col_index].real;
            }
        }
      }
      free(inComp);
      free(keComp);
      plan_w.memory_free();
      plan_h.memory_free();
    }
}

/*
    Host wrapper: computes the 2D convolution of input_X (NCHW, depth unused)
    with input_W via FFT-based convolution on the GPU and copies the result
    into output_Y.  Throws std::string on unsupported parameter combinations.
*/
void convfft_2d_gpu(Real* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  Real* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  int p_h, int p_w,
  int s_h, int s_w,
  int d_h, int d_w,
  Real* output_Y,int out_n, int out_c, int out_d, int out_h, int out_w)
{

  /** comments from cuDNN-API

    The input channel count must match the kernel's input channel count.
    1. in_c must equal k_c
    2. in_h + 2 * padding height must be 256 or less
       in_w + 2 * padding width must be 256 or less
    3. stride of height and width must equal 1
    4. k_h must be greater than padding height
       k_w must be greater than padding width

  */
  if (k_c != in_c 
    || in_h + 2 * p_h > 256 || in_w + 2 * p_w > 256 
    || s_h != 1 || s_w != 1 
    || k_h <= p_h || k_w <= p_w ){
    throw string("convfft_2d cannot perform convolution because the input size does not match the kernel size!");
  }
  // padded size of the full convolution result
  size_t tmp_out_h = in_h + k_h - 1;
  size_t tmp_out_w = in_w + k_w - 1;
  // device memory allocation
  Real* d_input_X ;
  Check_Error(cudaMalloc(&d_input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real)));
  Real* d_input_W ;
  Check_Error(cudaMalloc(&d_input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real)));
  // NOTE(review): allocated but not passed to the kernel (the kernel uses
  // dynamic shared memory instead) — kept for interface stability.
  Complex* d_knkcOutOutComp ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex)));
  Real* d_output_Y;
  Check_Error(cudaMalloc(&d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real)));
  // memory copy host to device 
  Check_Error(cudaMemcpy(d_input_X,input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real), cudaMemcpyHostToDevice));
  Check_Error(cudaMemcpy( d_input_W,input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real), cudaMemcpyHostToDevice));
  // invoke the kernel 
  int threadsPerBlock = 256;
  int blocksPerGrid = (in_n * k_n * k_c  + threadsPerBlock - 1) / threadsPerBlock;
  // size_t instead of int: the product easily overflows 32 bits.
  // NOTE(review): this dynamic shared allocation grows with the whole problem
  // size and will exceed the per-block shared memory limit for all but tiny
  // problems — confirm the kernel's shared-memory design before use.
  size_t sharedMemSize = (size_t)in_n * k_n * k_c * tmp_out_h * tmp_out_w * sizeof(Complex);
  clock_t startTime,endTime;
  startTime = clock();
  convfft_2d_kernel<<<blocksPerGrid, threadsPerBlock,sharedMemSize>>>(d_input_X, in_n, in_c, in_d, in_h, in_w,
                                                        d_input_W, k_n, k_c, k_d, k_h, k_w,
                                                        tmp_out_h, tmp_out_w, out_h, out_w
                                                        // ,d_knkcOutOutComp
                                                        ,d_output_Y);
  // Fix: surface launch-configuration failures (e.g. shared memory too large),
  // which otherwise make the kernel silently do nothing.
  Check_Error(cudaGetLastError());
  Check_Error(cudaDeviceSynchronize());
  endTime = clock();
  cout << "The convfft_2d_kernel run time is: " <<(double) 1000 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << endl;
  Check_Error(cudaMemcpy( output_Y, d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real), cudaMemcpyDeviceToHost));
  cudaFree(d_input_X);
  cudaFree(d_input_W);
  cudaFree(d_knkcOutOutComp);
  cudaFree(d_output_Y);
}

// One thread per input element: copies the real NCHW value into the top-left
// corner of a zero-padded (tmp_out_h x tmp_out_w) complex plane in `output`
// (the padding region itself is assumed to be zeroed elsewhere).
__global__ void init_comp(Real* input, int n, int c, int d, int h, int w, int tmp_out_h, int tmp_out_w, Complex* output)
{
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx >= n * c * h * w) return;

  // decompose the flat index into (batch, channel, row, col)
  int iw = tdx % w;
  int ih = (tdx / w) % h;
  int ic = (tdx / (h * w)) % c;
  int ib = tdx / (c * h * w);

  int dst = ((ib * c + ic) * tmp_out_h + ih) * tmp_out_w + iw;
  int src = ((ib * c + ic) * h + ih) * w + iw;
  output[dst] = Complex(input[src], 0.0);
}

// One thread per input element: copies the real NCDHW value into a zero-padded
// (tmp_out_d x tmp_out_h x tmp_out_w) complex volume, shifted by the padding
// offsets (pd, ph, pw) along depth/height/width.
__global__ void init_comp_3d(Real* input, int n, int c, int d, int h, int w, int pd, int ph, int pw, int tmp_out_d, int tmp_out_h, int tmp_out_w, Complex* output)
{
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx >= n * c * d * h * w) return;

  // decompose the flat index into (batch, channel, depth, row, col)
  int iw = tdx % w;
  int ih = (tdx / w) % h;
  int id = (tdx / (h * w)) % d;
  int ic = (tdx / (d * h * w)) % c;
  int ib = tdx / (c * d * h * w);

  int dst = (((ib * c + ic) * tmp_out_d + (id + pd)) * tmp_out_h + (ih + ph)) * tmp_out_w + (iw + pw);
  int src = (((ib * c + ic) * d + id) * h + ih) * w + iw;
  output[dst] = Complex(input[src], 0.0);
}

// One thread per row: forward FFT of length w applied to each of the
// (n1*c1 + n2*c2) * h contiguous rows of `input`.
__global__ void fft_1d_fwd(Complex* input, int n1, int c1, int d1, int n2, int c2, int d2, int h, int w)
{
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  int rows = (n1 * c1 + n2 * c2) * h;
  if (tdx >= rows) return;

  fft_plan plan_w = fft_plan(w);
  fft_cuda_1d_gpu_with_plan(plan_w, 1, w, input + tdx * w);
  plan_w.memory_free();
}

// One radix-2 butterfly per thread for one Stockham FFT iteration.
// N: transform length, Ns: current sub-transform size, length: batch count;
// input and output are distinct buffers (ping-pong between iterations).
__global__ void FFT_Iter_R2(Complex* input, Complex* output, const unsigned int N, const unsigned int M, const unsigned int Ns, const unsigned int length) 
{	
	long long j = blockDim.x * blockIdx.x + threadIdx.x;	//Thread Id
	if (j < N * length/R2){
	long long idxI = j / (N/R2);	// which transform in the batch
	long long idxS = j % (N/R2);	// butterfly index inside the transform
	float v_r[R2]; float v_i[R2];		//2 inputs of butterfly
	//angle comes from W = e^(-2*PI/(R2*Ns) * m)
	float angle = -2*PI*(idxS%Ns) / (Ns*R2);	//Ns=1 --> angle=0 , Ns=2 --> angle=0 & -2PI/4 , and so on...
	//Reading from memory and multiplying by W
	float v_r_temp[R2]; float v_i_temp[R2];
	for (int r=0; r<R2; r++){
		v_r_temp[r] = input[idxI * N + idxS+r*N/R2].real;
		v_i_temp[r] = input[idxI * N + idxS+r*N/R2].imag;
		// Fix: use single-precision cosf/sinf — the original called the
		// double-precision cos/sin on float data in a float kernel.
		float c = cosf(r*angle);
		float s = sinf(r*angle);
		v_r[r] = (v_r_temp[r]*c - v_i_temp[r]*s);	// (v_r + i v_i)*(cos + i sin)
		v_i[r] = (v_r_temp[r]*s + v_i_temp[r]*c);
	}
	//Butterfly (R = 2)
	float v0_r = v_r[0]; float v0_i = v_i[0];
	v_r[0] = v0_r + v_r[1]; v_i[0] = v0_i + v_i[1];
	v_r[1] = v0_r - v_r[1]; v_i[1] = v0_i - v_i[1];
	//expand: destination index after this iteration's digit reversal
	int idxD = (idxS/Ns)*Ns*R2 + (idxS%Ns);
	//Write in X_d
	for (int r=0; r<R2 ; r++){
		output[idxI * N + idxD + r*Ns] = Complex(v_r[r],v_i[r]);
	}
     }

}

// Strided ("with transpose") variant of FFT_Iter_R2: each batch holds an
// H x N matrix and the FFT runs along the N dimension with stride H, so no
// explicit transpose is needed between row and column passes.
__global__ void FFT_Iter_R2_wt(Complex* input, Complex* output, const unsigned int N,const unsigned int H, const unsigned int M, const unsigned int Ns, const unsigned int length) 
{	
	long long j = blockDim.x * blockIdx.x + threadIdx.x;	//Thread Id
	if (j < N * length/R2){
	long long idxM = j / (H * N/R2);// the index of which one in n * c
	long long idxI = (j % (H * N/R2)) / (N/R2);// the index of which row of the unique matrix 
	long long idxS = (j % (H * N/R2)) % (N/R2);// the index of which col of the unique matrix maybe equals to j % (N/R2) 
	float v_r[R2]; float v_i[R2];		//2 inputs of butterfly
	//angle comes from W = e^(-2*PI/(R2*Ns) * m)
	float angle = -2*PI*(idxS%Ns) / (Ns*R2);	//Ns=1 --> angle=0 , Ns=2 --> angle=0 & -2PI/4 , and so on...
	//Reading from memory and multiplying by W
	float v_r_temp[R2]; float v_i_temp[R2];
	for (int r=0; r<R2; r++){
		v_r_temp[r] = input[idxM * H * N + idxS * H + idxI + r*(N/R2)*H].real;//column and row index positions exchanged
		v_i_temp[r] = input[idxM * H * N + idxS * H + idxI + r*(N/R2)*H].imag;
		// Fix: use single-precision cosf/sinf — the original called the
		// double-precision cos/sin on float data in a float kernel.
		float c = cosf(r*angle);
		float s = sinf(r*angle);
		v_r[r] = (v_r_temp[r]*c - v_i_temp[r]*s);	// (v_r + i v_i)*(cos + i sin)
		v_i[r] = (v_r_temp[r]*s + v_i_temp[r]*c);
	}
	//Butterfly (R = 2)
	float v0_r = v_r[0]; float v0_i = v_i[0];
	v_r[0] = v0_r + v_r[1]; v_i[0] = v0_i + v_i[1];
	v_r[1] = v0_r - v_r[1]; v_i[1] = v0_i - v_i[1];
	//expand: destination index after this iteration's digit reversal
	int idxD = (idxS/Ns)*Ns*R2 + (idxS%Ns);
	//Write in X_d
	for (int r=0; r<R2 ; r++){
		output[idxM * H * N + idxD * H + idxI + r*Ns*H] = Complex(v_r[r],v_i[r]);
	}
     }

}
// Shared-memory variant of FFT_Iter_R2: stages the butterfly results in
// dynamic shared memory, then copies them back into `input` (the `output`
// parameter is unused).
// NOTE(review): hazards to confirm before use:
//  * __syncthreads() below sits inside `if (j < ...)` — a divergent barrier if
//    any thread of the block fails the predicate;
//  * temp_elements is per-block shared memory but is indexed with the GLOBAL
//    offset idxI * N + ..., which only works if the entire batch fits in one
//    block's dynamic shared allocation (single-block launch).
__global__ void FFT_Iter_R2_sm(Complex* input, Complex* output, const unsigned int N, const unsigned int M, const unsigned int Ns, const unsigned int length) 
{
	extern __shared__ Complex temp_elements[];

        long long j = blockDim.x * blockIdx.x + threadIdx.x;	//Thread Id
	if (j < N * length/R2){
	long long idxI = j / (N/R2);
	long long idxS = j % (N/R2);
	float v_r[R2]; float v_i[R2];		//2 inputs of butterfly
	//angle come from W = e^(-2*PI/(R2*Ns) * m)
	float angle = -2*PI*(idxS%Ns) / (Ns*R2);	//Ns=1 --> angle=0 , Ns=2 --> angle=0 & -2PI/4 , and so on...
	//Reading From Memory and multiplying in W
	float v_r_temp[R2]; float v_i_temp[R2];
	for (int r=0; r<R2; r++){
		v_r_temp[r] = input[idxI * N + idxS+r*N/R2].real;
		v_i_temp[r] = input[idxI * N + idxS+r*N/R2].imag;		
		v_r[r] = (v_r_temp[r]*cos(r*angle) - v_i_temp[r]*sin(r*angle));	// (v_r + i v_i)*(cos + i sin) = (v_r*cos - v_i*sin) + i (v_r*sin + v_i*cos)
		v_i[r] = (v_r_temp[r]*sin(r*angle) + v_i_temp[r]*cos(r*angle));			
	}
	//Butterfly (R = 2)
	float v0_r = v_r[0]; float v0_i = v_i[0];
	v_r[0] = v0_r + v_r[1]; v_i[0] = v0_i + v_i[1];
	v_r[1] = v0_r - v_r[1]; v_i[1] = v0_i - v_i[1];
	//expand
	int idxD = (idxS/Ns)*Ns*R2 + (idxS%Ns);
	//Write in X_d
	for (int r=0; r<R2 ; r++){
		temp_elements[idxI * N + idxD + r*Ns] = Complex(v_r[r],v_i[r]); 
	}
	
	// NOTE(review): divergent barrier — see header comment.
	__syncthreads();
	
	for (int r=0; r<R2 ; r++){
		input[idxI * N + idxD + r*Ns] = temp_elements[idxI * N + idxD + r*Ns];
	}

     }

}
__global__ void FFT_Iter_R2_wt_sm(Complex* input, Complex* output, const unsigned int N,const unsigned int H, const unsigned int M, const unsigned int Ns, const unsigned int length) 
{	
	// Radix-2 Stockham stage over the transposed layout (elements of one
	// transform are H apart in memory), staged through dynamic shared memory
	// and copied back into `input`.
	//
	// NOTE(review): shared memory is addressed with global offsets
	// (idxM*H*N + ...), so correctness requires a single-block launch whose
	// shared allocation spans the whole tensor — confirm at call sites.
	extern __shared__ Complex temp_elements[];
	long long j = blockDim.x * blockIdx.x + threadIdx.x;	// global thread id
	const bool active = (j < N * length/R2);
	long long idxM = 0, idxI = 0, idxD = 0;
	if (active) {
		idxM = j / (H * N/R2);			// which (n, c) slice
		idxI = (j % (H * N/R2)) / (N/R2);	// row within the slice
		long long idxS = (j % (H * N/R2)) % (N/R2);	// source column
		// Twiddle W = e^(-2*PI*i*(idxS%Ns)/(Ns*R2))
		float angle = -2*PI*(idxS%Ns) / (Ns*R2);
		float v_r[R2]; float v_i[R2];
		for (int r=0; r<R2; r++){
			float c, s;
			sincosf(r*angle, &s, &c);	// explicit single-precision sin/cos
			long long src = idxM * H * N + idxS * H + idxI + r*(N/R2)*H;	// transposed access
			float in_r = input[src].real;
			float in_i = input[src].imag;
			v_r[r] = in_r*c - in_i*s;
			v_i[r] = in_r*s + in_i*c;
		}
		// Radix-2 butterfly
		float v0_r = v_r[0]; float v0_i = v_i[0];
		v_r[0] = v0_r + v_r[1]; v_i[0] = v0_i + v_i[1];
		v_r[1] = v0_r - v_r[1]; v_i[1] = v0_i - v_i[1];
		idxD = (idxS/Ns)*Ns*R2 + (idxS%Ns);	// expanded destination column
		for (int r=0; r<R2 ; r++){
			temp_elements[idxM * H * N + idxD * H + idxI + r*Ns*H] = Complex(v_r[r],v_i[r]);
		}
	}
	// Fixed: barrier hoisted out of the bounds check — __syncthreads() inside
	// a divergent branch is undefined behaviour for inactive tail threads.
	__syncthreads();
	if (active) {
		for (int r=0; r<R2 ; r++){
			long long dst = idxM * H * N + idxD * H + idxI + r*Ns*H;
			input[dst] = temp_elements[dst];
		}
	}
}
__global__ void IFFT_Iter_R2(Complex* input, Complex* output, const unsigned int N, const unsigned int M, const unsigned int Ns, const unsigned int length) 
{	
	// One inverse radix-2 Stockham stage: conjugate the input, run a forward
	// butterfly, then conjugate and scale the result by 1/R2 on output.
	long long j = blockDim.x * blockIdx.x + threadIdx.x;	// global thread id
	if (j < N * length/R2){
		long long idxI = j / (N/R2);	// which transform (row)
		long long idxS = j % (N/R2);	// source column within the row
		// Twiddle factor W = e^(-2*PI*i*(idxS%Ns)/(Ns*R2))
		float angle = -2*PI*(idxS%Ns) / (Ns*R2);
		float re[R2]; float im[R2];
		for (int r=0; r<R2; r++){
			float cr = input[idxI * N + idxS+r*N/R2].real;
			float ci = -input[idxI * N + idxS+r*N/R2].imag;	// conjugate: inverse via forward kernel
			// (cr + i*ci) * (cos + i*sin)
			re[r] = cr*cos(r*angle) - ci*sin(r*angle);
			im[r] = cr*sin(r*angle) + ci*cos(r*angle);
		}
		// Radix-2 butterfly
		float t_r = re[0], t_i = im[0];
		re[0] = t_r + re[1]; im[0] = t_i + im[1];
		re[1] = t_r - re[1]; im[1] = t_i - im[1];
		// Expanded (Stockham) destination index
		int idxD = (idxS/Ns)*Ns*R2 + (idxS%Ns);
		for (int r=0; r<R2 ; r++){
			// conjugate back and apply the 1/R2 inverse scaling
			output[idxI * N + idxD + r*Ns] = Complex(re[r]/R2, -im[r]/R2);
		}
	}
}

__global__ void IFFT_Iter_R2_sm(Complex* input, Complex* output, const unsigned int N, const unsigned int M, const unsigned int Ns, const unsigned int length) 
{	
	// Inverse radix-2 Stockham stage staged through dynamic shared memory:
	// conjugate on read, forward butterfly, conjugate + 1/R2 scale on write,
	// then copy the staging area back into `input`.
	//
	// NOTE(review): temp_elements is addressed with global indices, so this is
	// only valid for single-block launches whose shared allocation covers the
	// whole signal — confirm at call sites.
	extern __shared__ Complex temp_elements[];
	long long j = blockDim.x * blockIdx.x + threadIdx.x;	// global thread id
	const bool active = (j < N * length/R2);
	long long idxI = 0;	// which transform (row)
	long long idxD = 0;	// expanded destination column
	if (active) {
		idxI = j / (N/R2);
		long long idxS = j % (N/R2);	// source column
		float angle = -2*PI*(idxS%Ns) / (Ns*R2);	// twiddle exponent
		float v_r[R2]; float v_i[R2];
		for (int r=0; r<R2; r++){
			float c, s;
			sincosf(r*angle, &s, &c);	// explicit single-precision sin/cos
			float in_r = input[idxI * N + idxS+r*N/R2].real;
			float in_i = -input[idxI * N + idxS+r*N/R2].imag;	// conjugate for the inverse
			v_r[r] = in_r*c - in_i*s;
			v_i[r] = in_r*s + in_i*c;
		}
		// Radix-2 butterfly
		float v0_r = v_r[0]; float v0_i = v_i[0];
		v_r[0] = v0_r + v_r[1]; v_i[0] = v0_i + v_i[1];
		v_r[1] = v0_r - v_r[1]; v_i[1] = v0_i - v_i[1];
		idxD = (idxS/Ns)*Ns*R2 + (idxS%Ns);
		for (int r=0; r<R2 ; r++){
			// conjugate back and divide by the radix
			temp_elements[idxI * N + idxD + r*Ns] = Complex(v_r[r]/R2, -v_i[r]/R2);
		}
	}
	// Fixed: barrier hoisted out of the bounds check — __syncthreads() in a
	// divergent branch is undefined behaviour when tail threads are inactive.
	__syncthreads();
	if (active) {
		for (int r=0; r<R2 ; r++){
			input[idxI * N + idxD + r*Ns] = temp_elements[idxI * N + idxD + r*Ns];
		}
	}
}
__global__ void FFT_Iter_R2(float* x_r_d, float* x_i_d, float* X_r_d, float* X_i_d, const unsigned int N, const unsigned int M, const unsigned int Ns, const unsigned int length) 
{	
	// One radix-2 Stockham FFT stage on split real/imag arrays of a single
	// N-point signal. `length` is accepted for signature parity with the
	// Complex overload but is unused here.
	long long j = blockDim.x * blockIdx.x + threadIdx.x;	// thread id == source column
	// Bounds guard (added): the original kernel had none, so any launch whose
	// grid did not divide N/R2 exactly read and wrote out of bounds.
	if (j < N/R2) {
		long long idxS = j;
		float v_r[R2]; float v_i[R2];	// the two butterfly inputs
		// Twiddle W = e^(-2*PI*i*(j%Ns)/(Ns*R2)); Ns=1 -> angle=0, Ns=2 -> 0 or -2PI/4, ...
		float angle = -2*PI*(j%Ns) / (Ns*R2);
		for (int r=0; r<R2; r++){
			float c, s;
			sincosf(r*angle, &s, &c);	// explicit single-precision sin/cos
			float in_r = x_r_d[idxS+r*N/R2];
			float in_i = x_i_d[idxS+r*N/R2];
			// (in_r + i*in_i) * (c + i*s)
			v_r[r] = in_r*c - in_i*s;
			v_i[r] = in_r*s + in_i*c;
		}
		// Radix-2 butterfly
		float v0_r = v_r[0]; float v0_i = v_i[0];
		v_r[0] = v0_r + v_r[1]; v_i[0] = v0_i + v_i[1];
		v_r[1] = v0_r - v_r[1]; v_i[1] = v0_i - v_i[1];
		// Expanded (Stockham) destination index
		int idxD = (j/Ns)*Ns*R2 + (j%Ns);
		for (int r=0; r<R2 ; r++){
			X_r_d[idxD + r*Ns] = v_r[r];
			X_i_d[idxD + r*Ns] = v_i[r];
		}
	}
}

__global__ void IFFT_Iter_R2(float* x_r_d, float* x_i_d, float* X_r_d, float* X_i_d, const unsigned int N, const unsigned int M, const unsigned int Ns) 
{	
	// One inverse radix-2 Stockham stage on split real/imag arrays of a single
	// N-point signal: conjugate on read, forward butterfly, conjugate and
	// scale by 1/R2 on write.
	long long j = blockDim.x * blockIdx.x + threadIdx.x;	// thread id == source column
	// Bounds guard (added): the original kernel had none, so any launch whose
	// grid did not divide N/R2 exactly read and wrote out of bounds.
	if (j < N/R2) {
		long long idxS = j;
		float v_r[R2]; float v_i[R2];	// the two butterfly inputs
		// Twiddle W = e^(-2*PI*i*(j%Ns)/(Ns*R2))
		float angle = -2*PI*(j%Ns) / (Ns*R2);
		for (int r=0; r<R2; r++){
			float c, s;
			sincosf(r*angle, &s, &c);	// explicit single-precision sin/cos
			float in_r = x_r_d[idxS+r*N/R2];
			float in_i = -x_i_d[idxS+r*N/R2];	// conjugate for the inverse
			v_r[r] = in_r*c - in_i*s;
			v_i[r] = in_r*s + in_i*c;
		}
		// Radix-2 butterfly
		float v0_r = v_r[0]; float v0_i = v_i[0];
		v_r[0] = v0_r + v_r[1]; v_i[0] = v0_i + v_i[1];
		v_r[1] = v0_r - v_r[1]; v_i[1] = v0_i - v_i[1];
		// Expanded (Stockham) destination index
		int idxD = (j/Ns)*Ns*R2 + (j%Ns);
		for (int r=0; r<R2 ; r++){
			// conjugate back and apply the 1/R2 inverse scaling
			X_r_d[idxD + r*Ns] = v_r[r]/R2;
			X_i_d[idxD + r*Ns] = -v_i[r]/R2;
		}
	}
}
__global__ void IFFT_Iter_R2_wt(Complex* input, Complex* output, const unsigned int N,const unsigned int H, const unsigned int M, const unsigned int Ns, const unsigned int length) 
{	
	// Inverse radix-2 Stockham stage over the transposed layout: elements of
	// one transform are H apart in memory. Conjugate on read, forward
	// butterfly, conjugate and scale by 1/R2 on write.
	long long j = blockDim.x * blockIdx.x + threadIdx.x;	// global thread id
	if (j < N * length/R2){
		long long idxM = j / (H * N/R2);		// which (n, c) slice
		long long rem  = j % (H * N/R2);
		long long idxI = rem / (N/R2);			// row within the slice
		long long idxS = rem % (N/R2);			// source column
		// Twiddle factor W = e^(-2*PI*i*(idxS%Ns)/(Ns*R2))
		float angle = -2*PI*(idxS%Ns) / (Ns*R2);
		float re[R2]; float im[R2];
		for (int r=0; r<R2; r++){
			long long src = idxM * H * N + idxS * H + idxI + r*(N/R2)*H;	// transposed access
			float cr = input[src].real;
			float ci = -input[src].imag;	// conjugate: inverse via the forward kernel
			re[r] = cr*cos(r*angle) - ci*sin(r*angle);
			im[r] = cr*sin(r*angle) + ci*cos(r*angle);
		}
		// Radix-2 butterfly
		float t_r = re[0], t_i = im[0];
		re[0] = t_r + re[1]; im[0] = t_i + im[1];
		re[1] = t_r - re[1]; im[1] = t_i - im[1];
		int idxD = (idxS/Ns)*Ns*R2 + (idxS%Ns);	// expanded destination column
		for (int r=0; r<R2 ; r++){
			// conjugate back and apply the 1/R2 inverse scaling
			output[idxM * H * N + idxD * H + idxI + r*Ns*H] = Complex(re[r]/R2, -im[r]/R2);
		}
	}
}
__global__ void IFFT_Iter_R2_wt_sm(Complex* input, Complex* output, const unsigned int N,const unsigned int H, const unsigned int M, const unsigned int Ns, const unsigned int length) 
{	
	// Inverse radix-2 Stockham stage over the transposed (stride-H) layout,
	// staged through dynamic shared memory and copied back into `input`.
	// Conjugate on read, forward butterfly, conjugate + 1/R2 scale on write.
	//
	// NOTE(review): shared memory is addressed with global offsets, so this is
	// only valid for single-block launches whose shared allocation spans the
	// whole tensor — confirm at call sites.
	extern __shared__ Complex temp_elements[];
	long long j = blockDim.x * blockIdx.x + threadIdx.x;	// global thread id
	const bool active = (j < N * length/R2);
	long long idxM = 0, idxI = 0, idxD = 0;
	if (active) {
		idxM = j / (H * N/R2);			// which (n, c) slice
		idxI = (j % (H * N/R2)) / (N/R2);	// row within the slice
		long long idxS = (j % (H * N/R2)) % (N/R2);	// source column
		float angle = -2*PI*(idxS%Ns) / (Ns*R2);	// twiddle exponent
		float v_r[R2]; float v_i[R2];
		for (int r=0; r<R2; r++){
			float c, s;
			sincosf(r*angle, &s, &c);	// explicit single-precision sin/cos
			long long src = idxM * H * N + idxS * H + idxI + r*(N/R2)*H;
			float in_r = input[src].real;
			float in_i = -input[src].imag;	// conjugate for the inverse
			v_r[r] = in_r*c - in_i*s;
			v_i[r] = in_r*s + in_i*c;
		}
		// Radix-2 butterfly
		float v0_r = v_r[0]; float v0_i = v_i[0];
		v_r[0] = v0_r + v_r[1]; v_i[0] = v0_i + v_i[1];
		v_r[1] = v0_r - v_r[1]; v_i[1] = v0_i - v_i[1];
		idxD = (idxS/Ns)*Ns*R2 + (idxS%Ns);	// expanded destination column
		for (int r=0; r<R2 ; r++){
			// conjugate back and divide by the radix
			temp_elements[idxM * H * N + idxD * H + idxI + r*Ns*H] = Complex(v_r[r]/R2, -v_i[r]/R2);
		}
	}
	// Fixed: barrier hoisted out of the bounds check — __syncthreads() inside
	// a divergent branch is undefined behaviour for inactive tail threads.
	__syncthreads();
	if (active) {
		for (int r=0; r<R2 ; r++){
			long long dst = idxM * H * N + idxD * H + idxI + r*Ns*H;
			input[dst] = temp_elements[dst];
		}
	}
}
__global__ void Copy_Comp_to_x (Complex* comp, float* x_r_d, float* x_i_d){	
	// De-interleave a Complex array into separate real/imag float arrays.
	// No bounds guard: the launch must cover the array exactly.
	long long idx = blockDim.x * blockIdx.x + threadIdx.x;
	x_r_d[idx] = comp[idx].real;
	x_i_d[idx] = comp[idx].imag;
}

__global__ void Copy_X_to_x (float* x_r_d, float* x_i_d, float* X_r_d, float* X_i_d){	
	// Copy the stage result (X_*) back into the working buffers (x_*).
	// No bounds guard: the launch must cover the arrays exactly.
	long long idx = blockDim.x * blockIdx.x + threadIdx.x;
	x_r_d[idx] = X_r_d[idx];
	x_i_d[idx] = X_i_d[idx];
}

__global__ void Copy_X_to_x (Complex* input, Complex* output){	
	// Copy `output` into `input` (the first parameter is the destination).
	// No bounds guard: the launch must cover the array exactly.
	long long idx = blockDim.x * blockIdx.x + threadIdx.x;
	input[idx] = output[idx];
}
__global__ void Copy_X_to_x (Complex* input, Complex* output, int N ){	
	// Bounds-guarded copy of `output` into `input` (first parameter is the
	// destination); safe to launch with a rounded-up grid.
	long long idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < N) {
		input[idx] = output[idx];
	}
}
__global__ void Copy_X_to_Comp (Complex* comp, float* X_r_d, float* X_i_d){	
	// Interleave split real/imag float arrays into a Complex array.
	// No bounds guard: the launch must cover the arrays exactly.
	long long idx = blockDim.x * blockIdx.x + threadIdx.x;
	comp[idx].real = X_r_d[idx];
	comp[idx].imag = X_i_d[idx];
}
// Complex* input, int n1, int c1, int d1, int n2, int c2, int d2, int h, int w
void fft_1d_fwd_stockham(Complex* input,Complex* output, int n1, int c1, int d1, int n2, int c2, int d2, int h, int w, const unsigned int N )
{	
	// Forward 1-D Stockham FFT along the w axis of (n1*c1 + n2*c2) * h rows.
	// Each radix-2 stage writes into `output`, then the result is copied back
	// into `input` for the next stage.
	int M = log2(w);				// number of radix-2 stages
	int total = (n1 * c1 + n2 * c2) * h * w;	// total element count
	if ( total < MAX_THREAD_PER_BLOCK ){	
		for (int Ns=1; Ns < w ; Ns*=R2){							
		    FFT_Iter_R2 <<< 1, total/R2 >>>(input, output, w , M, Ns, (n1 * c1 + n2 * c2) * h );
		    Copy_X_to_x <<< 1, total >>>(input, output, total);
		}
	}
	else {
		for (int Ns=1; Ns< w ; Ns*=R2){							
		    FFT_Iter_R2 <<< total/(R2 * MAX_THREAD_PER_BLOCK ) + 1 , MAX_THREAD_PER_BLOCK >>>(input, output, w , M, Ns, (n1 * c1 + n2 * c2) * h );
		    // Fixed: +1 block added — the truncating division dropped the tail
		    // elements of the copy whenever `total` was not a multiple of the
		    // block size. The copy kernel is bounds-guarded, so the extra
		    // block is harmless when the division is exact.
		    Copy_X_to_x <<< total/MAX_THREAD_PER_BLOCK + 1, MAX_THREAD_PER_BLOCK >>>(input, output, total);
		}
	}
	
}
void fft_1d_fwd_stockham_wt(Complex* input,Complex* output, int n1, int c1, int d1, int n2, int c2, int d2, int h, int w, const unsigned int N )
{	
	// Forward 1-D Stockham FFT along the h axis using the transposed-access
	// kernel (elements of one transform are w apart in memory).
	int M = log2(h);				// number of radix-2 stages
	int total = (n1 * c1 + n2 * c2) * h * w;	// total element count
	if ( total < MAX_THREAD_PER_BLOCK ){	
		for (int Ns=1; Ns < h ; Ns*=R2){							
		    FFT_Iter_R2_wt <<< 1, total/R2 >>>(input, output, h , w, M, Ns, (n1 * c1 + n2 * c2) * w );
		    Copy_X_to_x <<< 1, total >>>(input, output, total);
		}
	}
	else {
		for (int Ns=1; Ns< h ; Ns*=R2){							
		    FFT_Iter_R2_wt <<< total/(R2 * MAX_THREAD_PER_BLOCK ) + 1 , MAX_THREAD_PER_BLOCK >>>(input, output, h, w, M, Ns, (n1 * c1 + n2 * c2) * w );
		    // Fixed: +1 block added — the truncating division dropped the tail
		    // elements of the copy whenever `total` was not a multiple of the
		    // block size. The copy kernel is bounds-guarded, so the extra
		    // block is harmless when the division is exact.
		    Copy_X_to_x <<< total/MAX_THREAD_PER_BLOCK + 1, MAX_THREAD_PER_BLOCK >>>(input, output, total);
		}
	}
	
}
void fft_1d_bwd_stockham(Complex* input,Complex* output, int n, int c, int d, int h, int w, const unsigned int N )
{	
	// Inverse 1-D Stockham FFT along the w axis of n*c*h rows. Each stage runs
	// the inverse radix-2 kernel into `output`, then copies the result back
	// into `input` for the next stage.
	const int total = n * c * h * w;	// total element count
	const int rows  = n * c * h;		// independent transforms
	int M = log2(w);			// number of radix-2 stages
	if (total < MAX_THREAD_PER_BLOCK){
		for (int Ns = 1; Ns < w; Ns *= R2){
			IFFT_Iter_R2 <<< 1, total/R2 >>>(input, output, w, M, Ns, rows);
			Copy_X_to_x <<< 1, total >>>(input, output, total);
		}
	}
	else {
		for (int Ns = 1; Ns < w; Ns *= R2){
			// Rounded-up grids; both kernels are bounds-guarded.
			IFFT_Iter_R2 <<< total / (R2 * MAX_THREAD_PER_BLOCK) + 1, MAX_THREAD_PER_BLOCK >>>(input, output, w, M, Ns, rows);
			Copy_X_to_x <<< total / MAX_THREAD_PER_BLOCK + 1, MAX_THREAD_PER_BLOCK >>>(input, output, total);
		}
	}
}

void fft_1d_bwd_stockham_wt(Complex* input,Complex* output, int n, int c, int d, int h, int w, const unsigned int N )
{	
	// Inverse 1-D Stockham FFT along the h axis using the transposed-access
	// kernel. Each stage writes into `output`, then copies back into `input`.
	const int total = n * c * h * w;	// total element count
	const int cols  = n * c * w;		// independent transforms
	int M = log2(h);			// number of radix-2 stages
	if (total < MAX_THREAD_PER_BLOCK){
		for (int Ns = 1; Ns < h; Ns *= R2){
			IFFT_Iter_R2_wt <<< 1, total/R2 >>>(input, output, h, w, M, Ns, cols);
			Copy_X_to_x <<< 1, total >>>(input, output, total);
		}
	}
	else {
		for (int Ns = 1; Ns < h; Ns *= R2){
			// Rounded-up grids; both kernels are bounds-guarded.
			IFFT_Iter_R2_wt <<< total / (R2 * MAX_THREAD_PER_BLOCK) + 1, MAX_THREAD_PER_BLOCK >>>(input, output, h, w, M, Ns, cols);
			Copy_X_to_x <<< total / MAX_THREAD_PER_BLOCK + 1, MAX_THREAD_PER_BLOCK >>>(input, output, total);
		}
	}
}
void fft_1d_fwd_stockham_sm(Complex* input,Complex* output, int n1, int c1, int d1, int n2, int c2, int d2, int h, int w, const unsigned int N )
{	
	// Forward Stockham FFT along w using the shared-memory kernel variant,
	// which stages and copies back internally (no per-stage copy kernel).
	//
	// NOTE(review): the dynamic shared allocation requested is the WHOLE
	// tensor, (n1*c1+n2*c2)*h*w * sizeof(Complex) — for non-trivial sizes this
	// exceeds the per-block shared-memory limit and the launch will fail. In
	// the multi-block branch the kernel also indexes shared memory with global
	// offsets, so blocks cannot see each other's staging data. Verify this
	// path is only exercised for sizes that fit a single block.
	int M = log2(w); 
	if ( (n1 * c1 + n2 * c2) * h * w < MAX_THREAD_PER_BLOCK ){	
		for (int Ns=1; Ns < w ; Ns*=R2){							
		    FFT_Iter_R2_sm <<< 1, (n1 * c1 + n2 * c2) * h * w/R2, (n1 * c1 + n2 * c2) * h * w * sizeof(Complex) >>>(input, output, w , M, Ns, (n1 * c1 + n2 * c2) * h );	// one block, dynamic smem = whole tensor
		}
	}
	else {
		for (int Ns=1; Ns< w ; Ns*=R2){							
		    FFT_Iter_R2_sm <<< (n1 * c1 + n2 * c2) * h * w/(R2 * MAX_THREAD_PER_BLOCK ) + 1 , MAX_THREAD_PER_BLOCK, (n1 * c1 + n2 * c2) * h * w * sizeof(Complex) >>>(input, output, w , M, Ns, (n1 * c1 + n2 * c2) * h );	// rounded-up grid; see NOTE above
		}
	}
	
}
// wt is with transpose
void fft_1d_fwd_stockham_wt_sm(Complex* input,Complex* output, int n1, int c1, int d1, int n2, int c2, int d2, int h, int w, const unsigned int N )
{	
	// Forward Stockham FFT along h (transposed access) using the
	// shared-memory kernel variant, which stages and copies back internally.
	//
	// NOTE(review): the dynamic shared allocation requested is the WHOLE
	// tensor; for non-trivial sizes this exceeds the per-block shared-memory
	// limit. The multi-block branch is also inconsistent with the kernel's
	// global-offset shared-memory addressing — verify call sites.
	int M = log2(h); 
	if ( (n1 * c1 + n2 * c2) * h * w < MAX_THREAD_PER_BLOCK ){	
		for (int Ns=1; Ns < h ; Ns*=R2){							
		    FFT_Iter_R2_wt_sm <<< 1, (n1 * c1 + n2 * c2) * h * w/R2 ,(n1 * c1 + n2 * c2) * h * w * sizeof(Complex)>>>(input, output, h , w, M, Ns, (n1 * c1 + n2 * c2) * w );	// one block, dynamic smem = whole tensor
		}
	}
	else {
		for (int Ns=1; Ns< h ; Ns*=R2){							
		    FFT_Iter_R2_wt_sm <<< (n1 * c1 + n2 * c2) * h * w/(R2 * MAX_THREAD_PER_BLOCK ) + 1 , MAX_THREAD_PER_BLOCK ,(n1 * c1 + n2 * c2) * h * w * sizeof(Complex)>>>(input, output, h, w, M, Ns, (n1 * c1 + n2 * c2) * w );	// rounded-up grid; see NOTE above
		}
	}
	
}
void fft_1d_bwd_stockham_sm(Complex* input,Complex* output, int n, int c, int d, int h, int w, const unsigned int N )
{	
	// Inverse Stockham FFT along w using the shared-memory kernel variant,
	// which stages and copies back internally (no per-stage copy kernel).
	//
	// NOTE(review): the dynamic shared allocation requested is the WHOLE
	// tensor, n*c*h*w * sizeof(Complex); for non-trivial sizes this exceeds
	// the per-block shared-memory limit. The multi-block branch is also
	// inconsistent with the kernel's global-offset shared addressing.
	int M = log2(w); 
	if (  n * c * h * w < MAX_THREAD_PER_BLOCK ){	
		for (int Ns=1; Ns < w ; Ns*=R2){							
		    IFFT_Iter_R2_sm <<< 1, n * c * h * w/R2, n * c * h * w * sizeof(Complex)>>>(input, output, w , M, Ns, n * c * h);	// one block, dynamic smem = whole tensor
		}
	}
	else {
		for (int Ns=1; Ns < w; Ns*=R2){							
		    IFFT_Iter_R2_sm <<< n * c * h * w / (R2 * MAX_THREAD_PER_BLOCK) + 1, MAX_THREAD_PER_BLOCK , n * c * h * w * sizeof(Complex)>>>(input, output, w , M, Ns, n * c * h );	// rounded-up grid; see NOTE above
		}
	}
}

void fft_1d_bwd_stockham_wt_sm(Complex* input,Complex* output, int n, int c, int d, int h, int w, const unsigned int N )
{	
	// Inverse Stockham FFT along h (transposed access) using the
	// shared-memory kernel variant, which stages and copies back internally.
	//
	// NOTE(review): the dynamic shared allocation requested is the WHOLE
	// tensor; for non-trivial sizes this exceeds the per-block shared-memory
	// limit, and the multi-block branch is inconsistent with the kernel's
	// global-offset shared addressing — verify call sites.
	int M = log2(h); 
	if (  n * c * h * w < MAX_THREAD_PER_BLOCK ){	
		for (int Ns=1; Ns < h ; Ns*=R2){							
		    IFFT_Iter_R2_wt_sm <<< 1, n * c * h * w/R2 , n * c * h * w * sizeof(Complex)>>>(input, output, h, w , M, Ns, n * c * w);	// one block, dynamic smem = whole tensor
		}
	}
	else {
		for (int Ns=1; Ns < h; Ns*=R2){							
		    IFFT_Iter_R2_wt_sm <<< n * c * h * w / (R2 * MAX_THREAD_PER_BLOCK) + 1, MAX_THREAD_PER_BLOCK , n * c * h * w * sizeof(Complex)>>>(input, output, h, w , M, Ns, n * c * w );	// rounded-up grid; see NOTE above
		}
	}
}
/*
 "sp" denotes swapping the positions of the two buffer parameters each stage
 (ping-pong between input and output).

 This removes the need for the Copy_X_to_x call after every stage.
 */
void fft_1d_fwd_stockham_sp(Complex* input,Complex* output, int n1, int c1, int n2, int c2, int d, int h, int w, const unsigned int N )
{	
	// Forward Stockham FFT along w that ping-pongs between `input` and
	// `output` each stage (stage parity = log2(Ns) % 2) instead of running a
	// copy kernel after every stage. After an even number of stages the data
	// ends up back in `input`, so one final copy publishes it in `output`,
	// where the caller expects the result.
	int M = log2(w); 
	if ( (n1 * c1 + n2 * c2) * d * h * w < MAX_THREAD_PER_BLOCK ){	
		for (int Ns=1; Ns < w ; Ns*=R2){							
                    // even-parity stage: input -> output
                    if (((int)log2(Ns))%R2 == 0)
		       FFT_Iter_R2 <<< 1, (n1 * c1 + n2 * c2) * d * h * w/R2 >>>(input, output, w , M, Ns, (n1 * c1 + n2 * c2) * d * h);
                    // odd-parity stage: output -> input
                    if (((int)log2(Ns))%R2 == 1)
		       FFT_Iter_R2 <<< 1, (n1 * c1 + n2 * c2) * d * h * w/R2 >>>(output, input, w , M, Ns, (n1 * c1 + n2 * c2) * d * h);
		}
		// M even: last stage wrote `input`; copy into `output`
		// (Copy_X_to_x copies its second argument into its first).
		if (M % R2 == 0){
		    Copy_X_to_x <<< 1, (n1 * c1 + n2 * c2) * d * h * w>>>(output, input, (n1 * c1 + n2 * c2) * d * h * w);
		}

	}
	else {
		for (int Ns=1; Ns< w ; Ns*=R2){							
                    // even-parity stage: input -> output
                    if (((int)log2(Ns))%R2 == 0){
		       FFT_Iter_R2 <<< (n1 * c1 + n2 * c2) * d * h * w/(R2 * MAX_THREAD_PER_BLOCK ) + 1 , MAX_THREAD_PER_BLOCK >>>(input, output, w , M, Ns, (n1 * c1 + n2 * c2) * d * h );
		    }
                    // odd-parity stage: output -> input
                    if (((int)log2(Ns))%R2 == 1){
		       FFT_Iter_R2 <<< (n1 * c1 + n2 * c2) * d * h * w/(R2 * MAX_THREAD_PER_BLOCK ) + 1 , MAX_THREAD_PER_BLOCK >>>(output, input, w , M, Ns, (n1 * c1 + n2 * c2) * d * h );
		    }
		}
		// M even: publish the result from `input` into `output`.
		if (M % R2 == 0){
		    Copy_X_to_x <<< (n1 * c1 + n2 * c2) * d * h * w/MAX_THREAD_PER_BLOCK + 1, MAX_THREAD_PER_BLOCK >>>(output, input, (n1 * c1 + n2 * c2) * d * h * w);
		}
	}
	
}
// wt is with transpose
void fft_1d_fwd_stockham_wt_sp(Complex* input,Complex* output, int n1, int c1, int d1, int n2, int c2, int d2, int h, int w, const unsigned int N )
{	
	// Forward Stockham FFT along h (transposed access) with per-stage buffer
	// ping-pong (stage parity = log2(Ns) % 2). When the stage count M is even
	// the result lands back in `input`, so one final copy publishes it in
	// `output` (Copy_X_to_x copies its second argument into its first).
	int M = log2(h); 
	if ( (n1 * c1 + n2 * c2) * h * w < MAX_THREAD_PER_BLOCK ){	
		for (int Ns=1; Ns < h ; Ns*=R2){							
                    // even-parity stage: input -> output
                    if (((int)log2(Ns))%R2 == 0){
               	       FFT_Iter_R2_wt <<< 1, (n1 * c1 + n2 * c2) * h * w/R2 >>>(input, output, h , w, M, Ns, (n1 * c1 + n2 * c2) * w );
		    }
                    // odd-parity stage: output -> input
                    if (((int)log2(Ns))%R2 == 1){
		       FFT_Iter_R2_wt <<< 1, (n1 * c1 + n2 * c2) * h * w/R2 >>>(output, input, h , w, M, Ns, (n1 * c1 + n2 * c2) * w );
		    }
		}
		// M even: publish the result from `input` into `output`.
		if (M % R2 == 0){
		    Copy_X_to_x <<< 1, (n1 * c1 + n2 * c2) * h * w>>>(output, input,  (n1 * c1 + n2 * c2) * h * w);
		}
	}
	else {
		for (int Ns=1; Ns< h ; Ns*=R2){							
                    // even-parity stage: input -> output
                    if (((int)log2(Ns))%R2 == 0){
		       FFT_Iter_R2_wt <<< (n1 * c1 + n2 * c2) * h * w/(R2 * MAX_THREAD_PER_BLOCK ) + 1 , MAX_THREAD_PER_BLOCK >>>(input, output, h, w, M, Ns, (n1 * c1 + n2 * c2) * w );
		    }
                    // odd-parity stage: output -> input
                    if (((int)log2(Ns))%R2 == 1){
		       FFT_Iter_R2_wt <<< (n1 * c1 + n2 * c2) * h * w/(R2 * MAX_THREAD_PER_BLOCK ) + 1 , MAX_THREAD_PER_BLOCK >>>(output, input, h, w, M, Ns, (n1 * c1 + n2 * c2) * w );
		    }
		}
		// M even: publish the result from `input` into `output`.
		if (M % R2 == 0){
		    Copy_X_to_x <<< (n1 * c1 + n2 * c2) * h * w/MAX_THREAD_PER_BLOCK + 1, MAX_THREAD_PER_BLOCK >>>(output, input, (n1 * c1 + n2 * c2) * h * w);
		}
	}
	
}
void fft_1d_bwd_stockham_sp(Complex* input,Complex* output, int n, int c, int d, int h, int w, const unsigned int N )
{	
	// Inverse Stockham FFT along w with per-stage buffer ping-pong
	// (stage parity = log2(Ns) % 2). When M is even the result lands back in
	// `input`, so a final copy publishes it in `output` (Copy_X_to_x copies
	// its second argument into its first).
	int M = log2(w); 
	if ( n * c * d * h * w < MAX_THREAD_PER_BLOCK ){	
		for (int Ns=1; Ns < w ; Ns*=R2){							
                    // even-parity stage: input -> output
                    if (((int)log2(Ns))%R2 == 0)
		       IFFT_Iter_R2 <<< 1, n * c * d * h * w/R2>>>(input, output, w , M, Ns, n * c * d * h);
                    // odd-parity stage: output -> input
                    if (((int)log2(Ns))%R2 == 1)
		       IFFT_Iter_R2 <<< 1, n * c * d * h * w/R2>>>(output, input, w , M, Ns, n * c * d * h);
		}
		// M even: publish the result from `input` into `output`.
		if (M % R2 == 0)
		    Copy_X_to_x <<< 1, n * c * d * h * w>>>(output, input, n * c * d * h * w);
	} else {
		for (int Ns=1; Ns < w; Ns*=R2){							
                    // even-parity stage: input -> output
                    if (((int)log2(Ns))%R2 == 0)
		       IFFT_Iter_R2 <<< n * c * d * h * w / (R2 * MAX_THREAD_PER_BLOCK) + 1, MAX_THREAD_PER_BLOCK >>>(input, output, w , M, Ns, n * c * d * h );
                    // odd-parity stage: output -> input
                    if (((int)log2(Ns))%R2 == 1)
		       IFFT_Iter_R2 <<< n * c * d * h * w / (R2 * MAX_THREAD_PER_BLOCK) + 1, MAX_THREAD_PER_BLOCK >>>(output, input, w , M, Ns, n * c * d * h );
		}
		// M even: publish the result from `input` into `output`.
		if (M % R2 == 0)
		    Copy_X_to_x <<< n * c * d * h * w / MAX_THREAD_PER_BLOCK  + 1, MAX_THREAD_PER_BLOCK >>>(output, input, n * c * d * h * w );
	}
}

void fft_1d_bwd_stockham_wt_sp(Complex* input,Complex* output, int n, int c, int d, int h, int w, const unsigned int N )
{	
	// Inverse Stockham FFT along h (transposed access) with per-stage buffer
	// ping-pong (stage parity = log2(Ns) % 2). When M is even the result
	// lands back in `input`, so a final copy publishes it in `output`
	// (Copy_X_to_x copies its second argument into its first).
	int M = log2(h); 
	if (  n * c * h * w < MAX_THREAD_PER_BLOCK ){	
		for (int Ns=1; Ns < h ; Ns*=R2){							
                    // even-parity stage: input -> output
                    if (((int)log2(Ns))%R2 == 0)
  		       IFFT_Iter_R2_wt <<< 1, n * c * h * w/R2 >>>(input, output, h, w , M, Ns, n * c * w);
		    
                    // odd-parity stage: output -> input
                    if (((int)log2(Ns))%R2 == 1){
		       IFFT_Iter_R2_wt <<< 1, n * c * h * w/R2 >>>(output, input, h, w , M, Ns, n * c * w);
		    }
		}
		// M even: publish the result from `input` into `output`.
		if (M % R2 == 0)
		    Copy_X_to_x <<< 1, n * c * h * w>>>(output, input, n * c * h * w);
	}
	else {
		for (int Ns=1; Ns < h; Ns*=R2){							
                    // even-parity stage: input -> output
                    if (((int)log2(Ns))%R2 == 0)
		       IFFT_Iter_R2_wt <<< n * c * h * w / (R2 * MAX_THREAD_PER_BLOCK) + 1, MAX_THREAD_PER_BLOCK >>>(input, output, h, w , M, Ns, n * c * w );
                    // odd-parity stage: output -> input
                    if (((int)log2(Ns))%R2 == 1)
		       IFFT_Iter_R2_wt <<< n * c * h * w / (R2 * MAX_THREAD_PER_BLOCK) + 1, MAX_THREAD_PER_BLOCK >>>(output, input, h, w , M, Ns, n * c * w );
		}
		// M even: publish the result from `input` into `output`.
		if (M % R2 == 0)
		   Copy_X_to_x <<< n * c * h * w / MAX_THREAD_PER_BLOCK  + 1, MAX_THREAD_PER_BLOCK >>>(output, input, n * c * h * w );
	}
}
__global__ void fft_1d_fwd(fft_plan plan_w, Complex* input, int n1, int c1, int d1, int n2, int c2, int d2, int h, int w)
{
  // Forward FFT of length w on each of the (n1*c1 + n2*c2) * h rows, using a
  // precomputed plan shared by all threads. The caller owns `plan_w` and must
  // release it after the kernel completes.
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx < (n1 * c1 + n2 * c2) * h){
    fft_cuda_1d_gpu_with_plan(plan_w, 1, w ,input + tdx * w);
    // Fixed: the original called plan_w.memory_free() here. The by-value plan
    // copies alias the same underlying buffers, so every thread freed storage
    // that other threads were still reading — a data race and a multiple
    // free. Freeing is now the caller's responsibility (contrast fft_1d_bwd,
    // which builds and frees a private per-thread plan).
  }
}
__global__ void fft_1d_bwd(Complex* input, int n, int c, int d, int h, int w)
{
  // Inverse FFT of length w on each of the n * c * h rows. Every active
  // thread builds its own private plan, transforms its row in place, then
  // releases the plan's storage.
  int row = blockDim.x * blockIdx.x + threadIdx.x;
  if (row < n * c * h){
    fft_plan plan_w = fft_plan(w);
    fft_cuda_1d_gpu_with_plan(plan_w, 0, w ,input + row * w);
    plan_w.memory_free();
  }
}
__global__ void hadamard_product(Complex* inComp, Complex* keComp, int in_n, int k_c, int k_n, int h, int w , Complex* knkcOutOutComp )
{
  // Element-wise complex product for FFT-domain convolution: output matrix
  // (nn, kn, c) = input matrix (nn, c) * weight matrix (kn, c), one output
  // element per thread.
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx < in_n * k_c * k_n * h * w){
    int plane = h * w;                  // elements per matrix
    int mat   = tdx / plane;            // which (nn, kn, c) output matrix
    int elem  = tdx % plane;            // element inside that matrix
    int c_idx  = mat % k_c;             // channel index
    int nn_idx = (mat / k_c) / k_n;     // input-batch index
    int kn_idx = (mat / k_c) % k_n;     // kernel index

    int x_offset = (nn_idx * k_c + c_idx) * plane + elem; // input element
    int w_offset = (kn_idx * k_c + c_idx) * plane + elem; // weight element

    // (xr + i*xi) * (wr + i*wi)
    knkcOutOutComp[tdx] = Complex(inComp[x_offset].real * keComp[w_offset].real - inComp[x_offset].imag * keComp[w_offset].imag,
      inComp[x_offset].imag * keComp[w_offset].real + inComp[x_offset].real * keComp[w_offset].imag);
  }
}
__global__ void hadamard_product_3d(Complex* inComp, Complex* keComp, int in_n, int k_c, int k_n, int d, int h, int w , Complex* knkcOutOutComp )
{
  // Element-wise product between 3-D input volumes and kernels. Unlike the
  // 2-D version, the weight factor is conjugated here (real parts add,
  // imaginary parts subtract), as this 3-D pipeline requires.
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx < in_n * k_c * k_n * d * h * w){
    int vol  = d * h * w;               // elements per 3-D volume
    int mat  = tdx / vol;               // which (nn, kn, c) output volume
    int elem = tdx % vol;               // element inside that volume
    int nn_idx = mat / (k_c * k_n);     // input-batch index
    int c_idx  = mat % k_c;             // channel index

    int x_offset = (k_c * nn_idx + c_idx) * vol + elem;   // input element
    int w_offset = (mat % (k_n * k_c)) * vol + elem;      // weight element

    // input * conj(weight)
    knkcOutOutComp[tdx] = Complex(inComp[x_offset].real * keComp[w_offset].real +  inComp[x_offset].imag * keComp[w_offset].imag,
      inComp[x_offset].imag * keComp[w_offset].real -  inComp[x_offset].real * keComp[w_offset].imag);
  }
}
// Every group of k_c matrices is summed; the result is accumulated into the first addend of the group.
__global__ void trim_matrix(Complex* knkcOutOutComp,int in_n, int k_c, int k_n, int out_h, int out_w,int tmp_out_h, int tmp_out_w,Real* output_Y){
  // Reduce each group of k_c matrices over the channel axis (accumulated in
  // place into the group's first matrix), then crop the centred out_h x out_w
  // window of the real part into output_Y. Only the first thread of every
  // k_c group does any work.
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx < in_n * k_n * k_c){
    int plane = tmp_out_h * tmp_out_w;
    Complex * group = knkcOutOutComp + tdx * plane;
    if (tdx % k_c == 0 )  // one worker per k_c group
      {
        // Fold the remaining k_c-1 matrices of the group into the first one.
        for (int m = 1 ; m < k_c ; m++){
          for (int e = 0 ; e < plane ; e++){
            group[e] += group[m * plane + e];
          }
        }
        // Centred crop of the real part into the output tensor.
        int row0 = (tmp_out_h - out_h) / 2;
        int col0 = (tmp_out_w - out_w) / 2;
        int y_offset = (tdx / k_c) * out_h * out_w;
        for (int i = 0 ; i < out_h ; i ++){
            for (int jj = 0 ; jj < out_w ; jj ++){
                output_Y[y_offset + i * out_w + jj] = group[(i + row0) * tmp_out_w + jj + col0].real;
            }
        }
      }
  }
}

//__global__ void trim_matrix(Complex* knkcOutOutComp,int in_n, int k_c, int k_n, int out_h, int out_w,int tmp_out_h, int tmp_out_w, int tmp_out_h_ori, int tmp_out_w_ori, Real* output_Y){
//  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
//  if (tdx < in_n * k_n * k_c){
//    Complex * tmp_res = knkcOutOutComp + tdx * tmp_out_h * tmp_out_w;
//    if (tdx % k_c == 0 )//aim to trim the k_c axis of the matrix
//      {
//        for (int i = 1 ; i < k_c ; i++){
//          for (int k = 0 ; k < tmp_out_h * tmp_out_w ; k++){
//            tmp_res[k] += tmp_res[i * tmp_out_h * tmp_out_w + k];
//          }
//        }
//        int start_row_index = (tmp_out_h_ori - out_h) / 2;
//        int start_col_index = (tmp_out_w_ori - out_w) / 2;
//        int y_offset = (tdx / k_c) * out_h * out_w;
//        for (int i = 0 ; i < out_h ; i ++){
//            for (int j = 0 ; j < out_w ; j ++){
//                output_Y[y_offset + i * out_w + j] = tmp_res[(i + start_row_index) * tmp_out_w + j + start_col_index].real;
//            }
//        }
//
//      }
//
//      }
//}
__global__ void trim_matrix(Complex* knkcOutOutComp,int in_n, int k_c, int k_n, int out_h, int out_w,int tmp_out_h, int tmp_out_w, int tmp_out_h_ori, int tmp_out_w_ori, Real* output_Y){
  // One thread per output element. Each thread sums the real parts of its
  // element across the k_c channel planes of the padded (tmp_out_h x tmp_out_w)
  // buffers and writes the centred crop; the crop offsets are derived from the
  // pre-power-of-two sizes tmp_out_h_ori / tmp_out_w_ori.
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx < in_n * k_n * out_h * out_w){
     int offset_row = (tmp_out_h_ori - out_h) / 2;
     int offset_col = (tmp_out_w_ori - out_w) / 2;
     int n = tdx / (out_h * out_w);            // which (in_n, k_n) output plane
     int row = (tdx % (out_h * out_w)) / out_w;
     int col = (tdx % (out_h * out_w)) % out_w;
     // BUGFIX: accumulate in a register and store once. The previous code did
     // `output_Y[tdx] +=` in the loop, which read uninitialised device memory
     // (no caller zeroes the output buffer before this kernel) and issued k_c
     // redundant global stores per element.
     Real sum = 0;
     for (int c_index = 0 ; c_index < k_c ; c_index++){
         sum += knkcOutOutComp[n * k_c * tmp_out_h * tmp_out_w + c_index * tmp_out_h * tmp_out_w + (row + offset_row) * tmp_out_w + col + offset_col].real;
     }
     output_Y[tdx] = sum;
  }
}
__global__ void trim_matrix_3d(Complex* knkcOutOutComp,int in_n, int k_c, int k_n, int out_d, int out_h, int out_w,int tmp_out_d, int tmp_out_h, int tmp_out_w, int tmp_out_d_ori, int tmp_out_h_ori, int tmp_out_w_ori, Real* output_Y){
  // 3-D analogue of trim_matrix: one thread per output element sums the real
  // parts of its element across the k_c channel volumes and writes the result.
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx < in_n * k_n * out_d * out_h * out_w){
     // NOTE(review): unlike the 2-D trim_matrix, these centre offsets are
     // computed but never applied to the source index below, and they use the
     // padded tmp_out_* sizes rather than tmp_out_*_ori -- confirm whether the
     // 3-D crop is really meant to start at the volume origin.
     int offset_dep = (tmp_out_d - out_d) / 2;
     int offset_row = (tmp_out_h - out_h) / 2;
     int offset_col = (tmp_out_w - out_w) / 2;
     int n = tdx / (out_d * out_h * out_w);    // which (in_n, k_n) output volume
     int dep = (tdx % (out_d * out_h * out_w)) / (out_h * out_w);
     int row = ((tdx % (out_d * out_h * out_w)) % (out_h * out_w)) / out_w;
     int col = ((tdx % (out_d * out_h * out_w)) % (out_h * out_w)) % out_w;
     // BUGFIX: accumulate in a register and store once. The previous
     // `output_Y[tdx] +=` read uninitialised device memory (no caller zeroes
     // the output buffer before this kernel runs).
     Real sum = 0;
     for (int c_index = 0 ; c_index < k_c ; c_index++){
         sum += knkcOutOutComp[n * k_c * tmp_out_d * tmp_out_h * tmp_out_w + c_index * tmp_out_d * tmp_out_h * tmp_out_w + (dep) * (tmp_out_h) * tmp_out_w + (row) * tmp_out_w + col].real;
     }
     output_Y[tdx] = sum;
  }
}
__global__ void transpose_matrix(Complex* inComp, int n1, int c1, int d1, int n2, int c2, int d2, int h, int w)
{
  // One thread per h*w plane across the two concatenated batches
  // (n1*c1 + n2*c2 planes in total); each thread transposes its own plane
  // in place via transpose_gpu.
  const int planeIdx = blockDim.x * blockIdx.x + threadIdx.x;
  const int planeCount = n1 * c1 + n2 * c2;
  if (planeIdx >= planeCount) return;
  transpose_gpu(inComp + planeIdx * h * w, h, w);
}
void transpose_matrix_v2(Complex* inComp, int n1, int c1, int d1, int n2, int c2, int d2, int h, int w)
{
    // Host-side wrapper: launches one tiled transpose kernel per h*w plane of
    // the two concatenated batches (n1*c1 + n2*c2 planes). Launches are issued
    // on the default stream, one after another.
    const int planes = n1 * c1 + n2 * c2;
    dim3 threads(BLOCK_DIM, BLOCK_DIM, 1);
    dim3 grid(w / BLOCK_DIM + 1, h / BLOCK_DIM + 1, 1);
    for (int p = 0; p < planes; p++) {
        transpose_gpu_v2<<< grid, threads >>>(inComp + p * h * w, w, h);
    }
}
// In-place axis swap (axes1 <-> axes2) of every d*h*w volume in the two
// concatenated batches (n1*c1 + n2*c2 volumes), one kernel launch per volume.
// NOTE(review): threads per block is BLOCK_DIM^3; if BLOCK_DIM exceeds 10 this
// is over the 1024 threads-per-block limit and every launch fails silently
// (no error check after the launch) -- confirm BLOCK_DIM's value.
void swap_matrix_axes(Complex* inComp, int n1, int c1, int n2, int c2, int d, int h, int w, int axes1, int axes2){
    dim3 grid(w/BLOCK_DIM + 1, h/BLOCK_DIM + 1, d/BLOCK_DIM + 1 );
    dim3 threads(BLOCK_DIM, BLOCK_DIM, BLOCK_DIM);
    for (int i = 0 ; i < n1 * c1 + n2 * c2 ; i++)
        swap_axes<<< grid, threads >>>(inComp + i * d * h * w, d, h, w, axes1, axes2);
}

void swap_matrix_axes(Complex* inComp, Complex* outComp,int n1, int c1, int n2, int c2, int d, int h, int w, int axes1, int axes2){
    // Out-of-place axis swap: for every d*h*w volume in the two concatenated
    // batches, launch swap_axes to write the axes1<->axes2 permuted copy into
    // outComp. Small volumes get a single block sized exactly to the volume;
    // larger ones are split over MAX_THREAD_PER_BLOCK-wide blocks.
    const int vol = d * h * w;
    const int volumes = n1 * c1 + n2 * c2;
    for (int i = 0; i < volumes; i++) {
        Complex* src = inComp + i * vol;
        Complex* dst = outComp + i * vol;
        if (vol < MAX_THREAD_PER_BLOCK) {
            swap_axes<<< 1, vol >>>(src, dst, d, h, w, axes1, axes2);
        } else {
            swap_axes<<< vol / MAX_THREAD_PER_BLOCK + 1, MAX_THREAD_PER_BLOCK >>>(src, dst, d, h, w, axes1, axes2);
        }
    }
}

void swap_matrix_axes(Complex* inComp, Complex* outComp,int n, int c, int d, int h, int w, int axes1, int axes2){
    // Out-of-place axis swap over a single n*c batch: one swap_axes launch per
    // d*h*w volume, writing the axes1<->axes2 permuted copy into outComp.
    const int vol = d * h * w;
    for (int i = 0; i < n * c; i++) {
        Complex* src = inComp + i * vol;
        Complex* dst = outComp + i * vol;
        if (vol < MAX_THREAD_PER_BLOCK) {
            swap_axes<<< 1, vol >>>(src, dst, d, h, w, axes1, axes2);
        } else {
            swap_axes<<< vol / MAX_THREAD_PER_BLOCK + 1, MAX_THREAD_PER_BLOCK >>>(src, dst, d, h, w, axes1, axes2);
        }
    }
}


__global__ void transpose_matrix(Complex* inComp, int n, int c, int d, int h, int w)
{
  // One thread per h*w plane of the n*c batch; each thread transposes its own
  // plane in place via transpose_gpu.
  const int planeIdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (planeIdx >= n * c) return;
  transpose_gpu(inComp + planeIdx * h * w, h, w);
}
void transpose_matrix_v2(Complex* inComp, int n, int c, int d, int h, int w)
{
    // Host wrapper: one tiled transpose kernel launch per h*w plane of the
    // n*c batch, issued sequentially on the default stream.
    const int planes = n * c;
    dim3 threads(BLOCK_DIM, BLOCK_DIM, 1);
    dim3 grid(w / BLOCK_DIM + 1, h / BLOCK_DIM + 1, 1);
    for (int p = 0; p < planes; p++) {
        transpose_gpu_v2<<< grid, threads >>>(inComp + p * h * w, w, h);
    }
}
// In-place axis swap (axes1 <-> axes2) of every d*h*w volume in an n*c batch,
// one kernel launch per volume.
// NOTE(review): threads per block is BLOCK_DIM^3; if BLOCK_DIM exceeds 10 this
// is over the 1024 threads-per-block limit and the launches fail silently
// (no error check after the launch) -- confirm BLOCK_DIM's value.
void swap_matrix_axes(Complex* inComp, int n, int c, int d, int h, int w, int axes1, int axes2)
{
    dim3 grid(w/BLOCK_DIM + 1, h/BLOCK_DIM + 1,d/BLOCK_DIM + 1);
    dim3 threads(BLOCK_DIM, BLOCK_DIM, BLOCK_DIM);
    for (int i = 0 ; i < n * c  ; i++)
        swap_axes<<< grid, threads >>>(inComp + i * d * h * w, d, h, w, axes1, axes2);
}
__global__ void fft_2d_r2c(Real* input, int n, int c, int d, int h, int w,int tmp_out_h, int tmp_out_w,Complex* output){
  // One thread per (n, c) plane: zero-pads the real h*w plane up to
  // tmp_out_h * tmp_out_w, performs a 2-D FFT as two 1-D passes with a
  // transpose in between, and writes the spectrum into `output` at the
  // plane's offset. Uses device-side malloc for the scratch plane.
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx < n * c){
    int in_offset = tdx * h * w;
    int out_offset = tdx * tmp_out_h * tmp_out_w;
    Complex* comp = (Complex*)malloc( tmp_out_h * tmp_out_w * sizeof(Complex));
    if (comp == NULL) return; // BUGFIX: device-side malloc can fail (heap limit)
    memset(comp, 0, tmp_out_h * tmp_out_w * sizeof(Complex));
    // zero-padded copy of the real input plane
    for (int i = 0 ; i < h ; i++){
      for (int j = 0 ; j < w ; j++){
        comp[i * tmp_out_w + j] = Complex((input + in_offset)[i * w + j],0.0);
      }
    }
    fft_plan plan_w = fft_plan(tmp_out_w);
    fft_plan plan_h = fft_plan(tmp_out_h);
    // FFT along the w dimension
    for (int i = 0 ; i < tmp_out_h ; i++) {
      fft_cuda_1d_gpu_with_plan(plan_w, 1, tmp_out_w ,comp + i * tmp_out_w);
    }
    transpose_gpu(comp,tmp_out_h, tmp_out_w);
    // FFT along the h dimension
    for (int i = 0 ; i < tmp_out_w ; i++) {
      fft_cuda_1d_gpu_with_plan(plan_h, 1, tmp_out_h ,comp + i * tmp_out_h);
    }
    // restore row-major (tmp_out_h x tmp_out_w) layout before publishing
    // NOTE(review): the commented-out original intent only copied the buffer;
    // confirm whether callers expect the transposed or row-major layout.
    transpose_gpu(comp, tmp_out_w, tmp_out_h);
    // BUGFIX: the spectrum was computed and then discarded (`output` was never
    // written) -- copy the scratch plane into the caller-provided buffer.
    for (int i = 0 ; i < tmp_out_h * tmp_out_w ; i++){
      output[out_offset + i] = comp[i];
    }
    free(comp);
  }

}

// Intended inverse counterpart of fft_2d_r2c (complex spectrum -> real plane).
// NOTE(review): unimplemented stub -- per-plane offsets are computed but
// nothing is read from `input` or written to `output`.
__global__ void fft_2d_c2r(Complex* input, int n, int c, int d, int h, int w,int tmp_out_h, int tmp_out_w,Real* output){
  int tdx = blockDim.x * blockIdx.x + threadIdx.x;
  if (tdx < n * c){
    // per-plane offsets (currently unused by the stub body)
    int in_offset = tdx * h * w;
    int out_offset = tdx * tmp_out_h * tmp_out_w;

  }
  

}

// FFT-based 2-D convolution, fully on the GPU: pad -> forward 2-D FFT (two 1-D
// passes + transposes) -> per-channel Hadamard product -> inverse 2-D FFT ->
// channel reduction and centre crop into output_Y.
void convfft_2d_gpu_v2(Real* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  Real* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  int p_h, int p_w,
  int s_h, int s_w,
  int d_h, int d_w,
  Real* output_Y,int out_n, int out_c, int out_d, int out_h, int out_w)
{

  /** comments from cuDNN-API

    (the channel count of the input must equal the kernel's input channel count)
    1. in_c must equal k_c
    2. in_h + 2 * padding height must be 256 or less
       in_w + 2 * padding width must be 256 or less
    3. stride of height and width must equal 1
    4. k_h must be greater than padding height
       k_w must be greater than padding width

  */
  if (k_c != in_c 
    || in_h + 2 * p_h > 256 || in_w + 2 * p_w > 256 
    || s_h != 1 || s_w != 1 
    || k_h <= p_h || k_w <= p_w ){
    throw string("convfft_2d cannot perform convolution because the input size does not match the kernel size!");
  }
  // full (linear-convolution) size of each padded plane
  size_t tmp_out_h = in_h + k_h - 1;
  size_t tmp_out_w = in_w + k_w - 1;

  // ---- device allocations ---------------------------------------------------
  Real* d_input_X ;
  Check_Error(cudaMalloc(&d_input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real)));
  Real* d_input_W ;
  Check_Error(cudaMalloc(&d_input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real)));

  // per (n, kn, kc) product planes produced by the Hadamard step
  Complex* d_knkcOutOutComp ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex)));

  Real* d_output_Y;
  Check_Error(cudaMalloc(&d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real)));

  // one buffer holding both the padded input planes and the padded kernel planes
  Complex* d_output;
  Check_Error(cudaMalloc(&d_output, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  Check_Error(cudaMemset(d_output, 0, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  int output_X_length = in_n * in_c * in_d * tmp_out_h * tmp_out_w;

  // ---- host -> device copies --------------------------------------------------
  Check_Error(cudaMemcpy(d_input_X,input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real), cudaMemcpyHostToDevice));
  Check_Error(cudaMemcpy(d_input_W,input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real), cudaMemcpyHostToDevice));

  // launch configuration shared by every kernel below
  int threadsPerBlock = 256;
  int blocksPerGrid = ((in_n * in_c * k_n ) * tmp_out_h * tmp_out_w  + threadsPerBlock - 1) / threadsPerBlock;

  clock_t startTime,endTime;
  startTime = clock();

  // zero-pad the real inputs into complex planes
  init_comp<<<blocksPerGrid, threadsPerBlock>>>(d_input_X, in_n, in_c, in_d, in_h, in_w, tmp_out_h, tmp_out_w, d_output);
  init_comp<<<blocksPerGrid, threadsPerBlock>>>(d_input_W, k_n, k_c, k_d, k_h, k_w,     tmp_out_h, tmp_out_w, d_output + output_X_length);

  // forward 2-D FFT: 1-D pass, transpose, 1-D pass, transpose back
  fft_1d_fwd<<<blocksPerGrid, threadsPerBlock>>>(d_output, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w);
  transpose_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_output,in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w);
  fft_1d_fwd<<<blocksPerGrid, threadsPerBlock>>>(d_output, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_w, tmp_out_h);
  transpose_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_output, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_w, tmp_out_h);

  // frequency-domain pointwise products of input planes by kernel planes
  hadamard_product<<<blocksPerGrid, threadsPerBlock>>>(d_output,d_output + output_X_length,in_n,in_c,k_n,tmp_out_h,tmp_out_w,d_knkcOutOutComp);

  // inverse 2-D FFT of the product planes, same two-pass scheme
  fft_1d_bwd<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp, in_n * k_n, in_c, in_d, tmp_out_h,tmp_out_w);
  transpose_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp, in_n * k_n, in_c, in_d, tmp_out_h,tmp_out_w);
  fft_1d_bwd<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp, in_n * k_n, in_c, in_d,tmp_out_w, tmp_out_h);
  transpose_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp, in_n * k_n, in_c, in_d, tmp_out_w, tmp_out_h);

  // reduce the channel axis and centre-crop the padding down to out_h x out_w
  trim_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp,in_n,in_c,k_n,out_h,out_w,tmp_out_h,tmp_out_w,d_output_Y);

  Check_Error(cudaDeviceSynchronize());
  endTime = clock();
  cout << "The convfft_2d_kernel run time is: " <<(double) 1000 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << endl;

  // debug copy of the intermediate product planes (kept for parity with the
  // original implementation; only ever inspected by now-removed dump code)
  Complex* knkcOutOutComp = (Complex*)malloc(in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex));
  Check_Error(cudaMemcpy( knkcOutOutComp, d_knkcOutOutComp, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex), cudaMemcpyDeviceToHost));

  Check_Error(cudaMemcpy( output_Y, d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real), cudaMemcpyDeviceToHost));

  // ---- cleanup ----------------------------------------------------------------
  free(knkcOutOutComp);   // BUGFIX: host buffer was leaked
  cudaFree(d_input_X);
  cudaFree(d_input_W);
  cudaFree(d_knkcOutOutComp);
  cudaFree(d_output_Y);
  cudaFree(d_output);     // BUGFIX: device buffer was leaked
}



 /*
    a gpu version based on cpu serial version
 */
void convfft_2d_gpu_v3(Real* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
    Real* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
    int p_h, int p_w,
    int s_h, int s_w,
    int d_h, int d_w,
    Real* output_Y,int out_n, int out_c, int out_d, int out_h, int out_w)
{
 
    /** comments from cuDNN-API

      (the channel count of the input must equal the kernel's input channel count)
      1. in_c must equal k_c
      2. in_h + 2 * padding height must be 256 or less
         in_w + 2 * padding width must be 256 or less
      3. stride of height and width must equal 1
      4. k_h must be greater than padding height
         k_w must be greater than padding width

    */
    if (k_c != in_c 
      || in_h + 2 * p_h > 256 || in_w + 2 * p_w > 256 
      || s_h != 1 || s_w != 1 
      || k_h <= p_h || k_w <= p_w ){
      throw string("convfft_2d cannot perform convolution because the input size does not match the kernel size!");
    }
  
    // full (linear-convolution) size of each padded plane
    size_t tmp_out_h = in_h + k_h - 1;
    size_t tmp_out_w = in_w + k_w - 1;

    // ---- host buffers (all freed at the end of this function) ----------------
    // per (n, kn, kc) inverse-FFT result planes
    Comp* knkcOutOutComp = (Comp*) malloc( in_n * k_n * k_c * tmp_out_h * tmp_out_w * sizeof(Comp));
    // per (n, kn) planes after the channel axis has been reduced away
    Comp* knOutOutComp = (Comp*) malloc( in_n * k_n * tmp_out_h * tmp_out_w * sizeof(Comp));
    // combined padded input + kernel planes (staging buffer for the GPU FFT)
    Comp*  output = (Comp*) malloc((in_n * in_c +  k_n * k_c) * tmp_out_h * tmp_out_w * sizeof(Comp));
    // forward-FFT spectra, split back into the input half and the kernel half
    Comp* outinComp = (Comp*) malloc(in_n * in_c * tmp_out_h * tmp_out_w * sizeof(Comp));
    Comp* outkeComp = (Comp*) malloc(k_n * k_c * tmp_out_h * tmp_out_w * sizeof(Comp));
    // frequency-domain products per (n, kn, kc)
    Comp* inOutComp = (Comp*) malloc(in_n * in_c * k_n * tmp_out_h * tmp_out_w * sizeof(Comp));

    // zero-pad the real inputs into the staging buffer
    clock_t startTime,endTime;
    startTime = clock(); 
    transReal2CompWithMemset(input_X , output , in_n, in_c, tmp_out_h, tmp_out_w, in_h, in_w);
    transReal2CompWithMemset(input_W , output + in_n * in_c * tmp_out_h * tmp_out_w , k_n, k_c, tmp_out_h, tmp_out_w, k_h, k_w);
    endTime = clock();
    cout << "The transReal2CompWithMemset in convfft_gpu_v3 run time is: " <<(double) 1000 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << endl;

    // convert the host Comp planes to the device Complex layout
    Complex* h_output = (Complex*) malloc((in_n * in_c +  k_n * k_c) * tmp_out_h * tmp_out_w * sizeof(Complex));
    for (int i = 0 ; i < (in_n * in_c +  k_n * k_c) * tmp_out_h * tmp_out_w; i++){
        h_output[i] = Complex::GetComplex(output[i].real(),output[i].imag());
    }
    // memory copy from host to device 
    Complex* d_output;
    Check_Error(cudaMalloc(&d_output, (in_n * in_c + k_n * k_c ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
    Check_Error(cudaMemcpy(d_output,h_output, (in_n * in_c + k_n * k_c ) * tmp_out_h * tmp_out_w * sizeof(Complex), cudaMemcpyHostToDevice));
    startTime = clock(); 
    cfft_plan plan_h = make_cfft_plan (tmp_out_h);
    cfft_plan plan_w = make_cfft_plan (tmp_out_w);
    endTime = clock();
    cout << "The make cfft plan in convfft_gpu_v3 run time is: " <<(double) 1000 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << endl;

    // forward 2-D FFT on the GPU: two 1-D passes with a transpose in between
    startTime = clock(); 
    int threadsPerBlock = 256;
    int blocksPerGrid = ((in_n * in_c * k_n ) * tmp_out_h * tmp_out_w  + threadsPerBlock - 1) / threadsPerBlock;

    fft_1d_fwd<<<blocksPerGrid, threadsPerBlock>>>(d_output, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w);
    transpose_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_output,in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w);
    fft_1d_fwd<<<blocksPerGrid, threadsPerBlock>>>(d_output, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_w, tmp_out_h);
    transpose_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_output, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_w, tmp_out_h);

    Check_Error(cudaDeviceSynchronize());
    endTime = clock();
    cout << "The fft_cuda_2d in convfft_gpu_v3 run time is: " <<(double) 1000 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << endl;

    // copy the spectra back into the Comp staging buffer
    // NOTE(review): this reinterprets Complex bytes as Comp -- presumably the
    // two types share an identical layout; confirm against Complex.cu.
    Check_Error(cudaMemcpy( output, d_output, (in_n * in_c + k_n * k_c ) * tmp_out_h * tmp_out_w * sizeof(Complex), cudaMemcpyDeviceToHost));
    Check_Error(cudaDeviceSynchronize());
  
    // split the combined spectra into the input half and the kernel half
    for (int i = 0 ; i < (in_n * in_c +  k_n * k_c) * tmp_out_h * tmp_out_w;i++){
     if (i < in_n * in_c * tmp_out_h * tmp_out_w){
       outinComp[i] = {output[i].real(),output[i].imag()};  
     } else {
       outkeComp[i - in_n * in_c * tmp_out_h * tmp_out_w] = {output[i].real(),output[i].imag()};  
     }
    }

    // frequency-domain pointwise products: every input plane times the
    // matching channel plane of every kernel filter
    startTime = clock(); 
    for (int nn = 0 ; nn < in_n ; nn++){
        for (int k_index = 0 ; k_index < k_n ; k_index++){ 
            for (int c_index = 0 ; c_index < k_c ; c_index++){
                // the offset of each feature map
                int input_offset = nn * in_c * tmp_out_h * tmp_out_w + c_index * tmp_out_h * tmp_out_w;
                int filter_offset = k_index * k_c * tmp_out_h * tmp_out_w + c_index * tmp_out_h * tmp_out_w;
                vectorCrossMultip(outinComp + input_offset, outkeComp + filter_offset, inOutComp + nn * k_n * k_c * tmp_out_h * tmp_out_w + k_index * k_c * tmp_out_h * tmp_out_w + c_index* tmp_out_h * tmp_out_w, tmp_out_h * tmp_out_w);
            }
        }
    }
    endTime = clock();
    cout << "The vectorCrossMultip in convfft_gpu_v3 run time is: " <<(double) 1000 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << endl;

    startTime = clock(); 
    // inverse 2-D FFT of every product plane
    for (int nn = 0 ; nn < in_n ; nn++){
        for (int k_index = 0 ; k_index < k_n ; k_index++){ 
            for (int c_index = 0 ; c_index < k_c ; c_index++){
                fft_cuda_2d_v2(plan_h, plan_w, 0, tmp_out_h, tmp_out_w, 
                        inOutComp+ nn * k_n * k_c * tmp_out_h * tmp_out_w + k_index * k_c * tmp_out_h * tmp_out_w + c_index* tmp_out_h * tmp_out_w, 
                        knkcOutOutComp + nn * k_n * k_c * tmp_out_h * tmp_out_w + k_index * k_c * tmp_out_h * tmp_out_w + c_index* tmp_out_h * tmp_out_w );
            }
        }
    }

    // destroy the plans
    destroy_cfft_plan (plan_h);
    destroy_cfft_plan (plan_w);

    endTime = clock();
    cout << "The fft_cuda_2d inverse in convfft_gpu_v3 run time is: " <<(double) 1000 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << endl;
    startTime = clock(); 

    // reduce the k_c channel axis: sum the channel planes of every filter
    // response, then drop the channel dimension
    for (int nn = 0 ; nn < in_n ; nn++){
        int n_kn_kc_out_offset = nn * k_n * k_c * tmp_out_h * tmp_out_w;
        for (int k_index = 0 ; k_index < k_n ; k_index++){ 
            int tmp_out_kn_offset = n_kn_kc_out_offset + k_index * k_c * tmp_out_h * tmp_out_w;
            int out_kn_offset = nn * k_n * tmp_out_h * tmp_out_w + k_index * tmp_out_h * tmp_out_w;
            matrixAddition(knkcOutOutComp + tmp_out_kn_offset , k_c, tmp_out_h, tmp_out_w);
            eliminateInputChannel(knkcOutOutComp + tmp_out_kn_offset, knOutOutComp + out_kn_offset, tmp_out_h, tmp_out_w);
        }
    }
    endTime = clock();
    cout << "The matrixAddition in convfft_gpu_v3 run time is: " <<(double) 1000 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << endl;
    startTime = clock(); 

    // crop the padded planes down to the final output size
    Comp* outOutTrimComp = (Comp*) malloc(out_n * out_c * out_h * out_w * sizeof(Comp));
    matrixTrimPadding(knOutOutComp, outOutTrimComp, out_n, out_c, tmp_out_h, tmp_out_w, out_h, out_w);
    // transform complex to real
    transComp2Real(outOutTrimComp, output_Y, out_n * out_c * out_h * out_w);
    endTime = clock();
    cout << "The matrixTrimPadding and transreal in convfft_gpu_v3 run time is: " <<(double) 1000 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << endl;

    // BUGFIX: every buffer below was previously leaked (also removed two
    // allocations, inComp/keComp, that were only referenced by dead code).
    free(knkcOutOutComp);
    free(knOutOutComp);
    free(output);
    free(outinComp);
    free(outkeComp);
    free(inOutComp);
    free(h_output);
    free(outOutTrimComp);
    cudaFree(d_output);
}

/*
   a version which 
   merge the efficient Stockham FFT algorithm 
   into the v2 convfft process!
*/
void convfft_2d_gpu_v4(Real* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  Real* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  int p_h, int p_w,
  int s_h, int s_w,
  int d_h, int d_w,
  Real* output_Y,int out_n, int out_c, int out_d, int out_h, int out_w)
{

  /** comments from cuDNN-API

    (the channel count of the input must equal the kernel's input channel count)
    1. in_c must equal k_c
    2. in_h + 2 * padding height must be 256 or less
       in_w + 2 * padding width must be 256 or less
    3. stride of height and width must equal 1
    4. k_h must be greater than padding height
       k_w must be greater than padding width

  */
  if (k_c != in_c 
    || in_h + 2 * p_h > 256 || in_w + 2 * p_w > 256 
    || s_h != 1 || s_w != 1 
    || k_h <= p_h || k_w <= p_w ){
    throw string("convfft_2d cannot perform convolution because the input size does not match the kernel size!");
  }
  // true linear-convolution size of the result planes
  size_t tmp_out_h_ori = in_h + k_h - 1;
  size_t tmp_out_w_ori = in_w + k_w - 1;

  // round each dimension up to the next power of two for the radix-2 Stockham FFT
  size_t tmp_out_h = pow(R2,((int)log2(tmp_out_h_ori - 1) + 1)); 
  size_t tmp_out_w = pow(R2,((int)log2(tmp_out_w_ori - 1) + 1)); 
  cout <<  "the length of tmp h is " << tmp_out_h << endl;
  cout <<  "the length of tmp w is " << tmp_out_w << endl;
  
  // ---- device allocations ---------------------------------------------------
  Real* d_input_X ;
  Check_Error(cudaMalloc(&d_input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real)));
  Real* d_input_W ;
  Check_Error(cudaMalloc(&d_input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real)));

  // ping-pong pair for the inverse FFT of the product planes
  Complex* d_knkcOutOutComp ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex)));
  Complex* d_knkcOutOutComp_output ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp_output, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex)));

  Real* d_output_Y;
  Check_Error(cudaMalloc(&d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real)));
  // BUGFIX: trim_matrix accumulates into output_Y with `+=`, so the buffer
  // must start zeroed; previously it held uninitialised device memory.
  Check_Error(cudaMemset(d_output_Y, 0, out_n * out_c * out_h * out_w * sizeof(Real)));

  // ping-pong pair holding both the padded input and kernel planes
  Complex* d_input;
  Check_Error(cudaMalloc(&d_input, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  Complex* d_output;
  Check_Error(cudaMalloc(&d_output, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  Check_Error(cudaMemset(d_output, 0, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  int output_X_length = in_n * in_c * in_d * tmp_out_h * tmp_out_w;
  // host -> device copies of the raw inputs
  Check_Error(cudaMemcpy(d_input_X,input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real), cudaMemcpyHostToDevice));
  Check_Error(cudaMemcpy(d_input_W,input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real), cudaMemcpyHostToDevice));
  // launch configuration shared by the element-wise kernels
  int threadsPerBlock = 256;
  int blocksPerGrid = ((in_n * in_c * k_n ) * tmp_out_h * tmp_out_w  + threadsPerBlock - 1) / threadsPerBlock;

  // zero-pad the real inputs into complex planes
  init_comp<<<blocksPerGrid, threadsPerBlock>>>(d_input_X, in_n, in_c, in_d, in_h, in_w, tmp_out_h, tmp_out_w, d_input);
  init_comp<<<blocksPerGrid, threadsPerBlock>>>(d_input_W, k_n, k_c, k_d, k_h, k_w,     tmp_out_h, tmp_out_w, d_input + output_X_length);

  // forward 2-D FFT (Stockham): 1-D pass, transpose, 1-D pass, transpose back
  fft_1d_fwd_stockham(d_input, d_output, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w,(in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w);
  transpose_matrix_v2(d_output,in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w);
  fft_1d_fwd_stockham(d_output, d_input, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_w,tmp_out_h,(in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w);
  transpose_matrix_v2(d_input, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_w, tmp_out_h);
  
  // frequency-domain pointwise products
  hadamard_product<<<blocksPerGrid, threadsPerBlock>>>(d_input, d_input + output_X_length,in_n,in_c,k_n,tmp_out_h,tmp_out_w,d_knkcOutOutComp);

  // inverse 2-D FFT of the product planes
  fft_1d_bwd_stockham(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, in_d, tmp_out_h, tmp_out_w, in_n * k_n * in_c * tmp_out_h * tmp_out_w);
  transpose_matrix_v2(d_knkcOutOutComp_output, in_n * k_n, in_c, in_d, tmp_out_h,tmp_out_w);
  fft_1d_bwd_stockham(d_knkcOutOutComp_output, d_knkcOutOutComp, in_n * k_n, in_c, in_d, tmp_out_w, tmp_out_h,(in_n * k_n * in_c) * tmp_out_h * tmp_out_w);
  transpose_matrix_v2(d_knkcOutOutComp, in_n * k_n, in_c, in_d,tmp_out_w, tmp_out_h);
  
  // reduce the channel axis and crop (offsets based on the pre-padding sizes)
  trim_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp,in_n,in_c,k_n,out_h,out_w,tmp_out_h,tmp_out_w,tmp_out_h_ori,tmp_out_w_ori,d_output_Y);
  Check_Error(cudaGetLastError()); // surface launch-configuration errors

  // blocking copy: implicitly synchronises the default stream
  Check_Error(cudaMemcpy( output_Y, d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real), cudaMemcpyDeviceToHost));

  cudaFree(d_input_X);
  cudaFree(d_input_W);
  cudaFree(d_knkcOutOutComp);
  cudaFree(d_knkcOutOutComp_output);
  cudaFree(d_output_Y);
  cudaFree(d_input);
  cudaFree(d_output);
}
/*
   a version which do not have the matrix transpose kernel
   
   replace it with the "_wt" funcion
    
   migrate the "_wt" funtion into this funcion 
*/
void convfft_2d_gpu_v5(Real* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  Real* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  int p_h, int p_w,
  int s_h, int s_w,
  int d_h, int d_w,
  Real* output_Y,int out_n, int out_c, int out_d, int out_h, int out_w)
{
  /** 2-D convolution via FFT — variant without the explicit matrix-transpose
      kernel: the "_wt" FFT functions handle the second dimension directly.

      Constraints (mirroring the cuDNN API; input channel count must match
      the kernel's input channel count):
      1. in_c must equal k_c
      2. in_h + 2 * padding height must be 256 or less
         in_w + 2 * padding width must be 256 or less
      3. stride of height and width must equal 1
      4. k_h must be greater than padding height
         k_w must be greater than padding width
  */
  if (k_c != in_c 
    || in_h + 2 * p_h > 256 || in_w + 2 * p_w > 256 
    || s_h != 1 || s_w != 1 
    || k_h <= p_h || k_w <= p_w ){
    throw string("convfft_2d cannot perform convolution because the input size does not match the kernel size!");
  }
  // Full (linear) convolution output size, before power-of-two padding.
  size_t tmp_out_h_ori = in_h + k_h - 1;
  size_t tmp_out_w_ori = in_w + k_w - 1;

  // Round each dimension up to the next power of two for the radix-2 FFT.
  // NOTE(review): log2(x - 1) is undefined for x == 1 — assumed not to occur
  // given the size constraints above; confirm for degenerate 1x1 inputs.
  size_t tmp_out_h = pow(R2,((int)log2(tmp_out_h_ori - 1) + 1)); 
  size_t tmp_out_w = pow(R2,((int)log2(tmp_out_w_ori - 1) + 1)); 
  cout <<  "the length of tmp h is " << tmp_out_h << endl;
  cout <<  "the length of tmp w is " << tmp_out_w << endl;

  // ---- device allocations -------------------------------------------------
  Real* d_input_X ;
  Check_Error(cudaMalloc(&d_input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real)));
  Real* d_input_W ;
  Check_Error(cudaMalloc(&d_input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real)));

  // Hadamard-product result plus a ping-pong buffer for the inverse FFT.
  Complex* d_knkcOutOutComp ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex)));
  Complex* d_knkcOutOutComp_output ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp_output, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex)));

  Real* d_output_Y;
  Check_Error(cudaMalloc(&d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real)));

  // One buffer containing both the zero-padded X and W complex matrices so a
  // single batched FFT transforms both at once.
  Complex* d_input;
  Check_Error(cudaMalloc(&d_input, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  Complex* d_output;
  Check_Error(cudaMalloc(&d_output, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  // cudaMemset takes an int byte value: pass 0, not 0.0.
  Check_Error(cudaMemset(d_output, 0, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));

  // Offset of the W section inside the combined buffer; size_t avoids int
  // overflow for large batch/size combinations.
  size_t output_X_length = (size_t)in_n * in_c * in_d * tmp_out_h * tmp_out_w;

  // ---- host -> device copies ----------------------------------------------
  Check_Error(cudaMemcpy(d_input_X,input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real), cudaMemcpyHostToDevice));
  Check_Error(cudaMemcpy(d_input_W,input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real), cudaMemcpyHostToDevice));

  // Launch configuration shared by all element-wise kernels below.
  int threadsPerBlock = 256;
  int blocksPerGrid = ((in_n * in_c * k_n ) * tmp_out_h * tmp_out_w  + threadsPerBlock - 1) / threadsPerBlock;

  // Zero-pad X and W into the combined complex buffer.
  init_comp<<<blocksPerGrid, threadsPerBlock>>>(d_input_X, in_n, in_c, in_d, in_h, in_w, tmp_out_h, tmp_out_w, d_input);
  init_comp<<<blocksPerGrid, threadsPerBlock>>>(d_input_W, k_n, k_c, k_d, k_h, k_w,     tmp_out_h, tmp_out_w, d_input + output_X_length);
  Check_Error(cudaGetLastError());

  // Forward 2-D FFT: one pass per dimension; the "_wt" variant covers the
  // second dimension without an explicit transpose kernel.
  fft_1d_fwd_stockham(d_input, d_output, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w,(in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w);
  fft_1d_fwd_stockham_wt(d_output, d_input, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w,(in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w);

  // Point-wise product of FFT(X) and FFT(W): spectrum of the convolution.
  hadamard_product<<<blocksPerGrid, threadsPerBlock>>>(d_input, d_input + output_X_length,in_n,in_c,k_n,tmp_out_h,tmp_out_w,d_knkcOutOutComp);
  Check_Error(cudaGetLastError());

  // Inverse 2-D FFT of the product spectrum (ping-pong between the buffers).
  fft_1d_bwd_stockham(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, in_d, tmp_out_h, tmp_out_w, in_n * k_n * in_c * tmp_out_h * tmp_out_w);
  fft_1d_bwd_stockham_wt(d_knkcOutOutComp_output, d_knkcOutOutComp, in_n * k_n, in_c, in_d, tmp_out_h, tmp_out_w,(in_n * k_n * in_c) * tmp_out_h * tmp_out_w);

  // Crop the padded result down to the requested output size.
  trim_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp,in_n,in_c,k_n,out_h,out_w,tmp_out_h,tmp_out_w,tmp_out_h_ori,tmp_out_w_ori,d_output_Y);
  Check_Error(cudaGetLastError());

  Check_Error(cudaMemcpy( output_Y, d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real), cudaMemcpyDeviceToHost));

  cudaFree(d_input_X);
  cudaFree(d_input_W);
  cudaFree(d_knkcOutOutComp);
  cudaFree(d_knkcOutOutComp_output);
  cudaFree(d_output_Y);
  cudaFree(d_input);
  cudaFree(d_output);
}
/*
   A version that does not use the Copy_X_x kernel:
   it is replaced with shared memory.
   Version 22 has been migrated into this function.
*/
void convfft_2d_gpu_v6(Real* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  Real* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  int p_h, int p_w,
  int s_h, int s_w,
  int d_h, int d_w,
  Real* output_Y,int out_n, int out_c, int out_d, int out_h, int out_w)
{
  /** 2-D convolution via FFT — variant using the shared-memory ("_sm") FFT
      kernels instead of the Copy_X_x kernel.

      Constraints (mirroring the cuDNN API; input channel count must match
      the kernel's input channel count):
      1. in_c must equal k_c
      2. in_h + 2 * padding height must be 256 or less
         in_w + 2 * padding width must be 256 or less
      3. stride of height and width must equal 1
      4. k_h must be greater than padding height
         k_w must be greater than padding width
  */
  if (k_c != in_c 
    || in_h + 2 * p_h > 256 || in_w + 2 * p_w > 256 
    || s_h != 1 || s_w != 1 
    || k_h <= p_h || k_w <= p_w ){
    throw string("convfft_2d cannot perform convolution because the input size does not match the kernel size!");
  }
  // Full (linear) convolution output size, before power-of-two padding.
  size_t tmp_out_h_ori = in_h + k_h - 1;
  size_t tmp_out_w_ori = in_w + k_w - 1;

  // Round each dimension up to the next power of two for the radix-2 FFT.
  // NOTE(review): log2(x - 1) is undefined for x == 1 — assumed not to occur.
  size_t tmp_out_h = pow(R2,((int)log2(tmp_out_h_ori - 1) + 1)); 
  size_t tmp_out_w = pow(R2,((int)log2(tmp_out_w_ori - 1) + 1)); 
  cout <<  "the length of tmp h is " << tmp_out_h << endl;
  cout <<  "the length of tmp w is " << tmp_out_w << endl;

  // ---- device allocations -------------------------------------------------
  Real* d_input_X ;
  Check_Error(cudaMalloc(&d_input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real)));
  Real* d_input_W ;
  Check_Error(cudaMalloc(&d_input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real)));

  // Hadamard-product result plus a ping-pong buffer for the inverse FFT.
  Complex* d_knkcOutOutComp ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex)));
  Complex* d_knkcOutOutComp_output ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp_output, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex)));

  Real* d_output_Y;
  Check_Error(cudaMalloc(&d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real)));

  // One buffer containing both the zero-padded X and W complex matrices so a
  // single batched FFT transforms both at once.
  Complex* d_input;
  Check_Error(cudaMalloc(&d_input, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  Complex* d_output;
  Check_Error(cudaMalloc(&d_output, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  // cudaMemset takes an int byte value: pass 0, not 0.0.
  Check_Error(cudaMemset(d_output, 0, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));

  // Offset of the W section inside the combined buffer; size_t avoids int
  // overflow for large batch/size combinations.
  size_t output_X_length = (size_t)in_n * in_c * in_d * tmp_out_h * tmp_out_w;

  // ---- host -> device copies ----------------------------------------------
  Check_Error(cudaMemcpy(d_input_X,input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real), cudaMemcpyHostToDevice));
  Check_Error(cudaMemcpy(d_input_W,input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real), cudaMemcpyHostToDevice));

  // Launch configuration shared by all element-wise kernels below.
  int threadsPerBlock = 256;
  int blocksPerGrid = ((in_n * in_c * k_n ) * tmp_out_h * tmp_out_w  + threadsPerBlock - 1) / threadsPerBlock;

  // Zero-pad X and W into the combined complex buffer.
  init_comp<<<blocksPerGrid, threadsPerBlock>>>(d_input_X, in_n, in_c, in_d, in_h, in_w, tmp_out_h, tmp_out_w, d_input);
  init_comp<<<blocksPerGrid, threadsPerBlock>>>(d_input_W, k_n, k_c, k_d, k_h, k_w,     tmp_out_h, tmp_out_w, d_input + output_X_length);
  Check_Error(cudaGetLastError());

  // Forward 2-D FFT using the shared-memory variants.
  // NOTE(review): the second call passes d_input as both source and
  // destination and ignores the d_output produced by the first pass; verify
  // the "_wt_sm" variant is genuinely in-place and that the hadamard product
  // below reads the fully transformed data (compare with the v5 chaining).
  fft_1d_fwd_stockham_sm(d_input, d_output, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w,(in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w);
  fft_1d_fwd_stockham_wt_sm(d_input, d_input, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w,(in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w);

  // Point-wise product of FFT(X) and FFT(W): spectrum of the convolution.
  hadamard_product<<<blocksPerGrid, threadsPerBlock>>>(d_input, d_input + output_X_length,in_n,in_c,k_n,tmp_out_h,tmp_out_w,d_knkcOutOutComp);
  Check_Error(cudaGetLastError());

  // Inverse 2-D FFT of the product spectrum.
  // NOTE(review): unlike v5, the second call re-reads d_knkcOutOutComp (the
  // pre-first-pass data) and trim_matrix below also reads d_knkcOutOutComp
  // while "_wt_sm" writes to d_knkcOutOutComp_output — confirm this chaining
  // is what the "_sm" variants expect.
  fft_1d_bwd_stockham_sm(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, in_d, tmp_out_h, tmp_out_w, in_n * k_n * in_c * tmp_out_h * tmp_out_w);
  fft_1d_bwd_stockham_wt_sm(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, in_d, tmp_out_h, tmp_out_w,(in_n * k_n * in_c) * tmp_out_h * tmp_out_w);

  // Crop the padded result down to the requested output size.
  trim_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp,in_n,in_c,k_n,out_h,out_w,tmp_out_h,tmp_out_w,tmp_out_h_ori,tmp_out_w_ori,d_output_Y);
  Check_Error(cudaGetLastError());

  Check_Error(cudaMemcpy( output_Y, d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real), cudaMemcpyDeviceToHost));

  cudaFree(d_input_X);
  cudaFree(d_input_W);
  cudaFree(d_knkcOutOutComp);
  cudaFree(d_knkcOutOutComp_output);
  cudaFree(d_output_Y);
  cudaFree(d_input);
  cudaFree(d_output);
}
/*
   A version that does not use the Copy_X_x kernel:
   it is replaced by swapping the positions of the input and output buffers.
   Version 23 has been migrated into this function.
*/
void convfft_2d_gpu_v7(Real* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  Real* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  int p_h, int p_w,
  int s_h, int s_w,
  int d_h, int d_w,
  Real* output_Y,int out_n, int out_c, int out_d, int out_h, int out_w)
{
  /** 2-D convolution via FFT — variant that avoids the Copy_X_x kernel by
      swapping the roles of the input and output buffers ("_sp" FFT kernels).

      Constraints (mirroring the cuDNN API; input channel count must match
      the kernel's input channel count):
      1. in_c must equal k_c
      2. in_h + 2 * padding height must be 256 or less
         in_w + 2 * padding width must be 256 or less
      3. stride of height and width must equal 1
      4. k_h must be greater than padding height
         k_w must be greater than padding width
  */
  if (k_c != in_c 
    || in_h + 2 * p_h > 256 || in_w + 2 * p_w > 256 
    || s_h != 1 || s_w != 1 
    || k_h <= p_h || k_w <= p_w ){
    throw string("convfft_2d cannot perform convolution because the input size does not match the kernel size!");
  }
  // Full (linear) convolution output size, before power-of-two padding.
  size_t tmp_out_h_ori = in_h + k_h - 1;
  size_t tmp_out_w_ori = in_w + k_w - 1;

  // Round each dimension up to the next power of two for the radix-2 FFT.
  // NOTE(review): log2(x - 1) is undefined for x == 1 — assumed not to occur.
  size_t tmp_out_h = pow(R2,((int)log2(tmp_out_h_ori - 1) + 1)); 
  size_t tmp_out_w = pow(R2,((int)log2(tmp_out_w_ori - 1) + 1)); 

  // ---- device allocations -------------------------------------------------
  Real* d_input_X ;
  Check_Error(cudaMalloc(&d_input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real)));
  Real* d_input_W ;
  Check_Error(cudaMalloc(&d_input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real)));

  // Hadamard-product result plus a ping-pong buffer for the inverse FFT.
  Complex* d_knkcOutOutComp ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex)));
  Complex* d_knkcOutOutComp_output ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp_output, in_n * k_n * k_c * tmp_out_h * tmp_out_w* sizeof(Complex)));

  Real* d_output_Y;
  Check_Error(cudaMalloc(&d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real)));

  // One buffer containing both the zero-padded X and W complex matrices so a
  // single batched FFT transforms both at once.
  Complex* d_input;
  Check_Error(cudaMalloc(&d_input, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  Complex* d_output;
  Check_Error(cudaMalloc(&d_output, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));
  // cudaMemset takes an int byte value: pass 0, not 0.0.
  Check_Error(cudaMemset(d_output, 0, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w * sizeof(Complex)));

  // Offset of the W section inside the combined buffer; size_t avoids int
  // overflow for large batch/size combinations.
  size_t output_X_length = (size_t)in_n * in_c * in_d * tmp_out_h * tmp_out_w;

  // ---- host -> device copies ----------------------------------------------
  Check_Error(cudaMemcpy(d_input_X,input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real), cudaMemcpyHostToDevice));
  Check_Error(cudaMemcpy(d_input_W,input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real), cudaMemcpyHostToDevice));

  // Launch configuration shared by all element-wise kernels below.
  int threadsPerBlock = 256;
  int blocksPerGrid = ((in_n * in_c * k_n ) * tmp_out_h * tmp_out_w  + threadsPerBlock - 1) / threadsPerBlock;

  // Zero-pad X and W into the combined complex buffer.
  init_comp<<<blocksPerGrid, threadsPerBlock>>>(d_input_X, in_n, in_c, in_d, in_h, in_w, tmp_out_h, tmp_out_w, d_input);
  init_comp<<<blocksPerGrid, threadsPerBlock>>>(d_input_W, k_n, k_c, k_d, k_h, k_w,     tmp_out_h, tmp_out_w, d_input + output_X_length);
  Check_Error(cudaGetLastError());

  // Forward 2-D FFT using the "_sp" variants.
  // NOTE(review): the first call's argument list (in_n, in_c, k_n, k_c, k_d)
  // differs from the second (in_n, in_c, in_d, k_n, k_c, k_d) — verify both
  // match the respective "_sp" signatures.
  fft_1d_fwd_stockham_sp(d_input, d_output, in_n, in_c, k_n, k_c, k_d, tmp_out_h,tmp_out_w,(in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w);
  fft_1d_fwd_stockham_wt_sp(d_output, d_input, in_n, in_c, in_d, k_n, k_c, k_d, tmp_out_h,tmp_out_w,(in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_h * tmp_out_w);

  // Point-wise product of FFT(X) and FFT(W): spectrum of the convolution.
  hadamard_product<<<blocksPerGrid, threadsPerBlock>>>(d_input, d_input + output_X_length,in_n,in_c,k_n,tmp_out_h,tmp_out_w,d_knkcOutOutComp);
  Check_Error(cudaGetLastError());

  // Inverse 2-D FFT of the product spectrum (ping-pong between the buffers).
  fft_1d_bwd_stockham_sp(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, in_d, tmp_out_h, tmp_out_w, in_n * k_n * in_c * tmp_out_h * tmp_out_w);
  fft_1d_bwd_stockham_wt_sp(d_knkcOutOutComp_output, d_knkcOutOutComp, in_n * k_n, in_c, in_d, tmp_out_h, tmp_out_w,(in_n * k_n * in_c) * tmp_out_h * tmp_out_w);

  // Crop the padded result down to the requested output size.
  trim_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp,in_n,in_c,k_n,out_h,out_w,tmp_out_h,tmp_out_w,tmp_out_h_ori,tmp_out_w_ori,d_output_Y);
  Check_Error(cudaGetLastError());

  Check_Error(cudaMemcpy( output_Y, d_output_Y, out_n * out_c * out_h * out_w * sizeof(Real), cudaMemcpyDeviceToHost));

  cudaFree(d_input_X);
  cudaFree(d_input_W);
  cudaFree(d_knkcOutOutComp);
  cudaFree(d_knkcOutOutComp_output);
  cudaFree(d_output_Y);
  cudaFree(d_input);
  cudaFree(d_output);
}


void convfft_3d_gpu(Real* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  Real* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  int p_d, int p_h, int p_w,
  int s_d, int s_h, int s_w,
  int d_d, int d_h, int d_w,
  Real* output_Y,int out_n, int out_c, int out_d, int out_h, int out_w)
{
  /** 3-D convolution via FFT: forward FFT along w, h, d (with axis swaps),
      point-wise product, inverse FFT along the same axes, then trim.

      Constraints (mirroring the cuDNN API; input channel count must match
      the kernel's input channel count):
      1. in_c must equal k_c
      2. in_h + 2 * padding height must be 256 or less
         in_w + 2 * padding width must be 256 or less
      3. stride of height and width must equal 1
      4. k_h must be greater than padding height
         k_w must be greater than padding width
  */
  if (k_c != in_c 
    || in_h + 2 * p_h > 256 || in_w + 2 * p_w > 256 
    || s_h != 1 || s_w != 1 
    || k_h <= p_h || k_w <= p_w ){
    // Was "convfft_2d ..." — corrected to name this 3-D entry point.
    cout << "convfft_3d cannot perform convolution because the input size does not match the kernel size!" << endl;
    throw string("convfft_3d cannot perform convolution because the input size does not match the kernel size!");
  }
    
  // Padded sizes; the width is additionally rounded up to an even value.
  size_t tmp_out_h_ori = in_h + p_h * 2;
  size_t p_out_w = in_w + p_w * 2;
  size_t tmp_out_w_ori = p_out_w % 2 != 0 ? p_out_w + 1 : p_out_w;
  size_t tmp_out_d_ori = in_d + p_d * 2;

  // Round each dimension up to the next power of two for the radix-2 FFT.
  // NOTE(review): log2(x - 1) is undefined for x == 1 — assumed not to occur.
  size_t tmp_out_h = pow(R2,((int)log2(tmp_out_h_ori - 1) + 1)); 
  size_t tmp_out_w = pow(R2,((int)log2(tmp_out_w_ori - 1) + 1)); 
  size_t tmp_out_d = pow(R2,((int)log2(tmp_out_d_ori - 1) + 1)); 

  // ---- device allocations -------------------------------------------------
  Real* d_input_X ;
  Check_Error(cudaMalloc(&d_input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real)));
  Real* d_input_W ;
  Check_Error(cudaMalloc(&d_input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real)));

  // Hadamard-product result plus a ping-pong buffer for the inverse FFT.
  Complex* d_knkcOutOutComp ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp, in_n * k_n * k_c * tmp_out_d * tmp_out_h * tmp_out_w * sizeof(Complex)));
  Complex* d_knkcOutOutComp_output ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp_output, in_n * k_n * k_c * tmp_out_d * tmp_out_h * tmp_out_w* sizeof(Complex)));

  Real* d_output_Y;
  Check_Error(cudaMalloc(&d_output_Y, out_n * out_c * out_d * out_h * out_w * sizeof(Real)));

  // One buffer containing both the zero-padded X and W complex volumes so a
  // single batched FFT transforms both at once.
  Complex* d_input;
  Check_Error(cudaMalloc(&d_input, (in_n * in_c + k_n * k_c) * tmp_out_d * tmp_out_h * tmp_out_w * sizeof(Complex)));
  Complex* d_output;
  Check_Error(cudaMalloc(&d_output, (in_n * in_c + k_n * k_c) * tmp_out_d * tmp_out_h * tmp_out_w * sizeof(Complex)));
  // cudaMemset takes an int byte value: pass 0, not 0.0.
  Check_Error(cudaMemset(d_output, 0, (in_n * in_c + k_n * k_c) * tmp_out_d * tmp_out_h * tmp_out_w * sizeof(Complex)));

  // Offset of the W section inside the combined buffer; size_t avoids int
  // overflow for large batch/size combinations.
  size_t output_X_length = (size_t)in_n * in_c * tmp_out_d * tmp_out_h * tmp_out_w;

  // ---- host -> device copies ----------------------------------------------
  Check_Error(cudaMemcpy(d_input_X,input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real), cudaMemcpyHostToDevice));
  Check_Error(cudaMemcpy(d_input_W,input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real), cudaMemcpyHostToDevice));

  // Launch configuration shared by all element-wise kernels below.
  int threadsPerBlock = 256;
  int blocksPerGrid = ((in_n * in_c * k_n ) * tmp_out_d * tmp_out_h * tmp_out_w  + threadsPerBlock - 1) / threadsPerBlock;

  // Zero-pad X (with its padding offsets) and W into the combined buffer.
  init_comp_3d<<<blocksPerGrid, threadsPerBlock>>>(d_input_X, in_n, in_c, in_d, in_h, in_w, p_d, p_h, p_w, tmp_out_d, tmp_out_h, tmp_out_w, d_input);
  init_comp_3d<<<blocksPerGrid, threadsPerBlock>>>(d_input_W, k_n, k_c, k_d, k_h, k_w, 0, 0, 0, tmp_out_d, tmp_out_h, tmp_out_w, d_input + output_X_length);
  Check_Error(cudaGetLastError());

  // Forward FFT along the innermost (w) axis.
  fft_1d_fwd_stockham_sp(d_input, d_output, in_n, in_c, k_n, k_c, tmp_out_d, tmp_out_h, tmp_out_w, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_d * tmp_out_h * tmp_out_w);

  // Forward FFT along h: swap axes 2<->3, transform, swap back.
  swap_matrix_axes(d_output, d_input,  in_n, in_c, k_n, k_c, tmp_out_d, tmp_out_h, tmp_out_w, 2, 3);
  fft_1d_fwd_stockham_sp(d_input, d_output, in_n, in_c, k_n, k_c, tmp_out_d, tmp_out_w, tmp_out_h,(in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_d * tmp_out_h * tmp_out_w);
  swap_matrix_axes( d_output, d_input, in_n, in_c, k_n, k_c, tmp_out_d, tmp_out_w, tmp_out_h, 2, 3);

  // Forward FFT along d: swap axes 1<->3, transform, swap back.
  swap_matrix_axes(d_input, d_output, in_n, in_c, k_n, k_c, tmp_out_d, tmp_out_h, tmp_out_w, 1, 3);
  fft_1d_fwd_stockham_sp(d_output, d_input,in_n, in_c, k_n, k_c, tmp_out_w,tmp_out_h,tmp_out_d, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_d * tmp_out_h * tmp_out_w);
  swap_matrix_axes(d_input, d_output, in_n, in_c, k_n, k_c, tmp_out_w, tmp_out_h, tmp_out_d, 1, 3);

  // Point-wise product of FFT(X) and FFT(W): spectrum of the convolution.
  hadamard_product_3d<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output + output_X_length, in_n, in_c, k_n, tmp_out_d, tmp_out_h, tmp_out_w, d_knkcOutOutComp);
  Check_Error(cudaGetLastError());

  // Inverse FFT along w, then h, then d (mirroring the forward passes).
  fft_1d_bwd_stockham_sp(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, tmp_out_d, tmp_out_h, tmp_out_w, in_n * k_n * in_c * tmp_out_d * tmp_out_h * tmp_out_w);
  
  swap_matrix_axes(d_knkcOutOutComp_output, d_knkcOutOutComp, in_n * k_n, in_c, tmp_out_d, tmp_out_h, tmp_out_w, 2, 3);
  fft_1d_bwd_stockham_sp(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, tmp_out_d, tmp_out_w, tmp_out_h, in_n * k_n * in_c * tmp_out_d * tmp_out_h * tmp_out_w);
  swap_matrix_axes(d_knkcOutOutComp_output, d_knkcOutOutComp, in_n * k_n, in_c, tmp_out_d, tmp_out_w, tmp_out_h, 2, 3);

  swap_matrix_axes(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, tmp_out_d, tmp_out_h, tmp_out_w, 1, 3);
  fft_1d_bwd_stockham_sp(d_knkcOutOutComp_output, d_knkcOutOutComp, in_n * k_n, in_c, tmp_out_w, tmp_out_h, tmp_out_d, in_n * k_n * in_c * tmp_out_d * tmp_out_h * tmp_out_w);
  swap_matrix_axes(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, tmp_out_w, tmp_out_h, tmp_out_d, 1, 3);

  // Crop the padded result down to the requested output size.
  trim_matrix_3d<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp_output,in_n,in_c,k_n,out_d,out_h,out_w,tmp_out_d,tmp_out_h,tmp_out_w,tmp_out_d_ori,tmp_out_h_ori,tmp_out_w_ori,d_output_Y);
  Check_Error(cudaGetLastError());

  Check_Error(cudaMemcpy( output_Y, d_output_Y, out_n * out_d * out_c * out_h * out_w * sizeof(Real), cudaMemcpyDeviceToHost));

  cudaFree(d_input_X);
  cudaFree(d_input_W);
  cudaFree(d_knkcOutOutComp);
  cudaFree(d_knkcOutOutComp_output);
  cudaFree(d_output_Y);
  cudaFree(d_input);
  cudaFree(d_output);
}


void convfft_3d_4backdata_gpu(Real* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  Real* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  int p_d, int p_h, int p_w,
  int s_d, int s_h, int s_w,
  int d_d, int d_h, int d_w,
  Real* output_Y,int out_n, int out_c, int out_d, int out_h, int out_w)
{
  /** 3-D convolution via FFT for the backward-data pass: same pipeline as
      convfft_3d_gpu (forward FFT along w/h/d with axis swaps, point-wise
      product, inverse FFT, trim).

      Constraints (mirroring the cuDNN API; input channel count must match
      the kernel's input channel count):
      1. in_c must equal k_c
      2. in_h + 2 * padding height must be 256 or less
         in_w + 2 * padding width must be 256 or less
      3. stride of height and width must equal 1
      4. k_h must be greater than padding height
         k_w must be greater than padding width
  */
  if (k_c != in_c 
    || in_h + 2 * p_h > 256 || in_w + 2 * p_w > 256 
    || s_h != 1 || s_w != 1 
    || k_h <= p_h || k_w <= p_w ){
    // Was "convfft_2d ..." — corrected to name this 3-D entry point.
    cout << "convfft_3d_4backdata cannot perform convolution because the input size does not match the kernel size!" << endl;
    throw string("convfft_3d_4backdata cannot perform convolution because the input size does not match the kernel size!");
  }
    
  // Padded sizes; the width is additionally rounded up to an even value.
  size_t tmp_out_h_ori = in_h + p_h * 2;
  size_t p_out_w = in_w + p_w * 2;
  size_t tmp_out_w_ori = p_out_w % 2 != 0 ? p_out_w + 1 : p_out_w;
  size_t tmp_out_d_ori = in_d + p_d * 2;

  // Round each dimension up to the next power of two for the radix-2 FFT.
  // NOTE(review): log2(x - 1) is undefined for x == 1 — assumed not to occur.
  size_t tmp_out_h = pow(R2,((int)log2(tmp_out_h_ori - 1) + 1)); 
  size_t tmp_out_w = pow(R2,((int)log2(tmp_out_w_ori - 1) + 1)); 
  size_t tmp_out_d = pow(R2,((int)log2(tmp_out_d_ori - 1) + 1)); 
  
  cout <<  "the length of tmp h is " << tmp_out_h << endl;
  cout <<  "the length of tmp w is " << tmp_out_w << endl;
  cout <<  "the length of tmp d is " << tmp_out_d << endl;

  // ---- device allocations -------------------------------------------------
  Real* d_input_X ;
  Check_Error(cudaMalloc(&d_input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real)));
  Real* d_input_W ;
  Check_Error(cudaMalloc(&d_input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real)));

  // Hadamard-product result plus a ping-pong buffer for the inverse FFT.
  Complex* d_knkcOutOutComp ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp, in_n * k_n * k_c * tmp_out_d * tmp_out_h * tmp_out_w * sizeof(Complex)));
  Complex* d_knkcOutOutComp_output ;
  Check_Error(cudaMalloc(&d_knkcOutOutComp_output, in_n * k_n * k_c * tmp_out_d * tmp_out_h * tmp_out_w* sizeof(Complex)));

  Real* d_output_Y;
  Check_Error(cudaMalloc(&d_output_Y, out_n * out_c * out_d * out_h * out_w * sizeof(Real)));

  // One buffer containing both the zero-padded X and W complex volumes so a
  // single batched FFT transforms both at once.
  Complex* d_input;
  Check_Error(cudaMalloc(&d_input, (in_n * in_c + k_n * k_c) * tmp_out_d * tmp_out_h * tmp_out_w * sizeof(Complex)));
  Complex* d_output;
  Check_Error(cudaMalloc(&d_output, (in_n * in_c + k_n * k_c) * tmp_out_d * tmp_out_h * tmp_out_w * sizeof(Complex)));
  // cudaMemset takes an int byte value: pass 0, not 0.0.
  Check_Error(cudaMemset(d_output, 0, (in_n * in_c + k_n * k_c) * tmp_out_d * tmp_out_h * tmp_out_w * sizeof(Complex)));

  // Offset of the W section inside the combined buffer; size_t avoids int
  // overflow for large batch/size combinations.
  size_t output_X_length = (size_t)in_n * in_c * tmp_out_d * tmp_out_h * tmp_out_w;

  // ---- host -> device copies ----------------------------------------------
  Check_Error(cudaMemcpy(d_input_X,input_X, in_n * in_c * in_d * in_h * in_w * sizeof(Real), cudaMemcpyHostToDevice));
  Check_Error(cudaMemcpy(d_input_W,input_W, k_n * k_c * k_d * k_h * k_w * sizeof(Real), cudaMemcpyHostToDevice));

  // Launch configuration shared by all element-wise kernels below.
  int threadsPerBlock = 256;
  int blocksPerGrid = ((in_n * in_c * k_n ) * tmp_out_d * tmp_out_h * tmp_out_w  + threadsPerBlock - 1) / threadsPerBlock;

  // Zero-pad X (with its padding offsets) and W into the combined buffer.
  init_comp_3d<<<blocksPerGrid, threadsPerBlock>>>(d_input_X, in_n, in_c, in_d, in_h, in_w, p_d, p_h, p_w, tmp_out_d, tmp_out_h, tmp_out_w, d_input);
  init_comp_3d<<<blocksPerGrid, threadsPerBlock>>>(d_input_W, k_n, k_c, k_d, k_h, k_w, 0, 0, 0, tmp_out_d, tmp_out_h, tmp_out_w, d_input + output_X_length);
  Check_Error(cudaGetLastError());

  // Forward FFT along the innermost (w) axis.
  fft_1d_fwd_stockham_sp(d_input, d_output, in_n, in_c, k_n, k_c, tmp_out_d, tmp_out_h, tmp_out_w, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_d * tmp_out_h * tmp_out_w);

  // Forward FFT along h: swap axes 2<->3, transform, swap back.
  swap_matrix_axes(d_output, d_input,  in_n, in_c, k_n, k_c, tmp_out_d, tmp_out_h, tmp_out_w, 2, 3);
  fft_1d_fwd_stockham_sp(d_input, d_output, in_n, in_c, k_n, k_c, tmp_out_d, tmp_out_w, tmp_out_h,(in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_d * tmp_out_h * tmp_out_w);
  swap_matrix_axes( d_output, d_input, in_n, in_c, k_n, k_c, tmp_out_d, tmp_out_w, tmp_out_h, 2, 3);

  // Forward FFT along d: swap axes 1<->3, transform, swap back.
  swap_matrix_axes(d_input, d_output, in_n, in_c, k_n, k_c, tmp_out_d, tmp_out_h, tmp_out_w, 1, 3);
  fft_1d_fwd_stockham_sp(d_output, d_input,in_n, in_c, k_n, k_c, tmp_out_w,tmp_out_h,tmp_out_d, (in_n * in_c * in_d + k_n * k_c * k_d ) * tmp_out_d * tmp_out_h * tmp_out_w);
  swap_matrix_axes(d_input, d_output, in_n, in_c, k_n, k_c, tmp_out_w, tmp_out_h, tmp_out_d, 1, 3);

  // Point-wise product of FFT(X) and FFT(W): spectrum of the convolution.
  hadamard_product_3d<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output + output_X_length, in_n, in_c, k_n, tmp_out_d, tmp_out_h, tmp_out_w, d_knkcOutOutComp);
  Check_Error(cudaGetLastError());

  // Inverse FFT along w, then h, then d (mirroring the forward passes).
  fft_1d_bwd_stockham_sp(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, tmp_out_d, tmp_out_h, tmp_out_w, in_n * k_n * in_c * tmp_out_d * tmp_out_h * tmp_out_w);
  
  swap_matrix_axes(d_knkcOutOutComp_output, d_knkcOutOutComp, in_n * k_n, in_c, tmp_out_d, tmp_out_h, tmp_out_w, 2, 3);
  fft_1d_bwd_stockham_sp(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, tmp_out_d, tmp_out_w, tmp_out_h, in_n * k_n * in_c * tmp_out_d * tmp_out_h * tmp_out_w);
  swap_matrix_axes(d_knkcOutOutComp_output, d_knkcOutOutComp, in_n * k_n, in_c, tmp_out_d, tmp_out_w, tmp_out_h, 2, 3);

  swap_matrix_axes(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, tmp_out_d, tmp_out_h, tmp_out_w, 1, 3);
  fft_1d_bwd_stockham_sp(d_knkcOutOutComp_output, d_knkcOutOutComp, in_n * k_n, in_c, tmp_out_w, tmp_out_h, tmp_out_d, in_n * k_n * in_c * tmp_out_d * tmp_out_h * tmp_out_w);
  swap_matrix_axes(d_knkcOutOutComp, d_knkcOutOutComp_output, in_n * k_n, in_c, tmp_out_w, tmp_out_h, tmp_out_d, 1, 3);

  // Crop the padded result down to the requested output size.
  trim_matrix_3d<<<blocksPerGrid, threadsPerBlock>>>(d_knkcOutOutComp_output,in_n,in_c,k_n,out_d,out_h,out_w,tmp_out_d,tmp_out_h,tmp_out_w,tmp_out_d_ori,tmp_out_h_ori,tmp_out_w_ori,d_output_Y);
  Check_Error(cudaGetLastError());

  Check_Error(cudaMemcpy( output_Y, d_output_Y, out_n * out_d * out_c * out_h * out_w * sizeof(Real), cudaMemcpyDeviceToHost));

  cudaFree(d_input_X);
  cudaFree(d_input_W);
  cudaFree(d_knkcOutOutComp);
  cudaFree(d_knkcOutOutComp_output);
  cudaFree(d_output_Y);
  cudaFree(d_input);
  cudaFree(d_output);
}










