#include<stdio.h>
#include<stdlib.h>
#include <sys/time.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include <cudnn.h>
#include <math.h>

#define inputsize (64 * 224 * 224)
#define kernelsize (64 * 3 * 3 * 64)
#define outputsize (64 * 222 *222)

// Abort-on-error wrapper for CUDA runtime calls: on failure, print the call
// site and the runtime's reason, then exit. Wrapped in do/while(0) so the
// macro expands to a single statement and stays safe inside an unbraced
// if/else (the original bare-brace form was not).
#define CHECK(call)\
do {\
    const cudaError_t error = call;\
    if(error != cudaSuccess)\
    {\
        printf("Error: %s:%d, ", __FILE__, __LINE__);\
        printf("Code:%d, reason: %s\n",error, cudaGetErrorString(error));\
        exit(1);\
    }\
} while(0)

#define CUDA_CHECK_ERROR()    __cudaCheckError( __FILE__, __LINE__ )

// Two-stage CUDA error check: first report any error already recorded by a
// prior launch, then synchronize the device so in-kernel faults surface here.
// The sync is expensive; comment it away in performance runs if needed.
inline void __cudaCheckError( const char *file, const int line )
{
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( status ) );
        exit( -1 );
    }

    // More careful checking. However, this will affect performance.
    status = cudaDeviceSynchronize();
    if (status != cudaSuccess)
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( status ) );
        exit( -1 );
    }
}

// Abort-on-error wrapper for cuDNN calls. The argument is captured into a
// local so it is evaluated exactly once -- the original macro re-evaluated
// the whole call expression inside cudnnGetErrorString() on the error path,
// which would re-execute the failing cuDNN call.
#define checkCUDNN(call) do {                                          \
    cudnnStatus_t checkcudnn_status_ = (call);                         \
    if (checkcudnn_status_ != CUDNN_STATUS_SUCCESS) {                  \
        printf("Error: %s:%d, ", __FILE__, __LINE__);                  \
        printf("reason: %s\n",cudnnGetErrorString(checkcudnn_status_));\
        exit(-1);                                                      \
    }                                                                  \
} while(0)

// Forward declarations for the Winograd pipeline. BUGFIX: Batch_GEMM was
// declared with two parameters while its definition below takes three
// (feature_map, kernel, output), which silently declared an unrelated,
// never-defined overload; the declaration now matches the definition.
__global__ void input_transform(float *input, float *output, int C, int H, int W);
__global__ void kernel_transform(float *input, float *output, int C, int K);
__global__ void Batch_GEMM(float *feature_map, float *kernel, float *output);
__global__ void output_transform(float *input, float *output);
void checkresult(float *gpuinput, int size);

// Scans `size` floats and reports whether every value equals the expected
// convolution result 576.0 (within a 0.01 tolerance); otherwise prints the
// number of mismatching elements.
void checkresult(float *gpuinput, int size)
{
    int mismatches = 0;

    for (int idx = 0; idx < size; ++idx)
    {
        if (fabs(gpuinput[idx] - 576.0) > 0.01)
            ++mismatches;
            //printf("%f\n",gpuinput[idx]);
    }

    if (mismatches == 0)
    {
        printf("result right\n");
    }
    else
    {
        printf("result wrong\n");
        printf("%d\n", mismatches);
    }
}
/*
void checkresult_input_transform(float *gpuinput, int size = 12321*64*16)
{
    int i;
    int error = 0;
    for(i = 0; i < 5*12321*64; ++i)
    {
        if(fabs(gpuinput[i]-0) > 0.01)
        {
            error++;
            //printf("%f\n",gpuinput[i]);
        }
            
    }

    for(i = 5*12321*64; i < 6*12321*64; ++i)
    {
        if(fabs(gpuinput[i]-8) > 0.01)
        {
            error++;
            //printf("%f\n",gpuinput[i]);
        }
            
    }
    for(i = 6*12321*64; i < 15*12321*64; ++i)
    {
        if(fabs(gpuinput[i]-0) > 0.01)
        {
            error++;
            //printf("%f\n",gpuinput[i]);
        }
            
    }

    if(error == 0)
        printf("result right\n");
    else
    {
        printf("result wrong\n");
        printf("%d\n",error);
    }
        
}

void checkresult_kernel_transform(float *gpuinput)
{
    int i,j,k;
    int error = 0;
    float real[16] = {0.5,0.75,0.25,0.5,0.75,1.125,0.375,0.75,0.25,0.375,0.125,0.25,0.5,0.75,0.25,0.5};
    for(i = 0; i < 16; ++i)
    for(j = 0; j < 64; ++j)
    for(k = 0; k < 64; ++k)
            if(fabs(gpuinput[i*64*64+j*64+k]-real[i]) > 0.01)  error++;

    if(error == 0)
        printf("result right\n");
    else
    {
        printf("result wrong\n");
        printf("%d\n",error);
    }
        
}


void checkresult_batch_transform(float *gpuinput)
{
    int i,j,k;
    int error = 0;
    
    for(i = 0; i < 5*12321*64; ++i)
    {
        if(fabs(gpuinput[i]-0) > 0.01)
        {
            error++;
            //printf("%f\n",gpuinput[i]);
        }
            
    }

    for(i = 5*12321*64; i < 6*12321*64; ++i)
    {
        if(fabs(gpuinput[i]-576) > 0.01)
        {
            error++;
            //printf("%f\n",gpuinput[i]);
        }
            
    }
    for(i = 6*12321*64; i < 15*12321*64; ++i)
    {
        if(fabs(gpuinput[i]-0) > 0.01)
        {
            error++;
            //printf("%f\n",gpuinput[i]);
        }
            
    }

    if(error == 0)
        printf("result right\n");
    else
    {
        printf("result wrong\n");
        printf("%d\n",error);
    }
}
*/

// Wall-clock time in seconds (microsecond resolution), used for host-side
// timing of the GPU runs.
double cpusecond()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}

// Winograd F(2x2,3x3) input transform: for every 4x4 input tile d (tiles
// start every 2 pixels, so neighbours overlap by 2) computes B^T * d * B with
//   B^T = [[1,0,-1,0],[0,1,1,0],[0,-1,1,0],[0,1,0,-1]]
// across all 64 channels, scattering the 16 results into 16 element matrices
// laid out as [16][111*111 tiles][64 channels].
// Expected launch: grid (111,111) = one block per tile, block (256,4):
// threadIdx.y = row of B^T, threadIdx.x/64 = tile column, threadIdx.x%64 = channel.
// NOTE(review): C/H/W are effectively unused -- the strides 64 and 224*64 are
// hard-coded below, so this kernel only handles C=64, H=W=224 (HWC layout).
__global__ void input_transform(float *input, float *output, int C=64, int H=224, int W=224)
{
    int bix = threadIdx.y * blockDim.x + threadIdx.x; // flat thread index within the block
    int pixel_id = bix / C; // 0..15: group id (one group of C=64 threads per tile element)
    int offset = bix % 64; // offset inside the group = channel index
    int x_start = (blockIdx.x * 2 + threadIdx.x / 64 ) * 64;    // 2 is overlap (tiles advance by 2 pixels)
    int y_start = (blockIdx.y * 2 + threadIdx.y) * 224 * 64 ; 

    // Row-pass result B^T*d: [B^T row][tile column * 64 + channel].
    __shared__ float BTd[4][256];

    // Row pass: each threadIdx.y evaluates one row of B^T; offsets of
    // +/- 224*64 step whole input rows relative to y_start.
    switch(threadIdx.y)
    {
        case 0: // row 0: d0 - d2
            BTd[threadIdx.y][threadIdx.x] = input[y_start + x_start + offset] - input[y_start + x_start + 2 * 224 * 64 + offset];
            break;
        case 1: // row 1: d1 + d2
            BTd[threadIdx.y][threadIdx.x] = input[y_start + x_start + offset] + input[y_start + x_start + 224 * 64 + offset];
            break;
        case 2: // row 2: d2 - d1
            BTd[threadIdx.y][threadIdx.x] = input[y_start + x_start + offset] - input[y_start + x_start - 224 * 64 + offset];
            break;
        case 3: // row 3: d1 - d3
            BTd[threadIdx.y][threadIdx.x] = -input[y_start + x_start + offset] + input[y_start + x_start - 2 * 224 * 64 + offset];   
            break;
    }
    __syncthreads();
    int tile_id = blockIdx.y * gridDim.x + blockIdx.x;
    //printf("mytile_Id is: %d  the gridDim is:%d\n",tile_id, gridDim.x);
    int output_idx = pixel_id * 64 * 111 * 111 + tile_id * 64 + offset;
    //if(output_idx > (64*111*111*16-1)) printf("bound wrong");
    
    // Column pass: apply the same B^T coefficients across the four tile
    // columns (selected by threadIdx.x/64) to finish (B^T*d)*B.
    switch(threadIdx.x / 64)
    {
        case 0: // column 0: c0 - c2
            output[output_idx] = BTd[threadIdx.y][threadIdx.x] - BTd[threadIdx.y][threadIdx.x + 2*64]; 
            break;
        case 1: // column 1: c1 + c2
            output[output_idx] = BTd[threadIdx.y][threadIdx.x] + BTd[threadIdx.y][threadIdx.x + 64];
            break;
        case 2: // column 2: c2 - c1
            output[output_idx] = BTd[threadIdx.y][threadIdx.x] - BTd[threadIdx.y][threadIdx.x - 64];
            break;
        case 3: // column 3: c1 - c3
            output[output_idx] = -BTd[threadIdx.y][threadIdx.x] + BTd[threadIdx.y][threadIdx.x - 2*64];
            break;
    }
    
    
}


// Winograd F(2x2,3x3) kernel transform: computes G * g * G^T for every 3x3
// filter g, with
//   G = [[1,0,0],[0.5,0.5,0.5],[0.5,-0.5,0.5],[0,0,1]],
// scattering the 4x4 result into 16 element matrices laid out as
// [16][Outputfeature_num filters][64 input channels].
// Expected launch: grid (64,1) = one block per output feature, block (256,4):
// threadIdx.y = row of G, threadIdx.x/64 = filter row (pass 1) / column
// (pass 2), threadIdx.x%64 = input channel.
// NOTE(review): the strides 64 and 192 are hard-coded, so this only handles
// Inputfeature_num = 64 and 3x3 filters.
__global__ void kernel_transform(float *input, float *output, int Outputfeature_num, int Inputfeature_num)
{
    int element_row_id = threadIdx.x / 64; // filter row 0..2; value 3 idles in the row pass
    int offset = threadIdx.x % 64;         // input channel
    // Row-pass result G*g: [G row][filter column * 64 + channel].
    __shared__ float Gg[4][192];
    if(element_row_id < 3) // g has only 3 rows; the 4th thread group skips the row pass
    {
        // in_idx walks the [K][3][3][C] filter array; +/-192 steps whole filter rows.
        int in_idx = blockIdx.x * 64 * 9 + threadIdx.y * 192 + element_row_id * 64 + offset;
        switch(threadIdx.y)
        {
            case 0: // G row 0: g0
                Gg[threadIdx.y][threadIdx.x] = input[in_idx];
                break;
            case 1: // G row 1: (g0 + g1 + g2) / 2
                Gg[threadIdx.y][threadIdx.x] = (input[in_idx] + input[in_idx - 192] + input[in_idx + 192]) / 2;
                break;
            case 2: // G row 2: (g0 - g1 + g2) / 2
                Gg[threadIdx.y][threadIdx.x] = (input[in_idx] + input[in_idx - 192*2] - input[in_idx - 192]) / 2;
                break;
            case 3: // G row 3: g2
                Gg[threadIdx.y][threadIdx.x] = input[in_idx - 192];
                break;
        }
    }

    __syncthreads(); // outside the divergent if above, so every thread reaches it
    // Column pass (G*g)*G^T; element index within the 16 = threadIdx.y*4 + element_row_id.
    int to_idx = blockIdx.x * 64 + offset + (element_row_id  +  threadIdx.y * 4) * 64 * Outputfeature_num;
    switch(element_row_id)
    {
        case 0: // column 0: c0
            output[to_idx] = Gg[threadIdx.y][threadIdx.x];
            break;
        case 1: // column 1: (c0 + c1 + c2) / 2
            output[to_idx] = (Gg[threadIdx.y][threadIdx.x] + Gg[threadIdx.y][threadIdx.x - 64] + Gg[threadIdx.y][threadIdx.x + 64]) / 2;
            break;
        case 2: // column 2: (c0 - c1 + c2) / 2
            output[to_idx] = (Gg[threadIdx.y][threadIdx.x] + Gg[threadIdx.y][threadIdx.x - 64*2] - Gg[threadIdx.y][threadIdx.x - 64]) / 2;
            break;
        case 3: // column 3: c2
            output[to_idx] = Gg[threadIdx.y][threadIdx.x - 64];
            break;
    }

}

// Batched 32x32-tiled GEMM over the 16 Winograd element matrices:
//   output[m][tile][k] = sum_c feature_map[m][tile][c] * kernel[m][k][c]
// i.e. C = V * U^T with V: [12321 tiles x 64 channels] (from input_transform)
// and U: [64 filters x 64 channels] (from kernel_transform).
// Expected launch: block (32,32); blockIdx.x encodes (mat_id, half): each
// element matrix's 64 output columns are split into two 32-column halves
// (group_offset); blockIdx.y tiles the 12321 rows, 32 per block.
__global__ void Batch_GEMM(float *feature_map, float *kernel, float *output)
{
    int iy = blockDim.y * blockIdx.y + threadIdx.y; // tile (row) index, valid when < 12321
    int mat_id = blockIdx.x / 2;        // which of the 16 element matrices
    int group_offset = blockIdx.x % 2;  // which 32-column half of the output
    int i, k;
    float temp_value = 0.0f;
    int m_in_start = mat_id * 12321 * 64 + iy * 64 + threadIdx.x;
    int n_in_start = mat_id * 64 * 64 + group_offset * 32 * 64 + threadIdx.y * 64 + threadIdx.x;
    int m_in_idx, n_in_idx;
    int to_idx = mat_id * 12321 * 64 + iy * 64 + group_offset * 32 + threadIdx.x;
    int in_bounds = (iy < 12321);

    __shared__ float M[32][32]; // tile of V: rows = output tiles, cols = channels
    __shared__ float N[32][32]; // tile of U: rows = filters, cols = channels

    for(k = 0; k < 2; ++k) // two 32-wide steps cover the 64-channel reduction
    {
        m_in_idx = k * 32 + m_in_start;
        n_in_idx = k * 32 + n_in_start;
        // BUGFIX: only the global loads/stores are guarded by the row bound.
        // The original guarded the whole loop, so the block straddling row
        // 12321 executed __syncthreads() divergently (undefined behavior).
        // Out-of-range rows load zeros; their rows of M are never read by
        // in-bounds threads anyway.
        M[threadIdx.y][threadIdx.x] = in_bounds ? feature_map[m_in_idx] : 0.0f;
        N[threadIdx.y][threadIdx.x] = kernel[n_in_idx];

        __syncthreads();
        for(i = 0; i < 32; ++i)
        {
            // BUGFIX: the filter row of U is selected by the OUTPUT COLUMN
            // (threadIdx.x), not the output row. The old N[threadIdx.y][i]
            // only produced correct results because the constant test data
            // makes every row of N identical.
            temp_value += M[threadIdx.y][i] * N[threadIdx.x][i];
        }
        __syncthreads();
    }
    if(in_bounds)
        output[to_idx] = temp_value;
}

// Winograd F(2x2,3x3) output transform: computes A^T * m * A with
//   A^T = [[1,1,1,0],[0,1,-1,-1]]
// for every tile, collapsing the 16 element matrices
// [16][12321 tiles][64 filters] into the final 222x222x64 output
// (2x2 output pixels per tile).
// Expected launch: grid (111,111) = one block per tile, block (256,2):
// threadIdx.y = row of A^T, threadIdx.x/64 = tile column, threadIdx.x%64 = filter.
__global__ void output_transform(float *input, float *output)
{
    int bix = threadIdx.y * blockDim.x + threadIdx.x; // flat thread index within the block
    int tile_id = blockIdx.y * gridDim.x + blockIdx.x; //gridDim.x = 111
    int element_id = bix / 64;   // base element: column (element_id%4) of m-row 0 (ty=0) or row 1 (ty=1)
    int group_offset = bix % 64; // filter (output channel) index

    // Row-pass result A^T*m: [A^T row][tile column * 64 + filter].
    __shared__ float AT[2][256];

    int in_start =  tile_id * 64 + element_id * 12321 * 64;
    // Row pass: offsets +4/+8 (x12321x64) step down the rows of the 4x4 tile.
    switch(threadIdx.y)
    {
        case 0: // A^T row 0 = [1,1,1,0]: m0 + m1 + m2
            AT[threadIdx.y][threadIdx.x] = input[in_start + group_offset] + input[in_start + 4*12321*64 + group_offset] + input[in_start + 8*12321*64 + group_offset];
            break;
        case 1: // A^T row 1 = [0,1,-1,-1]: m1 - m2 - m3 (element_id starts at 4 here)
            AT[threadIdx.y][threadIdx.x] = input[in_start + group_offset] - input[in_start + 4*12321*64 + group_offset] - input[in_start + 8*12321*64 + group_offset];
            break;
    }

    __syncthreads();
    int element_row_id = threadIdx.x / 64;
    if(element_row_id < 2) // only 2 of the 4 columns produce output pixels
    {
        int to_idx = (blockIdx.y * blockDim.y + threadIdx.y) *222*64 + (blockIdx.x * 2 + element_row_id) * 64 + group_offset; 
        switch(element_row_id)
        {
            case 0: // output column 0: c0 + c1 + c2
                output[to_idx] = AT[threadIdx.y][threadIdx.x] + AT[threadIdx.y][threadIdx.x+64] + AT[threadIdx.y][threadIdx.x+64*2];
                break;
            case 1: // output column 1: c1 - c2 - c3
                // BUGFIX: the last term must be SUBTRACTED -- A^T row 1 is
                // [0,1,-1,-1], exactly as applied in the row pass above. The
                // old "+" went unnoticed because the constant test data makes
                // tile column 3 all zeros.
                output[to_idx] = AT[threadIdx.y][threadIdx.x] - AT[threadIdx.y][threadIdx.x+64] - AT[threadIdx.y][threadIdx.x+64*2];
                break;
        }
    }
}

// Benchmarks cuDNN's Winograd convolution against the hand-written pipeline:
// 64-channel 224x224 input, 64 3x3 filters, no padding -> 64x222x222 output.
int main(int argc, char *argv[])
{
    float *input = (float *)malloc(sizeof(float) * inputsize);
    float *kernel = (float *)malloc(sizeof(float) * kernelsize);
    float *cudnn_host_output = (float *)malloc(sizeof(float) * outputsize);
    float *winograd_host_output = (float *)malloc(sizeof(float) * outputsize);
    if (input == NULL || kernel == NULL || cudnn_host_output == NULL || winograd_host_output == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        exit(1);
    }

    // Constant test pattern: every valid output element should equal
    // 2.0 * 0.5 * (3*3*64) = 576 (see checkresult).
    int i;
    double istart, ielaps;
    for(i = 0; i < inputsize; ++i)
        input[i] = 2.0;
    for(i = 0; i < kernelsize; ++i)
        kernel[i] = 0.5;

    // NOTE(review): device id is hard-coded; left unchecked on purpose so the
    // program still runs (on the default device) on machines with fewer GPUs.
    cudaSetDevice(3);

    // ---------------- cuDNN reference path ----------------
    float alpha = 1.0, beta = 0.0;
    float *cudnninput, *cudnnkernel, *cudnnoutput;
    CHECK(cudaMalloc((void **)&cudnninput, sizeof(float)*inputsize));
    CHECK(cudaMalloc((void **)&cudnnkernel, sizeof(float)*kernelsize));
    CHECK(cudaMalloc((void **)&cudnnoutput, sizeof(float)*outputsize));
    CHECK(cudaMemcpy(cudnninput,input,sizeof(float)*inputsize,cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(cudnnkernel,kernel,sizeof(float)*kernelsize,cudaMemcpyHostToDevice));

    size_t size;
    cudnnHandle_t handle;
    checkCUDNN(cudnnCreate(&handle));

    cudnnTensorDescriptor_t xdesc, ydesc;
    cudnnFilterDescriptor_t wdesc;
    checkCUDNN(cudnnCreateTensorDescriptor(&xdesc));
    checkCUDNN(cudnnSetTensor4dDescriptor(xdesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 64, 224, 224));
    checkCUDNN(cudnnCreateTensorDescriptor(&ydesc));
    checkCUDNN(cudnnSetTensor4dDescriptor(ydesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 64, 222, 222));
    checkCUDNN(cudnnCreateFilterDescriptor(&wdesc));
    checkCUDNN(cudnnSetFilter4dDescriptor(wdesc, CUDNN_DATA_FLOAT,CUDNN_TENSOR_NCHW, 64, 64, 3, 3));//if NHWC the shape is: output, input, H,W
    cudnnConvolutionDescriptor_t conv_desc;
    checkCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc));
    // No padding, stride 1, dilation 1, cross-correlation (the usual CNN "convolution").
    checkCUDNN(cudnnSetConvolution2dDescriptor(conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
    cudnnConvolutionFwdAlgo_t algo = CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD;

    checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(handle,
        xdesc,
        wdesc,
        conv_desc,
        ydesc,
        algo,
        &size));

    float *extra;
    CHECK(cudaMalloc((void **) &extra, size));

    istart = cpusecond();
    checkCUDNN(cudnnConvolutionForward(handle, &alpha,
        xdesc, cudnninput, wdesc, cudnnkernel,
        conv_desc, algo,
        extra, size, &beta,
        ydesc, cudnnoutput));

    CHECK(cudaDeviceSynchronize());
    ielaps = cpusecond() - istart;
    printf("Cudnn kernel time is: %f\n",ielaps);

    CHECK(cudaMemcpy(cudnn_host_output,cudnnoutput,sizeof(float)*outputsize,cudaMemcpyDeviceToHost));

    checkCUDNN(cudnnDestroy(handle));
    checkCUDNN(cudnnDestroyTensorDescriptor(xdesc));
    checkCUDNN(cudnnDestroyTensorDescriptor(ydesc));
    checkCUDNN(cudnnDestroyFilterDescriptor(wdesc));
    checkCUDNN(cudnnDestroyConvolutionDescriptor(conv_desc));
    CHECK(cudaFree(cudnninput));
    CHECK(cudaFree(cudnnkernel));
    CHECK(cudaFree(cudnnoutput));
    CHECK(cudaFree(extra));

    // ---------------- hand-written Winograd path ----------------
    float *con_input, *t_input, *con_kernel, *t_kernel, *batch_gemm, *con_output;

    CHECK(cudaMalloc((void **)&con_input,sizeof(float)*inputsize));      // 64 x 224 x 224
    CHECK(cudaMalloc((void **)&t_input,sizeof(float)*64*111*111*16));    // 16 element matrices, [111*111 tiles][64 ch]
    CHECK(cudaMalloc((void **)&con_kernel,sizeof(float)*kernelsize));    // 64 x 64 x 3 x 3
    CHECK(cudaMalloc((void **)&t_kernel,sizeof(float)*64*64*4*4));       // 16 element matrices, [64 K][64 C]
    CHECK(cudaMalloc((void **)&batch_gemm,sizeof(float)*111*111*64*16)); // 16 element matrices, [12321 tiles][64 K]
    CHECK(cudaMalloc((void **)&con_output,sizeof(float)*outputsize));    // 64 x 222 x 222

    CHECK(cudaMemcpy(con_input,input,sizeof(float)*inputsize,cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(con_kernel,kernel,sizeof(float)*kernelsize,cudaMemcpyHostToDevice));

    istart = cpusecond();
    input_transform<<<dim3(111,111),dim3(256,4)>>>(con_input,t_input,64,224,224);
    kernel_transform<<<dim3(64,1),dim3(256,4)>>>(con_kernel,t_kernel,64,64);
    // BUGFIX: 12321 GEMM rows with blockDim.y = 32 need ceil(12321/32) = 386
    // blocks in y. The original launched (12321+3)/4 = 3081 blocks, an 8x
    // over-launch whose excess blocks did no useful work.
    Batch_GEMM<<<dim3(32,(12321+31)/32),dim3(32,32)>>>(t_input,t_kernel,batch_gemm);
    output_transform<<<dim3(111,111),dim3(256,2)>>>(batch_gemm,con_output);
    CHECK(cudaGetLastError());       // catch launch-configuration errors
    CHECK(cudaDeviceSynchronize());  // catch asynchronous in-kernel faults
    ielaps = cpusecond() - istart;
    printf("winograd convolution time is: %f\n",ielaps);

    CHECK(cudaMemcpy(winograd_host_output,con_output,sizeof(float)*outputsize,cudaMemcpyDeviceToHost));
    //check result
    //checkresult(winograd_host_output,outputsize);

    CHECK(cudaFree(con_input));
    CHECK(cudaFree(con_kernel));
    CHECK(cudaFree(t_input));
    CHECK(cudaFree(t_kernel));
    CHECK(cudaFree(batch_gemm));
    CHECK(cudaFree(con_output));

    free(input);
    free(kernel);
    free(cudnn_host_output);
    free(winograd_host_output);
    return 0;
}
