#include <cuda_runtime.h>
#include <cuda_fp16.h>

#include <float.h>
#include <stdio.h>
#include <stdlib.h>

#include <iostream>
#include <type_traits>

#define DTYPE float
#define ROWS 1024
#define COLS 1024
#define BLOCKSIZE 32
#define USE_VECTOR false


// Wraps a CUDA runtime call so failures report the expression, file and line.
#define Check_Cuda_Runtime(op) __check_cuda_runtime((op),#op,__FILE__,__LINE__)

// Prints a diagnostic and aborts when `code` is not cudaSuccess; no-op otherwise.
// Aborting matters: CUDA errors are sticky, so continuing after one makes every
// later API call fail with a misleading message.
void __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line){
    if(code != cudaSuccess){
        // Original printed to stdout with a typo-ridden, unspaced, newline-less
        // message and then kept running; report properly on stderr and stop.
        fprintf(stderr, "CUDA error at %s:%d  op: %s  name: %s  string: %s\n",
                file, line, op, cudaGetErrorName(code), cudaGetErrorString(code));
        abort();
    }
}




// Fill A[0..total_elements) with the ascending sequence 0, 1, 2, ... cast to T.
template<typename T>
void host_data_init(T* A,int total_elements){
    int idx = 0;
    while(idx < total_elements){
        A[idx] = static_cast<T>(idx);
        ++idx;
    }
}

// Specialization for half: route the index through float, then narrow to half.
template<>
void host_data_init(half* A,int total_elements){
    int n = total_elements;
    for(int k = 0; k != n; ++k){
        float value = static_cast<float>(k);
        A[k] = __float2half(value);
    }
}


// Element-wise C = A + B over a flat array of total_elements values.
// Expects a 1D launch with at least total_elements threads. With use_vector,
// each thread handles 4 consecutive elements via one 128-bit load/store;
// the vector path assumes A, B, C are 16-byte aligned (true for cudaMalloc bases).
template<typename T>
__global__ void cuda_matrix_add(T* A,T* B,T* C,int total_elements,bool use_vector){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if(use_vector){
        idx *= 4;
        // The original guard was `idx < total_elements`, but this path touches
        // idx..idx+3 — an out-of-bounds access whenever total_elements is not
        // a multiple of 4. Guard the full 4-wide span instead.
        if(idx + 3 < total_elements){
            using Vec4 = typename std::conditional<std::is_same<T,float>::value,float4,int4>::type;
            Vec4 vec_a = *reinterpret_cast<Vec4*>(&A[idx]);
            Vec4 vec_b = *reinterpret_cast<Vec4*>(&B[idx]);
            Vec4 vec_c;
            vec_c.x = vec_a.x + vec_b.x;
            vec_c.y = vec_a.y + vec_b.y;
            vec_c.z = vec_a.z + vec_b.z;
            vec_c.w = vec_a.w + vec_b.w;
            *reinterpret_cast<Vec4*>(&C[idx]) = vec_c;
        }else{
            // Tail: fewer than 4 elements remain for this thread — finish them
            // scalarly so sizes not divisible by 4 are still fully computed.
            // At most one thread runs this loop with a non-empty range (<=3 iters).
            for(int k = idx; k < total_elements; ++k){
                C[k] = A[k] + B[k];
            }
        }
    }else{
        if(idx<total_elements){
            C[idx] = A[idx]+B[idx];
        }
    }
}

// half specialization: scalar path uses __hadd; vector path processes 2
// consecutive elements per thread as a half2 (requires 4-byte alignment,
// satisfied by cudaMalloc bases at even indices).
template<>
__global__ void cuda_matrix_add(half* A,half* B,half* C,int total_elements,bool use_vector){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if(use_vector){
        // BUG FIX: the original checked `idx < total_elements` and THEN scaled
        // idx by 2, so threads in the upper half of the range read/wrote up to
        // 2*total_elements-2 — past the end of every buffer. Scale first, then
        // guard the full 2-wide span.
        idx *= 2;
        if(idx + 1 < total_elements){
            half2 vec_a = __ldg(reinterpret_cast<const half2*>(&A[idx]));
            half2 vec_b = __ldg(reinterpret_cast<const half2*>(&B[idx]));
            *reinterpret_cast<half2*>(&C[idx]) = __hadd2(vec_a, vec_b);
        }else if(idx < total_elements){
            // Odd-length tail: exactly one element left for this thread.
            C[idx] = __hadd(A[idx], B[idx]);
        }
    }else{
        if(idx<total_elements){
            C[idx] = __hadd(A[idx],B[idx]);
        }
    }
}

// Host-side launcher: ceil-divides so the final partial block is covered,
// launching one thread per element. In vector mode surplus threads simply fail
// the kernel's bounds check — sizing by elements (not by vector width) keeps
// this single launcher correct for both the 4-wide float/int kernel and the
// 2-wide half kernel, at the cost of some idle threads.
template<typename T>
void launch_cuda_matrix_add(T* A,T* B,T* C,int total_elements,int blocksize,bool use_vector){
    int gridsize = (total_elements + blocksize - 1) / blocksize;
    cuda_matrix_add<T><<<gridsize,blocksize>>>(A,B,C,total_elements,use_vector);
    // Kernel launches do not return a status; bad launch configurations only
    // surface through cudaGetLastError(), which the original never called.
    Check_Cuda_Runtime(cudaGetLastError());
}

// CPU reference implementation: element-wise C = A + B.
// Each slot is independent, so iteration order is irrelevant.
template<typename T>
void launch_cpu_matrx_add(T* A,T* B,T* C,int total_elements){
    for(int i = total_elements; i-- > 0; ){
        C[i] = A[i] + B[i];
    }
}

// CPU reference for half: widen both operands to float, add, then narrow the
// sum back to half — matching the original's float-accumulated reference.
template<>
void launch_cpu_matrx_add(half* A,half* B,half* C,int total_elements){
    for(int i = 0; i < total_elements; ++i){
        float lhs = __half2float(A[i]);
        float rhs = __half2float(B[i]);
        C[i] = __float2half(lhs + rhs);
    }
}


// Compare GPU result A against CPU reference B element-wise and report.
// NOTE: 1e-5 is an ABSOLUTE tolerance — exact for the integer-valued sums this
// demo produces, but too strict for large float results where rounding alone
// can exceed it; a relative term would be needed for general data.
template<typename T>
void check_cuda_cpu_result(T* A,T* B,int total_elements){
    int mismatches = 0;
    int first_bad = -1;
    for(int m=0;m<total_elements;++m){
        // Cast to double so fabs is unambiguous for integer T as well.
        if(fabs((double)A[m]-(double)B[m])>1e-5){
            if(first_bad < 0) first_bad = m;
            ++mismatches;
        }
    }
    if(mismatches == 0){
        printf("cuda and cpu get the same result!\n");
    }else{
        // The original printed NOTHING on failure (the message was commented
        // out), so a broken kernel looked identical to success-with-no-output.
        printf("mismatch: %d of %d elements differ (first at idx %d: A=%f B=%f)\n",
               mismatches, total_elements, first_bad,
               (double)A[first_bad], (double)B[first_bad]);
    }
}



// half specialization: compare after widening both sides to float.
// Same absolute-tolerance caveat as the generic version: 1e-5 is far below
// half precision, so only bit-identical results pass for non-tiny values.
template<>
void check_cuda_cpu_result(half* A,half* B,int total_elements){
    int mismatches = 0;
    int first_bad = -1;
    for(int n=0;n<total_elements;++n){
        if(fabs(__half2float(A[n])-__half2float(B[n]))>1e-5){
            if(first_bad < 0) first_bad = n;
            ++mismatches;
        }
    }
    if(mismatches == 0){
        printf("cuda and cpu get the same result!\n");
    }else{
        // The original was silent on failure; report how badly things differ.
        printf("mismatch: %d of %d elements differ (first at idx %d: A=%f B=%f)\n",
               mismatches, total_elements, first_bad,
               __half2float(A[first_bad]), __half2float(B[first_bad]));
    }
}



// Demo driver: fill two ROWS*COLS host buffers, add them on the GPU and on the
// CPU, and compare. DTYPE/BLOCKSIZE/USE_VECTOR are compile-time knobs.
int main(){
    using dtype = DTYPE;
    const int rows = ROWS;
    const int cols = COLS;
    const int blocksize = BLOCKSIZE;
    const int total_elements = rows*cols;
    // size_t avoids int overflow for element counts near 2^31.
    const size_t total_bytes = (size_t)total_elements*sizeof(dtype);
    const bool use_vector = USE_VECTOR;

    // Host buffers: two inputs, the GPU result, and the CPU reference.
    dtype* ha = (dtype*)malloc(total_bytes);
    dtype* hb = (dtype*)malloc(total_bytes);
    dtype* hc = (dtype*)malloc(total_bytes);
    dtype* hc_ = (dtype*)malloc(total_bytes);
    if(ha == NULL || hb == NULL || hc == NULL || hc_ == NULL){
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    host_data_init(ha,total_elements);
    host_data_init(hb,total_elements);

    dtype* da;
    dtype* db;
    dtype* dc;
    Check_Cuda_Runtime(cudaMalloc((void**)&da,total_bytes));
    Check_Cuda_Runtime(cudaMalloc((void**)&db,total_bytes));
    Check_Cuda_Runtime(cudaMalloc((void**)&dc,total_bytes));
    Check_Cuda_Runtime(cudaMemcpy(da,ha,total_bytes,cudaMemcpyHostToDevice));
    Check_Cuda_Runtime(cudaMemcpy(db,hb,total_bytes,cudaMemcpyHostToDevice));

    launch_cuda_matrix_add<dtype>(da,db,dc,total_elements,blocksize,use_vector);
    // Launches are asynchronous: surface bad-config errors immediately and
    // in-kernel faults at the sync, instead of letting them poison later calls.
    Check_Cuda_Runtime(cudaGetLastError());
    Check_Cuda_Runtime(cudaDeviceSynchronize());
    // The original left this D2H copy unchecked.
    Check_Cuda_Runtime(cudaMemcpy(hc,dc,total_bytes,cudaMemcpyDeviceToHost));

    launch_cpu_matrx_add<dtype>(ha,hb,hc_,total_elements);
    check_cuda_cpu_result<dtype>(hc,hc_,total_elements);

    free(ha);
    free(hb);
    free(hc);
    free(hc_);
    Check_Cuda_Runtime(cudaFree(da));
    Check_Cuda_Runtime(cudaFree(db));
    Check_Cuda_Runtime(cudaFree(dc));

    // The original returned -1, which reports failure to the shell even when
    // the results matched.
    return 0;
}