#include <cuda_runtime.h>
#include <iostream>
#include <stdio.h>


#define WARPSIZE 32

// Wraps a CUDA runtime call, reporting any failure with call-site context.
#define Check_Cuda_Runtime(op) __check_cuda_runtime((op),#op,__FILE__,__LINE__)

// Prints a diagnostic when a CUDA runtime call fails.
//   code: cudaError_t returned by the wrapped call
//   op:   stringified source text of the call (from the macro)
//   file/line: call site captured by __FILE__/__LINE__
// Note: __LINE__ expands to an int, so `line` must be int — the original
// declared it const char* and printed it with %d, which is undefined behavior.
void __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line){
    if(code != cudaSuccess){
        const char* errorname = cudaGetErrorName(code);
        // was cudaGetErrorName twice; cudaGetErrorString gives the description
        const char* errorstr = cudaGetErrorString(code);
        printf("cuda error happen at file %s line %d, op %s, errorname %s, errorstr %s\n",
               file, line, op, errorname, errorstr);
    }
}


// Tree reduction over the 32 lanes of a warp via shuffle-down.
// On return, lane 0 holds the sum of all lanes' inputs; other lanes
// hold partial sums. All 32 lanes must be active (full 0xffffffff mask).
__inline__ __device__ float cuda_warp_sum(float val){
    #pragma unroll
    for(int delta = WARPSIZE >> 1; delta > 0; delta >>= 1)
        val += __shfl_down_sync(0xffffffff, val, delta);
    return val;
}


// Sums `val` across all threads of a 1-D block.
// Strategy: each warp reduces its 32 lanes with cuda_warp_sum, lane 0 of
// each warp stages the partial in shared memory, then warp 0 reduces the
// per-warp partials. Only warp 0 (thread 0 in particular) returns the full
// block sum; callers should consume the value from threadIdx.x == 0.
// Requires blockDim.x <= WARPSIZE*WARPSIZE (1024 threads).
__inline__ __device__ float cuda_block_sum(float val){
    static __shared__ float share_data[WARPSIZE]; // one slot per warp
    int warpid = threadIdx.x / WARPSIZE;
    int laneid = threadIdx.x % WARPSIZE;

    // BUG FIX: was `val += cuda_warp_sum(val)`, which added each lane's own
    // value on top of the warp sum and double-counted it.
    val = cuda_warp_sum(val);

    if(laneid == 0){
        share_data[warpid] = val; // lane 0 publishes this warp's partial sum
    }
    // BUG FIX: was `__syncthreads;` (no parentheses) — an expression statement
    // that did nothing, leaving a race between the writes above and reads below.
    __syncthreads();

    // Ceil-div so blocks whose size is not a multiple of WARPSIZE still pick
    // up the partial of the final, partially-filled warp.
    int nwarps = (blockDim.x + WARPSIZE - 1) / WARPSIZE;
    val = (threadIdx.x < nwarps) ? share_data[laneid] : 0.f;
    if(warpid == 0){
        // BUG FIX: was `val +=` again — double-counted warp 0's partial.
        val = cuda_warp_sum(val);
    }
    return val;
}


// Computes softmax over `a` into `b`: b[i] = exp(a[i]) / sum_j exp(a[j]).
// Launch with a 1-D grid/block covering total_elements.
// Precondition: *total must be a device float zero-initialized before launch
// (it accumulates the global sum of exponentials via atomicAdd).
//
// NOTE(review): there is no grid-wide barrier between each block's atomicAdd
// into *total and the read of *total below, so with more than one block some
// threads may divide by an incomplete sum. For guaranteed correctness, split
// this into two launches (accumulate, then normalize) or use a cooperative
// launch with grid.sync(). Also note no max-subtraction is done, so expf can
// overflow for large inputs — TODO confirm input range with callers.
__global__ void softmax(float* a, float* b, float* total, int total_elements){
    int tid = threadIdx.x;
    int idx = blockDim.x * blockIdx.x + tid;

    // Out-of-range threads contribute 0 to the sum but must still enter
    // cuda_block_sum, which performs a block-wide barrier.
    float exp_x = (idx < total_elements) ? expf(a[idx]) : 0.f;

    float block_result = cuda_block_sum(exp_x); // full sum valid in thread 0
    if(tid == 0){
        atomicAdd(total, block_result); // one atomic per block, not per thread
    }
    // BUG FIX: was `__threadfence;` (no parentheses) — a no-op statement.
    __threadfence();

    if(idx < total_elements){
        b[idx] = exp_x / (*total);
    }
}