#include <cmath>
#include <iostream>

#include <cuda_runtime.h>

#include "../MyTimer.h"

// Allocate a host buffer of `len` floats filled with 0, 1, 2, ..., len-1.
// The caller owns the returned buffer and must release it with delete[].
float* init(int len)
{
    float* buf = new float[len];
    int i = 0;
    while (i < len)
    {
        buf[i] = static_cast<float>(i);
        ++i;
    }
    return buf;
}

// CPU reference sum of f[0..len-1].
//
// Accumulates in double: for len = 1 << 20 the total (~5.5e11) exceeds the
// 24-bit integer precision of float, so a float running sum drifts badly and
// makes a poor correctness reference for the GPU kernels.  Returns the total
// rounded back to float; returns 0 for len <= 0.
float sum_common(float *f, int len)
{
    double result = 0.0;
    for(int i = 0; i < len; i++)
        result += f[i];
    return (float)result;
}

// Neighbored-pair reduction: leaves the sum of f[0..len-1] in f[0].
// Assumes a single block of len/2 threads (<<<1, len/2>>>) and len a power
// of two, which caps usable len at 2048 (1024-thread block limit).
__global__ void cuda_sum1(float *f, int len){
    int thread_id = threadIdx.x;
    int my_pos = thread_id * 2;
    // Warp divergence is inherent here: each round halves the set of active
    // threads (thread_id % i == 0); the idle ones just wait at the barrier.

    /*
    round 1 partial sums live at: 0 1 2 3 4 ...
    round 2 partial sums live at: 0 2 4 6 ...
    round 3 partial sums live at: 0 4 8 ...
    */
    // The last useful stride is len/2 (thread 0 folds f[len/2] into f[0]),
    // so iterate while i < len.  The original condition `i <= len` ran one
    // extra round in which thread 0 read f[len] — out of bounds.
    for(int i = 1; i < len; i *= 2)
    {
        if(thread_id % i == 0)
            f[my_pos] = f[my_pos] + f[my_pos + i];
        // Outside the divergent if, so every thread reaches the barrier.
        __syncthreads();
    }

}

// Sequential-addressing ("crossed") reduction: leaves the sum of
// f[0..len-1] in f[0].  Assumes a single block of len/2 threads and len a
// power of two, so every stride step has enough threads.
// (The original declared thread_num/warp_id/thread_id locals that were
// never used — removed; thread_num was also misleadingly set to blockIdx.x.)
__global__ void cross_sum(float *f, int len)
{
    int tidx = threadIdx.x;

    for(int stride = len / 2; stride >= 1; stride /= 2)
    {
        if(tidx < stride)
        {
            f[tidx] += f[tidx + stride];
        }
        // The barrier sits outside the if, so every thread of the block
        // reaches it.  Threads that skipped the add simply wait here; the
        // SM keeps other resident warps running in the meantime.
        __syncthreads();
    }
}



// Warp-level reduction: after this call, lane 0 holds the sum of `val`
// across all 32 lanes (other lanes hold partial sums).  The full mask
// 0xffffffff assumes all 32 lanes of the warp participate.
// Fixed: the original spelled the intrinsic `__shfl__down__sync`, which is
// not a CUDA symbol and does not compile; the correct name is
// __shfl_down_sync (SM30+, CUDA 9+ *_sync form).
__inline__ __device__ float warpReduceSum(float val)
{
    // Each step pulls `val` from the lane `delta` positions higher (e.g.
    // lane 0 reads lane 16) and accumulates; the *_sync variants
    // synchronize the participating lanes implicitly.
    val += __shfl_down_sync(0xffffffff, val, 16);
    val += __shfl_down_sync(0xffffffff, val, 8);
    val += __shfl_down_sync(0xffffffff, val, 4);
    val += __shfl_down_sync(0xffffffff, val, 2);
    val += __shfl_down_sync(0xffffffff, val, 1);
    return val;
}


// 8x-unrolled block reduction (tutorial kernel, left unfinished).  Each
// block owns a tile of 8 * blockDim.x elements starting at
// blockIdx.x * blockDim.x * 8.  Phase 1 folds the tile's eight
// blockDim.x-wide slices into the tile's first blockDim.x slots; phase 2
// tree-reduces slots [0, blockDim.x) down to 64 partial sums.
//
// NOTE(review): phase 2 indexes f[thread_id] with no block offset, so the
// kernel is only meaningful for a single-block launch, and the final
// <=64-element step (e.g. via warpReduceSum) was never written — TODO.
//
// Fixed relative to the original: missing `void` return type on the
// __global__ declaration (compile error), a dangling `+` before the
// closing parenthesis of the 8-way sum (syntax error), a dead
// `int stride = blockDim.x * 8;` local shadowed by the loop variable, and
// __syncthreads() placed inside the divergent guard (undefined behavior
// for tail blocks where only some threads pass the bounds check).
__global__ void cross8(float *f, int len)
{
    int thread_id = threadIdx.x;
    int index = blockIdx.x * blockDim.x * 8 + thread_id;

    // Phase 1: purely thread-private work — each thread sums its own 8
    // elements, so no synchronization is needed for the adds themselves.
    // The guard keeps the farthest read (index + 7 * blockDim.x) in range.
    // Only the arithmetic sits inside the guard: the barriers below must
    // be reached by every thread of the block.
    if(index + 7 * blockDim.x < len)
    {
        f[index] = f[index + 0 * blockDim.x] +
                   f[index + 1 * blockDim.x] +
                   f[index + 2 * blockDim.x] +
                   f[index + 3 * blockDim.x] +
                   f[index + 4 * blockDim.x] +
                   f[index + 5 * blockDim.x] +
                   f[index + 6 * blockDim.x] +
                   f[index + 7 * blockDim.x];
    }
    __syncthreads();

    // Phase 2: classic sequential-addressing tree reduction over the first
    // blockDim.x slots, stopping once 64 partials remain (stride > 32).
    for(int stride = blockDim.x / 2; stride > 32; stride >>= 1)
    {
        if(thread_id < stride)
        {
            f[thread_id] += f[thread_id + stride];
        }
        __syncthreads();
    }
}




// Recursive reduction via dynamic parallelism: each block halves its
// blockDim.x-sized slice of g_idata in place, then thread 0 launches a
// child grid on the halved slice, recursing until 2 elements remain; the
// block's total lands in g_odata[blockIdx.x].  Requires relocatable device
// code (-rdc=true) to compile the device-side kernel launch.
__global__ void recursive(int *g_idata, int *g_odata, int isize){
    int tid = threadIdx.x;
    // This block's input slice and its single output slot.
    int *idata = g_idata + blockDim.x * blockIdx.x;
    int *odata = &g_odata[blockIdx.x];

    // Base case: two elements left — thread 0 writes the final sum.
    // NOTE(review): thread 0 returns here while the block's other threads
    // fall through to __syncthreads() below — a divergent barrier; confirm
    // this matches the intended (book-style) pattern on the target arch.
    if(isize == 2 && tid == 0)
    {
        g_odata[blockIdx.x] = idata[0] + idata[1];
        return ;
    }

    // One halving step: fold the upper half onto the lower half.
    int istride = isize >> 1;
    if(istride > 1  && tid < istride)
    {
        idata[tid] += idata[tid + istride];
    }

    __syncthreads();

    // Thread 0 recurses on the halved slice with a single child grid.
    // NOTE(review): device-side cudaDeviceSynchronize() is deprecated and
    // removed in newer toolkits — verify against the target CUDA version.
    if(tid == 0)
    {
        recursive<<<1, istride>>>(idata, odata, istride);
        cudaDeviceSynchronize();
    }

    __syncthreads();

}


// Host driver: builds a test array, computes a CPU reference sum, runs the
// two single-block reduction kernels, and compares results and timings.
//
// Fixed relative to the original: len was 1 << 20, so <<<1, len/2>>>
// requested 524288 threads per block — far over CUDA's 1024-thread cap —
// and both launches failed silently (no error check) while garbage was
// compared.  len is now 2048, the largest size a single 1024-thread block
// can reduce.  Also added: cudaGetLastError() checks after each launch,
// a tolerance-based result comparison (the GPU sums in a different order,
// so bit-exact float equality is not expected), release of device/host
// memory, and removal of the unused blockSize/gridSize locals and the
// double-started timer `t` that produced a meaningless third reading.
int main()
{
    int len = 1 << 11;  // 2048 = 2 * 1024: single-block kernel limit
    float* f = init(len);

    float correct = sum_common(f, len);

    int nBs = sizeof(float) * len;
    float *c_f = nullptr;
    float *c_f2 = nullptr;
    cudaMalloc((void **)&c_f, nBs);
    cudaMalloc((void **)&c_f2, nBs);
    cudaMemcpy(c_f, f, nBs, cudaMemcpyHostToDevice);
    cudaMemcpy(c_f2, f, nBs, cudaMemcpyHostToDevice);
    float r = 0.0f;
    float r2 = 0.0f;

    GPUTimer t;
    t.start();
    cuda_sum1<<<1, len / 2>>>(c_f, len);
    float ms1 = t.stop();
    // Kernel launches return no status; query the runtime explicitly so a
    // bad configuration cannot pass unnoticed.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        std::cerr << "cuda_sum1 launch failed: "
                  << cudaGetErrorString(err) << std::endl;

    GPUTimer t2;
    t2.start();
    cross_sum<<<1, len / 2>>>(c_f2, len);
    float ms2 = t2.stop();
    err = cudaGetLastError();
    if (err != cudaSuccess)
        std::cerr << "cross_sum launch failed: "
                  << cudaGetErrorString(err) << std::endl;

    // Blocking device-to-host copies also synchronize with the kernels.
    cudaMemcpy(&r, c_f, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(&r2, c_f2, sizeof(float), cudaMemcpyDeviceToHost);

    // Relative-tolerance comparison instead of exact float equality.
    float tol = 1e-5f * std::fabs(correct);
    bool ok1 = std::fabs(r - correct) <= tol;
    bool ok2 = std::fabs(r2 - correct) <= tol;

    std::cout << "aa " << ok1 << ok2
              << "\nno_cross: " << ms1 << "cross: " << ms2 << std::endl;

    cudaFree(c_f);
    cudaFree(c_f2);
    delete[] f;
    return 0;
}











