#define pi 3.1415926
#define gaussian_num 20
#define shared_size 1024

// Weighted 1-D Gaussian density: weight * N(x; mu, sigma).
// All arithmetic is kept in single precision: the original used the double
// literal 2.0 and the double `pi` macro, which promoted every intermediate
// to double — a large slowdown on most consumer GPUs for no accuracy gain
// in a float-returning function.
__device__ float normal(float mu, float sigma, float weight, float x)
{
    const float d = x - mu;
    const float coeff = weight / (sqrtf(2.0f * (float)pi) * sigma);
    return coeff * expf(-(d * d) / (2.0f * sigma * sigma));
}

// First-stage GMM E-step + per-block reduction.
// For each sample, per Gaussian component i, this computes:
//   gamma_i = responsibility of component i for the sample
//   mu_i    = gamma_i * sample                    (numerator of the mu update)
//   sigma_i = gamma_i * (sample - mu_i_old)^2     (numerator of the sigma update)
// then tree-reduces each quantity over the block and writes one partial sum
// per (block, component) to params[j][blockIdx.x * gaussian_num + i].
//
// Launch assumptions (NOTE(review): confirm against the host launcher):
//   - blockDim.x is a power of two and <= shared_size (the s = blockDim.x/2
//     reduction drops elements otherwise)
//   - gridDim.x * blockDim.x == number of samples (there is no tail guard
//     on the sample[] read)
__global__ void reduce0(float *mu, float *sigma, float *weight, float *sample, float *gamma_out,
                        float *mu_out,float *sigma_out) {
    // Stage the per-component (mu, sigma, weight) triplets in shared memory
    // so the per-sample loops below do not re-read them from global memory.
    __shared__ float3 mu_sigma_weight[gaussian_num];

    unsigned int tid = threadIdx.x;

    if (tid < gaussian_num)
    {
        mu_sigma_weight[tid].x = mu[tid];
        mu_sigma_weight[tid].y = sigma[tid];
        mu_sigma_weight[tid].z = weight[tid];
    }

    __syncthreads();

    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    float _sample = sample[index];

    // Compute responsibilities: weighted densities, normalized by their sum.
    float _gamma[gaussian_num];
    float _mu[gaussian_num];
    float _sigma[gaussian_num];
    float sum = 0.0f;  // float literal — the original 0.0 forced double math
    for (unsigned int i = 0; i < gaussian_num; ++i) {
        _gamma[i] = normal(mu_sigma_weight[i].x, mu_sigma_weight[i].y, mu_sigma_weight[i].z, _sample);
        sum += _gamma[i];
    }

    // Guard against all densities underflowing to zero: the original divided
    // by sum unconditionally and produced NaNs in that case. When sum == 0
    // every _gamma[i] is already 0, so dividing by 1 keeps them 0.
    const float safe_sum = (sum > 0.0f) ? sum : 1.0f;
    for (unsigned int i = 0; i < gaussian_num; ++i) {
        _gamma[i] = _gamma[i] / safe_sum;
        _mu[i] = _gamma[i] * _sample;
        const float d = _sample - mu_sigma_weight[i].x;  // hoist the repeated subexpression
        _sigma[i] = _gamma[i] * d * d;
    }

    // Block-wide tree reduction in shared memory (sequential addressing).
    __shared__ float sdata[shared_size];
    // Output pointers and matching per-thread source arrays, iterated in lockstep.
    float *params[3] = {gamma_out, mu_out, sigma_out};
    float *_gamma_mu_sigma[3] = {_gamma, _mu, _sigma};
    // Loop over the three output quantities ...
    for (unsigned int j = 0; j < 3; ++j)
    {
        // ... and over every Gaussian component.
        for (unsigned int i = 0; i < gaussian_num; ++i) {
            sdata[tid] = _gamma_mu_sigma[j][i];
            __syncthreads();
            // Halving-stride accumulation; the barrier sits outside the
            // divergent if so every thread in the block reaches it.
            for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
                if (tid < s) {
                    sdata[tid] += sdata[tid + s];
                }
                __syncthreads();
            }

            // One partial per (block, component).
            if (tid == 0) params[j][blockIdx.x * gaussian_num + i] = sdata[0];
        }
    }

}

//sequential addressing(连续寻址,就是合并访问)
__global__ void reduce1(float *gamma_in,float *mu_in,float *sigma_in,float *gamma_out,float *mu_out,float *sigma_out) {
        // do reduction in shared mem
        __shared__ float sdata[shared_size];
        //定义
        float *params[3] = {gamma_out,mu_out,sigma_out};
        float *_gamma_mu_sigma[3] = {gamma_in,mu_in,sigma_in};

        unsigned int tid = threadIdx.x;

        //循环输出
        for (unsigned int j=0; j<3; ++j)
        {
            //循环高斯参数
            for (unsigned int i=0; i<gaussian_num; ++i) {
                sdata[tid] = _gamma_mu_sigma[j][i];
                __syncthreads();
                //循环累加
                for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
                    if (tid < s) {
                        sdata[tid] += sdata[tid + s];
                    }
                    __syncthreads();
                }

                if (tid == 0) params[j][blockIdx.x*gaussian_num + i] = sdata[0];
            }
        }
}