#include "op/reduce/reduce.h"
#include "mm.h"
#include "perf.h"

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>

#include <gtest/gtest.h>

// Abort with file/line context on any failed CUDA runtime call.
// Kernel launches don't return errors directly, so launches are followed
// by CUDA_CHECK(cudaGetLastError()) to surface bad launch configurations.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            std::fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__,        \
                         __LINE__, cudaGetErrorString(err_));               \
            std::abort();                                                   \
        }                                                                   \
    } while (0)

// Benchmark driver: sums 256M doubles (all 1.0) with a serial CPU loop
// (ground truth) and with a two-pass GPU reduction via the project wrapper
// cuda_reduce_sum_double, logging both results for manual comparison.
int main(int argc, char** argv)
{
    const size_t size  = 1024 * 1024 * 256;
    const size_t bytes = size * sizeof(double);

    double* input = (double*)malloc(bytes);
    if (input == nullptr) {
        std::fprintf(stderr, "host allocation of %zu bytes failed\n", bytes);
        return 1;
    }
    for (size_t i = 0; i < size; i++) {
        input[i] = 1.0;
    }

    // Ground truth: serial CPU reduction.
    double output_true = 0.0;
    {
        PERF(CPU_REDUCE);
        // size_t index: an `int` counter would overflow for sizes >= 2^31.
        for (size_t i = 0; i < size; i++) {
            output_true += input[i];
        }
    }

    LOG_INFO("output true: %f", output_true);

    double output = 0.0;
    const int block_size = g_reduceBlockSize;

    // First pass: one partial sum per block (ceil-div covers the tail).
    const int    grid_size1 = (int)((size - 1) / block_size + 1);
    const size_t tbytes     = (size_t)grid_size1 * sizeof(double);
    double* output1 = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&output1, tbytes));

    // Second pass reduces the first-pass partials down to grid_size2 values.
    const int grid_size2 = (grid_size1 - 1) / block_size + 1;
    double* output2 = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&output2, sizeof(double) * grid_size2));

    // Host buffer for the final (small) set of partial sums.
    double* output_cpu = (double*)malloc(sizeof(double) * grid_size2);
    if (output_cpu == nullptr) {
        std::fprintf(stderr, "host allocation for partial sums failed\n");
        free(input);
        return 1;
    }

    // Device copy of the input data.
    double* dptr = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&dptr, bytes));
    {
        PERF(CUDA_REDUCE);
        CUDA_CHECK(cudaMemcpy(dptr, input, bytes, cudaMemcpyHostToDevice));
        // Both kernels run in the default stream, so they execute in order;
        // no cudaDeviceSynchronize() is needed between them. (The original
        // also copied the first-pass partials to the host inside this timed
        // region without ever reading them — that dead 8 MB transfer has
        // been removed so the timing reflects the reduction itself.)
        cuda_reduce_sum_double(dptr, output1, size, grid_size1, block_size);
        CUDA_CHECK(cudaGetLastError());
        cuda_reduce_sum_double(output1, output2, grid_size1, grid_size2, block_size);
        CUDA_CHECK(cudaGetLastError());
        // Blocking D2H copy synchronizes with the kernels above.
        CUDA_CHECK(cudaMemcpy(output_cpu, output2, sizeof(double) * grid_size2,
                              cudaMemcpyDeviceToHost));
        // Final accumulation of the few remaining partials on the host.
        output = 0.0;
        for (int i = 0; i < grid_size2; i++) {
            output += output_cpu[i];
        }
    }

    LOG_INFO("gpu calc value: %f", output);

    // Release device and host resources (the original leaked all of these).
    CUDA_CHECK(cudaFree(dptr));
    CUDA_CHECK(cudaFree(output2));
    CUDA_CHECK(cudaFree(output1));
    free(output_cpu);
    free(input);
    return 0;
}