import pycuda.autoinit
import pycuda.driver as drv
import numpy as np
# Timer used to benchmark the kernels
from timeit import default_timer as timer
from pycuda.compiler import SourceModule

# Load the CUDA source and compile it with NVCC via PyCUDA.
# Use a context manager so the file handle is closed even if
# reading or decoding fails (the original leaked the handle).
with open("reduction.cu", "r", encoding="utf8") as fo:
    content = fo.read()

mod = SourceModule(content)

# Four reduction kernel variants compiled from reduction.cu
# (presumably increasingly optimized versions — see the .cu file).
reduce0 = mod.get_function("reduce0")
reduce1 = mod.get_function("reduce1")
reduce2 = mod.get_function("reduce2")
reduce3 = mod.get_function("reduce3")

# Problem size: grid_size blocks, 1024 input elements per block.
grid_size = 1024

# Host arrays: one million int32 ones to reduce, and one partial-sum
# slot per block for the kernels' output.
a = np.ones(1024 * grid_size, dtype=np.int32)
dest = np.zeros(grid_size, dtype=np.int32)

# Allocate raw device buffers matching the host arrays in size,
# then upload both (dest is uploaded zeroed).
a_gpu = drv.mem_alloc(a.nbytes)
dest_gpu = drv.mem_alloc(dest.nbytes)

drv.memcpy_htod(a_gpu, a)
drv.memcpy_htod(dest_gpu, dest)

# Benchmark each kernel variant. Each pass is a two-stage reduction:
# stage 1 writes one partial sum per block into dest_gpu, stage 2
# reduces those partials with a single block, leaving the grand
# total in dest_gpu[0].
# NOTE(review): reduce3 is launched with block_size 512 — presumably it
# reduces two elements per thread; confirm against reduction.cu.
for func, block_size in zip([reduce0, reduce1, reduce2, reduce3],
                            [1024, 1024, 1024, 512]):
    start = timer()
    step_count = 100
    for _ in range(step_count):
        func(a_gpu, dest_gpu, block=(block_size, 1, 1), grid=(grid_size, 1))
        func(dest_gpu, dest_gpu, block=(block_size, 1, 1), grid=(1, 1))
    # Kernel launches are asynchronous: without an explicit sync the
    # timer stops after merely *enqueueing* the launches, so the
    # measurement would mostly reflect launch overhead. Wait for the
    # GPU to drain the queue before reading the clock.
    drv.Context.synchronize()
    run_time = timer() - start
    print("gpu run time %f seconds " % (run_time))
    # memcpy_dtoh is synchronous; dest[0] should equal 1024 * grid_size
    # (the sum of all ones) if the kernel is correct.
    drv.memcpy_dtoh(dest, dest_gpu)
    print(dest[0])