extern "C" {
#include "dvmgpu_red.h"
#include "util.h"
}

// DEF_PAIR(func, body): defines a host/device pair of reduction operators:
//   func##_func      - host version
//   func##_func_dev  - __device__ version with the identical body
// The body folds *b into *a in place and returns an int flag; a nonzero
// return means "*a was replaced by *b", which the *_loc kernel and the host
// finalization loop use to carry the matching location operand along.
#define DEF_PAIR(func, body) \
template <typename T> \
inline int func##_func(T *a, T *b) body \
 \
template <typename T> \
inline __device__ int func##_func_dev(T *a, T *b) body \

// DEF_PAIR_SPEC(func, type, body): explicit specialization of both the host
// and device operator for a concrete type. Used for the complex typedefs,
// which need element-wise bodies instead of the generic scalar operators.
#define DEF_PAIR_SPEC(func, type, body) \
template <> \
inline int func##_func(type *a, type *b) body \
template <> \
inline __device__ int func##_func_dev(type *a, type *b) body \

// Sum: generic scalar addition; the complex specializations add componentwise.
// float_complex/double_complex are indexed with [0]/[1] throughout this file,
// i.e. two-element array typedefs (presumably real/imaginary parts - see
// their declaration in the included headers to confirm).
DEF_PAIR(sum, {*a += *b; return 0;})
DEF_PAIR_SPEC(sum, float_complex, {(*a)[0] += (*b)[0]; (*a)[1] += (*b)[1]; return 0;})
DEF_PAIR_SPEC(sum, double_complex, {(*a)[0] += (*b)[0]; (*a)[1] += (*b)[1]; return 0;})

// Product: generic scalar multiplication; the complex specializations perform
// full complex multiplication (re = re1*re2 - im1*im2, im = re1*im2 + im1*re2).
// A temporary for the new real part is required: the previous code stored the
// new (*a)[0] first and then read that already-overwritten value while
// computing (*a)[1], yielding a wrong imaginary part.
DEF_PAIR(mult, {*a *= *b; return 0;})
DEF_PAIR_SPEC(mult, float_complex, {float newRe = (*a)[0] * (*b)[0] - (*a)[1] * (*b)[1]; (*a)[1] = (*a)[0] * (*b)[1] + (*a)[1] * (*b)[0]; (*a)[0] = newRe; return 0;})
DEF_PAIR_SPEC(mult, double_complex, {double newRe = (*a)[0] * (*b)[0] - (*a)[1] * (*b)[1]; (*a)[1] = (*a)[0] * (*b)[1] + (*a)[1] * (*b)[0]; (*a)[0] = newRe; return 0;})

// Max/min: replace *a when *b wins and report the replacement via the return
// value so the caller can carry the matching location operand along.
DEF_PAIR(max, {if (*a < *b) {*a = *b; return 1;} return 0;})
DEF_PAIR(min, {if (*a > *b) {*a = *b; return 1;} return 0;})
// Bitwise accumulators (instantiated for integer types only, see bottom of file).
DEF_PAIR(and, {(*a) &= (*b); return 0;})
DEF_PAIR(or, {(*a) |= (*b); return 0;})
DEF_PAIR(xor, {(*a) ^= (*b); return 0;})
// equ: bitwise equivalence, i.e. the complement of XOR.
DEF_PAIR(equ, {(*a) = ~((*a) ^ (*b)); return 0;})

// Strange operators
// ne/eq: logical inequality/equality accumulators; eq flips the lowest bit of
// the XOR, so both are only meaningful for operands restricted to 0/1 -
// NOTE(review): presumably used for logical reductions; confirm with callers.
DEF_PAIR(ne, {(*a) ^= (*b); return 0;})
DEF_PAIR(eq, {(*a) ^= (*b) ^ 1; return 0;})

// Generic element copy used by the reduction kernels; specialized below for
// the two-element complex array typedefs, which cannot be assigned as a whole.
template<typename T>
static inline __device__ void copy_func(T *dst, T *src) {
    *dst = *src;
}

// float_complex is an array typedef, so the copy must be element-wise.
// "static" is dropped: an explicit specialization may not carry a storage
// class specifier (ill-formed, rejected by g++/clang); the specialization
// keeps the linkage of the primary template anyway.
template<>
inline __device__ void copy_func(float_complex *a, float_complex *b) {
    (*a)[0] = (*b)[0];
    (*a)[1] = (*b)[1];
}

// double_complex is an array typedef, so the copy must be element-wise.
// "static" is dropped for the same standard-conformance reason as the
// float_complex specialization: explicit specializations may not carry a
// storage class specifier.
template<>
inline __device__ void copy_func(double_complex *a, double_complex *b) {
    (*a)[0] = (*b)[0];
    (*a)[1] = (*b)[1];
}

// Tree reduction kernel without location tracking.
//
// Template parameters:
//   T    - element type
//   func - pairwise device operator; folds its second argument into the first
//   bs   - block size; must equal blockDim.x, and the explicitly unrolled
//          halving steps below assume it is a power of two <= 1024
//
// Each thread folds a grid-stride slice of vars[0..items) into its shared
// slot tmp[tid]; the block then reduces the bs slots down to tmp[0], which
// thread 0 writes to res[blockIdx.x] - one partial result per block.
//
// NOTE(review): tmp[tid] stays uninitialized when the thread's first index
// bs * blockIdx.x + tid >= items. The launcher below sizes the grid so every
// thread of every block gets at least one element (items >= BLOCK_SIZE and a
// workload multiplier >= 2), but the kernel itself has no guard - confirm if
// it is ever launched with a different configuration.
template <typename T, int (*func)(T *, T *), int bs>
__global__ void perform_reduction_kernel_woloc(int items, T *vars, T *res) {
    __shared__ T tmp[bs];
    int tid = threadIdx.x;
    int i = bs * blockIdx.x + tid;
    if (i < items)
        copy_func(tmp + tid, vars + i);
    // Sequential phase: fold the rest of this thread's grid-stride slice.
    for (i += bs * gridDim.x; i < items; i += bs * gridDim.x)
        func(tmp + tid, vars + i);
    __syncthreads();
    // Tree phase: the active range halves at every step. The "bs >= N" tests
    // are compile-time constants, so unused steps compile away entirely, and
    // every __syncthreads() is outside the divergent "tid < N" branch, i.e.
    // reached by the whole block.
    if (bs  >= 512) {
        if (tid < 256)
            func(tmp + tid, tmp + tid + 256);
        __syncthreads();
    }
    if (bs >= 256) {
        if (tid < 128)
            func(tmp + tid, tmp + tid + 128);
        __syncthreads();
    }
    if (bs >= 128) {
        if (tid < 64)
            func(tmp + tid, tmp + tid + 64);
        __syncthreads();
    }
    if (bs >= 64) {
        if (tid < 32)
            func(tmp + tid, tmp + tid + 32);
        __syncthreads();
    }
    if (bs >= 32) {
        if (tid < 16)
            func(tmp + tid, tmp + tid + 16);
        __syncthreads();
    }
    if (bs >= 16) {
        if (tid < 8)
            func(tmp + tid, tmp + tid + 8);
        __syncthreads();
    }
    if (bs >= 8) {
        if (tid < 4)
            func(tmp + tid, tmp + tid + 4);
        __syncthreads();
    }
    if (bs >= 4) {
        if (tid < 2)
            func(tmp + tid, tmp + tid + 2);
        __syncthreads();
    }
    if (bs >= 2) {
        // Last step: only thread 0 touches tmp[0]/tmp[1], and the same thread
        // writes the result below, so no barrier is needed afterwards.
        if (tid == 0)
            func(tmp, tmp + 1);
    }
    if (tid == 0)
        copy_func(res + blockIdx.x, tmp);
}

// Tree reduction kernel with location tracking (for maxloc/minloc-style
// reductions). Identical structure to perform_reduction_kernel_woloc, except
// that every value slot tmp[tid] has a companion location slot tmpLoc[tid];
// whenever func reports (nonzero return) that the left value was replaced by
// the right one, the corresponding location is copied along with it.
//
// Template parameters:
//   T       - element type
//   func    - pairwise device operator; returns nonzero when *first was
//             replaced by *second
//   bs      - block size; must equal blockDim.x, power of two <= 1024
//   LocType - element type of the location arrays
//
// NOTE(review): like the woloc kernel, tmp[tid]/tmpLoc[tid] stay
// uninitialized when bs * blockIdx.x + tid >= items; the launcher's grid
// sizing is what prevents that in practice.
template <typename T, int (*func)(T *, T *), int bs, typename LocType>
__global__ void perform_reduction_kernel_loc(int items, T *vars, LocType *locs, T *res, LocType *locRes) {
    __shared__ T tmp[bs];
    __shared__ LocType tmpLoc[bs];
    int tid = threadIdx.x;
    int i = bs * blockIdx.x + tid;
    if (i < items) {
        copy_func(tmp + tid, vars + i);
        copy_func(tmpLoc + tid, locs + i);
    }
    // Sequential phase: fold the rest of the grid-stride slice; the location
    // follows the value only when the value was actually replaced.
    for (i += bs * gridDim.x; i < items; i += bs * gridDim.x)
        if (func(tmp + tid, vars + i))
            copy_func(tmpLoc + tid, locs + i);
    __syncthreads();
    // Tree phase: compile-time-pruned halving steps; every __syncthreads()
    // is outside the divergent "tid < N" branch and reached by all threads.
    if (bs  >= 512) {
        if (tid < 256)
            if (func(tmp + tid, tmp + tid + 256))
                copy_func(tmpLoc + tid, tmpLoc + tid + 256);
        __syncthreads();
    }
    if (bs >= 256) {
        if (tid < 128)
            if (func(tmp + tid, tmp + tid + 128))
                copy_func(tmpLoc + tid, tmpLoc + tid + 128);
        __syncthreads();
    }
    if (bs >= 128) {
        if (tid < 64)
            if (func(tmp + tid, tmp + tid + 64))
                copy_func(tmpLoc + tid, tmpLoc + tid + 64);
        __syncthreads();
    }
    if (bs >= 64) {
        if (tid < 32)
            if (func(tmp + tid, tmp + tid + 32))
                copy_func(tmpLoc + tid, tmpLoc + tid + 32);
        __syncthreads();
    }
    if (bs >= 32) {
        if (tid < 16)
            if (func(tmp + tid, tmp + tid + 16))
                copy_func(tmpLoc + tid, tmpLoc + tid + 16);
        __syncthreads();
    }
    if (bs >= 16) {
        if (tid < 8)
            if (func(tmp + tid, tmp + tid + 8))
                copy_func(tmpLoc + tid, tmpLoc + tid + 8);
        __syncthreads();
    }
    if (bs >= 8) {
        if (tid < 4)
            if (func(tmp + tid, tmp + tid + 4))
                copy_func(tmpLoc + tid, tmpLoc + tid + 4);
        __syncthreads();
    }
    if (bs >= 4) {
        if (tid < 2)
            if (func(tmp + tid, tmp + tid + 2))
                copy_func(tmpLoc + tid, tmpLoc + tid + 2);
        __syncthreads();
    }
    if (bs >= 2) {
        // Last step: thread 0 alone combines slots 0 and 1 and also writes
        // the block result below, so no trailing barrier is required.
        if (tid == 0)
            if (func(tmp, tmp + 1))
                copy_func(tmpLoc, tmpLoc + 1);
    }
    if (tid == 0) {
        copy_func(res + blockIdx.x, tmp);
        copy_func(locRes + blockIdx.x, tmpLoc);
    }

}

#define BLOCK_SIZE 256
#define MAX_BLOCKS 256
// Host driver for reducing 'length' independent variables of 'items' elements
// each, laid out variable-major in device memory (vars[j * items + i]).
//
//   items   - number of elements per variable
//   vars    - device array of length * items elements of T
//   length  - number of independent reduction variables
//   locs    - device array of location operands, element size locSize bytes,
//             same layout as vars; ignored when locSize <= 0
//   locSize - byte size of one location element; <= 0 means "no locations"
//   res     - host accumulators, length elements; partials are folded into
//             them with the host operator func
//   locRes  - host location results, length * locSize bytes
//
// Strategy: when items >= BLOCK_SIZE, the device kernels produce one partial
// per block and the host finishes the reduction; otherwise the kernel launch
// is skipped and every element is treated as a "partial" reduced on the host.
template <typename T, int (*func)(T *, T *), int (*func_dev)(T *, T *)>
static void perform_reduction(int items, T *vars, int length, char *locs, int locSize, T *res, char *locRes) {
    dim3 blocks(MAX_BLOCKS + 1), threads(BLOCK_SIZE);
    int computeFlag = items >= BLOCK_SIZE;
    if (computeFlag) {
        // Grow the per-block workload until the grid fits into MAX_BLOCKS;
        // each block then covers 'multiplier' tiles via its grid-stride loop.
        for (int multiplier = 2; blocks.x > MAX_BLOCKS; multiplier++)
            blocks.x = (items + multiplier * BLOCK_SIZE - 1) / (multiplier * BLOCK_SIZE);
    } else {
        // Too few items for the kernel: the host loop below reduces all of
        // them directly from the input arrays.
        threads.x = 1;
        blocks.x = items;
    }
    T *tempRes;
    char *tempLocRes = NULL;
    if (computeFlag) {
        assertCuda(cudaMalloc(&tempRes, blocks.x * length * sizeof(T)));
        // Allocate the device location buffer only when locations are in use;
        // previously cudaMalloc was called unconditionally with locSize <= 0
        // bytes in the no-location case.
        if (locSize > 0)
            assertCuda(cudaMalloc(&tempLocRes, blocks.x * length * locSize));
    } else {
        tempRes = vars;
        tempLocRes = locs;
    }
    // Cast the unsigned dim3 members and size_t sizeof(T) to int so they
    // match %d (passing size_t through varargs as %d is undefined on LP64).
    if (computeFlag)
        dvmh_log(TRACE, "blocks=%d threads=%d sizeof(T)=%d locSize=%d length=%d", (int)blocks.x, (int)threads.x, (int)sizeof(T), locSize, length);
    else
        dvmh_log(TRACE, "items=%d (not computing) locSize=%d length=%d", items, locSize, length);
    if (computeFlag) {
        if (locSize <= 0) {
            for (int i = 0; i < length; i++) {
                perform_reduction_kernel_woloc<T, func_dev, BLOCK_SIZE><<<blocks, threads>>>(items, vars + i * items, tempRes + i * blocks.x);
                assertCuda(cudaGetLastError());
            }
        } else {
            for (int i = 0; i < length; i++) {
                // Dispatch on the location element size. NOTE(review): on
                // LLP64 platforms sizeof(long) == sizeof(int), which would
                // make two case labels collide - confirm the supported ABIs.
                switch(locSize) {
                case sizeof(char):
                    perform_reduction_kernel_loc<T, func_dev, BLOCK_SIZE, char><<<blocks, threads>>>(items, vars + i * items, locs + i * items, tempRes + i * blocks.x, tempLocRes + i * blocks.x);
                    break;
                case sizeof(short):
                    perform_reduction_kernel_loc<T, func_dev, BLOCK_SIZE, short><<<blocks, threads>>>(items, vars + i * items, (short *)locs + i * items, tempRes + i * blocks.x, (short *)tempLocRes + i * blocks.x);
                    break;
                case sizeof(int):
                    perform_reduction_kernel_loc<T, func_dev, BLOCK_SIZE, int><<<blocks, threads>>>(items, vars + i * items, (int *)locs + i * items, tempRes + i * blocks.x, (int *)tempLocRes + i * blocks.x);
                    break;
                case sizeof(long):
                    perform_reduction_kernel_loc<T, func_dev, BLOCK_SIZE, long><<<blocks, threads>>>(items, vars + i * items, (long *)locs + i * items, tempRes + i * blocks.x, (long *)tempLocRes + i * blocks.x);
                    break;
                case sizeof(double_complex):
                    perform_reduction_kernel_loc<T, func_dev, BLOCK_SIZE, double_complex><<<blocks, threads>>>(items, vars + i * items, (double_complex *)locs + i * items, tempRes + i * blocks.x, (double_complex *)tempLocRes + i * blocks.x);
                    break;
                default:
                    // NOTE(review): unsupported locSize falls through without
                    // launching anything, leaving tempRes/tempLocRes
                    // uninitialized - the host would then fold garbage.
//                    perform_reduction_kernel_loc_2<T, func_dev, BLOCK_SIZE><<<blocks, threads, BLOCK_SIZE * (sizeof(T) + locSize)>>>(items, vars + i * items, locs + i * items * locSize, locSize, tempRes + i * blocks.x, tempLocRes + i * blocks.x * locSize);
                    break;
                }
                assertCuda(cudaGetLastError());
            }
        }
    }
    // Bring the per-block partials to the host. cudaMemcpy in the default
    // stream synchronizes with the kernel launches above.
    T *tempHostRes = (T *)malloc(blocks.x * length * sizeof(T));
    assertCuda(cudaMemcpy(tempHostRes, tempRes, blocks.x * length * sizeof(T), cudaMemcpyDeviceToHost));
    char *tempHostLocRes = NULL;
    if (locSize > 0) {
        tempHostLocRes = (char *)malloc(blocks.x * length * locSize);
        assertCuda(cudaMemcpy(tempHostLocRes, tempLocRes, blocks.x * length * locSize, cudaMemcpyDeviceToHost));
    }
    if (computeFlag) {
        assertCuda(cudaFree(tempRes));
        if (tempLocRes)
            assertCuda(cudaFree(tempLocRes));
    }
    // Fold every partial into the caller's accumulators; a nonzero return
    // from func means the matching location must be carried over too.
    for (int i = 0; i < (int)blocks.x; i++)
        for (int j = 0; j < length; j++) {
            if (func(res + j, tempHostRes + (j * blocks.x + i)))
                if (locSize > 0)
                    memcpy(locRes + j * locSize, tempHostLocRes + (j * blocks.x + i) * locSize, locSize);
        }
    free(tempHostRes);
    free(tempHostLocRes);   // free(NULL) is a no-op
}

extern "C" {

// DEFINE_REDFUNC(func, type): emits the C-linkage entry point whose signature
// comes from DECLARE_REDFUNC (presumably declared in dvmgpu_red.h, included
// above - its parameter list must provide items, vars, length, locs, locSize,
// res, locRes) and forwards it to perform_reduction instantiated with the
// matching host/device operator pair.
#define DEFINE_REDFUNC(func, type) DECLARE_REDFUNC(func, type) { \
    perform_reduction<type, func##_func<type>, func##_func_dev<type> >(items, vars, length, locs, locSize, res, locRes); \
}

// Integer types get the full operator set (arithmetic, min/max, bitwise,
// logical).
DEFINE_REDFUNC(sum, char);
DEFINE_REDFUNC(mult, char);
DEFINE_REDFUNC(max, char);
DEFINE_REDFUNC(min, char);
DEFINE_REDFUNC(and, char);
DEFINE_REDFUNC(or, char);
DEFINE_REDFUNC(xor, char);
DEFINE_REDFUNC(equ, char);
DEFINE_REDFUNC(ne, char);
DEFINE_REDFUNC(eq, char);

DEFINE_REDFUNC(sum, int);
DEFINE_REDFUNC(mult, int);
DEFINE_REDFUNC(max, int);
DEFINE_REDFUNC(min, int);
DEFINE_REDFUNC(and, int);
DEFINE_REDFUNC(or, int);
DEFINE_REDFUNC(xor, int);
DEFINE_REDFUNC(equ, int);
DEFINE_REDFUNC(ne, int);
DEFINE_REDFUNC(eq, int);

DEFINE_REDFUNC(sum, long);
DEFINE_REDFUNC(mult, long);
DEFINE_REDFUNC(max, long);
DEFINE_REDFUNC(min, long);
DEFINE_REDFUNC(and, long);
DEFINE_REDFUNC(or, long);
DEFINE_REDFUNC(xor, long);
DEFINE_REDFUNC(equ, long);
DEFINE_REDFUNC(ne, long);
DEFINE_REDFUNC(eq, long);

// Floating-point types: arithmetic and min/max only (no bitwise operators).
DEFINE_REDFUNC(sum, float);
DEFINE_REDFUNC(mult, float);
DEFINE_REDFUNC(max, float);
DEFINE_REDFUNC(min, float);

DEFINE_REDFUNC(sum, double);
DEFINE_REDFUNC(mult, double);
DEFINE_REDFUNC(max, double);
DEFINE_REDFUNC(min, double);

// Complex types: only sum and product are defined (no ordering).
DEFINE_REDFUNC(sum, float_complex);
DEFINE_REDFUNC(mult, float_complex);

DEFINE_REDFUNC(sum, double_complex);
DEFINE_REDFUNC(mult, double_complex);

}
