/**
 * @file DCTLee.cuh
 * @brief Define some DCT/DST/DCST/DSCT Lee implementation functions, this file should not be included by interface header files.
 * @author zpfeng
 * @copyright (c) Copyright 2013~2050 - PANGO MICROSYSTEMS, INC.
 * ALL RIGHTS RESERVED.
 */

#ifndef __DCTLEE_CUH__
#define __DCTLEE_CUH__

#include "Utils.cuh"

namespace fabric_space
{
namespace dct_op
{
/**
 * Add the first element of each length-N row of 'x' to every element of the
 * matching row of 'y', then halve: y[r][c] = (y[r][c] + x[r][0]) * 0.5.
 *
 * Call through AddX0AndScaleKernel<<<CeilDiv(M * N, 256), 256>>>(x, M, N, y)
 */
template <typename T> __global__ void AddX0AndScaleKernel(const T *x, const int M, const int N, T *y)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < M * N)
    {
        // Flat index of the first element of this thread's row.
        const int rowStart = (idx / N) * N;
        y[idx] = (y[idx] + x[rowStart]) * (T)0.5;
    }
}

/**
 * Given each length-N row x_0, x_1, ..., x_{N-1}, produce 0, x_{N-1}, ..., x_2, x_1
 * (x_0 is dropped and replaced by 0).
 *
 * Call through ComputeFlipAndShiftKernel<<<CeilDiv(M * N, 256), 256>>>(x, M, N, y)
 */
template <typename T> __global__ void ComputeFlipAndShiftKernel(const T *x, const int M, const int N, T *y)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < M * N)
    {
        const int col = idx % N;
        if (col == 0)
        {
            // First column of every row is zeroed.
            y[idx] = 0;
        }
        else
        {
            // Destination column c reads source column N - c of the same row.
            y[idx] = x[idx + N - col * 2];
        }
    }
}

/**
 * Flip the sign of odd-indexed entries (index starts from 0); each row
 * contributes N / 2 such entries.
 *
 * Call through NegateOddEntriesKernel<<<CeilDiv(M * (N / 2), 256), 256>>>(x, M, N)
 */
template <typename T> __global__ void NegateOddEntriesKernel(T *x, const int M, const int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < M * (N / 2))
    {
        // This thread owns the (idx)-th odd position of the flattened matrix.
        const int pos = idx * 2 + 1;
        x[pos] = -x[pos];
    }
}

/**
 * Reverse each length-N row: x_0, x_1, ..., x_{N-1} becomes x_{N-1}, ..., x_1, x_0.
 *
 * Call through ComputeFlipKernel<<<CeilDiv(M * N, 256), 256>>>(x, M, N, y)
 */
template <typename T> __global__ void ComputeFlipKernel(const T *x, const int M, const int N, T *y)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < M * N)
    {
        const int col = idx % N;
        // Mirror within the row: destination column col reads column N - 1 - col.
        y[idx] = x[idx + (N - 1) - col * 2];
    }
}

/**
 * Transpose a row-major matrix with M rows and N columns; each thread copies
 * one blockSize x blockSize tile (naive, no shared memory).
 *
 * Launch example (default blockSize = 16):
 * MatTranspose<<<dim3(CeilDiv(N, 16 * 16), CeilDiv(M, 16 * 16)), dim3(16, 16)>>>(in, out, M, N)
 */
template <typename TValue, typename TIndex = unsigned> __global__ void MatTranspose(const TValue *in, TValue *out, TIndex M, TIndex N, TIndex blockSize = 16)
{
    // Top-left corner of the tile owned by this thread.
    const TIndex row0 = (TIndex)(blockSize * (blockIdx.y * blockDim.y + threadIdx.y));
    const TIndex col0 = (TIndex)(blockSize * (blockIdx.x * blockDim.x + threadIdx.x));
    // Clamp the tile to the matrix boundary.
    const TIndex rowEnd = min(M, row0 + blockSize);
    const TIndex colEnd = min(N, col0 + blockSize);
    for (TIndex c = col0; c < colEnd; ++c)
    {
        for (TIndex r = row0; r < rowEnd; ++r)
        {
            out[r + c * M] = in[c + r * N];
        }
    }
}

/**
 * Tiled matrix transpose using shared memory; idata is M x N row-major,
 * odata receives the N x M transpose.
 *
 * Launch with block(BLOCK_SZ, BLOCK_SZ / NUM_PER_THREAD) and
 * grid(CeilDiv(N, BLOCK_SZ), CeilDiv(M, BLOCK_SZ)); each thread moves
 * NUM_PER_THREAD elements. The +1 column padding avoids shared-memory bank
 * conflicts on the transposed read.
 *
 * https://zhuanlan.zhihu.com/p/692010210
 */
template <typename TValue, typename TIndex, int BLOCK_SZ, int NUM_PER_THREAD> __global__ void MatTransposeKernelV2(TValue *idata, TValue *odata, TIndex M, TIndex N)
{
    const int bx = blockIdx.x, by = blockIdx.y;
    const int tx = threadIdx.x, ty = threadIdx.y;

    // Fix: the tile must hold TValue, not float — the previous hard-coded
    // 'float' silently truncated double (or other wide) element types.
    __shared__ TValue sdata[BLOCK_SZ][BLOCK_SZ + 1];

    int x = bx * BLOCK_SZ + tx;
    int y = by * BLOCK_SZ + ty;

    constexpr int ROW_STRIDE = BLOCK_SZ / NUM_PER_THREAD;

    if (x < N)
    {
#pragma unroll
        for (int y_off = 0; y_off < BLOCK_SZ; y_off += ROW_STRIDE)
        {
            if (y + y_off < M)
            {
                sdata[ty + y_off][tx] = idata[(y + y_off) * N + x];
            }
        }
    }
    // All threads reach this barrier unconditionally (the guards above only
    // skip loads, not the barrier itself).
    __syncthreads();

    // Swap block coordinates so the write side is coalesced in odata.
    x = by * BLOCK_SZ + tx;
    y = bx * BLOCK_SZ + ty;
    if (x < M)
    {
#pragma unroll
        for (int y_off = 0; y_off < BLOCK_SZ; y_off += ROW_STRIDE)
        {
            if (y + y_off < N)
            {
                odata[(y + y_off) * M + x] = sdata[tx][ty + y_off];
            }
        }
    }
}

/**
 * Host wrapper for MatTransposeKernelV2: transpose an M x N row-major matrix
 * 'idata' into 'odata' using 32 x 32 tiles, 4 elements per thread.
 */
template <typename TValue, typename TIndex = unsigned> void MatTransposeV2(TValue *idata, TValue *odata, TIndex M, TIndex N)
{
    constexpr TIndex kBlockSize = 32;
    constexpr TIndex kNumPerThread = 4;
    const dim3 threads(kBlockSize, kBlockSize / kNumPerThread);
    const dim3 blocks(CeilDiv(N, kBlockSize), CeilDiv(M, kBlockSize));
    MatTransposeKernelV2<TValue, TIndex, kBlockSize, kNumPerThread><<<blocks, threads>>>(idata, odata, M, N);
}

/**
 * Precompute the reciprocal-cosine table used by the N-point DCT butterflies.
 *
 * Entries are laid out pass by pass, largest first: N/2 values, then N/4,
 * ..., down to 1, each entry being 0.5 / cos(phase).
 *
 * @param  cos  size N - 1 buffer, contains the result after the call
 * @param  N    length of the target dct, must be a power of 2 (no-op otherwise)
 */
template <typename TValue, typename TIndex = unsigned> __host__ __device__ void PrecomputeDCTCos(TValue *cos, TIndex N)
{
    // Reject non-power-of-2 lengths silently, matching DCT's behavior.
    if (!IsPowerOf2<TIndex>(N))
    {
        return;
    }
    TIndex pos = 0;
    for (TIndex half = N / 2; half > 0; half /= 2)
    {
        const TValue step = 0.5 * PI / half;
        TValue angle = 0.5 * step;
        for (TIndex k = 0; k < half; ++k)
        {
            cos[pos + k] = 0.5 / std::cos(angle);
            angle += step;
        }
        pos += half;
    }
}

/**
 * Precompute the reciprocal-cosine table used by the N-point IDCT butterflies.
 *
 * Same values as PrecomputeDCTCos but laid out smallest pass first:
 * 1 value, then 2, 4, ..., up to N/2, each entry being 0.5 / cos(phase).
 *
 * @param  cos  size N - 1 buffer, contains the result after the call
 * @param  N    length of the target idct, must be a power of 2 (no-op otherwise)
 */
template <typename TValue, typename TIndex = unsigned> __host__ __device__ void PrecomputeIDCTCos(TValue *cos, TIndex N)
{
    // Reject non-power-of-2 lengths silently, matching IDCT's behavior.
    if (!IsPowerOf2<TIndex>(N))
    {
        return;
    }
    TIndex pos = 0;
    for (TIndex half = 1; half < N; half *= 2)
    {
        const TValue step = 0.5 * PI / half;
        TValue angle = 0.5 * step;
        for (TIndex k = 0; k < half; ++k)
        {
            cos[pos + k] = 0.5 / std::cos(angle);
            angle += step;
        }
        pos += half;
    }
}

/**
 * Fast Discrete Cosine Transform (DCT-II) — iterative form of Lee's algorithm.
 * Algorithm reference: A New Algorithm to Compute the Discrete Cosine
 * Transform, by Byeong Gi Lee, 1984.
 *
 * A sample recursive implementation:
 * https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms
 * A sample iterative implementation (this code follows the iterative shape):
 * https://www.codeproject.com/Articles/151043/Iterative-Fast-1D-Forvard-DCT
 *
 * Computes y[k] = sum_n=0..N-1 (x[n] * cos((n + 0.5) * k * PI / N)), for k = 0..N-1.
 * No normalization is applied here; callers scale separately (see BatchDCT2D).
 *
 * @param  vec  length N input sequence (read only)
 * @param  out  length N buffer receiving the result
 * @param  buf  length N scratch buffer (ping-pongs with 'out' between passes)
 * @param  cos  length N - 1 values precomputed by 'PrecomputeDCTCos'
 * @param  N    length of vec, must be a power of 2 (silently returns otherwise)
 */
template <typename TValue, typename TIndex = unsigned> __host__ __device__ void DCT(TValue *vec, TValue *out, TValue *buf, const TValue *cos, TIndex N)
{
    // The input length must be power of 2; otherwise leave outputs untouched.
    if (!IsPowerOf2<TIndex>(N))
    {
        return;
    }
    // 'curr'/'next' ping-pong between 'out' and 'buf' across passes; after each
    // pass they are swapped so 'curr' always holds the latest data.
    TValue *curr = out;
    TValue *next = buf;
    // Copy 'vec' into the working buffer (manual loop: usable in device code).
    // std::copy(vec, vec + N, curr);
    for (TIndex i = 0; i < N; ++i)
    {
        curr[i] = vec[i];
    }
    // Current butterfly length and half length.
    TIndex len = N;
    TIndex halfLen = len / 2;
    // Top-down pass: iteratively bi-partition each length-'len' sequence into
    // a sum half and a weighted difference half (Lee's decomposition).
    TIndex cosOffset = 0;
    while (halfLen)
    {
        TIndex offset = 0;
        TIndex steps = N / len;
        for (TIndex k = 0; k < steps; ++k)
        {
            for (TIndex i = 0; i < halfLen; ++i)
            {
                // Front half: symmetric sum of mirrored pairs.
                next[offset + i] = curr[offset + i] + curr[offset + len - i - 1];
                // Back half: mirrored difference weighted by the precomputed
                // 0.5 / cos(...) factor for this pass.
                next[offset + halfLen + i] = (curr[offset + i] - curr[offset + len - i - 1]) * cos[cosOffset + i];
            }
            offset += len;
        }
        Swap(curr, next);
        cosOffset += halfLen;
        len = halfLen;
        halfLen /= 2;
    }
    // Bottom-up pass: interleave sub-results to form the final DCT solution.
    // Note that the case len = 2 would do nothing, so we start from len = 4.
    len = 4;
    halfLen = 2;
    while (halfLen < N)
    {
        TIndex offset = 0;
        TIndex steps = N / len;
        for (TIndex k = 0; k < steps; ++k)
        {
            for (TIndex i = 0; i < halfLen - 1; ++i)
            {
                // Even outputs come from the sum half; odd outputs are adjacent
                // pair sums from the difference half.
                next[offset + i * 2] = curr[offset + i];
                next[offset + i * 2 + 1] = curr[offset + halfLen + i] + curr[offset + halfLen + i + 1];
            }
            // Last pair has no successor to pair with.
            next[offset + len - 2] = curr[offset + halfLen - 1];
            next[offset + len - 1] = curr[offset + len - 1];
            offset += len;
        }
        Swap(curr, next);
        halfLen = len;
        len *= 2;
    }
    // If the final swap left the result in 'buf', copy it back into 'out'.
    if (curr != out)
    {
        // std::copy(curr, curr + N, out);
        for (TIndex i = 0; i < N; ++i)
        {
            out[i] = curr[i];
        }
    }
}

/**
 * Inverse DCT (DCT-III), iterative form of Lee's algorithm — the structural
 * mirror of DCT above (passes run in reverse order).
 *
 * Computes y[k] = 0.5 * x[0] + sum_n=1..N-1 (x[n] * cos(n * (k + 0.5) * PI / N)), for k = 0..N-1.
 *
 * @param  vec  length N input sequence (read only)
 * @param  out  length N buffer receiving the result
 * @param  buf  length N scratch buffer (ping-pongs with 'out' between passes)
 * @param  cos  length N - 1 values precomputed by 'PrecomputeIDCTCos'
 * @param  N    length of vec, must be a power of 2 (silently returns otherwise)
 */
template <typename TValue, typename TIndex = unsigned> __host__ __device__ void IDCT(TValue *vec, TValue *out, TValue *buf, const TValue *cos, TIndex N)
{
    // The input length must be power of 2; otherwise leave outputs untouched.
    if (!IsPowerOf2<TIndex>(N))
    {
        return;
    }
    // 'curr'/'next' ping-pong between 'out' and 'buf' across passes.
    TValue *curr = out;
    TValue *next = buf;
    // Copy 'vec' into the working buffer (manual loop: usable in device code).
    // std::copy(vec, vec + N, curr);
    for (TIndex i = 0; i < N; ++i)
    {
        curr[i] = vec[i];
    }
    // Fold the 0.5 * x[0] term of the DCT-III definition into the data.
    curr[0] /= 2;
    // Current butterfly length and half length.
    TIndex len = N;
    TIndex halfLen = len / 2;
    // Top-down pass: de-interleave even/odd entries (inverse of DCT's
    // bottom-up interleave); odd entries become adjacent-pair sums.
    while (halfLen)
    {
        TIndex offset = 0;
        TIndex steps = N / len;
        for (TIndex k = 0; k < steps; ++k)
        {
            // First elements of each half have no left neighbor to pair with.
            next[offset] = curr[offset];
            next[offset + halfLen] = curr[offset + 1];
            for (TIndex i = 1; i < halfLen; ++i)
            {
                next[offset + i] = curr[offset + i * 2];
                next[offset + halfLen + i] = curr[offset + i * 2 - 1] + curr[offset + i * 2 + 1];
            }
            offset += len;
        }
        Swap(curr, next);
        len = halfLen;
        halfLen /= 2;
    }
    // Bottom-up pass: recombine halves with the precomputed 0.5 / cos(...)
    // weights to form the final IDCT solution (inverse of DCT's top-down pass).
    len = 2;
    halfLen = 1;
    TIndex cosOffset = 0;
    while (halfLen < N)
    {
        TIndex offset = 0;
        TIndex steps = N / len;
        for (TIndex k = 0; k < steps; ++k)
        {
            for (TIndex i = 0; i < halfLen; ++i)
            {
                // Butterfly: mirrored outputs are sum and difference of the
                // plain term and the weighted term.
                TValue g = curr[offset + i];
                TValue h = curr[offset + halfLen + i] * cos[cosOffset + i];
                next[offset + i] = g + h;
                next[offset + len - 1 - i] = g - h;
            }
            offset += len;
        }
        Swap(curr, next);
        cosOffset += halfLen;
        halfLen = len;
        len *= 2;
    }
    // If the final swap left the result in 'buf', copy it back into 'out'.
    if (curr != out)
    {
        // std::copy(curr, curr + N, out);
        for (TIndex i = 0; i < N; ++i)
        {
            out[i] = curr[i];
        }
    }
}

/**
 * Batch DCT: one thread per row, each running an N-point Lee DCT.
 *
 * @param  mtx  size M * N row-major matrix to be transformed
 * @param  out  size M * N output matrix
 * @param  buf  size M * N scratch buffer (one row per thread)
 * @param  cos  length N - 1, cosines precomputed by 'PrecomputeDCTCos'
 * @param  M    number of rows
 * @param  N    number of columns, must be a power of 2
 *
 * Try to read following url to improve DCTKernel speed:
 * https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-hw-model
 * Call through DCTKernel<<<CeilDiv(M, 256), 256>>>(mtx, out, buf, cos, M, N)
 */
template <typename TValue, typename TIndex = unsigned> __global__ void DCTKernel(TValue *mtx, TValue *out, TValue *buf, const TValue *cos, TIndex M, TIndex N)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < M)
    {
        DCT<TValue, TIndex>(mtx + row * N, out + row * N, buf + row * N, cos, N);
    }
}

/**
 * Batch inverse DCT: one thread per row, each running an N-point Lee IDCT.
 *
 * @param  mtx  size M * N row-major matrix to be transformed
 * @param  out  size M * N output matrix
 * @param  buf  size M * N scratch buffer (one row per thread)
 * @param  cos  length N - 1, cosines precomputed by 'PrecomputeIDCTCos'
 * @param  M    number of rows
 * @param  N    number of columns, must be a power of 2
 *
 * Call through IDCTKernel<<<CeilDiv(M, 256), 256>>>(mtx, out, buf, cos, M, N)
 */
template <typename TValue, typename TIndex = unsigned> __global__ void IDCTKernel(TValue *mtx, TValue *out, TValue *buf, const TValue *cos, TIndex M, TIndex N)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < M)
    {
        IDCT<TValue, TIndex>(mtx + row * N, out + row * N, buf + row * N, cos, N);
    }
}

/**
 * Apply the DCT normalization 2 / N to every element; one thread per row.
 */
template <typename TValue, typename TIndex = unsigned> __global__ void MultiplyScaleCoefKernel(TValue *out, TIndex M, TIndex N)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < M)
    {
        const TValue scale = (TValue)(2.0 / N);
        for (TIndex col = 0; col < N; ++col)
        {
            out[row * N + col] *= scale;
        }
    }
}

/**
 * Apply the DCT normalization 2 / N to every element; one thread per element
 * (flat, coalesced variant of MultiplyScaleCoefKernel).
 */
template <typename TValue, typename TIndex = unsigned> __global__ void MultiplyScaleCoefKernelV2(TValue *out, TIndex M, TIndex N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < M * N)
    {
        out[idx] *= (TValue)(2.0 / N);
    }
}

/**
 * Per-row elementwise combine: out[i][j] *= in[i][j] * s; one thread per row.
 *
 * NOTE(review): despite the "MultiplyScaleCoef" name this ACCUMULATES into
 * 'out' (*=) rather than assigning in * s into it — callers must have
 * pre-filled 'out'. Confirm 'out * in * s' (not 'out = in * s') is intended.
 */
template <typename TValue, typename TIndex = unsigned> __global__ void MultiplyScaleCoefKernel(TValue *out, TValue *in, TValue s, TIndex M, TIndex N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= M)
    {
        return;
    }
    // Each thread walks a full row; adjacent threads are N elements apart
    // (uncoalesced) — the flat V2 variant below is the faster choice.
    for (TIndex j = 0; j < N; ++j)
    {
        out[i * N + j] *= in[i * N + j] * s;
    }
}

/**
 * Flat elementwise combine: out[i] *= in[i] * s; one thread per element.
 *
 * NOTE(review): like the row variant above, this ACCUMULATES into 'out' (*=)
 * rather than assigning. BatchDST2D relies on it after ComputeFlipKernel;
 * confirm 'out *= in * s' (not 'out = in * s') is the intended DST math.
 */
template <typename TValue, typename TIndex = unsigned> __global__ void MultiplyScaleCoefKernelV2(TValue *out, TValue *in, TValue s, TIndex M, TIndex N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= M * N)
    {
        return;
    }
    out[i] *= in[i] * s;
}

/**
 * Row-wise batch DCT of an M x N matrix followed by the 2 / N normalization.
 * @param  mtx  M x N row-major input
 * @param  out  M x N output
 * @param  buf  M x N scratch buffer
 * @param  cos  length N - 1 cosines from 'PrecomputeDCTCos'
 */
template <typename TValue, typename TIndex = unsigned> __host__ inline void BatchDCT2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos, TIndex M, TIndex N)
{
    // Fix: grid = CeilDiv(M, 128), block = min(M, 128). The previous (swapped)
    // order used CeilDiv(M, 128) as blockDim, exceeding the 1024-thread block
    // limit once M > 131072 and silently failing the launch.
    DCTKernel<<<CeilDiv(M, (TIndex)128), min(M, (TIndex)128)>>>(mtx, out, buf, cos, M, N);
    cudaDeviceSynchronize();
    MultiplyScaleCoefKernelV2<<<CeilDiv(M * N, (TIndex)256), min((TIndex)256, M * N)>>>(out, M, N);
    cudaDeviceSynchronize();
}

/**
 * Row-wise batch inverse DCT of an M x N matrix (no normalization applied).
 * @param  cos  length N - 1 cosines from 'PrecomputeIDCTCos'
 */
template <typename TValue, typename TIndex = unsigned> __host__ inline void BatchIDCT2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos, TIndex M, TIndex N)
{
    // Fix: grid = CeilDiv(M, 128), block = min(M, 128). The previous (swapped)
    // order made blockDim exceed the 1024-thread limit for M > 131072.
    IDCTKernel<<<CeilDiv(M, (TIndex)128), min(M, (TIndex)128)>>>(mtx, out, buf, cos, M, N);
    cudaDeviceSynchronize();
}

/**
 * Row-wise batch DST of an M x N matrix, computed via the DCT identity:
 * negate odd entries, row-wise DCT, flip each row, then combine with 2 / N.
 * @param  cos  length N - 1 cosines from 'PrecomputeDCTCos'
 */
template <typename TValue, typename TIndex = unsigned> __host__ inline void BatchDST2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos, TIndex M, TIndex N)
{
    MemcpyDeviceToDevice(buf, mtx, M * N);
    cudaDeviceSynchronize();
    NegateOddEntriesKernel<<<CeilDiv(M * (N / 2), (TIndex)256), min((TIndex)256, M * (N / 2))>>>(buf, M, N);
    cudaDeviceSynchronize();
    // Fix: grid = CeilDiv(M, 128), block = min(M, 128). The previous (swapped)
    // order made blockDim exceed the 1024-thread limit for M > 131072.
    // (Aliasing buf as both input and scratch is safe: DCT copies its input
    // into 'out' before writing to the scratch buffer.)
    DCTKernel<<<CeilDiv(M, (TIndex)128), min(M, (TIndex)128)>>>(buf, out, buf, cos, M, N);
    cudaDeviceSynchronize();
    ComputeFlipKernel<<<CeilDiv(M * N, (TIndex)256), min((TIndex)256, M * N)>>>(out, M, N, buf);
    cudaDeviceSynchronize();
    // NOTE(review): this kernel computes out[i] *= buf[i] * (2/N); if a plain
    // assignment (out = flip * 2/N) was intended, the kernel needs '=' — confirm.
    MultiplyScaleCoefKernelV2<<<CeilDiv(M * N, (TIndex)256), min((TIndex)256, M * N)>>>(out, buf, (TValue)(2.0 / N), M, N);
    cudaDeviceSynchronize();
}

/**
 * Row-wise batch inverse DST of an M x N matrix via the IDCT identity:
 * flip each row, row-wise IDCT, then negate odd entries.
 * @param  cos  length N - 1 cosines from 'PrecomputeIDCTCos'
 */
template <typename TValue, typename TIndex = unsigned> __host__ inline void BatchIDST2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos, TIndex M, TIndex N)
{
    ComputeFlipKernel<<<CeilDiv(M * N, (TIndex)256), min((TIndex)256, M * N)>>>(mtx, M, N, buf);
    cudaDeviceSynchronize();
    // Fix: grid = CeilDiv(M, 128), block = min(M, 128). The previous (swapped)
    // order made blockDim exceed the 1024-thread limit for M > 131072.
    IDCTKernel<<<CeilDiv(M, (TIndex)128), min(M, (TIndex)128)>>>(buf, out, buf, cos, M, N);
    cudaDeviceSynchronize();
    NegateOddEntriesKernel<<<CeilDiv(M * (N / 2), (TIndex)256), min((TIndex)256, M * (N / 2))>>>(out, M, N);
    cudaDeviceSynchronize();
}

/**
 * Row-wise batch inverse DCT followed by the x0 correction:
 * out = (IDCT(mtx) + mtx[row][0]) * 0.5 per element.
 * @param  cos  length N - 1 cosines from 'PrecomputeIDCTCos'
 */
template <typename TValue, typename TIndex = unsigned> __host__ inline void BatchIDXCT2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos, TIndex M, TIndex N)
{
    // Fix: grid = CeilDiv(M, 128), block = min(M, 128). The previous (swapped)
    // order made blockDim exceed the 1024-thread limit for M > 131072.
    IDCTKernel<<<CeilDiv(M, (TIndex)128), min(M, (TIndex)128)>>>(mtx, out, buf, cos, M, N);
    cudaDeviceSynchronize();
    AddX0AndScaleKernel<<<CeilDiv(M * N, (TIndex)256), min((TIndex)256, M * N)>>>(mtx, M, N, out);
    cudaDeviceSynchronize();
}

/**
 * Row-wise batch inverse DST variant that drops x0:
 * flip-and-shift each row (x0 -> 0), row-wise IDCT, then negate odd entries.
 * @param  cos  length N - 1 cosines from 'PrecomputeIDCTCos'
 */
template <typename TValue, typename TIndex = unsigned> __host__ inline void BatchIDXST2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos, TIndex M, TIndex N)
{
    ComputeFlipAndShiftKernel<<<CeilDiv(M * N, (TIndex)256), min((TIndex)256, M * N)>>>(mtx, M, N, buf);
    cudaDeviceSynchronize();
    // Fix: grid = CeilDiv(M, 128), block = min(M, 128). The previous (swapped)
    // order made blockDim exceed the 1024-thread limit for M > 131072.
    IDCTKernel<<<CeilDiv(M, (TIndex)128), min(M, (TIndex)128)>>>(buf, out, buf, cos, M, N);
    cudaDeviceSynchronize();
    NegateOddEntriesKernel<<<CeilDiv(M * (N / 2), (TIndex)256), min((TIndex)256, M * (N / 2))>>>(out, M, N);
    cudaDeviceSynchronize();
}

/**
 * 2D DCT of an M x N row-major matrix: transform the rows, transpose,
 * transform the (former) columns, transpose back.
 * @param  cos0  length M - 1 cosines for the M-point transform
 * @param  cos1  length N - 1 cosines for the N-point transform
 */
template <typename TValue, typename TIndex = unsigned> __host__ inline void DCT2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos0, const TValue *cos1, TIndex M, TIndex N)
{
    BatchDCT2D(mtx, out, buf, cos1, M, N);
    // Fix: MatTransposeV2 takes the INPUT shape (rows, cols). 'out' is M x N
    // here; the previous call passed (N, M), scrambling non-square matrices.
    MatTransposeV2(out, buf, M, N);
    cudaDeviceSynchronize();
    BatchDCT2D(buf, buf, out, cos0, N, M);
    // 'buf' is N x M after the column-direction pass; transpose back to M x N.
    MatTransposeV2(buf, out, N, M);
    cudaDeviceSynchronize();
}

/**
 * 2D inverse DCT of an M x N row-major matrix: inverse-transform the rows,
 * transpose, inverse-transform the (former) columns, transpose back.
 * @param  cos0  length M - 1 cosines for the M-point inverse transform
 * @param  cos1  length N - 1 cosines for the N-point inverse transform
 */
template <typename TValue, typename TIndex = unsigned>
__host__ inline void IDCT2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos0, const TValue *cos1, TIndex M, TIndex N)
{
    BatchIDCT2D(mtx, out, buf, cos1, M, N);
    // Fix: MatTransposeV2 takes the INPUT shape (rows, cols). 'out' is M x N
    // here; the previous call passed (N, M), scrambling non-square matrices.
    MatTransposeV2(out, buf, M, N);
    cudaDeviceSynchronize();
    BatchIDCT2D(buf, buf, out, cos0, N, M);
    // 'buf' is N x M after the column-direction pass; transpose back to M x N.
    MatTransposeV2(buf, out, N, M);
    cudaDeviceSynchronize();
}

/**
 * 2D DST of an M x N row-major matrix: transform the rows, transpose,
 * transform the (former) columns, transpose back.
 * @param  cos0  length M - 1 cosines for the M-point transform
 * @param  cos1  length N - 1 cosines for the N-point transform
 */
template <typename TValue, typename TIndex = unsigned> __host__ inline void DST2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos0, const TValue *cos1, TIndex M, TIndex N)
{
    BatchDST2D(mtx, out, buf, cos1, M, N);
    // Fix: MatTransposeV2 takes the INPUT shape (rows, cols). 'out' is M x N
    // here; the previous call passed (N, M), scrambling non-square matrices.
    MatTransposeV2(out, buf, M, N);
    cudaDeviceSynchronize();
    BatchDST2D(buf, buf, out, cos0, N, M);
    // 'buf' is N x M after the column-direction pass; transpose back to M x N.
    MatTransposeV2(buf, out, N, M);
    cudaDeviceSynchronize();
}

/**
 * 2D inverse DST of an M x N row-major matrix: inverse-transform the rows,
 * transpose, inverse-transform the (former) columns, transpose back.
 * @param  cos0  length M - 1 cosines for the M-point inverse transform
 * @param  cos1  length N - 1 cosines for the N-point inverse transform
 */
template <typename TValue, typename TIndex = unsigned>
__host__ inline void IDST2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos0, const TValue *cos1, TIndex M, TIndex N)
{
    BatchIDST2D(mtx, out, buf, cos1, M, N);
    // Fix: MatTransposeV2 takes the INPUT shape (rows, cols). 'out' is M x N
    // here; the previous call passed (N, M), scrambling non-square matrices.
    MatTransposeV2(out, buf, M, N);
    cudaDeviceSynchronize();
    BatchIDST2D(buf, buf, out, cos0, N, M);
    // 'buf' is N x M after the column-direction pass; transpose back to M x N.
    MatTransposeV2(buf, out, N, M);
    cudaDeviceSynchronize();
}

/**
 * 2D mixed transform: DST along the rows, then DCT along the columns.
 * @param  cos0  length M - 1 cosines for the M-point DCT
 * @param  cos1  length N - 1 cosines for the N-point DST
 */
template <typename TValue, typename TIndex = unsigned>
__host__ inline void DSCT2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos0, const TValue *cos1, TIndex M, TIndex N)
{
    BatchDST2D(mtx, out, buf, cos1, M, N);
    // Fix: MatTransposeV2 takes the INPUT shape (rows, cols). 'out' is M x N
    // here; the previous call passed (N, M), scrambling non-square matrices.
    MatTransposeV2(out, buf, M, N);
    cudaDeviceSynchronize();
    // Fix: was 'BatchDCT2d' (lowercase d) — an undefined identifier that fails
    // template instantiation.
    BatchDCT2D(buf, buf, out, cos0, N, M);
    // 'buf' is N x M after the column-direction pass; transpose back to M x N.
    MatTransposeV2(buf, out, N, M);
    cudaDeviceSynchronize();
}

/**
 * 2D mixed inverse transform (inverse of DSCT2D): x0-dropping inverse DST
 * along the rows, then inverse DCT along the columns.
 * @param  cos0  length M - 1 cosines for the M-point inverse DCT
 * @param  cos1  length N - 1 cosines for the N-point inverse DST
 */
template <typename TValue, typename TIndex = unsigned>
__host__ inline void IDSCT2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos0, const TValue *cos1, TIndex M, TIndex N)
{
    BatchIDXST2D(mtx, out, buf, cos1, M, N);
    // Fix: MatTransposeV2 takes the INPUT shape (rows, cols). 'out' is M x N
    // here; the previous call passed (N, M), scrambling non-square matrices.
    MatTransposeV2(out, buf, M, N);
    cudaDeviceSynchronize();
    BatchIDCT2D(buf, buf, out, cos0, N, M);
    // 'buf' is N x M after the column-direction pass; transpose back to M x N.
    MatTransposeV2(buf, out, N, M);
    cudaDeviceSynchronize();
}

/**
 * 2D mixed transform: DCT along the rows, then DST along the columns.
 * @param  cos0  length M - 1 cosines for the M-point DST
 * @param  cos1  length N - 1 cosines for the N-point DCT
 */
template <typename TValue, typename TIndex = unsigned>
__host__ inline void DCST2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos0, const TValue *cos1, TIndex M, TIndex N)
{
    BatchDCT2D(mtx, out, buf, cos1, M, N);
    // Fix: MatTransposeV2 takes the INPUT shape (rows, cols). 'out' is M x N
    // here; the previous call passed (N, M), scrambling non-square matrices.
    MatTransposeV2(out, buf, M, N);
    cudaDeviceSynchronize();
    BatchDST2D(buf, buf, out, cos0, N, M);
    // 'buf' is N x M after the column-direction pass; transpose back to M x N.
    MatTransposeV2(buf, out, N, M);
    cudaDeviceSynchronize();
}

/**
 * 2D mixed inverse transform (inverse of DCST2D): inverse DCT along the rows,
 * then x0-dropping inverse DST along the columns.
 * @param  cos0  length M - 1 cosines for the M-point inverse DST
 * @param  cos1  length N - 1 cosines for the N-point inverse DCT
 */
template <typename TValue, typename TIndex = unsigned>
__host__ inline void IDCST2D(TValue *mtx, TValue *out, TValue *buf, const TValue *cos0, const TValue *cos1, TIndex M, TIndex N)
{
    BatchIDCT2D(mtx, out, buf, cos1, M, N);
    // Fix: MatTransposeV2 takes the INPUT shape (rows, cols). 'out' is M x N
    // here; the previous call passed (N, M), scrambling non-square matrices.
    MatTransposeV2(out, buf, M, N);
    cudaDeviceSynchronize();
    BatchIDXST2D(buf, buf, out, cos0, N, M);
    // 'buf' is N x M after the column-direction pass; transpose back to M x N.
    MatTransposeV2(buf, out, N, M);
    cudaDeviceSynchronize();
}

}  //!< end of namespace dct_op
}  //!< end of namespace fabric_space
#endif
