#include "math.h"

#include "hip/hip_ext.h"
#include "hip/hip_runtime.h"

// 2 x 64-bit integer vector; holds a 128-bit buffer resource descriptor
// for the raw buffer_load path (base address in .x, range/flag words in .y).
typedef long BB __attribute__((ext_vector_type(2)));
// 4-wide / 2-wide float vectors (clang ext_vector_type extension).
typedef float float4_ __attribute__((ext_vector_type(4)));
typedef float float2_ __attribute__((ext_vector_type(2)));


// When defined, main() runs the naive reference kernel and compares results.
#define VERIFY

// When defined, main() times BENCH_LOOP extra launches with hip events.
#define BENCH
#define BENCH_LOOP 0

// LDS (shared memory) staging-buffer geometry: two panels (weights then
// outputs) of LDS_H x LDS_W floats each; LDS_OFFSET is the start of the
// second panel. THREAD_TILE columns are covered per 16-thread group.
#define THREAD_TILE 4
#define LDS_H (32)
#define LDS_W (16 * THREAD_TILE)
#define LDS_SIZE (2 * LDS_H * LDS_W)
#define LDS_OFFSET (LDS_H * LDS_W)

// Views one 128-bit register quad either as a single float4_ or as two
// float2_ halves, matching the operand shapes fed to v_mmac_16x16x8_f32
// in FP16_MMAC below.
union RegisterUnion
{
    float4_ vector4;
    struct
    {
        float2_ vector_front; // low two lanes of vector4
        float2_ vector_rear;  // high two lanes of vector4
    };
};

// Zero all four lanes of a float4_ accumulator.
// NOTE(review): 0.0 is a double literal; it converts exactly to 0.0f here.
#define CLEAR_ACC(x4) \
    x4.x = 0.0;       \
    x4.y = 0.0;       \
    x4.z = 0.0;       \
    x4.w = 0.0;

// Compute the byte offset (index << 2 converts floats to bytes) of this
// thread's weight element inside the RSKxCx-packed weight tensor, or -1
// as an out-of-range sentinel when the current k/c slide falls outside
// the tensor (the buffer descriptor turns the -1 address into a dropped
// access -- see the 0xFFFFFFFE range word set up in the kernel).
#define INFERENCE_GLOB_WEI_OFF                                                                \
    glob_wei_off = (base_k + slide_wei_k) < k && slide_c < c                                  \
                       ? (k * c * base_rs + slide_k_tile * 32 * c + \
                          slide_c * 32 + slide_wei_k)                                         \
                             << 2                                                             \
                       : -1;

// Compute the byte offset of this thread's output element inside the
// NCxHWx-packed output tensor, or -1 when the mapped output coordinate is
// out of bounds. The %u / %v checks keep only positions that land exactly
// on the stride grid (slide_oh/slide_ow are the divided coordinates).
#define INFERENCE_GLOB_OUT_OFF                                                                     \
    glob_out_off = base_slide_hw < hw_stride && slide_oh >= 0 && slide_oh < oh && slide_ow >= 0 && \
                           slide_ow < ow && slide_oh_flag%u==0 && slide_ow_flag%v==0  &&(base_k + slide_out_k) < k\
                       ? (slide_n * ohwk_stride + slide_k_tile * 32 * ohw_stride +                 \
                          (slide_oh * ow + slide_ow) * 32 + slide_out_k)                           \
                             << 2                                                                  \
                       : -1;

// Scatter the 8 weight floats and 8 output floats this thread fetched from
// global memory into the shared-memory staging panels (weights at offset 0,
// outputs at LDS_OFFSET), consecutive values one LDS_W row apart.
#define STORE_TO_LDS                                       \
    sm[sm_store_wei_off]                = glob_wei_val0.x; \
    sm[sm_store_wei_off + LDS_W]        = glob_wei_val0.y; \
    sm[sm_store_wei_off + 2 * LDS_W]    = glob_wei_val0.z; \
    sm[sm_store_wei_off + 3 * LDS_W]    = glob_wei_val0.w; \
    sm[sm_store_wei_off + 4 * LDS_W]    = glob_wei_val1.x; \
    sm[sm_store_wei_off + 5 * LDS_W]    = glob_wei_val1.y; \
    sm[sm_store_wei_off + 6 * LDS_W]    = glob_wei_val1.z; \
    sm[sm_store_wei_off + 7 * LDS_W]    = glob_wei_val1.w; \
    sm[sm_store_out_off]                = glob_out_val0.x; \
    sm[sm_store_out_off + LDS_W]        = glob_out_val0.y; \
    sm[sm_store_out_off + 2 * LDS_W]    = glob_out_val0.z; \
    sm[sm_store_out_off + 3 * LDS_W]    = glob_out_val0.w; \
    sm[sm_store_out_off + 4 * LDS_W]    = glob_out_val1.x; \
    sm[sm_store_out_off + 5 * LDS_W]    = glob_out_val1.y; \
    sm[sm_store_out_off + 6 * LDS_W]    = glob_out_val1.z; \
    sm[sm_store_out_off + 7 * LDS_W]    = glob_out_val1.w;

// Issue two LDS matrix reads (ds_read_m32x8_b32 -- a vendor-specific DCU
// instruction, not standard GCN; confirm against the target ISA) loading a
// 32x8 fragment each into a.vector4 and b.vector4 from byte offsets
// offset_a / offset_b. No lgkmcnt wait is issued here; FP16_MMAC waits
// before consuming the data.
__device__ void PrefetchLDS(RegisterUnion& a, RegisterUnion& b, int offset_a, int offset_b)
{
    asm volatile("ds_read_m32x8_b32 %0, %1 offset:0\n\t" : "+v"(a.vector4), "+v"(offset_a));
    asm volatile("ds_read_m32x8_b32 %0, %1 offset:0\n\t" : "+v"(b.vector4), "+v"(offset_b));
}

// Four matrix-core multiply-accumulates (v_mmac_16x16x8_f32) combining the
// front/rear halves of the two fragments into the four accumulators; the
// leading s_waitcnt lgkmcnt(0) fences the preceding PrefetchLDS reads.
// NOTE(review): the macro name says FP16 but the operands are float2_
// halves and the instruction is *_f32 -- the name appears stale.
#define FP16_MMAC(a, b, c0, c1, c2, c3)                       \
    asm volatile("s_waitcnt lgkmcnt(0)\n\t"                   \
                 "v_mmac_16x16x8_f32 %0, %1, %2, %0\n\t"      \
                 : "+v"(c0)                                   \
                 : "v"(a.vector_front), "v"(b.vector_front)); \
    asm volatile("v_mmac_16x16x8_f32 %0, %1, %2, %0\n\t"      \
                 : "+v"(c1)                                   \
                 : "v"(a.vector_rear), "v"(b.vector_front));  \
    asm volatile("v_mmac_16x16x8_f32 %0, %1, %2, %0\n\t"      \
                 : "+v"(c2)                                   \
                 : "v"(a.vector_front), "v"(b.vector_rear));  \
    asm volatile("v_mmac_16x16x8_f32 %0, %1, %2, %0\n\t"      \
                 : "+v"(c3)                                   \
                 : "v"(a.vector_rear), "v"(b.vector_rear));

// Raw buffer load of 4 dwords (one float4) from the buffer described by
// `addr` at byte offset `offset`. An offset of -1 (set by the
// INFERENCE_*_OFF macros) exceeds the descriptor's range word and the
// load is dropped, so out-of-bounds lanes read garbage that is never
// selected. Caller must s_waitcnt vmcnt(...) before using gA.
__device__ void BufferLoadx4(float4& gA, int& offset, BB& addr)
{
    asm volatile("buffer_load_dwordx4 %0,%1,%2,0, offen offset:0\n"
                 : "=v"(gA), "+v"(offset), "+s"(addr));
}

// Backward-data convolution for NCxHWx (x = 32) packed layout: accumulates
// weight * output products into input_device. main() verifies the result
// against naive_conv_bwd_nchw, which computes the input gradient.
// Launch shape (see main): grid = (ceil(h*w/64), ceil(c/64), n),
// 256 threads per block. Weights are expected packed by TransToRSKxCx,
// outputs by TransToNCxHWx.
// Parameters: n/c/h/w input dims, k output channels, oh/ow output spatial
// dims, r/s filter dims, p/q padding, u/v strides, l/j dilations.
// NOTE(review): the name advertises rs3x3/stride1 but r, s, u, v arrive as
// parameters -- confirm which configurations are actually supported.
extern "C" __global__ void conv2d_fp32_rs3x3_stride1_ncxhwx(float* input_device,
                                                            float* weight_device,
                                                            float* output_device,
                                                            int n,
                                                            int c,
                                                            int h,
                                                            int w,
                                                            int k,
                                                            int oh,
                                                            int ow,
                                                            int r,
                                                            int s,
                                                            int p,
                                                            int q,
                                                            int u,
                                                            int v,
                                                            int l,
                                                            int j)
{
    // Two staging panels: weights at [0, LDS_OFFSET), outputs at
    // [LDS_OFFSET, LDS_SIZE). See STORE_TO_LDS / PrefetchLDS.
    __shared__ float sm[LDS_SIZE];

    // Build 128-bit buffer resource descriptors (base pointer in .x plus
    // hardware flag bits; range/format words in .y) for buffer_load_dwordx4.
    // NOTE(review): the 0x2<<16 / 0x20000 / 0xFFFFFFFE constants are
    // target-specific descriptor fields -- verify against the ISA manual.
    BB global_weight;
    global_weight.x            = (long)weight_device;
    global_weight.x            = (global_weight.x | (((long)(0x2 << 16)) << 32));
    global_weight.y            = (((((long)0x20000) << 32) | 0xFFFFFFFE));
    BB global_weight_alias_in  = global_weight;
    BB global_weight_alias_out = global_weight;
    BB global_output;
    global_output.x            = (long)output_device;
    global_output.x            = (global_output.x | (((long)(0x2 << 16)) << 32));
    global_output.y            = (((((long)0x20000) << 32) | 0xFFFFFFFE));
    BB global_output_alias_in  = global_output;
    BB global_output_alias_out = global_output;
    BB global_input;
    global_input.x = (long)input_device;
    global_input.x = (global_input.x | (((long)(0x2 << 16)) << 32));
    global_input.y = (((((long)0x20000) << 32) | 0xFFFFFFFE));

    // Frequently used strides (in elements, not bytes).
    int t_id         = threadIdx.x;
    int wei_k_stride = r * s * c;
    int r_stride     = s * c;
    int hw_stride    = h * w;
    int hwc_stride   = h * w * c;
    int nhw          = n * h * w;

    int out_k_stride = 1;
    int ohwk_stride  = oh * ow * k;
    int ohw_stride   = oh * ow;

    // Block mapping: z -> batch, y -> 64-wide channel tile, x -> 64-wide
    // spatial (h*w) tile.
    int slide_n = blockIdx.z;

    int base_k = 0;
    int base_r = 0;
    int base_s = 0;

    // Per-thread weight-fetch coordinates: 64 channels per block, each
    // thread loading 8 consecutive k values.
    int base_slide_c = blockIdx.y * 64;
    int slide_c      = base_slide_c + t_id / 4;
    int slide_wei_k  = t_id % 4 * 8;
    // int slide_c_tile = slide_c / 32;
    // int slide_c_res  = slide_c % 32;

    // Per-thread output-fetch coordinates within the spatial tile.
    int base_slide_hw = blockIdx.x * 64 + t_id / 4;
    int base_h        = base_slide_hw / w;
    int base_w        = base_slide_hw % w;
    int slide_out_k   = t_id % 4 * 8;
    int slide_k_tile  = 0;

    // LDS store addresses for this thread's 8+8 fetched values.
    int sm_store_wei_off = t_id % 4 * 8 * LDS_W + (t_id / 4) ;
    int sm_store_out_off = t_id % 4 * 8 * LDS_W + (t_id / 4) + LDS_OFFSET;

    // LDS load addresses (in bytes, hence the * 4) for the matrix reads.
    RegisterUnion wei, out;
    int sm_load_wei_base = t_id % 128 / 64 * 32;
    int sm_load_out_base = t_id / 128 * 32;
    int sm_thread_off    = t_id % 64 / 8 * LDS_W + t_id % 64 % 8 * 4;
    int sm_load_wei_off  = (sm_load_wei_base + sm_thread_off) * 4;
    int sm_load_out_off  = (sm_load_out_base + sm_thread_off + LDS_OFFSET) * 4;

    // Four accumulator quads: {hw half 0/1} x {channel half 0/1}.
    float4_ hw0_c0, hw0_c1, hw1_c0, hw1_c1;
    CLEAR_ACC(hw0_c0);
    CLEAR_ACC(hw0_c1);
    CLEAR_ACC(hw1_c0);
    CLEAR_ACC(hw1_c1);

    int glob_wei_off, glob_out_off;
    float4 glob_wei_val0, glob_wei_val1, glob_out_val0, glob_out_val1;

    // Walk every filter position (r, s); base_r/base_s track the 2D
    // coordinate of the flattened base_rs counter.
    base_s = -1;
#pragma unroll 1
    for(int base_rs = 0; base_rs < r * s; ++base_rs)
    {
        slide_k_tile = 0;
        base_k       = 0;
        base_s++;
        if(base_s == s)
        {
            base_s = 0;
            base_r++;
        }
        // Output coordinate implied by this input pixel and filter tap;
        // the %u / %v tests in INFERENCE_GLOB_OUT_OFF discard positions
        // that do not land on the stride grid.
        int slide_oh_flag = base_h - base_r*l + p;
        int slide_ow_flag = base_w - base_s*j + q;

        int slide_oh = slide_oh_flag / u;
        int slide_ow = slide_ow_flag / v;
        // Prologue: load the first k-tile of weights and outputs straight
        // into LDS, then prefetch the first fragments.
        INFERENCE_GLOB_WEI_OFF;
        BufferLoadx4(glob_wei_val0, glob_wei_off, global_weight_alias_out);
        glob_wei_off = glob_wei_off != -1 ? glob_wei_off + 16 : -1;
        BufferLoadx4(glob_wei_val1, glob_wei_off, global_weight_alias_out);

        INFERENCE_GLOB_OUT_OFF;
        BufferLoadx4(glob_out_val0, glob_out_off, global_output_alias_out);
        glob_out_off = glob_out_off != -1 ? glob_out_off + 16 : -1;
        BufferLoadx4(glob_out_val1, glob_out_off, global_output_alias_out);
        asm volatile("s_waitcnt vmcnt(0)\n\t");

        __syncthreads();
        STORE_TO_LDS;
        __syncthreads();
        PrefetchLDS(wei, out, sm_load_wei_off, sm_load_out_off);
        base_k += 32;

        // Software-pipelined main loop over 32-wide k tiles: the next
        // tile's global loads are issued first, then the current LDS tile
        // is consumed by 4 MMAC/prefetch pairs, then the new data is
        // committed to LDS behind a barrier.
        while(base_k < k)
        {
            slide_k_tile++;
            INFERENCE_GLOB_WEI_OFF;
            BufferLoadx4(glob_wei_val0, glob_wei_off, global_weight_alias_in);
            glob_wei_off = glob_wei_off != -1 ? glob_wei_off + 16 : -1;
            BufferLoadx4(glob_wei_val1, glob_wei_off, global_weight_alias_in);

            INFERENCE_GLOB_OUT_OFF;
            BufferLoadx4(glob_out_val0, glob_out_off, global_output_alias_in);
            glob_out_off = glob_out_off != -1 ? glob_out_off + 16 : -1;
            BufferLoadx4(glob_out_val1, glob_out_off, global_output_alias_in);

            FP16_MMAC(out, wei, hw0_c0, hw1_c0, hw0_c1, hw1_c1);
            PrefetchLDS(wei, out, sm_load_wei_off + 8 * LDS_W * 4, sm_load_out_off + 8 * LDS_W * 4);
            FP16_MMAC(out, wei, hw0_c0, hw1_c0, hw0_c1, hw1_c1);
            PrefetchLDS(
                wei, out, sm_load_wei_off + 16 * LDS_W * 4, sm_load_out_off + 16 * LDS_W * 4);
            FP16_MMAC(out, wei, hw0_c0, hw1_c0, hw0_c1, hw1_c1);
            PrefetchLDS(
                wei, out, sm_load_wei_off + 24 * LDS_W * 4, sm_load_out_off + 24 * LDS_W * 4);
            FP16_MMAC(out, wei, hw0_c0, hw1_c0, hw0_c1, hw1_c1);

            asm volatile("s_waitcnt vmcnt(0)\n\t");
            __syncthreads();
            STORE_TO_LDS;
            __syncthreads();

            PrefetchLDS(wei, out, sm_load_wei_off, sm_load_out_off);
            base_k += 32;
        }
        // Epilogue: drain the last LDS tile (no more global loads pending).
        FP16_MMAC(out, wei, hw0_c0, hw1_c0, hw0_c1, hw1_c1);
        PrefetchLDS(wei, out, sm_load_wei_off + 8 * LDS_W * 4, sm_load_out_off + 8 * LDS_W * 4);
        FP16_MMAC(out, wei, hw0_c0, hw1_c0, hw0_c1, hw1_c1);
        PrefetchLDS(wei, out, sm_load_wei_off + 16 * LDS_W * 4, sm_load_out_off + 16 * LDS_W * 4);
        FP16_MMAC(out, wei, hw0_c0, hw1_c0, hw0_c1, hw1_c1);
        PrefetchLDS(wei, out, sm_load_wei_off + 24 * LDS_W * 4, sm_load_out_off + 24 * LDS_W * 4);
        FP16_MMAC(out, wei, hw0_c0, hw1_c0, hw0_c1, hw1_c1);
    }

    // Write-back: each thread owns one (channel, pixel) lane pair of the
    // 64x64 tile; accumulator lanes are 4 channels apart in the NCxHWx
    // layout (hence the 4 * hw_stride steps).
    int in_batch_base = slide_n * hwc_stride;
    int slide_in_c    = blockIdx.y * 64 + t_id % 64 / 16 + t_id / 64 % 2 * 32;
    int slide_in_hw   = blockIdx.x * 64 + t_id % 16 + t_id / 128 * 32;

    if(slide_in_hw >= hw_stride)
    {
        return;
    }

    if(slide_in_c >= c)
    {
        return;
    }

    int store_base_off = in_batch_base + slide_in_c * hw_stride + slide_in_hw;

    input_device[store_base_off]                  = hw0_c0.x;
    input_device[store_base_off + 4 * hw_stride]  = hw0_c0.y;
    input_device[store_base_off + 8 * hw_stride]  = hw0_c0.z;
    input_device[store_base_off + 12 * hw_stride] = hw0_c0.w;

    input_device[store_base_off + 16 * hw_stride] = hw0_c1.x;
    input_device[store_base_off + 20 * hw_stride] = hw0_c1.y;
    input_device[store_base_off + 24 * hw_stride] = hw0_c1.z;
    input_device[store_base_off + 28 * hw_stride] = hw0_c1.w;

    // Second spatial half of the tile, 16 pixels over; guarded separately
    // because the tile can overhang the tensor edge.
    if(slide_in_hw + 16 < hw_stride)
    {
        store_base_off += 16;
        input_device[store_base_off]                  = hw1_c0.x;
        input_device[store_base_off + 4 * hw_stride]  = hw1_c0.y;
        input_device[store_base_off + 8 * hw_stride]  = hw1_c0.z;
        input_device[store_base_off + 12 * hw_stride] = hw1_c0.w;

        input_device[store_base_off + 16 * hw_stride] = hw1_c1.x;
        input_device[store_base_off + 20 * hw_stride] = hw1_c1.y;
        input_device[store_base_off + 24 * hw_stride] = hw1_c1.z;
        input_device[store_base_off + 28 * hw_stride] = hw1_c1.w;
    }
}

// Explicit-conversion helper usable from both host and device code:
// casts `val` from src_data_t to dst_data_t via static_cast.
template <typename src_data_t, typename dst_data_t>
inline __device__ __host__ dst_data_t cast_to(const src_data_t& val)
{
    const dst_data_t converted = static_cast<dst_data_t>(val);
    return converted;
}

// Naive reference kernel: backward-data convolution in plain NCHW layout.
// Writes the input gradient p_in from output gradient p_out and weights
// p_wei. One workgroup (256 threads, grid-stride over tid) handles the
// hi*wi pixels of one (group, batch, channel) slice, so the expected grid
// size is group * n * c_per_group blocks.
// Parameter roles (inferred from the index arithmetic; main passes
// u,v,l,j,p,q,r,s here): sy/sx strides, dy/dx dilations, py/px padding,
// fy/fx filter dims.
__global__ void naive_conv_bwd_nchw(float* p_in,
                                    const float* p_wei,
                                    const float* p_out,
                                    int hi,
                                    int wi,
                                    int n,
                                    int k_per_group,
                                    int c_per_group,
                                    int ho,
                                    int wo,
                                    int sy,
                                    int sx,
                                    int dy,
                                    int dx,
                                    int py,
                                    int px,
                                    int fy,
                                    int fx,
                                    int group)
{
    /*
     *  need to compute total input pixel: `group * n * c_per_group * hi * wi`.
     *  to distribute this workload, let one workgroup compute `hi * wi` pixel,
     *  hence need `group * n * c_per_group` workgroups (grid_size).
     */
    int k             = k_per_group * group;
    int c             = c_per_group * group;
    int thread_length = hi * wi;
    // Decode this block's (group, batch, channel) slice from blockIdx.x.
    int bid           = blockIdx.x;
    int ic            = bid % c_per_group;
    int in            = (bid / c_per_group) % n;
    int ig            = bid / (n * c_per_group);

    // Advance the base pointers to this slice so the inner loops can use
    // slice-local offsets.
    p_in += static_cast<size_t>(in) * c * hi * wi +
            static_cast<size_t>(ig) * c_per_group * hi * wi + static_cast<size_t>(ic) * hi * wi;
    p_wei += static_cast<size_t>(ig) * k_per_group * c_per_group * fy * fx +
             static_cast<size_t>(ic) * fy * fx;
    p_out +=
        static_cast<size_t>(in) * k * ho * wo + static_cast<size_t>(ig) * k_per_group * ho * wo;

    // Grid-stride over the slice's pixels with a fixed 256-thread block.
    for(int tid = threadIdx.x; tid < thread_length; tid += 256)
    {
        int ihi = tid / wi;
        int iwi = tid % wi;

        float value = .0f;

        for(int ik = 0; ik < k_per_group; ik++)
        {
            for(int iy = 0; iy < fy; iy++)
            {
                // Map input row back to the output row that produced it;
                // valid only when the offset is non-negative, divisible by
                // the stride, and inside [0, ho).
                int valid_h = 1;
                int cur_ho  = ihi + py - dy * iy;
                if(cur_ho < 0 || cur_ho % sy)
                    valid_h &= 0;
                cur_ho /= sy;
                if(cur_ho >= ho)
                    valid_h &= 0;
                for(int ix = 0; ix < fx; ix++)
                {
                    // Same mapping for the column coordinate.
                    int valid_w = 1;
                    int cur_wo  = iwi + px - dx * ix;
                    if(cur_wo < 0 || cur_wo % sx)
                        valid_w &= 0;
                    cur_wo /= sx;
                    if(cur_wo >= wo)
                        valid_w &= 0;

                    if(valid_h & valid_w)
                    {
                        size_t o_idx = static_cast<size_t>(ik) * ho * wo +
                                       static_cast<size_t>(cur_ho) * wo +
                                       static_cast<size_t>(cur_wo);
                        size_t f_idx = static_cast<size_t>(ik) * c_per_group * fy * fx +
                                       static_cast<size_t>(iy) * fx + static_cast<size_t>(ix);
                        value += float(p_out[o_idx]) * float(p_wei[f_idx]);
                    }
                }
            }
        }
        size_t i_idx = static_cast<size_t>(ihi) * wi + static_cast<size_t>(iwi);
        p_in[i_idx]  = value;
    }
}

// Returns an absolute comparison tolerance scaled to the magnitude of tmp:
// roughly 1e-5 * 10^(integer digits) for |tmp| >= 1, and
// 1e-2 / 10^(leading fractional zeros + 1) for 0 < |tmp| < 1.
// tmp == 0 yields the base tolerance 1e-5.
float getPrecisionFP32(float tmp)
{
    // Guard: NaN/Inf would loop forever below, and the float->int cast on
    // them (or on huge magnitudes) is undefined behavior.
    if(!isfinite(tmp))
        return 1.0e-5f;
    if(fabsf(tmp) >= 2.0e9f)
        tmp = tmp < 0.0f ? -2.0e9f : 2.0e9f; // clamp so (int) stays defined
    int tmpInt = (int)tmp;
    float eNum = 1.0e-5f; // float literal: the old 1.0e-5 double converted
                          // to the same value, but stay in float throughout
    if(tmpInt != 0)
    {
        // |tmp| >= 1: widen the tolerance one decade per integer digit.
        while(tmpInt != 0)
        {
            tmpInt = tmpInt / 10;
            eNum *= 10.0f;
        }
    }
    else
    {
        if(tmp == 0.0f)
            return eNum;
        // 0 < |tmp| < 1: tighten the tolerance one decade per leading zero.
        eNum = 1.0e-2f;
        while(tmpInt == 0)
        {
            tmp *= 10.0f;
            tmpInt = (int)(tmp);
            eNum /= 10.0f;
        }
    }
    return eNum;
}

// Allocate an x*y float matrix on the host and fill it: random values in
// [0, 1] when is_rand is true, otherwise the constant `pad`. (Compiling
// with -DINT makes the random path produce 1, 2, 3, ... instead.)
template <bool is_rand>
void InitHostMatrix(float** src, int x, int y, float pad)
{
    const int total = x * y;
    float* buf      = (float*)malloc(total * sizeof(float));
    for(int idx = 0; idx < total; ++idx)
    {
#ifdef INT
        buf[idx] = is_rand ? float(idx + 1) : pad;
#else
        buf[idx] = is_rand ? (rand() / (float)RAND_MAX) : pad;
#endif
    }
    *src = buf;
}

// Repack an NCHW tensor into NC/32HW32 ("NCxHWx", x = 32) layout: channels
// are grouped into 32-wide tiles and the in-tile channel index becomes the
// fastest-varying dimension. Assumes c is a multiple of 32 (otherwise the
// destination offsets exceed the n*c*h*w extent).
void TransToNCxHWx(float* src, int n, int c, int h, int w, float* dst)
{
    const int hw  = h * w;
    const int chw = c * hw;
    for(int in = 0; in < n; ++in)
    {
        for(int ic = 0; ic < c; ++ic)
        {
            const int tile       = ic / 32;
            const int lane       = ic % 32;
            const float* src_row = src + in * chw + ic * hw;
            float* dst_base      = dst + in * chw + tile * 32 * hw + lane;
            for(int pix = 0; pix < hw; ++pix)
            {
                dst_base[pix * 32] = src_row[pix];
            }
        }
    }
}

// Repack KCRS weights into (R, S, K/32, C, 32) order: spatial position
// (r, s) outermost, then 32-wide k tiles with the channel index between the
// tile index and the k lane. Assumes k is a multiple of 32.
void TransToRSKxCx(float* src, int k, int c, int r, int s, float* dst)
{
    const int rs = r * s;
    const int kc = k * c;
    for(int ik = 0; ik < k; ++ik)
    {
        const int tile = ik / 32;
        const int lane = ik % 32;
        for(int ic = 0; ic < c; ++ic)
        {
            const float* src_rs = src + ik * c * rs + ic * rs;
            float* dst_rs       = dst + tile * 32 * c + ic * 32 + lane;
            for(int irs = 0; irs < rs; ++irs)
            {
                dst_rs[irs * kc] = src_rs[irs];
            }
        }
    }
}

// Repack KCRS weights into (K/32, R, S, C, 32) order: 32-wide k tiles
// outermost, then spatial position, then channel, with the k lane fastest.
// Assumes k is a multiple of 32.
void TransToKxRSCx(float* src, int k, int c, int r, int s, float* dst)
{
    const int rs = r * s;
    for(int ik = 0; ik < k; ++ik)
    {
        const int tile = ik / 32;
        const int lane = ik % 32;
        for(int ic = 0; ic < c; ++ic)
        {
            for(int irs = 0; irs < rs; ++irs)
            {
                dst[(tile * rs * c + irs * c + ic) * 32 + lane] =
                    src[ik * c * rs + ic * rs + irs];
            }
        }
    }
}

// Repack KCRS weights into (K/32, C, R, S, 32) order: 32-wide k tiles
// outermost, then channel, then spatial position, with the k lane fastest.
// Assumes k is a multiple of 32.
void TransToKxCRSx(float* src, int k, int c, int r, int s, float* dst)
{
    const int rs = r * s;
    for(int ik = 0; ik < k; ++ik)
    {
        const int tile = ik / 32;
        const int lane = ik % 32;
        for(int ic = 0; ic < c; ++ic)
        {
            for(int irs = 0; irs < rs; ++irs)
            {
                dst[(tile * rs * c + ic * rs + irs) * 32 + lane] =
                    src[(ik * c + ic) * rs + irs];
            }
        }
    }
}
// Driver: builds random conv tensors, runs the naive NCHW reference
// (under VERIFY), repacks tensors for the NCxHWx kernel, launches it,
// optionally benchmarks it (BENCH / BENCH_LOOP), and compares results.
int main(int argc, char** argv)
{
    // 13 positional arguments are consumed below; validate up front instead
    // of reading past argv. (The previous guard was commented out and also
    // checked the wrong count.)
    if(argc < 14)
    {
        printf("input args: batch, channel, height, width, kernel, r, s, p, q, u, v, l, j.\n");
        return 0;
    }
    int batch   = atoi(argv[1]);
    int channel = atoi(argv[2]);
    int height  = atoi(argv[3]);
    int width   = atoi(argv[4]);
    int kernel  = atoi(argv[5]);
    int r       = atoi(argv[6]);  // filter height
    int s       = atoi(argv[7]);  // filter width
    int p       = atoi(argv[8]);  // padding h
    int q       = atoi(argv[9]);  // padding w
    int u       = atoi(argv[10]); // stride h
    int v       = atoi(argv[11]); // stride w
    int l       = atoi(argv[12]); // dilation h
    int j       = atoi(argv[13]); // dilation w
    int group   = 1;
    printf(
        "batch: %d, channel: %d, kernel: %d, H*W: %dx%d\n", batch, channel, kernel, height, width);
    if(batch <= 0 || channel <= 0 || kernel <= 0 || height <= 0 || width <= 0)
    {
        printf("input args invalid.\n");
        return 0;
    }
    // Effective (dilated) filter extents and the resulting output size.
    int r_l        = (r - 1) * l + 1;
    int s_l        = (s - 1) * j + 1;
    int out_size_h = (height - r_l + 2 * p) / u + 1;
    int out_size_w = (width - s_l + 2 * q) / v + 1;

    // Host buffers: *_base hold layout-transformed copies; m_input receives
    // the CPU reference result, m_input_gpu the device kernel result.
    float* m_kernel;
    float* m_kernel_base;
    float* m_input;
    float* m_input_tmp;
    float* m_output;
    float* m_output_base;
    float* m_input_gpu;

    float* input_device;
    float* input_device_base;
    float* output_device;
    float* kernel_device;

    InitHostMatrix<true>(&m_kernel, kernel, (channel / group) * r * s, 1.0);
    InitHostMatrix<true>(&m_kernel_base, kernel, (channel / group) * r * s, 1.0);
    InitHostMatrix<true>(&m_output, batch, kernel * out_size_h * out_size_w, 1.0);
    InitHostMatrix<true>(&m_output_base, batch, kernel * out_size_h * out_size_w, 1.0);
    InitHostMatrix<false>(&m_input, batch, channel * height * width, 0.0);
    InitHostMatrix<false>(&m_input_tmp, batch, channel * height * width, 0.0);
    InitHostMatrix<false>(&m_input_gpu, batch, channel * height * width, 0.0);

    hipMalloc((void**)&kernel_device, (kernel * (channel / group) * r * s) * sizeof(float));
    hipMalloc((void**)&input_device, (batch * channel * height * width) * sizeof(float));
    hipMalloc((void**)&input_device_base, (batch * channel * height * width) * sizeof(float));
    hipMalloc((void**)&output_device, (batch * kernel * out_size_h * out_size_w) * sizeof(float));

    hipMemcpy(kernel_device,
              m_kernel,
              (kernel * (channel / group) * r * s) * sizeof(float),
              hipMemcpyHostToDevice);
    hipMemcpy(input_device,
              m_input,
              (batch * channel * height * width) * sizeof(float),
              hipMemcpyHostToDevice);
    hipMemcpy(input_device_base,
              m_input,
              (batch * channel * height * width) * sizeof(float),
              hipMemcpyHostToDevice);
    hipMemcpy(output_device,
              m_output,
              (batch * kernel * out_size_h * out_size_w) * sizeof(float),
              hipMemcpyHostToDevice);
    printf("start verify compute!\n");
#ifdef VERIFY
    // Reference: one 256-thread block per (batch, channel) slice.
    dim3 grid(batch * channel);
    dim3 block(256);
    naive_conv_bwd_nchw<<<grid, block>>>(input_device_base,
                                         kernel_device,
                                         output_device,
                                         height,
                                         width,
                                         batch,
                                         kernel / group,
                                         channel / group,
                                         out_size_h,
                                         out_size_w,
                                         u,
                                         v,
                                         l,
                                         j,
                                         p,
                                         q,
                                         r,
                                         s,
                                         group);
    hipDeviceSynchronize();
    hipMemcpy(m_input,
              input_device_base,
              (batch * channel * height * width) * sizeof(float),
              hipMemcpyDeviceToHost);
#endif

    // Grid for the packed kernel: 64-wide spatial tiles (x), 64-wide
    // channel tiles (y), one batch per z.
    int blk_x = (height * width + 63) / 64;
    int blk_y = (channel / group + 63) / 64;
    int blk_z = batch;
    printf("Choose block dim3: %d, %d, %d\n", blk_x, blk_y, blk_z);
    // Repack outputs and weights into the layouts the packed kernel expects.
    TransToNCxHWx(m_output, batch, kernel, out_size_h, out_size_w, m_output_base);
    hipMemcpy(output_device,
              m_output_base,
              (batch * kernel * out_size_h * out_size_w) * sizeof(float),
              hipMemcpyHostToDevice);
    TransToRSKxCx(m_kernel, kernel, channel, r, s, m_kernel_base);
    hipMemcpy(kernel_device,
              m_kernel_base,
              (kernel * (channel / group) * r * s) * sizeof(float),
              hipMemcpyHostToDevice);
    hipLaunchKernelGGL(conv2d_fp32_rs3x3_stride1_ncxhwx,
                       dim3(blk_x, blk_y, blk_z),
                       dim3(256, 1, 1),
                       0,
                       0,
                       input_device,
                       kernel_device,
                       output_device,
                       batch,
                       channel,
                       height,
                       width,
                       kernel,
                       out_size_h,
                       out_size_w,
                       r,
                       s,
                       p,
                       q,
                       u,
                       v,
                       l,
                       j);
    hipDeviceSynchronize();
    hipMemcpy(m_input_gpu,
              input_device,
              batch * channel * height * width * sizeof(float),
              hipMemcpyDeviceToHost);
    printf("End DCU computing...\n");

#ifdef BENCH
    float time_all = 0;
    hipEvent_t start;
    hipEvent_t stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    for(int i = 0; i < BENCH_LOOP; ++i)
    {
        float time_elapsed = 0;
        hipEventRecord(start, 0);
        hipLaunchKernelGGL(conv2d_fp32_rs3x3_stride1_ncxhwx,
                           dim3(blk_x, blk_y, blk_z),
                           dim3(256, 1, 1),
                           0,
                           0,
                           input_device,
                           kernel_device,
                           output_device,
                           batch,
                           channel,
                           height,
                           width,
                           kernel,
                           out_size_h,
                           out_size_w,
                           r,
                           s,
                           p,
                           q,
                           u,
                           v,
                           l,
                           j);
        hipEventRecord(stop, 0);
        hipEventSynchronize(stop);
        hipEventElapsedTime(&time_elapsed, start, stop);
        time_all += time_elapsed;
    }
    // Guard the average: BENCH_LOOP may be 0, and dividing by it printed NaN.
    if(BENCH_LOOP > 0)
    {
        printf("DCU AVG is %f (ms)\n", time_all / BENCH_LOOP);
    }
    hipEventDestroy(start); // events were previously leaked
    hipEventDestroy(stop);
#endif

#ifdef VERIFY
    // Element-wise comparison with a magnitude-scaled tolerance; report the
    // first few mismatches with their decoded NCHW coordinates.
    int error = 0;
    for(int i = 0; i < batch * height * width * channel; ++i)
    {
        if(fabs(float(m_input[i]) - float(m_input_gpu[i])) > getPrecisionFP32(float(m_input[i])) ||
           isnan(float(m_input_gpu[i])))
        {
            error++;
            if(error < 10)
            {
                printf("Error: i:%d, batch: %d, c: %d, h: %d, w: %d, cpu:%f, "
                       "gpu:%f\n",
                       i,
                       i / (height * width * channel),
                       i % (height * width * channel) / (height * width),
                       i % (height * width * channel) % (height * width) / width,
                       i % (height * width * channel) % (height * width) % width,
                       float(m_input[i]),
                       float(m_input_gpu[i]));
            }
        }
    }
    printf("Error cnt: %d\n", error);
    for(int kkk = 0; kkk < 20; kkk++)
    {
        printf("%f\n", float(m_input_gpu[kkk]));
    }

#endif

    hipFree(input_device);
    hipFree(input_device_base);
    hipFree(output_device);
    hipFree(kernel_device);
    free(m_kernel);
    free(m_kernel_base); // was leaked
    free(m_input);
    free(m_input_tmp); // was leaked
    free(m_output);
    free(m_output_base); // was leaked
    free(m_input_gpu);
    return 0;
}