/**
 * @file conv_fprop_example.cpp
 * @copyright Copyright (c) 2021
 * @brief
 *           An example of convolution forward propagation.
 *       Note:
 *           For algo MCDNN_CONVOLUTION_FWD_ALGO_FFT_TILING only
 *           support stride=1, dilate=1, 1<= filter_size <=16, data type=F32, group=1,
 *           mode=MCDNN_CROSS_CORRELATION currently.
 * @version 0.1
 * @date 2022-06-20
 *
 */
#include <math.h>

#include <iostream>
#include <vector>

#include "example_utils.h"  // NOLINT
#include "mcdnn/mcdnn.h"
#include "mcr/mc_runtime.h"

int main(int argc, char **argv) {
    bool default_flag = false;
    if (argc == 1) {
        default_flag = true;
        printf(
            "You choose a default args to run!\n another usage: %s -b [value] -c [value] -h "
            "[value] -w [value] -o [value] -fh "
            "[value] -fw [value] -pad_h [value] -pad_w [value] -alpha [value] -beta [value]\n",
            argv[0]);
    } else {
        for (int i = 0; i < argc; ++i) {
            if (!argv[i]) continue;
            StripArgs(argv[i]);
        }
    }
    // input data param
    uint32_t batch;
    uint32_t in_channel;
    uint32_t data_h;
    uint32_t data_w;

    // input filter param
    uint32_t out_channel;
    uint32_t filter_h;
    uint32_t filter_w;
    float alpha;
    float beta;
    int pad[4];  // top bottom left right
    if (default_flag) {
        // input data param
        batch      = 1;
        in_channel = 4;
        data_h     = 16;
        data_w     = 16;

        // input filter param
        out_channel = 16;
        filter_h    = 5;
        filter_w    = 5;

        // conv param
        alpha = 2.5;
        beta  = 4.0;
        for (int i = 0; i < 4; ++i) {
            pad[i] = 1;
        }

    } else {
        // input data param
        batch      = FindIntArg(argc, argv, "-b", 8);
        in_channel = FindIntArg(argc, argv, "-c", 4);
        data_h     = FindIntArg(argc, argv, "-h", 16);
        data_w     = FindIntArg(argc, argv, "-w", 16);

        // input filter param
        out_channel = FindIntArg(argc, argv, "-o", 16);
        filter_h    = FindIntArg(argc, argv, "-fh", 5);
        filter_w    = FindIntArg(argc, argv, "-fw", 5);
        alpha       = FindFloatArg(argc, argv, "-alpha", 2.5);
        beta        = FindFloatArg(argc, argv, "-beta", 4.0);

        // conv param
        pad[0] = pad[1] = FindIntArg(argc, argv, "-pad_h", 1);
        pad[2] = pad[3] = FindIntArg(argc, argv, "-pad_w", 1);
    }

    std::cout << "batch: " << batch << std::endl;
    std::cout << "in_channel: " << in_channel << std::endl;
    std::cout << "data_h: " << data_h << std::endl;
    std::cout << "data_w: " << data_w << std::endl;
    std::cout << "out_channel: " << out_channel << std::endl;
    std::cout << "filter_h: " << filter_h << std::endl;
    std::cout << "filter_w: " << filter_w << std::endl;
    std::cout << "alpha: " << alpha << std::endl;
    std::cout << "beta: " << beta << std::endl;
    std::cout << "pad_t: " << pad[0] << " pad_b: " << pad[1] << " pad_l: " << pad[2]
              << " pad_r: " << pad[3] << std::endl;
    // other conv param
    mcdnnConvolutionMode_t mode    = MCDNN_CROSS_CORRELATION;
    mcdnnConvolutionFwdAlgo_t algo = MCDNN_CONVOLUTION_FWD_ALGO_FFT_TILING;
    int stride[2]                  = {1, 1};  // only support for above algo
    int dilate[2]                  = {1, 1};  // only support for above algo
    mcdnnDataType_t data_type      = MCDNN_DATA_FLOAT;
    mcdnnMathType_t math_type      = MCDNN_FMA_MATH;

    mcdnnHandle_t handle_conv;
    CHECK_CALL(mcdnnCreate(&handle_conv));

    mcdnnTensorDescriptor_t x_desc;
    mcdnnFilterDescriptor_t w_desc;
    mcdnnTensorDescriptor_t y_desc;
    mcdnnConvolutionDescriptor_t conv_desc;
    CHECK_CALL(mcdnnCreateTensorDescriptor(&x_desc));
    CHECK_CALL(mcdnnCreateFilterDescriptor(&w_desc));
    CHECK_CALL(mcdnnCreateTensorDescriptor(&y_desc));
    CHECK_CALL(mcdnnCreateConvolutionDescriptor(&conv_desc));

    uint32_t padding_w = data_w + pad[2] + pad[3];
    uint32_t padding_h = data_h + pad[0] + pad[1];
    uint32_t out_h     = padding_h - filter_h + 1;
    uint32_t out_w     = padding_w - filter_w + 1;
    mcdnnSetTensor4dDescriptor(x_desc,
                               MCDNN_TENSOR_NCHW,
                               data_type,
                               batch,
                               in_channel,
                               data_h,
                               data_w);
    mcdnnSetFilter4dDescriptor(w_desc,
                               data_type,
                               MCDNN_TENSOR_NCHW,
                               out_channel,
                               in_channel,
                               filter_h,
                               filter_w);
    mcdnnSetTensor4dDescriptor(y_desc,
                               MCDNN_TENSOR_NCHW,
                               data_type,
                               batch,
                               out_channel,
                               out_h,
                               out_w);
    mcdnnSetConvolution2dDescriptor(conv_desc,
                                    pad[1],
                                    pad[2],
                                    stride[0],
                                    stride[1],
                                    dilate[0],
                                    dilate[1],
                                    mode,
                                    data_type);
    mcdnnSetConvolutionMathType(conv_desc, math_type);
    // init input data
    uint32_t input_data_numbers  = batch * in_channel * data_h * data_w;
    uint32_t filter_data_numbers = out_channel * in_channel * filter_h * filter_w;
    uint32_t out_data_numbers    = batch * out_channel * out_h * out_w;
    std::vector<float> x(input_data_numbers);
    std::vector<float> w(filter_data_numbers);
    std::vector<float> y(out_data_numbers);
    for (int i = 0; i < input_data_numbers; ++i) {
        x[i] = std::cos(i) * i;
    }
    for (int i = 0; i < filter_data_numbers; ++i) {
        w[i] = std::sin(i) / 10;
    }
    // priorDstValue
    for (int i = 0; i < out_data_numbers; ++i) {
        y[i] = 10 * std::cos(i + 0.5);
    }
    // alloc x device memory
    void *ptr_x_dev = nullptr;
    CHECK_CALL(mcMalloc(&ptr_x_dev, x.size() * sizeof(float)));
    // copy data to device
    CHECK_CALL(mcMemcpy(ptr_x_dev, x.data(), x.size() * sizeof(float), mcMemcpyHostToDevice));

    // alloc w device memory
    void *ptr_w_dev = nullptr;
    CHECK_CALL(mcMalloc(&ptr_w_dev, w.size() * sizeof(float)));
    // copy data to device
    CHECK_CALL(mcMemcpy(ptr_w_dev, w.data(), w.size() * sizeof(float), mcMemcpyHostToDevice));

    // alloc y device memory
    void *ptr_y_dev = nullptr;
    CHECK_CALL(mcMalloc(&ptr_y_dev, y.size() * sizeof(float)));
    // copy data to device
    CHECK_CALL(mcMemcpy(ptr_y_dev, y.data(), y.size() * sizeof(float), mcMemcpyHostToDevice));

    // check result, calculate cpu result
    uint32_t padding_src_elements = batch * in_channel * padding_h * padding_w;
    std::vector<float> hsrc(padding_src_elements);
    PaddingCPU(hsrc.data(), x.data(), padding_src_elements, pad, data_h, data_w, batch, in_channel);
    std::vector<float> hdst(out_data_numbers);
    FwdConvCpu(hdst.data(),
               hsrc.data(),
               w.data(),
               batch,
               filter_w,
               filter_h,
               padding_w,
               padding_h,
               in_channel,
               out_w,
               out_h,
               out_channel);
    for (int i = 0; i < out_data_numbers; i++) {
        hdst[i] *= alpha;
        hdst[i] += beta * y[i];
    }

    size_t workspace_size = 0;
    CHECK_CALL(mcdnnGetConvolutionForwardWorkspaceSize(handle_conv,
                                                       x_desc,
                                                       w_desc,
                                                       conv_desc,
                                                       y_desc,
                                                       algo,
                                                       &workspace_size));
    // alloc work space device memory
    void *ptr_workspace = nullptr;
    if (workspace_size > 0) {
        CHECK_CALL(mcMalloc(&ptr_workspace, workspace_size));
    }

    // convolution forward
    CHECK_CALL(mcdnnConvolutionForward(handle_conv,
                                       &alpha,
                                       x_desc,
                                       ptr_x_dev,
                                       w_desc,
                                       ptr_w_dev,
                                       conv_desc,
                                       algo,
                                       ptr_workspace,
                                       workspace_size,
                                       &beta,
                                       y_desc,
                                       ptr_y_dev));
    // copy data from device to host
    CHECK_CALL(mcMemcpy(y.data(), ptr_y_dev, y.size() * sizeof(float), mcMemcpyDeviceToHost));
    bool res = Check(hdst.data(), y.data(), out_data_numbers);
    if (res) {
        std::cout << "Accumulative error : test pass!" << std::endl;
    } else {
        std::cout << "Accumulative error : test failure!" << std::endl;
    }
    // final free dev memory
    if (ptr_x_dev) mcFree(ptr_x_dev);
    if (ptr_w_dev) mcFree(ptr_w_dev);
    if (ptr_y_dev) mcFree(ptr_y_dev);
    if (ptr_workspace) mcFree(ptr_workspace);
    if (x_desc) mcdnnDestroyTensorDescriptor(x_desc);
    if (w_desc) mcdnnDestroyFilterDescriptor(w_desc);
    if (y_desc) mcdnnDestroyTensorDescriptor(y_desc);
    if (conv_desc) mcdnnDestroyConvolutionDescriptor(conv_desc);
    if (handle_conv) mcdnnDestroy(handle_conv);
    return 0;
}
