/**
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This program is free software, you can redistribute it and/or modify it under
 * the terms and conditions of CANN Open Software License Agreement Version 2.0
 * (the "License"). Please refer to the License for details. You may not use
 * this file except in compliance with the License. THIS SOFTWARE IS PROVIDED ON
 * AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS
 * FOR A PARTICULAR PURPOSE. See LICENSE in the root of the software repository
 * for the full text of the License.
 */

/* !
 * \file addcdiv_custom.asc
 * \brief
 */

#include "acl/acl.h"
#include "data_utils.h"
#include "kernel_operator.h"


constexpr int32_t TOTAL_LENGTH = 8 * 2048; // total number of half elements across all cores
constexpr int32_t USE_CORE_NUM = 8; // num of core used
constexpr int32_t BLOCK_LENGTH =
    TOTAL_LENGTH / USE_CORE_NUM;  // length computed of each core
constexpr int32_t TILE_NUM = 16;  // split data into 16 tiles for each core
constexpr int32_t BUFFER_NUM = 1; // tensor num for each queue (1 = single buffering, no double buffer)
constexpr int32_t TILE_LENGTH =
    BLOCK_LENGTH / TILE_NUM /
    BUFFER_NUM; // number of elements processed per pipeline iteration

/**
 * Tile-pipelined addcdiv kernel: out = x + value * (y / z), elementwise on half.
 *
 * Each core processes BLOCK_LENGTH elements, split into TILE_NUM tiles that
 * flow through a CopyIn -> Compute -> CopyOut pipeline using VECIN/VECOUT
 * queues managed by TPipe.
 */
class KernelAddcdiv {
public:
  __aicore__ inline KernelAddcdiv() {}
  // Binds this core's slice of the global tensors (offset by block index) and
  // allocates one tile-sized buffer per queue.
  __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR z, GM_ADDR out) {
    this->blockLength = BLOCK_LENGTH;
    this->tileNum = TILE_NUM;
    ASSERT(tileNum != 0 && "tile num can not be zero!");
    this->tileLength = TILE_LENGTH;
    this->value = (half)1.0; // must stay consistent with `value` in gen_data.py

    xGm.SetGlobalBuffer((__gm__ half *)x +
                            this->blockLength * AscendC::GetBlockIdx(),
                        this->blockLength);
    yGm.SetGlobalBuffer((__gm__ half *)y +
                            this->blockLength * AscendC::GetBlockIdx(),
                        this->blockLength);
    zGm.SetGlobalBuffer((__gm__ half *)z +
                            this->blockLength * AscendC::GetBlockIdx(),
                        this->blockLength);
    outGm.SetGlobalBuffer((__gm__ half *)out +
                              this->blockLength * AscendC::GetBlockIdx(),
                          this->blockLength);
    pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileLength * sizeof(half));
    pipe.InitBuffer(inQueueY, BUFFER_NUM, this->tileLength * sizeof(half));
    pipe.InitBuffer(inQueueZ, BUFFER_NUM, this->tileLength * sizeof(half));
    pipe.InitBuffer(outQueueOUT, BUFFER_NUM, this->tileLength * sizeof(half));
  }
  // Runs the three-stage pipeline once per tile.
  __aicore__ inline void Process() {
    int32_t loopCount = this->tileNum * BUFFER_NUM;
    for (int32_t i = 0; i < loopCount; i++) {
      CopyIn(i);
      Compute(i);
      CopyOut(i);
    }
  }

private:
  // Stage 1: copy tile `progress` of x, y, z from global to local memory and
  // enqueue the local tensors for the compute stage.
  __aicore__ inline void CopyIn(int32_t progress) {
    AscendC::LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
    AscendC::LocalTensor<half> yLocal = inQueueY.AllocTensor<half>();
    AscendC::LocalTensor<half> zLocal = inQueueZ.AllocTensor<half>();
    AscendC::DataCopy(xLocal, xGm[progress * this->tileLength],
                      this->tileLength);
    AscendC::DataCopy(yLocal, yGm[progress * this->tileLength],
                      this->tileLength);
    AscendC::DataCopy(zLocal, zGm[progress * this->tileLength],
                      this->tileLength);
    inQueueX.EnQue(xLocal);
    inQueueY.EnQue(yLocal);
    inQueueZ.EnQue(zLocal);
  }
  // Stage 2: out = x + value * (y / z), computed in three vector ops, then
  // the result is enqueued for copy-out and the input tensors are released.
  __aicore__ inline void Compute(int32_t progress) {
    AscendC::LocalTensor<half> xLocal = inQueueX.DeQue<half>();
    AscendC::LocalTensor<half> yLocal = inQueueY.DeQue<half>();
    AscendC::LocalTensor<half> zLocal = inQueueZ.DeQue<half>();
    AscendC::LocalTensor<half> outLocal = outQueueOUT.AllocTensor<half>();
    // compute: out = x + value * (y / z)
    AscendC::Div(outLocal, yLocal, zLocal, this->tileLength);
    AscendC::Muls(outLocal, outLocal, this->value, this->tileLength);
    AscendC::Add(outLocal, xLocal, outLocal, this->tileLength);
    outQueueOUT.EnQue<half>(outLocal);
    inQueueX.FreeTensor(xLocal);
    inQueueY.FreeTensor(yLocal);
    inQueueZ.FreeTensor(zLocal);
  }
  // Stage 3: copy the finished tile back to global memory and free the buffer.
  __aicore__ inline void CopyOut(int32_t progress) {
    AscendC::LocalTensor<half> outLocal = outQueueOUT.DeQue<half>();
    AscendC::DataCopy(outGm[progress * this->tileLength], outLocal,
                      this->tileLength);
    outQueueOUT.FreeTensor(outLocal);
  }

private:
  AscendC::TPipe pipe;
  AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX, inQueueY,
      inQueueZ;
  AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueOUT;
  AscendC::GlobalTensor<half> xGm;  // this core's slice of input x
  AscendC::GlobalTensor<half> yGm;  // this core's slice of input y
  AscendC::GlobalTensor<half> zGm;  // this core's slice of input z
  AscendC::GlobalTensor<half> outGm;  // this core's slice of the output
  half value;            // scalar multiplier applied to y / z
  uint32_t blockLength;  // elements handled by this core
  uint32_t tileNum;      // tiles per core
  uint32_t tileLength;   // elements per tile
};

/**
 * Device entry point: computes out = x + value * (y / z) elementwise over
 * TOTAL_LENGTH half elements, with each core handling one BLOCK_LENGTH slice.
 * Expected to be launched with blockDim == USE_CORE_NUM.
 */
__global__ __aicore__ void addcdiv_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z,
                                          GM_ADDR out) {
  KernelAddcdiv op;
  op.Init(x, y, z, out);
  op.Process();
}

/**
 * Host driver: initializes the ACL runtime, stages the three half-precision
 * inputs from ./input/*.bin onto the device, launches addcdiv_custom on
 * USE_CORE_NUM cores, and writes the result to ./output/output_out.bin.
 *
 * Fixes vs. original:
 *  - aclInit() was missing even though aclFinalize() was called on exit;
 *    the ACL runtime must be initialized before any other acl* call.
 *  - buffer sizes and blockDim now derive from TOTAL_LENGTH / USE_CORE_NUM
 *    instead of repeating the magic numbers 8 * 2048 and 8.
 */
int32_t main(int32_t argc, char *argv[]) {
  uint32_t blockDim = USE_CORE_NUM; // one launch block per AI core
  // half elements are 2 bytes; host buffers are typed uint16_t.
  size_t inputByteSize = TOTAL_LENGTH * sizeof(uint16_t);
  size_t outputByteSize = TOTAL_LENGTH * sizeof(uint16_t);

  aclInit(nullptr); // pairs with aclFinalize() below

  int32_t deviceId = 0;
  aclrtSetDevice(deviceId);
  aclrtStream stream = nullptr;
  aclrtCreateStream(&stream);

  uint8_t *xHost, *yHost, *zHost, *outHost;
  uint8_t *xDevice, *yDevice, *zDevice, *outDevice;

  // Pinned host staging buffers plus device buffers for each tensor.
  aclrtMallocHost((void **)(&xHost), inputByteSize);
  aclrtMallocHost((void **)(&yHost), inputByteSize);
  aclrtMallocHost((void **)(&zHost), inputByteSize);
  aclrtMallocHost((void **)(&outHost), outputByteSize);
  aclrtMalloc((void **)&xDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST);
  aclrtMalloc((void **)&yDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST);
  aclrtMalloc((void **)&zDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST);
  aclrtMalloc((void **)&outDevice, outputByteSize, ACL_MEM_MALLOC_HUGE_FIRST);

  ReadFile("./input/input_x.bin", inputByteSize, xHost, inputByteSize);
  ReadFile("./input/input_y.bin", inputByteSize, yHost, inputByteSize);
  ReadFile("./input/input_z.bin", inputByteSize, zHost, inputByteSize);

  aclrtMemcpy(xDevice, inputByteSize, xHost, inputByteSize,
              ACL_MEMCPY_HOST_TO_DEVICE);
  aclrtMemcpy(yDevice, inputByteSize, yHost, inputByteSize,
              ACL_MEMCPY_HOST_TO_DEVICE);
  aclrtMemcpy(zDevice, inputByteSize, zHost, inputByteSize,
              ACL_MEMCPY_HOST_TO_DEVICE);

  // Asynchronous launch; synchronize before reading the output back.
  addcdiv_custom<<<blockDim, nullptr, stream>>>(xDevice, yDevice, zDevice,
                                                outDevice);
  aclrtSynchronizeStream(stream);

  aclrtMemcpy(outHost, outputByteSize, outDevice, outputByteSize,
              ACL_MEMCPY_DEVICE_TO_HOST);
  WriteFile("./output/output_out.bin", outHost, outputByteSize);

  aclrtFree(xDevice);
  aclrtFree(yDevice);
  aclrtFree(zDevice);
  aclrtFree(outDevice);
  aclrtFreeHost(xHost);
  aclrtFreeHost(yHost);
  aclrtFreeHost(zHost);
  aclrtFreeHost(outHost);

  aclrtDestroyStream(stream);
  aclrtResetDevice(deviceId);
  aclFinalize();

  return 0;
}
