/**
 * @file linspaced_custom.cpp
 *
 * Copyright (C) 2025. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include "linspaced_custom_tiling.h"
#include "kernel_operator.h"

constexpr uint32_t OUTPUT_DATA_TYPE_SIZE = 4;                           // byte size of one float output element
constexpr uint32_t BLOCK_SIZE = 32;                                     // 32-byte memory-alignment granularity
constexpr uint32_t TILE_ELEM_NUM = BLOCK_SIZE / OUTPUT_DATA_TYPE_SIZE;  // floats per 32B-aligned tile: 8
constexpr uint32_t BUFFER_NUM = 2;                                      // double-buffering depth for the output queue

template <typename T>
class KernelLinSpaceD {
public:
    __aicore__ inline KernelLinSpaceD() {}

    /**
     * @brief Read the scalar start/end values from GM, derive the linspace
     *        step, and resolve this core's slice (length, tile counts, GM
     *        offset) from the tiling data.
     *
     * @param x      GM address of the scalar start value (type T).
     * @param y      GM address of the scalar end value (type T).
     * @param n      Total number of output elements.
     * @param z      GM address of the float output buffer.
     * @param tiling Per-kernel tiling parameters (even vs. former/tail split).
     */
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, const uint32_t n, GM_ADDR z, LinspacedCustomTilingData tiling)
    {
        startGm.SetGlobalBuffer((__gm__ T*)x, 1);
        endGm.SetGlobalBuffer((__gm__ T*)y, 1);
        this->start = static_cast<float>(startGm.GetValue(0));
        this->end = static_cast<float>(endGm.GetValue(0));
        this->origSize = n;
        // Step is 0 when n <= 1; the guard also avoids division by zero.
        this->step = (origSize > 1) ? (end - start) / (origSize - 1) : 0.0f;

        uint32_t coreId = AscendC::GetBlockIdx();
        uint32_t currentBlockLength = 0;

        if (tiling.isEvenCore)
        {
            // All cores process the same number of elements.
            this->blockLength = tiling.blockLength;
            this->tileNum = tiling.tileNum;
            this->lastTileLength = tiling.lastTileLength;
            this->blockOffset = blockLength * coreId;
            currentBlockLength = blockLength;
        }
        else
        {
            if (coreId < tiling.formerNum)
            {
                // "Former" cores each take one extra chunk of work.
                this->blockLength = tiling.formerLength;
                this->tileNum = tiling.formerTileNum;
                this->lastTileLength = tiling.formerLastTileLength;
                this->blockOffset = tiling.formerLength * coreId;
                currentBlockLength = tiling.formerLength;
            }
            else
            {
                // "Tail" cores start after all former-core elements.
                this->blockLength = tiling.tailLength;
                this->tileNum = tiling.tailTileNum;
                this->lastTileLength = tiling.tailLastTileLength;
                this->blockOffset = tiling.formerLength * tiling.formerNum + tiling.tailLength * (coreId - tiling.formerNum);
                currentBlockLength = tiling.tailLength;
            }
        }
        zGm.SetGlobalBuffer(reinterpret_cast<__gm__ float*>(z) + this->blockOffset, currentBlockLength);
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, TILE_ELEM_NUM * sizeof(float));
    }

    /**
     * @brief Generate this core's portion of the linspace output, one
     *        TILE_ELEM_NUM-sized tile at a time.
     */
    __aicore__ inline void Process()
    {
        if (origSize == 1)
        {
            // Degenerate case: the output is just [start]; the remainder of
            // the 32B-aligned tile is zero-padded.
            AscendC::LocalTensor<float> tileOutput = outQueueZ.AllocTensor<float>();
            for (uint32_t i = 0; i < TILE_ELEM_NUM; i++) {
                tileOutput.SetValue(i, (i == 0) ? start : 0.0f);
            }
            outQueueZ.EnQue(tileOutput);
            CopyOut(0);
            return;
        }
        for (int32_t tileIdx = 0; tileIdx < static_cast<int32_t>(tileNum); tileIdx++)
        {
            Compute(tileIdx);
            CopyOut(tileIdx);
        }
    }

private:
    /**
     * @brief Fill one tile with start + step * globalIdx for each element.
     *
     * NOTE(fix): the previous implementation looped Duplicate() downward over
     * prefixes [0..i], rewriting most of the tile on every iteration
     * (O(tile^2) writes). A single per-element SetValue pass produces the
     * same values in O(tile) and matches the idiom already used in Process().
     */
    __aicore__ inline void Compute(int32_t tileIdx)
    {
        AscendC::LocalTensor<float> tileOutput = outQueueZ.AllocTensor<float>();
        int32_t tileElemNum = static_cast<int32_t>(TILE_ELEM_NUM);  // 8
        int32_t tileGlobalStart = static_cast<int32_t>(blockOffset) + tileIdx * tileElemNum;

        for (int32_t i = 0; i < tileElemNum; i++)
        {
            int32_t globalIdx = tileGlobalStart + i;
            tileOutput.SetValue(i, start + step * static_cast<float>(globalIdx));
        }
        outQueueZ.EnQue(tileOutput);
    }

    /**
     * @brief Copy one computed tile from local memory to this core's slice of
     *        the output GM buffer, clamping the element count at the slice
     *        boundary.
     */
    __aicore__ inline void CopyOut(int32_t tileIdx)
    {
        AscendC::LocalTensor<float> zLocal = outQueueZ.DeQue<float>();
        int32_t copyElemNum;                         // number of elements actually written

        if (tileIdx == static_cast<int32_t>(tileNum) - 1) {  // last tile: may be shorter
            copyElemNum = static_cast<int32_t>(lastTileLength);
        }
        else {                                       // regular tile: full 8 elements
            copyElemNum = static_cast<int32_t>(TILE_ELEM_NUM);
        }
        int32_t copyOffset = tileIdx * static_cast<int32_t>(TILE_ELEM_NUM);  // write offset

        if (copyOffset + copyElemNum > static_cast<int32_t>(blockLength)) {  // stay inside this core's GM range
            copyElemNum = static_cast<int32_t>(blockLength) - copyOffset;
        }
        // Guard degenerate tilings: never issue a DataCopy with a
        // zero/negative element count.
        if (copyElemNum > 0) {
            AscendC::DataCopy(zGm[copyOffset], zLocal, copyElemNum);
        }
        outQueueZ.FreeTensor(zLocal);
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueueZ;
    AscendC::GlobalTensor<T> startGm;
    AscendC::GlobalTensor<T> endGm;
    AscendC::GlobalTensor<float> zGm;

    uint32_t blockLength;    // total float elements handled by this core
    uint32_t tileNum;        // number of tiles on this core (8 floats per tile)
    uint32_t lastTileLength; // float elements in the last tile
    uint32_t blockOffset;    // this core's global float-element offset
    uint32_t origSize;       // original number of valid output elements
    float start;
    float end;
    float step;

};

extern "C" __global__ __aicore__ void linspaced_custom(GM_ADDR startGm,
                                                       GM_ADDR endGm,
                                                       const uint32_t n,
                                                       GM_ADDR zGm,
                                                       LinspacedCustomTilingData tiling){
    // Dispatch on the runtime dtype tag: instantiate the kernel with the
    // matching input scalar type and run it.
    if (tiling.dataType == SIG_HALF) {
        KernelLinSpaceD<half> kernel;
        kernel.Init(startGm, endGm, n, zGm, tiling);
        kernel.Process();
    } else if (tiling.dataType == SIG_FLOAT) {
        KernelLinSpaceD<float> kernel;
        kernel.Init(startGm, endGm, n, zGm, tiling);
        kernel.Process();
    } else if (tiling.dataType == SIG_INT16) {
        KernelLinSpaceD<int16_t> kernel;
        kernel.Init(startGm, endGm, n, zGm, tiling);
        kernel.Process();
    } else if (tiling.dataType == SIG_INT32) {
        KernelLinSpaceD<int32_t> kernel;
        kernel.Init(startGm, endGm, n, zGm, tiling);
        kernel.Process();
    } else {
        // Any other dtype tag is a programming error in the host-side caller.
        assert(false && "Unsupported input type! Only half/float/int16/int32 are allowed.");
    }
}
