#include "kernel_operator.h"
using namespace AscendC;

const int BLOCK_SIZE = 32;
const int MAX_DIM = 5;

/*------------------------------------------------------
 *
 *  General broadcast case: `other` can only be read
 *  element-by-element from global memory, so copysign
 *  is computed scalar-by-scalar.
 *
 *------------------------------------------------------*/
// copysign(input, other) with full broadcasting of `other`.
// `other` is read one scalar at a time from global memory; each input element's
// flat index is decomposed into per-dimension coordinates to locate the
// corresponding (broadcast) element of `other`.
template <typename T> class CopysignOpBroadcast {
  public:
    __aicore__ inline CopysignOpBroadcast() {}
    // blockPerCore: 32-byte blocks per small core (the first nAcores "big" cores
    //   each process one extra block); shapeInput/shapeOther: tensor shapes with
    //   nDim (<= MAX_DIM) dimensions; maxBlockPerIter: UB capacity per iteration.
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t blockPerCore, uint16_t nAcores, uint16_t nBcores,
                                const uint32_t* shapeInput, const uint32_t* shapeOther, uint16_t nDim, uint16_t maxBlockPerIter, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->nDim = nDim;
        this->shapeInput = shapeInput;
        this->shapeOther = shapeOther;
        this->coreIdx = GetBlockIdx();
        this->MAX_BLOCK_PER_ITER = maxBlockPerIter;
        uint32_t elemPerBlock = BLOCK_SIZE / sizeof(T);
        uint32_t inputGmOffset = coreIdx * (blockPerCore + 1) * elemPerBlock; // global offset, in elements
        elemPerIter = elemPerBlock * MAX_BLOCK_PER_ITER;                      // max elements processed per iteration
        if (coreIdx < nAcores) { // big core: one extra block
            this->nElem = (blockPerCore + 1) * elemPerBlock;
            nIter = (blockPerCore + 1 + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = ((blockPerCore + 1) % MAX_BLOCK_PER_ITER) * elemPerBlock;
        } else { // small core: compensate the offset for the extra blocks of big cores
            inputGmOffset = inputGmOffset - (this->coreIdx - nAcores) * elemPerBlock;
            this->nElem = blockPerCore * elemPerBlock;
            nIter = (blockPerCore + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = (blockPerCore % MAX_BLOCK_PER_ITER) * elemPerBlock;
        }
        // Coordinate bookkeeping: suffix products of the shapes map a flat input
        // index to the matching flat index in `other`.
        elemOffset = inputGmOffset; // global flat element index, advanced element-by-element in Compute
        // NOTE: arrays are MAX_DIM + 1 long so that reading index nDim (always 1)
        // stays in bounds even when nDim == MAX_DIM; the previous MAX_DIM-sized
        // arrays overflowed in that case.
        for (int i = 0; i <= MAX_DIM; i++) {
            dimSuffix[i] = 1;
            otherDimSuffix[i] = 1;
        }
        for (int i = nDim - 1; i >= 0; i--) {
            dimSuffix[i] = shapeInput[i] * dimSuffix[i + 1];
            otherDimSuffix[i] = shapeOther[i] * otherDimSuffix[i + 1];
        }

        inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + inputGmOffset, nElem);
        // Buffer length is other's own element count (otherDimSuffix[0]); the
        // previous code passed dimSuffix[0] — input's element count — which
        // overstates the size of the `other` allocation.
        otherGm.SetGlobalBuffer((__gm__ DTYPE_OTHER*)other, (uint32_t)otherDimSuffix[0]);
        outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out + inputGmOffset, nElem);

        pipe->InitBuffer(OutQue, BUFFER_NUM, elemPerIter * sizeof(T));
        pipe->InitBuffer(inputQue, BUFFER_NUM, elemPerIter * sizeof(T));
        // Build sign masks from unsigned literals: `1 << 31` on a signed int is
        // undefined behavior, and `1 << 15` does not fit int16_t.
        signMask16 = (int16_t)0x8000u;
        signMask32 = (int32_t)0x80000000u;
        valMask16 = ~signMask16;
        valMask32 = ~signMask32;
    }

    // Drives the CopyIn -> Compute -> CopyOut pipeline over all iterations.
    __aicore__ inline void Process() {
        if (nElem < elemPerIter) {
            // Everything fits in a single iteration.
            elemPerIter = nElem;
            curElem = nElem;
            CopyIn(0);
            Compute(0);
            CopyOut(0);
        } else {
            // Multiple iterations; the last one may be a shorter tail.
            curElem = elemPerIter;
            for (int i = 0; i < nIter - 1; i++) {
                CopyIn(i);
                Compute(i);
                CopyOut(i);
            }
            if (tailElem > 0) {
                curElem = tailElem;
            }
            CopyIn(nIter - 1);
            Compute(nIter - 1);
            CopyOut(nIter - 1);
        }
    }
    // Stages curElem input elements of iteration iterIdx into UB.
    __aicore__ inline void CopyIn(int iterIdx) {
        auto inputLocal = inputQue.AllocTensor<T>();
        DataCopy(inputLocal, inputGm[elemPerIter * iterIdx], curElem);
        inputQue.EnQue(inputLocal);
    }
    // Scalar loop: for each input element, locate its broadcast partner in
    // `other` via per-dimension coordinates, then splice other's sign bit onto
    // input's magnitude bits.
    __aicore__ inline void Compute(int iterIdx) {
        auto outLocal = OutQue.AllocTensor<T>();
        auto inputLocal = inputQue.DeQue<T>();
        for (int i = 0; i < curElem; i++) {
            // Flat index of this element in `other`, honoring broadcast dims.
            uint64_t otherOffset = 0;
            for (int j = 0; j < nDim; j++) {
                uint32_t curDimensionInput = ((elemOffset) / dimSuffix[j + 1]) % shapeInput[j];
                // Broadcast dimension of `other` (size 1) pins the coordinate to 0.
                uint32_t curDimensionOther = (shapeInput[j] > shapeOther[j]) ? 0 : curDimensionInput;
                otherOffset += otherDimSuffix[j + 1] * curDimensionOther;
            }
            T a, b;
            a = inputLocal.GetValue(i);
            b = otherGm.GetValue(otherOffset);
            if constexpr(sizeof(T)==2){
                // result = magnitude bits of a | sign bit of b
                int16_t tmp = (valMask16 & (*reinterpret_cast<int16_t *>(&a))) |  (signMask16 & (*reinterpret_cast<int16_t *>(&b)));
                T res = (*reinterpret_cast<T *>(&tmp));
                outLocal.SetValue(i, res);
            }else{
                int32_t tmp = (valMask32 & (*reinterpret_cast<int32_t *>(&a))) |  (signMask32 & (*reinterpret_cast<int32_t *>(&b)));
                T res = (*reinterpret_cast<T *>(&tmp));
                outLocal.SetValue(i, res);
            }
            // Advance the global flat index by one element.
            elemOffset++;
        }
        OutQue.EnQue(outLocal);
        inputQue.FreeTensor(inputLocal);
    }
    // Writes curElem result elements of iteration iterIdx back to GM.
    __aicore__ inline void CopyOut(int iterIdx) {
        auto outLocal = OutQue.DeQue<T>();
        DataCopy(outGm[elemPerIter * iterIdx], outLocal, curElem);
        OutQue.FreeTensor(outLocal);
    }

  private:
    int16_t signMask16, valMask16;   // 0x8000 / 0x7FFF for 16-bit types
    int32_t signMask32, valMask32;   // 0x80000000 / 0x7FFFFFFF for 32-bit types
    const static int BUFFER_NUM = 1;
    GlobalTensor<T> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx, MAX_BLOCK_PER_ITER, nDim;
    uint32_t nElem, nIter, elemPerIter, tailElem, curElem;
    const uint32_t *shapeInput, *shapeOther;
    uint64_t elemOffset;
    // One extra slot so index nDim (== 1) is always a valid read.
    uint64_t dimSuffix[MAX_DIM + 1], otherDimSuffix[MAX_DIM + 1];
    TQue<QuePosition::VECOUT, BUFFER_NUM> OutQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
};

/*------------------------------------------------------
 *
 *  Only one dimension needs broadcasting: batched vector
 *  data movement and vectorized compute.
 *
 *------------------------------------------------------*/
// copysign(input, other) where a run of broadCastDim consecutive input batches
// all take their sign from the same `other` batch: input batch b pairs with
// other batch b / broadCastDim. Batches are moved with DataCopyPad (arbitrary
// batchDataSize) and the sign transfer uses vector shift/Abs/Or instructions.
template <typename T> class CopysignOpBroadcastOne {
  public:
    __aicore__ inline CopysignOpBroadcastOne() {}
    // nBatchPerCore: batches per small core (big cores process one extra);
    // broadCastDim: number of consecutive input batches sharing one other batch;
    // batchDataSize: elements per batch, not necessarily 32-byte aligned.
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t nBatchPerCore, uint16_t nAcores, uint16_t nBcores,
                                uint32_t broadCastDim, uint32_t batchDataSize, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->coreIdx = GetBlockIdx();
        this->nBatch = nBatchPerCore;
        this->broadCastDim = broadCastDim;
        this->batchDataSize = batchDataSize;
        this->alignBatchDataSize = (batchDataSize*sizeof(T) + 31) / 32 * 32/sizeof(T); // batch size rounded up to 32 bytes
        uint32_t totalBatch = nBatchPerCore * nBcores + (nBatchPerCore + 1) * nAcores;
        if (coreIdx < nAcores) {
            nBatch = nBatchPerCore + 1; // big core
            startBatch = coreIdx * (nBatchPerCore + 1);
            endBatch = startBatch + nBatch;
        } else {
            nBatch = nBatchPerCore; // small core
            startBatch = nAcores * (nBatchPerCore + 1) + (coreIdx - nAcores) * nBatchPerCore;
            endBatch = startBatch + nBatch;
        }
        inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input, totalBatch * batchDataSize);
        otherGm.SetGlobalBuffer((__gm__ DTYPE_OTHER*)other, totalBatch / broadCastDim * batchDataSize);
        outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out, totalBatch * batchDataSize);

        pipe->InitBuffer(OutQue, BUFFER_NUM, alignBatchDataSize * sizeof(T));
        pipe->InitBuffer(inputQue, BUFFER_NUM, alignBatchDataSize * sizeof(T) * nPerCopy); // up to nPerCopy batches per copy
        pipe->InitBuffer(otherQue, BUFFER_NUM, alignBatchDataSize * sizeof(T));
    }
    // Walks this core's batch range [startBatch, endBatch); each `other` batch is
    // loaded once and reused for every input batch it broadcasts to, with input
    // batches fetched nPerCopy at a time where possible.
    __aicore__ inline void Process() {
        for (int batchIdx = startBatch; batchIdx < endBatch;) {
            uint32_t otherBatchId = batchIdx / broadCastDim;
            // Number of consecutive input batches that share this other batch,
            // clipped so we never run past endBatch.
            uint32_t nSameOther = (otherBatchId + 1) * broadCastDim - batchIdx;
            nSameOther = min(endBatch - batchIdx, nSameOther);
            CopyInOther(otherBatchId);
            auto otherLocal = otherQue.DeQue<DTYPE_INPUT>();
            // Full groups of nPerCopy input batches.
            int i = 0;
            for (i = 0; i + nPerCopy <= nSameOther; i += nPerCopy) {
                CopyIn(batchIdx + i, nPerCopy);
                auto inputLocal = inputQue.DeQue<DTYPE_INPUT>();
                for (int j = 0; j < nPerCopy; j++) {
                    // Batch j starts at j * alignBatchDataSize inside the UB tile
                    // because DataCopyPad aligns each burst to 32 bytes.
                    Compute(inputLocal, otherLocal, j * alignBatchDataSize);
                    CopyOut(batchIdx + i + j);
                }
                inputQue.FreeTensor(inputLocal);
            }
            // Remaining batches, one at a time.
            for (; i < nSameOther; i++) {
                CopyIn(batchIdx + i, 1);
                auto inputLocal = inputQue.DeQue<DTYPE_INPUT>();
                Compute(inputLocal, otherLocal, 0);
                inputQue.FreeTensor(inputLocal);
                CopyOut(batchIdx + i);
            }
            batchIdx += nSameOther; // advance past the batches just processed
            otherQue.FreeTensor(otherLocal);
        }
    }
    // Stages one `other` batch into UB (padded copy; tail bytes unspecified).
    __aicore__ inline void CopyInOther(int batchIdx) {
        auto otherLocal = otherQue.AllocTensor<DTYPE_INPUT>();
        DataCopyExtParams copyParams{1, (uint32_t)(batchDataSize * sizeof(T)), 0, 0, 0};
        DataCopyPadExtParams<DTYPE_INPUT> padParams{false, 0, 0, 0};
        DataCopyPad(otherLocal, otherGm[batchIdx * batchDataSize], copyParams, padParams);
        otherQue.EnQue(otherLocal);
    }
    // Stages n2Copy consecutive input batches into UB, each burst 32B-aligned.
    __aicore__ inline void CopyIn(int batchIdx, int n2Copy) {
        auto inputLocal = inputQue.AllocTensor<DTYPE_INPUT>();
        DataCopyExtParams copyParams{(uint16_t)n2Copy, (uint32_t)(batchDataSize * sizeof(T)), 0, 0, 0};
        DataCopyPadExtParams<DTYPE_INPUT> padParams{false, 0, 0, 0};
        DataCopyPad(inputLocal, inputGm[batchIdx * batchDataSize], copyParams, padParams);
        inputQue.EnQue(inputLocal);
    }
    // out = |input| with other's sign: shift other down/up to isolate its sign
    // bit (idempotent, so re-running on a reused otherLocal is safe), take Abs
    // of input in place, then OR the two bit patterns.
    __aicore__ inline void Compute(LocalTensor<DTYPE_INPUT> inputLocal, LocalTensor<DTYPE_INPUT> otherLocal, int startOffset) {
        auto outLocal = OutQue.AllocTensor<T>();
        if constexpr(sizeof(T)==2){
            auto inputHalf = inputLocal.template ReinterpretCast<half>();
            auto inputUint = inputHalf.template ReinterpretCast<uint16_t>();
            auto otherUint = otherLocal.template ReinterpretCast<uint16_t>();
            auto outUint = outLocal.template ReinterpretCast<uint16_t>();
            ShiftRight(otherUint, otherUint, (uint16_t)15, batchDataSize);
            ShiftLeft(otherUint, otherUint, (uint16_t)15, batchDataSize);
            Abs(inputHalf[startOffset], inputHalf[startOffset], batchDataSize);
            Or(outUint, inputUint[startOffset], otherUint, batchDataSize);
        }else if constexpr(sizeof(T)==4){
            auto inputFloat = inputLocal.template ReinterpretCast<float>();
            auto inputUint = inputFloat.template ReinterpretCast<uint32_t>();
            auto otherUint = otherLocal.template ReinterpretCast<uint32_t>();
            auto outUint = outLocal.template ReinterpretCast<uint32_t>();
            ShiftRight(otherUint, otherUint, (uint32_t)31, batchDataSize);
            ShiftLeft(otherUint, otherUint, (uint32_t)31, batchDataSize);
            Abs(inputFloat[startOffset], inputFloat[startOffset], batchDataSize);
            // NOTE(review): count is doubled here, presumably because Or operates
            // at 16-bit granularity for 32-bit data — confirm against the Or API.
            Or(outUint, inputUint[startOffset], otherUint, batchDataSize * 2);
        }
        OutQue.EnQue(outLocal);
    }
    // Writes one result batch back to GM (padded copy trims tail bytes).
    __aicore__ inline void CopyOut(int batchIdx) {
        auto outLocal = OutQue.DeQue<T>();
        DataCopyExtParams copyParams{1, (uint32_t)(batchDataSize * sizeof(T)), 0, 0, 0};
        DataCopyPad(outGm[batchDataSize * batchIdx], outLocal, copyParams);
        OutQue.FreeTensor(outLocal);
    }

  private:
    const static int BUFFER_NUM = 1;
    const static int nPerCopy = 4; // input batches fetched per DataCopyPad
    GlobalTensor<T> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx;
    uint32_t nBatch, broadCastDim, batchDataSize, startBatch, endBatch, alignBatchDataSize;
    TQue<QuePosition::VECOUT, BUFFER_NUM> OutQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> otherQue;
};

/*------------------------------------------------------
 *
 *  No broadcasting needed: plain DataCopy for every input
 *  and output, fully vectorized compute.
 *
 *------------------------------------------------------*/

// copysign(input, other) with identically-shaped operands: both tensors are
// moved with plain DataCopy and each tile is processed entirely with vector
// instructions (shift to isolate other's sign, Abs on input, bitwise Or).
template <typename T> class CopysignOp {
  public:
    __aicore__ inline CopysignOp() {}
    // blockPerCore: 32-byte blocks per small core (the first nAcores "big" cores
    //   each process one extra block); maxBlockPerIter: UB capacity per iteration.
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t blockPerCore, uint16_t nAcores, uint16_t nBcores,
                                uint16_t maxBlockPerIter, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->coreIdx = GetBlockIdx();
        this->MAX_BLOCK_PER_ITER = maxBlockPerIter;
        uint32_t elemPerBlock = BLOCK_SIZE / sizeof(T);
        uint32_t inputGmOffset = coreIdx * (blockPerCore + 1) * elemPerBlock; // global offset, in elements
        elemPerIter = elemPerBlock * MAX_BLOCK_PER_ITER;                      // max elements processed per iteration
        if (coreIdx < nAcores) { // big core: one extra block
            this->nElem = (blockPerCore + 1) * elemPerBlock;
            nIter = (blockPerCore + 1 + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = ((blockPerCore + 1) % MAX_BLOCK_PER_ITER) * elemPerBlock;
        } else { // small core: compensate the offset for the extra blocks of big cores
            inputGmOffset = inputGmOffset - (this->coreIdx - nAcores) * elemPerBlock;
            this->nElem = blockPerCore * elemPerBlock;
            nIter = (blockPerCore + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = (blockPerCore % MAX_BLOCK_PER_ITER) * elemPerBlock;
        }

        inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + inputGmOffset, nElem);
        otherGm.SetGlobalBuffer((__gm__ DTYPE_OTHER*)other + inputGmOffset, nElem);
        outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out + inputGmOffset, nElem);

        pipe->InitBuffer(outQue, BUFFER_NUM, elemPerIter * sizeof(T));
        pipe->InitBuffer(otherQue, BUFFER_NUM, elemPerIter * sizeof(T));
        pipe->InitBuffer(inputQue, BUFFER_NUM, elemPerIter * sizeof(T));
    }

    // Drives the CopyIn -> Compute -> CopyOut pipeline over all iterations.
    __aicore__ inline void Process() {
        if (nElem < elemPerIter) {
            // Everything fits in a single iteration.
            elemPerIter = nElem;
            curElem = nElem;
            CopyIn(0);
            Compute(0);
            CopyOut(0);
        } else {
            // Multiple iterations; the last one may be a shorter tail.
            curElem = elemPerIter;
            for (int i = 0; i < nIter - 1; i++) {
                CopyIn(i);
                Compute(i);
                CopyOut(i);
            }
            if (tailElem > 0) {
                curElem = tailElem;
            }
            CopyIn(nIter - 1);
            Compute(nIter - 1);
            CopyOut(nIter - 1);
        }
    }
    // Stages curElem elements of `other` for iteration iterIdx into UB.
    // (The matching input tile is staged inside Compute so its MTE transfer can
    // overlap the shift work on `other`.)
    __aicore__ inline void CopyIn(int iterIdx) {
        auto otherLocal = otherQue.AllocTensor<T>();
        DataCopy(otherLocal, otherGm[elemPerIter * iterIdx], curElem);
        otherQue.EnQue(otherLocal);
    }
    // out = |input| with other's sign: shift other down/up to isolate its sign
    // bit, take Abs of input in place, then OR the two bit patterns.
    __aicore__ inline void Compute(int iterIdx) {
        auto outLocal = outQue.AllocTensor<T>();
        auto inputLocal = inputQue.AllocTensor<T>();
        auto otherLocal = otherQue.DeQue<T>();
        if constexpr(sizeof(T)==2){
            auto otherUint = otherLocal.template ReinterpretCast<uint16_t>();
            auto outUint = outLocal.template ReinterpretCast<uint16_t>();
            ShiftRight(otherUint, otherUint, (uint16_t)15, curElem);
            ShiftLeft(otherUint, otherUint, (uint16_t)15, curElem);
            // Input copy-in issued after the shifts so the transfer overlaps them.
            DataCopy(inputLocal, inputGm[elemPerIter * iterIdx], curElem);
            inputQue.EnQue(inputLocal);
            // Reassign instead of shadowing (the original declared a second
            // `inputLocal` here, which obscured which tensor was freed).
            inputLocal = inputQue.DeQue<T>();
            auto inputHalf = inputLocal.template ReinterpretCast<half>();
            auto inputUint = inputHalf.template ReinterpretCast<uint16_t>();
            Abs(inputHalf, inputHalf, curElem);
            Or(outUint, inputUint, otherUint, curElem);
        }else if constexpr(sizeof(T)==4){
            auto otherUint = otherLocal.template ReinterpretCast<uint32_t>();
            auto outUint = outLocal.template ReinterpretCast<uint32_t>();
            ShiftRight(otherUint, otherUint, (uint32_t)31, curElem);
            ShiftLeft(otherUint, otherUint, (uint32_t)31, curElem);
            DataCopy(inputLocal, inputGm[elemPerIter * iterIdx], curElem);
            inputQue.EnQue(inputLocal);
            inputLocal = inputQue.DeQue<T>();
            auto inputFloat = inputLocal.template ReinterpretCast<float>();
            auto inputUint = inputFloat.template ReinterpretCast<uint32_t>();
            Abs(inputFloat, inputFloat, curElem);
            // NOTE(review): count is doubled here, presumably because Or operates
            // at 16-bit granularity for 32-bit data — confirm against the Or API.
            Or(outUint, inputUint, otherUint, curElem * 2);
        }
        inputQue.FreeTensor(inputLocal);
        otherQue.FreeTensor(otherLocal);
        outQue.EnQue(outLocal);
    }
    // Writes curElem result elements of iteration iterIdx back to GM.
    __aicore__ inline void CopyOut(int iterIdx) {
        auto outLocal = outQue.DeQue<T>();
        DataCopy(outGm[elemPerIter * iterIdx], outLocal, curElem);
        outQue.FreeTensor(outLocal);
    }

  private:
    const static int BUFFER_NUM = 1;
    GlobalTensor<T> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx, MAX_BLOCK_PER_ITER;
    uint32_t nElem, nIter, elemPerIter, tailElem, curElem;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> otherQue;
};

// Kernel entry point: dispatches to one of three copysign implementations
// according to the tiling key chosen on the host side:
//   key 0 -> general broadcast, scalar compute (CopysignOpBroadcast)
//   key 1 -> single broadcast dimension, batched vector compute (CopysignOpBroadcastOne)
//   key 2 -> no broadcast, fully vectorized compute (CopysignOp)
extern "C" __global__ __aicore__ void copysign(GM_ADDR input, GM_ADDR other, GM_ADDR out, GM_ADDR workspace, GM_ADDR tiling) {
    TPipe pipe;
    if (TILING_KEY_IS(0)) { // broadcast needed and broadcast extent > 1
        GET_TILING_DATA(tiling_data, tiling);
        CopysignOpBroadcast<DTYPE_INPUT> op;
        op.Init(input, other, out, tiling_data.blockPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.shapeInput, tiling_data.shapeOther,
                tiling_data.nDim, tiling_data.maxBlockPerIter, &pipe);
        op.Process();
    } else if (TILING_KEY_IS(1)) { // exactly one dimension broadcasts
        GET_TILING_DATA_WITH_STRUCT(CopysignTilingDataBroadSpec, tiling_data, tiling);
        CopysignOpBroadcastOne<DTYPE_INPUT> op;
        op.Init(input, other, out, tiling_data.nBatchPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.broadcastDim,
                tiling_data.batchDataSize, &pipe);
        op.Process();
    } else if (TILING_KEY_IS(2)) { // shapes match, no broadcast
        GET_TILING_DATA_WITH_STRUCT(CopysignTilingDataVec, tiling_data, tiling);
        CopysignOp<DTYPE_INPUT> op;
        op.Init(input, other, out, tiling_data.blockPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.maxBlockPerIter, &pipe);
        op.Process();
    }
}