#include "kernel_operator.h"
using namespace AscendC;

const int BLOCK_SIZE = 32; // data-block granularity in bytes; elements-per-block is BLOCK_SIZE / sizeof(T)
const int MAX_DIM = 5;     // maximum tensor rank supported by the broadcast path

/*------------------------------------------------------
 *
 *  Multiple dimensions need broadcasting: `other` can only be
 *  read element-wise from global memory; scalar computation.
 *
 *------------------------------------------------------*/
// Broadcast over multiple dimensions: `other` cannot be tiled, so it is read
// element-by-element from global memory and the left shift is computed
// scalar-by-scalar.
template <typename T> class BitwiseLeftShiftOpBroadcast {
  public:
    __aicore__ inline BitwiseLeftShiftOpBroadcast() {}
    // Sets up per-core tiling and global buffers.
    //   blockPerCore          : BLOCK_SIZE-byte blocks per small core (big cores get one more)
    //   nAcores / nBcores     : number of big / small cores
    //   shapeInput/shapeOther : operand shapes, nDim dimensions each (nDim <= MAX_DIM)
    //   maxBlockPerIter       : upper bound of blocks moved per pipeline iteration
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t blockPerCore, uint16_t nAcores, uint16_t nBcores,
                                const uint32_t* shapeInput, const uint32_t* shapeOther, uint16_t nDim, uint16_t maxBlockPerIter, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->nDim = nDim;
        this->shapeInput = shapeInput;
        this->shapeOther = shapeOther;
        this->coreIdx = GetBlockIdx();
        this->MAX_BLOCK_PER_ITER = maxBlockPerIter;
        uint32_t elemPerBlock = BLOCK_SIZE / sizeof(T);
        uint32_t inputGmOffset = coreIdx * (blockPerCore + 1) * elemPerBlock; // this core's global offset, in elements
        elemPerIter = elemPerBlock * MAX_BLOCK_PER_ITER;                      // max elements handled per iteration
        if (coreIdx < nAcores) {                                              // big core: one extra block
            this->nElem = (blockPerCore + 1) * elemPerBlock;
            nIter = (blockPerCore + 1 + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = ((blockPerCore + 1) % MAX_BLOCK_PER_ITER) * elemPerBlock;
        } else { // small core: back out the extra blocks granted to the big cores
            inputGmOffset = inputGmOffset - (this->coreIdx - nAcores) * elemPerBlock;
            this->nElem = blockPerCore * elemPerBlock;
            nIter = (blockPerCore + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = (blockPerCore % MAX_BLOCK_PER_ITER) * elemPerBlock;
        }
        // Coordinate helpers: suffix products of the shapes.
        // dimSuffix[i] = prod(shape[i..nDim-1]); entry [nDim] is the sentinel 1,
        // so the arrays need MAX_DIM + 1 entries. (The original MAX_DIM-sized
        // arrays overflowed at index MAX_DIM when nDim == MAX_DIM.)
        elemOffset = inputGmOffset; // running global element index, used to derive coordinates
        for (int i = 0; i <= MAX_DIM; i++) {
            dimSuffix[i] = 1;
            otherDimSuffix[i] = 1;
        }
        for (int i = nDim - 1; i >= 0; i--) {
            dimSuffix[i] = shapeInput[i] * dimSuffix[i + 1];
            otherDimSuffix[i] = shapeOther[i] * otherDimSuffix[i + 1];
        }

        inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + inputGmOffset, nElem);
        // `other` has otherDimSuffix[0] elements in total (the original declared
        // the input's element count here).
        otherGm.SetGlobalBuffer((__gm__ DTYPE_OTHER*)other, (uint32_t)otherDimSuffix[0]);
        outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out + inputGmOffset, nElem);

        pipe->InitBuffer(OutQue, BUFFER_NUM, elemPerIter * sizeof(T));
        pipe->InitBuffer(inputQue, BUFFER_NUM, elemPerIter * sizeof(T));
    }

    // Drives the CopyIn -> Compute -> CopyOut pipeline over all iterations.
    __aicore__ inline void Process() {
        if (nElem < elemPerIter) {
            // Everything fits in a single iteration.
            elemPerIter = nElem;
            curElem = nElem;
            CopyIn(0);
            Compute(0);
            CopyOut(0);
        } else {
            // Several full iterations followed by a possibly shorter tail iteration.
            curElem = elemPerIter;
            for (int i = 0; i < nIter - 1; i++) {
                CopyIn(i);
                Compute(i);
                CopyOut(i);
            }
            if (tailElem > 0) {
                curElem = tailElem;
            }
            CopyIn(nIter - 1);
            Compute(nIter - 1);
            CopyOut(nIter - 1);
        }
    }
    // Loads curElem input elements for iteration iterIdx.
    __aicore__ inline void CopyIn(int iterIdx) {
        auto inputLocal = inputQue.AllocTensor<T>();
        DataCopy(inputLocal, inputGm[elemPerIter * iterIdx], curElem);
        inputQue.EnQue(inputLocal);
    }
    // Scalar shift: for every element, derive its multi-dim coordinate from the
    // running global index and map it onto `other`, collapsing broadcast dims to 0.
    __aicore__ inline void Compute(int iterIdx) {
        auto outLocal = OutQue.AllocTensor<T>();
        auto inputLocal = inputQue.DeQue<T>();
        for (int i = 0; i < curElem; i++) {
            uint64_t otherOffset = 0;
            for (int j = 0; j < nDim; j++) {
                uint32_t curDimensionInput = ((elemOffset) / dimSuffix[j + 1]) % shapeInput[j];
                // A dimension is broadcast when input's extent exceeds other's; use index 0 there.
                uint32_t curDimensionOther = (shapeInput[j] > shapeOther[j]) ? 0 : curDimensionInput;
                otherOffset += otherDimSuffix[j + 1] * curDimensionOther;
            }
            outLocal.SetValue(i, inputLocal.GetValue(i) << otherGm.GetValue(otherOffset));
            elemOffset++; // advance the global index once per element
        }
        OutQue.EnQue(outLocal);
        inputQue.FreeTensor(inputLocal);
    }
    __aicore__ inline void CopyOut(int iterIdx) {
        auto outLocal = OutQue.DeQue<T>();
        DataCopy(outGm[elemPerIter * iterIdx], outLocal, curElem);
        OutQue.FreeTensor(outLocal);
    }

  private:
    const static int BUFFER_NUM = 1;
    GlobalTensor<T> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx, MAX_BLOCK_PER_ITER, nDim;
    uint32_t nElem, nIter, elemPerIter, tailElem, curElem;
    const uint32_t *shapeInput, *shapeOther;
    uint64_t elemOffset;
    // Suffix products; index nDim (at most MAX_DIM) holds the sentinel value 1.
    uint64_t dimSuffix[MAX_DIM + 1], otherDimSuffix[MAX_DIM + 1];
    TQue<QuePosition::VECOUT, BUFFER_NUM> OutQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
};

/*------------------------------------------------------
 *
 *  Only one dimension needs broadcasting: vectorized data
 *  transfer, scalar computation.
 *
 *------------------------------------------------------*/
// Exactly one dimension is broadcast: each `other` batch is loaded once and
// reused for `broadCastDim` consecutive input batches. Transfers use
// DataCopyPad; the shift itself is computed scalar-by-scalar.
template <typename T> class BitwiseLeftShiftOpBroadcastOne {
  public:
    __aicore__ inline BitwiseLeftShiftOpBroadcastOne() {}
    // Sets up per-core batch range and buffers.
    //   nBatchPerCore : batches per small core (big cores handle one extra)
    //   broadCastDim  : number of consecutive input batches sharing one other-batch
    //   batchDataSize : elements per batch
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t nBatchPerCore, uint16_t nAcores, uint16_t nBcores,
                                uint32_t broadCastDim, uint32_t batchDataSize, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->coreIdx = GetBlockIdx();
        this->nBatch = nBatchPerCore;
        this->broadCastDim = broadCastDim;
        this->batchDataSize = batchDataSize;
        // Round the per-batch element count up to a multiple of 32.
        // (The original `(batchDataSize + 32) / 32 * 32` over-allocated one full
        // extra 32-element chunk whenever batchDataSize was already a multiple of 32;
        // the standard align-up form uses + 31.)
        this->alignBatchDataSize = (batchDataSize + 31) / 32 * 32;
        this->curOtherId = -1;
        uint32_t totalBatch = nBatchPerCore * nBcores + (nBatchPerCore + 1) * nAcores;
        if (coreIdx < nAcores) { // big core
            nBatch = nBatchPerCore + 1;
            startBatch = coreIdx * (nBatchPerCore + 1);
            endBatch = startBatch + nBatch;
        } else { // small core
            nBatch = nBatchPerCore;
            startBatch = nAcores * (nBatchPerCore + 1) + (coreIdx - nAcores) * nBatchPerCore;
            endBatch = startBatch + nBatch;
        }
        inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input, totalBatch * batchDataSize);
        otherGm.SetGlobalBuffer((__gm__ DTYPE_OTHER*)other, totalBatch / broadCastDim * batchDataSize);
        outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out, totalBatch * batchDataSize);

        pipe->InitBuffer(OutQue, BUFFER_NUM, alignBatchDataSize * sizeof(T));
        pipe->InitBuffer(inputQue, BUFFER_NUM, alignBatchDataSize * sizeof(T) * nPerCopy);
        pipe->InitBuffer(otherQue, BUFFER_NUM, alignBatchDataSize * sizeof(T));
    }
    // Walks this core's batch range; loads each other-batch once and reuses it
    // for every input batch that maps onto it, copying input batches nPerCopy
    // at a time where possible.
    __aicore__ inline void Process1() {
        for (int batchIdx = startBatch; batchIdx < endBatch;) {
            uint32_t otherBatchId = batchIdx / broadCastDim;
            uint32_t nSameOther = (otherBatchId + 1) * broadCastDim - batchIdx; // batches sharing this other-batch
            nSameOther = min(endBatch - batchIdx, nSameOther);                  // clamp to this core's range
            CopyInOther(otherBatchId);
            auto otherLocal = otherQue.DeQue<DTYPE_INPUT>();
            int i = 0;
            // Bulk path: nPerCopy input batches per transfer.
            for (i = 0; i + nPerCopy <= nSameOther; i += nPerCopy) {
                CopyIn(batchIdx + i, nPerCopy);
                auto inputLocal = inputQue.DeQue<DTYPE_INPUT>();
                for (int j = 0; j < nPerCopy; j++) {
                    Compute(inputLocal, otherLocal, j * batchDataSize);
                    CopyOut(batchIdx + i + j);
                }
                inputQue.FreeTensor(inputLocal);
            }
            // Remainder: one batch at a time.
            for (; i < nSameOther; i++) {
                CopyIn(batchIdx + i, 1);
                auto inputLocal = inputQue.DeQue<DTYPE_INPUT>();
                Compute(inputLocal, otherLocal, 0);
                inputQue.FreeTensor(inputLocal);
                CopyOut(batchIdx + i);
            }
            batchIdx += nSameOther; // advance past the batches just handled
            otherQue.FreeTensor(otherLocal);
        }
    }
    // Loads one other-batch (batchDataSize elements) with trailing padding.
    __aicore__ inline void CopyInOther(int batchIdx) {
        auto otherLocal = otherQue.AllocTensor<DTYPE_INPUT>();
        // NOTE(review): blockLen is uint16_t — batchDataSize * sizeof(T) must fit
        // in 16 bits; confirm against the tiling that produces batchDataSize.
        DataCopyParams copyParams{1, (uint16_t)(batchDataSize * sizeof(T)), 0, 0};
        DataCopyPadParams padParams{false, 0, 0, 0};
        DataCopyPad(otherLocal, otherGm[batchIdx * batchDataSize], copyParams, padParams);
        otherQue.EnQue(otherLocal);
    }
    // Loads n2Copy consecutive input batches as one contiguous transfer.
    __aicore__ inline void CopyIn(int batchIdx, int n2Copy) {
        auto inputLocal = inputQue.AllocTensor<DTYPE_INPUT>();
        DataCopyParams copyParams{1, (uint16_t)(batchDataSize * sizeof(T) * n2Copy), 0, 0};
        DataCopyPadParams padParams{false, 0, 0, 0};
        DataCopyPad(inputLocal, inputGm[batchIdx * batchDataSize], copyParams, padParams);
        inputQue.EnQue(inputLocal);
    }
    // Scalar shift of one batch starting at startOffset inside inputLocal,
    // manually unrolled by 8; the tail loop handles the final < 8 elements.
    __aicore__ inline void Compute(LocalTensor<DTYPE_INPUT> inputLocal, LocalTensor<DTYPE_INPUT> otherLocal, int startOffset) {
        auto outLocal = OutQue.AllocTensor<T>();
        int i = 0;
        for (i = 0; i + 8 <= batchDataSize; i += 8) {
            DTYPE_INPUT o1, o2, o3, o4, o5, o6, o7, o8;
            DTYPE_INPUT i1, i2, i3, i4, i5, i6, i7, i8;
            DTYPE_INPUT r1, r2, r3, r4, r5, r6, r7, r8;
            o1 = otherLocal.GetValue(i);
            o2 = otherLocal.GetValue(i + 1);
            o3 = otherLocal.GetValue(i + 2);
            o4 = otherLocal.GetValue(i + 3);
            o5 = otherLocal.GetValue(i + 4);
            o6 = otherLocal.GetValue(i + 5);
            o7 = otherLocal.GetValue(i + 6);
            o8 = otherLocal.GetValue(i + 7);
            i1 = inputLocal.GetValue(startOffset + i);
            i2 = inputLocal.GetValue(startOffset + i + 1);
            i3 = inputLocal.GetValue(startOffset + i + 2);
            i4 = inputLocal.GetValue(startOffset + i + 3);
            i5 = inputLocal.GetValue(startOffset + i + 4);
            i6 = inputLocal.GetValue(startOffset + i + 5);
            i7 = inputLocal.GetValue(startOffset + i + 6);
            i8 = inputLocal.GetValue(startOffset + i + 7);
            r1 = i1 << o1;
            r2 = i2 << o2;
            r3 = i3 << o3;
            r4 = i4 << o4;
            r5 = i5 << o5;
            r6 = i6 << o6;
            r7 = i7 << o7;
            r8 = i8 << o8;
            outLocal.SetValue(i,      r1);
            outLocal.SetValue(i + 1,  r2);
            outLocal.SetValue(i + 2,  r3);
            outLocal.SetValue(i + 3,  r4);
            outLocal.SetValue(i + 4,  r5);
            outLocal.SetValue(i + 5,  r6);
            outLocal.SetValue(i + 6,  r7);
            outLocal.SetValue(i + 7,  r8);
        }
        for (; i < batchDataSize; i++) {
            outLocal.SetValue(i, inputLocal.GetValue(startOffset + i) << otherLocal.GetValue(i));
        }
        OutQue.EnQue(outLocal);
    }
    // Stores one result batch back to global memory.
    __aicore__ inline void CopyOut(int batchIdx) {
        auto outLocal = OutQue.DeQue<T>();
        DataCopyParams copyParams{1, (uint16_t)(batchDataSize * sizeof(T)), 0, 0};
        DataCopyPad(outGm[batchDataSize * batchIdx], outLocal, copyParams);
        OutQue.FreeTensor(outLocal);
    }

  private:
    const static int BUFFER_NUM = 1;
    const static int nPerCopy = 4; // input batches moved per bulk transfer
    GlobalTensor<T> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx;
    uint32_t curOtherId;
    uint32_t nBatch, broadCastDim, batchDataSize, startBatch, endBatch, alignBatchDataSize;
    TQue<QuePosition::VECOUT, BUFFER_NUM> OutQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> otherQue;
};

/*------------------------------------------------------
 *
 *  No broadcasting needed: all inputs and outputs can use
 *  plain DataCopy transfers; scalar computation.
 *
 *------------------------------------------------------*/

// No broadcasting: input, other, and out are element-aligned, so all three use
// plain DataCopy transfers; the shift itself is computed scalar-by-scalar.
template <typename T> class BitwiseLeftShiftOp {
  public:
    __aicore__ inline BitwiseLeftShiftOp() {}
    // Sets up per-core tiling and global buffers.
    //   blockPerCore      : BLOCK_SIZE-byte blocks per small core (big cores get one more)
    //   nAcores / nBcores : number of big / small cores
    //   maxBlockPerIter   : upper bound of blocks moved per pipeline iteration
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t blockPerCore, uint16_t nAcores, uint16_t nBcores,
                                uint16_t maxBlockPerIter, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->coreIdx = GetBlockIdx();
        this->MAX_BLOCK_PER_ITER = maxBlockPerIter;
        uint32_t elemPerBlock = BLOCK_SIZE / sizeof(T);
        uint32_t inputGmOffset = coreIdx * (blockPerCore + 1) * elemPerBlock; // this core's global offset, in elements
        elemPerIter = elemPerBlock * MAX_BLOCK_PER_ITER;                      // max elements handled per iteration
        if (coreIdx < nAcores) {                                              // big core: one extra block
            this->nElem = (blockPerCore + 1) * elemPerBlock;
            nIter = (blockPerCore + 1 + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = ((blockPerCore + 1) % MAX_BLOCK_PER_ITER) * elemPerBlock;
        } else { // small core: back out the extra blocks granted to the big cores
            inputGmOffset = inputGmOffset - (this->coreIdx - nAcores) * elemPerBlock;
            this->nElem = blockPerCore * elemPerBlock;
            nIter = (blockPerCore + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = (blockPerCore % MAX_BLOCK_PER_ITER) * elemPerBlock;
        }

        inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + inputGmOffset, nElem);
        otherGm.SetGlobalBuffer((__gm__ DTYPE_OTHER*)other + inputGmOffset, nElem);
        outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out + inputGmOffset, nElem);

        pipe->InitBuffer(outQue, BUFFER_NUM, elemPerIter * sizeof(T));
        pipe->InitBuffer(otherQue, BUFFER_NUM, elemPerIter * sizeof(T));
        pipe->InitBuffer(inputQue, BUFFER_NUM, elemPerIter * sizeof(T));
    }

    // Drives the CopyIn -> Compute -> CopyOut pipeline over all iterations.
    __aicore__ inline void Process() {
        if (nElem < elemPerIter) {
            // Everything fits in a single iteration.
            elemPerIter = nElem;
            curElem = nElem;
            CopyIn(0);
            Compute(0);
            CopyOut(0);
        } else {
            // Several full iterations followed by a possibly shorter tail iteration.
            curElem = elemPerIter;
            for (int i = 0; i < nIter - 1; i++) {
                CopyIn(i);
                Compute(i);
                CopyOut(i);
            }
            if (tailElem > 0) {
                curElem = tailElem;
            }
            CopyIn(nIter - 1);
            Compute(nIter - 1);
            CopyOut(nIter - 1);
        }
    }
    __aicore__ inline void CopyIn(int iterIdx) {
        auto inputLocal = inputQue.AllocTensor<T>();
        auto otherLocal = otherQue.AllocTensor<T>();
        DataCopy(inputLocal, inputGm[elemPerIter * iterIdx], curElem);
        DataCopy(otherLocal, otherGm[elemPerIter * iterIdx], curElem);
        inputQue.EnQue(inputLocal);
        otherQue.EnQue(otherLocal);
    }
    // Scalar element-wise shift, manually unrolled by 4.
    __aicore__ inline void Compute(int iterIdx) {
        auto outLocal = outQue.AllocTensor<T>();
        auto inputLocal = inputQue.DeQue<T>();
        auto otherLocal = otherQue.DeQue<T>();
        int i = 0;
        // The original condition `i < curElem` read past the end when curElem
        // was not a multiple of 4 and left the tail loop dead; `i + 4 <= curElem`
        // keeps the unrolled part in bounds and lets the tail loop finish the rest.
        for (i = 0; i + 4 <= curElem; i += 4) {
            outLocal.SetValue(i, inputLocal.GetValue(i) << otherLocal.GetValue(i));
            outLocal.SetValue(i + 1, inputLocal.GetValue(i + 1) << otherLocal.GetValue(i + 1));
            outLocal.SetValue(i + 2, inputLocal.GetValue(i + 2) << otherLocal.GetValue(i + 2));
            outLocal.SetValue(i + 3, inputLocal.GetValue(i + 3) << otherLocal.GetValue(i + 3));
        }
        for (; i < curElem; i++) {
            outLocal.SetValue(i, inputLocal.GetValue(i) << otherLocal.GetValue(i));
        }
        outQue.EnQue(outLocal);
        inputQue.FreeTensor(inputLocal);
        otherQue.FreeTensor(otherLocal);
    }
    __aicore__ inline void CopyOut(int iterIdx) {
        auto outLocal = outQue.DeQue<T>();
        DataCopy(outGm[elemPerIter * iterIdx], outLocal, curElem);
        outQue.FreeTensor(outLocal);
    }

  private:
    const static int BUFFER_NUM = 1;
    GlobalTensor<T> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx, MAX_BLOCK_PER_ITER;
    uint32_t nElem, nIter, elemPerIter, tailElem, curElem;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> otherQue;
};

/*------------------------------------------------------
 *
 *  No broadcasting needed: all inputs and outputs can use
 *  DataCopy transfers; vectorized computation, Int16.
 *
 *------------------------------------------------------*/
// No broadcasting, int16 elements: DataCopy transfers plus a vectorized
// "binary decomposition" shift. The shift amount in `other` is consumed in
// power-of-two chunks (16, 8, 4, 2, 1): each chunk is applied with ShiftLeft
// and merged in via CompareScalar + Select on a half-reinterpreted view
// (Select on half lanes moves 16-bit patterns, which is exactly one int16).
class BitwiseLeftShiftVeci16Op {
  public:
    __aicore__ inline BitwiseLeftShiftVeci16Op() {}
    // Sets up per-core tiling, global buffers, and the scratch buffers needed
    // by the vectorized compare/select pipeline.
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t blockPerCore, uint16_t nAcores, uint16_t nBcores,
                                uint16_t maxBlockPerIter, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->coreIdx = GetBlockIdx();
        this->MAX_BLOCK_PER_ITER = maxBlockPerIter;
        this->CMP_ELEM_ALIGN = 256 / sizeof(int16_t); // Compare works on 256-byte-aligned element counts

        uint32_t elemPerBlock = BLOCK_SIZE / sizeof(int16_t);
        uint32_t inputGmOffset = coreIdx * (blockPerCore + 1) * elemPerBlock; // this core's global offset, in elements
        elemPerIter = elemPerBlock * MAX_BLOCK_PER_ITER;                      // max elements handled per iteration
        if (coreIdx < nAcores) {                                              // big core: one extra block
            this->nElem = (blockPerCore + 1) * elemPerBlock;
            nIter = (blockPerCore + 1 + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = ((blockPerCore + 1) % MAX_BLOCK_PER_ITER) * elemPerBlock;
        } else { // small core: back out the extra blocks granted to the big cores
            inputGmOffset = inputGmOffset - (this->coreIdx - nAcores) * elemPerBlock;
            this->nElem = blockPerCore * elemPerBlock;
            nIter = (blockPerCore + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = (blockPerCore % MAX_BLOCK_PER_ITER) * elemPerBlock;
        }

        inputGm.SetGlobalBuffer((__gm__ int16_t*)input + inputGmOffset, nElem);
        otherGm.SetGlobalBuffer((__gm__ int16_t*)other + inputGmOffset, nElem);
        outGm.SetGlobalBuffer((__gm__ int16_t*)out + inputGmOffset, nElem);

        // Compare requires 256-byte alignment, so every tensor buffer is padded up.
        uint32_t BUFFER_LEN = ((elemPerIter + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
        pipe->InitBuffer(outQue, BUFFER_NUM, BUFFER_LEN * sizeof(int16_t));
        pipe->InitBuffer(otherQue, BUFFER_NUM, BUFFER_LEN * sizeof(int16_t));
        pipe->InitBuffer(inputQue, BUFFER_NUM, BUFFER_LEN * sizeof(int16_t));
        pipe->InitBuffer(bitBuf, (BUFFER_LEN / 8) * sizeof(uint8_t)); // one compare-result bit per element
        pipe->InitBuffer(shiftBuf, BUFFER_LEN * sizeof(int16_t));     // candidate shifted values
        pipe->InitBuffer(subBuf, BUFFER_LEN * sizeof(int16_t));       // candidate reduced shift amounts (read as half)
        pipe->InitBuffer(halfBuf, BUFFER_LEN * sizeof(half));         // half-typed copy of `other`
    }

    // Drives the CopyIn -> Compute -> CopyOut pipeline over all iterations.
    // curCmpElem is curElem rounded up to the Compare alignment.
    __aicore__ inline void Process() {
        if (nElem < elemPerIter) {
            // Everything fits in a single iteration.
            elemPerIter = nElem;
            curElem = nElem;
            curCmpElem = ((curElem + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
            CopyIn(0);
            Compute(0);
            CopyOut(0);
        } else {
            // Several full iterations followed by a possibly shorter tail iteration.
            curElem = elemPerIter;
            curCmpElem = ((curElem + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
            for (int i = 0; i < nIter - 1; i++) {
                CopyIn(i);
                Compute(i);
                CopyOut(i);
            }
            if (tailElem > 0) {
                curElem = tailElem;
                curCmpElem = ((tailElem + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
            }
            CopyIn(nIter - 1);
            Compute(nIter - 1);
            CopyOut(nIter - 1);
        }
    }
    __aicore__ inline void CopyIn(int iterIdx) {
        auto inputLocal = inputQue.AllocTensor<int16_t>();
        auto otherLocal = otherQue.AllocTensor<int16_t>();
        DataCopy(inputLocal, inputGm[elemPerIter * iterIdx], curElem);
        DataCopy(otherLocal, otherGm[elemPerIter * iterIdx], curElem);
        inputQue.EnQue(inputLocal);
        otherQue.EnQue(otherLocal);
    }
    // Vectorized shift by binary decomposition. For each width w in {16, 8, 4, 2}:
    //   cmpRes   = (remaining shift >= w)        -- CompareScalar on the half copy
    //   input    = cmpRes ? (input << w) : input -- ShiftLeft + Select (half view)
    //   remaining = cmpRes ? remaining - w : remaining -- Adds + Select
    // then a final w == 1 step writes straight into outLocal.
    // Ops beyond curElem (up to curCmpElem) touch only padding; those lanes are
    // never copied out.
    __aicore__ inline void Compute(int iterIdx) {
        auto outLocal = outQue.AllocTensor<int16_t>();
        auto inputLocal = inputQue.DeQue<int16_t>();
        auto otherLocal = otherQue.DeQue<int16_t>();
        // Convert `other` to half so the vector CompareScalar can be used.
        auto halfOther = halfBuf.Get<half>();
        Cast(halfOther, otherLocal, AscendC::RoundMode::CAST_NONE, curElem);
        // Binary-decomposition shift.
        auto cmpRes = bitBuf.Get<uint8_t>();
        auto shiftTmp = shiftBuf.Get<int16_t>();
        auto subTmp = subBuf.Get<half>();
        int16_t shiftBits = sizeof(int16_t) * 8; // start at the full width, 16
        // Reinterpret as half so Select can move 16-bit patterns unchanged.
        auto halfLocal = inputLocal.ReinterpretCast<half>();
        auto shiftTmpHalf = shiftTmp.ReinterpretCast<half>();
        while (shiftBits > 1) {
            // Candidate reduced shift amount: halfOther - shiftBits.
            Adds(subTmp, halfOther, (half)(((float)-1) * ((float)shiftBits)), curElem);
            CompareScalar(cmpRes, halfOther, (half)shiftBits, CMPMODE::GE, curCmpElem);
            ShiftLeft(shiftTmp, inputLocal, shiftBits, curCmpElem);
            Select(halfLocal, cmpRes, shiftTmpHalf, halfLocal, SELMODE::VSEL_TENSOR_TENSOR_MODE, curCmpElem);
            Select(halfOther, cmpRes, subTmp, halfOther, SELMODE::VSEL_TENSOR_TENSOR_MODE, curCmpElem);
            shiftBits /= 2;
        }
        // Final step (shiftBits == 1): select directly into the output tensor.
        CompareScalar(cmpRes, halfOther, (half)shiftBits, CMPMODE::GE, curCmpElem);
        ShiftLeft(shiftTmp, inputLocal, shiftBits, curCmpElem);
        LocalTensor<half> outHalfLocal = outLocal.ReinterpretCast<half>();
        Select(outHalfLocal, cmpRes, shiftTmpHalf, halfLocal, SELMODE::VSEL_TENSOR_TENSOR_MODE, curCmpElem);
        outQue.EnQue(outLocal);
        inputQue.FreeTensor(inputLocal);
        otherQue.FreeTensor(otherLocal);
    }
    __aicore__ inline void CopyOut(int iterIdx) {
        auto outLocal = outQue.DeQue<int16_t>();
        DataCopy(outGm[elemPerIter * iterIdx], outLocal, curElem);
        outQue.FreeTensor(outLocal);
    }

  private:
    const static int BUFFER_NUM = 1;
    GlobalTensor<int16_t> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx, MAX_BLOCK_PER_ITER, CMP_ELEM_ALIGN;
    uint32_t nElem, nIter, elemPerIter, tailElem, curElem, curCmpElem;
    TBuf<QuePosition::VECCALC> bitBuf;   // compare-result bitmask
    TBuf<QuePosition::VECCALC> shiftBuf; // candidate shifted values
    TBuf<QuePosition::VECCALC> halfBuf;  // half-typed copy of `other`
    TBuf<QuePosition::VECCALC> subBuf;   // candidate reduced shift amounts
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> otherQue;
};

/*------------------------------------------------------
 *
 *  No broadcasting needed: all inputs and outputs can use
 *  DataCopy transfers; vectorized computation, Int32.
 *
 *------------------------------------------------------*/
// No broadcasting, int32 elements: DataCopy transfers plus a vectorized
// "binary decomposition" shift. The shift amount in `other` is consumed in
// power-of-two chunks (16, 8, 4, 2, 1): each chunk is applied with ShiftLeft
// and merged in via CompareScalar + Select on a float-reinterpreted view
// (Select on float lanes moves 32-bit patterns, which is exactly one int32).
// Unlike the int16 path, the consumed bit is cleared from `other` with And,
// so the in-loop compare stays an integer compare.
class BitwiseLeftShiftVeci32Op {
  public:
    __aicore__ inline BitwiseLeftShiftVeci32Op() {}
    // Sets up per-core tiling, global buffers, and the scratch buffers needed
    // by the vectorized compare/select pipeline.
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t blockPerCore, uint16_t nAcores, uint16_t nBcores,
                                uint16_t maxBlockPerIter, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->coreIdx = GetBlockIdx();
        this->MAX_BLOCK_PER_ITER = maxBlockPerIter;
        // Alignment for Compare: 256 bytes, expressed in half-sized elements.
        // (Kept as the value that was effective in the original — it overwrote
        // an initial 256/sizeof(int32_t) assignment — and it is a multiple of
        // the int32 compare requirement, so it only over-aligns.)
        this->CMP_ELEM_ALIGN = 256 / sizeof(half);

        uint32_t elemPerBlock = BLOCK_SIZE / sizeof(int32_t);
        uint32_t inputGmOffset = coreIdx * (blockPerCore + 1) * elemPerBlock; // this core's global offset, in elements
        elemPerIter = elemPerBlock * MAX_BLOCK_PER_ITER;                      // max elements handled per iteration
        if (coreIdx < nAcores) {                                              // big core: one extra block
            this->nElem = (blockPerCore + 1) * elemPerBlock;
            nIter = (blockPerCore + 1 + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = ((blockPerCore + 1) % MAX_BLOCK_PER_ITER) * elemPerBlock;
        } else { // small core: back out the extra blocks granted to the big cores
            inputGmOffset = inputGmOffset - (this->coreIdx - nAcores) * elemPerBlock;
            this->nElem = blockPerCore * elemPerBlock;
            nIter = (blockPerCore + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = (blockPerCore % MAX_BLOCK_PER_ITER) * elemPerBlock;
        }

        inputGm.SetGlobalBuffer((__gm__ int32_t*)input + inputGmOffset, nElem);
        otherGm.SetGlobalBuffer((__gm__ int32_t*)other + inputGmOffset, nElem);
        outGm.SetGlobalBuffer((__gm__ int32_t*)out + inputGmOffset, nElem);

        // Compare requires 256-byte alignment, so every tensor buffer is padded up.
        uint32_t BUFFER_LEN = ((elemPerIter + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
        pipe->InitBuffer(outQue, BUFFER_NUM, BUFFER_LEN * sizeof(int32_t));
        pipe->InitBuffer(otherQue, BUFFER_NUM, BUFFER_LEN * sizeof(int32_t));
        pipe->InitBuffer(inputQue, BUFFER_NUM, BUFFER_LEN * sizeof(int32_t));
        pipe->InitBuffer(bitBuf, (BUFFER_LEN / 8) * sizeof(uint8_t)); // one compare-result bit per element
        pipe->InitBuffer(shiftBuf, BUFFER_LEN * sizeof(int32_t));     // candidate shifted values
        pipe->InitBuffer(subBuf, BUFFER_LEN * sizeof(float));         // broadcast bit-clear mask
    }

    // Drives the CopyIn -> Compute -> CopyOut pipeline over all iterations.
    // curCmpElem is curElem rounded up to the Compare alignment.
    __aicore__ inline void Process() {
        if (nElem < elemPerIter) {
            // Everything fits in a single iteration.
            elemPerIter = nElem;
            curElem = nElem;
            curCmpElem = ((curElem + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
            CopyIn(0);
            Compute(0);
            CopyOut(0);
        } else {
            // Several full iterations followed by a possibly shorter tail iteration.
            curElem = elemPerIter;
            curCmpElem = ((curElem + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
            for (int i = 0; i < nIter - 1; i++) {
                CopyIn(i);
                Compute(i);
                CopyOut(i);
            }
            if (tailElem > 0) {
                curElem = tailElem;
                curCmpElem = ((tailElem + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
            }
            CopyIn(nIter - 1);
            Compute(nIter - 1);
            CopyOut(nIter - 1);
        }
    }
    __aicore__ inline void CopyIn(int iterIdx) {
        auto inputLocal = inputQue.AllocTensor<int32_t>();
        auto otherLocal = otherQue.AllocTensor<int32_t>();
        DataCopy(inputLocal, inputGm[elemPerIter * iterIdx], curElem);
        DataCopy(otherLocal, otherGm[elemPerIter * iterIdx], curElem);
        inputQue.EnQue(inputLocal);
        otherQue.EnQue(otherLocal);
    }
    // Vectorized shift by binary decomposition. For each width w in {16, 8, 4, 2}:
    //   cmpRes = (other >= w)             -- valid because higher bits were cleared
    //   input  = cmpRes ? (input << w) : input  -- ShiftLeft + Select (float view)
    //   other &= ~w                       -- consume that bit of the shift amount
    // then a final w == 1 step writes straight into outLocal.
    // NOTE(review): shift amounts >= 32 are not handled — only bits 4..0 of
    // `other` are ever inspected/cleared, so e.g. other == 32 shifts by 31.
    // The original had a commented-out >= 32 pre-pass; confirm the expected
    // semantics for out-of-range shifts with the op specification.
    __aicore__ inline void Compute(int iterIdx) {
        auto otherLocal = otherQue.DeQue<int32_t>();
        auto cmpRes = bitBuf.Get<uint8_t>();
        auto shiftTmp = shiftBuf.Get<int32_t>();
        auto subTmp = subBuf.Get<uint32_t>();
        auto inputLocal = inputQue.DeQue<int32_t>();
        // Reinterpret as float so Select can move 32-bit patterns unchanged.
        LocalTensor<float> floatLocal = inputLocal.ReinterpretCast<float>();
        LocalTensor<float> shiftTmpFloat = shiftTmp.ReinterpretCast<float>();
        LocalTensor<uint32_t> otherintTmp = otherLocal.ReinterpretCast<uint32_t>();
        Duplicate(subTmp, (uint32_t)(~((uint32_t)16)), curElem); // mask that clears bit 4 first

        int32_t shiftBits = sizeof(int32_t) * 8;
        shiftBits /= 2; // start at width 16
        while (shiftBits > 1) {
            ShiftLeft(shiftTmp, inputLocal, shiftBits, curElem);
            CompareScalar(cmpRes, otherLocal, (int32_t)shiftBits, CMPMODE::GE, curCmpElem);
            And(otherintTmp, otherintTmp, subTmp, curElem); // consume the bit just tested
            Duplicate(subTmp, (uint32_t)(~((uint32_t)shiftBits / 2)), curElem); // mask for the next round
            Select(floatLocal, cmpRes, shiftTmpFloat, floatLocal, SELMODE::VSEL_TENSOR_TENSOR_MODE, curElem);
            shiftBits /= 2;
        }
        // Final step (shiftBits == 1): only bit 0 of `other` remains, so compare
        // the masked integer shift amount directly. (The original compared an
        // uninitialized float buffer here — its Cast fills were commented out —
        // and had already freed otherLocal before this point: use-after-free.)
        CompareScalar(cmpRes, otherLocal, (int32_t)shiftBits, CMPMODE::GE, curCmpElem);
        ShiftLeft(shiftTmp, inputLocal, shiftBits, curElem);

        auto outLocal = outQue.AllocTensor<int32_t>();
        LocalTensor<float> outFloatLocal = outLocal.ReinterpretCast<float>();
        Select(outFloatLocal, cmpRes, shiftTmpFloat, floatLocal, SELMODE::VSEL_TENSOR_TENSOR_MODE, curElem);
        outQue.EnQue(outLocal);
        inputQue.FreeTensor(inputLocal);
        otherQue.FreeTensor(otherLocal); // freed only after its last use
    }
    __aicore__ inline void CopyOut(int iterIdx) {
        auto outLocal = outQue.DeQue<int32_t>();
        DataCopy(outGm[elemPerIter * iterIdx], outLocal, curElem);
        outQue.FreeTensor(outLocal);
    }

  private:
    const static int BUFFER_NUM = 1;
    GlobalTensor<int32_t> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx, MAX_BLOCK_PER_ITER, CMP_ELEM_ALIGN;
    uint32_t nElem, nIter, elemPerIter, tailElem, curElem, curCmpElem;
    TBuf<QuePosition::VECCALC> bitBuf;   // compare-result bitmask
    TBuf<QuePosition::VECCALC> subBuf;   // broadcast bit-clear mask
    TBuf<QuePosition::VECCALC> shiftBuf; // candidate shifted values
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> otherQue;
};

// Kernel entry point: dispatches to the operator variant selected by the tiling key.
//   key 0: no broadcasting, scalar compute
//   key 1: broadcasting over multiple dimensions, scalar compute
//   key 2: no broadcasting, vectorized compute (int16 path for 2-byte dtypes,
//          otherwise the int32 path)
//   key 3: exactly one broadcast dimension
extern "C" __global__ __aicore__ void bitwise_left_shift(GM_ADDR input, GM_ADDR other, GM_ADDR out, GM_ADDR workspace, GM_ADDR tiling) {
    TPipe pipe;
    if (TILING_KEY_IS(0)) { // no broadcasting
        GET_TILING_DATA(tiling_data, tiling);
        BitwiseLeftShiftOp<DTYPE_INPUT> op;
        op.Init(input, other, out, tiling_data.blockPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.maxBlockPerIter, &pipe);
        op.Process();
    } else if (TILING_KEY_IS(1)) { // multi-dimension broadcasting
        GET_TILING_DATA_WITH_STRUCT(BitwiseLeftShiftTilingDataBroad, tiling_data, tiling);
        BitwiseLeftShiftOpBroadcast<DTYPE_INPUT> op;
        op.Init(input, other, out, tiling_data.blockPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.shapeInput, tiling_data.shapeOther,
                tiling_data.nDim, tiling_data.maxBlockPerIter, &pipe);
        op.Process();
    } else if (TILING_KEY_IS(2)) { // no broadcasting, vectorized
        GET_TILING_DATA(tiling_data, tiling);
        if (sizeof(DTYPE_INPUT) == 2) {
            BitwiseLeftShiftVeci16Op op;
            op.Init(input, other, out, tiling_data.blockPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.maxBlockPerIter, &pipe);
            op.Process();
        } else {
            BitwiseLeftShiftVeci32Op op;
            op.Init(input, other, out, tiling_data.blockPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.maxBlockPerIter, &pipe);
            op.Process();
        }
    } else if (TILING_KEY_IS(3)) { // exactly one broadcast dimension
        GET_TILING_DATA_WITH_STRUCT(BitwiseLeftShiftTilingDataBroadSpec, tiling_data, tiling);
        BitwiseLeftShiftOpBroadcastOne<DTYPE_INPUT> op;
        op.Init(input, other, out, tiling_data.nBatchPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.broadcastDim,
                tiling_data.batchDataSize, &pipe);
        op.Process1();
    }
}