#include "kernel_operator.h"

using namespace AscendC;

// Size in bytes of one data-transfer block; per-core work is partitioned in
// units of BLOCK_SIZE / sizeof(T) elements (see the Init methods below).
const int BLOCK_SIZE = 32;
// Maximum tensor rank supported by the broadcast path (sizes the suffix arrays).
const int MAX_DIM = 5;

// Absolute value usable in device code. Same caveat as any integer abs: for the
// most negative value of a signed type the negation wraps.
template <typename T> __aicore__ inline T abs(T a) {
    if (a < 0) {
        return -a;
    }
    return a;
}
// Euclid's algorithm on non-negative operands; gcd(a, 0) == a.
template <typename T> __aicore__ inline T gcd(T a, T b) {
    for (; b != 0;) {
        T next = a % b;
        a = b;
        b = next;
    }
    return a;
}

/*------------------------------------------------------
 *
 *  Broadcast required: `other` can only be read from global memory; scalar computation.
 *
 *------------------------------------------------------*/
template <typename T> class LcmOpBroadcast {
  public:
    __aicore__ inline LcmOpBroadcast() {}
    // Broadcast lcm, scalar path: each element of `input` is paired with the
    // broadcast-matching element of `other`, which is read directly from GM.
    // blockPerCore: 32-byte blocks per small core (big cores take one extra);
    // nAcores/nBcores: big/small core counts; shapeInput/shapeOther: shapes of
    // nDim (<= MAX_DIM) dims, other broadcastable to input; maxBlockPerIter:
    // blocks moved to UB per iteration.
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t blockPerCore, uint16_t nAcores, uint16_t nBcores,
                                const uint32_t* shapeInput, const uint32_t* shapeOther, uint16_t nDim, uint16_t maxBlockPerIter, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->nDim = nDim;
        this->shapeInput = shapeInput;
        this->shapeOther = shapeOther;
        this->coreIdx = GetBlockIdx();
        this->MAX_BLOCK_PER_ITER = maxBlockPerIter;
        uint32_t elemPerBlock = BLOCK_SIZE / sizeof(T);
        uint32_t inputGmOffset = coreIdx * (blockPerCore + 1) * elemPerBlock; // this core's global offset, in elements
        elemPerIter = elemPerBlock * MAX_BLOCK_PER_ITER;                      // max elements handled per iteration
        if (coreIdx < nAcores) {                                              // big core: one extra block
            this->nElem = (blockPerCore + 1) * elemPerBlock;
            nIter = (blockPerCore + 1 + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = ((blockPerCore + 1) % MAX_BLOCK_PER_ITER) * elemPerBlock;
        } else {                                                              // small core: shift base back by the extra blocks big cores took
            inputGmOffset = inputGmOffset - (this->coreIdx - nAcores) * elemPerBlock;
            this->nElem = blockPerCore * elemPerBlock;
            nIter = (blockPerCore + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = (blockPerCore % MAX_BLOCK_PER_ITER) * elemPerBlock;
        }
        // Coordinate bookkeeping: dimSuffix[i] = prod(shapeInput[i..nDim-1]) with
        // a sentinel 1 at index nDim. The loops below read index i + 1 up to nDim,
        // so the arrays hold MAX_DIM + 1 entries — with the previous MAX_DIM-sized
        // arrays, reading dimSuffix[nDim] was out of bounds when nDim == MAX_DIM.
        elemOffset = inputGmOffset; // global flat element index, used to derive coordinates
        for (int i = 0; i <= MAX_DIM; i++) {
            dimSuffix[i] = 1;
            otherDimSuffix[i] = 1;
        }
        for (int i = nDim - 1; i >= 0; i--) {
            dimSuffix[i] = shapeInput[i] * dimSuffix[i + 1];
            otherDimSuffix[i] = shapeOther[i] * otherDimSuffix[i + 1];
        }

        inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + inputGmOffset, nElem);
        // other's buffer length is other's own element count (previously this
        // passed dimSuffix[0], i.e. input's total size).
        otherGm.SetGlobalBuffer((__gm__ DTYPE_OTHER*)other, (uint32_t)otherDimSuffix[0]);
        outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out + inputGmOffset, nElem);

        pipe->InitBuffer(OutQue, BUFFER_NUM, elemPerIter * sizeof(T));
        pipe->InitBuffer(inputQue, BUFFER_NUM, elemPerIter * sizeof(T));
    }

    __aicore__ inline void Process() {
        if (nElem < elemPerIter) {
            // Everything fits in a single pass.
            elemPerIter = nElem;
            curElem = nElem;
            CopyIn(0);
            Compute(0);
            CopyOut(0);
        } else {
            // Full-size passes followed by one (possibly shorter) tail pass.
            curElem = elemPerIter;
            for (int i = 0; i < nIter - 1; i++) {
                CopyIn(i);
                Compute(i);
                CopyOut(i);
            }
            if (tailElem > 0) {
                curElem = tailElem;
            }
            CopyIn(nIter - 1);
            Compute(nIter - 1);
            CopyOut(nIter - 1);
        }
    }
    __aicore__ inline void CopyIn(int iterIdx) {
        auto inputLocal = inputQue.AllocTensor<T>();
        DataCopy(inputLocal, inputGm[elemPerIter * iterIdx], curElem);
        inputQue.EnQue(inputLocal);
    }
    __aicore__ inline void Compute(int iterIdx) {
        auto outLocal = OutQue.AllocTensor<T>();
        auto inputLocal = inputQue.DeQue<T>();
        for (int i = 0; i < curElem; i++) {
            // Map the flat input index to the broadcast-matching flat index in
            // other: dims where input is larger than other are broadcast (coord 0).
            uint64_t otherOffset = 0;
            for (int j = 0; j < nDim; j++) {
                uint32_t curDimensionInput = ((elemOffset) / dimSuffix[j + 1]) % shapeInput[j];
                uint32_t curDimensionOther = (shapeInput[j] > shapeOther[j]) ? 0 : curDimensionInput;
                otherOffset += otherDimSuffix[j + 1] * curDimensionOther;
            }
            T a, b;
            a = inputLocal.GetValue(i);
            b = otherGm.GetValue(otherOffset);
            T absa = a > 0 ? a : -a;
            T absb = b > 0 ? b : -b;
            T res = (absa / gcd(absa, absb)) * absb;
            // The product may wrap in the narrow type; for non-int8 types fold a
            // negative result back to its absolute value.
            if constexpr (!std::is_same_v<T, std::int8_t>) {
                if (res < 0)
                    res = -res;
            }
            outLocal.SetValue(i, res);
            // Advance the global flat index for the next element.
            elemOffset++;
        }
        OutQue.EnQue(outLocal);
        inputQue.FreeTensor(inputLocal);
    }
    __aicore__ inline void CopyOut(int iterIdx) {
        auto outLocal = OutQue.DeQue<T>();
        DataCopy(outGm[elemPerIter * iterIdx], outLocal, curElem);
        OutQue.FreeTensor(outLocal);
    }

  private:
    const static int BUFFER_NUM = 1;
    GlobalTensor<T> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx, MAX_BLOCK_PER_ITER, nDim;
    uint32_t nElem, nIter, elemPerIter, tailElem, curElem;
    const uint32_t *shapeInput, *shapeOther;
    uint64_t elemOffset;
    // Suffix products need one sentinel slot past index nDim, hence MAX_DIM + 1.
    uint64_t dimSuffix[MAX_DIM + 1], otherDimSuffix[MAX_DIM + 1];
    TQue<QuePosition::VECOUT, BUFFER_NUM> OutQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
};

/*------------------------------------------------------
 *
 *  Only one dimension needs broadcasting; vectorized data movement, scalar computation.
 *
 *------------------------------------------------------*/
template <typename T> class LcmOpBroadcastOne {
  public:
    __aicore__ inline LcmOpBroadcastOne() {}
    // Lcm with exactly one broadcast axis: `broadCastDim` consecutive input
    // batches of `batchDataSize` elements all pair with the same `other` batch.
    // Big cores (indices < nAcores) take nBatchPerCore + 1 batches, small cores
    // take nBatchPerCore. Data is moved batch-wise with DataCopyPad; the gcd is
    // pre-reduced with scalar binary-GCD steps, then finished with float vector ops.
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t nBatchPerCore, uint16_t nAcores, uint16_t nBcores,
                                uint32_t broadCastDim, uint32_t batchDataSize, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->coreIdx = GetBlockIdx();
        this->nBatch = nBatchPerCore;
        this->broadCastDim = broadCastDim;
        this->batchDataSize = batchDataSize;
        this->alignBatchDataSize = (batchDataSize + (32 / sizeof(T)) - 1) / (32 / sizeof(T)) * (32 / sizeof(T)); // round up to a 32-byte multiple
        this->curOtherId = -1; // NOTE(review): -1 wraps to 0xFFFFFFFF in uint32_t and the field is never read in this class — looks like dead state
        this->flag = false;    // NOTE(review): never read in this class either
        this->nPerCopy = 4;    // input batches moved per CopyIn
        uint32_t totalBatch = nBatchPerCore * nBcores + (nBatchPerCore + 1) * nAcores;
        if (coreIdx < nAcores) {
            nBatch = nBatchPerCore + 1; // big core
            startBatch = coreIdx * (nBatchPerCore + 1);
            endBatch = startBatch + nBatch;
        } else {
            nBatch = nBatchPerCore; // small core
            startBatch = nAcores * (nBatchPerCore + 1) + (coreIdx - nAcores) * nBatchPerCore;
            endBatch = startBatch + nBatch;
        }
        inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input, totalBatch * batchDataSize);
        otherGm.SetGlobalBuffer((__gm__ DTYPE_OTHER*)other, totalBatch / broadCastDim * batchDataSize);
        outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out, totalBatch * batchDataSize);

        pipe->InitBuffer(OutQue, BUFFER_NUM, alignBatchDataSize * sizeof(T) * nPerCopy);
        pipe->InitBuffer(inputQue, BUFFER_NUM, alignBatchDataSize * sizeof(T) * nPerCopy);
        pipe->InitBuffer(otherQue, BUFFER_NUM, alignBatchDataSize * sizeof(T));
        // Float scratch rounded up to a 256-byte multiple, as needed by the
        // vector Compare/Select/Fmod calls in ComputeGcdVec.
        int alignFloatSize = (batchDataSize * nPerCopy + (256 / sizeof(float)) - 1) / (256 / sizeof(float)) * (256 / sizeof(float));
        pipe->InitBuffer(tmpABuf, alignFloatSize * sizeof(float));
        pipe->InitBuffer(tmpBBuf, alignFloatSize * sizeof(float));
        pipe->InitBuffer(resBuf, alignFloatSize * sizeof(float));
        pipe->InitBuffer(modBuf, alignFloatSize * sizeof(float));
        pipe->InitBuffer(cmpResBuf, (alignFloatSize + 7) / 8); // one bit per element for CompareScalar results
    }
    __aicore__ inline void Process() {
        for (int batchIdx = startBatch; batchIdx < endBatch;) {
            uint32_t otherBatchId = batchIdx / broadCastDim;
            uint32_t nSameOther = (otherBatchId + 1) * broadCastDim - batchIdx; // input batches sharing this other batch
            nSameOther = min(endBatch - batchIdx, nSameOther);                  // clamp to this core's range
            CopyInOther(otherBatchId);
            // NOTE(review): other is dequeued as DTYPE_INPUT; assumes DTYPE_OTHER
            // is the same type — confirm against the op's type constraints.
            auto otherLocal = otherQue.DeQue<DTYPE_INPUT>();
            int i = 0;
            if (otherBatchId == 0)
                printFlag = true; // NOTE(review): printFlag is set but never used — debug leftover?
            else
                printFlag = false;
            for (i = 0; i < nSameOther; i += nPerCopy) {
                int nCopies = nPerCopy < (nSameOther - i) ? nPerCopy : (nSameOther - i);
                CopyIn(batchIdx + i, nCopies);
                auto inputLocal = inputQue.DeQue<DTYPE_INPUT>();
                Stein(inputLocal, otherLocal, nCopies);
                ComputeGcdVec(inputLocal, otherLocal, nCopies);
                // Re-enqueue/dequeue of inputLocal presumably inserts the queue
                // synchronization between the vector ops above and the scalar
                // reads in ComputeLcm — TODO confirm.
                inputQue.EnQue(inputLocal);
                inputLocal = inputQue.DeQue<DTYPE_INPUT>();
                ComputeLcm(inputLocal, otherLocal, nCopies);
                CopyOut(batchIdx + i, nCopies);
                inputQue.FreeTensor(inputLocal);
            }
            batchIdx += nSameOther; // advance past the batches sharing this other batch
            otherQue.FreeTensor(otherLocal);
        }
    }
    // Load one `other` batch (batchDataSize elements) with zero-padding to alignment.
    __aicore__ inline void CopyInOther(int batchIdx) {
        auto otherLocal = otherQue.AllocTensor<DTYPE_INPUT>();
        DataCopyExtParams copyParams{1, (uint32_t)(batchDataSize * sizeof(T)), 0, 0, 0};
        DataCopyPadExtParams<DTYPE_INPUT> padParams{false, 0, 0, 0};
        DataCopyPad(otherLocal, otherGm[batchIdx * batchDataSize], copyParams, padParams);
        otherQue.EnQue(otherLocal);
    }
    // Load n2Copy consecutive input batches starting at batchIdx.
    __aicore__ inline void CopyIn(int batchIdx, int n2Copy) {
        auto inputLocal = inputQue.AllocTensor<DTYPE_INPUT>();
        DataCopyExtParams copyParams{1, (uint32_t)(batchDataSize * sizeof(T) * n2Copy), 0, 0, 0};
        DataCopyPadExtParams<DTYPE_INPUT> padParams{false, 0, 0, 0};
        DataCopyPad(inputLocal, inputGm[batchIdx * batchDataSize], copyParams, padParams);
        inputQue.EnQue(inputLocal);
    }
    // Scalar pre-reduction: for every (input, other) pair, run stein() on the
    // absolute values and store the partially reduced pair into the float
    // scratch buffers tmpABuf/tmpBBuf for the vector gcd loop. The main loop is
    // manually unrolled by 4; the trailing loop handles the remainder.
    __aicore__ inline void Stein(LocalTensor<DTYPE_INPUT> inputLocal, LocalTensor<DTYPE_INPUT> otherLocal, int nCopies) {
        auto aLocal = tmpABuf.Get<float>();
        auto bLocal = tmpBBuf.Get<float>();
        T o1, o2, o3, o4;
        T a1, a2, a3, a4;
        T b1, b2, b3, b4;
        int k = 0;
        for (; k + 4 <= batchDataSize; k += 4) {
            o1 = abs(otherLocal.GetValue(k));
            o2 = abs(otherLocal.GetValue(k + 1));
            o3 = abs(otherLocal.GetValue(k + 2));
            o4 = abs(otherLocal.GetValue(k + 3));
            for (int j = 0; j < nCopies; j++) {
                int offset = j * batchDataSize + k;
                a1 = abs(inputLocal.GetValue(offset));
                a2 = abs(inputLocal.GetValue(offset + 1));
                a3 = abs(inputLocal.GetValue(offset + 2));
                a4 = abs(inputLocal.GetValue(offset + 3));
                b1 = o1;
                b2 = o2;
                b3 = o3;
                b4 = o4;
                stein(a1, b1);
                stein(a2, b2);
                stein(a3, b3);
                stein(a4, b4);
                aLocal.SetValue(offset, (float)(a1));
                aLocal.SetValue(offset + 1, (float)(a2));
                aLocal.SetValue(offset + 2, (float)(a3));
                aLocal.SetValue(offset + 3, (float)(a4));
                bLocal.SetValue(offset, (float)(b1));
                bLocal.SetValue(offset + 1, (float)(b2));
                bLocal.SetValue(offset + 2, (float)(b3));
                bLocal.SetValue(offset + 3, (float)(b4));
            }
        }
        for (; k < batchDataSize; k++) {
            o1 = abs(otherLocal.GetValue(k));
            for (int j = 0; j < nCopies; j++) {
                int offset = j * batchDataSize + k;
                a1 = abs(inputLocal.GetValue(offset));
                b1 = o1;
                stein(a1, b1);
                aLocal.SetValue(offset, (float)(a1));
                bLocal.SetValue(offset, (float)(b1));
            }
        }
    }
    // One partial binary-GCD reduction: strips low zero bits via
    // ScalarGetSFFValue<1> (find-first-set), keeps a >= b, and subtracts until
    // a drops below 2^24 — i.e. until a is exactly representable in a float.
    // The remaining gcd work is finished by the float Fmod loop in ComputeGcdVec.
    __aicore__ inline void stein(T& a, T& b) {
        b >>= ScalarGetSFFValue<1>(b);
        while (b) {
            a >>= ScalarGetSFFValue<1>(a);
            if (b > a) {
                // NOTE(review): xor-swap chain; its sequencing is only
                // well-defined under C++17 right-to-left evaluation of compound
                // assignments — confirm the toolchain's language mode.
                a ^= b ^= a ^= b;
            }
            if (a < (1 << 24)) {
                break;
            }
            a -= b;
        };
    }
    // Vectorized Euclid: 24 rounds of (a, b) <- (b, a mod b) in float, latching
    // a into resTmp on lanes where b has reached 0. Operands were pre-reduced by
    // stein() to below 2^24 so they are exact in float. NOTE(review): lanes that
    // do not converge within 24 rounds leave stale data in resTmp — confirm the
    // iteration bound is sufficient for the reduced operands.
    __aicore__ inline void ComputeGcdVec(LocalTensor<DTYPE_INPUT> inputLocal, LocalTensor<DTYPE_INPUT> otherLocal, int nCopies) {
        auto aLocal = tmpABuf.Get<float>();
        auto bLocal = tmpBBuf.Get<float>();
        auto cmpRes = cmpResBuf.Get<uint8_t>();
        auto modRes = modBuf.Get<float>();
        auto resTmp = resBuf.Get<float>();
        int curElem = nCopies * batchDataSize;
        // Vector compare needs a 256-byte-aligned element count.
        int curElem4CMP = static_cast<int>((curElem + (256 / sizeof(float)) - 1) / (256 / sizeof(float)) * (256 / sizeof(float)));
        for (int i = 0; i < 24; i++) {
            CompareScalar(cmpRes, bLocal, (float)(0), AscendC::CMPMODE::EQ, curElem4CMP);
            Select(resTmp, cmpRes, aLocal, resTmp, AscendC::SELMODE::VSEL_TENSOR_TENSOR_MODE, curElem4CMP);
            Fmod(modRes, aLocal, bLocal, curElem4CMP);
            // Rotate tensor handles: (a, b, mod) <- (b, mod, a).
            auto tmpPtr = aLocal;
            aLocal = bLocal;
            bLocal = modRes;
            modRes = tmpPtr;
        }
    }
    // Combine one element from the float gcd: lcm = |(input >> firstZ) / gcd * other|.
    // NOTE(review): input is shifted right by the trailing-zero count of
    // (input | other) before the division — presumably compensating for a
    // power-of-two factor in the stein() reduction; confirm against a scalar
    // reference implementation.
    __aicore__ inline T lcm(T input, T other, float gcd) {
        T firstZ = ScalarGetSFFValue<1>(input | other);
        input = input >> firstZ;
        T gcdRes = static_cast<T>(gcd);
        return abs((input / gcdRes) * other);
    }
    // Scalar recombination of the vector gcd results into the output tile;
    // unrolled by 4 with a remainder loop, mirroring Stein().
    __aicore__ inline void ComputeLcm(LocalTensor<DTYPE_INPUT> inputLocal, LocalTensor<DTYPE_INPUT> otherLocal, int nCopies) {
        auto outLocal = OutQue.AllocTensor<T>();
        auto resTmp = resBuf.Get<float>();
        uint32_t k = 0;
        for (; k + 4 <= batchDataSize; k += 4) {
            T o1 = abs(otherLocal.GetValue(k));
            T o2 = abs(otherLocal.GetValue(k + 1));
            T o3 = abs(otherLocal.GetValue(k + 2));
            T o4 = abs(otherLocal.GetValue(k + 3));
            for (uint32_t j = 0; j < nCopies; j++) {
                uint32_t offset = j * batchDataSize + k;
                T i1 = inputLocal.GetValue(offset);
                T i2 = inputLocal.GetValue(offset + 1);
                T i3 = inputLocal.GetValue(offset + 2);
                T i4 = inputLocal.GetValue(offset + 3);
                float r1 = resTmp.GetValue(offset);
                float r2 = resTmp.GetValue(offset + 1);
                float r3 = resTmp.GetValue(offset + 2);
                float r4 = resTmp.GetValue(offset + 3);
                T out1 = lcm(i1, o1, r1);
                T out2 = lcm(i2, o2, r2);
                T out3 = lcm(i3, o3, r3);
                T out4 = lcm(i4, o4, r4);
                outLocal.SetValue(offset, out1);
                outLocal.SetValue(offset + 1, out2);
                outLocal.SetValue(offset + 2, out3);
                outLocal.SetValue(offset + 3, out4);
            }
        }
        for (; k < batchDataSize; k++) {
            T o1 = abs(otherLocal.GetValue(k));
            for (int j = 0; j < nCopies; j++) {
                int offset = j * batchDataSize + k;
                T i1 = inputLocal.GetValue(offset);
                float r1 = resTmp.GetValue(offset);
                T out1 = lcm(i1, o1, r1);
                outLocal.SetValue(offset, out1);
            }
        }
        OutQue.EnQue(outLocal);
    }

    // Write nCopies result batches back to GM starting at batchIdx.
    __aicore__ inline void CopyOut(int batchIdx, int nCopies) {
        auto outLocal = OutQue.DeQue<T>();
        DataCopyExtParams copyParams{1, (uint32_t)(batchDataSize * nCopies * sizeof(T)), 0, 0, 0};
        DataCopyPad(outGm[batchDataSize * batchIdx], outLocal, copyParams);
        OutQue.FreeTensor(outLocal);
    }

  private:
    const static int BUFFER_NUM = 1;
    int nPerCopy;
    bool printFlag;
    GlobalTensor<T> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx, flag;
    uint32_t curOtherId;
    uint32_t nBatch, broadCastDim, batchDataSize, startBatch, endBatch, alignBatchDataSize;
    TBuf<QuePosition::VECCALC> tmpABuf, tmpBBuf, cmpResBuf, resBuf, modBuf;
    TQue<QuePosition::VECOUT, BUFFER_NUM> OutQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> otherQue;
};

/*------------------------------------------------------
 *
 *  No broadcasting needed: all inputs/outputs can be moved with DataCopy; scalar computation.
 *
 *------------------------------------------------------*/

template <typename T> class LcmOp {
  public:
    __aicore__ inline LcmOp() {}
    // Element-wise lcm without broadcasting: input/other/out share one flat
    // layout, so every tensor moves with plain DataCopy and the math is scalar.
    // blockPerCore: 32-byte blocks per small core (big cores take one extra);
    // nAcores/nBcores: big/small core counts; maxBlockPerIter: UB blocks per pass.
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t blockPerCore, uint16_t nAcores, uint16_t nBcores,
                                uint16_t maxBlockPerIter, TPipe* pipeIn) {
        pipe = pipeIn;
        coreIdx = GetBlockIdx();
        MAX_BLOCK_PER_ITER = maxBlockPerIter;
        const uint32_t lanes = BLOCK_SIZE / sizeof(T);          // elements per 32-byte block
        uint32_t gmBase = coreIdx * (blockPerCore + 1) * lanes; // this core's element offset in GM
        elemPerIter = lanes * MAX_BLOCK_PER_ITER;               // max elements handled per pass
        if (coreIdx < nAcores) { // big core: one extra block
            nElem = (blockPerCore + 1) * lanes;
            nIter = (blockPerCore + 1 + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = ((blockPerCore + 1) % MAX_BLOCK_PER_ITER) * lanes;
        } else { // small core: shift base back by the extra blocks the big cores took
            gmBase -= (coreIdx - nAcores) * lanes;
            nElem = blockPerCore * lanes;
            nIter = (blockPerCore + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = (blockPerCore % MAX_BLOCK_PER_ITER) * lanes;
        }

        inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + gmBase, nElem);
        otherGm.SetGlobalBuffer((__gm__ DTYPE_OTHER*)other + gmBase, nElem);
        outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out + gmBase, nElem);

        pipe->InitBuffer(outQue, BUFFER_NUM, elemPerIter * sizeof(T));
        pipe->InitBuffer(otherQue, BUFFER_NUM, elemPerIter * sizeof(T));
        pipe->InitBuffer(inputQue, BUFFER_NUM, elemPerIter * sizeof(T));
    }

    __aicore__ inline void Process() {
        if (nElem < elemPerIter) {
            // Everything fits in a single pass.
            elemPerIter = nElem;
            curElem = nElem;
            CopyIn(0);
            Compute(0);
            CopyOut(0);
            return;
        }
        // Full-size passes, then one (possibly shorter) tail pass.
        curElem = elemPerIter;
        for (uint32_t it = 0; it + 1 < nIter; ++it) {
            CopyIn(it);
            Compute(it);
            CopyOut(it);
        }
        if (tailElem > 0) {
            curElem = tailElem;
        }
        CopyIn(nIter - 1);
        Compute(nIter - 1);
        CopyOut(nIter - 1);
    }
    __aicore__ inline void CopyIn(int iterIdx) {
        auto inTile = inputQue.AllocTensor<T>();
        auto otherTile = otherQue.AllocTensor<T>();
        DataCopy(inTile, inputGm[elemPerIter * iterIdx], curElem);
        DataCopy(otherTile, otherGm[elemPerIter * iterIdx], curElem);
        inputQue.EnQue(inTile);
        otherQue.EnQue(otherTile);
    }
    __aicore__ inline void Compute(int iterIdx) {
        auto dst = outQue.AllocTensor<T>();
        auto inTile = inputQue.DeQue<T>();
        auto otherTile = otherQue.DeQue<T>();
        for (uint32_t idx = 0; idx < curElem; ++idx) {
            T a = inTile.GetValue(idx);
            T b = otherTile.GetValue(idx);
            T ua = (a < 0) ? -a : a;
            T ub = (b < 0) ? -b : b;
            T res = (ua / gcd(ua, ub)) * ub;
            // The product may wrap in the narrow type; for non-int8 types fold a
            // negative result back to its absolute value.
            if constexpr (!std::is_same_v<T, std::int8_t>) {
                if (res < 0) {
                    res = -res;
                }
            }
            dst.SetValue(idx, res);
        }
        outQue.EnQue(dst);
        inputQue.FreeTensor(inTile);
        otherQue.FreeTensor(otherTile);
    }
    // Euclid's algorithm on non-negative operands; gcd(x, 0) == x.
    __aicore__ inline T gcd(T a, T b) {
        for (; b != 0;) {
            T next = a % b;
            a = b;
            b = next;
        }
        return a;
    }
    __aicore__ inline void CopyOut(int iterIdx) {
        auto dst = outQue.DeQue<T>();
        DataCopy(outGm[elemPerIter * iterIdx], dst, curElem);
        outQue.FreeTensor(dst);
    }

  private:
    const static int BUFFER_NUM = 1;
    GlobalTensor<T> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx, MAX_BLOCK_PER_ITER;
    uint32_t nElem, nIter, elemPerIter, tailElem, curElem;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> otherQue;
};

/*------------------------------------------------------
 *
 *  No broadcasting needed: all inputs/outputs can be moved with DataCopy; vector computation.
 *
 *------------------------------------------------------*/

class LcmOpVec32 {
  public:
    __aicore__ inline LcmOpVec32() {}
    // No-broadcast int32 path: operands are DataCopy'ed and gcd/lcm is computed
    // with vector float ops. NOTE(review): int32 -> float casts are exact only
    // for magnitudes below 2^24; confirm the host tiling routes larger inputs
    // to a different path.
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR other, GM_ADDR out, uint32_t blockPerCore, uint16_t nAcores, uint16_t nBcores,
                                uint16_t maxBlockPerIter, TPipe* pipeIn) {
        this->pipe = pipeIn;
        this->coreIdx = GetBlockIdx();
        this->MAX_BLOCK_PER_ITER = maxBlockPerIter;
        this->CMP_ELEM_ALIGN = 256 / sizeof(int32_t); // Compare needs 256-byte-aligned element counts
        uint32_t elemPerBlock = BLOCK_SIZE / sizeof(int32_t);
        uint32_t inputGmOffset = coreIdx * (blockPerCore + 1) * elemPerBlock; // global offset, in elements
        elemPerIter = elemPerBlock * MAX_BLOCK_PER_ITER;                      // max elements handled per iteration
        if (coreIdx < nAcores) {                                              // big core
            this->nElem = (blockPerCore + 1) * elemPerBlock;
            nIter = (blockPerCore + 1 + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = ((blockPerCore + 1) % MAX_BLOCK_PER_ITER) * elemPerBlock;
        } else {                                                              // small core
            inputGmOffset = inputGmOffset - (this->coreIdx - nAcores) * elemPerBlock;
            this->nElem = blockPerCore * elemPerBlock;
            nIter = (blockPerCore + MAX_BLOCK_PER_ITER - 1) / MAX_BLOCK_PER_ITER;
            tailElem = (blockPerCore % MAX_BLOCK_PER_ITER) * elemPerBlock;
        }

        inputGm.SetGlobalBuffer((__gm__ int32_t*)input + inputGmOffset, nElem);
        otherGm.SetGlobalBuffer((__gm__ int32_t*)other + inputGmOffset, nElem);
        outGm.SetGlobalBuffer((__gm__ int32_t*)out + inputGmOffset, nElem);
        // Compare requires 256-byte alignment, so the tensor buffers are padded.
        uint32_t BUFFER_LEN = ((elemPerIter + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
        pipe->InitBuffer(outQue, BUFFER_NUM, BUFFER_LEN * sizeof(int32_t));
        pipe->InitBuffer(otherQue, BUFFER_NUM, BUFFER_LEN * sizeof(int32_t));
        pipe->InitBuffer(inputQue, BUFFER_NUM, BUFFER_LEN * sizeof(int32_t));
        pipe->InitBuffer(bitBuf, (BUFFER_LEN / 8) * sizeof(uint8_t)); // one bit per element for compare results
        pipe->InitBuffer(floatInputBuf, BUFFER_LEN * sizeof(float));
        pipe->InitBuffer(floatOtherBuf, BUFFER_LEN * sizeof(float));
        pipe->InitBuffer(floatOutBuf, BUFFER_LEN * sizeof(float));
        pipe->InitBuffer(modBuf, BUFFER_LEN * sizeof(float));
    }

    __aicore__ inline void Process() {
        if (nElem < elemPerIter) {
            // Everything fits in a single pass.
            elemPerIter = nElem;
            curElem = nElem;
            curCmpElem = ((curElem + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
            CopyIn(0);
            Compute(0);
            CopyOut(0);
        } else {
            // Full-size passes followed by one (possibly shorter) tail pass.
            curElem = elemPerIter;
            curCmpElem = ((curElem + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
            for (int i = 0; i < nIter - 1; i++) {
                CopyIn(i);
                Compute(i);
                CopyOut(i);
            }
            if (tailElem > 0) {
                curElem = tailElem;
                curCmpElem = ((tailElem + CMP_ELEM_ALIGN - 1) / CMP_ELEM_ALIGN) * CMP_ELEM_ALIGN;
            }
            CopyIn(nIter - 1);
            Compute(nIter - 1);
            CopyOut(nIter - 1);
        }
    }
    __aicore__ inline void CopyIn(int iterIdx) {
        auto inputLocal = inputQue.AllocTensor<int32_t>();
        auto otherLocal = otherQue.AllocTensor<int32_t>();
        DataCopy(inputLocal, inputGm[elemPerIter * iterIdx], curElem);
        DataCopy(otherLocal, otherGm[elemPerIter * iterIdx], curElem);
        inputQue.EnQue(inputLocal);
        otherQue.EnQue(otherLocal);
    }
    __aicore__ inline void Compute(int iterIdx) {
        auto outLocal = outQue.AllocTensor<int32_t>();
        auto inputLocal = inputQue.DeQue<int32_t>();
        auto otherLocal = otherQue.DeQue<int32_t>();
        // NOTE(review): the sibling class obtains TBuf tensors via Get<>();
        // confirm AllocTensor is a supported TBuf API on this toolchain.
        auto cmpRes = bitBuf.AllocTensor<uint8_t>();
        auto inputFloat = floatInputBuf.AllocTensor<float>();
        auto otherFloat = floatOtherBuf.AllocTensor<float>();
        auto outFloat = floatOutBuf.AllocTensor<float>();
        auto modRes = modBuf.AllocTensor<float>();
        // Cast both operands to float and take absolute values.
        Cast(inputFloat, inputLocal, RoundMode::CAST_NONE, curElem);
        Cast(otherFloat, otherLocal, RoundMode::CAST_NONE, curElem);
        Abs(inputFloat, inputFloat, curElem);
        Abs(otherFloat, otherFloat, curElem);
        // Keep |other| as int32 for the final multiply.
        Cast(otherLocal, otherFloat, RoundMode::CAST_RINT, curElem);
        // Vectorized Euclid: 20 rounds of (a, b) <- (b, a mod b), latching a
        // into outFloat on lanes where b has reached 0.
        // NOTE(review): lanes not converged within 20 rounds leave outFloat
        // unwritten (uninitialized) — confirm the bound for the expected inputs.
        for (int i = 0; i < 20; i++) {
            CompareScalar(cmpRes, otherFloat, (float)0, AscendC::CMPMODE::EQ, curCmpElem);
            Select(outFloat, cmpRes, inputFloat, outFloat, AscendC::SELMODE::VSEL_TENSOR_TENSOR_MODE, curCmpElem);
            Fmod(modRes, inputFloat, otherFloat, curCmpElem);
            // Rotate tensor handles: (a, b, mod) <- (b, mod, a).
            auto tmpPtr = inputFloat;
            inputFloat = otherFloat;
            otherFloat = modRes;
            modRes = tmpPtr;
        }
        // lcm = (|input| / gcd) * |other|, assembled via float divide then
        // integer multiply.
        Cast(inputFloat, inputLocal, RoundMode::CAST_NONE, curElem);
        Abs(inputFloat, inputFloat, curElem);
        Div(inputFloat, inputFloat, outFloat, curElem);
        Cast(outLocal, inputFloat, RoundMode::CAST_RINT, curElem);
        Mul(outLocal, otherLocal, outLocal, curElem);
        outQue.EnQue(outLocal);
        inputQue.FreeTensor(inputLocal);
        otherQue.FreeTensor(otherLocal);
    }
    // Euclidean gcd. NOTE(review): not called anywhere in this class (Compute
    // uses the vector loop above) — apparently dead code.
    __aicore__ inline int32_t gcd(int32_t a, int32_t b) {
        while (b != 0) {
            int32_t remainder = a % b;
            a = b;
            b = remainder;
        }
        return a;
    }
    __aicore__ inline void CopyOut(int iterIdx) {
        auto outLocal = outQue.DeQue<int32_t>();
        DataCopy(outGm[elemPerIter * iterIdx], outLocal, curElem);
        outQue.FreeTensor(outLocal);
    }

  private:
    const static int BUFFER_NUM = 1;
    GlobalTensor<int32_t> inputGm, otherGm, outGm;
    TPipe* pipe;
    uint16_t coreIdx, MAX_BLOCK_PER_ITER, CMP_ELEM_ALIGN;
    uint32_t nElem, nIter, elemPerIter, tailElem, curElem, curCmpElem;
    TBuf<QuePosition::VECCALC> bitBuf;
    TBuf<QuePosition::VECCALC> floatInputBuf;
    TBuf<QuePosition::VECCALC> floatOtherBuf;
    TBuf<QuePosition::VECCALC> floatOutBuf;
    TBuf<QuePosition::VECCALC> modBuf;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> inputQue;
    TQue<QuePosition::VECIN, BUFFER_NUM> otherQue;
};

// Kernel entry point: dispatches to one of four implementations based on the
// tiling key selected by the host tiling function:
//   0 - broadcasting needed with more than one broadcast dim (scalar path,
//       `other` read element-wise from GM)
//   1 - exactly one broadcast dim (batch-wise path, LcmOpBroadcastOne)
//   2 - no broadcast, generic dtype (scalar path, DataCopy on all tensors)
//   3 - no broadcast, int32 (vectorized float-based path)
extern "C" __global__ __aicore__ void lcm(GM_ADDR input, GM_ADDR other, GM_ADDR out, GM_ADDR workspace, GM_ADDR tiling) {
    TPipe pipe;
    if (TILING_KEY_IS(0)) { // broadcasting needed and broadcast spans more than one dim
        GET_TILING_DATA(tiling_data, tiling);
        LcmOpBroadcast<DTYPE_INPUT> op;
        op.Init(input, other, out, tiling_data.blockPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.shapeInput, tiling_data.shapeOther,
                tiling_data.nDim, tiling_data.maxBlockPerIter, &pipe);
        op.Process();
    } else if (TILING_KEY_IS(1)) {
        GET_TILING_DATA_WITH_STRUCT(LcmTilingDataBroadSpec, tiling_data, tiling);
        LcmOpBroadcastOne<DTYPE_INPUT> op;
        op.Init(input, other, out, tiling_data.nBatchPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.broadcastDim,
                tiling_data.batchDataSize, &pipe);
        op.Process();
    } else if (TILING_KEY_IS(2)) {
        GET_TILING_DATA_WITH_STRUCT(LcmTilingDataVec, tiling_data, tiling);
        LcmOp<DTYPE_INPUT> op;
        op.Init(input, other, out, tiling_data.blockPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.maxBlockPerIter, &pipe);
        op.Process();
    } else if (TILING_KEY_IS(3)) {
        GET_TILING_DATA_WITH_STRUCT(LcmTilingDataVec, tiling_data, tiling);
        LcmOpVec32 op;
        op.Init(input, other, out, tiling_data.blockPerCore, tiling_data.nAcores, tiling_data.nBcores, tiling_data.maxBlockPerIter, &pipe);
        op.Process();
    }
}