#ifndef LIGHTNING_ATTENTION_H
#define LIGHTNING_ATTENTION_H

#include "kernel_operator.h"
#include "include/utils/status_utils.h"
#include "./utils/sync_utils.h"
#include "./utils/constant_values.h"

using namespace Atcos;
using namespace Atcos::Gemm;

// Problem dimensions for one Lightning Attention launch.
// One (batch, head) pair is processed per physical core group (see GetBlockNum).
struct LightningAttentionProblemShape {
    int32_t batchSize;  // number of batches
    int32_t numHeads;   // attention heads per batch; also indexes the per-head decay table
    int32_t seqLen;     // sequence length s; must be a positive multiple of blockSize (see CanImplement)
    int32_t headDim;    // per-head feature dimension d
    int32_t blockSize;  // sequence tile size B used for the intra/inter block decomposition
};

// Cross-core synchronization flag IDs. Each value identifies one AIC<->AIV
// handshake point in the per-block pipeline (see SyncOp and Run below).
static constexpr uint16_t INTRA_BMM1     = 0x01; // AIC: intra Q*K^T result ready in GM
static constexpr uint16_t INTER_BMM      = 0x02; // AIC: inter Q*KV result ready in GM
static constexpr uint16_t KV_CAST        = 0x03; // AIV: casted KV state ready for inter matmul
static constexpr uint16_t INTRA_BMM1_EPI = 0x04; // AIV: masked/casted intra scores ready
static constexpr uint16_t INTRA_BMM2     = 0x05; // AIC: intra score*V result ready in GM
static constexpr uint16_t UPDATE_BMM     = 0x06; // AIC: K^T*V update result ready in GM

// Named cross-core sync primitives. Each alias binds a flag ID to the hardware
// pipe whose completion the flag publishes.
// NOTE(review): the matmul-side syncs use the raw pipe id ((pipe_t)10) instead of
// a named PIPE_* constant — presumably the cube/FIXP output pipe on this target;
// confirm against the platform's pipe enum.
struct SyncOp {
    using SyncIntraBmm1     = CrossCoreSync<INTRA_BMM1,     ((pipe_t)10)>;
    using SyncInterBmm      = CrossCoreSync<INTER_BMM,      ((pipe_t)10)>;
    using SyncIntraBmm2     = CrossCoreSync<INTRA_BMM2,     ((pipe_t)10)>;
    using SyncUpdateBmm     = CrossCoreSync<UPDATE_BMM,     ((pipe_t)10)>;
    using SyncIntraBmm1Epi  = CrossCoreSync<INTRA_BMM1_EPI, PIPE_MTE3>; // published after AIV UB->GM copy
    using SyncKvCast        = CrossCoreSync<KV_CAST,        PIPE_MTE3>; // published after AIV UB->GM copy
};

// Primary template: deliberately unusable. Instantiating LightningAttention with
// any ProblemShape other than LightningAttentionProblemShape trips the
// always-false static_assert below; the real implementation lives in the
// enable_if partial specialization that follows.
template<
    class ProblemShape_,
    class Intra1Mmad_, class Intra1Epilogue_,
    class InterMmad_,
    class Intra2Mmad_, class Intra2Epilogue_,
    class UpdateMmad_, class UpdateEpilogue_,

    typename Enable_=void
>
class LightningAttention {
    static_assert(
        AscendC::Std::always_false_v<ProblemShape_>,
        "Lightning Attention Only Supports LightningAttentionProblemShape as ProblemShape"
    );
};

// Lightning Attention implementation (selected when ProblemShape_ is
// LightningAttentionProblemShape).
//
// Per sequence block of size B the kernel computes
//   O_intra = (Q K^T .* decay-mask) V     (within-block attention)
//   O_inter = (Q .* lambda) KV            (contribution of the running KV state)
//   KV      = decay * KV + (K .* lambda_inv)^T V   (state update)
// The four matmuls run on the cube core (AIC); masking, decay scaling, casts and
// the final add run on the two vector sub-cores (AIV). The two sides exchange
// intermediate results through per-core workspace regions in GM and order each
// other via the SyncOp cross-core flags.
template<
    class ProblemShape_,
    class Intra1Mmad_, class Intra1Epilogue_,
    class InterMmad_,
    class Intra2Mmad_, class Intra2Epilogue_,
    class UpdateMmad_, class UpdateEpilogue_
>
class LightningAttention<
    ProblemShape_,
    Intra1Mmad_, Intra1Epilogue_,
    InterMmad_,
    Intra2Mmad_, Intra2Epilogue_,
    UpdateMmad_, UpdateEpilogue_,

    AscendC::Std::enable_if_t<
        AscendC::Std::is_same_v<ProblemShape_, LightningAttentionProblemShape>
    >
> {
public:
    // Element type of Q/K/V/O, taken from the first matmul's A-operand.
    using InputType = typename Intra1Mmad_::AType::T;

    using ProblemShape = ProblemShape_;

    using Intra1Mmad        = Intra1Mmad_;
    using Intra1Epilogue    = Intra1Epilogue_;
    using InterMmad         = InterMmad_;
    using Intra2Mmad        = Intra2Mmad_;
    using Intra2Epilogue    = Intra2Epilogue_;
    using UpdateMmad        = UpdateMmad_;
    using UpdateEpilogue    = UpdateEpilogue_;

    using Intra1EpiArguments    = typename Intra1Epilogue::Arguments;
    using Intra1EpiParams       = typename Intra1Epilogue::Params;

    using Intra2EpiArguments    = typename Intra2Epilogue::Arguments;
    using Intra2EpiParams       = typename Intra2Epilogue::Params;

    using UpdateEpiArguments    = typename UpdateEpilogue::Arguments;
    using UpdateEpiParams       = typename UpdateEpilogue::Params;

    // Host-side raw device addresses supplied by the caller.
    struct LightningAttentionAddrArguments {
        GM_ADDR q;   // query  [b, h, s, d]
        GM_ADDR k;   // key    [b, h, s, d]
        GM_ADDR v;   // value  [b, h, s, d]
        GM_ADDR s;   // per-head decay scalars (float, numHeads entries; see Init)
        GM_ADDR w;   // workspace base (see GetWorkspaceSize / InitParams)

        GM_ADDR kv;  // running KV state [b, h, d, d] (float), read and updated in place
        GM_ADDR o;   // attention output [b, h, s, d]
    };

    // Device-side addresses plus element offsets of each per-core workspace
    // region relative to w (computed once on host in InitParams).
    struct LightningAttentionAddrParams {
        GM_ADDR q;
        GM_ADDR k;
        GM_ADDR v;
        GM_ADDR s;
        GM_ADDR w;

        // Offsets of the float-typed workspace regions (in float elements).
        int64_t intraBmm1ResOffset;
        int64_t interBmmResOffset;
        int64_t intraBmm2ResOffset;
        int64_t updateBmmResOffset;
        int64_t maskOffset;
        int64_t lambdaOffset;
        int64_t lambdaInverseOffset;
        // Offsets of the InputType-typed regions (in InputType elements).
        int64_t intraMulCastOffset;
        int64_t updateMulCastOffset;
        int64_t kvCastOffset;

        GM_ADDR kv;
        GM_ADDR o;
    };

    struct Arguments {
        ProblemShape problemShape;
        LightningAttentionAddrArguments addrsArguments;
    };

    struct Params {
        ProblemShape problemShape;
        LightningAttentionAddrParams addrParams;

        Intra1EpiParams intra1EpiParams;
        Intra2EpiParams intra2EpiParams;
        UpdateEpiParams updateEpiParams;
    };

    // Cube-core matmul drivers (used only under ASCEND_IS_AIC).
    Intra1Mmad intraBmm1_;
    Intra1Epilogue intraBmm1Epilogue_;

    InterMmad interBmm_;

    Intra2Mmad intraBmm2_;
    Intra2Epilogue intraBmm2Epilogue_;

    UpdateMmad updateBmm_;
    UpdateEpilogue updateBmmEpilogue_;
private:
    // GM views over the caller tensors, rebased in Init to this core's (b, h) slice.
    AscendC::GlobalTensor<InputType> qGm_;
    AscendC::GlobalTensor<InputType> kGm_;
    AscendC::GlobalTensor<InputType> vGm_;
    AscendC::GlobalTensor<InputType> attentionOutGm_;

    // GM views over this core's workspace regions (float scratch).
    AscendC::GlobalTensor<float> maskGm_;           // B x B decay mask exp(lnDecay * (i - j)), -inf above diagonal
    AscendC::GlobalTensor<float> lambdaGm_;         // B x d row-wise decay factors for the inter path
    AscendC::GlobalTensor<float> lambdaInverseGm_;  // B x d inverse-direction decay factors for the KV update
    AscendC::GlobalTensor<float> intraBmm1ResGm_;   // B x B raw Q*K^T scores
    AscendC::GlobalTensor<float> interBmmResGm_;    // B x d raw Q*KV result
    AscendC::GlobalTensor<float> intraBmm2ResGm_;   // B x d intra score*V result
    AscendC::GlobalTensor<float> updateBmmResGm_;   // d x d K^T*V update term
    AscendC::GlobalTensor<float> kvGm_;             // d x d running KV state (persistent across blocks)

    // Casted (InputType) tensors fed back into the cube core.
    AscendC::GlobalTensor<InputType> intraMulCastGm_;
    AscendC::GlobalTensor<InputType> updateMulCastGm_;
    AscendC::GlobalTensor<InputType> kvCastGm_;

    // UB buffer groups; each group is (re)initialized over the same UB space
    // in Init (see the pipe->Reset() calls there).
    AscendC::TBuf<> ubTBufDecays_[2];
    AscendC::TBuf<> ubTBufUpdateMul_[3];
    AscendC::TBuf<> ubTBufKVCast_[2];

    AscendC::TBuf<> ubTBufEpis_[3];

    float decay_{0};    // exp(-lnDecay * B) after InitDecays; raw per-head value before
    float lnDecay_{0};  // per-head log-decay scalar read from addrs.s

    // Frequently used products of the problem shape, cached in Init.
    struct BlockShapeInfo {
        int32_t d{0};
        int32_t B{0};
        int32_t Bxd{0};
        int32_t BxB{0};
        int32_t dxd{0};
    };
    BlockShapeInfo blockInfo_;

    // Pre-allocated intra-core pipe-event pairs used throughout the AIV paths.
    struct HardEventVec {
        SEvent<AscendC::HardEvent::MTE2_V> mte2ToV;
        SEvent<AscendC::HardEvent::V_MTE3> vToMTE3;
        SEvent<AscendC::HardEvent::MTE3_V> mte3ToV;
        SEvent<AscendC::HardEvent::MTE3_MTE2> mte3ToMTE2;
        SEvent<AscendC::HardEvent::V_MTE2> vToMTE2;
    };
    HardEventVec syncs;

    // One chunk of work assigned to this vector sub-core by QueryTwoVecPolicy.
    struct VecCoreSplitArgs {
        int32_t dataCountForComputing;  // elements this sub-core processes now (0 = idle this round)
        int32_t dataCountOfComputed;    // total elements consumed after this round (loop cursor)
        int32_t offset;                 // element offset of this sub-core's chunk
    };

    int64_t bytesPerCore{0};
    int32_t vecSubIndex{-1};  // 0 or 1: which AIV sub-core this is (set in Init)
    // Work-splitting thresholds, in float elements, derived from UB capacity
    // (UB is split into 3 equal buffers: input / param / output).
    constexpr static int32_t policyOverTwoVectorThreshold = UB_SIZE / sizeof(float) / 3 * 2;
    constexpr static int32_t policyOverOneVectorThreshold = UB_SIZE / sizeof(float) / 3;
    constexpr static int32_t dataCountForOnceProcessing = UB_SIZE / sizeof(float) / 3;
public:
    // Workspace bytes needed by one (b, h) core:
    //   float:     2*BxB (scores + mask) + 4*Bxd (inter, intra2, lambda, lambda_inv) + 1*dxd (update)
    //   InputType: BxB + Bxd + dxd (casted feedback tensors)
    // NOTE(review): InitParams reserves each region rounded up by
    // CeilAlign(..., 512), but this sum is unpadded — for small d (e.g. d*d < 512)
    // the per-core slot appears smaller than the offsets InitParams hands out,
    // so the last region could spill into the next core's slot. Confirm against
    // CeilAlign's units and fix whichever side is wrong.
    __host_aicore__ static size_t GetBytesPerCore(ProblemShape shape) {
        int64_t B = shape.blockSize;
        int64_t d = shape.headDim;

        int64_t bytesPerCore = 
            sizeof(float) * (B * B * 2 + B * d * 4 + d * d * 1) +
            sizeof(InputType) * (B * B + B * d + d * d);
        return bytesPerCore;
    }

    // Shape gate: only B == 256, d in {8,16,32,64,128}, and s a positive
    // multiple of 256 are supported.
    __host_aicore__ static Status CanImplement(Arguments& args) {
        int32_t B = args.problemShape.blockSize;
        int32_t d = args.problemShape.headDim;
        int32_t s = args.problemShape.seqLen;

        if (
            B == 256 && 
            (d == 8 || d == 16 || d == 32 || d == 64 || d == 128) && 
            ((s % 256) == 0 && s >= 256)
        ) {
            return Status::success;
        } else {
            // NOTE(review): acknowledged wrong enum value — this is a shape-support
            // failure, not a tile-shape-limit error; replace with (or add) a
            // dedicated Status once the enum allows it.
            return Status::tileShapeErrorExceedsLimit;
        }
    }

    // Total workspace = per-core slot times number of launched cores.
    __host_aicore__ static size_t GetWorkspaceSize(ProblemShape shape, int64_t blockNum) {
        return LightningAttention::GetBytesPerCore(shape) * blockNum;
    }

    // Host-side parameter packing: lays out the per-core workspace regions and
    // records each region's starting offset (in elements, not bytes).
    __host_aicore__ static Params InitParams(Arguments const& args, GM_ADDR workspace) {
        GM_ADDR w = workspace;

        int64_t B = args.problemShape.blockSize;
        int64_t d = args.problemShape.headDim;
        int64_t BxB = B * B;
        int64_t Bxd = B * d;
        int64_t dxd = d * d;

        int64_t offset = 0;
        constexpr int64_t WORKSPACE_ADDR_ALIGN = 512;

        // Float regions, each rounded up to the alignment quantum.
        // NOTE(review): CeilAlign is applied to element counts, so regions are
        // aligned to 512 *elements* (2 KiB for float) — confirm that is the
        // intended unit rather than 512 bytes.
        int64_t intraBmm1ResOffset = offset;
        offset += CeilAlign(BxB, WORKSPACE_ADDR_ALIGN);
        int64_t interBmmResOffset = offset;
        offset += CeilAlign(Bxd, WORKSPACE_ADDR_ALIGN);
        int64_t intraBmm2ResOffset = offset;
        offset += CeilAlign(Bxd, WORKSPACE_ADDR_ALIGN);
        int64_t updateBmmResOffset = offset;
        offset += CeilAlign(dxd, WORKSPACE_ADDR_ALIGN);
        int64_t maskOffset = offset;
        offset += CeilAlign(BxB, WORKSPACE_ADDR_ALIGN);
        int64_t lambdaOffset = offset;
        offset += CeilAlign(Bxd, WORKSPACE_ADDR_ALIGN);
        int64_t lambdaInverseOffset = offset;
        offset += CeilAlign(Bxd, WORKSPACE_ADDR_ALIGN);
        
        // Switch units from float elements to InputType elements for the casted
        // regions: byte offset is preserved only if
        // sizeof(InputType) == sizeof(float) / 2 (i.e. half/bf16).
        // NOTE(review): this silently breaks for any other InputType — confirm
        // or static_assert the assumption.
        offset *= 2;
        int64_t intraMulCastOffset = offset;
        offset += CeilAlign(BxB, WORKSPACE_ADDR_ALIGN);
        int64_t updateMulCastOffset = offset;
        offset += CeilAlign(Bxd, WORKSPACE_ADDR_ALIGN);
        int64_t kvCastOffset = offset;

        return {
            args.problemShape,
            {
                args.addrsArguments.q,
                args.addrsArguments.k,
                args.addrsArguments.v,
                args.addrsArguments.s,
                args.addrsArguments.w,

                intraBmm1ResOffset,
                interBmmResOffset,
                intraBmm2ResOffset,
                updateBmmResOffset,
                maskOffset,
                lambdaOffset,
                lambdaInverseOffset,
                intraMulCastOffset,
                updateMulCastOffset,
                kvCastOffset,

                args.addrsArguments.kv,
                args.addrsArguments.o
            },
            {  },  // epilogue params default-initialized; epilogues here take no host args
            {  },
            {  },
        };
    }

    // One core block per (batch, head) pair.
    static int64_t GetBlockNum(ProblemShape shape) {
        return shape.batchSize * shape.numHeads;
    }

    __aicore__ inline void operator()(Params const& params) {
        Run(params);
    }

private:
    // Main loop: walks the sequence in tiles of B rows. The AIC branch issues
    // the four matmuls; the AIV branch produces their casted inputs and
    // consumes their outputs. The Set/Wait pairs below form the cross-core
    // handshake and their order is load-bearing — do not reorder.
    __aicore__ inline void Run(Params const& params) {
        Init(params);
        int32_t s = params.problemShape.seqLen;
        int32_t blockNum = s / blockInfo_.B;

        for (int32_t blockIdx = 0; blockIdx < blockNum; blockIdx++) {
            int32_t offset = blockIdx * blockInfo_.Bxd;  // start of this tile in Q/K/V/O
            if ASCEND_IS_AIC {
                // scores = Q[block] * K[block]^T  -> intraBmm1ResGm_
                intraBmm1_.RunWithOutputToGm(
                    intraBmm1ResGm_, qGm_[offset], kGm_[offset],
                    blockInfo_.B, blockInfo_.B, blockInfo_.d
                );

                SyncOp::SyncIntraBmm1::Set();   // scores ready for AIV masking
                SyncOp::SyncKvCast::Wait();     // need casted KV from AIV

                // inter = Q[block] * KV_cast  -> interBmmResGm_
                interBmm_.RunWithOutputToGm(
                    interBmmResGm_, qGm_[offset], kvCastGm_,
                    blockInfo_.B, blockInfo_.d, blockInfo_.d
                );

                SyncOp::SyncInterBmm::Set();        // inter result ready for AIV
                SyncOp::SyncIntraBmm1Epi::Wait();   // need masked/casted scores from AIV

                // intra2 = masked_scores * V[block]  -> intraBmm2ResGm_
                intraBmm2_.RunWithOutputToGm(
                    intraBmm2ResGm_, intraMulCastGm_, vGm_[offset],
                    blockInfo_.B, blockInfo_.d, blockInfo_.B
                );
                SyncOp::SyncIntraBmm2::Set();

                // update = (K .* lambda_inv)^T * V[block]  -> updateBmmResGm_
                updateBmm_.RunWithOutputToGm(
                    updateBmmResGm_, updateMulCastGm_, vGm_[offset],
                    blockInfo_.d, blockInfo_.d, blockInfo_.B
                );
                SyncOp::SyncUpdateBmm::Set();
            } else {
                kvCast();                        // KV (float) -> KV_cast (InputType)
                SyncOp::SyncKvCast::Set();

                // Prepare the update matmul's A operand: K .* lambda_inv, casted.
                updateMul(updateMulCastGm_, kGm_[offset], lambdaInverseGm_);

                SyncOp::SyncIntraBmm1::Wait();   // scores ready
                DoIntraBmm1Epi();                // apply decay mask + cast
                SyncOp::SyncIntraBmm1Epi::Set();

                SyncOp::SyncInterBmm::Wait();    // inter result ready
                // Combine lambda-scaled inter result with the intra result
                // (waits for SyncIntraBmm2 internally) and write O.
                DoIntraBmm2Epi<SyncOp::SyncIntraBmm2>(offset);

                // KV = decay * KV + update (waits for SyncUpdateBmm internally).
                DoUpdateBmmEpi<SyncOp::SyncUpdateBmm>();
            }
        }
    }

    // Per-core setup: rebases all GM views to this core's (b, h) slice and
    // workspace slot, initializes matmuls (AIC) or epilogues + UB buffers (AIV),
    // then precomputes the decay tables.
    __aicore__ inline void Init(Params const& params) {
        int32_t B = params.problemShape.blockSize;
        int32_t d = params.problemShape.headDim;
        int32_t s = params.problemShape.seqLen;
        const auto& addrs = params.addrParams;
        int32_t blockIdx = AscendC::GetBlockIdx();

        if ASCEND_IS_AIV {
            // Two AIV sub-cores share one AIC core; fold them onto the same
            // logical (b, h) index.
            vecSubIndex = AscendC::GetSubBlockIdx();
            blockIdx /= 2;
        }

        // Per-head decay scalar, indexed by head (blockIdx iterates b*h, so
        // blockIdx % numHeads is the head index).
        decay_ = *(reinterpret_cast<__gm__ float*>(addrs.s) + (blockIdx % params.problemShape.numHeads));

        bytesPerCore = GetBytesPerCore(params.problemShape);
        qGm_.SetGlobalBuffer((__gm__ InputType*)addrs.q             + blockIdx * d * s);
        kGm_.SetGlobalBuffer((__gm__ InputType*)addrs.k             + blockIdx * d * s);
        vGm_.SetGlobalBuffer((__gm__ InputType*)addrs.v             + blockIdx * d * s);
        kvGm_.SetGlobalBuffer((__gm__ float*)addrs.kv               + blockIdx * d * d);
        attentionOutGm_.SetGlobalBuffer((__gm__ InputType*)addrs.o  + blockIdx * d * s);

        // This core's workspace slot, expressed in elements of each type.
        int64_t wOffset = bytesPerCore * blockIdx;
        int64_t wOffsetFloat = wOffset / sizeof(float);
        int64_t wOffsetInputType = wOffset / sizeof(InputType);
        GM_ADDR w = addrs.w;

        intraBmm1ResGm_.SetGlobalBuffer((__gm__ float*)w + (addrs.intraBmm1ResOffset + wOffsetFloat));
        interBmmResGm_.SetGlobalBuffer((__gm__ float*)w + (addrs.interBmmResOffset + wOffsetFloat));
        intraBmm2ResGm_.SetGlobalBuffer((__gm__ float*)w + (addrs.intraBmm2ResOffset + wOffsetFloat));
        updateBmmResGm_.SetGlobalBuffer((__gm__ float*)w + (addrs.updateBmmResOffset + wOffsetFloat));
        maskGm_.SetGlobalBuffer((__gm__ float*)w + (addrs.maskOffset + wOffsetFloat));
        lambdaGm_.SetGlobalBuffer((__gm__ float*)w + (addrs.lambdaOffset + wOffsetFloat));
        lambdaInverseGm_.SetGlobalBuffer((__gm__ float*)w + (addrs.lambdaInverseOffset + wOffsetFloat));

        intraMulCastGm_.SetGlobalBuffer((__gm__ InputType*)w + (addrs.intraMulCastOffset + wOffsetInputType));
        updateMulCastGm_.SetGlobalBuffer((__gm__ InputType*)w + (addrs.updateMulCastOffset + wOffsetInputType));
        kvCastGm_.SetGlobalBuffer((__gm__ InputType*)w + (addrs.kvCastOffset + wOffsetInputType));

        blockInfo_.d = d;
        blockInfo_.B = B;
        blockInfo_.Bxd = B * d;
        blockInfo_.BxB = B * B;
        blockInfo_.dxd = d * d;
        
        if ASCEND_IS_AIC {
            intraBmm1_.Init(nullptr);
            interBmm_.Init(nullptr);
            intraBmm2_.Init(nullptr);
            updateBmm_.Init(nullptr);
        } else {
            intraBmm1Epilogue_.Init(params.intra1EpiParams);
            intraBmm2Epilogue_.Init(params.intra2EpiParams);
            updateBmmEpilogue_.Init(params.updateEpiParams);

            // Each buffer group is carved out of the same UB span; pipe->Reset()
            // between groups rewinds the allocator so the groups deliberately
            // alias one another. The groups are never live at the same time
            // within one phase.
            // NOTE(review): aliasing across groups is assumed intentional —
            // confirm no phase uses two groups concurrently.
            AscendC::TPipe* pipe = GetTPipePtr();
            pipe->Reset();
            pipe->InitBuffer(ubTBufDecays_[0], UB_SIZE / 3 * 2);
            pipe->InitBuffer(ubTBufDecays_[1], UB_SIZE / 3);
            pipe->Reset();
            pipe->InitBuffer(ubTBufUpdateMul_[0], UB_SIZE / 3);
            pipe->InitBuffer(ubTBufUpdateMul_[1], UB_SIZE / 3);
            pipe->InitBuffer(ubTBufUpdateMul_[2], UB_SIZE / 3);
            pipe->Reset();
            pipe->InitBuffer(ubTBufKVCast_[0], sizeof(float) * blockInfo_.dxd);
            pipe->InitBuffer(ubTBufKVCast_[1], sizeof(InputType) * blockInfo_.dxd);
            pipe->Reset();
            pipe->InitBuffer(ubTBufEpis_[0], UB_SIZE / 3);
            pipe->InitBuffer(ubTBufEpis_[1], UB_SIZE / 3);
            pipe->InitBuffer(ubTBufEpis_[2], UB_SIZE / 3);
            pipe->Reset();
        }
        InitDecays();
    }


    // AIV epilogue for the intra scores: load raw scores + mask, run the
    // epilogue (mask-multiply + cast to InputType), store to intraMulCastGm_.
    // Work is split across the two AIV sub-cores by QueryTwoVecPolicy.
    __aicore__ inline void DoIntraBmm1Epi() {
        int32_t dataCount = blockInfo_.BxB;
        AscendC::LocalTensor<float> input = ubTBufEpis_[0].template Get<float>();
        AscendC::LocalTensor<float> param = ubTBufEpis_[1].template Get<float>();
        AscendC::LocalTensor<InputType> output = ubTBufEpis_[2].template Get<InputType>();
        for (int32_t dataCountOfComputed = 0; dataCountOfComputed < dataCount; ) {
            auto policy = QueryTwoVecPolicy(dataCountOfComputed, dataCount, dataCountOfComputed); // vec0 and vec1 will get different policy
            if (policy.dataCountForComputing != 0) {
                AscendC::DataCopy(input, intraBmm1ResGm_[policy.offset], policy.dataCountForComputing);
                AscendC::DataCopy(param, maskGm_[policy.offset], policy.dataCountForComputing);
                syncs.mte2ToV();  // loads complete before compute
                intraBmm1Epilogue_.Run(output, input, param, policy.dataCountForComputing);
                syncs.vToMTE3();  // compute complete before store
                AscendC::DataCopy(intraMulCastGm_[policy.offset], output, policy.dataCountForComputing);
                syncs.mte3ToMTE2();  // store complete before next-iteration load reuses UB
            }

            dataCountOfComputed = policy.dataCountOfComputed;
        }
    }

    // AIV epilogue combining the two attention contributions:
    //   O = inter .* lambda + intra2
    // The SyncOpInstance (SyncIntraBmm2) wait is deferred until the lambda
    // multiply of the first chunk is in flight, overlapping AIV compute with
    // the AIC matmul; it must fire exactly once before intraBmm2ResGm_ is read.
    template<class SyncOpInstance>
    __aicore__ inline void DoIntraBmm2Epi(int32_t outBaseOffset) {
        int32_t dataCount = blockInfo_.Bxd;
        AscendC::LocalTensor<float> input = ubTBufEpis_[0].template Get<float>();
        AscendC::LocalTensor<float> param = ubTBufEpis_[1].template Get<float>();
        AscendC::LocalTensor<InputType> output = ubTBufEpis_[2].template Get<InputType>();
        for (int32_t dataCountOfComputed = 0; dataCountOfComputed < dataCount; ) {
            auto policy = QueryTwoVecPolicy(dataCountOfComputed, dataCount, dataCountOfComputed); // vec0 and vec1 will get different policy
            if (policy.dataCountForComputing != 0) {
                AscendC::DataCopy(input, interBmmResGm_[policy.offset], policy.dataCountForComputing);
                AscendC::DataCopy(param, lambdaGm_[policy.offset], policy.dataCountForComputing);
                syncs.mte2ToV();
                intraBmm2Epilogue_.RunMultiply(input, input, param, policy.dataCountForComputing);
                
                if (dataCountOfComputed == 0) {
                    SyncOpInstance::Wait();  // intra2 matmul result now available
                }
                
                syncs.vToMTE2();  // param buffer free (multiply consumed it) before reload
                AscendC::DataCopy(param, intraBmm2ResGm_[policy.offset], policy.dataCountForComputing);
                syncs.mte2ToV();
                intraBmm2Epilogue_.RunAdd(output, input, param, policy.dataCountForComputing);
                syncs.vToMTE3();
                AscendC::DataCopy(attentionOutGm_[outBaseOffset + policy.offset], output, policy.dataCountForComputing);
                syncs.mte3ToMTE2();
            } else {
                // Idle sub-core still has to consume the cross-core flag once.
                if (dataCountOfComputed == 0) {
                    SyncOpInstance::Wait();
                    break;
                }
            }

            dataCountOfComputed = policy.dataCountOfComputed;
        }
    }

    // AIV epilogue for the KV state update:
    //   KV = decay_ * KV + updateBmmRes
    // kvGm_ is read, scaled, and written back in place — it carries the running
    // state to the next sequence block. Same deferred-wait pattern as above.
    template<class SyncOpInstance>
    __aicore__ inline void DoUpdateBmmEpi() {
        int32_t dataCount = blockInfo_.dxd;
        AscendC::LocalTensor<float> input = ubTBufEpis_[0].template Get<float>();
        AscendC::LocalTensor<float> param = ubTBufEpis_[1].template Get<float>();
        AscendC::LocalTensor<float> output = ubTBufEpis_[2].template Get<float>();
        for (int32_t dataCountOfComputed = 0; dataCountOfComputed < dataCount; ) {
            auto policy = QueryTwoVecPolicy(dataCountOfComputed, dataCount, dataCountOfComputed); // vec0 and vec1 will get different policy
            if (policy.dataCountForComputing != 0) {
                AscendC::DataCopy(input, kvGm_[policy.offset], policy.dataCountForComputing);
                syncs.mte2ToV();
                updateBmmEpilogue_.RunMultiplyScalar(input, input, decay_, policy.dataCountForComputing);
                
                if (dataCountOfComputed == 0) {
                    SyncOpInstance::Wait();  // update matmul result now available
                }
                
                // NOTE(review): unlike DoIntraBmm2Epi there is no V->MTE2 event
                // before this load, but param was not yet touched by V this
                // iteration, so no hazard — confirm intended.
                AscendC::DataCopy(param, updateBmmResGm_[policy.offset], policy.dataCountForComputing);
                syncs.mte2ToV();
                updateBmmEpilogue_.RunAdd(output, input, param, policy.dataCountForComputing);
                syncs.vToMTE3();
                AscendC::DataCopy(kvGm_[policy.offset], output, policy.dataCountForComputing);
                syncs.mte3ToMTE2();
            } else {
                // Idle sub-core still consumes the cross-core flag once.
                if (dataCountOfComputed == 0) {
                    SyncOpInstance::Wait();
                    break;
                }
            }

            dataCountOfComputed = policy.dataCountOfComputed;
        }
    }

    // Splits the remaining [computed, dataCount) range between the two AIV
    // sub-cores. Three regimes:
    //  - plenty left (> 2 UB chunks): both sub-cores take one full chunk each;
    //  - between 1 and 2 chunks left: split the remainder in half, finish;
    //  - at most 1 chunk left: sub-core 0 takes it all, sub-core 1 idles.
    __aicore__ inline VecCoreSplitArgs QueryTwoVecPolicy (int32_t computed, int32_t dataCount, int32_t offset) {
        int32_t dataCountToCompute = dataCount - computed;
        if (dataCountToCompute > policyOverTwoVectorThreshold) {
            return {
                dataCountForOnceProcessing,
                computed + 2 * dataCountForOnceProcessing,
                offset + vecSubIndex * dataCountForOnceProcessing
            };
        } else if (dataCountToCompute > policyOverOneVectorThreshold) {
            int32_t temp = dataCountToCompute >> 1;
            return {
                temp,
                dataCount,
                offset + vecSubIndex * temp
            };
        } else {
            return {
                vecSubIndex == 0 ? dataCountToCompute : 0,
                dataCount,
                offset 
            };
        }
    }

    // Precomputes decay_ = exp(-lnDecay * B) and fills the three decay tables
    // in workspace: the BxB mask, the Bxd lambda, and the Bxd lambda-inverse.
    // Work is split by rows across the two AIV sub-cores. AIC cores return
    // immediately.
    __aicore__ inline void InitDecays() {
        if ASCEND_IS_AIC {
            return;
        }

        auto vecSubIndex = AscendC::GetSubBlockIdx();  // shadows the member on purpose? NOTE(review): same value as member after Init

        constexpr int32_t localEleCount = UB_SIZE / 3 / sizeof(float);
        AscendC::LocalTensor<float> mLocal = ubTBufDecays_[0].template Get<float>();
        AscendC::LocalTensor<float> lliLocal = ubTBufDecays_[1].template Get<float>();

        AscendC::TPipe* pipe = GetTPipePtr();
        AscendC::TEventID vToS = pipe->FetchEventID(AscendC::HardEvent::V_S);
        AscendC::TEventID sToV = pipe->FetchEventID(AscendC::HardEvent::S_V);
        AscendC::TQueSync<PIPE_S, PIPE_V> syncSV;
        AscendC::TQueSync<PIPE_V, PIPE_S> syncVS;

        // Compute decay_ = exp(-lnDecay_ * B) via a one-element vector Exp:
        // scalar writes the operand (S->V), vector exps it, scalar reads it
        // back (V->S).
        lnDecay_ = decay_;
        decay_ = -lnDecay_ * blockInfo_.B;
        lliLocal.SetValue(0, decay_);
        syncSV.SetFlag(sToV);
        syncSV.WaitFlag(sToV);

        AscendC::Exp(lliLocal, lliLocal, 1);

        syncVS.SetFlag(vToS);
        syncVS.WaitFlag(vToS);
        decay_ = lliLocal.GetValue(0);

        // Fill the BxB mask in localEleCount-sized copies, half of the copies
        // per sub-core.
        // NOTE(review): assumes BxB is an even multiple of localEleCount;
        // any remainder rows would be skipped — confirm for B == 256 and the
        // target's UB_SIZE.
        int32_t mEleCount = blockInfo_.BxB;
        int32_t copyTimesForM = mEleCount / localEleCount;
        int32_t halfCopyTimesForM = copyTimesForM / 2;
        int32_t rowsCountOfEachCopyM = localEleCount / blockInfo_.B;

        for (
            int32_t copyIndex = vecSubIndex * halfCopyTimesForM;
            copyIndex < halfCopyTimesForM + halfCopyTimesForM * vecSubIndex;
            copyIndex++
        ) {
            InitMDecays(copyIndex, localEleCount, rowsCountOfEachCopyM, mLocal);
        }

        // assume that head_dim is always less equal than 128
        // (so each sub-core's Bxd/2 half fits in one UB buffer)
        int32_t lliRowsCount = blockInfo_.B;
        int32_t halfLLiRowsCount = lliRowsCount / 2;
        int32_t lliEleCount = blockInfo_.Bxd;
        int32_t halfLLiEleCount = lliEleCount / 2;

        InitLambdaDecays(vecSubIndex, halfLLiEleCount, halfLLiRowsCount, lliLocal);
        InitLambdaInverseDecays(vecSubIndex, halfLLiEleCount, halfLLiRowsCount, lliLocal);
    }

    // Builds one UB-sized slice of the BxB causal decay mask and stores it:
    // row i holds exp(lnDecay * (j - i)) for j <= i and exp(-inf) = 0 above
    // the diagonal (seeded via Duplicate with -inf, then index/muls/exp).
    __aicore__ inline void InitMDecays(
        int32_t copyIndex,
        int32_t localEleCount,
        int32_t rowsCountOfEachCopyM,
        AscendC::LocalTensor<float>& mLocal
    ) {
        // Bit pattern of float -inf (0xFF800000), materialized via type pun.
        constexpr uint32_t tmp = 0xFF800000;
        const float negInf = *((float *)(&tmp));

        AscendC::PipeBarrier<PIPE_V>();
        AscendC::Duplicate<float>(mLocal, negInf, rowsCountOfEachCopyM * blockInfo_.B);
        AscendC::PipeBarrier<PIPE_V>();

        // Overwrite the first (row_index + 1) entries of each row with the
        // ramp -rowIndex .. 0; the rest of the row stays -inf.
        for (int32_t localRowIndex = 0; localRowIndex < rowsCountOfEachCopyM; localRowIndex++) {
            int32_t mRowIndex = copyIndex * rowsCountOfEachCopyM + localRowIndex;
            AscendC::CreateVecIndex<float>(mLocal[localRowIndex * blockInfo_.B], -mRowIndex, mRowIndex + 1);
        }
        AscendC::PipeBarrier<PIPE_V>();
        AscendC::Muls(mLocal, mLocal, lnDecay_, localEleCount);
        AscendC::PipeBarrier<PIPE_V>();
        AscendC::Exp(mLocal, mLocal, localEleCount);
        AscendC::PipeBarrier<PIPE_V>();

        syncs.vToMTE3.SetFlag();
        syncs.vToMTE3.WaitFlag();
        AscendC::DataCopy(maskGm_[localEleCount * copyIndex], mLocal, localEleCount);
        syncs.mte3ToV.SetFlag();
        syncs.mte3ToV.WaitFlag();
    }

    // Builds one half of the Bxd lambda table: row i is the constant
    // exp(lnDecay * -(i + 1)) broadcast across d columns.
    __aicore__ inline void InitLambdaDecays(
        int32_t copyIndex,
        int32_t localEleCount,
        int32_t rowsCountOfEachCopyLLi,
        AscendC::LocalTensor<float>& lLocal
    ) {
        for (int32_t localRowIndex = 0; localRowIndex < rowsCountOfEachCopyLLi; localRowIndex++) {
            int32_t lambdaRowIndex = copyIndex * rowsCountOfEachCopyLLi + localRowIndex;
            AscendC::Duplicate<float>(lLocal[localRowIndex * blockInfo_.d], -lambdaRowIndex - 1, blockInfo_.d);
        }

        AscendC::PipeBarrier<PIPE_V>();
        AscendC::Muls(lLocal, lLocal, lnDecay_, localEleCount);
        AscendC::PipeBarrier<PIPE_V>();
        AscendC::Exp(lLocal, lLocal, localEleCount);
        AscendC::PipeBarrier<PIPE_V>();

        syncs.vToMTE3.SetFlag();
        syncs.vToMTE3.WaitFlag();
        AscendC::DataCopy(lambdaGm_[localEleCount * copyIndex], lLocal, localEleCount);
        syncs.mte3ToV.SetFlag();
        syncs.mte3ToV.WaitFlag();
    }

    // Builds one half of the Bxd lambda-inverse table: row i is the constant
    // exp(lnDecay * -(B - i - 1)) broadcast across d columns (decay measured
    // from the end of the block, used to weight K in the KV update).
    __aicore__ inline void InitLambdaInverseDecays(
        int32_t copyIndex,
        int32_t localEleCount,
        int32_t rowsCountOfEachCopyLLi,
        AscendC::LocalTensor<float>& liLocal
    ) {
        for (int32_t localRowIndex = 0; localRowIndex < rowsCountOfEachCopyLLi; localRowIndex++) {
            int32_t liRowIndex = copyIndex * rowsCountOfEachCopyLLi + localRowIndex;
            AscendC::Duplicate<float>(liLocal[localRowIndex * blockInfo_.d], -(blockInfo_.B - liRowIndex - 1), blockInfo_.d);
        }

        AscendC::PipeBarrier<PIPE_V>();
        AscendC::Muls(liLocal, liLocal, lnDecay_, localEleCount);
        AscendC::PipeBarrier<PIPE_V>();
        AscendC::Exp(liLocal, liLocal, localEleCount);
        AscendC::PipeBarrier<PIPE_V>();

        syncs.vToMTE3.SetFlag();
        syncs.vToMTE3.WaitFlag();
        AscendC::DataCopy(lambdaInverseGm_[localEleCount * copyIndex], liLocal, localEleCount);
        syncs.mte3ToV.SetFlag();
        syncs.mte3ToV.WaitFlag();
    }

    // Casts the dxd float KV state to InputType so the cube core can consume
    // it as the inter matmul's B operand. Runs on both AIV sub-cores;
    // NOTE(review): both sub-cores appear to perform the identical full-range
    // cast and store to the same GM region — redundant but benign if so;
    // confirm that is intended rather than a missed split.
    __aicore__ inline void kvCast() {
        SEvent<AscendC::HardEvent::MTE2_V> mte2ToV;
        SEvent<AscendC::HardEvent::V_MTE3> vToMTE3;
        SEvent<AscendC::HardEvent::MTE3_MTE2> mte3ToMTE2;

        AscendC::LocalTensor<float> inputLocal = ubTBufKVCast_[0].template Get<float>();
        AscendC::LocalTensor<InputType> outputLocal = ubTBufKVCast_[1].template Get<InputType>();

        AscendC::DataCopy(inputLocal, kvGm_, blockInfo_.dxd);
        mte2ToV.SetFlag();
        mte2ToV.WaitFlag();
        AscendC::PipeBarrier<PIPE_V>();

        AscendC::Cast(outputLocal, inputLocal, AscendC::RoundMode::CAST_ROUND, blockInfo_.dxd);
        AscendC::PipeBarrier<PIPE_V>();
        vToMTE3.SetFlag();
        vToMTE3.WaitFlag();

        AscendC::DataCopy(kvCastGm_, outputLocal, blockInfo_.dxd);
        mte3ToMTE2.SetFlag();
        mte3ToMTE2.WaitFlag();
    }

    // Elementwise outputGm = Cast(inputGm) * paraGm, chunked over UB, each AIV
    // sub-core taking the front/back half of the chunks. Used to produce the
    // lambda-inverse-weighted K for the update matmul.
    // NOTE(review): numChunk = Bxd / chunkSize truncates; if Bxd is smaller
    // than (or not a multiple of) chunkSize the tail — possibly everything —
    // is never processed. Confirm Bxd is always a multiple of chunkSize for
    // the supported shapes (B == 256, d in {8..128}) and this target's UB_SIZE.
    __aicore__ inline void updateMul(
        const AscendC::GlobalTensor<InputType>& outputGm_,
        const AscendC::GlobalTensor<InputType>& inputGm_,
        const AscendC::GlobalTensor<float>& paraGm_
    ) {
        int32_t vecIdx = AscendC::GetSubBlockIdx();
        AscendC::LocalTensor<float> paraLocal = ubTBufUpdateMul_[0].template Get<float>();
        AscendC::LocalTensor<InputType> inputLocal = ubTBufUpdateMul_[1].template Get<InputType>();
        AscendC::LocalTensor<float> resLocal = ubTBufUpdateMul_[2].template Get<float>();

        constexpr int32_t chunkSize = UB_SIZE / 3 / sizeof(float);
        int32_t numChunk = blockInfo_.Bxd / chunkSize;

        int32_t chunkCountPerVec = numChunk / 2;
        SEvent<AscendC::HardEvent::MTE2_V> mte2ToV;
        SEvent<AscendC::HardEvent::V_MTE3> vToMTE3;
        SEvent<AscendC::HardEvent::MTE3_MTE2> mte3ToMTE2;

        for (int32_t i = 0; i < chunkCountPerVec; i++) {
            int32_t gmOffset = i * chunkSize + vecIdx * chunkCountPerVec * chunkSize;

            AscendC::DataCopy(inputLocal, inputGm_[gmOffset], chunkSize);
            mte2ToV.SetFlag();
            mte2ToV.WaitFlag();

            // Widen input to float for the multiply.
            AscendC::PipeBarrier<PIPE_V>();
            AscendC::Cast(resLocal, inputLocal, AscendC::RoundMode::CAST_NONE, chunkSize);
            AscendC::PipeBarrier<PIPE_V>();

            AscendC::DataCopy(paraLocal, paraGm_[gmOffset], chunkSize);
            mte2ToV.SetFlag();
            mte2ToV.WaitFlag();

            AscendC::Mul(resLocal, paraLocal, resLocal, chunkSize);
            AscendC::PipeBarrier<PIPE_V>();
            // Narrow the product back to InputType, reusing inputLocal.
            AscendC::Cast(inputLocal, resLocal, AscendC::RoundMode::CAST_ROUND, chunkSize);
            AscendC::PipeBarrier<PIPE_V>();

            vToMTE3.SetFlag();
            vToMTE3.WaitFlag();
            AscendC::DataCopy(outputGm_[gmOffset], inputLocal, chunkSize);
            mte3ToMTE2.SetFlag();
            mte3ToMTE2.WaitFlag();
        }
    }
};


#endif // !LIGHTNING_ATTENTION_H