/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
        limitations under the License.
==============================================================================*/
#ifndef HSTU_DENSE_FORWARD_JAGGED_KERNEL_FUXI_H
#define HSTU_DENSE_FORWARD_JAGGED_KERNEL_FUXI_H


#include "hstu_dense_kernel_patten_bsnd_fuxi.h"

using namespace AscendC;

namespace HstuDenseForwardFuxi {

struct JaggedTaskArgs {
    uint32_t batchId = 0;           // batch this basic block belongs to
    uint32_t headId = 0;            // head this basic block belongs to
    uint32_t qSeqId = 0;            // index of this block's Query seq block (one block is 256 sequences)
    uint32_t kSeqId = 0;            // index of this block's Key seq block (one block is 256 sequences)
    uint32_t actualSeqLen = 0;      // actual sequence length of this basic block's batch
    uint32_t kSeqNum = 0;           // number of matmul steps along the K axis for this block
    uint32_t causalMask = 0;        // whether this basic block needs a causal mask
    uint32_t transTaskId = 0;       // id of the transpose task for this basic block
    uint32_t computeASeqLen = 0;    // sequence length of the left (A) matrix in matmul
    uint32_t computeBSeqLen = 0;    // sequence length of the right (B) matrix in matmul
    float scale = 0.0f;             // siluScale of this basic block
    int64_t seqGlobalOffset = 0;    // global sequence offset of this basic block
    int64_t batchOffset = 0;        // batch offset (prefix-sum of sequence lengths) of this block
    int64_t headSeqLimit = 0;       // max head offset; past it the head_id must advance
    int64_t kvOffset = 0;           // key/value compute offset of this basic block
    int64_t ioOffset = 0;           // query / attenOutput compute offset of this basic block
};

template <typename qType>
class HstuDenseForwardJaggedKernelFuxi : public HstuDenseKernelPattenBsndFuxi<qType> {
public:
    __aicore__ inline HstuDenseForwardJaggedKernelFuxi() {}

    // Entry point: loads tiling data, then processes every block assigned to this core.
    __aicore__ inline void Compute(const HstuDenseForwardFuxiTilingData *__restrict tilingDataPtr);

    // Software-pipelined main loop over the [sBlkId, eBlkId) block range.
    __aicore__ inline void ComputeAllBlock();

private:
    // Caches tiling parameters and this core's block range; returns -1 when the core has no work.
    __aicore__ inline int PreInit(const HstuDenseForwardFuxiTilingData *__restrict tilingDataPtr);

    // Seeds compute slot 0 from the core's starting block id.
    __aicore__ inline void GetTaskInfo(uint32_t sBlkId);

    // Advances the task in a slot to the next query block (same head, or next head/batch).
    __aicore__ inline void UpdateTaskInfo(uint32_t taskId);

    // Fills one task slot from (batchId, head_id, global sequence offset).
    __aicore__ inline void FillTaskInfo(uint32_t batchId, uint32_t head_id, int64_t seqGlobalOffset, uint32_t taskId);

    // Pipeline stages below; taskId selects the compute slot the stage operates on.
    __aicore__ inline void ComputeQkMatmul(uint32_t taskId);

    __aicore__ inline void ComputeVecScore(uint32_t taskId);

    __aicore__ inline void ComputeSvMatmul(uint32_t taskId);

    __aicore__ inline void ComputeTvMatmul(uint32_t taskId);

    __aicore__ inline void ComputePvMatmul(uint32_t taskId);

    // Writes one finished block's result back to global memory.
    __aicore__ inline void TransResult(uint32_t transtaskId);

    __aicore__ inline void ComputeBiasMask(uint32_t taskId);

    // Runs the full remaining stage sequence for one task (used to drain the pipeline).
    __aicore__ inline void ComputeCurrentTask(uint32_t currentTaskId);

    uint32_t seqOffsets[MAX_BATCH_SIZE + 1];    // prefix-sum table of per-batch sequence lengths
    uint32_t sBlkId {0};                        // first block id handled by this core
    uint32_t eBlkId {0};                        // one past the last block id handled by this core
    uint32_t maxSeqLen {0};                     // maximum sequence length from tiling data

    JaggedTaskArgs computeTaskInfo[COMPUTE_PIPE_NUM];
    JaggedTaskArgs trasnTaskInfo[TRANS_PIPE_NUM];   // NOTE(review): name is a typo for "transTaskInfo"
};

template <typename qType>
__aicore__ inline void
HstuDenseForwardJaggedKernelFuxi<qType>::Compute(const HstuDenseForwardFuxiTilingData *__restrict tilingDataPtr)
{
    // Load tiling data and this core's block range; bail out when no block was assigned.
    if (PreInit(tilingDataPtr) == -1) {
        return;
    }
    ComputeAllBlock();
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::ComputeSvMatmul(uint32_t taskId)
{
    const JaggedTaskArgs &task = computeTaskInfo[taskId];
    // Accumulate into the destination for every K block after the first one.
    const uint8_t accumulate = (task.kSeqId != 0) ? 1 : 0;

    this->DoSvMatmulImpl(task.kvOffset, taskId, task.transTaskId, accumulate,
                         task.computeASeqLen, this->headDim, task.computeBSeqLen);
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::ComputeTvMatmul(uint32_t taskId)
{
    const JaggedTaskArgs &task = computeTaskInfo[taskId];
    // Accumulate into the destination for every K block after the first one.
    const uint8_t accumulate = (task.kSeqId != 0) ? 1 : 0;

    this->DoTvMatmulImpl(task.kvOffset, taskId, task.transTaskId, accumulate,
                         task.computeASeqLen, this->headDim, task.computeBSeqLen);
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::ComputePvMatmul(uint32_t taskId)
{
    const JaggedTaskArgs &task = computeTaskInfo[taskId];
    // Accumulate into the destination for every K block after the first one.
    const uint8_t accumulate = (task.kSeqId != 0) ? 1 : 0;

    this->DoPvMatmulImpl(task.kvOffset, taskId, task.transTaskId, accumulate,
                         task.computeASeqLen, this->headDim, task.computeBSeqLen);
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::ComputeQkMatmul(uint32_t taskId)
{
    const JaggedTaskArgs &task = computeTaskInfo[taskId];
    // Issue the QK matmul for this basic block at the task's Q and KV offsets.
    this->DoQkMatmulImpl(task.ioOffset, task.kvOffset, taskId,
                         task.computeASeqLen, task.computeBSeqLen, this->headDim);
}


template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::ComputeVecScore(uint32_t taskId)
{
    const JaggedTaskArgs &task = computeTaskInfo[taskId];

    // Offset into the per-(batch, head) [maxSeqLen, maxSeqLen] bias tensor.
    // Widen to int64_t BEFORE multiplying: the original expression was
    // evaluated entirely in the operands' 32-bit type and only then assigned
    // to int64_t, so maxSeqLen * maxSeqLen alone could already overflow.
    int64_t seqLenSq = static_cast<int64_t>(this->maxSeqLen) * this->maxSeqLen;
    int64_t biasOffset = static_cast<int64_t>(task.batchId) * this->headNum * seqLenSq +
        static_cast<int64_t>(task.headId) * seqLenSq +
        static_cast<int64_t>(task.qSeqId) * this->maxSeqLen * this->blockHeight +
        static_cast<int64_t>(task.kSeqId) * this->blockHeight;

    // Mask shares the same layout as bias.
    int64_t maskOffset = biasOffset;

    this->VecScoreImpl(taskId, biasOffset, maskOffset, task.scale,
                       task.causalMask, task.computeASeqLen,
                       task.computeBSeqLen);
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::ComputeBiasMask(uint32_t taskId)
{
    const JaggedTaskArgs &task = computeTaskInfo[taskId];

    // Widen to int64_t BEFORE multiplying: the original products were computed
    // in 32-bit arithmetic and only then assigned to int64_t, so large shapes
    // (e.g. maxSeqLen * maxSeqLen) could overflow before the widening.
    int64_t seqLenSq = static_cast<int64_t>(this->maxSeqLen) * this->maxSeqLen;

    // Offset inside one [maxSeqLen, maxSeqLen] tile (shared by all three tensors).
    int64_t positionOffset = static_cast<int64_t>(task.qSeqId) * this->maxSeqLen * this->blockHeight +
        static_cast<int64_t>(task.kSeqId) * this->blockHeight;

    // Timestamp offset: one [maxSeqLen, maxSeqLen] tile per batch.
    int64_t timestampOffset = positionOffset + static_cast<int64_t>(task.batchId) * seqLenSq;

    // Mask offset: one [maxSeqLen, maxSeqLen] tile per (batch, head) pair.
    int64_t maskOffset = positionOffset +
        static_cast<int64_t>(task.batchId) * this->headNum * seqLenSq +
        static_cast<int64_t>(task.headId) * seqLenSq;

    this->BiasMaskImpl(taskId, timestampOffset, positionOffset, maskOffset,
        task.causalMask, task.computeASeqLen,
        task.computeBSeqLen);
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::TransResult(uint32_t transtaskId)
{
    const JaggedTaskArgs &task = trasnTaskInfo[transtaskId];

    // Default destination: the precomputed query/attenOutput offset.
    int64_t outStartOffset = task.ioOffset;
    if (this->enableBias) {
        // With bias enabled the output layout carries an OUTPUT_DIM2_TIMES3
        // factor, so the destination offset is recomputed here. The qSeqId and
        // headId terms are widened to int64_t so the products cannot overflow
        // 32-bit arithmetic (batchOffset is already int64_t).
        outStartOffset = task.batchOffset * OUTPUT_DIM2_TIMES3 * this->headDim * this->headNum +
            static_cast<int64_t>(task.qSeqId) * OUTPUT_DIM2_TIMES3 * this->blockHeight * this->headNum *
                this->headDim +
            static_cast<int64_t>(task.headId) * this->headDim;
    }
    this->DoTransResultImpl(transtaskId, outStartOffset, task.computeASeqLen);
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::ComputeCurrentTask(uint32_t currentTaskId)
{
    // Run every remaining pipeline stage for one task back-to-back. Used while
    // draining the software pipeline at the end of ComputeAllBlock, where the
    // QK matmul for this task has already been issued and waited on.
    this->ComputeVecScore(currentTaskId);
    if (this->enableBias) {
        this->ComputeBiasMask(currentTaskId);
    }
    // Full barrier: vector-stage results must be visible before the matmuls consume them.
    pipe_barrier(PIPE_ALL);

    this->ComputeSvMatmul(currentTaskId);
    this->WaitSvMatmul();

    if (this->enableBias) {
        this->ComputeTvMatmul(currentTaskId);
        this->WaitTvMatmul();

        this->ComputePvMatmul(currentTaskId);
        this->WaitPvMatmul();
    }
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::ComputeAllBlock()
{
    // Software-pipelined main loop over all (block, kSeq) tasks of this core.
    // In one iteration, task i issues its QK matmul, task i-1 runs the vector
    // score (and bias/mask) stage, and task i-2 runs the SV (and TV/PV)
    // matmuls. Results are written back two transpose tasks behind, and the
    // pipeline tail is drained after the loop.
    GetTaskInfo(this->sBlkId);

    uint32_t taskId = 0;
    uint32_t transtaskId = 0;

    uint32_t currentTaskId = 0;
    uint32_t preTaskId = 0;
    uint32_t prePreTaskId = 0;
    uint32_t nextTaskId = 0;

    for (auto blkId = sBlkId; blkId < eBlkId; blkId++) {
        auto kSeqNum = computeTaskInfo[taskId % COMPUTE_PIPE_NUM].kSeqNum;
        for (auto kSeqId = 0; kSeqId < kSeqNum; kSeqId++) {
            uint32_t causalMask = 0;

            // Lower-triangular causal mask: K blocks strictly past the diagonal
            // contribute nothing and are skipped outright.
            if ((this->maskType == CausalMaskT::MASK_TRIL) &&
                kSeqId > computeTaskInfo[taskId % COMPUTE_PIPE_NUM].qSeqId) {
                continue;
            }

            // Pipeline slot indices. (taskId - 1) / (taskId - 2) wrap around for
            // small taskId, but those slots are only consumed under the
            // taskId > 0 / taskId > 1 guards below.
            currentTaskId = taskId % COMPUTE_PIPE_NUM;
            preTaskId = (taskId - 1) % COMPUTE_PIPE_NUM;
            prePreTaskId = (taskId - 2) % COMPUTE_PIPE_NUM;
            nextTaskId = (taskId + 1) % COMPUTE_PIPE_NUM;

            // The diagonal K block needs an in-block causal mask.
            if ((this->maskType == CausalMaskT::MASK_TRIL) &&
                kSeqId == computeTaskInfo[currentTaskId].qSeqId) {
                causalMask = 1;
            }

            this->computeTaskInfo[currentTaskId].transTaskId = transtaskId % TRANS_PIPE_NUM;
            this->computeTaskInfo[currentTaskId].causalMask = causalMask;
            this->computeTaskInfo[currentTaskId].kSeqId = kSeqId;
            // The last K block of the sequence may be shorter than blockHeight.
            this->computeTaskInfo[currentTaskId].computeBSeqLen =
                (kSeqId != (kSeqNum - 1)) ?
                    (this->blockHeight) :
                    (this->computeTaskInfo[currentTaskId].actualSeqLen - kSeqId * this->blockHeight);
            this->computeTaskInfo[currentTaskId].kvOffset = \
                this->computeTaskInfo[currentTaskId].batchOffset * this->headDim * this->headNum + \
                this->computeTaskInfo[currentTaskId].kSeqId * this->blockHeight * this->headNum * this->headDim + \
                this->computeTaskInfo[currentTaskId].headId * this->headDim;

            // Stage 1: issue QK matmul for the current task.
            this->ComputeQkMatmul(currentTaskId);

            // Stage 3: issue SV (and, with bias, TV/PV) matmuls for task i-2.
            if (taskId > 1) {
                this->ComputeSvMatmul(prePreTaskId);
                if (this->enableBias) {
                    this->ComputeTvMatmul(prePreTaskId);
                    this->ComputePvMatmul(prePreTaskId);
                }
            }

            // Stage 2: vector score (and bias/mask) for task i-1.
            if (taskId > 0) {
                this->ComputeVecScore(preTaskId);
                if (this->enableBias) {
                    this->ComputeBiasMask(preTaskId);
                }
            }

            // Wait for the current task's QK matmul...
            this->WaitQkMatmul();

            // ...and for task i-2's SV/TV/PV matmuls.
            if (taskId > 1) {
                this->WaitSvMatmul();
                if (this->enableBias) {
                    this->WaitTvMatmul();
                    this->WaitPvMatmul();
                }
            }

            // Seed the next slot with the current task's description; the
            // per-step fields are overwritten next iteration / by UpdateTaskInfo.
            computeTaskInfo[nextTaskId] = computeTaskInfo[currentTaskId];
            taskId++;
        }

        // One transpose task per finished query block, flushed two tasks behind.
        this->trasnTaskInfo[transtaskId % TRANS_PIPE_NUM] = this->computeTaskInfo[currentTaskId];
        if (transtaskId > 1) {
            this->TransResult((transtaskId - 2) % TRANS_PIPE_NUM);
        }
        transtaskId++;

        this->UpdateTaskInfo(taskId % COMPUTE_PIPE_NUM);
    }

    if (taskId == 0) {
        return; // every task was masked away: nothing to drain
    }

    // Drain a one-task pipeline: only the current task is in flight.
    if (taskId == 1) {
        ComputeCurrentTask(currentTaskId);

        this->TransResult((transtaskId - 1) % TRANS_PIPE_NUM);
        return;
    }

    // Drain with a single transpose task outstanding: finish task i-1, then the
    // current task, then write back the one pending block.
    if (transtaskId == 1) {
        this->ComputeSvMatmul(preTaskId);
        this->WaitSvMatmul();
        if (this->enableBias) {
            this->ComputeTvMatmul(preTaskId);
            this->WaitTvMatmul();

            this->ComputePvMatmul(preTaskId);
            this->WaitPvMatmul();
        }

        ComputeCurrentTask(currentTaskId);
        this->TransResult((transtaskId - 1) % TRANS_PIPE_NUM);
        return;
    }

    // General drain: finish task i-1 and the current task, then flush the two
    // outstanding transpose tasks in order.
    this->ComputeSvMatmul(preTaskId);
    this->WaitSvMatmul();
    if (this->enableBias) {
        this->ComputeTvMatmul(preTaskId);
        this->WaitTvMatmul();

        this->ComputePvMatmul(preTaskId);
        this->WaitPvMatmul();
    }

    ComputeCurrentTask(currentTaskId);

    this->TransResult((transtaskId - 2) % TRANS_PIPE_NUM);
    this->TransResult((transtaskId - 1) % TRANS_PIPE_NUM);
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::FillTaskInfo(uint32_t batchId, uint32_t headId,
                                                                         int64_t seqGlobalOffset, uint32_t taskId)
{
    // Populate computeTaskInfo[taskId] with the description of the basic block
    // that starts at global sequence offset seqGlobalOffset in (batchId, headId).
    // seqGlobalOffset counts sequence rows across batches and heads — each batch
    // contributes actualSeqLen rows per head (see GetTaskInfo / UpdateTaskInfo).
    if (batchId >= this->batchSize) {
        return; // past the last batch: no task to fill
    }

    taskId = taskId % COMPUTE_PIPE_NUM;

    auto nextBatchSeqOffset = this->seqOffsets[batchId + 1];
    auto currentBatchSeqOffset = this->seqOffsets[batchId];

    computeTaskInfo[taskId].seqGlobalOffset = seqGlobalOffset;
    computeTaskInfo[taskId].batchId = batchId;
    // Jagged layout: the batch's length is the difference of adjacent prefix offsets.
    computeTaskInfo[taskId].actualSeqLen = nextBatchSeqOffset - currentBatchSeqOffset;
    computeTaskInfo[taskId].scale = this->siluScale;
    computeTaskInfo[taskId].batchOffset = currentBatchSeqOffset;
    // Global offset at which this (batch, head) pair ends; once seqGlobalOffset
    // reaches it, UpdateTaskInfo advances to the next head (or batch).
    computeTaskInfo[taskId].headSeqLimit =
        computeTaskInfo[taskId].batchOffset * this->headNum + computeTaskInfo[taskId].actualSeqLen * (headId + 1);

    // Offset inside the current batch (spanning all its heads), used to recover
    // the query block index within this head.
    auto batchInnerOffset = seqGlobalOffset - (computeTaskInfo[taskId].batchOffset * this->headNum);
    computeTaskInfo[taskId].headId = headId;
    computeTaskInfo[taskId].qSeqId =
        (batchInnerOffset - computeTaskInfo[taskId].headId * computeTaskInfo[taskId].actualSeqLen) / this->blockHeight;
    // Number of K blocks this query block must be multiplied against (ceil division).
    computeTaskInfo[taskId].kSeqNum =
        (computeTaskInfo[taskId].actualSeqLen + this->blockHeight - 1) / this->blockHeight;

    // NOTE(review): the 32-bit sub-product qSeqId * blockHeight * headNum * headDim
    // below could overflow before being widened by the int64_t batchOffset term
    // — confirm the supported shape range.
    computeTaskInfo[taskId].ioOffset =
        computeTaskInfo[taskId].batchOffset * this->headDim * this->headNum + \
        computeTaskInfo[taskId].qSeqId * this->blockHeight * this->headNum * this->headDim + \
        computeTaskInfo[taskId].headId * this->headDim;

    // The last block of a head may be shorter than blockHeight.
    if ((computeTaskInfo[taskId].headSeqLimit - seqGlobalOffset) >= this->blockHeight) {
        computeTaskInfo[taskId].computeASeqLen = this->blockHeight;
    } else {
        computeTaskInfo[taskId].computeASeqLen = computeTaskInfo[taskId].headSeqLimit - seqGlobalOffset;
    }
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::UpdateTaskInfo(uint32_t taskId)
{
    // Advance the task in slot taskId to the next query block: either step
    // blockHeight rows forward within the same (batch, head), or — when the
    // current head is exhausted — refill the slot for the next head/batch.
    auto batchId = computeTaskInfo[taskId].batchId;
    auto headId = computeTaskInfo[taskId].headId;

    int64_t seqGlobalOffset = computeTaskInfo[taskId].seqGlobalOffset;
    // Rows remaining in the current head starting at seqGlobalOffset.
    int64_t gap = computeTaskInfo[taskId].headSeqLimit - seqGlobalOffset;

    if (gap <= this->blockHeight) {
        // This (possibly short) block consumes the head: advance head, rolling
        // over into the next batch after the last head.
        headId++;
        if (headId >= this->headNum) {
            batchId++;
        }

        if (batchId >= this->batchSize) {
            return; // no more work: leave the slot unchanged
        }

        seqGlobalOffset = seqGlobalOffset + gap;
        headId = headId % this->headNum;
        this->FillTaskInfo(batchId, headId, seqGlobalOffset, taskId);
    } else {
        // Same head: step one block forward and refresh the derived fields.
        computeTaskInfo[taskId].seqGlobalOffset = seqGlobalOffset + this->blockHeight;

        // Clamp the A-side length when the next block is the head's short tail block.
        uint32_t computeASeqLen = this->blockHeight;
        if ((computeTaskInfo[taskId].seqGlobalOffset + this->blockHeight) > computeTaskInfo[taskId].headSeqLimit) {
            computeASeqLen = computeTaskInfo[taskId].headSeqLimit - computeTaskInfo[taskId].seqGlobalOffset;
        }

        // Recompute the query block index and the query/output offset (same
        // formulas as FillTaskInfo).
        auto batchInnerOffset =
            computeTaskInfo[taskId].seqGlobalOffset - (computeTaskInfo[taskId].batchOffset * this->headNum);
        computeTaskInfo[taskId].qSeqId =
            (batchInnerOffset - computeTaskInfo[taskId].headId * computeTaskInfo[taskId].actualSeqLen) /
            this->blockHeight;
        computeTaskInfo[taskId].ioOffset =
            computeTaskInfo[taskId].batchOffset * this->headDim * this->headNum + \
            computeTaskInfo[taskId].qSeqId * this->blockHeight * this->headNum * this->headDim + \
            computeTaskInfo[taskId].headId * this->headDim;
        computeTaskInfo[taskId].computeASeqLen = computeASeqLen;
    }
}

template <typename qType>
__aicore__ inline void HstuDenseForwardJaggedKernelFuxi<qType>::GetTaskInfo(uint32_t sBlkId)
{
    // Walk the (batch, head) pairs, accumulating a flattened block counter and
    // the global sequence offset, until the range containing sBlkId is found;
    // then seed compute slot 0 with that block's task description.
    // Fix: removed the unused local `offsetOfSeq` from the original.
    uint32_t blkBase = 0;
    int64_t globalSeqBase = 0;

    for (auto flatIdx = 0; flatIdx < this->batchSize * this->headNum; flatIdx++) {
        const uint32_t curBatch = flatIdx / this->headNum;
        const uint32_t curHead = flatIdx % this->headNum;

        // Jagged layout: per-batch length from the prefix-sum table, rounded up
        // to whole blocks.
        const uint32_t batchSeqSize = this->seqOffsets[curBatch + 1] - this->seqOffsets[curBatch];
        const uint32_t batchBlkSize = (batchSeqSize + this->blockHeight - 1) / this->blockHeight;

        if (sBlkId < (blkBase + batchBlkSize)) {
            const uint32_t innerBlkId = sBlkId - blkBase;
            this->FillTaskInfo(curBatch, curHead, globalSeqBase + innerBlkId * this->blockHeight, 0);
            return;
        }

        blkBase += batchBlkSize;
        globalSeqBase += batchSeqSize;
    }
}

template <typename qType>
__aicore__ inline int
HstuDenseForwardJaggedKernelFuxi<qType>::PreInit(const HstuDenseForwardFuxiTilingData *__restrict tilingDataPtr)
{
    // Cache tiling parameters and this core's [start, end) block range.
    this->maxSeqLen = tilingDataPtr->maxSeqLen;
    this->sBlkId = tilingDataPtr->eachCoreStartBlockId[GetBlockIdx()];
    this->eBlkId = tilingDataPtr->eachCoreEndBlockId[GetBlockIdx()];

    // A (0, 0) range means no block was assigned to this core.
    const bool noWork = (this->sBlkId == 0) && (this->eBlkId == 0);
    if (noWork) {
        return -1;
    }

    // Copy the jagged per-batch sequence-offset table into local storage.
    for (auto idx = 0; idx <= this->batchSize; idx++) {
        this->seqOffsets[idx] = tilingDataPtr->seqOffset[idx];
    }

    return 0;
}

}

#endif