#include "kernel_operator.h"
#include "common.h"
#include "common_func.h"
#include "simd.h"
#include "iterator.h"
#include "mma.h"
#include "utils.h"
#include "fa_common.h"


using namespace AscendC;

template<typename typeT>
class KernelMla {
public:
    __aicore__ inline KernelMla()
    {
        aSize = m * k;
        bSize = k * n;
        cSize = m * n;
        mBlocks = m / 16;
        nBlocks = n / 16;
        kBlocks = k / 16;

        a2Size = m2 * k2;
        b2Size = k2 * n2;
        c2Size = m2 * n2;
        m2Blocks = m2 / 16;
        n2Blocks = n2 / 16;
        k2Blocks = k2 / 16;
    }
    __aicore__ inline void Init(GM_ADDR query, GM_ADDR queryRope, GM_ADDR kvCache, GM_ADDR kvCacheRope, GM_ADDR attenOut,
                                GM_ADDR workspace,
                                uint32_t num_batches,
                                uint32_t q_heads,
                                uint32_t block_size,
                                uint32_t total_block_num,
                                float tor
                                )
    {
        this->tokens_per_core = 3;
        this->tor = tor;
        this->num_heads = 32;
        uint32_t query_startpointer = GetBlockIdx() * this->tokens_per_core * m * 512;
        uint32_t queryRope_startpointer = GetBlockIdx() * this->tokens_per_core * m * 64;
        uint32_t key_startpointer = GetBlockIdx() * this->tokens_per_core * 2 * 128 * 512;
        uint32_t keyRope_startpointer = GetBlockIdx() * this->tokens_per_core * 2 * 128 * 64;
        uint32_t out_startpointer = GetBlockIdx() * this->tokens_per_core * m * n;
        uint32_t out2_startpointer = GetBlockIdx() * this->tokens_per_core * m2 * n2;
        queryGm.SetGlobalBuffer((__gm__ half*)query + query_startpointer);
        queryRopeGM.SetGlobalBuffer((__gm__ half*)queryRope + queryRope_startpointer);
        kvCacheGM.SetGlobalBuffer((__gm__ half*)kvCache + key_startpointer);
        kvCacheRopeGM.SetGlobalBuffer((__gm__ half*)kvCacheRope + keyRope_startpointer);
        attenOutGM.SetGlobalBuffer((__gm__ half*)attenOut + out2_startpointer);

        uint32_t tail_tokens = 2;
        if(GetBlockIdx() == 9) {
            this->tokens_per_core += tail_tokens;
        }
        tmp_GM.SetGlobalBuffer((__gm__ float*)workspace + out_startpointer, this->tokens_per_core * m * n);
        p_GM.SetGlobalBuffer((__gm__ half*)workspace + 32 * m * n * 4 + out_startpointer, this->tokens_per_core * m * n);

        pipe.InitBuffer(inQueueA1, 1, aSize * sizeof(half));
        pipe.InitBuffer(inQueueA2, 1, aSize * sizeof(half));
        pipe.InitBuffer(inQueueB1, 1, bSize * sizeof(half));
        pipe.InitBuffer(inQueueB2, 1, bSize * sizeof(half) / 16);
        pipe.InitBuffer(outQueueCO1, 1, cSize * sizeof(float) / 16);
        pipe.InitBuffer(outQueueCO2, 1, cSize * sizeof(float));

        pipe.InitBuffer(inQueueX, 1, 256 * sizeof(float));
        pipe.InitBuffer(outQueueY, 1, 256 * sizeof(half));
        pipe.InitBuffer(QueueTmp1, 256 * sizeof(float));
        pipe.InitBuffer(QueueTmp2, 32 * 512 * sizeof(half));

        int typeSize = 4;
        int elementPerBlock = 32 / typeSize;
        int elementPerRepeat = 256 / typeSize;
        int firstMaxRepeat = 256 / elementPerRepeat;
        int iter1OutputCount = firstMaxRepeat;
        int iter1AlignEnd = RoundUp(iter1OutputCount, elementPerBlock) * elementPerBlock;
        int finalWorkLocalNeedSize = iter1AlignEnd;
        pipe.InitBuffer(workQueue, 1, finalWorkLocalNeedSize * sizeof(float));
    }
    __aicore__ inline void Process()
    {
        // Q * K.T
        for(int32_t k = 0; k < this->tokens_per_core; k++) {
            CopyIn(k);
            SplitA();

            AscendC::LocalTensor<half> b1Local = inQueueB1.DeQue<half>();
            AscendC::LocalTensor<half> a2Local = inQueueA2.DeQue<half>();
            AscendC::LocalTensor<float> c2Local = outQueueCO2.AllocTensor<float>();
            // split matrix b into 2 parts, [32, 16] and [32, 16]
            for (int i = 0; i < n / 16; ++i) {
                SplitB(b1Local, i);
                Compute(a2Local);
                Aggregate(c2Local, i);
            }
            inQueueB1.FreeTensor(b1Local);
            inQueueA2.FreeTensor(a2Local);
            outQueueCO2.EnQue<float>(c2Local);

            CopyOut(k);
        }    
        PipeBarrier<PIPE_ALL>();
        // * scale & softmax 
        for(int32_t k = 0; k < this->tokens_per_core; k++) {
            for(int32_t m = 0; m < this->num_heads; m++) {
                CopyInSx(k * 32 * 256 + m * 256);
                ComputeSoftmax();
                CopyOutSx(k * 32 * 256 + m * 256);
            }
        }
        PipeBarrier<PIPE_ALL>();
        // P * V
        Init2nd();
        for(int32_t k = 0; k < this->tokens_per_core; k++) {
            CopyIn2(k);
            SplitA2();

            AscendC::LocalTensor<half> b1Local = inQueueB1.DeQue<half>();
            AscendC::LocalTensor<half> a2Local = inQueueA2.DeQue<half>();
            AscendC::LocalTensor<float> c2Local = outQueueCO2.AllocTensor<float>();
            // split matrix b into 2 parts, [32, 16] and [32, 16]
            for (int i = 0; i < n2 / 16; ++i) {
                SplitB2(b1Local, i);
                Compute2(a2Local);
                Aggregate2(c2Local, i);
            }
            inQueueB1.FreeTensor(b1Local);
            inQueueA2.FreeTensor(a2Local);
            outQueueCO2.EnQue<float>(c2Local);

            CopyOut2(k);
            PipeBarrier<PIPE_ALL>();
        }  

    }
    __aicore__ inline void Init2nd() {
        pipe.InitBuffer(inQueueA1, 1, a2Size * sizeof(half));
        pipe.InitBuffer(inQueueA2, 1, a2Size * sizeof(half));
        pipe.InitBuffer(inQueueB1, 1, b2Size * sizeof(half));
        pipe.InitBuffer(inQueueB2, 1, b2Size * sizeof(half) / 16);
        pipe.InitBuffer(outQueueCO1, 1, c2Size * sizeof(float) / 16);
        pipe.InitBuffer(outQueueCO2, 1, c2Size * sizeof(float));
    }
private:
    __aicore__ inline void CopyND2NZ(const AscendC::LocalTensor<half>& dst, const AscendC::GlobalTensor<half>& src,
        const uint16_t height, const uint16_t width)
    {
        for (int i = 0; i < width / 16; ++i) {
            int srcOffset = i * 16;
            int dstOffset = i * 16 * height;
            AscendC::DataCopy(dst[dstOffset], src[srcOffset], { height, 1, uint16_t(width / 16 - 1), 0 });
        }
    }
    __aicore__ inline void CopyIn(int32_t process)
    {
        AscendC::LocalTensor<half> a1Local = inQueueA1.AllocTensor<half>();
        AscendC::LocalTensor<half> b1Local = inQueueB1.AllocTensor<half>();

        CopyND2NZ(a1Local, queryGm[process * m * 512], m, 512);
        CopyND2NZ(a1Local[m * 512], queryRopeGM[process * m * 64], m, 64);
        // CopyND2NZ(b1Local, kvCacheGM, k, n);
        CopyND2NZ(b1Local, kvCacheGM[process * 512 * n], n, 512);
        CopyND2NZ(b1Local[n * 512], kvCacheRopeGM[process * 64 * n], n, 64);

        inQueueA1.EnQue(a1Local);
        inQueueB1.EnQue(b1Local);
    }
    __aicore__ inline void SplitA()
    {
        int srcOffset = 0;
        int dstOffset = 0;
        AscendC::LocalTensor<half> a1Local = inQueueA1.DeQue<half>();
        AscendC::LocalTensor<half> a2Local = inQueueA2.AllocTensor<half>();

        // transform nz to zz
        for (int i = 0; i < mBlocks; ++i) {
            AscendC::LoadData2DParams loadDataParams;
            loadDataParams.repeatTimes = kBlocks;
            loadDataParams.srcStride = mBlocks;
            loadDataParams.ifTranspose = false;

            AscendC::LoadData(a2Local[dstOffset], a1Local[srcOffset], loadDataParams);

            srcOffset += 16 * 16;
            dstOffset += k * 16;
        }

        inQueueA2.EnQue<half>(a2Local);
        inQueueA1.FreeTensor(a1Local);
    }
    __aicore__ inline void SplitB(const AscendC::LocalTensor<half>& b1Local, const int bSplitIdx)
    {
        AscendC::LocalTensor<half> b2Local = inQueueB2.AllocTensor<half>();

        // transform nz to zn
        AscendC::LoadData2DParams loadDataParams;
        // loadDataParams.repeatTimes = kBlocks;
        // loadDataParams.srcStride = 1;
        loadDataParams.repeatTimes = kBlocks;
        loadDataParams.srcStride = nBlocks;
        // loadDataParams.ifTranspose = true;
        loadDataParams.ifTranspose = false;

        // AscendC::LoadData(b2Local, b1Local[bSplitIdx * bSize / 2], loadDataParams);
        AscendC::LoadData(b2Local, b1Local[bSplitIdx * 16 * 16], loadDataParams);

        inQueueB2.EnQue<half>(b2Local);
    }
    __aicore__ inline void Compute(const AscendC::LocalTensor<half>& a2Local)
    {
        AscendC::LocalTensor<half> b2Local = inQueueB2.DeQue<half>();
        AscendC::LocalTensor<float> c1Local = outQueueCO1.AllocTensor<float>();

        AscendC::MmadParams mmadParams;
        mmadParams.m = m;
        mmadParams.n = n / 16;
        mmadParams.k = k;
        AscendC::Mmad(c1Local, a2Local, b2Local, mmadParams);

        outQueueCO1.EnQue<float>(c1Local);
        inQueueB2.FreeTensor(b2Local);
    }
    __aicore__ inline void Aggregate(const AscendC::LocalTensor<float>& c2Local, const int bSplitIdx)
    {
        AscendC::LocalTensor<float> c1Local = outQueueCO1.DeQue<float>();

        AscendC::DataCopyParams dataCopyParams;
        dataCopyParams.blockCount = 1;
        dataCopyParams.blockLen = 2;
        AscendC::DataCopyEnhancedParams enhancedParams;
        enhancedParams.blockMode = AscendC::BlockMode::BLOCK_MODE_MATRIX;
        AscendC::DataCopy(c2Local[bSplitIdx * cSize / 16], c1Local, dataCopyParams, enhancedParams);

        outQueueCO1.FreeTensor(c1Local);
    }
    __aicore__ inline void CopyOut(int32_t process)
    {
        AscendC::LocalTensor<float> c2Local = outQueueCO2.DeQue<float>();

        // transform nz to nd
        for (int i = 0; i < nBlocks; ++i) {
            AscendC::DataCopy(tmp_GM[process * m * n + i * 16], c2Local[i * m * 16], { m, 2, 0, uint16_t((nBlocks - 1) * 2) });
        }

        outQueueCO2.FreeTensor(c2Local);
    }

    __aicore__ inline void CopyInSx(int32_t startpos)
    {
        LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();      
        DataCopy(xLocal, tmp_GM[startpos], 256);
        inQueueX.EnQue(xLocal);
    }
    __aicore__ inline void CopyOutSx(int32_t startpos)
    {
        LocalTensor<half> yLocal = outQueueY.DeQue<half>();
        DataCopy(p_GM[startpos], yLocal, 256);
        outQueueY.FreeTensor(yLocal);
    }
    __aicore__ inline void ComputeSoftmax() 
    {
        LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        LocalTensor<half> yLocal = outQueueY.AllocTensor<half>();
        LocalTensor<float> workLocal = workQueue.AllocTensor<float>();
        LocalTensor<float> tmp1 = QueueTmp1.Get<float>();

        // Muls(xLocal, xLocal, this->tor, 256);
        Muls(xLocal, xLocal, float(0.0416667), 256);
        ReduceMax(tmp1, xLocal, workLocal, 256, false);
        float max_tmp = tmp1.GetValue(0);
        Adds(xLocal, xLocal, float(0 - max_tmp), 256);
        Exp(xLocal, xLocal, 256);
        ReduceSum(tmp1, xLocal, workLocal, 256);
        float sum_tmp = tmp1.GetValue(0);
        float sum_reci = 1 / sum_tmp;
        Muls(tmp1, xLocal, float(sum_reci), 256);
        Cast(yLocal, tmp1, RoundMode::CAST_NONE, 256);

        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);
        workQueue.FreeTensor(workLocal);
    }

    __aicore__ inline int RoundUp(int a, int b) 
    {
        return (a + b - 1) / b;
    }

    __aicore__ inline void CopyIn2(int32_t process)
    {
        AscendC::LocalTensor<half> a1Local = inQueueA1.AllocTensor<half>();
        AscendC::LocalTensor<half> b1Local = inQueueB1.AllocTensor<half>();

        CopyND2NZ(a1Local, p_GM[process * m2 * k2], m2, k2);
        CopyND2NZ(b1Local, kvCacheGM[process * k2 * n2], k2, n2);

        inQueueA1.EnQue(a1Local);
        inQueueB1.EnQue(b1Local);
    }
    __aicore__ inline void SplitA2()
    {
        int srcOffset = 0;
        int dstOffset = 0;
        AscendC::LocalTensor<half> a1Local = inQueueA1.DeQue<half>();
        AscendC::LocalTensor<half> a2Local = inQueueA2.AllocTensor<half>();

        // transform nz to zz
        for (int i = 0; i < m2Blocks; ++i) {
            AscendC::LoadData2DParams loadDataParams;
            loadDataParams.repeatTimes = k2Blocks;
            loadDataParams.srcStride = m2Blocks;
            loadDataParams.ifTranspose = false;

            AscendC::LoadData(a2Local[dstOffset], a1Local[srcOffset], loadDataParams);

            srcOffset += 16 * 16;
            dstOffset += k2 * 16;
        }

        inQueueA2.EnQue<half>(a2Local);
        inQueueA1.FreeTensor(a1Local);
    }

    __aicore__ inline void SplitB2(const AscendC::LocalTensor<half>& b1Local, const int bSplitIdx)
    {
        AscendC::LocalTensor<half> b2Local = inQueueB2.AllocTensor<half>();

        // transform nz to zn
        AscendC::LoadData2DParams loadDataParams;
        loadDataParams.repeatTimes = k2Blocks;
        loadDataParams.srcStride = 1;
        loadDataParams.ifTranspose = true;

        AscendC::LoadData(b2Local, b1Local[bSplitIdx * 256 * 16], loadDataParams);

        inQueueB2.EnQue<half>(b2Local);
    }

    __aicore__ inline void Compute2(const AscendC::LocalTensor<half>& a2Local)
    {
        AscendC::LocalTensor<half> b2Local = inQueueB2.DeQue<half>();
        AscendC::LocalTensor<float> c1Local = outQueueCO1.AllocTensor<float>();

        AscendC::MmadParams mmadParams;
        mmadParams.m = m2;
        mmadParams.n = n2 / 32;
        mmadParams.k = k2;
        AscendC::Mmad(c1Local, a2Local, b2Local, mmadParams);

        outQueueCO1.EnQue<float>(c1Local);
        inQueueB2.FreeTensor(b2Local);
    }
    __aicore__ inline void Aggregate2(const AscendC::LocalTensor<float>& c2Local, const int bSplitIdx)
    {
        AscendC::LocalTensor<float> c1Local = outQueueCO1.DeQue<float>();

        AscendC::DataCopyParams dataCopyParams;
        dataCopyParams.blockCount = 1;
        dataCopyParams.blockLen = 2;
        AscendC::DataCopyEnhancedParams enhancedParams;
        enhancedParams.blockMode = AscendC::BlockMode::BLOCK_MODE_MATRIX;
        AscendC::DataCopy(c2Local[bSplitIdx * c2Size / 32], c1Local, dataCopyParams, enhancedParams);

        outQueueCO1.FreeTensor(c1Local);
    }
    __aicore__ inline void CopyOut2(int32_t process)
    {
        AscendC::LocalTensor<float> c2Local = outQueueCO2.DeQue<float>();
        LocalTensor<half> tmp2 = QueueTmp2.Get<half>();
        Cast(tmp2, c2Local, RoundMode::CAST_NONE, 32 * 512);
        // transform nz to nd
        
        for (int i = 0; i < n2Blocks; ++i) {
            // AscendC::DataCopy(attenOutGM[process * m2 * n2 + i * 16], tmp2[i * m2 * 16], { m2, 2, 0, uint16_t((n2Blocks - 1) * 2) });
            AscendC::DataCopy(attenOutGM[process * m2 * n2 + i * 16], tmp2[i * m2 * 16], { m2, 1, 0, uint16_t((n2Blocks - 1) * 1) });
        }
        
        // for (int i = 0; i < 32; i++) {
        //     for (int z = 0; z < 32; z++) {
        //         DataCopy(attenOutGM[process * m2 * n2 + i * 16 + z * 512], tmp2[i * 32 * 16 + z * 16], 16);
        //     } 
        // }

        outQueueCO2.FreeTensor(c2Local);
    }    
private:
    AscendC::TPipe pipe;

    AscendC::TQue<AscendC::TPosition::A1, 1> inQueueA1;
    AscendC::TQue<AscendC::TPosition::A2, 1> inQueueA2;
    AscendC::TQue<AscendC::TPosition::B1, 1> inQueueB1;
    AscendC::TQue<AscendC::TPosition::B2, 2> inQueueB2;
    // dst queue
    AscendC::TQue<AscendC::TPosition::CO1, 2> outQueueCO1;
    AscendC::TQue<AscendC::TPosition::CO2, 1> outQueueCO2;

    AscendC::GlobalTensor<half> queryGm, queryRopeGM, kvCacheGM, kvCacheRopeGM;
    AscendC::GlobalTensor<half> attenOutGM;
    AscendC::GlobalTensor<half> p_GM;
    AscendC::GlobalTensor<float> tmp_GM;

    AscendC::TQue<QuePosition::VECIN, 1> inQueueX;
    AscendC::TQue<QuePosition::VECOUT, 1> outQueueY;
    AscendC::TQue<QuePosition::VECOUT, 1> workQueue;
    AscendC::TBuf<QuePosition::VECCALC> QueueTmp1, QueueTmp2;

    uint16_t m = 32;
    uint16_t n = 256;
    uint16_t k = 576;

    uint16_t m2 = 32;
    uint16_t n2 = 512;
    uint16_t k2 = 256;

    uint32_t tokens_per_core;

    uint16_t aSize, bSize, cSize, mBlocks, nBlocks, kBlocks;
    uint16_t a2Size, b2Size, c2Size, m2Blocks, n2Blocks, k2Blocks;

    float tor;
    uint32_t num_heads;
};

extern "C" __global__ __aicore__ void mla(GM_ADDR query, GM_ADDR queryRope, GM_ADDR kvCache, GM_ADDR kvCacheRope, GM_ADDR block_tables, GM_ADDR contextLens, GM_ADDR mask, GM_ADDR qSeqlen, GM_ADDR qkDescale, GM_ADDR pvDescale, GM_ADDR attenOut, GM_ADDR lseOut, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    KernelMla<half> op;
    op.Init(query, queryRope, kvCache, kvCacheRope, attenOut, workspace,
            tiling_data.num_batches,
            tiling_data.q_heads,
            tiling_data.block_size,
            tiling_data.total_block_num,
            tiling_data.tor);
    op.Process(); 
}

  