#include "kernel_operator.h"
#include "lib/matmul_intf.h"
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;
using namespace matmul;
// Ceiling division: smallest integer q such that q * b >= a.
// Unlike the classic (a + b - 1) / b form, this cannot wrap around when
// a + b - 1 exceeds UINT32_MAX. Precondition: b != 0.
__aicore__ inline uint32_t Ceiling(uint32_t a, uint32_t b)
{
    return a / b + (a % b != 0 ? 1u : 0u);
}
// Int8 grouped matmul kernel for exactly two token groups: group 0 owns rows
// [0, M1) and group 1 rows [M1, M1+M2). Each group has its own weight matrix,
// scalar bias and per-channel scales, packed back to back inside the shared
// weight/bias/scale inputs.
// Stage 1 (cube): int8 x int8 -> int32, results staged in a GM workspace.
// Stage 2 (vector): per output row, add the group's scalar bias, multiply by
// the group's channel scales and the row's per-token scale, round into
// DTYPE_OUT.
class GroupedMatmulKernel {
public:
    __aicore__ inline GroupedMatmulKernel(){};
    // Binds all GM tensors and sizes the UB queues/buffers.
    //   x ................ int8 activations; group 1 rows start at M1 * Ka
    //   weight ........... int8 weights; group 1 matrix starts at Kb * N
    //   bias ............. int32, one scalar per group (indices 0 and 1)
    //   scale ............ DTYPE_SCALE channel scales, N values per group
    //   per_token_scale .. float, one scale per output row
    //   out .............. DTYPE_OUT results, (M1+M2) x N
    //   workspace ........ GM scratch for the (M1+M2) x N int32 matmul output
    //   tiling ........... cube tiling parameters (M, Ka, Kb, N)
    //   M1, M2 ........... row counts of group 0 / group 1
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR weight, GM_ADDR bias, GM_ADDR scale, GM_ADDR per_token_scale, GM_ADDR out, 
                                                                                GM_ADDR workspace, const TCubeTiling &tiling, int32_t M1, int32_t M2, TPipe *pipe)
    {
        this->tiling = tiling;
        this->M1 = M1;
        this->M2 = M2;
        x1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ int8_t *>(x), tiling.M * tiling.Ka);
        weight1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ int8_t *>(weight), tiling.Kb * tiling.N);
        work1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t *>(workspace), (this->M1+this->M2) * tiling.N);
        // Group-1 views: activations after the first M1 rows, weights after
        // the first Kb x N matrix.
        x2Gm.SetGlobalBuffer(reinterpret_cast<__gm__ int8_t *>(x) + this->M1 * tiling.Ka);
        weight2Gm.SetGlobalBuffer(reinterpret_cast<__gm__ int8_t *>(weight) + tiling.Kb * tiling.N);
        biasGm.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t *>(bias));
        scaleGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_SCALE *>(scale));
        tokenGm.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(per_token_scale));
        outGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_OUT *>(out));
        // N rounded up to a multiple of 32 elements; used as the element count
        // for the whole-row scale DataCopy and for sizing the UB buffers.
        this->N_32 = (tiling.N + 31)/32*32;
        this->N = tiling.N;
        pipe->InitBuffer(inQueueWork, BUFFER_NUM, this->N_32 * sizeof(int32_t)); 
        pipe->InitBuffer(inQueueScale, BUFFER_NUM, this->N_32 * sizeof(DTYPE_SCALE));
        pipe->InitBuffer(outQueueOut, BUFFER_NUM, this->N_32 * sizeof(DTYPE_OUT));
        pipe->InitBuffer(QueueTmp1, this->N_32 * sizeof(float));
        // Nothing to undo at this point, so simply bail out if the system
        // workspace was never registered.
        if (GetSysWorkSpacePtr() == nullptr) {
            return;
        }
    }
    // Decodes a tensor-list descriptor: the first uint64 holds the byte offset
    // from the descriptor base to the data-pointer table; returns the first
    // entry of that table, i.e. the first tensor's GM address.
    __aicore__ inline __gm__ void* ListTensorDecodeData(__gm__ void* data)
    {
        __gm__ uint64_t* dataAddr = reinterpret_cast<__gm__ uint64_t *>(data);
        uint64_t dataPtrOffset = *dataAddr;  
        // >> 3 converts the byte offset into a uint64 index.
        __gm__ uint64_t* dataPtr_ = dataAddr + (dataPtrOffset >> 3);  
        return reinterpret_cast<__gm__ void *>(*(dataPtr_));
    }
    // Full pipeline: run both group matmuls into the GM workspace, then
    // dequantize row by row (group 0 first, then group 1).
    __aicore__ inline void Process(TPipe *pipe)
    {
            // Group 0: [M1, Ka] x [Ka, N] -> workspace rows [0, M1).
            matmulObj.SetOrgShape(this->M1, this->N, tiling.Ka);
            matmulObj.SetTensorA(x1Gm);
            matmulObj.SetTensorB(weight1Gm);
            matmulObj.IterateAll(work1Gm);
        // Group 1: results land in workspace rows [M1, M1+M2).
        // NOTE(review): the K dimension passed here is tiling.Ka while the
        // group-1 weight offset used tiling.Kb -- this assumes Ka == Kb; confirm.
        matmulObj.SetOrgShape(this->M2, this->N, tiling.Ka);
        matmulObj.SetTensorA(x2Gm);
        matmulObj.SetTensorB(weight2Gm);
        matmulObj.IterateAll(work1Gm[this->M1 * tiling.N]);
        LocalTensor<DTYPE_SCALE> scaleLocal;
        // Dequantize group 0 (skipped entirely when it has no rows).
        if(this->M1 != 0)
        {
            // Stage the group's channel scales in UB; the EnQue/DeQue round
            // trip synchronizes the copy with the vector reads below.
            scaleLocal = inQueueScale.AllocTensor<DTYPE_SCALE>();
            DataCopy(scaleLocal, scaleGm, this->N_32);
            inQueueScale.EnQue(scaleLocal);
            scaleLocal = inQueueScale.DeQue<DTYPE_SCALE>();
            int32_t bias1 = biasGm.GetValue(0);  // group 0's scalar bias
            if constexpr (std::is_same_v<DTYPE_SCALE, float>)
            {
                // Scales are already float: feed them to Compute directly.
                for(int32_t i=0; i<this->M1; i++)
                {
                    float tokens = tokenGm.GetValue(i);
                    CopyIn(i);
                    Compute(i, scaleLocal.ReinterpretCast<float>(), bias1, tokens);
                    CopyOut(i);
                }
            }
            else
            {
                // Non-float scales: convert once to float into tmp1, reuse per row.
                auto tmp1 = QueueTmp1.Get<float>();
                Cast(tmp1, scaleLocal, RoundMode::CAST_NONE, this->N);
                for(int32_t i=0; i<this->M1; i++)
                {
                    float tokens = tokenGm.GetValue(i);
                    CopyIn(i);
                    Compute(i, tmp1, bias1, tokens);
                    CopyOut(i);
                }
            }
            inQueueScale.FreeTensor(scaleLocal); 
        }
        // Dequantize group 1 (same structure as group 0).
        // NOTE(review): unlike group 0 this path is not guarded by M2 != 0 --
        // the scale copy and bias read execute even for an empty group (the
        // row loops themselves do nothing).
        // NOTE(review): the copy reads N_32 elements starting at element N, so
        // up to N_32 - N elements past group 1's scales are read when N is not
        // a multiple of 32 -- confirm the scale buffer is padded accordingly.
        scaleLocal = inQueueScale.AllocTensor<DTYPE_SCALE>();
        DataCopy(scaleLocal, scaleGm[this->N], this->N_32);
        inQueueScale.EnQue(scaleLocal);
        scaleLocal = inQueueScale.DeQue<DTYPE_SCALE>();
        int32_t bias2 = biasGm.GetValue(1);  // group 1's scalar bias
        if constexpr (std::is_same_v<DTYPE_SCALE, float>)
        {
            for(int32_t i=this->M1; i<this->M1+this->M2; i++)
            {
                float tokens = tokenGm.GetValue(i);
                CopyIn(i);
                Compute(i, scaleLocal.ReinterpretCast<float>(), bias2, tokens);
                CopyOut(i);
            }
        }
        else
        {
            auto tmp1 = QueueTmp1.Get<float>();
            Cast(tmp1, scaleLocal, RoundMode::CAST_NONE, this->N);
            for(int32_t i=this->M1; i<this->M1+this->M2; i++)
            {
                float tokens = tokenGm.GetValue(i);
                CopyIn(i);
                Compute(i, tmp1, bias2, tokens);
                CopyOut(i);
            }
        }
        inQueueScale.FreeTensor(scaleLocal); 
matmulObj.End();
    }
    // Loads workspace row `progress` (N int32 values) into UB.
    __aicore__ inline void CopyIn(int32_t progress) {
        // DataCopyPad moves an exact byte count, so the unpadded row length
        // N * sizeof(int32_t) can be used directly here.
        DataCopyExtParams copyParams = {1, static_cast<uint32_t>(this->N * sizeof(int32_t)), 0, 0, 0};
        DataCopyPadExtParams<int32_t> padParams{false, 0, 0, 0};
        LocalTensor<int32_t> work1Local = inQueueWork.AllocTensor<int32_t>();
        DataCopyPad(work1Local, work1Gm[progress * this->N], copyParams, padParams);
        inQueueWork.EnQue(work1Local);
    }
    // Dequantizes one staged row: out = (row + biass) * scaleLocal * tokens,
    // rounded into DTYPE_OUT. `progress` is unused here (row addressing is
    // done in CopyIn/CopyOut).
    __aicore__ inline void Compute(int32_t progress, LocalTensor<float> scaleLocal, int32_t biass, float tokens)
    {
        LocalTensor<DTYPE_OUT> outLocal = outQueueOut.AllocTensor<DTYPE_OUT>();
        LocalTensor<int32_t> work1Local = inQueueWork.DeQue<int32_t>();
        // Bias is applied while still in the int32 domain.
        Adds(work1Local, work1Local, biass, this->N);
        // In-place int32 -> float conversion (same UB buffer, reinterpreted).
        Cast(work1Local.ReinterpretCast<float>(), work1Local, RoundMode::CAST_NONE, this->N);
        Mul(work1Local.ReinterpretCast<float>(), work1Local.ReinterpretCast<float>(), scaleLocal, this->N);   // per-channel scale
        Muls(work1Local.ReinterpretCast<float>(), work1Local.ReinterpretCast<float>(), tokens, this->N);      // per-token scale
        // CAST_RINT: round to nearest (ties to even) into the output dtype.
        Cast(outLocal, work1Local.ReinterpretCast<float>(), RoundMode::CAST_RINT, this->N);
        outQueueOut.EnQue<DTYPE_OUT>(outLocal);
        inQueueWork.FreeTensor(work1Local);
    }
    // Writes the finished row `progress` (N elements) back to the output GM.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        LocalTensor<DTYPE_OUT> outLocal = outQueueOut.DeQue<DTYPE_OUT>();
        DataCopyExtParams copyParams = {1, static_cast<uint32_t>(this->N * sizeof(DTYPE_OUT)), 0, 0, 0};
        DataCopyPad(outGm[progress * this->N], outLocal, copyParams);
        outQueueOut.FreeTensor(outLocal);
    }
public:
    // int8 x int8 -> int32 matmul; all operands ND-format in GM, MDL config.
    Matmul<MatmulType<TPosition::GM, CubeFormat::ND, int8_t>, 
           MatmulType<TPosition::GM, CubeFormat::ND, int8_t>,
           MatmulType<TPosition::GM, CubeFormat::ND, int32_t>, 
           MatmulType<TPosition::GM, CubeFormat::ND, int32_t>, CFG_MDL>
        matmulObj;
    GlobalTensor<int8_t> x1Gm, x2Gm;            // activations, group 0 / group 1
    GlobalTensor<int8_t> weight1Gm, weight2Gm;  // weights, group 0 / group 1
    GlobalTensor<int32_t> biasGm;               // one scalar bias per group
    GlobalTensor<DTYPE_SCALE> scaleGm;          // channel scales, N per group
    GlobalTensor<float> tokenGm;                // per-row token scales
    GlobalTensor<DTYPE_OUT> outGm;
    GlobalTensor<int32_t> work1Gm, work2Gm;     // work2Gm is never used
    // inQueueBias and inQueueToken are declared but never initialized or used.
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueBias, inQueueScale, inQueueToken, inQueueWork;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueOut;
    // Only QueueTmp1 is used (float copy of the scales); QueueTmp2/boox are not.
    TBuf<QuePosition::VECCALC> QueueTmp1, QueueTmp2,boox;
    int32_t N;      // output columns
    int32_t N_32;   // N rounded up to a multiple of 32
    int32_t M1;     // rows in group 0
    int32_t M2;     // rows in group 1
    TCubeTiling tiling;
};
// Kernel entry point: unpacks the tiling blob, decodes every tensor-list
// argument down to a raw GM address, and hands control to GroupedMatmulKernel.
// group_list is accepted for the operator signature but not consumed here:
// the group split comes from tiling M1/M2.
extern "C" __global__ __aicore__ void grouped_matmul(GM_ADDR x, GM_ADDR weight, GM_ADDR bias, GM_ADDR scale, GM_ADDR per_token_scale, GM_ADDR group_list, GM_ADDR out, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    GroupedMatmulKernel kernel;
    TPipe pipe;
    // Wire the matmul object to the pipe and the system workspace.
    REGIST_MATMUL_OBJ(&pipe, GetSysWorkSpacePtr(), kernel.matmulObj, &tiling_data.cubeTilingData); 
    // Each argument is a list-tensor descriptor; pull out the first tensor's
    // data pointer from every one the kernel actually reads or writes.
    GM_ADDR xData = (GM_ADDR)kernel.ListTensorDecodeData(reinterpret_cast<__gm__ void *>(x));
    GM_ADDR weightData = (GM_ADDR)kernel.ListTensorDecodeData(reinterpret_cast<__gm__ void *>(weight));
    GM_ADDR biasData = (GM_ADDR)kernel.ListTensorDecodeData(reinterpret_cast<__gm__ void *>(bias));
    GM_ADDR scaleData = (GM_ADDR)kernel.ListTensorDecodeData(reinterpret_cast<__gm__ void *>(scale));
    GM_ADDR tokenScaleData = (GM_ADDR)kernel.ListTensorDecodeData(reinterpret_cast<__gm__ void *>(per_token_scale));
    GM_ADDR outData = (GM_ADDR)kernel.ListTensorDecodeData(reinterpret_cast<__gm__ void *>(out));
    kernel.Init(xData, weightData, biasData, scaleData, tokenScaleData, outData,
                GetSysWorkSpacePtr(), tiling_data.cubeTilingData, tiling_data.M1, tiling_data.M2, &pipe);
    kernel.Process(&pipe);
}