#include "kernel_operator.h"
#include "lib/matmul_intf.h"

using namespace matmul;

using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;      // double buffering for the vector in/out queues
constexpr int32_t MAXIMUM_NUM = 10240; // elements per vector-copy tile

/// Ceiling division: smallest q such that q * b >= a.
/// Rewritten from (a + b - 1) / b to avoid unsigned wrap-around when
/// a + b - 1 would exceed UINT32_MAX. As before, b must be non-zero.
__aicore__ inline uint32_t Ceiling(uint32_t a, uint32_t b)
{
    return (a == 0) ? 0 : ((a - 1) / b + 1);
}
// Batched complex matrix-multiply kernel.  Each complex matmul is decomposed
// into four real matmuls (matmulObj1..4) whose partial products are recombined
// into an interleaved (re, im) output.  Input batch dimensions may be
// broadcast against the output shape; see get_broadcasted_index().
template<typename aType, typename bType, typename cType, typename biasType> class KernelMatMul {
public:
    __aicore__ inline KernelMatMul(){};
    // Binds the user tensors (a, b, bias, c), carves this core's scratch
    // planes out of `workspace`, and initializes the vector-copy queues.
     __aicore__ inline void Init(GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace,
                                const TCubeTiling &tiling, TPipe *pipe);
    // Computes output batch `number`: de-interleaves the complex inputs, runs
    // the four real matmuls, optionally seeds the output with bias, and writes
    // the recombined interleaved result.  `c_dim` and the `*_shape` arrays
    // describe the batch dimensions of each tensor.
    template <bool hasBias = false>
    __aicore__ inline void Process(int32_t number, int32_t c_dim, int32_t *a_shape, int32_t* b_shape, int32_t* bias_shape, int32_t* c_shape);
    // Computes this core's element offsets into A/B/C/bias and configures the
    // matmul objects' tail sizes (also sets members tailM / tailN).
    __aicore__ inline void CalcOffset(int32_t blockIdx, const TCubeTiling &tiling, int32_t &offsetA, int32_t &offsetB,
                                      int32_t &offsetC, int32_t &offsetBias);

    // Maps a flat index into the broadcast (output) batch shape back to the
    // flat index in `original_shape`, where broadcast dimensions have size 1.
    // Supports at most 8 dimensions (size of the local `shape` buffer).
    __aicore__ inline uint32_t get_broadcasted_index(int32_t indices, int32_t *original_shape, int32_t *broadcast_shape, int32_t ndim)
    {
        int32_t shape[8];            // per-dimension coordinate in the original tensor
        int32_t original_indices = 0;
        int i =0;
        // Walk dimensions innermost-first, extracting each coordinate.
        // Broadcast (size-1) dims always map to coordinate 0.
        for(i=ndim-1; i>=0; i--)
        {
            shape[i] = 0;
            if(original_shape[i]!=1)
            {
                shape[i] = indices % broadcast_shape[i];
            }
            indices /= broadcast_shape[i];
            if(indices == 0) break;   // remaining outer coordinates are all 0
        }
        // NOTE(review): if the loop above finishes without breaking (index out
        // of range for the broadcast shape), i == -1 and shape[-1] is read —
        // callers must pass an in-range flat index.
        // Fold the coordinates back into a row-major flat index using the
        // original shape; starts at the break position since skipped outer
        // coordinates would contribute nothing.
        for(; i<ndim; i++)
        {
            original_indices = shape[i] + original_indices * original_shape[i];
        }
        return original_indices;
    }

    // Four matmul engines, one per partial product of the complex multiply:
    // obj1: a_re*b_re, obj2: a_im*b_im, obj3: a_re*b_im, obj4: a_im*b_re.
    Matmul<MatmulType<TPosition::GM, CubeFormat::ND, aType>, 
           MatmulType<TPosition::GM, CubeFormat::ND, bType>,
           MatmulType<TPosition::GM, CubeFormat::ND, cType>, 
           MatmulType<TPosition::GM, CubeFormat::ND, biasType>>
           matmulObj1, matmulObj2, matmulObj3, matmulObj4;
    
    // Per-core workspace planes: de-interleaved real (a1/b1) and imaginary
    // (a2/b2) input copies, and the four real partial products c1..c4.
    GlobalTensor<aType> a1Gm;
    GlobalTensor<aType> a2Gm;
    GlobalTensor<bType> b1Gm;
    GlobalTensor<bType> b2Gm;
    GlobalTensor<cType> c1Gm;
    GlobalTensor<cType> c2Gm;
    GlobalTensor<cType> c3Gm;
    GlobalTensor<cType> c4Gm;

    // Views of the current batch inside the user tensors (set per Process call).
    GlobalTensor<aType> aGm;
    GlobalTensor<bType> bGm;
    GlobalTensor<cType> cGm;
    GlobalTensor<biasType> biasGm;
    
    // Base views of the user-supplied tensors (set once in Init).
    GlobalTensor<aType> a0Gm;
    GlobalTensor<bType> b0Gm;
    GlobalTensor<cType> c0Gm;
    GlobalTensor<biasType> bias0Gm;


    // Double-buffered vector queues used for de-interleave and recombine copies.
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueue;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueue;

    // Copy of the cube tiling parameters (stored by Init).
    TCubeTiling tiling;

    // Tail (edge-tile) sizes along M and N, computed by CalcOffset.
    int tailM;
    int tailN;
};

template <typename aType, typename bType, typename cType, typename biasType>
__aicore__ inline void KernelMatMul<aType, bType, cType, biasType>::Init(GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c,
                                        GM_ADDR workspace, const TCubeTiling &tiling, TPipe *pipe)
{
    this->tiling = tiling;

    // Element counts of one de-interleaved plane of each operand.
    const auto aLen = tiling.M * tiling.Ka;
    const auto bLen = tiling.Kb * tiling.N;
    const auto cLen = tiling.M * tiling.N;

    // User-supplied tensors in global memory.
    a0Gm.SetGlobalBuffer(reinterpret_cast<__gm__ aType *>(a), aLen);
    b0Gm.SetGlobalBuffer(reinterpret_cast<__gm__ bType *>(b), bLen);
    c0Gm.SetGlobalBuffer(reinterpret_cast<__gm__ cType *>(c), cLen);
    bias0Gm.SetGlobalBuffer(reinterpret_cast<__gm__ biasType *>(bias), tiling.N);

    // Per-core scratch layout inside `workspace` (element offsets):
    //   [ a1 | a2 | b1 | b2 | c1 | c2 | c3 | c4 ]
    // i.e. the de-interleaved real/imag planes of A and B followed by the
    // four real partial-product planes.
    const auto base = GetBlockIdx() * (aLen + bLen + cLen * 2) * 2;
    a1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ aType *>(workspace) + base, aLen);
    a2Gm.SetGlobalBuffer(reinterpret_cast<__gm__ aType *>(workspace) + base + aLen, aLen);
    b1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ bType *>(workspace) + base + aLen * 2, bLen);
    b2Gm.SetGlobalBuffer(reinterpret_cast<__gm__ bType *>(workspace) + base + aLen * 2 + bLen, bLen);
    c1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ cType *>(workspace) + base + aLen * 2 + bLen * 2, cLen);
    c2Gm.SetGlobalBuffer(reinterpret_cast<__gm__ cType *>(workspace) + base + aLen * 2 + bLen * 2 + cLen, cLen);
    c3Gm.SetGlobalBuffer(reinterpret_cast<__gm__ cType *>(workspace) + base + aLen * 2 + bLen * 2 + cLen * 2, cLen);
    c4Gm.SetGlobalBuffer(reinterpret_cast<__gm__ cType *>(workspace) + base + aLen * 2 + bLen * 2 + cLen * 3, cLen);

    // Double-buffered staging queues for the vector copy stages.
    pipe->InitBuffer(inQueue, BUFFER_NUM, MAXIMUM_NUM * sizeof(float));
    pipe->InitBuffer(outQueue, BUFFER_NUM, MAXIMUM_NUM * sizeof(float));

    // NOTE(review): this check runs after all setup and the function returns
    // immediately afterwards anyway, so it currently has no effect; kept for
    // parity with the original control flow.
    if (GetSysWorkSpacePtr() == nullptr) {
        return;
    }
}

template <typename aType, typename bType, typename cType, typename biasType>
template <bool hasBias>
__aicore__ inline void KernelMatMul<aType, bType, cType, biasType>::Process(int32_t number, int32_t c_dim, int32_t *a_shape, int32_t* b_shape, int32_t* bias_shape, int32_t* c_shape)
{
    // Computes one output batch (`number`) of a broadcast-batched complex
    // matmul.  Tensors hold interleaved complex data (re, im, re, im, ...),
    // which is why element counts below carry a "*2" factor.  The complex
    // product is decomposed into four real matmuls:
    //   c1 = a_re*b_re, c2 = a_im*b_im, c3 = a_re*b_im, c4 = a_im*b_re
    // and recombined as real = c1 - c2, imag = c3 + c4.
    {
        // Select this batch's slices of the user tensors; the output batch
        // index is mapped back through broadcasting to each input's index.
        aGm = a0Gm[get_broadcasted_index(number, a_shape, c_shape, c_dim) * tiling.M * tiling.Ka*2];
        bGm = b0Gm[get_broadcasted_index(number, b_shape, c_shape, c_dim) * tiling.Kb * tiling.N*2];
        cGm = c0Gm[number * tiling.M * tiling.N*2];
        if constexpr (hasBias)
        {
            // NOTE(review): the stride here is tiling.M * tiling.N * 2, but
            // bias0Gm was registered with only tiling.N elements in Init, and
            // the bias copy loop below also reads M*N*2 elements per batch —
            // confirm the intended bias tensor size.
            biasGm = bias0Gm[get_broadcasted_index(number, bias_shape, c_shape, c_dim) * tiling.M * tiling.N*2];
        }
    }

    {
        // --- Stage 1: de-interleave A and B into real/imag workspace planes. ---
        LocalTensor<float> inLocal;
        LocalTensor<float> outLocal1;
        LocalTensor<float> outLocal2;
        DataCopyExtParams copyParams{1, static_cast<uint16_t>(MAXIMUM_NUM * sizeof(float)), 0, 0, 0}; // the last field of DataCopyExtParams is a reserved (rsv) field
        DataCopyPadExtParams<float> padParams{false, 0, 0, 0};

        // Tile A (M*Ka complex values = M*Ka*2 floats) into MAXIMUM_NUM-sized
        // chunks; `wei` is the length of the final (possibly partial) chunk.
        int loop = (tiling.M * tiling.Ka*2) / MAXIMUM_NUM;
        int wei = (tiling.M * tiling.Ka*2) % MAXIMUM_NUM;
        if(wei != 0) {loop += 1;}
        else {wei = MAXIMUM_NUM;}
        int tileLength = MAXIMUM_NUM;
        for(int i=0;i<loop;i++)
        {
            if(i == (loop-1))
            {
                tileLength = wei;
            }
            // Load one interleaved chunk of A into local memory.
            copyParams.blockLen = tileLength * sizeof(float);
            inLocal = inQueue.AllocTensor<float>();
            DataCopyPad(inLocal, aGm[MAXIMUM_NUM*i], copyParams, padParams); 
            inQueue.EnQue(inLocal);

            inLocal = inQueue.DeQue<float>();
            outLocal1 = outQueue.AllocTensor<float>();
            // Second half of the same output buffer holds the imaginary parts.
            outLocal2 = outLocal1[MAXIMUM_NUM/2];;
            uint64_t rsvdCnt = 0;
            // GatherMask built-in pattern 1 keeps even-indexed elements (real
            // parts); pattern 2 keeps odd-indexed elements (imag parts).
            // rsvdCnt receives the number of gathered elements (unused here).
            GatherMask(outLocal1, inLocal, 1, false, 0, { 1, static_cast<uint16_t>((tileLength+63)/64), 8, 8 }, rsvdCnt);
            GatherMask(outLocal2, inLocal, 2, false, 0, { 1, static_cast<uint16_t>((tileLength+63)/64), 8, 8 }, rsvdCnt);
            inQueue.FreeTensor(inLocal);
            outQueue.EnQue<float>(outLocal1);

            // Each output plane receives half of the interleaved chunk.
            copyParams.blockLen = tileLength / 2 * sizeof(float);
            outLocal1 = outQueue.DeQue<float>();
            outLocal2 = outLocal1[MAXIMUM_NUM/2];;
            DataCopyPad(a1Gm[MAXIMUM_NUM/2*i], outLocal1, copyParams);
            DataCopyPad(a2Gm[MAXIMUM_NUM/2*i], outLocal2, copyParams);
            outQueue.FreeTensor(outLocal1);
        }

        // Same de-interleave pass for B (Kb*N complex values).
        loop = (tiling.Kb * tiling.N*2) / MAXIMUM_NUM;
        wei = (tiling.Kb * tiling.N*2) % MAXIMUM_NUM;
        if(wei != 0) {loop += 1;}
        else {wei = MAXIMUM_NUM;}
        tileLength = MAXIMUM_NUM;
        for(int i=0;i<loop;i++)
        {
            if(i == (loop-1))
            {
                tileLength = wei;
            }
            copyParams.blockLen = tileLength * sizeof(float);
            inLocal = inQueue.AllocTensor<float>();
            DataCopyPad(inLocal, bGm[MAXIMUM_NUM*i], copyParams, padParams); 
            inQueue.EnQue(inLocal);

            inLocal = inQueue.DeQue<float>();
            outLocal1 = outQueue.AllocTensor<float>();
            outLocal2 = outLocal1[MAXIMUM_NUM/2];;
            uint64_t rsvdCnt = 0;
            GatherMask(outLocal1, inLocal, 1, false, 0, { 1, static_cast<uint16_t>((tileLength+63)/64), 8, 8 }, rsvdCnt);
            GatherMask(outLocal2, inLocal, 2, false, 0, { 1, static_cast<uint16_t>((tileLength+63)/64), 8, 8 }, rsvdCnt);
            inQueue.FreeTensor(inLocal);
            outQueue.EnQue<float>(outLocal1);

            copyParams.blockLen = tileLength / 2 * sizeof(float);
            outLocal1 = outQueue.DeQue<float>();
            outLocal2 = outLocal1[MAXIMUM_NUM/2];;
            DataCopyPad(b1Gm[MAXIMUM_NUM/2*i], outLocal1, copyParams);
            DataCopyPad(b2Gm[MAXIMUM_NUM/2*i], outLocal2, copyParams);
            outQueue.FreeTensor(outLocal1);
        }
    }


    // --- Stage 2: the four real matmuls into workspace planes c1..c4. ---
    // c1 = a_re*b_re, c2 = a_im*b_im, c3 = a_re*b_im, c4 = a_im*b_re.
    matmulObj1.SetTensorA(a1Gm);
    matmulObj1.SetTensorB(b1Gm);

    matmulObj2.SetTensorA(a2Gm);
    matmulObj2.SetTensorB(b2Gm);

    matmulObj3.SetTensorA(a1Gm);
    matmulObj3.SetTensorB(b2Gm);

    matmulObj4.SetTensorA(a2Gm);
    matmulObj4.SetTensorB(b1Gm);
    matmulObj1.template IterateAll<true>(c1Gm);
    matmulObj1.End();
    
    matmulObj2.template IterateAll<true>(c2Gm);
    matmulObj2.End();

    matmulObj3.template IterateAll<true>(c3Gm);
    matmulObj3.End();

    matmulObj4.template IterateAll<true>(c4Gm);
    matmulObj4.End();

    if constexpr (hasBias)
    {
        {
            // --- Stage 3 (bias only): seed cGm with the bias values, then
            // enable atomic-add so stage 4's result write accumulates onto
            // them instead of overwriting. ---
            LocalTensor<float> inLocal;
            LocalTensor<float> outLocal;
            DataCopyExtParams copyParams{1, static_cast<uint16_t>(MAXIMUM_NUM * sizeof(float)), 0, 0, 0}; // the last field of DataCopyExtParams is a reserved (rsv) field
            DataCopyPadExtParams<float> padParams{false, 0, 0, 0};

            int loop = (tiling.M * tiling.N*2) / MAXIMUM_NUM;
            int wei = (tiling.M * tiling.N*2) % MAXIMUM_NUM;
            if(wei != 0) {loop += 1;}
            else {wei = MAXIMUM_NUM;}
            int tileLength = MAXIMUM_NUM;
            for(int i=0;i<loop;i++)
            {
                if(i == (loop-1))
                {
                    tileLength = wei;
                }
                copyParams.blockLen = tileLength * sizeof(float);
                inLocal = inQueue.AllocTensor<float>();
                DataCopyPad(inLocal, biasGm[MAXIMUM_NUM*i], copyParams, padParams); 
                inQueue.EnQue(inLocal);

                inLocal = inQueue.DeQue<float>();
                outLocal = outQueue.AllocTensor<float>();
                // Add 0: used purely as a copy between the two queues.
                Adds(outLocal, inLocal, (float)0, tileLength);
                inQueue.FreeTensor(inLocal);
                outQueue.EnQue<float>(outLocal);

                outLocal = outQueue.DeQue<float>();
                DataCopyPad(cGm[MAXIMUM_NUM*i], outLocal, copyParams);
                outQueue.FreeTensor(outLocal);
            }
            // From here on, GM writes to cGm accumulate (float atomic add).
            SetAtomicAdd<float>();
        }
    }
    {
        // --- Stage 4: recombine c1..c4 into interleaved complex output. ---
        // Each iteration loads matching MAXIMUM_NUM/4-element chunks of all
        // four product planes into quarters of one input buffer.
        LocalTensor<float> inLocal1, inLocal2, inLocal3, inLocal4;
        LocalTensor<float> outLocal;
        DataCopyExtParams copyParams{1, static_cast<uint16_t>(MAXIMUM_NUM * sizeof(float)), 0, 0, 0}; // the last field of DataCopyExtParams is a reserved (rsv) field
        DataCopyPadExtParams<float> padParams{false, 0, 0, 0};

        int loop = (tiling.M * tiling.N) / (MAXIMUM_NUM/4);
        int wei = (tiling.M * tiling.N) % (MAXIMUM_NUM/4);
        if(wei != 0) {loop += 1;}
        else {wei = MAXIMUM_NUM/4;}
        int tileLength = MAXIMUM_NUM/4;
        for(int i=0;i<loop;i++)
        {
            if(i == (loop-1))
            {
                tileLength = wei;
            }
            copyParams.blockLen = tileLength * sizeof(float);
            inLocal1 = inQueue.AllocTensor<float>();
            inLocal2 = inLocal1[MAXIMUM_NUM/4];
            inLocal3 = inLocal1[MAXIMUM_NUM/4*2];
            inLocal4 = inLocal1[MAXIMUM_NUM/4*3];
            DataCopyPad(inLocal1, c1Gm[MAXIMUM_NUM/4*i], copyParams, padParams); 
            DataCopyPad(inLocal2, c2Gm[MAXIMUM_NUM/4*i], copyParams, padParams); 
            DataCopyPad(inLocal3, c3Gm[MAXIMUM_NUM/4*i], copyParams, padParams); 
            DataCopyPad(inLocal4, c4Gm[MAXIMUM_NUM/4*i], copyParams, padParams); 
            inQueue.EnQue(inLocal1);

            inLocal1 = inQueue.DeQue<float>();
            inLocal2 = inLocal1[MAXIMUM_NUM/4];
            inLocal3 = inLocal1[MAXIMUM_NUM/4*2];
            inLocal4 = inLocal1[MAXIMUM_NUM/4*3];
            outLocal = outQueue.AllocTensor<float>();
            // Real part: c1 - c2 (kept in inLocal1); imag part: c3 + c4
            // (kept in inLocal2, overwriting the now-consumed c2 chunk).
            Sub(inLocal1, inLocal1, inLocal2, tileLength);
            Add(inLocal2, inLocal3, inLocal4, tileLength);
            // Build per-lane gather offsets 0,2,4,... (indices 0..2t-1 scaled
            // by 2) in the c3 quarter, which is free after the Add above.
            CreateVecIndex(inLocal3.ReinterpretCast<int32_t>(), 0, tileLength*2);
            Muls(inLocal3.ReinterpretCast<int32_t>(), inLocal3.ReinterpretCast<int32_t>(), 2, tileLength*2);
            
            
            // mask1 selects the even output lanes (real parts), mask2 the odd
            // lanes (imag parts); the Adds(-2) shifts the offsets so odd lanes
            // pull the matching imaginary element.  Together the two Gathers
            // interleave (re, im) pairs into outLocal.
            uint64_t mask1[2] = { 0x5555555555555555, 0x5555555555555555};
            uint64_t mask2[2] = { 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA};
            Gather(outLocal, inLocal1, inLocal3.ReinterpretCast<uint32_t>(), (uint32_t)0, mask1,  (tileLength*2 +63)/64, 8);
            Adds(inLocal3.ReinterpretCast<int32_t>(), inLocal3.ReinterpretCast<int32_t>(), -2, tileLength*2);
            Gather(outLocal, inLocal2, inLocal3.ReinterpretCast<uint32_t>(), (uint32_t)0, mask2,  (tileLength*2 +63)/64, 8);
            inQueue.FreeTensor(inLocal1);
            outQueue.EnQue<float>(outLocal);

            // Output chunk is twice the per-plane chunk (re + im interleaved).
            // With bias this write accumulates (atomic add); otherwise it is a
            // plain store.
            copyParams.blockLen = tileLength*2 * sizeof(float);
            outLocal = outQueue.DeQue<float>();
            DataCopyPad(cGm[MAXIMUM_NUM/2*i], outLocal, copyParams);
            outQueue.FreeTensor(outLocal);
        }

        // Reset atomic mode (harmless no-op when atomic add was never enabled).
        SetAtomicNone();
    }
}

template <typename aType, typename bType, typename cType, typename biasType>
__aicore__ inline void KernelMatMul<aType, bType, cType, biasType>::CalcOffset(int32_t blockIdx, const TCubeTiling &tiling,
                                             int32_t &offsetA, int32_t &offsetB, int32_t &offsetC, int32_t &offsetBias)
{
    // Cores are laid out over the M dimension first: consecutive block
    // indices walk down M, then move to the next column of N.
    const auto blocksAlongM = Ceiling(tiling.M, tiling.singleCoreM);
    const auto mIdx = blockIdx % blocksAlongM;
    const auto nIdx = blockIdx / blocksAlongM;

    // Element offsets of this core's sub-tile inside the full matrices.
    offsetA = mIdx * tiling.Ka * tiling.singleCoreM;
    offsetB = nIdx * tiling.singleCoreN;
    offsetC = mIdx * tiling.N * tiling.singleCoreM + nIdx * tiling.singleCoreN;
    offsetBias = nIdx * tiling.singleCoreN;

    // Clamp the edge tiles to what is actually left of M and N.
    tailM = tiling.M - mIdx * tiling.singleCoreM;
    if (tailM > tiling.singleCoreM) {
        tailM = tiling.singleCoreM;
    }
    tailN = tiling.N - nIdx * tiling.singleCoreN;
    if (tailN > tiling.singleCoreN) {
        tailN = tiling.singleCoreN;
    }
    // Only edge cores need an explicit tail configuration.
    if (tailM < tiling.singleCoreM || tailN < tiling.singleCoreN) {
        matmulObj1.SetTail(tailM, tailN);
        matmulObj2.SetTail(tailM, tailN);
        matmulObj3.SetTail(tailM, tailN);
        matmulObj4.SetTail(tailM, tailN);
    }
}
// Kernel entry point: registers the four matmul engines, initializes the
// per-core kernel object, and processes this core's share of output batches.
extern "C" __global__ __aicore__ void mat_mul(GM_ADDR x, GM_ADDR y, GM_ADDR bias, GM_ADDR z, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    KERNEL_TASK_TYPE_DEFAULT(KERNEL_TYPE_MIX_AIC_1_1);
    KernelMatMul<float, float, float, float> op;
    TPipe pipe;
    REGIST_MATMUL_OBJ(&pipe, GetSysWorkSpacePtr(),
                      op.matmulObj1, &tiling_data.cubeTilingData,
                      op.matmulObj2, &tiling_data.cubeTilingData,
                      op.matmulObj3, &tiling_data.cubeTilingData,
                      op.matmulObj4, &tiling_data.cubeTilingData);
    op.Init(x, y, bias, z, workspace, tiling_data.cubeTilingData, &pipe);

    // Distribute output batches across cores: the first `big_num` cores each
    // take c_small_num + 1 batches, the rest take c_small_num.
    const auto coreId = GetBlockIdx();
    int32_t first, last;
    if (coreId < tiling_data.big_num) {
        first = coreId * (tiling_data.c_small_num + 1);
        last = first + tiling_data.c_small_num + 1;
    } else {
        first = coreId * tiling_data.c_small_num + tiling_data.big_num;
        last = first + tiling_data.c_small_num;
    }

    // hasBias is a compile-time template flag, so dispatch once outside the loop.
    if (tiling_data.isBias) {
        for (int32_t batch = first; batch < last; ++batch) {
            op.Process<true>(batch, tiling_data.c_dim, tiling_data.a_shape, tiling_data.b_shape, tiling_data.bias_shape, tiling_data.c_shape);
        }
    } else {
        for (int32_t batch = first; batch < last; ++batch) {
            op.Process<false>(batch, tiling_data.c_dim, tiling_data.a_shape, tiling_data.b_shape, tiling_data.bias_shape, tiling_data.c_shape);
        }
    }
}