#include "kernel_operator.h"
#include "lib/matmul_intf.h"
#include "matmulsub_kernel.h"

using namespace matmul;
// Number of chunks each (baseM x baseN) output tile is split into when the
// vector cores stream x3 through UB in X3CopyOut(). Must divide tiling.baseM
// (208) evenly — 52 gives baseM/JIASU_NUM = 4 rows per DataCopy burst.
// ("jiasu" = 加速, "speed-up".) Previously-tried values: 104, 208.
constexpr int32_t JIASU_NUM = 52;//104; //208
// Fast-path kernel for the fused "matmul + subtract" op, specialized for the
// fixed 2048x2048x2048 shape baked into the compile-time `tiling` member
// below (selected by tiling key 1 in mat_mul_sub()).
//
// Presumably computes y = x1 @ x2 - x3 — TODO confirm against the host op:
// the cube (AIC) side produces x1 @ x2 into yGm via matmulObj, while the
// vector (AIV) side writes -x3 into the same tiles in X3CopyOut(). Combining
// the two into a subtraction appears to rely on atomic-add accumulation
// enabled through the IterateAll flags — verify against the matmul API
// version in use (see NOTE(review) in X3CopyOut).
class KernelMatMulSub_fast {
public:
    __aicore__ inline KernelMatMulSub_fast(){};
    // Binds the global-memory tensors (with per-core element offsets from
    // CalcOffset) and, on vector (AIV) cores only, allocates the UB queues
    // used by X3CopyOut().
    //
    // x1, x2   : matmul inputs A (M x Ka) and B (Kb x N)
    // x3       : tensor subtracted from the product (same shape as y)
    // y        : output (M x N)
    // workspace: system workspace pointer (not used directly here)
    // pipe     : TPipe used to initialize the UB queues
    __aicore__ inline void Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR x3, GM_ADDR y, GM_ADDR workspace,
                                                        // const TCubeTiling &tiling,
                                                        AscendC::TPipe *pipe)
    {
        // this->tiling = tiling;
        int offsetA;
        int offsetB;
        int offsetC;
        // int offsetBias = 0;
        CalcOffset(GetBlockIdx(), tiling, offsetA, offsetB, offsetC);//, offsetBias); // Calculate the gm offset based on the blockidx.

        // x1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X1 *>(x1+offsetA*4), tiling.M * tiling.Ka);
        // x2Gm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X2 *>(x2+offsetB*4), tiling.Kb * tiling.N);
        // x3Gm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X3 *>(x3+offsetC*4));
        // yGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_Y *>(y+offsetC*4), tiling.M * tiling.N);
        // Offsets are in elements; the *4 converts to a byte offset on the raw
        // GM_ADDR, i.e. it assumes sizeof(DTYPE_*) == 4 (float/int32) — TODO
        // confirm the dtypes registered for tiling key 1.
        x1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X1 *>(x1+offsetA*4));
        x2Gm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X2 *>(x2+offsetB*4));
        x3Gm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X3 *>(x3+offsetC*4));
        yGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_Y *>(y+offsetC*4));
        
        // if ASCEND_IS_AIV
        // {  
        //     uint32_t numm = (tiling.M * tiling.N) / (GetBlockNum());
        //     // x3Gm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X3 *>(x3) + numm * GetBlockIdx());
        //     y1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_Y *>(y) + numm * GetBlockIdx());
        //     if(GetBlockIdx() == GetBlockNum()-1)
        //     {
        //         // printf("%d %d\r\n",numm, tiling.M * tiling.N- numm*GetBlockIdx());
        //         InitGlobalMemory(y1Gm, (tiling.M * tiling.N- numm*GetBlockIdx()), (DTYPE_Y)(0));
        //     }
        //     else
        //     {
        //         InitGlobalMemory(y1Gm, numm, (DTYPE_Y)(0));
        //     }
            
        //     // SyncAll();
        //     int32_t eventIDMTE3_MTE2 = static_cast<int32_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
        //     SetFlag<HardEvent::MTE3_MTE2>(eventIDMTE3_MTE2);
        //     WaitFlag<HardEvent::MTE3_MTE2>(eventIDMTE3_MTE2);
        // }

        // y1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_Y *>(y));


        // Vector-core-only UB queues for the x3 -> -x3 -> y streaming in
        // X3CopyOut().
        if ASCEND_IS_AIV
        {
        //196352
        // 11776 elements (~46KB at 4B/elem) per buffer. NOTE(review): larger
        // than the baseN*baseM/JIASU_NUM = 512-element chunks X3CopyOut()
        // actually moves per iteration; the size matches the earlier
        // commented-out flat 11776-element copy scheme in Process().
        pipe->InitBuffer(inQueueX3, 1, 11776 * sizeof(DTYPE_X3));
        pipe->InitBuffer(outQueueY, 1, 11776 * sizeof(DTYPE_Y)); // Init output buffer.
        }
        

        
        
        // yGm1 = yGm;
        // printf("offset:%d %d %d\r\n", offsetA, offsetB, offsetC);
        // printf("k:%d %d %d %d\r\n", tiling.M, tiling.N, tiling.Ka, tiling.Kb);
        // printf("base:%d %d %d\r\n", tiling.baseM, tiling.baseN, tiling.baseK);
        // printf("singleCore:%d %d %d\r\n", tiling.singleCoreM, tiling.singleCoreN, tiling.singleCoreK);
        // printf("DEFAULT_C0_SIZE:%d %d %d %d\r\n", DEFAULT_C0_SIZE,tiling.dbL0A,tiling.dbL0B,tiling.dbL0C);

        // x1Gm = x1Gm[offsetA];
        // x2Gm = x2Gm[offsetB];
        // yGm = yGm[offsetC];
        // x3Gm = x3Gm[offsetC];

        // GetSysWorkSpacePtr();
        // if (GetSysWorkSpacePtr() == nullptr) {
        //     // printf("error:GetSysWorkSpacePtr() == nullptr\r\n");
        //     return;
        // }
        
    }
    // Runs the full per-core computation: the matmul over this core's
    // (singleCoreM x singleCoreN) slice, then X3CopyOut() for every
    // (baseM x baseN) tile of that slice.
    __aicore__ inline void Process(AscendC::TPipe *pipe)
    {
        // uint32_t computeRound = 0;
        matmulObj.ClearBias(); 
        // for(int i =0; i<8; i++ )
        // {
        //     CalcOffset(GetBlockIdx()+i*20, tiling, offsetA, offsetB, offsetC);
            matmulObj.SetTensorA(x1Gm);
            matmulObj.SetTensorB(x2Gm);
            // matmulObj.SetBias(biasGlobal);
            
            // Computes this core's slice of x1 @ x2 directly into yGm.
            // NOTE(review): the trailing (1, false, true) arguments presumably
            // select atomic/accumulation and sync behavior — confirm their
            // meaning against the IterateAll signature of the matmul API
            // version in use.
            matmulObj.template IterateAll<false>(yGm,1,false,true);
        // }
        // while (matmulObj.Iterate<false>(yGm,1,false,false)) {
            // MatmulCompute();
            // LeakyReluCompute();
            // CopyOut(computeRound);
            // computeRound++;
        // }

        // matmulObj.template Iterate<false>();
        // SetAtomicAdd<DTYPE_Y>();
        // for (int i = 0; i < tiling.singleCoreM/tiling.baseM*tiling.singleCoreN/tiling.baseN; ++i) {   
        //     matmulObj.template GetTensorC<true>(yGm,0,false); 
        //     X3CopyOut(i); 
        // }

        // {
        // SetAtomicAdd<DTYPE_Y>();
        // int i = 0;
        // int loop = tiling.M*tiling.N/11776;
        // int end_of = ((tiling.M*tiling.N%11776) + 7)/8*8;
        // int loop_one = loop / (GetBlockNum());
        // int start = GetBlockIdx()*loop_one;
        // int end = start + loop_one;
        // if(GetBlockIdx() == ((GetBlockNum())-1))
        // {
        //     end = loop;
        // }
        // for(i=start; i<end; i++)
        // {
        //     LocalTensor<DTYPE_X3> x3Local = inQueueX3.AllocTensor<DTYPE_X3>();
        //     DataCopy(x3Local, x3Gm[11776*i], 11776);
        //     inQueueX3.EnQue(x3Local);

        //     x3Local = inQueueX3.DeQue<DTYPE_X3>();
        //     LocalTensor<DTYPE_Y> yLocal = outQueueY.AllocTensor<DTYPE_Y>();
        //     Muls(yLocal, x3Local, (DTYPE_X3)-1, 11776);
        //     inQueueX3.FreeTensor(x3Local);
        //     outQueueY.EnQue(yLocal);

        //     yLocal = outQueueY.DeQue<DTYPE_Y>();
        //     DataCopy(y1Gm[11776*i], yLocal, 11776);
        //     outQueueY.FreeTensor(yLocal);
        // }
        // if(GetBlockIdx() == ((GetBlockNum())-1))
        // {
        //     LocalTensor<DTYPE_X3> x3Local = inQueueX3.AllocTensor<DTYPE_X3>();
        //     DataCopy(x3Local, x3Gm[11776*loop], end_of);
        //     inQueueX3.EnQue(x3Local);

        //     x3Local = inQueueX3.DeQue<DTYPE_X3>();
        //     LocalTensor<DTYPE_Y> yLocal = outQueueY.AllocTensor<DTYPE_Y>();
        //     Muls(yLocal, x3Local, (DTYPE_X3)-1, end_of);
        //     inQueueX3.FreeTensor(x3Local);
        //     outQueueY.EnQue(yLocal);

        //     yLocal = outQueueY.DeQue<DTYPE_Y>();
        //     DataCopy(y1Gm[11776*loop], yLocal, end_of);
        //     outQueueY.FreeTensor(yLocal);
        // }
        // }

        // int S = 0;
        // // X3CopyOut(0); 
        // matmulObj.template Iterate<false>(false);
        // for (int i = 0; i < tiling.singleCoreM/tiling.baseM*tiling.singleCoreN/tiling.baseN-1; ++i) 
        // {
        //     matmulObj.template GetTensorC(yGm,1,false);
        //     // X3CopyOut(i+1); 
        // }
        // matmulObj.template GetTensorC(yGm,1,false);

        // while (matmulObj.template Iterate<false>(false)) {
        //     // MatmulCompute();
        //     // LeakyReluCompute();
        //     // CopyOut(computeRound);
        //     // computeRound++;
            
        //     matmulObj.GetTensorC(yGm,1,false);
        // }
        matmulObj.End();
        // if ASCEND_IS_AIV
        // {
        // One X3CopyOut per (baseM x baseN) tile of this core's output slice.
        for (int i = 0; i < tiling.singleCoreM/tiling.baseM*tiling.singleCoreN/tiling.baseN; ++i) {   
        //     matmulObj.template GetTensorC<true>(yGm,0,false); 
            // SetAtomicAdd<DTYPE_Y>();
            X3CopyOut(i); 
        }
        // }
        
        
    }
    // Streams one (baseM x baseN) tile of x3 through UB, negates it with
    // Muls(*, -1), and copies the result to the matching tile of yGm.
    //
    // `count` indexes tiles column-major over this core's
    // (singleCoreM/baseM) x (singleCoreN/baseN) tile grid. Each tile is moved
    // in JIASU_NUM chunks of baseM/JIASU_NUM rows so the chunk fits the UB
    // queue buffers and the MTE2/vector/MTE3 stages can pipeline.
    //
    // NOTE(review): this is a plain DataCopy into yGm, which overwrites the
    // matmul result unless atomic-add mode is in effect on this path (the
    // SetAtomicAdd calls nearby are all commented out) — confirm how the
    // subtraction actually accumulates.
    __aicore__ inline void X3CopyOut(uint32_t count)
    {
        const uint32_t roundM = tiling.singleCoreM / tiling.baseM;
        const uint32_t roundN = tiling.singleCoreN / tiling.baseN;  // NOTE(review): currently unused
        // Element offset of tile `count` within this core's view of y/x3.
        uint32_t startOffset = (count % roundM * tiling.baseM * tiling.N + count / roundM * tiling.baseN);
        // Strided copies: baseM/JIASU_NUM bursts of baseN elements each, with
        // the burst length and the (N - baseN)-element row gap expressed in
        // 32-byte units as DataCopyParams requires.
        DataCopyParams copyParamIn = {(uint16_t)(tiling.baseM/JIASU_NUM), (uint16_t)(tiling.baseN * sizeof(DTYPE_Y) / 32), (uint16_t)((tiling.N - tiling.baseN) * sizeof(DTYPE_Y) / 32), 0 };
        DataCopyParams copyParamOut = {(uint16_t)(tiling.baseM/JIASU_NUM), (uint16_t)(tiling.baseN * sizeof(DTYPE_Y) / 32), 0, (uint16_t)((tiling.N - tiling.baseN) * sizeof(DTYPE_Y) / 32) };
        // DataCopyExtParams copyParamIn = {(uint16_t)(tiling.baseM/4), (uint16_t)(tiling.baseN * sizeof(DTYPE_Y)), (uint16_t)((tiling.N - tiling.baseN) * sizeof(DTYPE_Y)), 0 ,0};
        // DataCopyExtParams copyParamOut = {(uint16_t)(tiling.baseM/4), (uint16_t)(tiling.baseN * sizeof(DTYPE_Y)), 0, (uint16_t)((tiling.N - tiling.baseN) * sizeof(DTYPE_Y)) ,0};
        // DataCopyPadExtParams<DTYPE_X3> padParams {false, 0, 0, 0};

        LocalTensor<DTYPE_X3> x3Local;
        LocalTensor<DTYPE_Y> yLocal;

        // Per chunk: GM -> UB copy of x3, negate, UB -> GM copy into y.
        for(int i=0; i<JIASU_NUM; i++)
        // for(int i=0; i<8; i++)
        {
            x3Local = inQueueX3.AllocTensor<DTYPE_X3>();

            // DataCopyExtParams copyParams {1, static_cast<uint32_t>(one_length * sizeof(float)), 0, 0, 0}; // the last field of DataCopyExtParams is a reserved (rsv) field
            // DataCopyPadExtParams<float> padParams {false, 0, 1, 0};
            // DataCopyPad(x3Local, x3Gm[startOffset + i*tiling.N*tiling.baseM/4], copyParamIn, padParams);

            DataCopy(x3Local, x3Gm[startOffset + i*tiling.N*tiling.baseM/JIASU_NUM], copyParamIn);
            inQueueX3.EnQue(x3Local);
            // printf("%d %d %d %d %d %d %d %d %d\r\n",tiling.M,tiling.N,tiling.baseM,tiling.baseN,tiling.singleCoreM,tiling.singleCoreN,count,roundM,roundN);
            yLocal = outQueueY.AllocTensor<DTYPE_Y>();
            x3Local = inQueueX3.DeQue<DTYPE_X3>();
            // Negate the chunk: yLocal = -x3Local.
            Muls(yLocal, x3Local, (DTYPE_X3)-1, tiling.baseN*tiling.baseM/JIASU_NUM);
            inQueueX3.FreeTensor(x3Local);
            outQueueY.EnQue(yLocal);

            yLocal = outQueueY.DeQue<DTYPE_Y>();
            // DataCopyPad(yGm[startOffset + i*tiling.N*tiling.baseM/4], yLocal, copyParamOut);
            DataCopy(yGm[startOffset + i*tiling.N*tiling.baseM/JIASU_NUM], yLocal, copyParamOut);
            outQueueY.FreeTensor(yLocal);
        }


        // reluOutQueue_.DeQue<DTYPE_Y>();
        // const uint32_t roundM = tiling.singleCoreM / tiling.baseM;
        // const uint32_t roundN = tiling.singleCoreN / tiling.baseN;
        // uint32_t startOffset = (count % roundM * tiling.baseM * tiling.N + count / roundM * tiling.baseN);
        // DataCopyParams copyParam = {(uint16_t)tiling.baseM, (uint16_t)(tiling.baseN * sizeof(DTYPE_Y) / DEFAULT_C0_SIZE), 0,
        //                             (uint16_t)((tiling.N - tiling.baseN) * sizeof(DTYPE_Y) / DEFAULT_C0_SIZE)};
        // DataCopy(yGm[startOffset], reluOutLocal, copyParam);
        // reluOutQueue_.FreeTensor(reluOutLocal);
    }
    // Maps a block (core) index to element offsets into A, B and C for this
    // core's (singleCoreM x singleCoreN) slice. Cores are laid out M-major:
    // mCoreIndx varies fastest with blockIdx.
    __aicore__ inline void CalcOffset(int32_t blockIdx, const TCubeTiling &tiling,
                                                                int32_t &offsetA, int32_t &offsetB, int32_t &offsetC)
    {
        auto mSingleBlocks = Ceiling(tiling.M, tiling.singleCoreM);
        auto mCoreIndx = blockIdx % mSingleBlocks;
        auto nCoreIndx = blockIdx / mSingleBlocks;
        // auto mCoreIndx = blockIdx / 2;
        // auto nCoreIndx = blockIdx % 2;

        offsetA = mCoreIndx * tiling.Ka * tiling.singleCoreM;   // row block of A
        offsetB = nCoreIndx * tiling.singleCoreN;               // column block of B
        offsetC = mCoreIndx * tiling.N * tiling.singleCoreM + nCoreIndx * tiling.singleCoreN; // tile of C

        // printf("CalcOffset:%d %d %d\r\n ",mSingleBlocks,mCoreIndx,nCoreIndx );

        // tailM = tiling.M - mCoreIndx * tiling.singleCoreM;
        // tailM = tailM < tiling.singleCoreM ? tailM : tiling.singleCoreM;
        // tailN = tiling.N - nCoreIndx * tiling.singleCoreN;
        // tailN = tailN < tiling.singleCoreN ? tailN : tiling.singleCoreN;
        // if (tailM < tiling.singleCoreM || tailN < tiling.singleCoreN) {
        //     matmulObj.SetTail(tailM, tailN);
        // }
        // printf("SetTail:%d %d\r\n ",tailM,tailN );
    }
    // typedef MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_X1> aType;
    // typedef MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_X1> bType;
    // typedef MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_X1> cType;
    // typedef matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, half> aType; 
    // typedef matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, half> bType; 
    // typedef matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, float> cType; 
    // typedef matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, float> biasType; 
    // constexpr MatmulConfig CFG_MDL = GetMDLConfig(false, false, 1);//, false, false, false, true, true, true, false);
    // Matmul<MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_X1>, 
    //        MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_X2>,
    //        MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_Y>, 
    //        MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_Y>,
    //        CFG_MDL>
    //     matmulObj;
    // typedef matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, DTYPE_X1> aType; 
    // typedef matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, DTYPE_X2> bType; 
    // typedef matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, DTYPE_Y> cType; 
    // typedef matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, DTYPE_Y> biasType;

    // Compile-time matmul configuration (MDL template). The flag meanings
    // depend on the GetMDLConfig overload of the SDK in use — confirm before
    // changing.
    static constexpr MatmulConfig MM_CFG = GetMDLConfig(false, false, 1, false, false, false, true);
    // static constexpr MatmulConfig MM_CFG = GetNormalConfig(false, false, false, BatchMode::SINGLE_LARGE_THAN_L1, false, IterateOrder::ORDER_M, ScheduleType::INNER_PRODUCT, true);

    // static constexpr MatmulConfig MM_CFG = GetSpecialMDLConfig();
    // static constexpr auto staticTiling = GetMatmulApiTiling<aType, bType, cType, biasType>(MM_CFG, 524288);

    // Matmul object: A/B/C/bias all in GM, ND format; registered with the
    // static `tiling` below via REGIST_MATMUL_OBJ in mat_mul_sub().
    Matmul<MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_X1, false, LayoutMode::NONE, true>, 
           MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_X2, false, LayoutMode::NONE, true>,
           MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_Y>, 
           MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_Y>,
            MM_CFG>
        matmulObj;

// constexpr static MatmulShapeParams shapeParams = {208, 1024, 2048, 208, 128, 32};
// constexpr static MatmulFuncParams funcParams = {false, false, (uint32_t)1, 
// false,false, //false, 
// IterateOrder::ORDER_N, ScheduleType::INNER_PRODUCT, true};
// constexpr static MatmulConfig MM_CFG = GetMMConfig<MatmulConfigMode::CONFIG_MDL>(shapeParams,funcParams);

// constexpr MatmulApiStaticTiling static MM_CFG_CONSTANT = GetMatmulApiTiling<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>(mmConfig);

// Matmul<MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_X1>, 
//            MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_X2>,
//            MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_Y>, 
//            MatmulType<TPosition::GM, CubeFormat::ND, DTYPE_Y>,
//             MM_CFG>
//         matmulObj;

// MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG_CONSTANT> mm;
    
    // Per-core views into the operator's global-memory tensors (set in Init).
    GlobalTensor<DTYPE_X1> x1Gm;
    GlobalTensor<DTYPE_X2> x2Gm;
    GlobalTensor<DTYPE_X3> x3Gm;
    GlobalTensor<DTYPE_Y> yGm;
    GlobalTensor<DTYPE_Y> y1Gm;  // only used by commented-out experiments

    // TCubeTiling tiling;
    // Hard-coded tiling for the fixed 2048x2048x2048 case (tiling key 1):
    // 20 cores, each covering a 208x1024 slice of C in 208x128 base tiles
    // with K split into baseK=32 steps. Being static constexpr, its address
    // can be passed to REGIST_MATMUL_OBJ without host-side tiling data.
    static constexpr TCubeTiling tiling ={
        .usedCoreNum = 20,
        .M = 2048,
        .N = 2048,
        .Ka = 2048,
        .Kb = 2048,
        .singleCoreM = 208,
        .singleCoreN = 1024, 
        .singleCoreK = 2048,
        .baseM = 208, 
        .baseN = 128, 
        .baseK = 32,
        .depthA1 = 8,
        .depthB1 = 2,
        .stepM = 1,
        .stepN = 1,
        .stepKa = 4,
        .stepKb = 1,
        .isBias = 0,
        .transLength = 0,
        .iterateOrder = 0, //1
        .dbL0A = 2, 
        .dbL0B = 2,
        .dbL0C = 1,
        .shareMode = 0,
        .shareL1Size = 245760,
        .shareL0CSize = 106496,
        .shareUbSize = 0,
        .batchM = 1,
        .batchN = 1,
        .singleBatchM = 1,
        .singleBatchN = 1,
    };

    // UB queues for the x3 streaming pipeline (vector cores only; see Init).
    TQue<QuePosition::VECIN, 1> inQueueX3;
    TQue<QuePosition::VECOUT, 1> outQueueY;

    // NOTE(review): the members below are never written or read in this
    // class; they appear to be leftovers from the general-purpose kernel.
    int8_t broadcast;
    int tailM;
    int tailN;

    
};


// Kernel entry point: dispatches on the tiling key chosen by the host-side
// tiling function.
//   key 1 -> fast path: KernelMatMulSub_fast with its compile-time tiling
//            (the GM tiling blob is not read on this path).
//   key 2 -> general-purpose path: KernelMatMulSub driven by host tiling data.
extern "C" __global__ __aicore__ void mat_mul_sub(GM_ADDR x1, GM_ADDR x2, GM_ADDR x3, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    if (TILING_KEY_IS(1)) {
        // Mix mode with a 1:1 cube/vector core ratio for the fast kernel.
        KERNEL_TASK_TYPE_DEFAULT(KERNEL_TYPE_MIX_AIC_1_1);
        KernelMatMulSub_fast kernelOp;
        TPipe tpipe;
        // Register the matmul object against the kernel's static tiling.
        REGIST_MATMUL_OBJ(&tpipe, GetSysWorkSpacePtr(), kernelOp.matmulObj, &kernelOp.tiling);
        kernelOp.Init(x1, x2, x3, y, workspace, &tpipe);
        kernelOp.Process(&tpipe);
    } else if (TILING_KEY_IS(2)) { // general-purpose path
        GET_TILING_DATA(tilingData, tiling);
        KernelMatMulSub<DTYPE_X1, DTYPE_X2, DTYPE_Y> kernelOp;
        TPipe tpipe;
        REGIST_MATMUL_OBJ(&tpipe, GetSysWorkSpacePtr(), kernelOp.matmulObj, &tilingData.cubeTilingData);
        kernelOp.Init(x1, x2, x3, y, workspace, tilingData.cubeTilingData, tilingData.broadcast, &tpipe);
        kernelOp.Process(&tpipe);
    }
}