/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 *
 * Function : c = a * b (matrix multiplication)
 * This sample is a basic example that implements Matmul on the Ascend platform.
 * In this sample:
 * Shape of matrix a is [m, k]: [1023, 1023]
 * Shape of matrix b is [k, n]: [1023, 1023]
 * Shape of matrix c is [m, n]: [1023, 1023]
 */

#include "kernel_operator.h"
using namespace AscendC;

class KernelMatmul {

private:
    // Matrix dimensions. uint32_t because uint16_t (max 65535) can be too small.
    uint32_t m = 1023;
    uint32_t k = 1023; // at most 2048: 16 * 2048 * 2 B = 64 KB of L1 staging per slice
    uint32_t n = 1023;

    uint16_t block_sum;       // total number of AI cores (GetBlockNum)

    uint16_t window_H = 16;   // tile height of A / C handled per row window

    uint16_t B_Split_W = 16;  // tile width of B / C handled per column tile

    uint16_t window_W = 16;   // K-extent consumed by a single Mmad call

    uint16_t split_K = 16;    // K-slice moved GM -> L1 per step (window_H * split_K elems of A)

    uint16_t block_id;        // index of this AI core (GetBlockIdx)

    uint16_t aSize, bSize, cSize, mBlocks, nBlocks, kBlocks; // currently unused
    TPipe pipe;

    // A path: GM -> L1 (A1) -> L0A (A2); B path: GM -> L1 (B1) -> L0B (B2).
    TQue<QuePosition::A1, 1> inQueueA1;
    TQue<QuePosition::A2, 1> inQueueA2;
    TQue<QuePosition::B1, 1> inQueueB1;
    TQue<QuePosition::B2, 1> inQueueB2;
    // C path: L0C accumulator (CO1) -> staging (CO2) -> GM.
    TQue<QuePosition::CO1, 1> outQueueCO1;
    TQue<QuePosition::CO2, 1> outQueueCO2;

    GlobalTensor<half> aGM, bGM;
    GlobalTensor<half> zero_GM; // zero-filled GM scratch, source for zero padding
    GlobalTensor<float> cGM;
public:
    __aicore__ inline KernelMatmul()
    {
    }

    /**
     * Bind global-memory addresses and size the per-stage staging buffers.
     * a: [m, k] half, b: [k, n] half, c: [m, n] float. `zero` must point to a
     * zero-filled GM region at least 16 halves long; it is the copy source for
     * zero padding of the partial trailing K slice of B.
     */
    __aicore__ inline void Init(GM_ADDR a, GM_ADDR b, GM_ADDR c, GM_ADDR zero)
    {
        this->block_id = GetBlockIdx();  // which AI core this instance runs on
        this->block_sum = GetBlockNum(); // how many cores share the work

        aGM.SetGlobalBuffer((__gm__ half*)a);
        bGM.SetGlobalBuffer((__gm__ half*)b);
        cGM.SetGlobalBuffer((__gm__ float*)c);
        zero_GM.SetGlobalBuffer((__gm__ half*)zero);

        pipe.InitBuffer(inQueueA1, 1, window_H * split_K * sizeof(half));
        pipe.InitBuffer(inQueueA2, 1, window_H * window_W * sizeof(half));

        pipe.InitBuffer(inQueueB1, 1, split_K * B_Split_W * sizeof(half));
        pipe.InitBuffer(inQueueB2, 1, window_W * B_Split_W * sizeof(half));

        pipe.InitBuffer(outQueueCO1, 1, window_H * B_Split_W * sizeof(float));
        pipe.InitBuffer(outQueueCO2, 1, window_H * B_Split_W * sizeof(float));
    }

    /**
     * Tiled matmul driver. Every core walks all row windows of C; column
     * tiles are distributed round-robin across cores starting at block_id
     * with stride block_sum. For each (row window, column tile) pair the K
     * dimension is accumulated in L0C over ceil(k / split_K) slices, then
     * the finished tile is staged and copied out.
     */
    __aicore__ inline void Process()
    {
        LocalTensor<half> a1Local;
        LocalTensor<half> b1Local;
        for (int window_h_id = 0; window_h_id < (m + window_H - 1) / window_H; window_h_id++)
        {
            // Offset-based distribution of column tiles over the cores.
            for (int B_tile_id = block_id; B_tile_id < (n + B_Split_W - 1) / B_Split_W; B_tile_id += block_sum)
            {
                bool init_zero = true; // first Mmad of a tile must clear L0C

                // Ceil-divide so a partial trailing K slice still gets a pass.
                for (int split_k_id = 0; split_k_id < (k + split_K - 1) / split_K; split_k_id++)
                {
                    A_CopyND2NZ(aGM, m, k, window_h_id, split_K, split_k_id);
                    B_CopyND2NZ(bGM, k, n, B_tile_id, split_K, split_k_id);
                    for (int window_w_id = 0; window_w_id < split_K / window_W; window_w_id++)
                    {
                        A_CopyNZ2ZZ(window_w_id);
                        B_CopyNZ2ZN(window_w_id);
                        Compute(init_zero);
                        init_zero = false;
                    }
                    // Release each L1 tile back to the queue it was allocated
                    // from. BUGFIX: a1Local was previously freed to inQueueB1,
                    // corrupting the A1 queue's buffer bookkeeping.
                    a1Local = inQueueA1.DeQue<half>();
                    b1Local = inQueueB1.DeQue<half>();
                    inQueueA1.FreeTensor(a1Local);
                    inQueueB1.FreeTensor(b1Local);
                }
                Aggregate();
                CopyOut(n, window_h_id, B_tile_id);
            }
        }
    }

private:
    /**
     * Copy one window_H x split_K tile of A from GM (row-major ND) into L1 in
     * NZ layout: 16-column fragments stored fragment-major, each fragment
     * holding window_H rows of 16 halves. Tiles that would overrun the matrix
     * edge are pulled back so they re-read earlier rows/columns instead of
     * reading out of bounds (the overlapped K columns are cancelled by the
     * zero rows B_CopyND2NZ inserts on the same slice).
     */
    __aicore__ inline void A_CopyND2NZ(const GlobalTensor<half>& src, const uint32_t height,
        const uint32_t width, const uint16_t A_block_id, const uint16_t split_K, const uint16_t split_k_id)
    {
        LocalTensor<half> dst = inQueueA1.AllocTensor<half>();

        int srcOffset;
        int dstOffset;
        int row, col;
        if ((A_block_id + 1) * window_H > height) // last row window: back off to stay in bounds
            row = (height - window_H) * width;
        else
            row = (A_block_id * window_H) * width;
        if ((split_k_id + 1) * split_K > width)   // last K slice: back off by the overshoot
            col = width - split_K;
        else
            col = split_k_id * split_K;

        for (int i = 0; i < split_K / 16; ++i)
        {
            srcOffset = row + col + i * 16;
            dstOffset = i * 16 * window_H;
            // Row-by-row transfer: a single strided DataCopy would require
            // width to be a multiple of 16, which is not guaranteed here.
            for (int j = 0; j < window_H; j++)
            {
                DataCopy(dst[dstOffset], src[srcOffset], { 1, 1, 0, 0 });
                srcOffset += width;
                dstOffset += 16;
            }
        }
        inQueueA1.EnQue(dst);
    }

    /**
     * Copy one split_K x B_Split_W tile of B from GM (row-major ND) into L1
     * in NZ layout (16-wide column fragments of split_K rows each).
     * For the partial trailing K slice (k % split_K != 0) the whole tile is
     * first zero-filled from zero_GM and the real rows are written at the
     * bottom of each fragment, so they line up with the backed-off A tile;
     * the zero rows null out the K indices that A re-reads, preventing
     * double counting.
     */
    __aicore__ inline void B_CopyND2NZ(const GlobalTensor<half>& src, const uint32_t height,
        const uint32_t width, const uint16_t B_tile_id, const uint16_t split_K, const uint16_t split_k_id)
    {
        LocalTensor<half> dst = inQueueB1.AllocTensor<half>();

        int srcOffset, dstOffset, row, col, dst_offset, dst_height;
        if ((split_k_id + 1) * split_K > height)
        {
            // Partial trailing slice: zero-fill the tile 16 halves at a time
            // (InitConstValue would be faster but is kept out for portability).
            for (int i = 0; i < B_Split_W / 16; ++i)
            {
                dstOffset = i * 16 * split_K;
                for (int j = 0; j < split_K; j++)
                {
                    DataCopy(dst[dstOffset], zero_GM[0], { 1, 1, 0, 0 });
                    dstOffset += 16;
                }
            }
            row = (height - height % split_K) * width; // first GM row of the partial slice
            dst_height = height % split_K;             // number of real rows to copy
            dst_offset = (split_K - dst_height) * 16;  // real rows go at the fragment bottom
        }
        else
        {
            row = split_k_id * split_K * width;
            dst_height = split_K;
            dst_offset = 0;
        }

        if ((B_tile_id + 1) * B_Split_W > n) // last column tile: back off to stay in bounds
            col = n - B_Split_W;
        else
            col = B_tile_id * B_Split_W;

        for (int i = 0; i < B_Split_W / 16; ++i)
        {
            srcOffset = row + col + i * 16; // row base + column base + fragment offset
            dstOffset = dst_offset + i * 16 * split_K;
            // Row-by-row transfer; a strided DataCopy needs width % 16 == 0.
            for (int j = 0; j < dst_height; j++)
            {
                DataCopy(dst[dstOffset], src[srcOffset], { 1, 1, 0, 0 });
                srcOffset += width;
                dstOffset += 16;
            }
        }

        inQueueB1.EnQue(dst);
    }

    /**
     * Move one window_H x window_W sub-tile of the L1 A buffer into L0A,
     * converting NZ to ZZ via LoadData2d. The A1 tensor is re-enqueued (not
     * freed) so later window_w iterations and Process() can still reach it.
     */
    __aicore__ inline void A_CopyNZ2ZZ(const uint16_t window_W_Id)
    {
        // Skip the window_W_Id preceding window_H x window_W sub-tiles.
        int srcOffset = window_W_Id * window_H * window_W;
        int dstOffset = 0;
        LocalTensor<half> a1Local = inQueueA1.DeQue<half>();
        LocalTensor<half> a2Local = inQueueA2.AllocTensor<half>();

        // Transform NZ to ZZ, one 16-row band per iteration.
        for (int i = 0; i < window_H / 16; ++i)
        {
            LoadData2dParams loadDataParams;
            loadDataParams.repeatTimes = window_W / 16;
            loadDataParams.srcStride = window_H / 16;
            loadDataParams.ifTranspose = false;

            LoadData(a2Local[dstOffset], a1Local[srcOffset], loadDataParams);

            srcOffset += 16 * 16;

            dstOffset += window_W * 16;
        }

        inQueueA1.EnQue(a1Local);
        inQueueA2.EnQue<half>(a2Local);
    }

    /**
     * Move one window_W x B_Split_W sub-tile of the L1 B buffer into L0B,
     * converting NZ to ZN (transposed fragments) via LoadData2d. The B1
     * tensor is re-enqueued so Process() can free it after the inner loop.
     * NOTE(review): srcStride is k / 16; with the current tile sizes
     * repeatTimes == B_Split_W / 16 == 1, so the stride value is never
     * applied — confirm before enlarging B_Split_W.
     */
    __aicore__ inline void B_CopyNZ2ZN(const uint16_t window_W_Id)
    {
        LocalTensor<half> b1Local = inQueueB1.DeQue<half>();
        LocalTensor<half> b2Local = inQueueB2.AllocTensor<half>();

        // Start of the horizontal window inside the first fragment column.
        int srcOffset = window_W_Id * window_W * 16;
        int dstOffset = 0;
        for (int i = 0; i < window_W / 16; ++i) {

            LoadData2dParams loadDataParams;

            loadDataParams.repeatTimes = B_Split_W / 16;
            loadDataParams.srcStride = k / 16;
            loadDataParams.ifTranspose = true;

            LoadData(b2Local[dstOffset], b1Local[srcOffset], loadDataParams);

            srcOffset += 16 * 16;
            dstOffset += B_Split_W * 16;
        }

        inQueueB1.EnQue<half>(b1Local);
        inQueueB2.EnQue<half>(b2Local);
    }

    /**
     * One Mmad accumulation step: c1 (+)= a2 * b2 over a window_W-deep slab.
     * On the first step of a tile (init_zero == true) the L0C accumulator is
     * freshly allocated and cleared by the Mmad cmatrixInitVal flag; on
     * later steps the same accumulator is dequeued and added into.
     */
    __aicore__ inline void Compute(const bool init_zero)
    {
        LocalTensor<half> a2Local = inQueueA2.DeQue<half>();
        LocalTensor<half> b2Local = inQueueB2.DeQue<half>();
        LocalTensor<float> c1Local;
        if (init_zero)
            c1Local = outQueueCO1.AllocTensor<float>(); // first step: new accumulator
        else
            c1Local = outQueueCO1.DeQue<float>();       // later steps: keep accumulating

        MmadParams mmadParams;
        mmadParams.m = window_H;
        mmadParams.n = B_Split_W;
        mmadParams.k = window_W;
        mmadParams.cmatrixInitVal = init_zero; // hardware zeroes C on the first step
        mmadParams.cmatrixSource = false;
        mmadParams.isBias = false;
        Mmad(c1Local, a2Local, b2Local, mmadParams);

        outQueueCO1.EnQue<float>(c1Local);

        inQueueA2.FreeTensor(a2Local);
        inQueueB2.FreeTensor(b2Local);
    }

    /**
     * Drain the finished L0C accumulator into the CO2 staging buffer with a
     * matrix-block-mode DataCopy, freeing the accumulator for the next tile.
     */
    __aicore__ inline void Aggregate()
    {
        LocalTensor<float> c1Local = outQueueCO1.DeQue<float>();
        LocalTensor<float> c2Local = outQueueCO2.AllocTensor<float>();

        DataCopyParams dataCopyParams;

        dataCopyParams.blockCount = 1;

        // One 16x16 float fragment per (window_H/16)*(B_Split_W/16) block.
        dataCopyParams.blockLen = (window_H / 16) * (B_Split_W / 16);

        DataCopyEnhancedParams enhancedParams;
        enhancedParams.blockMode = BlockMode::BLOCK_MODE_MATRIX;
        DataCopy(c2Local[0], c1Local, dataCopyParams, enhancedParams);

        outQueueCO1.FreeTensor(c1Local);
        outQueueCO2.EnQue<float>(c2Local);
    }

    /**
     * Copy the staged window_H x B_Split_W result tile from CO2 to cGM.
     * The destination row/column start is backed off at the matrix edges
     * exactly like the loads, so edge tiles overwrite (with identical
     * values) rather than run out of bounds. Each output row gathers one
     * 16-float chunk from every NZ fragment of the staging buffer.
     */
    __aicore__ inline void CopyOut(const uint32_t width, const uint16_t A_block_id, const uint16_t B_tile_id)
    {
        LocalTensor<float> c2Local = outQueueCO2.DeQue<float>();
        int cGM_id, row, col;

        if ((A_block_id + 1) * window_H > m) // last row window: back off
            row = m - window_H;
        else
            row = A_block_id * window_H;

        if ((B_tile_id + 1) * B_Split_W > n) // last column tile: back off
            col = n - B_Split_W;
        else
            col = B_tile_id * B_Split_W;
        for (int cGM_row = row, c2_row = 0; c2_row < window_H; cGM_row++, c2_row++)
        {
            cGM_id = cGM_row * width + col;
            // blockLen 2 x 32 B = 16 floats per fragment; src gap skips the
            // rest of each window_H x 16 fragment ((window_H*16-16)/8 blocks).
            DataCopy(cGM[cGM_id], c2Local[c2_row * 16], { uint16_t(B_Split_W / 16), 2, uint16_t((window_H * 16 - 16) / 8), 0 });
        }

        outQueueCO2.FreeTensor(c2Local);
    }


};

// Device kernel entry point. a, b, c are the GM matrices; d is a zero-filled
// GM scratch region passed through to KernelMatmul::Init as the zero-padding
// source for partial K tiles.
extern "C" __global__ __aicore__ void matmul_custom(GM_ADDR a, GM_ADDR b, GM_ADDR c, GM_ADDR d)
{
    KernelMatmul op;
    op.Init(a, b, c, d);
    op.Process();
}

#ifndef __CCE_KT_TEST__
// call of kernel function
// call of kernel function
// Host-side launcher: blockDim is the number of AI cores to run the kernel
// on; a/b/c are the device buffers for the input/output matrices and d is
// the zero-filled scratch buffer the kernel uses for padding.
void matmul_custom_do(uint32_t blockDim, void* l2ctrl, void* stream, uint8_t* a, uint8_t* b, uint8_t* c, uint8_t * d)
{
    matmul_custom<<<blockDim, l2ctrl, stream>>>(a, b, c, d);
}
#endif
