#include "kernel_operator.h"
using namespace AscendC;

// Tiled half-precision matmul kernel (C[m][n] = A[m][k] * B[k][n], fp16 in, fp32 out).
// AI cores are partitioned into `group_sum` groups: each group owns a column slice
// of B (width L1_n), and cores within a group stride over row tiles of A (height L1_m).
// The k dimension is processed in L1_k chunks (GM -> L1), each further split into
// L2_k chunks for the Mmad accumulation.
class KernelMatmul
{

private:
    // Problem sizes; defaults are placeholders, overwritten in Init() from tiling data.
    // int32_t is required: uint16_t (max 65535) is too small for these dimensions.
    int32_t m = 8192;
    int32_t k = 68416;
    int32_t n = 256;

    int32_t block_sum; // total number of AI cores

    int32_t L1_m = 0; // row-tile height of A / C

    int32_t L1_n = 0; // column-tile width of B; B0 tile is [L2_k][L1_n]

    int32_t L1_k = 0; // A1 tile is [L1_m][L1_k], B1 tile is [L1_k][L1_n]

    int32_t L2_k = 0; // A0 tile is [L1_m][L2_k]; k-slice fed to each Mmad call

    int32_t group_size = 0; // number of AI cores in one group

    int32_t block_id; // this core's index

    int32_t group_id, in_group_id, group_sum;

    int32_t aSize, bSize, cSize, mBlocks, nBlocks, kBlocks;
    TPipe pipe;

    // Input queues along the A path (GM -> A1 -> A2) and B path (GM -> B1 -> B2).
    TQue<QuePosition::A1, 1> inQueueA1;
    TQue<QuePosition::A2, 1> inQueueA2;
    TQue<QuePosition::B1, 1> inQueueB1;
    TQue<QuePosition::B2, 1> inQueueB2;
    // Output queues: CO1 accumulates Mmad results, CO2 stages the copy back to GM.
    TQue<QuePosition::CO1, 1> outQueueCO1;
    TQue<QuePosition::CO2, 1> outQueueCO2;

    GlobalTensor<half> aGM, bGM;
    GlobalTensor<float> cGM;

public:
    __aicore__ inline KernelMatmul()
    {
    }
    // Bind global buffers, derive the tiling (L1_m/L1_n/L1_k/L2_k and the core
    // grouping) from the problem sizes, and allocate all pipeline buffers.
    // a, b: fp16 input matrices in GM; c: fp32 output matrix in GM.
    __aicore__ inline void Init(GM_ADDR a, GM_ADDR b, GM_ADDR c, uint32_t m, uint32_t k, uint32_t n)
    {
        block_id = GetBlockIdx(); // this AI core's index
        block_sum = GetBlockNum();

        this->m = m;
        this->k = k;
        this->n = n;

        int max_limit;
        int n_divide_16 = n / 16;
        int A0_size = 32768; // A0 capacity in half elements (65536 bytes / 2)
        int C0_size = 65536;

        if (n_divide_16 % 2 == 0) // n splits evenly into two column groups
        {
            group_sum = 2;
        }
        else
        {
            group_sum = 1;
        }
        group_size = block_sum / group_sum; // AI cores per group
        group_id = block_id / group_size;
        in_group_id = block_id % group_size;

        L1_n = n / group_sum; // column split of B (guaranteed multiple of 16)
        // Pick L1_m first, bounded by the C0 capacity, then round down to a multiple of 16.
        L1_m = (m / group_size) > (C0_size / L1_n) ? (C0_size / L1_n) : (m / group_size);
        L1_m = L1_m / 16 * 16;

        // Both A0's L1_m and B0's L1_n constrain L2_k; take the larger of the two.
        max_limit = L1_n > L1_m ? L1_n : L1_m;
        L2_k = A0_size / max_limit;
        L2_k = L2_k / 16 * 16; // round down to a multiple of 16
        L1_k = L2_k;           // with L1_k == L2_k the A1/B1 buffers can never overflow

        aGM.SetGlobalBuffer((__gm__ half *)a);
        bGM.SetGlobalBuffer((__gm__ half *)b);
        cGM.SetGlobalBuffer((__gm__ float *)c);

        pipe.InitBuffer(inQueueA1, 1, L1_m * L1_k * sizeof(half));
        pipe.InitBuffer(inQueueA2, 1, L1_m * L2_k * sizeof(half));

        pipe.InitBuffer(inQueueB1, 1, L1_k * L1_n * sizeof(half));
        pipe.InitBuffer(inQueueB2, 1, L2_k * L1_n * sizeof(half));

        pipe.InitBuffer(outQueueCO1, 1, L1_m * L1_n * sizeof(float));
        pipe.InitBuffer(outQueueCO2, 1, L1_m * L1_n * sizeof(float));
    }
    // Main loop: for each (row tile of A, column tile of B) owned by this core,
    // stream k in L1_k chunks, accumulate over L2_k sub-chunks with Mmad, then
    // aggregate and write the finished C tile back to GM.
    __aicore__ inline void Process()
    {
        if (group_id >= group_sum) // overflow group: this core has no work
            return;

        LocalTensor<half> a1Local;
        LocalTensor<half> b1Local;
        int A1_row, A1_col; // valid rows/cols fetched from global memory per tile
        for (int L1_m_id = in_group_id; L1_m_id < (m + L1_m - 1) / L1_m; L1_m_id += group_size) // ceil-divide
        {
            if ((L1_m_id + 1) * L1_m <= m) // A1_row = number of valid rows
                A1_row = L1_m;
            else
                A1_row = m - L1_m_id * L1_m;

            for (int B_tile_id = group_id; B_tile_id < n / L1_n; B_tile_id += group_sum)
            {
                bool init_zero = true; // first Mmad of a C tile initializes the accumulator

                for (int L1_k_id = 0; L1_k_id < (k + L1_k - 1) / L1_k; L1_k_id++)
                {
                    if ((L1_k_id + 1) * L1_k <= k) // A1_col = number of valid columns
                        A1_col = L1_k;
                    else
                        A1_col = k - L1_k_id * L1_k;

                    A_CopyND2NZ(aGM, m, k, L1_m_id, L1_k, L1_k_id, A1_row, A1_col); // A1[A1_row][A1_col]
                    B_CopyND2NZ(bGM, k, n, B_tile_id, L1_k, L1_k_id, A1_col);       // B1[A1_col][L1_n]

                    int A2_col; // columns taken from A1 per L2 chunk
                    for (int L2_k_id = 0; L2_k_id < (A1_col + L2_k - 1) / L2_k; L2_k_id++)
                    {
                        if ((L2_k_id + 1) * L2_k <= A1_col) // valid columns of this sub-chunk
                            A2_col = L2_k;
                        else
                            A2_col = A1_col - L2_k_id * L2_k;

                        A_CopyNZ2ZZ(L2_k_id, A1_row, A2_col); // A2[A1_row][A2_col]
                        B_CopyNZ2ZN(L2_k_id, A1_col, A2_col); // B2[A2_col][L1_n]
                        Compute(init_zero, A1_row, A2_col);
                        init_zero = false;
                    }
                    // Release the L1 staging tensors back to the queues they came
                    // from (bugfix: a1Local was previously freed on inQueueB1,
                    // leaking A1 buffers and double-freeing B1 buffers).
                    a1Local = inQueueA1.DeQue<half>();
                    b1Local = inQueueB1.DeQue<half>();
                    inQueueA1.FreeTensor(a1Local);
                    inQueueB1.FreeTensor(b1Local);
                }
                Aggregate(A1_row);
                CopyOut(L1_m_id, B_tile_id, A1_row, L1_n); // C2[A1_row][L1_n]
            }
        }
    }

private:
    // Copy one A tile (row, col valid elements) from GM (ND layout) into A1 (NZ
    // layout). Chooses column-wise or row-wise DMA depending on which needs
    // fewer DataCopy issues.
    __aicore__ inline void A_CopyND2NZ(const GlobalTensor<half> &src, const uint32_t height,
                                       const uint32_t width, const uint16_t A_block_id, const uint16_t L1_k, const uint16_t L1_k_id,
                                       const uint16_t row, const uint16_t col)
    {

        LocalTensor<half> dst = inQueueA1.AllocTensor<half>();
        uint64_t srcOffset;  // uint64_t to avoid overflow for large matrices
        uint64_t dstOffset;  // uint64_t to avoid overflow for large matrices
        if (col / 16 <= row) // pick the direction with fewer loop iterations
        {
            for (int i = 0; i < col / 16; ++i) // fetch one 16-wide column stripe per iteration
            {
                srcOffset = A_block_id * L1_m * width + L1_k_id * L1_k + i * 16;
                dstOffset = i * 16 * row;
                DataCopy(dst[dstOffset], src[srcOffset], {row, 1, uint16_t((width - 16) / 16), 0});
            }
        }
        else
        {
            for (int i = 0; i < row; ++i) // fetch one row per iteration
            {
                srcOffset = A_block_id * L1_m * width + i * width + L1_k_id * L1_k;
                dstOffset = i * 16;
                DataCopy(dst[dstOffset], src[srcOffset], {uint16_t(col / 16), 1, 0, uint16_t((row * 16 - 16) / 16)});
            }
        }
        inQueueA1.EnQue(dst);
    }

    // Copy one B tile ([row][L1_n] valid elements) from GM (ND layout) into B1
    // (NZ layout). Same dual-direction strategy as A_CopyND2NZ.
    __aicore__ inline void B_CopyND2NZ(const GlobalTensor<half> &src, const uint32_t height,
                                       const uint32_t width, const uint16_t B_block_id, const uint16_t L1_k, const uint16_t L1_k_id, const uint16_t row)
    {
        LocalTensor<half> dst = inQueueB1.AllocTensor<half>();

        uint64_t srcOffset; // uint64_t to avoid overflow for large matrices
        uint64_t dstOffset; // uint64_t to avoid overflow for large matrices

        if (L1_n / 16 <= row) // pick the direction with fewer loop iterations
        {
            for (int i = 0; i < L1_n / 16; ++i)
            {
                srcOffset = L1_k_id * L1_k * width + B_block_id * L1_n + i * 16; // row, column and offset
                dstOffset = i * 16 * row;
                DataCopy(dst[dstOffset], src[srcOffset], {row, 1, uint16_t((width - 16) / 16), 0});
            }
        }
        else
        {
            for (int i = 0; i < row; ++i) // fetch one row per iteration
            {
                srcOffset = L1_k_id * L1_k * width + i * width + B_block_id * L1_n; // row, column and offset
                dstOffset = i * 16;
                DataCopy(dst[dstOffset], src[srcOffset], {uint16_t(L1_n / 16), 1, 0, uint16_t((row * 16 - 16) / 16)});
            }
        }
        inQueueB1.EnQue(dst);
    }

    // Move the L2_k_Id-th k sub-chunk of A1 (NZ) into A2 (ZZ) via LoadData.
    // A1 is re-enqueued so the next sub-chunk (or the final free) can dequeue it.
    __aicore__ inline void A_CopyNZ2ZZ(const uint16_t L2_k_Id, const uint16_t A1_row, const uint16_t A2_col)
    {
        // Skip the first L2_k_Id windows of A1_row * L2_k elements
        // (the last window may be smaller than L1_m * L2_k).
        int srcOffset = L2_k_Id * A1_row * L2_k;
        int dstOffset = 0;
        LocalTensor<half> a1Local = inQueueA1.DeQue<half>();
        LocalTensor<half> a2Local = inQueueA2.AllocTensor<half>();

        // transform nz to zz, one 16-row band at a time
        for (int i = 0; i < A1_row / 16; ++i)
        {
            LoadData2dParams loadDataParams;
            loadDataParams.repeatTimes = A2_col / 16;
            loadDataParams.srcStride = A1_row / 16;
            loadDataParams.ifTranspose = false;

            LoadData(a2Local[dstOffset], a1Local[srcOffset], loadDataParams);

            srcOffset += 16 * 16;

            dstOffset += A2_col * 16;
        }

        inQueueA1.EnQue(a1Local);
        inQueueA2.EnQue<half>(a2Local);
    }
    // Move the L2_k_Id-th k sub-chunk of B1 (NZ) into B2 (ZN) via transposing
    // LoadData. B1 is re-enqueued for subsequent sub-chunks / the final free.
    __aicore__ inline void B_CopyNZ2ZN(const uint16_t L2_k_Id, const uint16_t A1_col, const uint16_t A2_col)
    {
        LocalTensor<half> b1Local = inQueueB1.DeQue<half>();
        LocalTensor<half> b2Local = inQueueB2.AllocTensor<half>();

        int srcOffset = L2_k_Id * L2_k * 16; // start of this horizontal window
        int dstOffset = 0;
        for (int i = 0; i < A2_col / 16; ++i)
        {

            LoadData2dParams loadDataParams;

            loadDataParams.repeatTimes = L1_n / 16;
            loadDataParams.srcStride = A1_col / 16;
            loadDataParams.ifTranspose = true;

            LoadData(b2Local[dstOffset], b1Local[srcOffset], loadDataParams);

            srcOffset += 16 * 16;
            dstOffset += L1_n * 16;
        }

        inQueueB1.EnQue<half>(b1Local);
        inQueueB2.EnQue<half>(b2Local);
    }
    // One Mmad step: C1 (+)= A2[A1_row][A2_col] * B2[A2_col][L1_n].
    // init_zero selects whether the accumulator is zero-initialized (first
    // sub-chunk of a C tile) or accumulated into (subsequent sub-chunks).
    __aicore__ inline void Compute(const bool init_zero, const uint16_t A1_row, const uint16_t A2_col)
    {

        LocalTensor<half> a2Local = inQueueA2.DeQue<half>();
        LocalTensor<half> b2Local = inQueueB2.DeQue<half>();
        LocalTensor<float> c1Local;
        if (init_zero)
            c1Local = outQueueCO1.AllocTensor<float>(); // first step: allocate the accumulator
        else
            c1Local = outQueueCO1.DeQue<float>(); // later steps: reuse the accumulator

        MmadParams mmadParams;
        mmadParams.m = A1_row;
        mmadParams.n = L1_n;
        mmadParams.k = A2_col;
        mmadParams.cmatrixInitVal = init_zero;
        mmadParams.cmatrixSource = false;
        mmadParams.isBias = false;
        Mmad(c1Local, a2Local, b2Local, mmadParams);

        outQueueCO1.EnQue<float>(c1Local);

        inQueueA2.FreeTensor(a2Local);
        inQueueB2.FreeTensor(b2Local);
    }
    // Copy the finished accumulator tile from CO1 to CO2 in matrix block mode
    // (one block = 16x16 floats = 1024 bytes).
    __aicore__ inline void Aggregate(const uint16_t A1_row)
    {
        LocalTensor<float> c1Local = outQueueCO1.DeQue<float>();
        LocalTensor<float> c2Local = outQueueCO2.AllocTensor<float>();

        DataCopyParams dataCopyParams; // c1 -> c2, 1024B = 16*16 elements * 4B

        dataCopyParams.blockCount = 1;

        dataCopyParams.blockLen = (A1_row / 16) * (L1_n / 16);

        DataCopyEnhancedParams enhancedParams;
        enhancedParams.blockMode = BlockMode::BLOCK_MODE_MATRIX;
        DataCopy(c2Local[0], c1Local, dataCopyParams, enhancedParams);

        outQueueCO1.FreeTensor(c1Local);
        outQueueCO2.EnQue<float>(c2Local);
    }
    // Write the C tile (C2, NZ layout) back to GM (ND layout) at block position
    // (block_H_id, block_W_id). Chooses column-wise or row-wise DMA by loop count.
    __aicore__ inline void CopyOut(const uint16_t block_H_id, const uint16_t block_W_id, const uint16_t A1_row, const uint16_t L1_n)
    {
        LocalTensor<float> c2Local = outQueueCO2.DeQue<float>();
        uint64_t cGM_id, srcOffset;

        if (L1_n / 16 <= A1_row) // pick the direction with fewer loop iterations
        {
            for (int i = 0, cGM_row = L1_m * block_H_id, cGM_col = block_W_id * L1_n;
                 i < L1_n / 16; ++i) // write one 16-wide column stripe per iteration
            {
                cGM_id = cGM_row * n + cGM_col;
                srcOffset = i * A1_row * 16;
                AscendC::DataCopy(cGM[cGM_id], c2Local[srcOffset], {A1_row, 2, 0, uint16_t((n - 16) / 8)});
                cGM_col += 16;
            }
        }
        else
        {
            for (int i = 0, cGM_row = L1_m * block_H_id, cGM_col = block_W_id * L1_n;
                 i < A1_row; ++i) // write one row per iteration
            {
                cGM_id = cGM_row * n + cGM_col;
                srcOffset = i * 16;
                DataCopy(cGM[cGM_id], c2Local[srcOffset], {uint16_t(L1_n / 16), 2, uint16_t((A1_row * 16 - 16) / 8), 0});

                cGM_row += 1;
            }
        }
        outQueueCO2.FreeTensor(c2Local);
    }
};


// Device entry point for the tiled matmul kernel.
// x, y: fp16 input matrices A and B in global memory; z: fp32 output C.
// The tiling blob supplies the runtime problem sizes m, k, n.
extern "C" __global__ __aicore__ void mmad_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    KernelMatmul kernel;
    kernel.Init(x, y, z, tiling_data.m, tiling_data.k, tiling_data.n);
    kernel.Process();
}