/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LCCL_GATHER_ALL_H
#define LCCL_GATHER_ALL_H

#include "collectives.h"
#include "ipc_queue.h"

using namespace AscendC;

/**
 * GatherAll: embedding-gather plus all-to-all exchange collective (Ascend C).
 *
 * Stage 1 (Gather): every core reads the rows of emb_table selected by the
 * int32 `lookup` index array and packs them contiguously into this rank's
 * IPC buffer (`gather_data`), double-buffering through two UB staging areas.
 * Stage 2 (Process): cores split into a producer group (gather_data ->
 * per-rank shared-memory queues) and a consumer group (shared-memory queues
 * -> output), synchronized slice-by-slice via inner flags.
 *
 * `send_count_matrix` is a rankSize x rankSize int64 matrix; row `rank`
 * sums to the element count this rank sends (`sendLen`), column `rank`
 * sums to the element count this rank receives (`revLen`).
 */
template <typename T>
class GatherAll : public Collectives {
    constexpr static int INVALID_RANK_NUM = 0xFFFFFFFF;  // marks a per-core slot with no rank assigned
    constexpr static int64_t SHARE_QUE_DEPTH = 16;       // depth (element blocks) of a single shared queue
    constexpr static int64_t MULTI_RANK_SIZE = 32;       // max ranks one physical core may serve; sizes the arrays below

    constexpr static int64_t IDLER_CORE = 0;  // idle core, no pipeline work
    constexpr static int64_t PRODUCER_CORE = 1;  // producer group: writes into shared memory (input->share, or share->share)
    constexpr static int64_t CONSUMER_CORE = 2;  // consumer group: reads out of shared memory (share->output)

public:
    __aicore__ inline GatherAll(int rank, int rankSize, uint32_t extraFlag)
        : Collectives(rank, rankSize, extraFlag)
    {
    }

    /**
     * Set up addresses, lengths, shared-memory layout, core grouping and
     * data slicing. Must be called before Process().
     *
     * emb_table:         embedding table in GM, rows of `dim` elements of T.
     * lookup:            int32 row indices into emb_table (see Gather()).
     * send_count_matrix: rankSize x rankSize int64 element counts.
     * shape_vec:         unused in the visible code — TODO confirm caller contract.
     * peer_mem:          int64 array of per-rank IPC base addresses.
     * output:            GM receive buffer of revLen elements of T.
     * magic:             ping-pong epoch selector for the IPC buffer halves.
     * dim:               embedding row width in elements of T.
     */
    __aicore__ inline void Init(GM_ADDR emb_table, GM_ADDR lookup, GM_ADDR send_count_matrix, GM_ADDR shape_vec,
                                GM_ADDR peer_mem, GM_ADDR output, int64_t rank, int64_t rankSize, int64_t magic,
                                int64_t dim)
    {
        this->root = 0;
        this->len = 0;  // NOTE(review): never updated afterwards in this file — see InitCoreGroup()
        this->magic = magic;
        this->rank = rank;
        this->rankSize = rankSize;
        this->dim = dim;
        this->emb_table = emb_table;
        this->lookup = lookup;
        this->output = output;
        this->coreNumsPerStage = 16; // 16 core for each stage due to hardware has max 32 core

        blockIdx = GetBlockIdx();
        blockNum = GetBlockNum();

        // Resolve every rank's IPC buffer for the current ping-pong epoch.
        GlobalTensor<int64_t> peerMemsAddrGm;
        int64_t peer_mem_addr = reinterpret_cast<int64_t>(peer_mem);
        peerMemsAddrGm.SetGlobalBuffer((__gm__ int64_t*)peer_mem_addr, rankSize * sizeof(int64_t));
        for (int i = 0; i < rankSize; ++i) {
            shareAddrs[i] = (GM_ADDR)(peerMemsAddrGm.GetValue(i)) +
                            (this->magic % PING_PONG_SIZE) * (IPC_BUFF_MAX_SIZE + IPC_DATA_OFFSET);
        }
        // Scratch area for gathered rows lives in the *other* ping-pong half
        // of this rank's own IPC buffer.
        this->gather_data = (GM_ADDR)(peerMemsAddrGm.GetValue(rank)) +
                            ((this->magic + 1) % PING_PONG_SIZE) * (IPC_BUFF_MAX_SIZE + IPC_DATA_OFFSET) +
                            IPC_DATA_OFFSET;

        // Rows of `dim` T-elements per UB ping-pong buffer per DMA burst.
        blockSize = UB_SINGLE_DMA_SIZE_MAX / PING_PONG_SIZE / (dim * sizeof(T));
        blockSize = (blockSize / 64) * 64 ;  // round down to a multiple of 64 (64-byte alignment per original note)

        sync.Init(rank, rankSize, shareAddrs, blockIdx, blockNum);

        sendCountMatrixGm.SetGlobalBuffer((__gm__ int64_t*)send_count_matrix, rankSize * rankSize * sizeof(int64_t));

        // Initialize input/output lengths: row `rank` = total sent, column
        // `rank` = total received.
        for (int j = 0; j < rankSize; j++) {
            sendLen += sendCountMatrixGm.GetValue(rank * rankSize + j);
        }

        for (int j = 0; j < rankSize; j++) {
            revLen += sendCountMatrixGm.GetValue(j * rankSize + rank);
        }
        outputGt.SetGlobalBuffer((__gm__ T*)output, revLen* sizeof (T));
        pipe_barrier(PIPE_ALL);
        inputGt.SetGlobalBuffer((__gm__ T*)gather_data, sendLen* sizeof (T));
        // Initialize shared-memory queue layout
        InitShare();
        // Initialize core grouping
        InitCoreGroup();
        // Initialize data slicing
        InitDataSlice();
    }

    /**
     * Stage 1: each core gathers its contiguous share of `lookup` rows from
     * emb_table into gather_data, ping-ponging between two UB buffers, then
     * raises a completion flag and waits until all blockNum cores have
     * reported before the exchange stages start.
     */
    __aicore__ inline void Gather()
    {
        int totalNum = sendLen / dim;                  // total rows to gather
        int gatherNumPerCore = totalNum / blockNum;    // rows per core (last core takes the remainder)
        int gatherNum = gatherNumPerCore;
        if (blockIdx == blockNum - 1) {
            gatherNum = totalNum - gatherNumPerCore * (blockNum -1);
        }
        // Two UB staging buffers for ping-pong; second sits at a 95 KB offset.
        __ubuf__ T* inputUBList[2] = {(__ubuf__ T*)get_imm(0), (__ubuf__ T*)get_imm(95*1024)};
        set_flag(PIPE_MTE3, PIPE_MTE2, EVENT_ID0);  // MTE2 waits for MTE3
        set_flag(PIPE_MTE3, PIPE_MTE2, EVENT_ID1);  // MTE2 waits for MTE3
        int copiedNum = 0;
        int copyId = 0;
        while (gatherNum > 0) {
            __ubuf__ T* inputUB = (copyId % PING_PONG_SIZE) ? inputUBList[0] : inputUBList[1];
            event_t event_id = (copyId % PING_PONG_SIZE) ? EVENT_ID0: EVENT_ID1;
            wait_flag(PIPE_MTE3, PIPE_MTE2, event_id);
            int toCopy = gatherNum < blockSize ? gatherNum : blockSize;
            // One GM->UB copy per looked-up row (indices are int32 in GM).
            for (int j = 0; j < toCopy; j++) {
                int embIndex = *((__gm__ int32_t*)lookup + gatherNumPerCore * blockIdx + copiedNum + j) * dim;
                CpGM2UB<T>((__ubuf__ T*)inputUB + j * dim, (__gm__ T*)emb_table + embIndex, dim * sizeof(T));
            }
            set_flag(PIPE_MTE2, PIPE_MTE3, event_id);
            wait_flag(PIPE_MTE2, PIPE_MTE3, event_id);
            // Flush the packed rows to this core's slot in gather_data.
            CpUB2GM<T>(
                (__gm__ T*)gather_data + dim * (gatherNumPerCore * blockIdx + copiedNum),
                (__ubuf__ T*)inputUB, toCopy * dim * sizeof(T));
            set_flag(PIPE_MTE3, PIPE_MTE2, event_id);
            gatherNum -= toCopy;
            copiedNum += toCopy;
            copyId += 1;
            set_flag(PIPE_S, PIPE_MTE3, EVENT_ID3); // MTE3 waits for Scalar
            wait_flag(PIPE_S, PIPE_MTE3, EVENT_ID3);
        }
        wait_flag(PIPE_MTE3, PIPE_MTE2, EVENT_ID0);  // MTE2 waits for MTE3
        wait_flag(PIPE_MTE3, PIPE_MTE2, EVENT_ID1);  // MTE2 waits for MTE3
        SetAtomicDataType<T>();
#ifdef __DAV_C220_VEC__
        SetAtomicOpType(0);
#endif
        // NOTE(review): SetFlag runs under the atomic mode configured above —
        // presumably each core's `1` accumulates until the flag reaches
        // blockNum; confirm against the sync implementation.
        sync.SetFlag((__gm__ int64_t*)(shareAddrs[rank] + IPC_DATA_OFFSET - 32), 1);
        set_atomic_none();
        pipe_barrier(PIPE_ALL);
        sync.WaitOneRankPartFlag((__gm__ int64_t*)(shareAddrs[rank] + IPC_DATA_OFFSET - 32), 1, blockNum);
    }

    /**
     * Entry point: run the gather stage, then dispatch this core to its
     * producer/consumer role (idle cores do nothing after Gather()).
     */
    __aicore__ inline void Process()
    {
        Gather();
        if (coreGroup == PRODUCER_CORE) {
            ProducerStage();
        }
        if (coreGroup == CONSUMER_CORE) {
            ConsumerStage();
            // Reset the gather-complete flag for the next invocation.
            sync.SetFlag((__gm__ int64_t*)(shareAddrs[rank] + IPC_DATA_OFFSET - 32), 0);
        }
    }

private:
    // Compute the queue count for large rank counts, and queElemLen, the
    // number of T elements a single block within each queue can hold.
    __aicore__ inline void InitShare()
    {
        int64_t queNum = coreNumsPerStage;  // minimum shared-memory partition count is the per-stage core count
        if (rankSize > coreNumsPerStage) {
            queNum = rankSize;
        }
        queElemLen = IPC_BUFF_MAX_SIZE / sizeof(T) / queNum / SHARE_QUE_DEPTH;  // elements per shared-queue block
    }

    // Assign each core to the producer, consumer or idle group and record
    // which rank(s) (possibly several virtual slots) this core serves.
    __aicore__ inline void InitCoreGroup()
    {
        // Cores assigned to each rank per stage; 1 in the multi-card case
        coreNumPerRank = coreNumsPerStage / rankSize > 1 ? coreNumsPerStage / rankSize : 1;
        // For small data, one core per rank suffices.
        // NOTE(review): `len` is set to 0 in Init() and never updated in this
        // file, so this branch always fires — confirm whether `len` was meant
        // to hold a real data length.
        if (len < queElemLen) {
            coreNumPerRank = 1;
        }
        // Equals coreNumsPerStage in the multi-card case
        coreNumPerStage =
            coreNumPerRank * rankSize < coreNumsPerStage ? coreNumPerRank * rankSize : coreNumsPerStage;
        // How many ranks one core handles
        rankNumPerCore = CeilDiv(rankSize, coreNumPerStage);

        // When one core serves multiple ranks, compute and store the virtual
        // core indices (one physical core executes the work of several
        // virtual-core slots).
        // In the multi-card case flagNumPerStage equals rankSize.
        flagNumPerStage = coreNumPerStage * rankNumPerCore;
        // Classify cores into stages and find the rank(s) this core handles
        if (blockIdx < coreNumPerStage) {
            coreGroup = PRODUCER_CORE;
            for (auto i = 0; i < rankNumPerCore; ++i) {
                groupCoreIdx[i] = blockIdx * rankNumPerCore + i;
            }
        } else if (blockIdx < coreNumPerStage + coreNumPerStage) {
            coreGroup = CONSUMER_CORE;
            for (auto i = 0; i < rankNumPerCore; ++i) {
                groupCoreIdx[i] = blockIdx * rankNumPerCore + i - flagNumPerStage;
            }
        } else {
            coreGroup = IDLER_CORE;
        }
    }

    // Compute queue geometry and delegate per-role slicing.
    __aicore__ inline void InitDataSlice()
    {
        queLen = queElemLen * SHARE_QUE_DEPTH;  // elements one whole queue can hold
        queSize = queLen * sizeof(T);

        // Producers move this rank's input data into shared memory, lookup-->share
        if (coreGroup == PRODUCER_CORE) {
            ProducerDataSlice();
        } else if (coreGroup == CONSUMER_CORE) {
            ConsumerDataSlice();
        }
    }

    // For each virtual slot of this producer core: bind its write queue and
    // compute the slice of gather_data it ships to its target rank.
    __aicore__ inline void ProducerDataSlice()
    {
        maxSliceNum = 0;
        for (auto i = 0; i < rankNumPerCore; ++i) {
            targetRank[i] = groupCoreIdx[i] / coreNumPerRank;
            if (targetRank[i] >= rankSize) {
                targetRank[i] = INVALID_RANK_NUM;  // slot beyond rankSize: nothing to do
                continue;
            }
            // IPC queue this slot writes (in this rank's own shared buffer)
            writeQue[i].Init(&sync, magic, shareAddrs[rank] + IPC_DATA_OFFSET + groupCoreIdx[i] * queSize, queLen,
                             queElemLen);

            // Data length and offset this slot is responsible for: skip the
            // counts destined for lower-numbered ranks.
            sendOffset[i] = 0;
            for (int j = 0; j < targetRank[i]; j++) {
                sendOffset[i] += sendCountMatrixGm.GetValue(rank * rankSize + j);
            }
            inputDataLen[i] = sendCountMatrixGm.GetValue(rank * rankSize + targetRank[i]);
            SplitData(inputDataLen[i], coreNumPerRank, groupCoreIdx[i] % coreNumPerRank, inputOffset[i], inputLen[i],
                      sendOffset[i]);
            // Slice count for this slot: how many queElemLen-sized chunks
            sliceNum[i] = CeilDiv(inputLen[i], queElemLen);
            if (sliceNum[i] > maxSliceNum) {
                maxSliceNum = sliceNum[i];
            }
        }
    }

    // For each virtual slot of this consumer core: bind the peer's read
    // queue and compute the slice of `output` it fills.
    __aicore__ inline void ConsumerDataSlice()
    {
        maxSliceNum = 0;
        for (auto i = 0; i < rankNumPerCore; ++i) {
            // Rank this slot is responsible for
            targetRank[i] = groupCoreIdx[i] / coreNumPerRank;
            if (targetRank[i] >= rankSize) {
                targetRank[i] = INVALID_RANK_NUM;  // slot beyond rankSize: nothing to do
                continue;
            }
            // IPC queue this slot reads (in the target rank's shared buffer,
            // at the queue index that rank's producer wrote for us)
            readQue[i].Init(
                &sync, magic,
                shareAddrs[targetRank[i]] + IPC_DATA_OFFSET +
                    (rank * coreNumPerRank + groupCoreIdx[i] % coreNumPerRank) * queSize,
                queLen, queElemLen);
            // Data length and offset this slot is responsible for: skip the
            // counts arriving from lower-numbered ranks.
            revOffset[i] = 0;
            for (int j = 0; j < targetRank[i]; j++) {
                revOffset[i] += sendCountMatrixGm.GetValue(j * rankSize + rank);
            }
            outputDataLen[i] = sendCountMatrixGm.GetValue(targetRank[i] * rankSize + rank);

            SplitData(outputDataLen[i], coreNumPerRank, groupCoreIdx[i] % coreNumPerRank, outputOffset[i], outputLen[i],
                      revOffset[i]);
            // Slice count for this slot
            sliceNum[i] = CeilDiv(outputLen[i], queElemLen);
            if (sliceNum[i] > maxSliceNum) {
                maxSliceNum = sliceNum[i];
            }
        }
    }

    /**
     * Split totalLen elements across useCoreNum cores; core useCoreIdx gets
     * [dataOffset, dataOffset + dataLen) relative to startOffset.
     * Trailing cores may receive dataLen == 0.
     */
    __aicore__ inline void SplitData(const int64_t totalLen, const int64_t useCoreNum, const int64_t useCoreIdx,
                                     int64_t& dataOffset, int64_t& dataLen, int startOffset)
    {
        // Ceil-divide to get the per-core element count
        dataLen = CeilDiv(totalLen, useCoreNum);
        // When the data is tiny or barely exceeds the core count, trailing cores get 0 elements
        dataOffset = useCoreIdx * dataLen + startOffset;  // offset from the core's relative index within the group
        if (useCoreIdx * dataLen >= totalLen) {
            dataOffset = totalLen + startOffset;
            dataLen = 0;
            return;
        }
        // Non-divisible case: the last core takes whatever remains
        if (dataOffset + dataLen - startOffset > totalLen) {
            dataLen = totalLen - useCoreIdx * dataLen;
        }
    }

    // Producer role: set up per-slot wait lists, then run the input->share pipeline.
    __aicore__ inline void ProducerStage()
    {
        for (auto i = 0; i < rankNumPerCore; ++i) {
            if (targetRank[i] == INVALID_RANK_NUM) {
                continue;
            }

            // Writing the shared queue must wait on the target rank's consumer
            waitRankListForWrite[i][0] = targetRank[i];
            waitNumForWrite[i] = 1;
            waitBlockForWrite[i] = rank * coreNumPerRank + groupCoreIdx[i] % coreNumPerRank + flagNumPerStage;
        }
        InputToSharePipeline();
    }

    // Drive every slot through maxSliceNum slice iterations; slots with
    // fewer slices simply copy nothing on the extra iterations.
    __aicore__ inline void InputToSharePipeline()
    {
        int64_t flagValue[MULTI_RANK_SIZE];  // last-seen flag values for the slots
        for (auto i = 0; i < rankNumPerCore; ++i) {
            flagValue[i] = -1;  // initialize to -1 to simplify the later less-than checks
        }
        // Loop over the largest slice count; slots with fewer slices skip the copy
        for (auto sliceIdx = 0; sliceIdx < maxSliceNum; ++sliceIdx) {
            for (auto i = 0; i < rankNumPerCore; ++i) {
                if (targetRank[i] == INVALID_RANK_NUM) {
                    continue;
                }
                InputToShareSlice(i, sliceIdx, flagValue[i]);
            }
        }
    }

    // Copy one slice of this slot's input region into its write queue and
    // publish the slice's inner flag.
    // NOTE(review): flagValue is not consulted here (the consumer-side
    // counterpart ShareToOutputSlice uses it) — kept for signature symmetry.
    __aicore__ inline void InputToShareSlice(int64_t idx, int64_t sliceIdx, int64_t& flagValue)
    {
        readGt = inputGt[sliceIdx * queElemLen + inputOffset[idx]];
        // Compute this slice's copy length; skip the copy when it is 0
        copyLen = inputLen[idx] - queElemLen * sliceIdx;
        if (copyLen > queElemLen) {
            copyLen = queElemLen;
        } else if (copyLen < 0) {
            copyLen = 0;
        }
        writeQue[idx].DeQue(waitRankListForWrite[idx], waitNumForWrite[idx], waitBlockForWrite[idx], sliceIdx);
        writeGt = writeQue[idx].EnQue();
        if (copyLen > 0) {
            CpGM2GMPingPong<T>(copyLen * sizeof(T), readGt, writeGt, COPYONLY);
        }
        sync.SetInnerFlag(magic, sliceIdx, rank, groupCoreIdx[idx]);
    }

    // Consumer role: pull each slice from the peers' queues into `output`.
    __aicore__ inline void ConsumerStage()
    {
        int64_t flagValue[MULTI_RANK_SIZE];  // last-seen flag values for the slots
        for (auto i = 0; i < rankNumPerCore; ++i) {
            flagValue[i] = -1;
        }
        // Loop over the largest slice count; slots with fewer slices skip the copy
        for (auto sliceIdx = 0; sliceIdx < maxSliceNum; ++sliceIdx) {
            for (auto i = 0; i < rankNumPerCore; ++i) {
                if (targetRank[i] == INVALID_RANK_NUM) {
                    continue;
                }
                ShareToOutputSlice(i, sliceIdx, flagValue[i]);
            }
        }
    }

    // Wait for the producer's slice flag, copy one slice from the peer's
    // queue into `output`, then publish our own per-slice flag (plus final
    // completion flags on the last slice).
    __aicore__ inline void ShareToOutputSlice(int64_t idx, int64_t sliceIdx, int64_t& flagValue)
    {
        // Compute this slice's copy length; skip the copy when it is 0
        copyLen = outputLen[idx] - queElemLen * sliceIdx;
        if (copyLen > queElemLen) {
            copyLen = queElemLen;
        } else if (copyLen < 0) {
            copyLen = 0;
        }

        // Pull this rank's data: only block on the flag if we have not
        // already observed a value covering sliceIdx
        if (flagValue < sliceIdx) {
            sync.WaitInnerFlag(
                magic, sliceIdx, targetRank[idx], rank * coreNumPerRank + groupCoreIdx[idx] % coreNumPerRank);
            flagValue = sync.GetInnerFlag(
                targetRank[idx], rank * coreNumPerRank + groupCoreIdx[idx] % coreNumPerRank) & EVENT_ID_MASK;
        }
        readGt = readQue[idx].ReadFront();
        if (copyLen > 0) {
            writeGt = outputGt[sliceIdx * queElemLen + outputOffset[idx]];
            CpGM2GMPingPong<T>(copyLen * sizeof(T), readGt, writeGt, COPYONLY);
        }
        sync.SetInnerFlag(magic, sliceIdx, rank, groupCoreIdx[idx] + flagNumPerStage);

        if (sliceIdx == sliceNum[idx] - 1) {
            // Last slice for this slot: signal completion locally and to the peer
            sync.SetInnerFlag(1, 0, rank, groupCoreIdx[idx] + flagNumPerStage);
            sync.SetInnerFlag(1, 0, targetRank[idx], rank * coreNumPerRank + groupCoreIdx[idx] % coreNumPerRank);
        }
    }

    GlobalTensor<T> inputGt;                // view over gather_data (sendLen elements)
    GlobalTensor<T> outputGt;               // view over output (revLen elements)
    GlobalTensor<T> readGt;                 // scratch: current read position
    GlobalTensor<T> writeGt;                // scratch: current write position
    GM_ADDR lookup;                         // int32 row indices into emb_table
    GM_ADDR output;                         // GM receive buffer
    GM_ADDR gather_data;                    // gathered rows, in this rank's other IPC ping-pong half
    GlobalTensor<int64_t> sendCountMatrixGm;  // rankSize x rankSize element counts
    int blockSize;                          // rows per UB ping-pong DMA burst
    GM_ADDR emb_table;                      // embedding table in GM
    int64_t coreNumsPerStage;               // cores budgeted per stage (16)
    int64_t revLen = 0;                     // total elements this rank receives
    int64_t sendLen = 0;                    // total elements this rank sends
    int64_t sendOffset[MULTI_RANK_SIZE];    // send-side base offset per slot (elements of T)
    int64_t revOffset[MULTI_RANK_SIZE];     // receive-side base offset per slot (elements of T)
    int64_t inputDataLen[MULTI_RANK_SIZE];  // elements sent to each slot's target rank
    int64_t outputDataLen[MULTI_RANK_SIZE]; // elements received from each slot's target rank

    int waitRankListForWrite[MULTI_RANK_SIZE][1];  // ranks to wait on before writing shared memory
    int waitNumForWrite[MULTI_RANK_SIZE];          // how many ranks to wait on before writing
    int waitBlockForWrite[MULTI_RANK_SIZE];        // flag index to wait on before writing

    int64_t maxSliceNum;                    // largest slice count across this core's slots
    int64_t dim;                            // embedding row width (elements of T)
    int64_t queLen;                         // elements one whole queue holds
    int64_t queSize;                        // bytes of one whole queue
    int64_t coreNumPerStage;                // cores actually used per stage
    int64_t flagNumPerStage;                // sync flags used per stage
    int64_t coreNumPerRank;                 // cores assigned to each rank's data
    int64_t rankNumPerCore;                 // ranks handled by each core
    int64_t coreGroup;                      // this core's role (IDLER/PRODUCER/CONSUMER)
    int64_t groupCoreIdx[MULTI_RANK_SIZE];  // this core's index within its group (may be virtual-core indices)
    int64_t targetRank[MULTI_RANK_SIZE];    // rank each slot serves (INVALID_RANK_NUM if none)

    IpcQueue<T> readQue[MULTI_RANK_SIZE];   // read-side shared-memory queues
    IpcQueue<T> writeQue[MULTI_RANK_SIZE];  // write-side shared-memory queues
    int64_t queElemLen;                     // elements per queue block (in units of T)

    int64_t sliceNum[MULTI_RANK_SIZE];      // total data slices per slot
    int64_t copyLen;                        // length of the slice currently being copied (in T)
    int64_t inputOffset[MULTI_RANK_SIZE];   // input offset per slot (in T)
    int64_t inputLen[MULTI_RANK_SIZE];      // input length per slot (in T)
    int64_t outputOffset[MULTI_RANK_SIZE];  // output offset per slot (in T)
    int64_t outputLen[MULTI_RANK_SIZE];     // output length per slot (in T)
};

#endif  // LCCL_GATHER_ALL_H
