#include "kernel_operator.h"
using namespace AscendC;

template <typename TYPE_X, typename TYPE_INDICES, typename TYPE_AXIS>
class KernelGatherV3 {
private:
    // Global Memory Tensors
    GlobalTensor<TYPE_X>        inputGm;         // input data
    GlobalTensor<TYPE_INDICES>  indicesGm;       // indices
    GlobalTensor<TYPE_X>        outputGm;        // output
    GlobalTensor<TYPE_AXIS>     axisGm;          // axis scalar

    // Computation Dimensions, derived in Init() from the tiling shapes:
    //   input  is viewed as [batchSize, outerDims, axisDim, innerDims]
    //   output is viewed as [batchSize, outerDims, indicesInnerCount, innerDims]
    uint64_t batchSize;         // product of leading batch dims (shared by x and indices)
    uint64_t outerDims;         // product of x dims between batch and the gather axis
    uint64_t axisDim;           // size of x along the gather axis
    uint64_t indicesInnerCount; // number of indices per batch (product of non-batch indices dims)
    uint64_t innerDims;         // product of x dims after the gather axis

    // Block-based Data Movement
    uint32_t blockSize;        // elements moved through UB per transfer (e.g., 1024)
    uint64_t blockCount;       // number of blocks needed to cover innerDims

    uint64_t* inputShape;
    uint64_t* indicesShape;

    // Single UB staging buffer; reused for every block transfer.
    TBuf<TPosition::VECCALC> localBuf;

    // Product of shape[begin..end) — returns 1 for an empty range (begin >= end).
    __aicore__ inline uint64_t DimProduct(uint64_t* shape, uint64_t begin, uint64_t end) {
        uint64_t prod = 1;
        for (uint64_t i = begin; i < end; ++i) {
            prod *= shape[i];
        }
        return prod;
    }

public:

    __aicore__ inline KernelGatherV3()
        : inputGm(), indicesGm(), outputGm(), axisGm(), localBuf()
    {}

    // Bind GM buffers, read the axis scalar and precompute the collapsed
    // [batch, outer, axis, inner] view used by Process().
    //
    // batchDims    : number of leading dims shared by x and indices
    // xDim         : rank of x
    // indicesDim   : rank of indices
    // xShape       : dim sizes of x (length xDim)
    // indicesShape : dim sizes of indices (length indicesDim)
    __aicore__ inline void Init(
        TPipe* pipe,
        GM_ADDR x,              // input
        GM_ADDR indices,        // indices
        GM_ADDR axis,           // axis scalar
        GM_ADDR out,            // output
        uint64_t batchDims,
        uint64_t xDim,
        uint64_t indicesDim,
        uint64_t* xShape,
        uint64_t* indicesShape
    ) {
        // Read axis value. Go through a signed intermediate so that a
        // negative axis (counting from the end, standard gather semantics)
        // is normalized instead of wrapping to a huge unsigned value.
        axisGm.SetGlobalBuffer((__gm__ TYPE_AXIS*)axis);
        int64_t axisSigned = static_cast<int64_t>(axisGm.GetValue(0));
        if (axisSigned < 0) {
            axisSigned += static_cast<int64_t>(xDim);
        }
        // Clamp anything still out of [0, xDim) to 0 (defensive fallback,
        // preserving the original behavior for invalid axis values).
        uint64_t axisVal =
            (axisSigned >= 0 && static_cast<uint64_t>(axisSigned) < xDim)
                ? static_cast<uint64_t>(axisSigned) : 0;

        // Set shape pointers
        inputShape = xShape;
        this->indicesShape = indicesShape;

        // Compute collapsed dimensions
        batchSize        = DimProduct(xShape, 0, batchDims);
        outerDims        = DimProduct(xShape, batchDims, axisVal);
        axisDim          = xShape[axisVal];
        indicesInnerCount= DimProduct(indicesShape, batchDims, indicesDim);
        innerDims        = DimProduct(xShape, axisVal + 1, xDim);

        // Block configuration: ceil(innerDims / blockSize) transfers per row.
        blockSize = 1024;
        blockCount = (innerDims + blockSize - 1) / blockSize;

        // Setup GM tensors
        inputGm.SetGlobalBuffer((__gm__ TYPE_X*)x);
        indicesGm.SetGlobalBuffer((__gm__ TYPE_INDICES*)indices);
        outputGm.SetGlobalBuffer((__gm__ TYPE_X*)out);

        // Initialize the UB staging buffer (one block worth of elements).
        pipe->InitBuffer(localBuf, blockSize * sizeof(TYPE_X));
    }

    // Gather rows of length innerDims from input to output, one UB block
    // at a time, serialized through Set/WaitFlag pairs so the single
    // staging buffer is never read and written concurrently.
    __aicore__ inline void Process() {
        // The staging view is loop-invariant; fetch it once.
        LocalTensor<TYPE_X> ubTensor = localBuf.Get<TYPE_X>();

        // Main loop: process each batch
        for (uint64_t batchIdx = 0; batchIdx < batchSize; ++batchIdx) {
            uint64_t inputBatchOffset = batchIdx * outerDims * axisDim * innerDims;
            uint64_t indicesBatchOffset = batchIdx * indicesInnerCount;
            uint64_t outputBatchOffset = batchIdx * outerDims * indicesInnerCount * innerDims;

            for (uint64_t outerIdx = 0; outerIdx < outerDims; ++outerIdx) {
                for (uint64_t idxIdx = 0; idxIdx < indicesInnerCount; ++idxIdx) {
                    // Read index value; normalize negative indices
                    // (idx in [-axisDim, -1] addresses from the end).
                    int64_t gatherIdx = static_cast<int64_t>(indicesGm.GetValue(indicesBatchOffset + idxIdx));
                    if (gatherIdx < 0) {
                        gatherIdx += static_cast<int64_t>(axisDim);
                    }

                    // Bounds check: out-of-range indices are skipped, leaving
                    // the corresponding output row untouched.
                    if (gatherIdx < 0 || static_cast<uint64_t>(gatherIdx) >= axisDim) {
                        continue;  // Skip invalid index
                    }

                    // Copy inner dimensions in blocks
                    for (uint64_t blockId = 0; blockId < blockCount; ++blockId) {
                        uint64_t blockOffset = blockId * blockSize;
                        uint64_t remaining = innerDims - blockOffset;
                        uint64_t copyLen = (remaining >= blockSize) ? blockSize : remaining;

                        uint64_t inputOffset  = inputBatchOffset + ((outerIdx * axisDim + gatherIdx) * innerDims + blockOffset);
                        uint64_t outputOffset = outputBatchOffset + ((outerIdx * indicesInnerCount + idxIdx) * innerDims + blockOffset);

                        if (copyLen == blockSize) {
                            // Full block: plain GM->UB->GM copy.
                            // MTE2_MTE3 flag orders the UB write (load) before the
                            // UB read (store); MTE3_MTE2 orders the store before
                            // the next iteration reuses the buffer.
                            DataCopy(ubTensor, inputGm[inputOffset], copyLen);
                            AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE3>(1);
                            AscendC::WaitFlag<AscendC::HardEvent::MTE2_MTE3>(1);
                            // NOTE(review): this PipeBarrier looks redundant after the
                            // Set/WaitFlag pair above — kept as-is; confirm before removing.
                            PipeBarrier<PIPE_MTE2>();
                            DataCopy(outputGm[outputOffset], ubTensor, copyLen);
                            AscendC::SetFlag<AscendC::HardEvent::MTE3_MTE2>(1);
                            AscendC::WaitFlag<AscendC::HardEvent::MTE3_MTE2>(1);
                        } else {
                            // Partial (tail) block: DataCopyPad handles lengths that
                            // are not a multiple of the 32B transfer granule.
                            DataCopyExtParams copyParams{1, static_cast<uint32_t>(copyLen * sizeof(TYPE_X)), 0, 0, 0};
                            DataCopyPadExtParams<TYPE_X> padParams{false, 0, 0, 0};
                            DataCopyPad(ubTensor, inputGm[inputOffset], copyParams, padParams);
                            AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE3>(0);
                            AscendC::WaitFlag<AscendC::HardEvent::MTE2_MTE3>(0);
                            // NOTE(review): likely redundant (see full-block path) — confirm.
                            PipeBarrier<PIPE_MTE2>();
                            DataCopyPad(outputGm[outputOffset], ubTensor, copyParams);
                            AscendC::SetFlag<AscendC::HardEvent::MTE3_MTE2>(0);
                            AscendC::WaitFlag<AscendC::HardEvent::MTE3_MTE2>(0);
                        }
                    }
                }
            }
        }
    }
};

// Entry point
extern "C" __global__ __aicore__ void gather_v3(
    GM_ADDR x,
    GM_ADDR indices,
    GM_ADDR axis,
    GM_ADDR out,
    GM_ADDR workspace,   // unused by this kernel; reserved by the calling convention
    GM_ADDR tiling
) {
    // Deserialize the host-prepared tiling payload into a local struct.
    GET_TILING_DATA(td, tiling);

    // One TPipe instance owns all on-chip buffer allocation for the kernel.
    TPipe tpipe;

    KernelGatherV3<DTYPE_X, DTYPE_INDICES, DTYPE_AXIS> kernel;
    kernel.Init(&tpipe, x, indices, axis, out,
                td.batchdim,
                td.xdim,
                td.indicesdim,
                td.x_ndarray,
                td.indices_ndarray);
    kernel.Process();
}