#include "kernel_operator.h"

using namespace AscendC;

#define ull uint64_t

template<typename T> class KernelGroupNormV2Brute {
private:
    TPipe* pipe;
    GlobalTensor<T> xGm;
    GlobalTensor<T> gammaGm;   // per-channel scale, length c
    GlobalTensor<T> betaGm;    // per-channel shift, length c
    GlobalTensor<T> yGm;
    GlobalTensor<T> meanGm;    // per-(batch, group) mean output, length n * num_groups
    GlobalTensor<T> varGm;     // per-(batch, group) variance, written into the rstd GM buffer
    ull totalLength;
    ull tileNum;
    ull num_groups;
    bool data_format;
    float eps;
    bool is_training;
    ull n, c, h, w;

public:
    __aicore__ inline KernelGroupNormV2Brute() {}
    // Scalar fallback GroupNorm: every element goes through GM GetValue/SetValue
    // with no UB staging — slow but straightforward and shape-agnostic.
    // NOTE(review): the dispatcher passes the collapsed h*w extent as `h` and 1
    // as `w`, so Process() only iterates over `h`; `w` is stored but unused.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR gamma, GM_ADDR beta, GM_ADDR y, GM_ADDR mean, GM_ADDR rstd, GM_ADDR workspace, TPipe* pipeIn, ull totalLength, ull tileNum, ull num_groups, bool data_format, float eps, bool is_training, ull n, ull c, ull h, ull w)
    {
        pipe = pipeIn;

        this->totalLength = totalLength;
        this->tileNum = tileNum;
        this->num_groups = num_groups;
        this->data_format = data_format;
        this->eps = eps;
        this->is_training = is_training;
        this->n = n;
        this->c = c;
        this->h = h;
        this->w = w;

        xGm.SetGlobalBuffer((__gm__ T *)x, totalLength);
        gammaGm.SetGlobalBuffer((__gm__ T *)gamma, c);
        betaGm.SetGlobalBuffer((__gm__ T *)beta, c);
        yGm.SetGlobalBuffer((__gm__ T *)y, totalLength);

        meanGm.SetGlobalBuffer((__gm__ T *)mean, n * num_groups);
        // Intentional: the variance (not the reciprocal std) is emitted
        // through the `rstd` output slot — keep callers in sync.
        varGm.SetGlobalBuffer((__gm__ T *)rstd, n * num_groups);
    }
    // Three scalar passes per batch: (1) per-group mean, (2) per-group biased
    // variance, (3) y = (x - mean) / sqrt(var + eps) * gamma + beta.
    __aicore__ inline void Process() {
        ull groupSize = c / num_groups;   // channels per group along the C axis
        for (ull i = 0; i < n; i++) {
            // Pass 1: per-group mean.
            for (ull j = 0; j < num_groups; j++) {
                ull base = i * c * h + j * groupSize * h;
                float sum = 0;
                for (ull k = 0; k < groupSize; k++) {
                    for (ull l = 0; l < h; l++) {
                        sum += static_cast<float>(xGm.GetValue(base + k * h + l));
                    }
                }
                float mean = sum / (groupSize * h);
                meanGm.SetValue(i * num_groups + j, static_cast<T>(mean));
            }
            // Pass 2: per-group biased variance (divide by N, not N-1).
            for (ull j = 0; j < num_groups; j++) {
                ull base = i * c * h + j * groupSize * h;
                // Loop-invariant: read the group's mean once instead of once per element.
                float mean = static_cast<float>(meanGm.GetValue(i * num_groups + j));
                float sum = 0;
                for (ull k = 0; k < groupSize; k++) {
                    for (ull l = 0; l < h; l++) {
                        float x = static_cast<float>(xGm.GetValue(base + k * h + l));
                        sum += sqr(x - mean);
                    }
                }
                float var = sum / (groupSize * h);
                varGm.SetValue(i * num_groups + j, static_cast<T>(var));
            }
            // Pass 3: normalize and apply the per-channel affine transform.
            for (ull j = 0; j < c; j++) {
                // All per-channel quantities are invariant over the spatial loop:
                // hoist the GM reads and the sqrt out of the inner loop.
                float mean = static_cast<float>(meanGm.GetValue(i * num_groups + j / groupSize));
                float var = static_cast<float>(varGm.GetValue(i * num_groups + j / groupSize));
                float denom = sqrt(var + eps);
                float gamma = static_cast<float>(gammaGm.GetValue(j));
                float beta = static_cast<float>(betaGm.GetValue(j));
                ull base = i * c * h + j * h;
                for (ull k = 0; k < h; k++) {
                    float value = static_cast<float>(xGm.GetValue(base + k));
                    yGm.SetValue(base + k, static_cast<T>((value - mean) / denom * gamma + beta));
                }
            }
        }
    }

private:
    // Square helper used by the variance pass.
    __aicore__ inline float sqr(float x) {
        return x * x;
    }
};


// Double buffering disabled; BUFFER_NUM = 2 would enable ping-pong buffers.
constexpr int32_t BUFFER_NUM = 1;

// Vectorized GroupNorm kernel: each AI core handles one contiguous slice of
// the input (totalLength / blockNum elements) and streams it through UB in
// tiles, computing mean/variance with vector reductions.
template<typename T> class KernelGroupNormV2 {
private:
    TPipe* pipe;
    GlobalTensor<T> xGm;      // this core's input slice
    GlobalTensor<T> gammaGm;  // per-channel scale, offset to this core's group
    GlobalTensor<T> betaGm;   // per-channel shift, offset to this core's group
    GlobalTensor<T> yGm;      // this core's output slice
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueSrc;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueDst;
    ull totalLength;   // total element count of x / y
    ull blockLength;   // elements handled by this core
    ull num_groups;
    bool data_format;
    float eps;
    bool is_training;
    ull n, c, hw;      // hw = h * w collapsed into one spatial axis
    ull groupLength;   // channels per group (c / num_groups)
    ull groupSize;     // elements per group (groupLength * hw)
    ull ubLimit = 192000;   // UB byte budget; hardware limit noted as 196352
    ull tileNum;       // tiles needed to stream one channel's hw elements
    ull tileLength;    // elements per tile
    ull tileSize;      // == tileLength (one row per tile)

public:
    __aicore__ inline KernelGroupNormV2() {}
    // Binds the GM buffers for this core and sizes the UB tiling so that one
    // input tile plus one output tile fit within ubLimit.
    // NOTE(review): mean/rstd/workspace are accepted but never used by this
    // kernel — confirm callers don't read those outputs when tiling key 2 is
    // selected.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR gamma, GM_ADDR beta, GM_ADDR y, GM_ADDR mean, GM_ADDR rstd, GM_ADDR workspace, TPipe* pipeIn, ull totalLength, ull num_groups, bool data_format, float eps, bool is_training, ull n, ull c, ull hw)
    {
        pipe = pipeIn;

        this->totalLength = totalLength;
        this->num_groups = num_groups;
        this->data_format = data_format;
        this->eps = eps;
        this->is_training = is_training;
        this->n = n;
        this->c = c;
        this->hw = hw;

        // Assumes the launch uses one core per (batch, group) pair so that
        // blockLength == groupSize — TODO confirm against the tiling code.
        blockLength = totalLength / GetBlockNum();

        groupLength = c / num_groups;
        groupSize = groupLength * hw;

        ull offset = blockLength * GetBlockIdx();
        xGm.SetGlobalBuffer((__gm__ T *)x + offset, blockLength);
        yGm.SetGlobalBuffer((__gm__ T *)y + offset, blockLength);

        // gamma/beta are indexed by absolute channel; offset to the first
        // channel of the group this core owns.
        ull cStart = GetBlockIdx() % num_groups * groupLength;
        gammaGm.SetGlobalBuffer((__gm__ T *)gamma + cStart, groupLength);
        betaGm.SetGlobalBuffer((__gm__ T *)beta + cStart, groupLength);

        // Choose the smallest tile count such that two tiles (in + out) of
        // one channel's hw elements fit in the UB budget.
        ull hwBytes = hw * sizeof(T);
        tileNum = (hwBytes * 2 + ubLimit - 1) / ubLimit;
        tileLength = (hw + tileNum - 1) / tileNum;
        tileSize = tileLength * 1;
        ull tileBytes = tileSize * sizeof(T);

        pipe->InitBuffer(inQueueSrc, BUFFER_NUM, tileBytes);
        pipe->InitBuffer(outQueueDst, BUFFER_NUM, tileBytes);
    }
    // Three streaming passes over this core's group:
    //   1) sum of elements            -> mean
    //   2) sum of squared deviations  -> variance -> rstd
    //   3) normalize + per-channel affine -> y
    __aicore__ inline void Process() {
        DataCopyPadExtParams<T> padParams{true, 0, 0, 0};
        // One UB buffer is reused across passes 1 and 2 (BUFFER_NUM == 1);
        // the EnQue/DeQue pair after each DataCopyPad inserts the MTE2->vector
        // synchronization.
        LocalTensor<T> srcLocal = inQueueSrc.AllocTensor<T>();
        // Pass 1: accumulate the group sum.
        float sum = 0;
        for (int i = 0; i < groupLength; i++) {
            for (int j = 0; j < tileNum; j++) {
                ull st = j * tileLength;
                ull ed = min(st + tileLength, hw);
                ull nowLength = ed - st;
                DataCopyExtParams copyParams{1, static_cast<uint32_t>(nowLength * sizeof(T)), 0, 0, 0};

                DataCopyPad(srcLocal, xGm[i * hw + st], copyParams, padParams);
                inQueueSrc.EnQue(srcLocal);
                srcLocal = inQueueSrc.DeQue<T>();
                // In-place tile reduction; element 0 holds the tile sum.
                ReduceSum<T>(srcLocal, srcLocal, srcLocal, nowLength);
                sum += (float)srcLocal.GetValue(0);
            }
        }
        T mean = sum / groupSize;
        // Negate so the Adds() calls below effectively subtract the mean.
        mean = -mean;
        // Pass 2: sum of squared deviations for the (biased) variance.
        sum = 0;
        for (int i = 0; i < groupLength; i++) {
            for (int j = 0; j < tileNum; j++) {
                ull st = j * tileLength;
                ull ed = min(st + tileLength, hw);
                ull nowLength = ed - st;
                DataCopyExtParams copyParams{1, static_cast<uint32_t>(nowLength * sizeof(T)), 0, 0, 0};

                DataCopyPad(srcLocal, xGm[i * hw + st], copyParams, padParams);
                inQueueSrc.EnQue(srcLocal);
                srcLocal = inQueueSrc.DeQue<T>();
                // x - mean (mean already negated above)
                Adds(srcLocal, srcLocal, mean, nowLength);
                // (x - mean)^2
                Mul(srcLocal, srcLocal, srcLocal, nowLength);
                // tile sum
                ReduceSum<T>(srcLocal, srcLocal, srcLocal, nowLength);
                sum += (float)srcLocal.GetValue(0);
            }
        }
        T var = sum / groupSize;
        T rstd = 1.0f / sqrt(var + eps);
        inQueueSrc.FreeTensor(srcLocal);

        // Pass 3: y = (x - mean) * rstd * gamma + beta, one tile at a time.
        for (int i = 0; i < groupLength; i++) {
            T gamma = gammaGm.GetValue(i);
            T beta = betaGm.GetValue(i);
            for (int j = 0; j < tileNum; j++) {
                LocalTensor<T> srcLocal = inQueueSrc.AllocTensor<T>();
                LocalTensor<T> dstLocal = outQueueDst.AllocTensor<T>();
                ull st = j * tileLength;
                ull ed = min(st + tileLength, hw);
                ull nowLength = ed - st;
                DataCopyExtParams copyParams{1, static_cast<uint32_t>(nowLength * sizeof(T)), 0, 0, 0};

                DataCopyPad(srcLocal, xGm[i * hw + st], copyParams, padParams);
                inQueueSrc.EnQue(srcLocal);
                srcLocal = inQueueSrc.DeQue<T>();
                // x - mean (mean already negated)
                Adds(srcLocal, srcLocal, mean, nowLength);
                // * rstd * gamma (scale factors fused into one scalar)
                Muls(srcLocal, srcLocal, rstd * gamma, nowLength);
                // + beta, written to the output buffer
                Adds(dstLocal, srcLocal, beta, nowLength);
                outQueueDst.EnQue(dstLocal);

                // vector->MTE3 sync, then copy the tile back to GM.
                dstLocal = outQueueDst.DeQue<T>();
                DataCopyPad(yGm[i * hw + st], dstLocal, copyParams);
                inQueueSrc.FreeTensor(srcLocal);
                outQueueDst.FreeTensor(dstLocal);
            }
        }
    }
};

// Kernel entry point: unpacks the tiling data and dispatches to the
// implementation selected by the tiling key (the keys are exclusive).
extern "C" __global__ __aicore__ void group_norm_v2(GM_ADDR x, GM_ADDR gamma, GM_ADDR beta, GM_ADDR y, GM_ADDR mean, GM_ADDR rstd, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    TPipe pipe;
    if (TILING_KEY_IS(2)) {
        // Key 2: vectorized float path, one core per group slice.
        KernelGroupNormV2<float> kernel;
        kernel.Init(x, gamma, beta, y, mean, rstd, workspace, &pipe,
                    tiling_data.totalLength, tiling_data.num_groups,
                    tiling_data.data_format, tiling_data.eps, tiling_data.is_training,
                    tiling_data.n, tiling_data.c, tiling_data.hw);
        kernel.Process();
    } else if (TILING_KEY_IS(1)) {
        // Key 1: scalar fallback; the collapsed h*w extent is passed as `h`
        // and the trailing 1 as `w`.
        KernelGroupNormV2Brute<DTYPE_X> kernel;
        kernel.Init(x, gamma, beta, y, mean, rstd, workspace, &pipe,
                    tiling_data.totalLength, tiling_data.tileNum, tiling_data.num_groups,
                    tiling_data.data_format, tiling_data.eps, tiling_data.is_training,
                    tiling_data.n, tiling_data.c, tiling_data.hw, 1);
        kernel.Process();
    }
}