/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/**
 * @file channel_mixing_310p.h
 */
#ifndef CHANNEL_MIXING_310P_H
#define CHANNEL_MIXING_310P_H
#include "kernel_operator.h"
#include "lib/matmul_intf.h"
using namespace AscendC;
using namespace matmul;
// Unified Buffer (UB) capacity budget on 310P: 262144 bytes (256 KB) — all InitBuffer
// allocations below must fit within this limit.


// Channel-mixing kernel for Ascend 310P with a fixed channel width C = 2560.
// Appears to implement an RWKV-style channel-mixing step — TODO confirm:
//   1) Vector phase:  out[t] = x[t] + xk * (x[t-1] - x[t])   (token-shift mix; x[-1] := h0)
//   2) Matmul 1:      hs = out @ kw^T
//   3) Vector phase:  hs = relu(hs)^2                        (squared ReLU, in place)
//   4) Matmul 2:      out = hs @ vw^T                        (overwrites the GM `out` buffer)
// Phases are separated by MTE3->MTE2 hardware events plus a cross-core SyncAll,
// because `outGm` is written by phase 1, read by matmul 1, and written again by
// matmul 2 (deliberate GM aliasing).
class ChannelMixingKernel310p {
public:
    __aicore__ inline ChannelMixingKernel310p() {}

    // Sets up GM tensor views, UB queues, the matmul tilings and the
    // cross-core sync scratch area.
    //   x   : [T, C] half, input tokens        h0 : [C] half, previous hidden state
    //   xk  : [C] half, mixing coefficients    kw, vw : weight matrices (consumed transposed)
    //   out : [T, C] half, mixed tokens, later overwritten by the final result
    //   ht  : [C] half, new hidden state       hs : intermediate (matmul1 out / matmul2 in)
    //   workspace : GM scratch used for the SyncAll flag area
    // Note: x/h0/out/ht base pointers are deliberately biased backwards by one
    // step here; Process() advances them forward again (the per-batch loop over
    // B that would re-advance them each iteration is currently commented out).
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR h0, GM_ADDR xk, GM_ADDR kw, GM_ADDR vw, GM_ADDR out, GM_ADDR ht, 
                                GM_ADDR hs, GM_ADDR workspace, const TCubeTiling &tiling1, const TCubeTiling &tiling2, 
                                int32_t B, int32_t T,
                                TPipe *pipe) 
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->tiling1 = tiling1;
        this->tiling2 = tiling2;
        this->T = T;
        // this->B = B;

        // Bias the per-batch pointers back by one "step" so the (disabled)
        // batch loop in Process() can advance them uniformly.
        xGm.SetGlobalBuffer((__gm__ half*)x - this->T*C);
        h0Gm.SetGlobalBuffer((__gm__ half*)h0  - C);
        xkGm.SetGlobalBuffer((__gm__ half*)xk);
        kwGm.SetGlobalBuffer((__gm__ half*)kw);
        vwGm.SetGlobalBuffer((__gm__ half*)vw);

        outGm.SetGlobalBuffer((__gm__ half*)out  - this->T*C);
        htGm.SetGlobalBuffer((__gm__ half*)ht  - C);

        hsGm.SetGlobalBuffer((__gm__ half*)hs);
      

        // Zero-initialize the 8x8 int32 SyncAll flag area in GM.
        syncGm.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t *>(workspace), 8*8);
        pipe->InitBuffer(workQueue, 1, 8 * 8 * sizeof(int32_t));
        {
          LocalTensor<int32_t> workLocal = workQueue.AllocTensor<int32_t>();
          Duplicate(workLocal, (int32_t)0, 8 * 8);
          workQueue.EnQue(workLocal);
          workLocal = workQueue.DeQue<int32_t>();
          // NOTE(review): copies C (=2560) int32 elements, but both syncGm and
          // workLocal were sized for only 8*8 = 64 — this looks like it should
          // be `8 * 8`, and as written reads/writes past both buffers. Verify.
          DataCopy(syncGm, workLocal, C);
          workQueue.FreeTensor(workLocal);
        }

        // UB scratch handed to the matmul objects for ND<->NZ format conversion.
        pipe->InitBuffer(tmpMMFormatUb, this->tiling1.baseM * this->tiling1.baseN * sizeof(half));
        mmformatUb = tmpMMFormatUb.Get<uint8_t>(this->tiling1.baseM * this->tiling1.baseN * sizeof(half));

        // inQueueX holds two adjacent tokens ([prev, cur], 2*C halves) per slot.
        pipe->InitBuffer(inQueueX, BUFFER_NUM, C * 2 * sizeof(half));
        pipe->InitBuffer(inQueueXk, 1, C * sizeof(half));
        pipe->InitBuffer(outQueueOut, BUFFER_NUM, C * sizeof(half));

        int offsetA = 0;
        int offsetB = 0;
        int offsetC = 0;

        // Per-core sub-views for matmul 1: A = out (mixed tokens), B = kw (transposed), C = hs.
        CalcOffset1(GetBlockIdx(), tiling1, offsetA, offsetB, offsetC);
        out1Gm = outGm[offsetA];
        kwGm = kwGm[offsetB];
        hs1Gm = hsGm[offsetC];


        // Per-core sub-views for matmul 2: A = hs (after squared ReLU), B = vw (transposed), C = out.
        CalcOffset2(GetBlockIdx(), tiling2, offsetA, offsetB, offsetC);
        hs2Gm = hsGm[offsetA];
        vwGm = vwGm[offsetB];
        out2Gm = outGm[offsetC];
        

        // Defensive check; nothing after it, so it only suppresses further work
        // when the system workspace was not configured.
        if (GetSysWorkSpacePtr() == nullptr) {
            return;
        }
    }
    
    // Runs the full 4-phase pipeline once (batch loop over B is disabled).
    // Phase boundaries: MTE3->MTE2 event (flush local copies-out) followed by a
    // cross-core SyncAll, since consecutive phases read GM the previous phase wrote.
    __aicore__ inline void Process(TPipe *pipe)
    {
      // Undo the backward bias applied in Init(); with the batch loop enabled
      // these would advance once per batch element.
      xGm = xGm[this->T*C];
      h0Gm = h0Gm[C];
      outGm = outGm[this->T*C];
      htGm = htGm[C];

      out1Gm = out1Gm[this->T*C];
      out2Gm = out2Gm[this->T*C];

      // for(int i=0; i<this->B; i++)
      
      {
        // Phase 1: token-shift mix. T >= 8: split the T steps across 8 cores;
        // otherwise core 0 alone processes all steps.
        if(this->T >= 8)
        {
          Process12(GetBlockIdx());
        }
        else if(GetBlockIdx() == 0)
        {
          Process11(1);
        }

        // Ensure this core's MTE3 stores (CopyOut1 / ht write) are visible
        // before anyone's MTE2 loads in the next phase.
        TEventID eventID1 = GetTPipePtr()->AllocEventID<HardEvent::MTE3_MTE2>();
        SetFlag<HardEvent::MTE3_MTE2>(eventID1);
        WaitFlag<HardEvent::MTE3_MTE2>(eventID1);
        GetTPipePtr()->ReleaseEventID<HardEvent::MTE3_MTE2>(eventID1);

        LocalTensor<int32_t> workLocal = workQueue.AllocTensor<int32_t>();
        SyncAll(syncGm, workLocal);
        workQueue.FreeTensor(workLocal);

        // Phase 2: hs = out @ kw^T.
        matmulObj1.SetLocalWorkspace(mmformatUb);
        matmulObj1.SetTensorA(out1Gm);
        matmulObj1.SetTensorB(kwGm, true);
        matmulObj1.DisableBias();
        matmulObj1.IterateAll(hs1Gm);
        matmulObj1.End();

        TEventID eventID2 = GetTPipePtr()->AllocEventID<HardEvent::MTE3_MTE2>();
        SetFlag<HardEvent::MTE3_MTE2>(eventID2);
        WaitFlag<HardEvent::MTE3_MTE2>(eventID2);
        GetTPipePtr()->ReleaseEventID<HardEvent::MTE3_MTE2>(eventID2);

        workLocal = workQueue.AllocTensor<int32_t>();
        SyncAll(syncGm, workLocal);
        workQueue.FreeTensor(workLocal);

        // Phase 3: in-place squared ReLU over hs. The row count implied by the
        // index math is 4*T C-sized chunks in total (8 cores x T/2, or 4 cores
        // x 1 when T == 1) — presumably hs is T x 4C; verify against tiling.
        if(this->T != 1)
        {
          for (int32_t i = GetBlockIdx()*T/2; i < (GetBlockIdx()+1)*T/2; i++) 
          {
            CopyIn2(i);
            Compute2(i);
            CopyOut2(i);
          }
        }
        else if(GetBlockIdx() < 4)
        {
          for (int32_t i = GetBlockIdx()*T; i < (GetBlockIdx()+1)*T; i++) 
          {
            CopyIn2(i);
            Compute2(i);
            CopyOut2(i);
          }
        }

        PipeBarrier<PIPE_ALL>();
        TEventID eventID3 = GetTPipePtr()->AllocEventID<HardEvent::MTE3_MTE2>();
        SetFlag<HardEvent::MTE3_MTE2>(eventID3);
        WaitFlag<HardEvent::MTE3_MTE2>(eventID3);
        GetTPipePtr()->ReleaseEventID<HardEvent::MTE3_MTE2>(eventID3);

        workLocal = workQueue.AllocTensor<int32_t>();
        SyncAll(syncGm, workLocal);
        workQueue.FreeTensor(workLocal);

        // Phase 4: out = hs @ vw^T — overwrites the same GM buffer phase 1
        // produced, which is safe only because of the barriers above.
        matmulObj2.SetLocalWorkspace(mmformatUb);
        matmulObj2.SetTensorA(hs2Gm);
        matmulObj2.SetTensorB(vwGm, true);
        matmulObj2.DisableBias();
        matmulObj2.IterateAll(out2Gm);
        matmulObj2.End();

        PipeBarrier<PIPE_ALL>();
        TEventID eventID4 = GetTPipePtr()->AllocEventID<HardEvent::MTE3_MTE2>();
        SetFlag<HardEvent::MTE3_MTE2>(eventID4);
        SetFlag<HardEvent::MTE3_MTE2>(eventID4);
        WaitFlag<HardEvent::MTE3_MTE2>(eventID4);
        GetTPipePtr()->ReleaseEventID<HardEvent::MTE3_MTE2>(eventID4);

        workLocal = workQueue.AllocTensor<int32_t>();
        SyncAll(syncGm, workLocal);
        workQueue.FreeTensor(workLocal);

      }
    }

private:
    // Single-core variant of phase 1 (used when T < 8; `progress` is unused).
    // Step 0 mixes [h0, x[0]] -> out[0]; step i mixes [x[i], x[i+1]] -> out[i+1];
    // finally x[T-1] is copied out as the new hidden state ht.
    __aicore__ inline void Process11(int32_t progress)
    {
      CopyIn1Rwk();
      CopyIn1H0();
      Compute1(0);
      CopyOut1(0);

      for (int32_t i = 0; i < this->T-1; i++) 
      {
          CopyIn1(i);
          Compute1(i);
          CopyOut1(i+1);
      }
      // Retire the xk coefficients that Compute1 kept resident via re-enqueue.
      LocalTensor<half> xkLocal = inQueueXk.DeQue<half>();
      inQueueXk.FreeTensor(xkLocal);

      // ht = x[T-1], staged through UB (Adds with 0 is a typed copy).
      {
        LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
        DataCopy(xLocal, xGm[(this->T-1) * C], C);
        inQueueX.EnQue(xLocal);
        xLocal = inQueueX.DeQue<half>();
        LocalTensor<half> outLocal = outQueueOut.AllocTensor<half>();
        Adds(outLocal, xLocal, (half)0, C);
        outQueueOut.EnQue<half>(outLocal);
        inQueueX.FreeTensor(xLocal);
        outLocal = outQueueOut.DeQue<half>();  
        DataCopy(htGm, outLocal, C);
        outQueueOut.FreeTensor(outLocal);
      }
    }
    // 8-core variant of phase 1 (used when T >= 8); `progress` is the core index
    // (0..7) and selects this core's slice [T*p/8, T*(p+1)/8) of the time steps.
    // Core 0 additionally handles the h0-based first step; core 7 drops its last
    // step (output index may not exceed T-1) and writes ht = x[T-1].
    __aicore__ inline void Process12(int32_t progress)
    {
      CopyIn1Rwk();
      if(progress == 0)
      {
        CopyIn1H0();
        Compute1(0);
        CopyOut1(0);
      }
      int start = this->T*progress/8;
      int end = this->T*(progress+1)/8;
      if(progress == 7)
      {
        end -=1;
      }

      for (int32_t i = start; i < end; i++) 
      {
          CopyIn1(i);
          Compute1(i);
          CopyOut1(i+1);
      }
      // Retire the xk coefficients that Compute1 kept resident via re-enqueue.
      LocalTensor<half> xkLocal = inQueueXk.DeQue<half>();
      inQueueXk.FreeTensor(xkLocal);

      // ht = x[T-1], staged through UB (Adds with 0 is a typed copy).
      if(progress == 7)
      {
        LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
        DataCopy(xLocal, xGm[(this->T-1) * C], C);
        inQueueX.EnQue(xLocal);
        xLocal = inQueueX.DeQue<half>();
        LocalTensor<half> outLocal = outQueueOut.AllocTensor<half>();
        Adds(outLocal, xLocal, (half)0, C);
        outQueueOut.EnQue<half>(outLocal);
        inQueueX.FreeTensor(xLocal);
        outLocal = outQueueOut.DeQue<half>();  
        DataCopy(htGm, outLocal, C);
        outQueueOut.FreeTensor(outLocal);
      }
    }

    // Loads the xk mixing coefficients once; Compute1 keeps them alive for the
    // whole phase by re-enqueueing after each use.
    __aicore__ inline void CopyIn1Rwk()
    {
      LocalTensor<half> xkLocal = inQueueXk.AllocTensor<half>();
      DataCopy(xkLocal, xkGm, C);
      inQueueXk.EnQue(xkLocal);
    }
    // Loads the first token pair as [h0, x[0]] (h0 plays the role of x[-1]).
    __aicore__ inline void CopyIn1H0()
    {
      LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
      DataCopy(xLocal, h0Gm, C);
      DataCopy(xLocal[C], xGm, C);
      inQueueX.EnQue(xLocal);
    }
    // Loads the adjacent token pair [x[progress], x[progress+1]] in one copy.
    __aicore__ inline void CopyIn1(int32_t progress)
    {
      LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
      DataCopy(xLocal, xGm[progress * C], C * 2);
      inQueueX.EnQue(xLocal);
    }
    // Token-shift mix: out = cur + xk * (prev - cur), where the input slot
    // holds [prev | cur] as two C-sized halves.
    __aicore__ inline void Compute1(int32_t progress)
    {
      LocalTensor<half> xLocal = inQueueX.DeQue<half>();
      LocalTensor<half> xkLocal = inQueueXk.DeQue<half>();
      LocalTensor<half> outLocal = outQueueOut.AllocTensor<half>();

      Sub(outLocal, xLocal, xLocal[C], C);
      Mul(outLocal, outLocal, xkLocal, C);
      Add(outLocal, outLocal, xLocal[C], C);

      outQueueOut.EnQue<half>(outLocal);
      inQueueX.FreeTensor(xLocal);
      // Re-enqueue instead of freeing: xk stays resident for the next iteration.
      inQueueXk.EnQue(xkLocal);
    }
    
    // Stores the mixed token for step `progress` to the `out` GM buffer.
    __aicore__ inline void CopyOut1(int32_t progress)
    {
      LocalTensor<half> outLocal = outQueueOut.DeQue<half>();  
      DataCopy(outGm[progress * C], outLocal, C);
      outQueueOut.FreeTensor(outLocal);
    }
    //////////// phase-3 helpers: in-place squared ReLU over hs, one C-chunk per call
     __aicore__ inline void CopyIn2(int32_t progress)
    {
      LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
      DataCopy(xLocal, hsGm[progress * C], C);
      inQueueX.EnQue(xLocal);
    }
    // out = relu(x)^2 (squared ReLU activation).
    __aicore__ inline void Compute2(int32_t progress)
    {
      LocalTensor<half> xLocal = inQueueX.DeQue<half>();
      LocalTensor<half> outLocal = outQueueOut.AllocTensor<half>();


      Relu(xLocal, xLocal, C);
      Mul(outLocal, xLocal, xLocal, C);
      
      outQueueOut.EnQue<half>(outLocal);
      inQueueX.FreeTensor(xLocal);
    }
    
    // Writes the activated chunk back to the same hs location (in-place update).
    __aicore__ inline void CopyOut2(int32_t progress)
    {
      LocalTensor<half> outLocal = outQueueOut.DeQue<half>();  
      DataCopy(hsGm[progress * C], outLocal, C);
      outQueueOut.FreeTensor(outLocal);
    }

    // Computes this core's A/B/C offsets for matmul 1 from the tiling, and
    // registers tail-block sizes on matmulObj1 when the core owns a partial tile.
    // NOTE(review): identical to CalcOffset2 except for which matmul object
    // receives SetTail — candidates for a single shared helper.
    __aicore__ inline void CalcOffset1(int32_t blockIdx, const TCubeTiling &tiling, 
                                      int32_t &offsetA, int32_t &offsetB, int32_t &offsetC)
    {
        auto mSingleBlocks = Ceiling(tiling.M, tiling.singleCoreM);
        auto mCoreIndx = blockIdx % mSingleBlocks;
        auto nCoreIndx = blockIdx / mSingleBlocks;

        offsetA = mCoreIndx * tiling.Ka * tiling.singleCoreM;
        // offsetB = nCoreIndx * tiling.singleCoreN;
        offsetB = nCoreIndx * tiling.Kb * tiling.singleCoreN; // B matrix is transposed
        offsetC = mCoreIndx * tiling.N * tiling.singleCoreM + nCoreIndx * tiling.singleCoreN;

        // process with tail block
        int tailM = tiling.M - mCoreIndx * tiling.singleCoreM;
        tailM = tailM < tiling.singleCoreM ? tailM : tiling.singleCoreM;
        int tailN = tiling.N - nCoreIndx * tiling.singleCoreN;
        tailN = tailN < tiling.singleCoreN ? tailN : tiling.singleCoreN;
        if (tailM < tiling.singleCoreM || tailN < tiling.singleCoreN) {
            matmulObj1.SetTail(tailM, tailN);
        }
    }
    // Same as CalcOffset1 but registers tail sizes on matmulObj2 (matmul 2).
    __aicore__ inline void CalcOffset2(int32_t blockIdx, const TCubeTiling &tiling, 
                                      int32_t &offsetA, int32_t &offsetB, int32_t &offsetC)
    {
        auto mSingleBlocks = Ceiling(tiling.M, tiling.singleCoreM);
        auto mCoreIndx = blockIdx % mSingleBlocks;
        auto nCoreIndx = blockIdx / mSingleBlocks;

        offsetA = mCoreIndx * tiling.Ka * tiling.singleCoreM;
        // offsetB = nCoreIndx * tiling.singleCoreN;
        offsetB = nCoreIndx * tiling.Kb * tiling.singleCoreN; // B matrix is transposed
        offsetC = mCoreIndx * tiling.N * tiling.singleCoreM + nCoreIndx * tiling.singleCoreN;

        // process with tail block
        int tailM = tiling.M - mCoreIndx * tiling.singleCoreM;
        tailM = tailM < tiling.singleCoreM ? tailM : tiling.singleCoreM;
        int tailN = tiling.N - nCoreIndx * tiling.singleCoreN;
        tailN = tailN < tiling.singleCoreN ? tailN : tiling.singleCoreN;
        if (tailM < tiling.singleCoreM || tailN < tiling.singleCoreN) {
            matmulObj2.SetTail(tailM, tailN);
        }
    }
    // Integer ceiling division: ceil(a / b).
    __aicore__ inline uint32_t Ceiling(uint32_t a, uint32_t b)
    {
        return (a + b - 1) / b;
    }

public:

    static constexpr int32_t BUFFER_NUM = 2;   // double buffering on the main queues
    static constexpr int32_t C = 2560;         // channel (hidden) dimension, in halves
    static constexpr MatmulConfig MM_CFG1 = GetNormalConfig(false, false, true);
    static constexpr MatmulConfig MM_CFG2 = GetNormalConfig(false, false, true);
    // static constexpr MatmulConfig MM_CFG = GetNormalConfig();
    // static constexpr MatmulConfig MM_CFG =GetMDLConfig(false, false, 0, true);//, false, false, true, true, true, false, false, false);

    // Matmul 1: out(half, ND) x kw^T(half, ND) -> hs(half, ND), float bias type (bias disabled).
    Matmul<MatmulType<TPosition::GM, CubeFormat::ND, half>, 
           MatmulType<TPosition::GM, CubeFormat::ND, half, true>,
           MatmulType<TPosition::GM, CubeFormat::ND, half>, 
           MatmulType<TPosition::GM, CubeFormat::ND, float>,
           MM_CFG1>
        matmulObj1;

    // Matmul 2: hs(half, ND) x vw^T(half, ND) -> out(half, ND), float bias type (bias disabled).
    Matmul<MatmulType<TPosition::GM, CubeFormat::ND, half>, 
           MatmulType<TPosition::GM, CubeFormat::ND, half, true>,
           MatmulType<TPosition::GM, CubeFormat::ND, half>, 
           MatmulType<TPosition::GM, CubeFormat::ND, float>,
           MM_CFG2>
        matmulObj2;

    GlobalTensor<half> xGm;    // [T, C] input tokens
    GlobalTensor<half> h0Gm;   // [C] incoming hidden state
    GlobalTensor<half> xkGm;   // [C] mixing coefficients
    GlobalTensor<half> kwGm;   // matmul-1 weights (per-core view after Init)
    GlobalTensor<half> vwGm;   // matmul-2 weights (per-core view after Init)
    GlobalTensor<half> outGm;  // [T, C] mixed tokens, later final output
    GlobalTensor<half> htGm;   // [C] outgoing hidden state
    GlobalTensor<half> hsGm;   // intermediate activations

    GlobalTensor<half> out1Gm; // per-core A view for matmul 1 (into outGm)
    GlobalTensor<half> out2Gm; // per-core C view for matmul 2 (into outGm)
    GlobalTensor<half> hs1Gm;  // per-core C view for matmul 1 (into hsGm)
    GlobalTensor<half> hs2Gm;  // per-core A view for matmul 2 (into hsGm)


    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;    // token pairs ([prev|cur], 2*C)
    TQue<QuePosition::VECIN, 1> inQueueXk;            // resident xk coefficients
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueOut;
    TQue<QuePosition::VECOUT, 1> outQueueC;           // NOTE(review): declared but never used

    LocalTensor<half> cLocal;  // NOTE(review): declared but never used

    TBuf<> tmpMMFormatUb;               // UB scratch for matmul format conversion
    LocalTensor<uint8_t> mmformatUb;

    TQue<TPosition::VECIN, 1> workQueue;  // scratch tensors for SyncAll
    GlobalTensor<int32_t> syncGm;         // 8x8 int32 cross-core sync flags in GM

    // int32_t B;
    int32_t T;  // number of time steps (tokens)

    TCubeTiling tiling1;  // tiling for matmul 1
    TCubeTiling tiling2;  // tiling for matmul 2
};
#endif // CHANNEL_MIXING_310P_H
