/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/**
 * @file token_shift.cpp
 */

#define K_MAX_SHAPE_DIM 0
#include "kernel_operator.h"
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;
constexpr int32_t C = 2560;
// NOTE(review): the bare figures "262144" and "51.2" below were undocumented --
// presumably a unified-buffer byte budget (262144 B = 256 KiB) and a derived
// per-buffer size in KiB used when sizing the InitBuffer calls; confirm.
// General-case token-shift kernel: processes one batch of T tokens per call
// (the tiling-key-3 dispatch below loops it over all batches).  For every
// token t and each of the six outputs i it computes
//     out_i[t] = (x[t-1] - x[t]) * coef_i + x[t]
// where x[-1] is taken from the incoming hidden state h0, and finally writes
// the last token x[T-1] to ht as the next hidden state.
// NOTE(review): "rwkvag" presumably packs the six RWKV token-shift mix
// coefficients (r/w/k/v/a/g), each of length C -- confirm with the host op.
class KernelTokenShift {
public:
    __aicore__ inline KernelTokenShift() {}
    // B is used as the batch INDEX here (the tiling-key-3 caller passes its
    // loop counter i), so every per-batch GM pointer is offset to batch B.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR rwkvag, GM_ADDR h0, GM_ADDR xr, GM_ADDR xw, GM_ADDR xk, GM_ADDR xv, GM_ADDR xa, GM_ADDR xg, GM_ADDR ht,
                                uint32_t B, uint32_t T, TPipe *pipeIn) 
    {
      ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

      this->T = T;

      // [B, T, C] tensors advance by B*T*C elements; per-batch state tensors
      // h0/ht (shape [B, C]) advance by B*C.  The coefficient table is shared.
      xGm.SetGlobalBuffer((__gm__ half*)x + B*T*C);
      rwkvagGm.SetGlobalBuffer((__gm__ half*)rwkvag);
      h0Gm.SetGlobalBuffer((__gm__ half*)h0 + B*C);
      xrGm.SetGlobalBuffer((__gm__ half*)xr + B*T*C);
      xwGm.SetGlobalBuffer((__gm__ half*)xw + B*T*C);
      xkGm.SetGlobalBuffer((__gm__ half*)xk + B*T*C);
      xvGm.SetGlobalBuffer((__gm__ half*)xv + B*T*C);
      xaGm.SetGlobalBuffer((__gm__ half*)xa + B*T*C);
      xgGm.SetGlobalBuffer((__gm__ half*)xg + B*T*C);
      htGm.SetGlobalBuffer((__gm__ half*)ht + B*C);

      
      // Six C-length coefficient vectors, loaded once and kept resident for
      // the whole pass (Compute re-enqueues instead of freeing the tensor).
      pipeIn->InitBuffer(inQueueRwkvag, 1, C * 6 * sizeof(half));
      
      // 2*C per slot: previous token followed by current token.
      pipeIn->InitBuffer(inQueueX, BUFFER_NUM, C * 2 * sizeof(half));
      // 6*C per slot: one C-length result per output (xr/xw/xk/xv/xa/xg).
      pipeIn->InitBuffer(outQueueOut, BUFFER_NUM, C * 6 * sizeof(half));
    }
    
    // Pipeline driver.  Token 0 pairs h0 with x[0]; each loop iteration i
    // loads the (x[i], x[i+1]) pair, so its result belongs to token i+1
    // (hence CopyOut(i+1)).  The tail block copies x[T-1] to ht through UB
    // (Adds with scalar 0 is the local-copy idiom).
    // NOTE(review): the loop bound T-1 underflows if T == 0 -- presumably
    // tiling guarantees T >= 1; confirm.
    __aicore__ inline void Process()
    {
      CopyInRwk();
      CopyIn1();
      Compute(0);
      CopyOut(0);

      for (int32_t i = 0; i < this->T-1; i++) 
      {
        CopyIn(i);
        Compute(i);
        CopyOut(i+1);
      }
      // Coefficients are no longer needed; release the resident tensor.
      LocalTensor<half> rwkvagLocal = inQueueRwkvag.DeQue<half>();
      inQueueRwkvag.FreeTensor(rwkvagLocal);

      // ht = x[T-1]: the last token becomes the next hidden state.
      {
        LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
        DataCopy(xLocal, xGm[(this->T-1) * C], C);
        inQueueX.EnQue(xLocal);

        xLocal = inQueueX.DeQue<half>();
        LocalTensor<half> outLocal = outQueueOut.AllocTensor<half>();
        Adds(outLocal, xLocal, (half)0, C);
        outQueueOut.EnQue<half>(outLocal);
        inQueueX.FreeTensor(xLocal);
        
        outLocal = outQueueOut.DeQue<half>();  
        DataCopy(htGm, outLocal, C);
        outQueueOut.FreeTensor(outLocal);
      }
    }

private:
    // Load the 6*C coefficient table into UB once.
    __aicore__ inline void CopyInRwk()
    {
      LocalTensor<half> rwkvagLocal = inQueueRwkvag.AllocTensor<half>();
      DataCopy(rwkvagLocal, rwkvagGm, C * 6);
      inQueueRwkvag.EnQue(rwkvagLocal);
    }
    // First pair: previous token comes from h0, current token is x[0].
    __aicore__ inline void CopyIn1()
    {
      LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
      DataCopy(xLocal, h0Gm, C);
      DataCopy(xLocal[C], xGm, C);
      inQueueX.EnQue(xLocal);
    }
    // Steady state: one 2*C copy brings in x[progress] and x[progress+1].
    __aicore__ inline void CopyIn(int32_t progress)
    {
      LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
      DataCopy(xLocal, xGm[progress * C], C * 2);
      inQueueX.EnQue(xLocal);
    }
    // out_i = (prev - curr) * coef_i + curr, for all six outputs.
    // outLocal[C*5] first holds the shared difference (prev - curr) and is
    // only overwritten (in place) by the sixth product, so order matters.
    __aicore__ inline void Compute(int32_t progress)
    {
      LocalTensor<half> xLocal = inQueueX.DeQue<half>();
      LocalTensor<half> rwkvagLocal = inQueueRwkvag.DeQue<half>();
      LocalTensor<half> outLocal = outQueueOut.AllocTensor<half>();

      Sub(outLocal[C * 5], xLocal, xLocal[C], C);

      Mul(outLocal, outLocal[C * 5], rwkvagLocal, C);
      Mul(outLocal[C * 1], outLocal[C * 5], rwkvagLocal[C * 1], C);
      Mul(outLocal[C * 2], outLocal[C * 5], rwkvagLocal[C * 2], C);
      Mul(outLocal[C * 3], outLocal[C * 5], rwkvagLocal[C * 3], C);
      Mul(outLocal[C * 4], outLocal[C * 5], rwkvagLocal[C * 4], C);
      Mul(outLocal[C * 5], outLocal[C * 5], rwkvagLocal[C * 5], C);

      Add(outLocal, outLocal, xLocal[C], C);
      Add(outLocal[C * 1], outLocal[C * 1], xLocal[C], C);
      Add(outLocal[C * 2], outLocal[C * 2], xLocal[C], C);
      Add(outLocal[C * 3], outLocal[C * 3], xLocal[C], C);
      Add(outLocal[C * 4], outLocal[C * 4], xLocal[C], C);
      Add(outLocal[C * 5], outLocal[C * 5], xLocal[C], C);
      
      outQueueOut.EnQue<half>(outLocal);
      inQueueX.FreeTensor(xLocal);
      // Re-enqueue (not free) so the coefficients stay resident for the
      // next Compute call.
      inQueueRwkvag.EnQue(rwkvagLocal);
    }
    
    // Scatter the six C-length results to their output tensors at `progress`.
    __aicore__ inline void CopyOut(int32_t progress)
    {
      LocalTensor<half> outLocal = outQueueOut.DeQue<half>();  
      DataCopy(xrGm[progress * C], outLocal, C);
      DataCopy(xwGm[progress * C], outLocal[C], C);
      DataCopy(xkGm[progress * C], outLocal[C * 2], C);
      DataCopy(xvGm[progress * C], outLocal[C * 3], C);
      DataCopy(xaGm[progress * C], outLocal[C * 4], C);
      DataCopy(xgGm[progress * C], outLocal[C * 5], C);
      outQueueOut.FreeTensor(outLocal);
    }
private:
    
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECIN, 1> inQueueRwkvag;  // depth 1: coefficients stay resident
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueOut;

    GlobalTensor<half> xGm;
    GlobalTensor<half> rwkvagGm;
    GlobalTensor<half> h0Gm;
    GlobalTensor<half> xrGm;
    GlobalTensor<half> xwGm;
    GlobalTensor<half> xkGm;
    GlobalTensor<half> xvGm;
    GlobalTensor<half> xaGm;
    GlobalTensor<half> xgGm;
    GlobalTensor<half> htGm;
    uint32_t T;  // number of tokens in this batch
};


// Specialization for B == 1, T == 1: instead of one core computing all six
// outputs, the single token-shift step is spread across cores.  Cores 0..5
// each produce exactly one of xr/xw/xk/xv/xa/xg (using their own C-length
// slice of rwkvag); core 6 writes ht = x (with T == 1 the last token is x
// itself).
// NOTE(review): the GetBlockIdx() != 6 split presumably assumes a launch
// with exactly 7 cores -- any core with index > 6 would take the else
// branch and redundantly re-write ht; confirm the host block dim.
class KernelTokenShiftB1T1 {
public:
    __aicore__ inline KernelTokenShiftB1T1() {}
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR rwkvag, GM_ADDR h0, GM_ADDR xr, GM_ADDR xw, GM_ADDR xk, GM_ADDR xv, GM_ADDR xa, GM_ADDR xg, GM_ADDR ht,
                                TPipe *pipeIn) 
    {
      ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");


      // Each core only ever reads its own coefficient vector, so rwkvag is
      // pre-offset by the core index.
      xGm.SetGlobalBuffer((__gm__ half*)x);
      rwkvagGm.SetGlobalBuffer((__gm__ half*)rwkvag + C*GetBlockIdx());
      h0Gm.SetGlobalBuffer((__gm__ half*)h0);
      xrGm.SetGlobalBuffer((__gm__ half*)xr);
      xwGm.SetGlobalBuffer((__gm__ half*)xw);
      xkGm.SetGlobalBuffer((__gm__ half*)xk);
      xvGm.SetGlobalBuffer((__gm__ half*)xv);
      xaGm.SetGlobalBuffer((__gm__ half*)xa);
      xgGm.SetGlobalBuffer((__gm__ half*)xg);
      htGm.SetGlobalBuffer((__gm__ half*)ht);

      
      // One C-length slot per queue: this variant moves a single token.
      pipeIn->InitBuffer(inQueueRwkvag, 1, C * sizeof(half));
      pipeIn->InitBuffer(inQueueX, 1, C * sizeof(half));
      pipeIn->InitBuffer(inQueueH0, 1, C * sizeof(half));
      pipeIn->InitBuffer(outQueueOut, 1, C * sizeof(half));

      // pipeIn->InitBuffer(inQueueRwkvag, 1, 65536);
      // pipeIn->InitBuffer(inQueueX, 1, 65536);
      // pipeIn->InitBuffer(inQueueH0, 1, 65536);
      // pipeIn->InitBuffer(outQueueOut, 1, 65536);
    }
    
    // Cores 0..5 run the usual copy-in / compute / copy-out pipeline for
    // their single output; the remaining core copies x straight to ht via
    // UB (Adds with scalar 0 is the local-copy idiom).
    __aicore__ inline void Process()
    {
      if(GetBlockIdx() != 6)
      {
        CopyInRwk();
        CopyIn1();
        Compute(0);
        CopyOut(0);     
      }
      else 
      // if(GetBlockIdx() == 0)
      {
        LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
        DataCopy(xLocal, xGm, C);
        inQueueX.EnQue(xLocal);

        xLocal = inQueueX.DeQue<half>();
        LocalTensor<half> outLocal = outQueueOut.AllocTensor<half>();
        Adds(outLocal, xLocal, (half)0, C);
        outQueueOut.EnQue<half>(outLocal);
        inQueueX.FreeTensor(xLocal);
        
        outLocal = outQueueOut.DeQue<half>();  
        DataCopy(htGm, outLocal, C);
        outQueueOut.FreeTensor(outLocal);
      }
    }

private:
    // Load this core's single C-length coefficient slice.
    __aicore__ inline void CopyInRwk()
    {
      LocalTensor<half> rwkvagLocal = inQueueRwkvag.AllocTensor<half>();
      DataCopy(rwkvagLocal, rwkvagGm, C);  // one slice only, not all 6
      inQueueRwkvag.EnQue(rwkvagLocal);
    }
     // Load the hidden state (previous token) and the single input token.
     __aicore__ inline void CopyIn1()
    {
      LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
      LocalTensor<half> h0Local = inQueueH0.AllocTensor<half>();
      
      DataCopy(h0Local, h0Gm, C);
      DataCopy(xLocal, xGm, C);
      inQueueX.EnQue(xLocal);
      inQueueH0.EnQue(h0Local);
    }

    // out = (h0 - x) * coef + x : the T == 1 token-shift step, with h0
    // playing the role of the previous token.
    __aicore__ inline void Compute(int32_t progress)
    {
      LocalTensor<half> h0Local = inQueueH0.DeQue<half>();
      LocalTensor<half> xLocal = inQueueX.DeQue<half>();
      LocalTensor<half> rwkvagLocal = inQueueRwkvag.DeQue<half>();
      LocalTensor<half> outLocal = outQueueOut.AllocTensor<half>();

      // Sub(outLocal[C * 5], xLocal, xLocal[C], C);

      // Mul(outLocal, outLocal[C * 5], rwkvagLocal, C);
      // Mul(outLocal[C * 1], outLocal[C * 5], rwkvagLocal[C * 1], C);
      // Mul(outLocal[C * 2], outLocal[C * 5], rwkvagLocal[C * 2], C);
      // Mul(outLocal[C * 3], outLocal[C * 5], rwkvagLocal[C * 3], C);
      // Mul(outLocal[C * 4], outLocal[C * 5], rwkvagLocal[C * 4], C);
      // Mul(outLocal[C * 5], outLocal[C * 5], rwkvagLocal[C * 5], C);

      // Add(outLocal, outLocal, xLocal[C], C);
      // Add(outLocal[C * 1], outLocal[C * 1], xLocal[C], C);
      // Add(outLocal[C * 2], outLocal[C * 2], xLocal[C], C);
      // Add(outLocal[C * 3], outLocal[C * 3], xLocal[C], C);
      // Add(outLocal[C * 4], outLocal[C * 4], xLocal[C], C);
      // Add(outLocal[C * 5], outLocal[C * 5], xLocal[C], C);

      Sub(outLocal, h0Local, xLocal, C);
      Mul(outLocal, outLocal, rwkvagLocal, C);
      Add(outLocal, outLocal, xLocal, C);
      
      outQueueOut.EnQue<half>(outLocal);
      // if(GetBlockIdx() == 0)
      // {
      //   DataCopy(htGm, xLocal, C);
      // }
      inQueueX.FreeTensor(xLocal);
      inQueueH0.FreeTensor(h0Local);
      inQueueRwkvag.FreeTensor(rwkvagLocal);
    }
    
    // Each core writes its single result to the one output tensor it owns,
    // selected by core index (0:xr 1:xw 2:xk 3:xv 4:xa 5:xg).
    __aicore__ inline void CopyOut(int32_t progress)
    {
      LocalTensor<half> outLocal = outQueueOut.DeQue<half>();  
      switch (GetBlockIdx())
      {
        case 0: DataCopy(xrGm[progress * C], outLocal, C); break;
        case 1: DataCopy(xwGm[progress * C], outLocal, C); break;
        case 2: DataCopy(xkGm[progress * C], outLocal, C); break;
        case 3: DataCopy(xvGm[progress * C], outLocal, C); break;                                  
        case 4: DataCopy(xaGm[progress * C], outLocal, C); break;
        case 5: DataCopy(xgGm[progress * C], outLocal, C); break;
      }
      // DataCopy(xrGm[progress * C], outLocal, C);
      // DataCopy(xwGm[progress * C], outLocal[C], C);
      // DataCopy(xkGm[progress * C], outLocal[C * 2], C);
      // DataCopy(xvGm[progress * C], outLocal[C * 3], C);
      // DataCopy(xaGm[progress * C], outLocal[C * 4], C);
      // DataCopy(xgGm[progress * C], outLocal[C * 5], C);
      outQueueOut.FreeTensor(outLocal);
    }
private:
    
    TQue<QuePosition::VECIN, 1> inQueueX, inQueueH0;
    TQue<QuePosition::VECIN, 1> inQueueRwkvag;
    TQue<QuePosition::VECOUT, 1> outQueueOut;

    GlobalTensor<half> xGm;
    GlobalTensor<half> rwkvagGm;
    GlobalTensor<half> h0Gm;
    GlobalTensor<half> xrGm;
    GlobalTensor<half> xwGm;
    GlobalTensor<half> xkGm;
    GlobalTensor<half> xvGm;
    GlobalTensor<half> xaGm;
    GlobalTensor<half> xgGm;
    GlobalTensor<half> htGm;
};


// Specialization for B == 1: the T tokens are split across cores.  The
// first `bigCore` cores take smallLoopNum+1 loop iterations each, the rest
// take smallLoopNum.  T_start is offset by -1 so that each core's CopyIn(i)
// loads the (x[i], x[i+1]) pair whose result is stored at index i+1; core 0
// is clamped to T_start = 0 and instead handles token 0 (paired with h0)
// explicitly before the loop.
// NOTE(review): the GetBlockIdx() == 7 branch presumably assumes an 8-core
// launch whose last core's T_end equals T-1, making ht = x[T-1]; confirm
// against the host tiling.
class KernelTokenShiftB1 {
public:
    __aicore__ inline KernelTokenShiftB1() {}
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR rwkvag, GM_ADDR h0, GM_ADDR xr, GM_ADDR xw, GM_ADDR xk, GM_ADDR xv, GM_ADDR xa, GM_ADDR xg, GM_ADDR ht,
                                uint32_t bigCore, uint32_t smallLoopNum, TPipe *pipeIn) 
    {
      ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

      // Work split: "big" cores (index < bigCore) own smallLoopNum+1 tokens,
      // the rest own smallLoopNum.  The -1 shifts each range so the loop
      // index is the *previous* token of the pair being loaded.
      if(GetBlockIdx() < bigCore)
      {
        this->T_start = GetBlockIdx()*(smallLoopNum+1) - 1;
        this->T_end = this->T_start + smallLoopNum+1;
      }
      else
      {
        this->T_start = GetBlockIdx()*smallLoopNum + bigCore - 1;
        this->T_end = this->T_start + smallLoopNum;
      }
      // Core 0 would start at -1; clamp, since its first pair (h0, x[0]) is
      // handled separately in Process().
      if(this->T_start < 0) this->T_start = 0;
      // printf("%d %d..",this->T_start, this->T_end);

      xGm.SetGlobalBuffer((__gm__ half*)x);
      rwkvagGm.SetGlobalBuffer((__gm__ half*)rwkvag);
      h0Gm.SetGlobalBuffer((__gm__ half*)h0);
      xrGm.SetGlobalBuffer((__gm__ half*)xr);
      xwGm.SetGlobalBuffer((__gm__ half*)xw);
      xkGm.SetGlobalBuffer((__gm__ half*)xk);
      xvGm.SetGlobalBuffer((__gm__ half*)xv);
      xaGm.SetGlobalBuffer((__gm__ half*)xa);
      xgGm.SetGlobalBuffer((__gm__ half*)xg);
      htGm.SetGlobalBuffer((__gm__ half*)ht);

      
      // Same buffer layout as KernelTokenShift: resident 6*C coefficient
      // table, double-buffered 2*C input pairs and 6*C output bundles.
      pipeIn->InitBuffer(inQueueRwkvag, 1, C * 6 * sizeof(half));
      pipeIn->InitBuffer(inQueueX, BUFFER_NUM, C * 2 * sizeof(half));
      pipeIn->InitBuffer(outQueueOut, BUFFER_NUM, C * 6 * sizeof(half));
    }
    
    // Pipeline driver for this core's token range [T_start, T_end).
    // Core 0 additionally produces token 0 from the (h0, x[0]) pair; core 7
    // additionally writes ht = x[T_end] through UB (Adds with scalar 0 is
    // the local-copy idiom).
    __aicore__ inline void Process()
    {
      CopyInRwk();
      if(GetBlockIdx() == 0)
      {
        CopyIn1();
        Compute(0);
        CopyOut(0);
      }

      for (int32_t i = this->T_start; i < this->T_end; i++) 
      {
        CopyIn(i);
        Compute(i);
        CopyOut(i+1);
      }
      // Coefficients no longer needed; release the resident tensor.
      LocalTensor<half> rwkvagLocal = inQueueRwkvag.DeQue<half>();
      inQueueRwkvag.FreeTensor(rwkvagLocal);
      if(GetBlockIdx() == 7)
      {
        LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
        DataCopy(xLocal, xGm[(this->T_end) * C], C);
        inQueueX.EnQue(xLocal);

        xLocal = inQueueX.DeQue<half>();
        LocalTensor<half> outLocal = outQueueOut.AllocTensor<half>();
        Adds(outLocal, xLocal, (half)0, C);
        outQueueOut.EnQue<half>(outLocal);
        inQueueX.FreeTensor(xLocal);
        
        outLocal = outQueueOut.DeQue<half>();  
        DataCopy(htGm, outLocal, C);
        outQueueOut.FreeTensor(outLocal);
      }
    }

private:
    // Load the 6*C coefficient table into UB once.
    __aicore__ inline void CopyInRwk()
    {
      LocalTensor<half> rwkvagLocal = inQueueRwkvag.AllocTensor<half>();
      DataCopy(rwkvagLocal, rwkvagGm, C * 6);
      inQueueRwkvag.EnQue(rwkvagLocal);
    }
    // First pair (core 0 only): previous token from h0, current is x[0].
    __aicore__ inline void CopyIn1()
    {
      LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
      DataCopy(xLocal, h0Gm, C);
      DataCopy(xLocal[C], xGm, C);
      inQueueX.EnQue(xLocal);
    }
    // Steady state: one 2*C copy brings in x[progress] and x[progress+1].
    __aicore__ inline void CopyIn(int32_t progress)
    {
      LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
      DataCopy(xLocal, xGm[progress * C], C * 2);
      inQueueX.EnQue(xLocal);
    }
    // out_i = (prev - curr) * coef_i + curr, for all six outputs.
    // outLocal[C*5] first holds the shared difference (prev - curr) and is
    // only overwritten (in place) by the sixth product, so order matters.
    __aicore__ inline void Compute(int32_t progress)
    {
      LocalTensor<half> xLocal = inQueueX.DeQue<half>();
      LocalTensor<half> rwkvagLocal = inQueueRwkvag.DeQue<half>();
      LocalTensor<half> outLocal = outQueueOut.AllocTensor<half>();

      Sub(outLocal[C * 5], xLocal, xLocal[C], C);

      Mul(outLocal, outLocal[C * 5], rwkvagLocal, C);
      Mul(outLocal[C * 1], outLocal[C * 5], rwkvagLocal[C * 1], C);
      Mul(outLocal[C * 2], outLocal[C * 5], rwkvagLocal[C * 2], C);
      Mul(outLocal[C * 3], outLocal[C * 5], rwkvagLocal[C * 3], C);
      Mul(outLocal[C * 4], outLocal[C * 5], rwkvagLocal[C * 4], C);
      Mul(outLocal[C * 5], outLocal[C * 5], rwkvagLocal[C * 5], C);

      Add(outLocal, outLocal, xLocal[C], C);
      Add(outLocal[C * 1], outLocal[C * 1], xLocal[C], C);
      Add(outLocal[C * 2], outLocal[C * 2], xLocal[C], C);
      Add(outLocal[C * 3], outLocal[C * 3], xLocal[C], C);
      Add(outLocal[C * 4], outLocal[C * 4], xLocal[C], C);
      Add(outLocal[C * 5], outLocal[C * 5], xLocal[C], C);
      
      outQueueOut.EnQue<half>(outLocal);
      inQueueX.FreeTensor(xLocal);
      // Re-enqueue (not free) so the coefficients stay resident for the
      // next Compute call.
      inQueueRwkvag.EnQue(rwkvagLocal);
    }
    
    // Scatter the six C-length results to their output tensors at `progress`.
    __aicore__ inline void CopyOut(int32_t progress)
    {
      LocalTensor<half> outLocal = outQueueOut.DeQue<half>();  
      DataCopy(xrGm[progress * C], outLocal, C);
      DataCopy(xwGm[progress * C], outLocal[C], C);
      DataCopy(xkGm[progress * C], outLocal[C * 2], C);
      DataCopy(xvGm[progress * C], outLocal[C * 3], C);
      DataCopy(xaGm[progress * C], outLocal[C * 4], C);
      DataCopy(xgGm[progress * C], outLocal[C * 5], C);
      outQueueOut.FreeTensor(outLocal);
    }
private:
    
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECIN, 1> inQueueRwkvag;  // depth 1: coefficients stay resident
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueOut;

    GlobalTensor<half> xGm;
    GlobalTensor<half> rwkvagGm;
    GlobalTensor<half> h0Gm;
    GlobalTensor<half> xrGm;
    GlobalTensor<half> xwGm;
    GlobalTensor<half> xkGm;
    GlobalTensor<half> xvGm;
    GlobalTensor<half> xaGm;
    GlobalTensor<half> xgGm;
    GlobalTensor<half> htGm;
    int32_t T_start;  // first loop index (previous-token index of first pair)
    int32_t T_end;    // one past the last loop index for this core
};

extern "C" __global__ __aicore__ void token_shift(GM_ADDR x, GM_ADDR rwkvag, GM_ADDR h0, GM_ADDR xr, GM_ADDR xw, GM_ADDR xk, GM_ADDR xv, GM_ADDR xa, GM_ADDR xg, GM_ADDR ht, GM_ADDR workspace, GM_ADDR tiling)
{
    GET_TILING_DATA(tiling_data, tiling);
    TPipe pipe;
    if (TILING_KEY_IS(1)) {
      KernelTokenShiftB1T1 op;
      op.Init(x, rwkvag, h0, xr, xw, xk, xv, xa, xg, ht,
              &pipe);
      op.Process();
    }
    else if (TILING_KEY_IS(2)) {
      KernelTokenShiftB1 op;
      op.Init(x, rwkvag, h0, xr, xw, xk, xv, xa, xg, ht,
              tiling_data.B, tiling_data.T, &pipe);
      op.Process();
    }
    else if (TILING_KEY_IS(3)) {
      KernelTokenShift op;
      for(int i=0; i<tiling_data.B; i++)
      {
        op.Init(x, rwkvag, h0, xr, xw, xk, xv, xa, xg, ht,
                i, tiling_data.T, &pipe);
        op.Process();
        pipe.Reset();
      }
    }
}
#ifndef ASCENDC_CPU_DEBUG
// call of kernel function
// Host-side launch wrapper: forwards all GM buffers plus workspace/tiling to
// the device kernel with the given block dim and stream.  Compiled only for
// on-device builds (the CPU-debug build excludes the <<<>>> launch syntax).
void token_shift_do(uint32_t blockDim, void* l2ctrl, void* stream, uint8_t* x, uint8_t* rwkvag, uint8_t* h0, uint8_t* xr, uint8_t* xw, uint8_t* xk, uint8_t* xv, uint8_t* xa, uint8_t* xg, uint8_t* ht,
    uint8_t* workspace, uint8_t* tiling)
{
    token_shift<<<blockDim, l2ctrl, stream>>>(x, rwkvag, h0, xr, xw, xk, xv, xa, xg, ht, workspace, tiling);
}
#endif
