/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file conv2d_common_func.h
 * \brief Conv2d common device-side API implementations: Init, the Set* shape/tensor
 *        entry points, Iterate / IterateAll, GetTensorC and End.
 */

#ifndef CONV2D_COMMON_FUNC_H
#define CONV2D_COMMON_FUNC_H

#include "conv2d_util.h"
#include "conv2d_common_sub_api.h"
#include "conv2d_framework_util.h"
#include "kernel_tiling/kernel_tiling.h"

CONV_DECLARE_REG_IMPL(Init);
CONV_DECLARE_REG_IMPL(SetFmap);
CONV_DECLARE_REG_IMPL(SetWeight);
CONV_DECLARE_REG_IMPL(SetScale);
CONV_DECLARE_REG_IMPL(SetBias);
CONV_DECLARE_REG_IMPL(SetOrgFmapShape);
CONV_DECLARE_REG_IMPL(SetOrgWeightShape);
CONV_DECLARE_REG_IMPL(SetOrgOutputShape);
CONV_DECLARE_REG_IMPL(SetSingleFmapShape);
CONV_DECLARE_REG_IMPL(SetSingleWeightShape);
CONV_DECLARE_REG_IMPL(SetSingleOutputShape);
CONV_DECLARE_REG_IMPL(SetFmapStartPosition);
CONV_DECLARE_REG_IMPL(Iterate);
CONV_DECLARE_REG_IMPL(GetTensorC);
CONV_DECLARE_REG_IMPL(IterateAll);
CONV_DECLARE_REG_IMPL(End);

namespace Conv2dFunc {
using namespace AscendC;

// Oversized opaque type (1024 * 16 bytes) used as a sentinel in template dispatch.
// NOTE(review): presumably chosen so that overload/trait machinery can reject it
// (e.g. by size or by being non-constructible) — confirm against the call sites
// in the framework utilities.
using TypeFalse = struct {
    __uint128_t _[1024];
};

// Returns how many times 'b' fits into 'a' (integer division, remainder discarded).
// Used to derive the L1-to-L0 K-direction tile ratio (e.g. kAL1 / kL0).
// NOTE(review): despite the name ("Muti" reads as multiply) this performs a division,
// and there is no guard against b == 0 — callers pass tiling values assumed non-zero.
static __aicore__ inline uint64_t CalMuti(uint64_t a, uint64_t b)
{
    return a / b;
}

// Number of feature-map elements that fit in one C0 block (C0_SIZE bytes).
template <class Intf>
static __aicore__ inline size_t GetInputkInOneC0Block()
{
    constexpr size_t elemBytes = sizeof(typename Intf::FmapT);
    return C0_SIZE / elemBytes;
}

template <class Intf>
static __aicore__ inline void InitConv2dKDirectionBaseValue(Intf *self)
{
    // K(reduce)-direction bookkeeping. Total K = singleCoreCin * kernelH * kernelW,
    // tiled as kAL1 / kBL1 on L1 and kL0 on L0.
    // Max 0-based tile index per buffer level (ceil(total / tile) - 1).
    self->ctx.maxKAL1Iter =
        CeilDIV(self->ctx.singleCoreCin * self->ctx.kernelHxkernelW, self->ctx.conv2dTiling->kAL1) - 1;
    self->ctx.maxKBL1Iter =
        CeilDIV(self->ctx.singleCoreCin * self->ctx.kernelHxkernelW, self->ctx.conv2dTiling->kBL1) - 1;
    self->ctx.maxKL0Iter =
        CeilDIV(self->ctx.singleCoreCin * self->ctx.kernelHxkernelW, self->ctx.conv2dTiling->kL0) - 1;
    // The DDR->L0 K loop count aligns Cin up to one C0 block of FmapT elements,
    // while maxKL0Iter above uses the raw Cin.
    // NOTE(review): confirm this aligned/unaligned asymmetry is intended.
    self->ctx.ddr2l0LoopK = CeilDIV(
        AlignB(self->ctx.singleCoreCin, C0_SIZE / sizeof(typename Intf::FmapT)) * self->ctx.kernelHxkernelW,
        self->ctx.conv2dTiling->kL0);
    // Tail size of the last tile per level; a zero remainder means a full tile.
    self->ctx.kAL1Tail = (self->ctx.singleCoreCin * self->ctx.kernelHxkernelW) % self->ctx.conv2dTiling->kAL1;
    self->ctx.kAL1Tail = self->ctx.kAL1Tail == 0 ? self->ctx.conv2dTiling->kAL1 : self->ctx.kAL1Tail;
    self->ctx.kBL1Tail = (self->ctx.singleCoreCin * self->ctx.kernelHxkernelW) % self->ctx.conv2dTiling->kBL1;
    self->ctx.kBL1Tail = self->ctx.kBL1Tail == 0 ? self->ctx.conv2dTiling->kBL1 : self->ctx.kBL1Tail;
    // The L0 tail uses the C0-aligned Cin, matching ddr2l0LoopK above.
    self->ctx.kL0Tail = (AlignB(self->ctx.singleCoreCin, GetInputkInOneC0Block<Intf>()) *
        self->ctx.kernelHxkernelW) % self->ctx.conv2dTiling->kL0;
    self->ctx.kL0Tail = self->ctx.kL0Tail == 0 ? self->ctx.conv2dTiling->kL0 : self->ctx.kL0Tail;
    // How many L0 K tiles make up one L1 K tile (integer ratio, see CalMuti).
    self->ctx.multiKAL1 = CalMuti(self->ctx.conv2dTiling->kAL1, self->ctx.conv2dTiling->kL0);
    self->ctx.multiKBL1 = CalMuti(self->ctx.conv2dTiling->kBL1, self->ctx.conv2dTiling->kL0);
    // Full-load flags: true when a single L1 K tile covers the whole aligned K extent
    // (alignment granularity differs: FmapT for A, WeightT for B).
    self->ctx.kAL1fullload =
        (AlignB(self->ctx.singleCoreCin, C0_SIZE / sizeof(typename Intf::FmapT)) * self->ctx.kernelHxkernelW ==
            self->ctx.conv2dTiling->kAL1);
    self->ctx.kBL1fullload =
        (AlignB(self->ctx.singleCoreCin, C0_SIZE / sizeof(typename Intf::WeightT)) * self->ctx.kernelHxkernelW ==
            self->ctx.conv2dTiling->kBL1);
}

template <class Intf>
static __aicore__ inline void InitConv2dCoutDirectionBaseValue(Intf *self)
{
    // Cout(N)-direction bookkeeping: singleCoreCo is tiled as (nL0 * multiNBL1)
    // per BL1 load and nL0 per L0 tile.
    self->ctx.maxNBL1Iter = CeilDIV(self->ctx.singleCoreCo,
        (self->ctx.conv2dTiling->nL0 * self->ctx.conv2dTiling->multiNBL1)) - 1;
    self->ctx.maxNL0Iter = self->ctx.conv2dTiling->multiNBL1 - 1;
    // Tail of the last BL1 tile; zero remainder means a full tile.
    self->ctx.nBL1Tail = self->ctx.singleCoreCo %
                         (self->ctx.conv2dTiling->nL0 * self->ctx.conv2dTiling->multiNBL1);
    self->ctx.nBL1Tail = self->ctx.nBL1Tail == 0 ? self->ctx.conv2dTiling->nL0 * self->ctx.conv2dTiling->multiNBL1 :
                         self->ctx.nBL1Tail;
    // L0 N tail: the raw (unaligned) value is preserved in nL0TailNotAlign;
    // nL0Tail is then rounded up to the BLOCK_L0_N granularity.
    self->ctx.nL0Tail = self->ctx.nBL1Tail % self->ctx.conv2dTiling->nL0;
    self->ctx.nL0TailNotAlign = self->ctx.nL0Tail == 0 ? self->ctx.conv2dTiling->nL0 : self->ctx.nL0Tail;
    self->ctx.nL0Tail = AlignB(self->ctx.nL0TailNotAlign, BLOCK_L0_N);

    // Loop trip counts derived from the max indices above.
    self->ctx.ddr2l1LoopN = self->ctx.maxNBL1Iter + 1;
    self->ctx.l12l0LoopN = self->ctx.conv2dTiling->multiNBL1;
}

// Ho-direction loop bookkeeping.
// (hoL1 is currently mirrored into ctx; it should eventually come straight from tiling.)
template <class Intf>
static __aicore__ inline void InitConv2dHoDirectionBaseValue(Intf *self)
{
    auto &ctx = self->ctx;
    // Tail rows of the last Ho L1 tile; an exact division means a full tile.
    auto hoRemainder = ctx.singleCoreHo % ctx.hoL1;
    ctx.hoAL1Tail = (hoRemainder == 0) ? ctx.hoL1 : hoRemainder;
    // Trip count and max 0-based index of the DDR->L1 H loop.
    ctx.ddr2l1LoopH = CeilDIV(ctx.singleCoreHo, ctx.hoL1);
    ctx.maxHoL1Iter = ctx.ddr2l1LoopH - 1;
}

template <class Intf>
static __aicore__ inline void InitConv2dWoDirectionBaseValue(Intf *self)
{
    // Wo-direction loop bookkeeping.
    // (woL1 is currently mirrored into ctx; it should eventually come straight from tiling.)
    self->ctx.woAL1Tail = self->ctx.singleCoreWo % self->ctx.woL1;
    self->ctx.woAL1Tail = self->ctx.woAL1Tail == 0 ?  self->ctx.woL1 : self->ctx.woAL1Tail;
    // When the L1 tail spans several woL0 tiles and is not a clean multiple
    // (remainder mod 16 > 1), split it into a woL0-multiple part plus a small
    // leftover handled by one extra L1 iteration.
    // NOTE(review): assumes ctx.woL1SmallTail is zero-initialized elsewhere when this
    // branch is not taken (it is read unconditionally below) — confirm.
    if (CeilDIV(self->ctx.woAL1Tail, self->ctx.conv2dTiling->woL0) > 1 && self->ctx.woAL1Tail % 16 > 1) {
        self->ctx.woL1SmallTail = self->ctx.woAL1Tail % self->ctx.conv2dTiling->woL0;
        self->ctx.woAL1Tail = (self->ctx.woAL1Tail / self->ctx.conv2dTiling->woL0) * self->ctx.conv2dTiling->woL0;
    }
    self->ctx.maxWoL1Iter = CeilDIV(self->ctx.singleCoreWo, self->ctx.woL1) - 1;
    if (self->ctx.woL1SmallTail > 0) {
        self->ctx.maxWoL1Iter += 1;  // extra iteration that carries the small tail
    }
    self->ctx.ddr2l1LoopW = self->ctx.maxWoL1Iter + 1;
}

template <class Intf, uint32_t ImplType>
struct Init {
    /**
     * \brief Setup entry point: caches the tiling scalars into the runtime context,
     *        allocates every on-chip queue/buffer, and precomputes the loop
     *        bookkeeping for the K / Wo / Cout / Ho directions.
     * \param conv2dTiling opaque pointer to the TConv2DTiling produced by the host
     *        tiling stage; stored (not copied) into the context.
     */
    static __aicore__ inline void call(Intf *self, const void *__restrict conv2dTiling)
    {
        self->ctx.conv2dTiling = (TConv2DTiling *)conv2dTiling;
        // Original (full-tensor) shapes.
        self->ctx.oriCi = self->ctx.conv2dTiling->orgCi;
        self->ctx.oriHi = self->ctx.conv2dTiling->orgHi;
        self->ctx.oriWi = self->ctx.conv2dTiling->orgWi;
        self->ctx.oriCo = self->ctx.conv2dTiling->orgCo;
        self->ctx.oriHo = self->ctx.conv2dTiling->orgHo;
        self->ctx.oriWo = self->ctx.conv2dTiling->orgWo;
        // L1 tile extents along the output H/W axes.
        self->ctx.hoL1 = self->ctx.conv2dTiling->hoL1;
        self->ctx.woL1 = self->ctx.conv2dTiling->woL1;
        self->ctx.kernelH = self->ctx.conv2dTiling->kernelH;
        self->ctx.kernelW = self->ctx.conv2dTiling->kernelW;
        self->ctx.singleCoreCin = self->ctx.conv2dTiling->singleCoreCi; // Cin handled on this core
        self->ctx.singleCoreCo = self->ctx.conv2dTiling->singleCoreCo; // Co handled on this core
        self->ctx.singleCoreHo = self->ctx.conv2dTiling->singleCoreHo; // Ho handled on this core
        self->ctx.singleCoreWo = self->ctx.conv2dTiling->singleCoreWo; // Wo handled on this core
        self->ctx.biasFullLoadFlag = self->ctx.conv2dTiling->biasFullLoadFlag;
        self->ctx.scaleFullLoadFlag = self->ctx.conv2dTiling->fixpParamsFullLoadFlag;
        InitBuffer(self);
        InitConv2dKDirectionBaseValue<Intf>(self);
        InitConv2dWoDirectionBaseValue<Intf>(self);
        InitConv2dCoutDirectionBaseValue<Intf>(self);
        InitConv2dHoDirectionBaseValue<Intf>(self);
    }

    // Computes every buffer's element count and allocates the L1/L0/output queues.
    static __aicore__ inline void InitBuffer(Intf *self)
    {
        // mL0: output positions per L0 tile, rounded up to the L0 M-block granularity.
        uint64_t mL0 =
            ((self->ctx.conv2dTiling->hoL0 * self->ctx.conv2dTiling->woL0 + BLOCK_L0_M - 1) / BLOCK_L0_M) * BLOCK_L0_M;
        uint64_t al0Spacesize = mL0 * self->ctx.conv2dTiling->kL0;
        uint64_t bl0Spacesize = self->ctx.conv2dTiling->nL0 * self->ctx.conv2dTiling->kL0;
        uint64_t cl0Spacesize = mL0 * self->ctx.conv2dTiling->nL0;
        uint64_t bl1Spacesize =
            self->ctx.conv2dTiling->multiNBL1 * self->ctx.conv2dTiling->nL0 * self->ctx.conv2dTiling->kBL1;
        uint64_t biasl1Spacesize = self->ctx.conv2dTiling->nL0 * sizeof(typename Intf::BiasT); // without full load, bias follows the L0C split
        uint64_t biasBTSpacesize = self->ctx.conv2dTiling->nL0;
        if (self->ctx.biasFullLoadFlag) {
            // Full load: a single L1 buffer covering the whole single-core Co extent.
            biasl1Spacesize = AlignB(self->ctx.singleCoreCo * sizeof(typename Intf::BiasT),
                                     BLOCK_L0_N * sizeof(typename Intf::BiasT));
        }
        uint64_t scaleL1SpaceSize = self->ctx.scaleFullLoadFlag ?
            AlignB(self->ctx.singleCoreCo * sizeof(typename Intf::ScaleT), BLOCK_L0_N * sizeof(typename Intf::ScaleT)) :
            self->ctx.conv2dTiling->nL0 * sizeof(typename Intf::ScaleT);

        // Effective (dilated) kernel extents.
        self->ctx.dilatedKernelH = 1 + (self->ctx.kernelH - 1) * self->ctx.conv2dTiling->dilationH;
        self->ctx.dilatedKernelW = 1 + (self->ctx.kernelW - 1) * self->ctx.conv2dTiling->dilationW;
        self->ctx.dilatedKernelHxkernelW = self->ctx.dilatedKernelH * self->ctx.dilatedKernelW;
        self->ctx.kernelHxkernelW = self->ctx.kernelH * self->ctx.kernelW;
        // Input footprint of one L1 output tile, clamped to the original input extents.
        uint64_t hiAL1Max =
            (self->ctx.hoL1 - 1) * self->ctx.conv2dTiling->strideH + self->ctx.dilatedKernelH;
        hiAL1Max = hiAL1Max > self->ctx.oriHi ? self->ctx.oriHi : hiAL1Max;
        uint64_t wiAL1Max =
            (self->ctx.woL1 - 1) * self->ctx.conv2dTiling->strideW + self->ctx.dilatedKernelW;
        wiAL1Max = wiAL1Max > self->ctx.oriWi ? self->ctx.oriWi : wiAL1Max;
        uint64_t ciAL1 = self->ctx.conv2dTiling->kAL1 / self->ctx.kernelHxkernelW;
        uint64_t al1Spacesize = ciAL1 * hiAL1Max * wiAL1Max;

        InitBufferWithDoubleBuf(self, al0Spacesize, bl0Spacesize, cl0Spacesize, al1Spacesize, bl1Spacesize);
        self->ctx.pipe.InitBuffer(
            self->ctx.queueBiasL1, 1, AlignB(biasl1Spacesize, C0_SIZE));
        self->ctx.pipe.InitBuffer(
            self->ctx.queueBiasBT, 1, AlignB(biasBTSpacesize * sizeof(typename Intf::L0cT), BT_SIZE));
        self->ctx.pipe.InitBuffer(
            self->ctx.queueScaleL1, 1, AlignB(scaleL1SpaceSize, C0_SIZE));
        self->ctx.pipe.InitBuffer(self->ctx.queueOutput, 1, cl0Spacesize * sizeof(typename Intf::OutputT));
    }

    // Allocates the L1/L0 queues with single or double buffering; one bit of
    // pBufferFlag per buffer: bit0 AL0, bit1 BL0, bit2 CL0, bit3 AL1, bit4 BL1.
    // Fix: cl0Spacesize is now uint64_t for consistency with its sibling
    // parameters (it was declared int64_t while every caller passes uint64_t,
    // risking a silent signed/unsigned conversion).
    static __aicore__ inline void InitBufferWithDoubleBuf(Intf *self, uint64_t al0Spacesize, uint64_t bl0Spacesize,
        uint64_t cl0Spacesize, uint64_t al1Spacesize, uint64_t bl1Spacesize)
    {
        int8_t al0db = self->ctx.conv2dTiling->pBufferFlag & 0x01;
        int8_t bl0db = (self->ctx.conv2dTiling->pBufferFlag & 0x02) >> 1;
        int8_t cl0db = (self->ctx.conv2dTiling->pBufferFlag & 0x04) >> 2;
        int8_t al1db = (self->ctx.conv2dTiling->pBufferFlag & 0x08) >> 3;
        int8_t bl1db = (self->ctx.conv2dTiling->pBufferFlag & 0x10) >> 4;
        if (!al0db) {
            self->ctx.pipe.InitBuffer(
                self->ctx.queueAL0, 1, AlignB(al0Spacesize * sizeof(typename Intf::FmapT), FRACTAL_SIZE));
        } else {
            self->ctx.pipe.InitBuffer(
                self->ctx.queueAL0, 2, AlignB(al0Spacesize * sizeof(typename Intf::FmapT), FRACTAL_SIZE));
        }
        if (!bl0db) {
            self->ctx.pipe.InitBuffer(
                self->ctx.queueBL0, 1, AlignB(bl0Spacesize * sizeof(typename Intf::WeightT), FRACTAL_SIZE));
        } else {
            self->ctx.pipe.InitBuffer(
                self->ctx.queueBL0, 2, AlignB(bl0Spacesize * sizeof(typename Intf::WeightT), FRACTAL_SIZE));
        }
        if (!cl0db) {
            self->ctx.pipe.InitBuffer(self->ctx.queueCL0, 1, cl0Spacesize * sizeof(typename Intf::L0cT));
        } else {
            self->ctx.pipe.InitBuffer(self->ctx.queueCL0, 2, cl0Spacesize * sizeof(typename Intf::L0cT));
        }
        if (!al1db) {
            self->ctx.pipe.InitBuffer(self->ctx.queueAL1, 1,
                AlignB(al1Spacesize * sizeof(typename Intf::FmapT), C0_SIZE));
        } else {
            self->ctx.pipe.InitBuffer(self->ctx.queueAL1, 2,
                AlignB(al1Spacesize * sizeof(typename Intf::FmapT), C0_SIZE));
        }
        if (!bl1db) {
            self->ctx.pipe.InitBuffer(
                self->ctx.queueBL1, 1, AlignB(bl1Spacesize * sizeof(typename Intf::WeightT), C0_SIZE));
        } else {
            self->ctx.pipe.InitBuffer(
                self->ctx.queueBL1, 2, AlignB(bl1Spacesize * sizeof(typename Intf::WeightT), C0_SIZE));
        }
    }
};

template <class Intf, uint32_t ImplType>
struct SetFmap {
    // Binds the feature-map global-memory tensor and re-arms the iteration state.
    static __aicore__ inline void call(Intf *self, const GlobalTensor<typename Intf::FmapT> &fmap)
    {
        auto &ctx = self->ctx;
        ctx.isFirstIterate = true;
        ctx.agm.SetGlobalBuffer(fmap.GetPhyAddr(0), fmap.GetSize());
    }
};

template <class Intf, uint32_t ImplType>
struct SetWeight {
    // Binds the weight global-memory tensor and re-arms the iteration state.
    static __aicore__ inline void call(Intf *self, const GlobalTensor<typename Intf::WeightT> &weight)
    {
        auto &ctx = self->ctx;
        ctx.isFirstIterate = true;
        ctx.bgm.SetGlobalBuffer(weight.GetPhyAddr(0), weight.GetSize());
    }
};

template <class Intf, uint32_t ImplType>
struct SetScale {
    // Binds the quantization-scale tensor, enables the scale path,
    // and re-arms the iteration state.
    static __aicore__ inline void call(Intf *self, const GlobalTensor<typename Intf::ScaleT> &scale)
    {
        auto &ctx = self->ctx;
        ctx.enableScale = true;
        ctx.isFirstIterate = true;
        ctx.scalegm.SetGlobalBuffer(scale.GetPhyAddr(0), scale.GetSize());
    }
};

template <class Intf, uint32_t ImplType>
struct SetBias {
    // Binds the bias tensor, enables the bias path, and re-arms the iteration state.
    static __aicore__ inline void call(Intf *self, const GlobalTensor<typename Intf::BiasT> &bias)
    {
        auto &ctx = self->ctx;
        ctx.enableBias = true;
        ctx.isFirstIterate = true;
        ctx.biasgm.SetGlobalBuffer(bias.GetPhyAddr(0), bias.GetSize());
    }
};

template <class Intf, uint32_t ImplType>
struct SetOrgFmapShape {
    // Overrides the original feature-map shape (Ci, Hi, Wi) cached in the context.
    static __aicore__ inline void call(Intf *self, uint64_t orgCi, uint64_t orgHi, uint64_t orgWi)
    {
        auto &ctx = self->ctx;
        ctx.oriCi = orgCi;
        ctx.oriHi = orgHi;
        ctx.oriWi = orgWi;
    }
};

template <class Intf, uint32_t ImplType>
struct SetOrgWeightShape {
    // Overrides the original weight shape (Co, Ci, Kh, Kw) cached in the context.
    static __aicore__ inline void call(Intf *self, uint64_t orgCo, uint64_t orgCi, uint64_t orgKh, uint64_t orgKw)
    {
        auto &ctx = self->ctx;
        ctx.oriCo = orgCo;
        ctx.oriCi = orgCi;
        ctx.kernelH = orgKh;
        ctx.kernelW = orgKw;
    }
};

template <class Intf, uint32_t ImplType>
struct SetOrgOutputShape {
    // Overrides the original output shape (Co, Ho, Wo) cached in the context.
    static __aicore__ inline void call(Intf *self, uint64_t orgCo, uint64_t orgHo, uint64_t orgWo)
    {
        auto &ctx = self->ctx;
        ctx.oriCo = orgCo;
        ctx.oriHo = orgHo;
        ctx.oriWo = orgWo;
    }
};

template <class Intf, uint32_t ImplType>
struct SetSingleFmapShape {
    // Overrides the per-core feature-map shape (Ci, Hi, Wi) cached in the context.
    static __aicore__ inline void call(Intf *self, uint64_t singleCi, uint64_t singleHi, uint64_t singleWi)
    {
        auto &ctx = self->ctx;
        ctx.singleCoreCin = singleCi;
        ctx.singleCoreHi = singleHi;
        ctx.singleCoreWi = singleWi;
    }
};

template <class Intf, uint32_t ImplType>
struct SetSingleWeightShape {
    // Overrides the per-core weight Co/Ci and refreshes the Cout-direction
    // loop bookkeeping. singleKh / singleKw are accepted for interface symmetry
    // but are not stored here.
    static __aicore__ inline void call(Intf *self, uint64_t singleCo, uint64_t singleCi, uint64_t singleKh,
                                       uint64_t singleKw)
    {
        auto &ctx = self->ctx;
        ctx.singleCoreCo = singleCo;
        ctx.singleCoreCin = singleCi;
        InitConv2dCoutDirectionBaseValue<Intf>(self);
    }
};

template <class Intf, uint32_t ImplType>
struct SetSingleOutputShape {
    // Overrides the per-core output shape (Co, Ho, Wo) and refreshes the loop
    // bookkeeping of every affected direction.
    static __aicore__ inline void call(Intf *self, uint64_t singleCo, uint64_t singleHo, uint64_t singleWo)
    {
        auto &ctx = self->ctx;
        ctx.singleCoreCo = singleCo;
        ctx.singleCoreHo = singleHo;
        ctx.singleCoreWo = singleWo;
        InitConv2dCoutDirectionBaseValue<Intf>(self);
        InitConv2dHoDirectionBaseValue<Intf>(self);
        InitConv2dWoDirectionBaseValue<Intf>(self);
    }
};

template <class Intf, uint32_t ImplType>
struct SetFmapStartPosition {
    // Records the starting H/W offsets of this core's slice within the feature map.
    static __aicore__ inline void call(Intf *self, int64_t singleCoreHiStartPos, int64_t wiStartPos)
    {
        auto &ctx = self->ctx;
        ctx.singleCoreHiStartPos = singleCoreHiStartPos;
        ctx.wiStartPos = wiStartPos;
    }
};

template <class Intf, uint32_t ImplType>
struct End {
    // Releases the synchronization events of every queue at the end of the
    // computation (release order kept identical to the original sequence).
    static __aicore__ inline void call(Intf *self)
    {
        auto &ctx = self->ctx;
        ctx.queueAL1.FreeAllEvent();
        ctx.queueBL1.FreeAllEvent();
        ctx.queueBiasL1.FreeAllEvent();
        ctx.queueScaleL1.FreeAllEvent();
        ctx.queueBiasBT.FreeAllEvent();
        ctx.queueOutput.FreeAllEvent();
        ctx.queueAL0.FreeAllEvent();
        ctx.queueBL0.FreeAllEvent();
        ctx.queueCL0.FreeAllEvent();
    }
};

template <class Intf, uint32_t ImplType>
struct Iterate {
    // Advances the outer M/N iteration by one output tile and runs its K
    // reduction. Returns false once the single-core output is fully produced.
    template <bool sync = true>
    static __aicore__ inline bool call(Intf *self, bool enPartialSum = false)
    {
        return IterateImpl(self, enPartialSum);
    }

    static __aicore__ inline bool IterateImpl(Intf *self, bool enPartialSum);
    static __aicore__ inline void FirstIterateImpl(Intf *self);
    static __aicore__ inline bool IterateMFirst(Intf *self);
    static __aicore__ inline bool IterateNFirst(Intf *self);
    static __aicore__ inline void IterateK(Intf *self);
    static __aicore__ inline void ReduceK(Intf *self);
    // N extent of the current L0 tile: the tail value on the very last N tile,
    // a full nL0 otherwise; always rounded up to BLOCK_L0_N.
    static __aicore__ inline uint64_t CalcLoad2dNExtension(Intf *self)
    {
        uint64_t n = (self->ctx.nBL1Iter == self->ctx.maxNBL1Iter && self->ctx.nL0Iter == self->ctx.maxNL0Iter)
                   ? self->ctx.nL0Tail
                   : self->ctx.conv2dTiling->nL0;
        return AlignB(n, BLOCK_L0_N);
    }
    // Refreshes currentWoL1 / woL0Tail / maxWoL0Iter / l12l0LoopW for the current
    // W iteration, honouring the optional extra small-tail L1 iteration.
    static __aicore__ inline void CalcWoDirectionVar(Intf *self)
    {
        if (self->ctx.woL1SmallTail > 0) {
            if (self->ctx.woAL1Iter == self->ctx.maxWoL1Iter) {
                self->ctx.currentWoL1 = self->ctx.woL1SmallTail;  // last iteration carries the small tail
            } else if (self->ctx.woAL1Iter == self->ctx.maxWoL1Iter - 1) {
                self->ctx.currentWoL1 = self->ctx.woAL1Tail;  // woL0-aligned part of the tail
            } else {
                self->ctx.currentWoL1 = self->ctx.woL1;
            }
        } else {
            self->ctx.currentWoL1 = self->ctx.woAL1Iter == self->ctx.maxWoL1Iter ? self->ctx.woAL1Tail : self->ctx.woL1;
        }
        self->ctx.woL0Tail = self->ctx.currentWoL1 % self->ctx.conv2dTiling->woL0;
        self->ctx.woL0Tail = self->ctx.woL0Tail == 0 ? self->ctx.conv2dTiling->woL0 : self->ctx.woL0Tail;
        self->ctx.maxWoL0Iter = CeilDIV(self->ctx.currentWoL1, self->ctx.conv2dTiling->woL0) - 1;
        self->ctx.l12l0LoopW = self->ctx.maxWoL0Iter + 1;
    }
    // Same as CalcWoDirectionVar for the H direction (no small-tail special case).
    static __aicore__ inline void CalcHoDirectionVar(Intf *self)
    {
        self->ctx.currentHoL1 = self->ctx.hoAL1Iter == self->ctx.maxHoL1Iter ? self->ctx.hoAL1Tail : self->ctx.hoL1;
        self->ctx.hoL0Tail = self->ctx.currentHoL1 % self->ctx.conv2dTiling->hoL0;
        self->ctx.hoL0Tail = self->ctx.hoL0Tail == 0 ? self->ctx.conv2dTiling->hoL0 : self->ctx.hoL0Tail;
        self->ctx.maxHoL0Iter = CeilDIV(self->ctx.currentHoL1, self->ctx.conv2dTiling->hoL0) - 1;
        self->ctx.l12l0LoopH = self->ctx.maxHoL0Iter + 1;
    }
    // When the H L0 loop wraps, advance the W L1 loop, flag AL1 for reload
    // (freeing the resident tensor in full-load mode), and report the wrap.
    static __aicore__ inline bool UpdateTemporaryStoredLoopW(Intf *self)
    {
        if (self->ctx.hoL0Iter == self->ctx.l12l0LoopH) {
            self->ctx.hoL0Iter = 0;
            self->ctx.woAL1Iter++;  // this Iterate must advance LoopW
            self->ctx.loadAL1Flag = true;
            if (self->ctx.kAL1fullload) {
                self->ctx.queueAL1.FreeTensor(self->ctx.al1);
            }
            return true;
        }
        return false;
    }
    // NOTE(review): despite the name this frees the L0 tensors (AL0/BL0) and the
    // non-full-load bias/scale L1 tensors — not AL1/BL1 themselves. The L0 frees
    // are skipped when the L1 tile equals the L0 tile and the tensor is being reused.
    static __aicore__ inline void FreeL1Tensor(Intf *self)
    {
        if (!(self->ctx.conv2dTiling->kAL1 == self->ctx.conv2dTiling->kL0 && (!self->ctx.loadAL0Flag))) {
            self->ctx.queueAL0.FreeTensor(self->ctx.al0);
        }
        if (!(self->ctx.conv2dTiling->kBL1 == self->ctx.conv2dTiling->kL0 && (!self->ctx.loadBL0Flag))) {
            self->ctx.queueBL0.FreeTensor(self->ctx.bl0);
        }
        if (self->ctx.enableBias && !self->ctx.biasFullLoadFlag) {
            self->ctx.queueBiasL1.FreeTensor(self->ctx.biasL1);
        }
        if (self->ctx.enableScale && !self->ctx.scaleFullLoadFlag) {
            self->ctx.queueScaleL1.FreeTensor(self->ctx.scaleL1);
        }
    }
};

template <class Intf, uint32_t ImplType>
struct GetTensorC {
    // Copies the accumulated L0C tile to global memory and releases the
    // per-iteration tensors. Always returns false; enSequentialWrite is unused here.
    template <bool sync = true>
    static __aicore__ inline bool call(
        Intf *self, const GlobalTensor<typename Intf::OutputT> &output, bool enSequentialWrite = false)
    {
        auto &ctx = self->ctx;
        ctx.copyOutIns.CopyOut(output);
        ctx.queueCL0.FreeTensor(ctx.cl0);
        if (ctx.enableBias) {
            ctx.queueBiasBT.FreeTensor(ctx.biasBT);
        }
        return false;
    }
};

template <class Intf, uint32_t ImplType>
struct IterateAll {
    /**
     * Runs the complete convolution for this core: wires up all loader/mad/copy-out
     * instances, optionally pre-loads full-load bias/scale into L1, then alternates
     * Iterate + GetTensorC until the single-core output is written, and finally
     * resets the iteration state for a possible next call. Always returns false.
     */
    template <bool sync = true>
    static __aicore__ inline bool call(
        Intf *self, const GlobalTensor<typename Intf::OutputT> &output, bool enPartialSum = false)
    {
        self->ctx.loadBiasL1Ins.SetParams(self);
        self->ctx.loadScaleL1Ins.SetParams(self);
        self->ctx.loadBL1Ins.SetParams(self);
        self->ctx.loadAl1Ins.SetParams(self);
        self->ctx.loadAL0Ins.SetParams(self, &self->ctx.loadAl1Ins);
        self->ctx.loadBL0Ins.SetParams(self);
        self->ctx.madIns.SetParams(self);
        self->ctx.loadBiasBTIns.SetParams(self);
        self->ctx.copyOutIns.SetParams(self);

        if (self->ctx.conv2dTiling->hf32Enable) {
            SetHF32(self, self->ctx.conv2dTiling->hf32Enable, self->ctx.conv2dTiling->hf32TransMode);
        }

        // Full-load bias: one L1 load covering the whole singleCoreCo, held until the end.
        if (self->ctx.biasFullLoadFlag && self->ctx.enableBias) {
            self->ctx.biasL1 = self->ctx.queueBiasL1.template AllocTensor<typename Intf::BiasT>();
            self->ctx.loadBiasL1Ins.LoadChannelWiseL1FullLoad(self->ctx.biasL1, self->ctx.biasgm,
                self->ctx.conv2dTiling->singleCoreCo, 0);
            self->ctx.queueBiasL1.EnQue(self->ctx.biasL1);
            self->ctx.biasL1 = self->ctx.queueBiasL1.template DeQue<typename Intf::BiasT>();
        }
        // Full-load scale: same pattern as bias.
        if (self->ctx.scaleFullLoadFlag && self->ctx.enableScale) {
            self->ctx.scaleL1 = self->ctx.queueScaleL1.template AllocTensor<typename Intf::ScaleT>();
            self->ctx.loadScaleL1Ins.LoadChannelWiseL1FullLoad(self->ctx.scaleL1, self->ctx.scalegm,
                self->ctx.conv2dTiling->singleCoreCo, 0);
            self->ctx.queueScaleL1.EnQue(self->ctx.scaleL1);
            self->ctx.scaleL1 = self->ctx.queueScaleL1.template DeQue<typename Intf::ScaleT>();
        }
        while (Iterate<Intf, ImplType>::call(self, enPartialSum)) {
            GetTensorC<Intf, ImplType>::call(self, output);
        }
        if (self->ctx.biasFullLoadFlag && self->ctx.enableBias) {
            self->ctx.queueBiasL1.FreeTensor(self->ctx.biasL1);
        }
        if (self->ctx.scaleFullLoadFlag && self->ctx.enableScale) {
            self->ctx.queueScaleL1.FreeTensor(self->ctx.scaleL1);
        }
        // Re-arm the iteration state so the object can be iterated again.
        self->ctx.isFirstIterate = true;
        self->ctx.loadAL1Flag = true;
        self->ctx.loadBL1Flag = true;
        self->ctx.nL0Iter = 0;    // L12L0LoopN
        self->ctx.hoAL1Iter = 0;  // DDR2L1LoopH
        self->ctx.woAL1Iter = 0;  // DDR2L1LoopW
        self->ctx.nBL1Iter = 0;   // DDR2L1loopN
        self->ctx.woL0Iter = 0;
        self->ctx.hoL0Iter = 0;
        return false;
    }

    /**
     * Configures the HF32 control bits (CTRL bit 46 enables HF32, bit 47 selects
     * the transform mode). hf32TransMode must be 0 or 1.
     */
    static __aicore__ inline void SetHF32(Intf *self, bool hf32Enable, int32_t hf32TransMode)
    {
        // Bugfix: the log message previously referenced the undeclared identifier
        // `transMode`; it must use the actual parameter `hf32TransMode`.
        ASCENDC_ASSERT((hf32TransMode == 0 || hf32TransMode == 1),
                    { KERNEL_LOG(KERNEL_ERROR, "hf32TransMode is %d , which should only be 0 / 1", hf32TransMode); });
        if (unlikely(hf32Enable)) {
            set_ctrl(sbitset1(get_ctrl(), CTRL_46_BIT));
        } else {
            set_ctrl(sbitset0(get_ctrl(), CTRL_46_BIT));
        }
        if (unlikely(hf32TransMode == 1)) {
            set_ctrl(sbitset1(get_ctrl(), CTRL_47_BIT));
        } else {
            set_ctrl(sbitset0(get_ctrl(), CTRL_47_BIT));
        }
    }
};

template <class Intf, uint32_t ImplType>
__aicore__ void Iterate<Intf, ImplType>::FirstIterateImpl(Intf *self)
{
    // Indices are advanced *before* each load, so the very first Iterate needs an
    // explicit reset of every loop counter and load flag.
    auto &ctx = self->ctx;
    ctx.nBL1Iter = 0;   // DDR2L1loopN
    ctx.nL0Iter = 0;    // L12L0LoopN
    ctx.hoAL1Iter = 0;  // DDR2L1LoopH
    ctx.woAL1Iter = 0;  // DDR2L1LoopW
    ctx.hoL0Iter = 0;
    ctx.woL0Iter = 0;
    ctx.loadAL1Flag = true;
    ctx.loadBL1Flag = true;
    ctx.loadAL0Flag = true;
    ctx.loadBL0Flag = true;
    ctx.isFirstIterate = false;
    CalcWoDirectionVar(self);
    CalcHoDirectionVar(self);
}

template <class Intf, uint32_t ImplType>
__aicore__ bool Iterate<Intf, ImplType>::IterateMFirst(Intf *self)
{
    // ReorderN (M-first order): advance along the M axis first, then along N.
    // The fmap tiles reuse the resident weight tile.
    //    M
    //    |
    //    |
    //    |----------N-------->
    // ==================L0========================
    self->ctx.woL0Iter++;
    CalcWoDirectionVar(self);
    if (self->ctx.woL0Iter == self->ctx.l12l0LoopW) {
        self->ctx.woL0Iter = 0;
        self->ctx.hoL0Iter++;
    } else {
        self->ctx.loadBL0Flag = false;  // still inside the same BL0 tile
        return true;
    }
    CalcHoDirectionVar(self);
    if (!UpdateTemporaryStoredLoopW(self)) {
        self->ctx.loadBL0Flag = false;
        return true;
    }
    if (self->ctx.woAL1Iter == self->ctx.ddr2l1LoopW) {  // W direction exhausted
        self->ctx.woAL1Iter = 0;
        self->ctx.hoAL1Iter++;  // this Iterate must advance LoopH
    } else {
        self->ctx.loadBL0Flag = false;
        return true;
    }
    if (self->ctx.hoAL1Iter == self->ctx.ddr2l1LoopH) {  // H direction exhausted
        self->ctx.hoAL1Iter = 0;
        self->ctx.nL0Iter++;
    } else {
        self->ctx.loadBL0Flag = false;
        return true;
    }
    self->ctx.loadBL0Flag = true;
    if (self->ctx.nL0Iter == self->ctx.l12l0LoopN) {  // L1 data consumed by L0; advance the BL1 loop and flag a reload
        self->ctx.nL0Iter = 0;
        self->ctx.nBL1Iter++;  // this Iterate must advance LoopN
        self->ctx.loadBL1Flag = true;
        if (self->ctx.kBL1fullload) {
            self->ctx.queueBL1.FreeTensor(self->ctx.bl1);
        }
    } else {
        return true;
    }
    // ================== end-of-iteration check ===============
    if (unlikely(self->ctx.nBL1Iter == self->ctx.ddr2l1LoopN)) {  // all L1 loads and L0 compute finished
        return false;                                   // N loop done: leave Iterate
    }
    return true;
}

template <class Intf, uint32_t ImplType>
__aicore__ bool Iterate<Intf, ImplType>::IterateNFirst(Intf *self)
{
    // ReorderM (N-first order): advance along the N axis first, then along M.
    // The weight tiles reuse the resident fmap tile.
    //    ----------N-------->
    //                       |
    //                       |
    //                       M
    //                       |
    //                       |
    // ==================L0========================
    self->ctx.nL0Iter++;
    // ==================L1========================
    if (self->ctx.nL0Iter == self->ctx.l12l0LoopN) {  // L1 data consumed by L0; advance the BL1 loop and flag a reload
        self->ctx.nL0Iter = 0;
        self->ctx.nBL1Iter++;  // this Iterate must advance LoopN
        self->ctx.loadBL1Flag = true;
        if (self->ctx.kBL1fullload) {
            self->ctx.queueBL1.FreeTensor(self->ctx.bl1);
        }
    } else {
        self->ctx.loadAL0Flag = false;
        return true;
    }
    if (self->ctx.nBL1Iter == self->ctx.ddr2l1LoopN) {  // N direction exhausted
        self->ctx.nBL1Iter = 0;
        self->ctx.woL0Iter++;
    } else {
        self->ctx.loadAL0Flag = false;
        return true;
    }
    self->ctx.loadAL0Flag = true;
    CalcWoDirectionVar(self);
    if (self->ctx.woL0Iter == self->ctx.l12l0LoopW) {
        self->ctx.woL0Iter = 0;
        self->ctx.hoL0Iter++;
    } else {
        return true;
    }
    CalcHoDirectionVar(self);
    if (!UpdateTemporaryStoredLoopW(self)) {
        return true;
    }
    if (self->ctx.woAL1Iter == self->ctx.ddr2l1LoopW) {  // W direction exhausted (comment fixed: was labelled "h")
        self->ctx.woAL1Iter = 0;
        self->ctx.hoAL1Iter++;  // this Iterate must advance LoopH
    } else {
        return true;
    }
    // ================== end-of-iteration check ===============
    if (unlikely(self->ctx.hoAL1Iter == self->ctx.ddr2l1LoopH)) {
        return false;  // M loop done: leave Iterate
    }
    return true;
}

template <class Intf, uint32_t ImplType>
__aicore__ void Iterate<Intf, ImplType>::ReduceK(Intf *self)
{
    // Inner K-reduction loop: per K step, (re)load the L1 tiles when needed,
    // stage the L0A/L0B tiles, and accumulate the matmul result into the single
    // CL0 tile allocated on the first step.
    while (self->ctx.kIter < self->ctx.ddr2l0LoopK) {
        // AL1: reload when explicitly flagged, or at every kAL1 boundary when K
        // is not fully resident in L1.
        if (self->ctx.loadAL1Flag || (!self->ctx.kAL1fullload && self->ctx.kIter % self->ctx.multiKAL1 == 0)) {
            if (self->ctx.kIter != 0) {
                self->ctx.queueAL1.FreeTensor(self->ctx.al1);
            }
            self->ctx.al1 = self->ctx.queueAL1.template AllocTensor<typename Intf::FmapT>();
            self->ctx.kAL1Iter = self->ctx.kIter / self->ctx.multiKAL1;
            self->ctx.loadAl1Ins.LoadAL1();
            self->ctx.queueAL1.EnQue(self->ctx.al1);
            self->ctx.al1 = self->ctx.queueAL1.template DeQue<typename Intf::FmapT>();
            self->ctx.loadAL1Flag = false;  // within LoopK only the K direction can retrigger a load
        }
        // BL1: same reload policy as AL1.
        if (self->ctx.loadBL1Flag || (!self->ctx.kBL1fullload && self->ctx.kIter % self->ctx.multiKBL1 == 0)) {
            if (self->ctx.kIter != 0) {
                self->ctx.queueBL1.FreeTensor(self->ctx.bl1);
            }
            self->ctx.bl1 = self->ctx.queueBL1.template AllocTensor<typename Intf::WeightT>();
            self->ctx.kBL1Iter = self->ctx.kIter / self->ctx.multiKBL1;
            self->ctx.loadBL1Ins.LoadBL1();
            self->ctx.queueBL1.EnQue(self->ctx.bl1);
            self->ctx.loadBL1Flag = false;  // within LoopK only the K direction can retrigger a load
            self->ctx.bl1 = self->ctx.queueBL1.template DeQue<typename Intf::WeightT>();
        }
        // AL0: skipped only when one L1 K tile equals one L0 K tile and reuse is flagged.
        if (!((self->ctx.conv2dTiling->kAL1 == self->ctx.conv2dTiling->kL0) && (!self->ctx.loadAL0Flag))) {
            self->ctx.al0 = self->ctx.queueAL0.template AllocTensor<typename Intf::FmapT>();
            self->ctx.kL0AIter = self->ctx.kIter % self->ctx.multiKAL1;
            self->ctx.loadAL0Ins.LoadAL0();
            self->ctx.queueAL0.EnQue(self->ctx.al0);
            self->ctx.al0 = self->ctx.queueAL0.template DeQue<typename Intf::FmapT>();
        }
        // BL0
        if (!((self->ctx.conv2dTiling->kBL1 == self->ctx.conv2dTiling->kL0) && (!self->ctx.loadBL0Flag))) {
            self->ctx.bl0 = self->ctx.queueBL0.template AllocTensor<typename Intf::WeightT>();
            self->ctx.kL0BIter = self->ctx.kIter % self->ctx.multiKBL1;
            self->ctx.loadBL0Ins.LoadBL0();
            self->ctx.queueBL0.EnQue(self->ctx.bl0);
            self->ctx.bl0 = self->ctx.queueBL0.template DeQue<typename Intf::WeightT>();
        }
        // One CL0 accumulator tile for the whole reduction.
        if (self->ctx.kIter == 0) {
            self->ctx.cl0 = self->ctx.queueCL0.template AllocTensor<typename Intf::L0cT>();
        }
        self->ctx.madIns.Mad();
        FreeL1Tensor(self);  // NOTE(review): frees L0 (and bias/scale) tensors despite the name
        self->ctx.kIter++;
    }
}

template <class Intf, uint32_t ImplType>
__aicore__ void Iterate<Intf, ImplType>::IterateK(Intf *self)
{
    // Per output tile: set the N extent, stage bias/scale, run the K reduction,
    // then publish the CL0 result tile and reset the K counter.
    uint64_t n = CalcLoad2dNExtension(self);
    self->ctx.madIns.SetN(n);
    self->ctx.loadBL0Ins.SetN(n);
    if (self->ctx.enableBias) {
        // Non-full-load bias is staged into L1 for every output tile.
        if (!self->ctx.biasFullLoadFlag) {
            self->ctx.biasL1 = self->ctx.queueBiasL1.template AllocTensor<typename Intf::BiasT>();
            self->ctx.loadBiasL1Ins.LoadChannelWiseL1(self->ctx.biasL1, self->ctx.biasgm);
            self->ctx.queueBiasL1.EnQue(self->ctx.biasL1);
            self->ctx.biasL1 = self->ctx.queueBiasL1.template DeQue<typename Intf::BiasT>();
        }
        // Bias is always moved from L1 into the bias table (BT) per tile.
        self->ctx.biasBT = self->ctx.queueBiasBT.template AllocTensor<typename Intf::L0cT>();
        self->ctx.loadBiasBTIns.LoadBiasBt();
        self->ctx.queueBiasBT.EnQue(self->ctx.biasBT);
        self->ctx.biasBT = self->ctx.queueBiasBT.template DeQue<typename Intf::L0cT>();
    }
    if (self->ctx.enableScale && !self->ctx.scaleFullLoadFlag) {
        self->ctx.scaleL1 = self->ctx.queueScaleL1.template AllocTensor<typename Intf::ScaleT>();
        self->ctx.loadScaleL1Ins.LoadChannelWiseL1(self->ctx.scaleL1, self->ctx.scalegm);
        self->ctx.queueScaleL1.EnQue(self->ctx.scaleL1);
        self->ctx.scaleL1 = self->ctx.queueScaleL1.template DeQue<typename Intf::ScaleT>();
    }
    ReduceK(self);
    // ReduceK leaves the last AL1/BL1 tensors live; release them unless they are
    // fully resident (full-load tensors are freed by the outer M/N iteration).
    if (!self->ctx.kAL1fullload) {
        self->ctx.queueAL1.FreeTensor(self->ctx.al1);
    }
    if (!self->ctx.kBL1fullload) {
        self->ctx.queueBL1.FreeTensor(self->ctx.bl1);
    }
    self->ctx.queueCL0.EnQue(self->ctx.cl0);
    self->ctx.cl0 = self->ctx.queueCL0.template DeQue<typename Intf::L0cT>();
    self->ctx.kIter = 0;
}

template <class Intf, uint32_t ImplType>
__aicore__ bool Iterate<Intf, ImplType>::IterateImpl(Intf *self, bool enPartialSum)
{
    // One outer-loop step: initialize the iteration state on the first call,
    // otherwise advance it in the configured M-first or N-first order, then run
    // the K reduction for the current output tile.
    // Returns false once the whole single-core output has been produced.
    // NOTE(review): enPartialSum is accepted but not used in this implementation.
    // NOTE(review): if iterateMNOrder matches neither ORDER_MTERFIRST nor
    // ORDER_NTERFIRST the state is never advanced and the caller would loop
    // forever — presumably tiling guarantees a valid order; confirm.
    if (self->ctx.isFirstIterate) {
        FirstIterateImpl(self);
    } else if (likely(self->ctx.conv2dTiling->iterateMNOrder == static_cast<int>(IterateOrder::ORDER_MTERFIRST))) {
        if (IterateMFirst(self) == false) {
            return false;
        }
    } else if (likely(self->ctx.conv2dTiling->iterateMNOrder == static_cast<int>(IterateOrder::ORDER_NTERFIRST))) {
        if (IterateNFirst(self) == false) {
            return false;
        }
    }
    IterateK(self);
    return true;
}

}  // namespace Conv2dFunc
#endif // CONV2D_COMMON_FUNC_H
