#ifndef QUANT_MATMUL_V2_BASE_H
#define QUANT_MATMUL_V2_BASE_H

#include <cstdint>
#include "kernel_operator.h"
#include "kernel_operator_intf.h"
#include "kernel_type.h"
#include "lib/matmul_intf.h"

#define TemplateBasicType typename X1Type, typename X2Type, typename ScaleType, typename YType, int X1Format, int X2Format, bool ATrans, bool BTrans, class UPDATE_TYPE
#define TemplateBasicValue X1Type, X2Type, ScaleType, YType, X1Format, X2Format, ATrans, BTrans, UPDATE_TYPE

// ---- Tiling / hardware layout constants ----
constexpr uint32_t BMM_BLOCK_NUM = 16;
// K0: inner-axis fractal size per element type. Values are consistent with a
// 32-byte fractal row (32 / sizeof(type)) -- TODO confirm against tiling spec.
constexpr uint32_t K0_INT8 = 32;
constexpr uint32_t K0_FLOAT16 = 16;
constexpr uint32_t K0_FLOAT32 = 8;
// Numeric value of the FRACTAL_NZ format enum used by get_format() below.
constexpr uint32_t FORMAT_FRACTAL_NZ_INT = 29;
constexpr uint32_t BUFFER_NUM = 2;  // presumably double buffering -- confirm
// constexpr uint32_t BUFFER_NUM_MM = 4;
constexpr uint32_t M_N_TWO_DIMS = 2;

// Block traversal orders. Changed `const` -> `constexpr` for consistency with
// every other constant in this header (same internal linkage, compile-time value).
constexpr uint32_t ROW_FIRST = 1;
constexpr uint32_t COL_FIRST = 2;

// Inter-core handshake flag values (cube->vector, vector->cube, combined).
// NOTE(review): exact protocol semantics not visible here -- verify against users.
constexpr uint16_t C2V_FLAG = 0x4;
constexpr uint16_t V2C_FLAG = 0x8;
constexpr uint16_t V2V_D2C_FLAG = 0xc;

// Matmul API configuration presets.
// MM_CFG_VEC_ND2NZ: MDL config with the 4th GetMDLConfig argument enabled
// (per its positional signature; presumably vector-side ND->NZ conversion -- confirm).
constexpr MatmulConfig MM_CFG_VEC_ND2NZ = GetMDLConfig(false, false, false, true);
// NOTE(review): the two aggregates below differ only in positions 11 and 19
// (one enables what the other disables). Field names are not visible here --
// verify against the MatmulConfig declaration before editing these literals.
constexpr MatmulConfig MM_CFG_NO_PRELOAD{false, false, true, 0, 0, 0, false, false, false, false,
                                        false, 0, 0, 0, 0, 0, 0, 0, true};
constexpr MatmulConfig MM_CFG_PRELOAD{false, false, true, 0, 0, 0, false, false, false, false,
                                        true, 0, 0, 0, 0, 0, 0, 0, false};

// Offsets of one matmul block into each of the kernel's GM tensors.
// Presumably element offsets (callers index GlobalTensors with them) -- confirm.
// All members zero-initialized.
struct QBmmBlockOffset{
    uint64_t offset_a = 0;         // input matrix x1 (A)
    uint64_t offset_b = 0;         // input matrix x2 (B)
    uint64_t offset_c = 0;         // output matrix y (C)
    uint64_t offset_scale = 0;     // dequant scale
    uint64_t offset_bias = 0;      // bias
    uint64_t offset_pertoken = 0;  // per-token dequant scale
};

// Per-core block-scheduling arguments (tile counts and offsets along M/N).
// Fix: members are now zero-initialized, consistent with QBmmBlockOffset above,
// so a default-constructed instance never carries indeterminate values.
// Still an aggregate (C++14+), so existing brace-initialization keeps working.
struct QBmmBaseBlockArgs {
    uint64_t index = 0;               // current linear block index
    uint64_t total_tile_cnt = 0;      // total number of tiles to process
    uint64_t single_core_m = 0;       // M extent handled by one core
    uint64_t single_core_n = 0;       // N extent handled by one core
    uint64_t m_tile_cnt_l2 = 0;       // M tile count within an L2 tile
    uint64_t n_tile_cnt_l2 = 0;       // N tile count within an L2 tile
    uint64_t m_total_cnt = 0;         // total tile count along M
    uint64_t n_total_cnt = 0;         // total tile count along N
    uint64_t m_cnt_use = 0;           // tiles actually used along M
    uint64_t n_cnt_use = 0;           // tiles actually used along N
    uint64_t m_tile_addr_offset = 0;  // address offset of current M tile
    uint64_t n_tile_addr_offset = 0;  // address offset of current N tile
};
                        

namespace DequantBmm{
    // Return the larger of the two operands.
    template <typename T>
    __aicore__ inline T max(T a, T b) { return (b > a) ? b : a; }
    // Return the smaller of the two operands.
    template <typename T>
    __aicore__ inline T min(T a, T b) { return (b < a) ? b : a; }

    // Round `a` up to the nearest multiple of `b` (default 16).
    // Precondition: b != 0 (division by zero otherwise, as in any such helper).
    __aicore__ inline uint64_t ceil_align(uint64_t a, uint64_t b = 16) {
        const uint64_t blocks = (a + b - 1) / b;
        return blocks * b;
    }

    // Ceiling division; by convention returns `a` unchanged when b == 0
    // (guards against a zero tiling parameter instead of trapping).
    __aicore__ inline uint64_t ceil_div(uint64_t a, uint64_t b) {
        return (b == 0) ? a : (a + b - 1) / b;
    }
    
    // Map the raw framework format code to the Matmul API's CubeFormat:
    // FRACTAL_NZ maps to NZ, everything else is treated as ND.
    __aicore__ inline constexpr CubeFormat get_format(int format){
        return (format == FORMAT_FRACTAL_NZ_INT) ? CubeFormat::NZ : CubeFormat::ND;
    }

    // Fill `dequant_params` for a (cur_aiv_m x cur_aiv_n) int32 tile.
    // If the 8-element-aligned row width equals the 16-element-aligned UB row
    // pitch, the tile is contiguous and is flattened into a single row of
    // m * n_aligned elements; otherwise the rows are processed individually
    // with calCount valid elements per row. No-op when need_update is false.
    __aicore__ inline void calc_dequant_params(uint32_t cur_aiv_m, uint32_t cur_aiv_n, AscendC::DequantParams &dequant_params, bool need_update = true){
        if (need_update){
            const uint32_t n_align8 = ceil_align(cur_aiv_n, 8U);
            const uint32_t ub_row_pitch = ceil_align(cur_aiv_n);
            if (n_align8 != ub_row_pitch){
                dequant_params.m = cur_aiv_m;
                dequant_params.n = n_align8;
                dequant_params.calCount = cur_aiv_n;
            } else {
                dequant_params.m = 1;
                dequant_params.n = cur_aiv_m * n_align8;
                dequant_params.calCount = n_align8;
            }
        }
    }

    // Describe the GM -> UB load of one (cur_aiv_m x cur_aiv_n) int32 tile:
    // one burst per row, each burst cur_aiv_n * 4 bytes, source rows contiguous.
    __aicore__ inline void set_gm2ub_params(AscendC::DataCopyParams &gm_to_ub_params, uint32_t cur_aiv_m, uint32_t cur_aiv_n){
        gm_to_ub_params.blockCount = cur_aiv_m;
        gm_to_ub_params.blockLen = cur_aiv_n * sizeof(int32_t);
        gm_to_ub_params.srcStride = 0;
    }

    // Describe the UB -> GM write-back of one (cur_aiv_m x cur_aiv_n) YType tile
    // into a destination of full row width n: cur_aiv_m bursts of cur_aiv_n
    // elements, skipping the remaining (n - cur_aiv_n) elements after each row.
    template<typename YType>
    __aicore__ inline void set_ub_to_gm_params(AscendC::DataCopyExtParams &ub_to_gm_params, uint32_t cur_aiv_m, uint32_t cur_aiv_n, uint32_t n){
        ub_to_gm_params.blockCount = cur_aiv_m;
        ub_to_gm_params.blockLen = cur_aiv_n * sizeof(YType);
        ub_to_gm_params.dstStride = (n - cur_aiv_n) * sizeof(YType);
    }

    // Load one tile of the int32 matmul output from GM (at cur_aic_aiv_offset)
    // into UB, then synchronize so the vector pipeline only consumes src_local
    // after the copy has landed.
    __aicore__ inline void copy_mm_out_to_local(AscendC::LocalTensor<int32_t> &src_local,
    AscendC::GlobalTensor<int32_t> &cur_mm_out_gm, AscendC::DataCopyParams &gm_to_ub_params,
    AscendC::DataCopyPadParams &pad_params, uint32_t cur_aic_aiv_offset
    ){
        DataCopyPad(src_local, cur_mm_out_gm[cur_aic_aiv_offset], gm_to_ub_params, pad_params);
        // MTE2 (GM->UB) must finish before any vector op reads src_local.
        set_flag(PIPE_MTE2, PIPE_V, static_cast<event_t>(EVENT_ID0));
        wait_flag(PIPE_MTE2, PIPE_V, static_cast<event_t>(EVENT_ID0));
    }

    // Thin wrapper: flush the result tile in dst_local back to GM at
    // y_gm_offset using the burst layout described by ub_to_gm_params.
    // DataCopyPad handles a trailing partial 32-byte block, so cur_aiv_n
    // need not be block-aligned.
    template<typename YType>
    __aicore__ inline void copy_ub_to_gm(uint64_t y_gm_offset, AscendC::DataCopyExtParams &ub_to_gm_params,
    AscendC::LocalTensor<YType> &dst_local, AscendC::GlobalTensor<YType> &y_gm
    ){
        DataCopyPad(y_gm[y_gm_offset], dst_local, ub_to_gm_params);
    }

    // Load one row of cur_aiv_n dequant scales from GM (at offset_scale) into
    // UB as a single padded burst, then synchronize before vector use.
    // (Name suggests this path is used for the bf16 output variant -- confirm.)
    template <typename ScaleType>
    __aicore__ inline void bf16_scale_gm_to_ub(
        AscendC::LocalTensor<ScaleType> &scale_local,
        AscendC::GlobalTensor<ScaleType> &scale_gm,
        AscendC::DataCopyPadParams &pad_params,
        uint32_t cur_aiv_n,
        uint64_t offset_scale
    ){
        // Single burst; blockLen (in bytes) filled in below.
        AscendC::DataCopyParams scale_to_ub_params{1,0,0,0};
        scale_to_ub_params.blockLen = cur_aiv_n * sizeof(ScaleType);
        DataCopyPad(scale_local, scale_gm[offset_scale], scale_to_ub_params, pad_params);
        // MTE2 (GM->UB) must finish before any vector op reads scale_local.
        set_flag(PIPE_MTE2, PIPE_V, static_cast<event_t>(EVENT_ID1));
        wait_flag(PIPE_MTE2, PIPE_V, static_cast<event_t>(EVENT_ID1));
    }

    // Write a single 16-bit word (a control/handshake flag) from UB to GM at
    // y_gm_offset. Wraps the raw UB pointer in a LocalTensor so the
    // DataCopyPad-based copy_ub_to_gm helper can be reused.
    template<typename YType>
    inline __attribute__((always_inline)) __aicore__ void CopyUbufToGmAlignB16(uint64_t y_gm_offset, __ubuf__ int16_t *src, AscendC::GlobalTensor<YType> &y_gm)
    {
        // One burst of sizeof(int16_t) = 2 bytes.
        AscendC::DataCopyExtParams dataCopyParams(1, sizeof(int16_t), 0, 0, 0);
        AscendC::LocalTensor<YType> ubTensor;
        AscendC::TBuffAddr ubAddr;
        ubAddr.logicPos = static_cast<uint8_t>(AscendC::TPosition::VECIN);
        ubAddr.bufferAddr = reinterpret_cast<uint64_t>(src);
        ubTensor.SetAddr(ubAddr);

        copy_ub_to_gm<YType>(y_gm_offset, dataCopyParams, ubTensor, y_gm);
    }
 
    // Read a single 16-bit word (a control/handshake flag) from GM at
    // y_gm_offset into UB at `dst`. Mirror of CopyUbufToGmAlignB16.
    template<typename YType>
    inline __attribute__((always_inline)) __aicore__ void CopyGmToUbufAlignB16(uint64_t y_gm_offset, __ubuf__ int16_t *dst, AscendC::GlobalTensor<YType> &y_gm)
    {
        // One burst of sizeof(int16_t) = 2 bytes.
        AscendC::DataCopyExtParams dataCopyParams(1, sizeof(int16_t), 0, 0, 0);
        AscendC::LocalTensor<YType> ubTensor;
        AscendC::TBuffAddr ubAddr;
        ubAddr.logicPos = static_cast<uint8_t>(AscendC::TPosition::VECIN);
        ubAddr.bufferAddr = reinterpret_cast<uint64_t>(dst);
        ubTensor.SetAddr(ubAddr);

        AscendC::DataCopyPadExtParams<YType> padParams;
        DataCopyPad(ubTensor, y_gm[y_gm_offset], dataCopyParams, padParams);
    }
 
    // Publish `flag` to GM at y_gm_offset (plain overwrite).
    // The scalar store to UB must be visible to the MTE3 engine before the
    // UB->GM copy starts, hence the S->MTE3 set/wait pair.
    template<typename YType>
    inline __attribute__((always_inline)) __aicore__ void SetBuffFlag(__ubuf__ int16_t *ctrl_flags_UB,
                                                                    uint64_t y_gm_offset, 
                                                                    AscendC::GlobalTensor<YType> &y_gm,
                                                                    int16_t flag)
    {
        *ctrl_flags_UB = flag;
        AscendC::SetFlag<AscendC::HardEvent::S_MTE3>(EVENT_ID2);
        AscendC::WaitFlag<AscendC::HardEvent::S_MTE3>(EVENT_ID2);
        CopyUbufToGmAlignB16(y_gm_offset, ctrl_flags_UB, y_gm);
    }
 
    // Accumulate `flag` into the GM word at y_gm_offset via atomic-add
    // (so multiple cores can each contribute to a shared counter/flag).
    // The PIPE_ALL barriers fence the atomic-mode switch: the copy must run
    // entirely inside atomic-add mode, and the mode must be reset before any
    // later copy reuses the MTE3 path.
    template<typename YType>
    inline __attribute__((always_inline)) __aicore__ void SetBuffFlagByAdd(__ubuf__ int16_t *ctrl_flags_UB,
                                                                        uint64_t y_gm_offset, 
                                                                        AscendC::GlobalTensor<YType> &y_gm,
                                                                        int16_t flag)
    {
        *ctrl_flags_UB = flag;
        AscendC::PipeBarrier<PIPE_ALL>();
        AscendC::SetAtomicAdd<int16_t>();
        CopyUbufToGmAlignB16(y_gm_offset, ctrl_flags_UB, y_gm);
        AscendC::PipeBarrier<PIPE_ALL>();
        AscendC::SetAtomicNone();
    }
 
    // Spin until the GM word at y_gm_offset equals `flag`.
    // Each iteration re-reads the flag into UB (MTE2), then synchronizes
    // MTE2->S so the scalar comparison sees the freshly loaded value.
    // The leading MTE3->MTE2 pair orders this poll after any outstanding
    // UB->GM write on this core. NOTE(review): busy-waits forever if the
    // flag is never set -- callers must guarantee the producer runs.
    template<typename YType>
    inline __attribute__((always_inline)) __aicore__ void CheckBuffFlag(__ubuf__ int16_t *ctrl_flags_UB, \
                                                                        uint64_t y_gm_offset, 
                                                                        AscendC::GlobalTensor<YType> &y_gm, int16_t flag)
    {
        AscendC::SetFlag<AscendC::HardEvent::MTE3_MTE2>(EVENT_ID1);
        AscendC::WaitFlag<AscendC::HardEvent::MTE3_MTE2>(EVENT_ID1);
        while (true) {
            CopyGmToUbufAlignB16(y_gm_offset, ctrl_flags_UB, y_gm);
            AscendC::SetFlag<AscendC::HardEvent::MTE2_S>(EVENT_ID3);
            AscendC::WaitFlag<AscendC::HardEvent::MTE2_S>(EVENT_ID3); // Scalar waits for MTE2
            if (*ctrl_flags_UB == flag) {
                break;
            }
        }
    }

}// namespace DequantBmm
#endif // QUANT_MATMUL_V2_BASE_H