#ifndef QUANT_MATMUL_V2_UPDATE_H
#define QUANT_MATMUL_V2_UPDATE_H

#include "quant_matmul_v2_base.h"

namespace AscendC{
// Cached block-update parameters derived once from the matmul tiling in
// QuantMatmulV2Update::init(), then reused on every per-tile update.
struct QBmmUpdateInfo{
    uint64_t m_base_tail;      // rows of the last (possibly partial) M block: M - (m_total_cnt - 1) * singleCoreM
    uint64_t n_base_tail;      // cols of the last (possibly partial) N block: N - (n_total_cnt - 1) * singleCoreN
    uint64_t aligned_ka_size;  // Ka rounded up to a multiple of K0_INT8
    uint64_t aligned_kb_size;  // Kb rounded up to a multiple of BMM_BLOCK_NUM; B-matrix row stride in calcGMoffset
};

// Per-iteration tile updater for quantized matmul v2: tracks tail-block sizes
// and aligned K dimensions, and translates a tile index into single-core block
// shapes plus GM (global memory) offsets for A/B/C, scale, bias and per-token
// quantization buffers.
class QuantMatmulV2Update{

public:
    __aicore__ inline QuantMatmulV2Update(){}
    // Capture the tiling pointer and precompute tail/alignment info.
    // NOTE(review): the format/transpose template parameters are not used in the
    // definitions visible in this file — presumably consumed by specializations
    // elsewhere; confirm before removing.
    template <int X1Format, int X2Format, bool ATrans, bool BTrans>
    __aicore__ inline void init(const TCubeTiling *mm_tiling, const QBmmBaseBlockArgs &params);
    // Convenience wrapper: updateBlockParams() followed by calcGMoffset().
    template <int X1Format, int X2Format, bool ATrans, bool BTrans>
    __aicore__ inline void updateBlockParamsAndCalcGmOffset(QBmmBaseBlockArgs &params, QBmmBlockOffset &offset, uint64_t m_tile_index, uint64_t n_tile_index);
    // Select the single-core M/N block shape, shrinking to tail sizes on edge tiles.
    __aicore__ inline void updateBlockParams(QBmmBaseBlockArgs &params, uint64_t m_tile_index, uint64_t n_tile_index);
    // Derive GM offsets for all operands from params.index and the tile address offsets.
    template<int X1Format, int X2Format, bool ATrans, bool BTrans>
    __aicore__ inline void calcGMoffset(QBmmBaseBlockArgs &params, QBmmBlockOffset &offset);

private:
    QBmmUpdateInfo info_;          // precomputed tail/alignment values (set in init)
    const TCubeTiling *mm_tiling_; // non-owning; must outlive this object
};

// Capture the tiling and precompute the values every later update needs:
// the tail-block extents along M and N (what remains after all full
// single-core blocks) and the K dimensions rounded up to their required
// alignment granularities.
template <int X1Format, int X2Format, bool ATrans, bool BTrans>
__aicore__ inline void QuantMatmulV2Update::init(const TCubeTiling *mm_tiling, const QBmmBaseBlockArgs &params){
    mm_tiling_ = mm_tiling;
    const uint64_t total_m = static_cast<uint64_t>(mm_tiling_->M);
    const uint64_t total_n = static_cast<uint64_t>(mm_tiling_->N);
    // Tail extents: whatever is left of M/N after (cnt - 1) full blocks.
    info_.m_base_tail = total_m - (params.m_total_cnt - 1) * mm_tiling_->singleCoreM;
    info_.n_base_tail = total_n - (params.n_total_cnt - 1) * mm_tiling_->singleCoreN;
    // K dimensions rounded up to their hardware alignment units.
    info_.aligned_ka_size = DequantBmm::ceil_align(mm_tiling_->Ka, K0_INT8);
    info_.aligned_kb_size = DequantBmm::ceil_align(mm_tiling_->Kb, BMM_BLOCK_NUM);
}

// Convenience wrapper: refresh the single-core block shape for the current
// tile, then compute the GM offsets for that (now updated) shape. Order
// matters — calcGMoffset reads fields updateBlockParams may have changed.
template <int X1Format, int X2Format, bool ATrans, bool BTrans>
__aicore__ inline void QuantMatmulV2Update::updateBlockParamsAndCalcGmOffset(QBmmBaseBlockArgs &params, QBmmBlockOffset &offset, uint64_t m_tile_index, uint64_t n_tile_index){
    updateBlockParams(params, m_tile_index, n_tile_index);
    calcGMoffset<X1Format, X2Format, ATrans, BTrans>(params, offset);
}

// Choose the single-core block shape for the current tile. Interior tiles use
// the full (baseM, baseN) shape; tiles on the last M and/or N edge shrink the
// corresponding dimension to its precomputed tail extent. The branch order is
// significant: the combined-corner test runs first, then the M edge, then the
// N edge — only the first matching case applies.
__aicore__ inline void QuantMatmulV2Update::updateBlockParams(QBmmBaseBlockArgs &params, uint64_t m_tile_index, uint64_t n_tile_index){
    // Start from the full base block and shrink only where an edge case matches.
    uint64_t block_m = mm_tiling_->baseM;
    uint64_t block_n = mm_tiling_->baseN;
    const bool last_m_l2 = (m_tile_index == params.m_tile_cnt_l2 - 1);
    const bool last_n_l2 = (n_tile_index == params.n_tile_cnt_l2 - 1);
    if (last_m_l2 && last_n_l2 && (params.index == params.total_tile_cnt - 1)){
        // Final tile overall: tail in both dimensions.
        block_m = info_.m_base_tail;
        block_n = info_.n_base_tail;
    } else if (last_m_l2 && (params.index >= (params.m_cnt_use - 1) * params.n_cnt_use)){
        // Bottom row of blocks: M tail, full N.
        block_m = info_.m_base_tail;
    } else if (last_n_l2 && ((params.index + 1) % params.n_cnt_use == 0)){
        // Rightmost column of blocks: full M, N tail.
        block_n = info_.n_base_tail;
    }
    params.single_core_m = block_m;
    params.single_core_n = block_n;
}

// Translate the linear tile index into GM element offsets for every operand.
// The index is laid out row-major over an (m_cnt_use x n_cnt_use) grid of
// single-core blocks; the tile address offsets shift the whole grid within L2.
// A is addressed with row stride Ka, B with the aligned Kb stride, and C with
// row stride N; scale/bias are per-column, the per-token buffer is per-row.
// NOTE(review): aligned_ka_size is unused here — presumably consumed by a
// format-specific specialization elsewhere; the format/transpose template
// parameters are likewise unused in this definition. Confirm before cleanup.
template<int X1Format, int X2Format, bool ATrans, bool BTrans>
__aicore__ inline void QuantMatmulV2Update::calcGMoffset(QBmmBaseBlockArgs &params, QBmmBlockOffset &offset){
    // Decompose the linear block index into grid coordinates.
    const uint64_t block_row = params.index / params.n_cnt_use;
    const uint64_t block_col = params.index % params.n_cnt_use;
    // Element coordinates of this block's top-left corner.
    const uint64_t row_start = block_row * mm_tiling_->singleCoreM + params.m_tile_addr_offset;
    const uint64_t col_start = block_col * mm_tiling_->singleCoreN + params.n_tile_addr_offset;

    offset.offset_a        = row_start * mm_tiling_->Ka;
    offset.offset_b        = col_start * info_.aligned_kb_size;
    offset.offset_c        = row_start * mm_tiling_->N + col_start;
    offset.offset_scale    = col_start;
    offset.offset_bias     = col_start;
    offset.offset_pertoken = row_start;
}

} //Namespace AscendC
#endif  // QUANT_MATMUL_V2_UPDATE_H