#ifndef QUANT_MATMUL_V2_PERTOKEN_BASIC_H
#define QUANT_MATMUL_V2_PERTOKEN_BASIC_H

#include "quant_matmul_v2_block.h"
#include "quant_matmul_v2_update.h"


#define FLAG_ZERO_IDX 0
#define FLAG_ONE_IDX 2
namespace AscendC{

// Fused quantized batch-matmul + per-token dequantization kernel with
// cross-rank result exchange through peer-visible GM buffers (Ascend C
// mixed AIC/AIV programming model).
//
// Per basic block (base_m_ x base_n_):
//   AIC : int8 matmul -> int32 result staged in workspace (mm_out_gm_).
//   AIV0: dequant (weight scale * per-token scale) -> YType; written either
//         directly to y_gm_ (block belongs to this rank) or into this rank's
//         shared buffer buff_gm_[rank] for peers to consume.
//   AIV1: pulls peer ranks' finished blocks from buff_gm_[other_rank] and
//         accumulates them into y_gm_ with atomic add.
// Cross-core ordering uses V2C/C2V/V2V_D2C event flags plus GM flag words.
template<TemplateBasicType>
class BmmDequantPertokenBasic{
public:
    __aicore__ inline BmmDequantPertokenBasic() {}
    // One-time setup: caches tiling fields, binds all global/local buffers and
    // the matmul object, and derives per-core loop bounds.
    // buff0..buff7 are peer-accessible GM windows, one per rank (up to 8 ranks).
    __aicore__ inline void init(GM_ADDR x1, GM_ADDR x2, GM_ADDR scale, GM_ADDR bias, GM_ADDR pertoken_scale,
    GM_ADDR y,
    GM_ADDR buff0, GM_ADDR buff1, GM_ADDR buff2, GM_ADDR buff3, GM_ADDR buff4, GM_ADDR buff5, GM_ADDR buff6, GM_ADDR buff7, 
    GM_ADDR workspace, const QuantMatmulV2TilingData *__restrict tiling_data, TPipe *t_pipe){
        
        used_core_num_ = tiling_data->matmul_tiling.usedCoreNum;
        if ASCEND_IS_AIV{
            // Two vector cores pair with one cube core, so the AIV block index
            // maps 2:1 onto the cube-core index.
            block_idx_ = GetBlockIdx() / 2;
            // block_idx_ = GetBlockIdx();
        }

        if ASCEND_IS_AIC{
            block_idx_ = GetBlockIdx();
        }

        aiv_id = GetSubBlockIdx();
        
        if(block_idx_ >= used_core_num_){
            return;
        }
        pipe_ = t_pipe;
        mm_.Init(&(tiling_data->matmul_tiling), pipe_);
        // Basic blocks grouped per communication inner-loop step (see the
        // comm_loop_num computations below). Hard-coded to 3 for now.
        p_value_comm = 3;
        
        initTilingData(tiling_data);
        initGlobalBuffers(x1, x2, scale, bias, pertoken_scale, y, buff0, buff1, buff2, buff3, buff4, buff5, buff6, buff7, workspace);
        initLocalBuffers();
        // Per-core workspace base: a double-buffered (factor 2) region holding
        // p_value int32 basic blocks of base_m_ x base_n_ each.
        offset_workspace_c_ = 2 * p_value * block_idx_ * base_m_ * base_n_;
        loop_ = 0;
        
        
        const TCubeTiling * matmul_tiling_data_ = &(tiling_data->matmul_tiling);
        m_total_cnt_ = (static_cast<uint64_t>(matmul_tiling_data_->M) + matmul_tiling_data_->singleCoreM - 1) / matmul_tiling_data_->singleCoreM;
        n_total_cnt_ = (static_cast<uint64_t>(matmul_tiling_data_->N) + matmul_tiling_data_->singleCoreN - 1) / matmul_tiling_data_->singleCoreN;
        total_cnt_ = m_total_cnt_ * n_total_cnt_;
    }

    // Main loop over communication rounds (comm_num). In each round, AIC + AIV
    // sub-block 0 compute p_value basic blocks per core (matmul + dequant),
    // while AIV sub-block 1 moves peer results into y_gm_ via atomic add.
    __aicore__ inline void process(){

        int32_t aiv_num = GetSubBlockNum(); // NOTE(review): currently unused
        int32_t ratio = GetTaskRation();    // NOTE(review): currently unused
        if(block_idx_ >= used_core_num_ ){
            return;
        }

        uint64_t ping_offset_c = offset_workspace_c_;
        uint32_t m_actual;
        uint32_t n_actual;
        uint64_t m_idx;
        uint64_t n_idx;
        uint64_t m_offset; // m offset used when actually reading data
        uint64_t n_offset; // n offset used when actually reading data


        int32_t tail_m = (m_)/(rank_size) % base_m_; // NOTE(review): unused
        // M is split evenly across ranks, then tiled by base_m_ within a rank.
        m_loop_ = rank_size * ((m_ / rank_size + base_m_ - 1) / base_m_);
        n_loop_ = n_ / base_n_;
        core_loop_ = m_loop_ * n_loop_;
        loop_num_per_comm_ = p_value * used_core_num_;
        
        int32_t comm_num = (core_loop_ + loop_num_per_comm_ - 1) / loop_num_per_comm_;
        int32_t m_loop_per_rank = m_loop_ / rank_size;


        if ASCEND_IS_AIV{
            if((block_idx_ == rank) && (aiv_id == 1)){
            // Reset this rank's cross-card readiness flag before round 0.
            DequantBmm::SetBuffFlag<YType>(ctrl_flags_UB, FLAG_OFFSET + FLAG_ZERO_IDX, buff_gm_[rank], 0);
            PipeBarrier<PIPE_ALL>();
            }
            // Initially notify the AIC that both halves of the shared double
            // buffer are free (2 rounds x p_value block slots).
            for (int64_t comm_idx = 0; comm_idx < 2; ++comm_idx){
                for (int64_t p = 0; p < p_value; ++p){
                    NotifyEventImpl<2, PIPE_MTE3>(V2C_FLAG);
                }
            }

        }

        int32_t comm_start = 0;
        // int32_t comm_start = p_value * rank;
        for (int32_t comm_times = 0; comm_times < comm_num ; comm_times++){
            int32_t comm_idx = (comm_start + comm_times) % comm_num;
            int32_t actual_loop_num = loop_num_per_comm_;
            if (comm_idx == comm_num - 1){
                // Last round may carry fewer basic blocks than a full round.
                actual_loop_num = core_loop_ - comm_idx * loop_num_per_comm_;
            }

            // Ping/pong selector for the shared communication double buffer.
            uint64_t flag_id = comm_idx % 2;

            // AIV sub-block 0 drives compute; sub-block 1 (else branch) drives
            // the cross-rank data movement.
            if (aiv_id < 1){

            for (int32_t p = 0; p < p_value; p++){
                int32_t loop_idx = comm_idx * p_value * used_core_num_ + p * used_core_num_ + block_idx_;
                int32_t rank_idx = loop_idx % rank_size;
                int32_t in_rank_idx = loop_idx / rank_size;
                if (loop_idx >= core_loop_){
                    // No real work for this slot: still run the handshake so
                    // event counters stay balanced across all cores.
                    if ASCEND_IS_AIC{
                        WaitEvent(V2C_FLAG);
                        NotifyEventImpl<2, PIPE_FIX>(C2V_FLAG);
                    }

                    if ASCEND_IS_AIV{
                        WaitEvent(C2V_FLAG);
                        NotifyEventImpl<2, PIPE_MTE3>(V2C_FLAG);
                    }
                    continue;
                }
                
                m_idx = in_rank_idx / n_loop_;
                n_idx = in_rank_idx % n_loop_;
                
                // https://www.hiascend.com/document/detail/zh/canncommercial/800/apiref/ascendtbapi/ascendtb_01_0083.html
                // nd2nz: for int8 data, an [m,n] input tensor with m0 = 16, n0 = 32 is laid out
                // as [1, ceil(n/n0), ceil(m/m0)*m0, n0]; 256/32 = 8
                // TODO: add nz/swizzle ordering for intra-rank computation to improve L2 cache hit rate
                m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ / rank_size - m_idx * base_m_) : base_m_;
                n_actual = (n_idx == (n_loop_ - 1)) ? (n_ - n_idx * base_n_) : base_n_;
                
                m_offset = m_idx * base_m_ + rank_idx * m_ / rank_size;
                offset_.offset_a = m_offset * ka_;
                n_offset = n_idx * base_n_;
                offset_.offset_b = n_offset * ka_;
                offset_.offset_c = (m_idx * base_m_) * n_ + n_offset; //m_offset * n_ + n_offset;
                offset_.offset_scale = n_offset;
                offset_.offset_bias = n_offset;
                offset_.offset_pertoken = m_offset;

                // Select the ping/pong slot for this block inside this core's
                // workspace region.
                offset_workspace_c_ = ping_offset_c + (flag_id * p_value + p) * base_m_ *base_n_;

                if ASCEND_IS_AIC{
                    WaitEvent(V2C_FLAG);
                    basicMMCompute(m_actual, n_actual);
                    NotifyEventImpl<2, PIPE_FIX>(C2V_FLAG);
                }

                if ASCEND_IS_AIV{
                    WaitEvent(C2V_FLAG);
                    basicDequantCompute(mm_out_gm_, m_actual, n_actual, comm_idx, p, actual_loop_num); // TODO: the size moved is not a multiple of 2; the two AIVs move different amounts
                    NotifyEventImpl<2, PIPE_MTE3>(V2C_FLAG);
                }
            }
            

            if ASCEND_IS_AIV{
                NotifyEventImpl<0, PIPE_MTE3>(V2V_D2C_FLAG);
                WaitEvent(V2V_D2C_FLAG);
            }
            }

            else {
                
            // Wait until the compute AIV finished all p_value blocks this round.
            for (int64_t p = 0; p < p_value; ++p){
                WaitEvent(C2V_FLAG);
            }
            
            NotifyEventImpl<0, PIPE_MTE3>(V2V_D2C_FLAG);
            WaitEvent(V2V_D2C_FLAG);

            int32_t other_rank = block_idx_ % rank_size;

            // matmul results on this card are ready
            if (block_idx_ == rank){
                DequantBmm::SetBuffFlagByAdd<YType>(ctrl_flags_UB, FLAG_OFFSET + FLAG_ZERO_IDX, buff_gm_[rank], 1); // FLAG_VALUE 1
            }
            int32_t comm_aivs = 1;
            if ((other_rank != rank) && (block_idx_ < comm_aivs * rank_size)){
                // Spin until other_rank has published round (comm_times + 1).
                DequantBmm::CheckBuffFlag<YType>(ctrl_flags_UB, FLAG_OFFSET + FLAG_ZERO_IDX, buff_gm_[other_rank], comm_times + 1);
                // int32_t other_comm_idx = (p_value * other_rank + comm_times) % comm_num;
                int32_t other_comm_idx = (comm_times) % comm_num;
                int32_t comm_loop_num = (loop_num_per_comm_ / rank_size + p_value_comm - 1 ) / p_value_comm;
                int32_t actual_comm_loop_num = (actual_loop_num / rank_size + p_value_comm - 1) / p_value_comm;

                // Base of this rank's strip inside the peer's double buffer.
                // NOTE(review): int32_t may overflow for large shapes — verify.
                int32_t rank_offset = rank * p_value_comm * actual_comm_loop_num * base_m_ * base_n_ + flag_id * base_m_ * base_n_ * comm_loop_num * p_value_comm * rank_size;

                int32_t actual_comm_loop_num_block = (actual_comm_loop_num + comm_aivs - 1) / comm_aivs;
                for(int in_loop_idx_block = 0; in_loop_idx_block < actual_comm_loop_num_block; in_loop_idx_block++){
                    int32_t in_loop_idx = (in_loop_idx_block * comm_aivs + block_idx_/rank_size);
                    int32_t rank_buff_offset = p_value_comm * in_loop_idx * base_m_ * base_n_ + rank_offset;

                    int32_t loop_idx = other_comm_idx * p_value * used_core_num_ / rank_size + in_loop_idx * p_value_comm;
                    uint32_t cur_aiv_m = base_m_;
                    // Last step of the strip may cover fewer than p_value_comm blocks.
                    uint32_t cur_aiv_loop = (in_loop_idx == actual_comm_loop_num - 1) ? (actual_loop_num / rank_size) - in_loop_idx * p_value_comm : p_value_comm;
                    uint32_t cur_aiv_n = base_n_ * cur_aiv_loop;


                    DataCopyParams gm_to_ub_params{1, 0, 0, 0};
                    DataCopyExtParams ub_to_gm_params{1, 0, 0, 0, 0};
                    DataCopyPadParams pad_params;
                    DequantParams dequant_params;
                    
                    // Stage the peer's cur_aiv_m x cur_aiv_n strip into UB.
                    LocalTensor<YType> src_local = vec_que_mv_.AllocTensor<YType>();
                    gm_to_ub_params.blockLen = cur_aiv_n * sizeof(YType);
                    gm_to_ub_params.blockCount = cur_aiv_m;
                    gm_to_ub_params.srcStride = (base_n_ * p_value_comm - cur_aiv_n) * sizeof(YType);
                    DataCopyPad(src_local, buff_gm_[other_rank][rank_buff_offset], gm_to_ub_params, pad_params);
                    vec_que_mv_.EnQue<YType>(src_local);
                    src_local = vec_que_mv_.DeQue<YType>();
                    // Ensure the GM->UB load completes before the UB->GM store.
                    set_flag(PIPE_MTE2, PIPE_MTE3, static_cast<event_t>(EVENT_ID0));
                    wait_flag(PIPE_MTE2, PIPE_MTE3, static_cast<event_t>(EVENT_ID0));
                    
                    uint64_t y_offset;
                    uint32_t dst_stride;

                    m_idx = loop_idx / n_loop_;
                    n_idx = loop_idx % n_loop_;
                    // Fast path: the whole strip stays within one row of n-tiles.
                    if(n_idx + cur_aiv_loop - 1 < n_loop_){

                        m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ / rank_size - m_idx * base_m_) : base_m_;
                        n_actual = (cur_aiv_loop - 1) * base_n_ + ((n_idx + cur_aiv_loop - 1 == (n_loop_ - 1)) ? (n_ - (n_loop_ - 1)* base_n_) : base_n_);
                        m_offset = m_idx * base_m_;
                        n_offset = n_idx * base_n_;
                        y_offset = m_offset * n_ + n_offset;
                        dst_stride = n_;
                        
                        ub_to_gm_params.blockLen = n_actual * sizeof(YType);
                        ub_to_gm_params.blockCount = m_actual;
                        ub_to_gm_params.dstStride = (dst_stride - n_actual) * sizeof(YType);
                        // Accumulate into y: peers' partial results are summed.
                        SetAtomicAdd<YType>();
                        DequantBmm::copy_ub_to_gm<YType>(y_offset, ub_to_gm_params, src_local, y_gm_);
                        

                    } else {

                        // Strip wraps past the end of an n-row: split the store
                        // into the tail of this row and the head of the next.
                        uint32_t cur_aiv_loop1 = n_loop_ - n_idx;
                        m_idx = loop_idx / n_loop_;
                        n_idx = loop_idx % n_loop_;
                        m_offset = m_idx * base_m_;
                        n_offset = n_idx * base_n_;

                        y_offset = m_offset * n_ + n_offset;
                        dst_stride = n_; 
                        
                        m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ / rank_size - m_idx * base_m_) : base_m_;
                        n_actual = (cur_aiv_loop1 - 1) * base_n_ + ((n_idx + cur_aiv_loop1 - 1 == (n_loop_ - 1)) ? (n_ - (n_loop_ - 1) * base_n_) : base_n_);
                        ub_to_gm_params.blockLen = n_actual * sizeof(YType);
                        ub_to_gm_params.blockCount = m_actual;
                        ub_to_gm_params.dstStride = (dst_stride - n_actual) * sizeof(YType);
                        // srcStride is in 32-byte units for UB-side strides.
                        ub_to_gm_params.srcStride = (cur_aiv_n - n_actual) * sizeof(YType) / 32;

                        SetAtomicAdd<YType>();
                        DataCopyPad(y_gm_[y_offset], src_local, ub_to_gm_params);

                        // Remaining blocks land at the start of the next m-row.
                        uint32_t cur_aiv_loop2 = cur_aiv_loop - cur_aiv_loop1;
                        loop_idx = loop_idx + cur_aiv_loop1;
                        m_idx = loop_idx / n_loop_;
                        n_idx = loop_idx % n_loop_;
                        m_offset = m_idx * base_m_;
                        n_offset = n_idx * base_n_;
                        y_offset = m_offset * n_ + n_offset;

                        m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ / rank_size - m_idx * base_m_) : base_m_;
                        n_actual = (cur_aiv_loop2 - 1) * base_n_ + ((n_idx + cur_aiv_loop2 - 1 == (n_loop_ - 1)) ? (n_ - (n_loop_ - 1) * base_n_) : base_n_);
                        dst_stride = n_;

                        ub_to_gm_params.blockLen = n_actual * sizeof(YType);
                        ub_to_gm_params.blockCount = m_actual;
                        ub_to_gm_params.dstStride = (dst_stride - n_actual) * sizeof(YType);
                        ub_to_gm_params.srcStride = (cur_aiv_n - n_actual) * sizeof(YType)/ 32;
                        
                        DataCopyPad(y_gm_[y_offset], src_local[base_n_ * cur_aiv_loop1], ub_to_gm_params);


                    }
                    PipeBarrier<PIPE_ALL>();
                    SetAtomicNone();
                    vec_que_mv_.FreeTensor(src_local);

                }


            }
            


            // Release the p_value buffer slots consumed this round back to AIC.
            for (int64_t p = 0; p < p_value; ++p){
                NotifyEventImpl<2, PIPE_MTE3>(V2C_FLAG);
            }

            }

        }

    End();
}

    // __aicore__ inline UPDATE_TYPE &get_update_obj(){return update_;}
private:
    
    // Copies the tiling fields used by this kernel into members and derives
    // the GM flag-word offset (placed past the data region of the shared buffer).
    __aicore__ inline void initTilingData(const QuantMatmulV2TilingData *tiling_data){
        is_per_tensor_ = tiling_data->params.is_per_tensor;
        m_ = tiling_data->matmul_tiling.M;
        n_ = tiling_data->matmul_tiling.N;
        ka_ = tiling_data->matmul_tiling.Ka;

        base_m_ = tiling_data->matmul_tiling.baseM;
        base_n_ = tiling_data->matmul_tiling.baseN;

        has_bias_ = tiling_data->matmul_tiling.isBias;
        bias_dtype_ = tiling_data->params.bias_dtype;

        if (bias_dtype_ == DT_INT32 || bias_dtype_ == DT_FLOAT) {
            bias_dtypeSize_ = sizeof(int32_t);
        } else {
            bias_dtypeSize_ = sizeof(half);
        }
        
        ub_calc_m_ = tiling_data->params.ub_calc_m;
        ub_calc_n_ = tiling_data->params.ub_calc_n;
        ub_tmp_buffer_ = tiling_data->params.need_ub_buffer;
        split_trans_num_ = tiling_data->params.split_trans_num;

        rank_size = tiling_data->params.rank_size;
        p_value = tiling_data->params.p_value;
        rank = tiling_data->params.rank;
        // NOTE(review): 128 * 2 * 256 presumably over-approximates the largest
        // base_m_ * base_n_ element footprint per block — confirm against the
        // shared-buffer allocation on the host side.
        FLAG_OFFSET =  p_value_comm * rank_size * (((used_core_num_ * p_value)/rank_size + p_value_comm -1 )/ p_value_comm) * 128  * 2 * 256;
    }

    // Binds all GM tensors. The workspace holds 2 (double buffer) * p_value
    // int32 basic blocks per core; buff0..buff7 are the per-rank shared windows.
    __aicore__ inline void initGlobalBuffers(GM_ADDR x1, GM_ADDR x2, GM_ADDR scale, GM_ADDR bias, GM_ADDR pertoken_scale,
    GM_ADDR y,
    GM_ADDR buff0, GM_ADDR buff1, GM_ADDR buff2, GM_ADDR buff3, GM_ADDR buff4, GM_ADDR buff5, GM_ADDR buff6, GM_ADDR buff7,
    GM_ADDR workspace){
        
        if (is_per_tensor_){
            // Per-tensor mode: a single scalar scale is read once from GM.
            scale_scalar_ = *((__gm__ ScaleType *)scale);
        }
        x_gm_.SetGlobalBuffer((__gm__ X1Type *)x1);
        weight_gm_.SetGlobalBuffer((__gm__ X2Type *)x2);
        // has_bias_ == 0

        y_gm_.SetGlobalBuffer((__gm__ YType *)y);
        scale_gm_.SetGlobalBuffer((__gm__ ScaleType *)scale);
        pertoken_scale_gm_.SetGlobalBuffer((__gm__ float *)pertoken_scale);
        mm_out_gm_.SetGlobalBuffer((__gm__ int32_t *)workspace, 2 * p_value * used_core_num_ * base_m_ * base_n_);

        buff_gm_[0].SetGlobalBuffer((__gm__ YType *)buff0); // TODO: dynamic length
        buff_gm_[1].SetGlobalBuffer((__gm__ YType *)buff1);
        buff_gm_[2].SetGlobalBuffer((__gm__ YType *)buff2);
        buff_gm_[3].SetGlobalBuffer((__gm__ YType *)buff3);
        buff_gm_[4].SetGlobalBuffer((__gm__ YType *)buff4);
        buff_gm_[5].SetGlobalBuffer((__gm__ YType *)buff5);
        buff_gm_[6].SetGlobalBuffer((__gm__ YType *)buff6);
        buff_gm_[7].SetGlobalBuffer((__gm__ YType *)buff7);
        

    }

    // UB allocation differs per AIV role: sub-block 0 gets the dequant
    // pipeline buffers, sub-block 1 only needs the move queue.
    __aicore__ inline void initLocalBuffers(){
        if (aiv_id < 1 ){
        pipe_->InitBuffer(vec_que_src_, BUFFER_NUM, ub_calc_m_ * ub_calc_n_ * sizeof(int32_t));
        pipe_->InitBuffer(vec_que_tmp_, ub_tmp_buffer_);
        pipe_->InitBuffer(vec_que_out_, BUFFER_NUM, ub_calc_m_ * ub_calc_n_ * sizeof(YType));
        if(bias_dtype_ != DT_INT32){
            pipe_->InitBuffer(bias_fp32_tmp_, ub_calc_n_*sizeof(float));
            pipe_->InitBuffer(vec_que_bias_, BUFFER_NUM, ub_calc_n_ * bias_dtypeSize_);
        }
        if(!is_per_tensor_){
            pipe_->InitBuffer(vec_que_scale_, BUFFER_NUM, ub_calc_n_ * sizeof(ScaleType));
        }
        pipe_->InitBuffer(out_fp32_tmp_, ub_calc_m_ * ub_calc_n_ * sizeof(float));
        pipe_->InitBuffer(vec_que_pertoken_scale_, BUFFER_NUM, DequantBmm::ceil_align(ub_calc_m_, 8U) * sizeof(float));
        pipe_->InitBuffer(broadcast_fp32_tmp_, ub_calc_m_ * ub_calc_n_ * sizeof(float));
        }
        else{
            pipe_->InitBuffer(vec_que_mv_, 1, p_value_comm * base_m_ * base_n_ * sizeof(YType)); // TODO: 2 * p_value_comm
        }
    }



    // Runs one base_m x base_n matmul on the AIC and streams the int32 result
    // into the workspace slot selected by offset_workspace_c_.
    __aicore__ inline void basicMMCompute(uint32_t base_m, uint32_t base_n){
        mm_.SetSingleShape(base_m, base_n, ka_);
        mm_.SetTensorA(x_gm_[offset_.offset_a], ATrans);
        mm_.SetTensorB(weight_gm_[offset_.offset_b], BTrans);
        mm_.Iterate();
        mm_.GetTensorC(mm_out_gm_[offset_workspace_c_], 0, true);
    }

    // Multiplies the dequantized fp32 tile by the per-token (per-row) scale:
    // loads cur_aiv_m scales, broadcasts them across columns, and writes the
    // scaled tile into tmpdst_local.
    // basic_block_compute_info = {cur_aiv_n, cur_aiv_m, ub_res_aligned_n}.
    __aicore__ inline void pertokenCalculate(uint32_t basic_block_compute_info[], uint32_t m_ub_loop_idx, DataCopyPadParams &pad_params,
    LocalTensor<float> &dst_local_fp32, LocalTensor<float> &tmpdst_local){
        uint32_t cur_aiv_n = basic_block_compute_info[0];
        uint32_t cur_aiv_m = basic_block_compute_info[1];
        uint32_t ub_res_aligned_n = basic_block_compute_info[2];
        DataCopyParams scale_to_ub_params{1, 0, 0, 0};
        scale_to_ub_params.blockLen = cur_aiv_m * sizeof(float);
        uint64_t offset_pertoken = offset_.offset_pertoken + m_ub_loop_idx * ub_calc_m_;
        uint32_t computed_aiv_n = DequantBmm::ceil_align(cur_aiv_n, 8U);

        const uint32_t broad_cast_dst[M_N_TWO_DIMS] = {cur_aiv_m, computed_aiv_n};
        const uint32_t broad_cast_src[M_N_TWO_DIMS] = {cur_aiv_m, 1};

        LocalTensor<float> broadcast_fp32 = broadcast_fp32_tmp_.Get<float>();
        LocalTensor<float> pertoken_scale_local = vec_que_pertoken_scale_.AllocTensor<float>();

        DataCopyPad(pertoken_scale_local, pertoken_scale_gm_[offset_pertoken], scale_to_ub_params, pad_params);
        vec_que_pertoken_scale_.EnQue<float>(pertoken_scale_local);
        pertoken_scale_local = vec_que_pertoken_scale_.DeQue<float>();

        // Expand the [m,1] scale column to [m, computed_aiv_n].
        BroadCast<float, M_N_TWO_DIMS, 1>(broadcast_fp32, pertoken_scale_local, broad_cast_dst, broad_cast_src);

        pipe_barrier(PIPE_V);

        if (computed_aiv_n == ub_res_aligned_n){
            // Row layouts match: one fused multiply over the whole tile.
            Mul(tmpdst_local, broadcast_fp32, dst_local_fp32, computed_aiv_n * cur_aiv_m);
        } else {
            // Different row pitches: multiply row by row.
            for (auto i = 0; i < cur_aiv_m; i++){
                Mul(tmpdst_local[ub_res_aligned_n * i], broadcast_fp32[computed_aiv_n * i], dst_local_fp32[computed_aiv_n * i],
                computed_aiv_n);
            }
        }
        vec_que_pertoken_scale_.FreeTensor(pertoken_scale_local);
    }

    // Dequantizes one int32 basic block (scale * pertoken_scale), casts to
    // YType and stores it: blocks owned by this rank go straight to y_gm_;
    // blocks owned by peers are packed into buff_gm_[rank] for them to fetch.
    __aicore__ inline void basicDequantCompute(GlobalTensor<int32_t> &cur_mm_out_gm, uint32_t cur_aic_m, uint32_t cur_aic_n, int32_t comm_idx, int32_t p, int32_t actual_loop_num){
        int32_t loop_idx = comm_idx * p_value * used_core_num_ + p * used_core_num_ + block_idx_;
        int32_t rank_idx = loop_idx % rank_size;
        int32_t in_rank_idx = loop_idx / rank_size;

        LocalTensor<float> dst_local_fp32 = out_fp32_tmp_.Get<float>();
        LocalTensor<float> bias_fp32;
        LocalTensor<bfloat16_t> ori_bias_bf16;
        uint32_t cur_aiv_m = ub_calc_m_;
        uint32_t cur_aiv_n = cur_aic_n;
        // The AIC block is processed in m-chunks of ub_calc_m_ rows.
        uint32_t m_ub_loops = Ceil(cur_aic_m, ub_calc_m_);
        DataCopyParams gm_to_ub_params{1, 0, 0, 0};
        DataCopyExtParams ub_to_gm_params{1, 0, 0, 0, 0};
        DataCopyPadParams pad_params;
        DequantParams dequant_params;
        DequantBmm::calc_dequant_params(m_ub_loops == 1 ? cur_aic_m : ub_calc_m_, cur_aic_n, dequant_params);
        for (uint32_t m_ub_loop_idx = 0; m_ub_loop_idx < m_ub_loops; ++m_ub_loop_idx){
            if (m_ub_loop_idx == m_ub_loops - 1){
                // Tail chunk: recompute dequant params for the shorter height.
                cur_aiv_m = cur_aic_m - ub_calc_m_ * (m_ub_loops - 1);
                DequantBmm::calc_dequant_params(cur_aiv_m, cur_aic_n, dequant_params, m_ub_loops != 1 && cur_aiv_m != ub_calc_m_);
            }
            LocalTensor<int32_t> src_local = vec_que_src_.AllocTensor<int32_t>();
            LocalTensor<YType> dst_local = vec_que_out_.AllocTensor<YType>();
            LocalTensor<uint8_t> tmp_local = vec_que_tmp_.Get<uint8_t>();
            
            DequantBmm::set_gm2ub_params(gm_to_ub_params, cur_aiv_m, cur_aiv_n);
            DequantBmm::copy_mm_out_to_local(src_local, cur_mm_out_gm, gm_to_ub_params, pad_params, offset_workspace_c_
            + m_ub_loop_idx * ub_calc_m_ * cur_aic_n);

            // Per-channel weight scale -> fp32 dequant of the int32 tile.
            LocalTensor<ScaleType> scale_local = vec_que_scale_.AllocTensor<ScaleType>();
            DequantBmm::bf16_scale_gm_to_ub<ScaleType>(scale_local, scale_gm_, pad_params, cur_aic_n, offset_.offset_scale);
            AscendDequant(dst_local_fp32, src_local, scale_local, tmp_local, dequant_params);
            vec_que_scale_.FreeTensor(scale_local);

            uint32_t ub_res_aligned_n = DequantBmm::ceil_align(cur_aiv_n);
            LocalTensor<float> tmpdst_local = vec_que_tmp_.Get<float>();
            uint32_t basic_block_compute_info[3] = {cur_aiv_n, cur_aiv_m, ub_res_aligned_n};
            pertokenCalculate(basic_block_compute_info, m_ub_loop_idx, pad_params, dst_local_fp32, tmpdst_local);
            
            pipe_barrier(PIPE_V);
            // fp32 -> YType with round-to-nearest-even.
            Cast(dst_local, tmpdst_local, RoundMode::CAST_RINT, cur_aiv_m * ub_res_aligned_n);
            set_flag(PIPE_V, PIPE_MTE3, static_cast<event_t>(EVENT_ID2));
            vec_que_src_.FreeTensor(src_local);
            
            //int loop_idx = comm_idx * p_value * used_core_num_ + p * used_core_num_ + block_idx_;

            uint32_t dst_stride;
            uint64_t y_offset;
            GlobalTensor<YType> gm_out;
            if(rank_idx == rank){
                // This block belongs to our rank: write directly to the output.
                dst_stride = n_;
                y_offset = offset_.offset_c + m_ub_loop_idx * ub_calc_m_ * n_;
                gm_out = y_gm_;
                DequantBmm::set_ub_to_gm_params<YType>(ub_to_gm_params, cur_aiv_m, cur_aiv_n, dst_stride);
                wait_flag(PIPE_V, PIPE_MTE3, static_cast<event_t>(EVENT_ID2));
                // SetAtomicAdd<YType>();
                DequantBmm::copy_ub_to_gm<YType>(y_offset, ub_to_gm_params, dst_local, gm_out);
                // SetAtomicNone();
                
            } else {
                // Peer-owned block: pack it into our shared buffer at the slot
                // the peer's mover (process(), aiv_id==1 branch) will read.
                // TODO: hard-coded communication double buffer (2)

                int64_t actual_comm_loop_num = (actual_loop_num / rank_size + p_value_comm - 1) / p_value_comm;
                int64_t comm_loop_num = (loop_num_per_comm_ / rank_size + p_value_comm - 1) / p_value_comm;
                int64_t in_comm_idx = in_rank_idx  % (loop_num_per_comm_ / rank_size);
                int64_t rank_offset_c = rank_idx * actual_comm_loop_num * p_value_comm * base_m_ * base_n_;

                y_offset = (comm_idx % 2) * base_m_ * base_n_ * comm_loop_num * p_value_comm * rank_size
                + rank_offset_c
                + (in_comm_idx / p_value_comm) * p_value_comm * base_m_ * base_n_
                + (in_comm_idx % p_value_comm) * base_n_
                + m_ub_loop_idx * ub_calc_m_ * base_n_ * p_value_comm;


                // Rows in the shared buffer are p_value_comm blocks wide.
                dst_stride = base_n_ * p_value_comm;
                gm_out = buff_gm_[rank];
                DequantBmm::set_ub_to_gm_params<YType>(ub_to_gm_params, cur_aiv_m, cur_aiv_n, dst_stride);
                wait_flag(PIPE_V, PIPE_MTE3, static_cast<event_t>(EVENT_ID2));
                DequantBmm::copy_ub_to_gm<YType>(y_offset, ub_to_gm_params, dst_local, gm_out);

            }
            vec_que_out_.FreeTensor(dst_local);
        }
    }

    // Drains the outstanding V2C notifications issued by the AIVs during the
    // initial priming (2 rounds x p_value) and shuts down the matmul object.
    __aicore__ inline void End(){
        if ASCEND_IS_AIC{
            for (int64_t comm_idx = 0; comm_idx < 2; ++comm_idx){
                for (int64_t p = 0; p < p_value; ++p){
                    NotifyEventImpl<2, PIPE_MTE3>(V2C_FLAG);
                }
            }
            mm_.End();
        }
    }

private:
    GlobalTensor<X1Type> x_gm_;
    GlobalTensor<X2Type> weight_gm_;
    GlobalTensor<int32_t> bias_gm_int32_;
    GlobalTensor<bfloat16_t> bias_gm_bf16_;
    GlobalTensor<YType> y_gm_;
    GlobalTensor<ScaleType> scale_gm_;
    GlobalTensor<float> pertoken_scale_gm_;
    GlobalTensor<int32_t> mm_out_gm_;       // int32 matmul staging workspace

    GlobalTensor<YType> buff_gm_[8];        // per-rank shared GM windows

    TPipe *pipe_;
    TQue<QuePosition::VECIN, BUFFER_NUM> vec_que_src_;
    TQueBind<QuePosition::VECIN, QuePosition::VECOUT, 1> vec_que_mv_;   // aiv_id==1 move queue

    TQue<QuePosition::VECIN, BUFFER_NUM> vec_que_scale_;
    TQue<QuePosition::VECIN, BUFFER_NUM> vec_que_bias_;
    TBuf<TPosition::VECCALC> vec_que_tmp_;
    TQue<QuePosition::VECOUT, BUFFER_NUM> vec_que_out_;
    TBuf<TPosition::VECCALC> bias_fp32_tmp_;
    TBuf<TPosition::VECCALC> out_fp32_tmp_;
    TQue<TPosition::VECIN, BUFFER_NUM> vec_que_pertoken_scale_;
    TBuf<TPosition::VECCALC> broadcast_fp32_tmp_;

    ScaleType scale_scalar_;
    
    //tiling data
    bool is_per_tensor_;
    uint32_t used_core_num_;
    uint32_t m_;
    uint32_t n_;
    uint32_t ka_;
    uint32_t base_m_;
    uint32_t base_n_;
    uint32_t has_bias_;
    uint32_t bias_dtype_ = 0;
    uint32_t bias_dtypeSize_ = 0;

    //vector
    uint32_t ub_calc_m_;
    uint32_t ub_calc_n_;
    uint32_t ub_tmp_buffer_;
    uint32_t split_trans_num_;

    uint32_t block_idx_;
    uint64_t offset_workspace_c_ = 0;
    uint64_t loop_ = 0;

    uint64_t index_;
    uint64_t m_total_cnt_;
    uint64_t n_total_cnt_;
    uint64_t total_cnt_;
    uint64_t round_;
    uint64_t real_round_;
    uint64_t pre_core_num_;
    int32_t rank_size;      // number of participating ranks/cards
    int32_t m_loop_;
    int32_t n_loop_;
    int32_t core_loop_;     // total basic blocks over the whole problem
    int32_t p_value;        // basic blocks per core per communication round
    int32_t rank;           // this card's rank id
    int32_t loop_num_per_comm_;
    int32_t aiv_id;         // sub-block id: 0 = compute AIV, 1 = comm AIV
    uint64_t FLAG_OFFSET;   // element offset of the flag word in buff_gm_
    int32_t p_value_comm;   // blocks grouped per communication inner step

    __ubuf__ int16_t *ctrl_flags_UB = (__ubuf__ int16_t *)(0);


    // QuantMatmulV2BaseBlock block_;
    // UPDATE_TYPE update_;
    QBmmBlockOffset offset_;

    using AMatmulType = matmul::MatmulType<TPosition::GM, CubeFormat::ND, X1Type, ATrans>;
    using BMatmulType = matmul::MatmulType<TPosition::GM, DequantBmm::get_format(X2Format), X2Type, BTrans>;
    using BiasMatmulType = matmul::MatmulType<TPosition::GM, CubeFormat::ND, int32_t>;
    using CMatmulType = matmul::MatmulType<TPosition::VECIN, CubeFormat::ND, int32_t>;
    matmul::MatmulImpl<AMatmulType, BMatmulType, CMatmulType, BiasMatmulType, CFG_MDL> mm_;
};

} // namespace AscendC
#endif
=======
int32_t aiv_num = GetSubBlockNum();
int32_t ratio = GetTaskRation();
if(block_idx_ >= used_core_num_ )
{
    return;
}

unint64_t ping_offset_c = offset_workspace_c_;
unint32_t m_actual;
unint32_t n_actual;
unint64_t m_idx;
unint64_t n_idx;
uint64_t m_offset; //实际读取数据时m的偏移
uint64_t n_offset; //实际读取数据时n的偏移


int32_t tail_m = (m_)/(rank_size) % base_m_;
m_loop_ = rank_size * ((m_ / rank_size + base_m_ -1) / base_m_);
n_loop_ = n_ / base_n_;
core_loop_ = m_loop_ * n_loop_ ;
loop_num_per_comm_ = p_vlaue * used_core_num;

int32_t comm_num = (core_loop_  + loop_num_per_comm_ - 1) / loop_num_per_comm_;
int32_t m_loop_per_rank = m_loop_ / rank_size;


if ASCEND_IS_AIV{
    if((block_idx_ == rank) && (aiv_id == 1)){
        DequantBmm::SetBuffFlag<YType>(ctrl_flags_UB, FLAG_OFFSET + FLAG_ZERO_IDX, buff_gm_[rank], 0);
        PipeBarrier<PIPE_ALL>();
    }
    for(int64_t comm_idx = 0; coom_idx < 2; ++comm_idx){
        for(int64_t p=0; p<p_value; ++p){
            NotifyEventImpl<2, PIPE_MTE3>(V2C_FLAG);
        }
    }
}



int32_t comm_start = 0;
// int32_t comm_start = p_value * rank;
for(int32_t comm_times = 0; comm_times < comm_num ; comm_times++){
    int32_t comm_idx = (comm_start + comm_times)% comm_num;
    // FIX: loop_num_pre_comm_ -> loop_num_per_comm_ (typo; member is loop_num_per_comm_).
    int32_t actual_loop_num = loop_num_per_comm_;
    if (comm_idx == comm_num -1){
        actual_loop_num = core_loop_ - comm_idx * loop_num_per_comm_;
    }

    uint64_t flag_id = comm_idx % 2;  // ping-pong buffer selector

    if (aiv_id < 1){

    for (int32_t p = 0; p < p_value; p++){
        int32_t loop_idx = comm_idx * p_value * used_core_num_ + p * used_core_num_ +block_idx_;
        int32_t rank_idx = loop_idx % rank_size;
        int32_t in_rank_idx = loop_idx / rank_size;
        if(loop_idx >= core_loop_){
            // Out-of-range tile: still run the flag handshake to keep AIC/AIV in lockstep.
            if ASCEND_IS_AIC{
                WaitEvent(V2C_FLAG);
                NotifyEventImpl<2, PIPE_FIX>(C2V_FLAG);
            }

            if ASCEND_IS_AIV{
                WaitEvent(C2V_FLAG);
                NotifyEventImpl<2,PIPE_MTE3>(V2C_FLAG);
            }
            continue;
        }
        
        m_idx = in_rank_idx / n_loop_;
        n_idx = in_rank_idx % n_loop_;
        
        // https://www.hiascend.com/document/detail/zh/canncommercial/800/apiref/ascendtbapi/ascendtb_01_0083.html
        // nd2nz: for int8 input, a [m,n] tensor with m0 = 16, n0 = 32 yields shape [1,[n/n0],[m/m0]*m0,n0]; 256/32 = 8
        // TODO: add nz/swizzle ordering within a rank to improve L2 cache hit rate
        m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ / rank_size - m_idx * base_m_) : base_m_;
        // FIX: m_offset was read uninitialized below; derive it from m_idx
        // (same pattern as the copy path: m_offset = m_idx * base_m_).
        m_offset = m_idx * base_m_;
        // FIX: n_actual was never assigned before basicMMCompute; compute the tail-aware width
        // (mirrors the n_actual formula used in the communication branch).
        n_actual = (n_idx == (n_loop_ - 1)) ? (n_ - n_idx * base_n_) : base_n_;
        offset_.offset_a = m_offset * ka_;
        n_offset = n_idx * base_n_;
        offset_.offset_b = n_offset * ka_;
        offset_.offset_c = (m_idx * base_m_) * n_ + n_offset; //m_offset * n_ + n_offset;
        offset_.offset_scale = n_offset;
        offset_.offset_bias = n_offset;
        offset_.offset_pertoken = m_offset;

        // FIX: offset_workspcae_c_ -> offset_workspace_c_, p_vlaue -> p_value (typos).
        offset_workspace_c_ = ping_offset_c + (flag_id * p_value + p) * base_m_ * base_n_;

        if ASCEND_IS_AIC{
            WaitEvent(V2C_FLAG);
            basicMMCompute(m_actual,n_actual);
            NotifyEventImpl<2, PIPE_FIX>(C2V_FLAG);
        }

        if ASCEND_IS_AIV{
            WaitEvent(C2V_FLAG);
            basicDequantCompute(mm_out_gm_, m_actual, n_actual, comm_idx, p, actual_loop_num);// TODO: copy size may not be a multiple of 2, so the two AIVs move different amounts
            NotifyEventImpl<2,PIPE_MTE3>(V2C_FLAG);
        }
    }
    
    if ASCEND_IS_AIV{
        NotifyEventImpl<0, PIPE_MTE3>(V2V_D2C_FLAG);
        WaitEvent(V2V_D2C_FLAG);
    }
    }
    
    else{

        // Non-compute AIVs: drain the C2V flags produced by the cube cores this round.
        for(int64_t p = 0;p < p_value; ++p){
            WaitEvent(C2V_FLAG);
        }

        NotifyEventImpl<0, PIPE_MTE3>(V2V_D2C_FLAG);
        WaitEvent(V2V_D2C_FLAG);

        int32_t other_rank = block_idx_ % rank_size;

        // In-card matmul results for this round are ready.
        if (block_idx_ == rank){
            // FIX: DequantDmm -> DequantBmm (typo; the namespace used everywhere else).
            DequantBmm::SetBuffFlagByAdd<YType>(ctrl_flags_UB, FLAG_OFFSET + FLAG_ZERO_IDX, buff_gm_[rank], 1);// FLAG_VALUE 1
        }
        int32_t comm_aivs = 1;
        if((other_rank != rank) && (block_idx_ < comm_aivs * rank_size)){
            DequantBmm::CheckBuffFlag<YType>(ctrl_flags_UB, FLAG_OFFSET + FLAG_ZERO_IDX, buff_gm_[other_rank], comm_times + 1);
            // int32_t other_comm_idx = (p_value * other_rank + comm_times) % comm_num;
            int32_t other_comm_idx = (comm_times) % comm_num;
            int32_t comm_loop_num = (loop_num_per_comm_ / rank_size + p_value_comm - 1) / p_value_comm;
            int32_t actual_comm_loop_num = (actual_loop_num / rank_size + p_value_comm -1) / p_value_comm;

            int32_t rank_offset = rank * p_value_comm * actual_comm_loop_num * base_m_ * base_n_ + flag_id * base_m_ * base_n_*comm_loop_num * p_value_comm * rank_size;

            int32_t actual_comm_loop_num_block = (actual_comm_loop_num + comm_aivs - 1) / comm_aivs;
            for(int in_loop_idx_block = 0; in_loop_idx_block < actual_comm_loop_num_block; in_loop_idx_block++){
                // FIX: block_idx -> block_idx_ (bare block_idx is undeclared in this scope).
                int32_t in_loop_idx = (in_loop_idx_block * comm_aivs + block_idx_/rank_size);
                int32_t rank_buff_offset = p_value_comm * in_loop_idx * base_m_ * base_n_ +rank_offset;

                int32_t loop_idx = other_comm_idx * p_value * used_core_num_ / rank_size + in_loop_idx * p_value_comm;
                uint32_t cur_aiv_m = base_m_;
                uint32_t cur_aiv_loop = (in_loop_idx == actual_comm_loop_num -1 ) ? (actual_loop_num / rank_size) - in_loop_idx * p_value_comm : p_value_comm;
                uint32_t cur_aiv_n = base_n_ * cur_aiv_loop;


                DataCopyParams gm_to_ub_params{ 1, 0, 0, 0};
                DataCopyExtParams ub_to_gm_params{1, 0, 0, 0, 0};
                DataCopyPadParams pad_params;
                DequantParams dequant_params;

                // Pull the peer rank's tile batch from its GM buffer into UB.
                LocalTensor<YType> src_local = vec_que_mv_.AllocTensor<YType>();
                gm_to_ub_params.blockLen = cur_aiv_n * sizeof(YType);
                // FIX: blcokCount -> blockCount (typo; field did not exist).
                gm_to_ub_params.blockCount = cur_aiv_m;
                gm_to_ub_params.srcStride = (base_n_ * p_value_comm - cur_aiv_n) * sizeof(YType);
                DataCopyPad(src_local, buff_gm_[other_rank][rank_buff_offset], gm_to_ub_params, pad_params);
                vec_que_mv_.EnQue<YType>(src_local);
                src_local = vec_que_mv_.DeQue<YType>();
                set_flag(PIPE_MTE2, PIPE_MTE3, static_cast<event_t>(EVENT_ID0));
                wait_flag(PIPE_MTE2, PIPE_MTE3, static_cast<event_t>(EVENT_ID0));

                uint64_t y_offset;
                uint32_t dst_stride;

                m_idx = loop_idx / n_loop_;
                n_idx = loop_idx % n_loop_;
                if(n_idx + cur_aiv_loop - 1 < n_loop_){
                    // Whole UB slice stays within one row of n tiles: single copy.
                    m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ / rank_size - m_idx * base_m_) : base_m_;
                    // FIX: n_loop -> n_loop_, n -> n_ (member names; bare forms undeclared).
                    n_actual = (cur_aiv_loop -1) * base_n_ + ((n_idx + cur_aiv_loop -1 == (n_loop_ -1 )) ? (n_ - (n_loop_ - 1) * base_n_ ) : base_n_);
                    AscendC::printf("comm_idx: %d, block_idx: %d, in_loop_idx: %d, m_actual: %d, n_actual: %d\n", comm_idx, block_idx_, in_loop_idx_block, m_actual, n_actual);
                    m_offset = m_idx * base_m_;
                    n_offset = n_idx * base_n_;
                    y_offset = m_offset * n_ + n_offset;
                    dst_stride = n_;

                    // FIX: sizeif -> sizeof, blcokCount -> blockCount, SetAtomocAdd -> SetAtomicAdd (typos).
                    ub_to_gm_params.blockLen = n_actual * sizeof(YType);
                    ub_to_gm_params.blockCount = m_actual;
                    ub_to_gm_params.dstStride = (dst_stride - n_actual) * sizeof(YType);
                    // NOTE(review): srcStride is left 0 here but set in the wrapped branch below -- confirm intent.
                    SetAtomicAdd<YType>();

                    // FIX: y_gm -> y_gm_ (member name used everywhere else in this branch pair).
                    DequantBmm::copy_ub_to_gm<YType>(y_offset, ub_to_gm_params, src_local, y_gm_);

                } else {
                    // Slice wraps to the next row of n tiles: split the write-back in two copies.
                    // FIX: uiny32_t -> uint32_t (typo).
                    uint32_t cur_aiv_loop1 = n_loop_ - n_idx;
                    m_idx = loop_idx / n_loop_;
                    n_idx = loop_idx % n_loop_;
                    m_offset = m_idx * base_m_;
                    n_offset = n_idx * base_n_;

                    y_offset = m_offset * n_ + n_offset;
                    dst_stride = n_;

                    // FIX: midx -> m_idx, n_loop -> n_loop_ (typos; undeclared identifiers).
                    m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ /rank_size - m_idx * base_m_) : base_m_;
                    n_actual = (cur_aiv_loop1 - 1) * base_n_ + ((n_idx + cur_aiv_loop1 -1 == (n_loop_ - 1)) ? (n_ - (n_loop_ - 1) * base_n_) : base_n_);
                    
                    ub_to_gm_params.blockLen = n_actual * sizeof(YType);
                    ub_to_gm_params.blockCount = m_actual;
                    ub_to_gm_params.dstStride = (dst_stride - n_actual) * sizeof(YType);
                    ub_to_gm_params.srcStride = (cur_aiv_n - n_actual) * sizeof(YType) / 32;

                    SetAtomicAdd<YType>();
                    DataCopyPad(y_gm_[y_offset], src_local, ub_to_gm_params);

                    // Second half of the wrapped copy, continuing at the next row of tiles.
                    uint32_t cur_aiv_loop2 = cur_aiv_loop - cur_aiv_loop1;
                    loop_idx = loop_idx + cur_aiv_loop1;
                    m_idx = loop_idx / n_loop_;
                    n_idx = loop_idx % n_loop_;
                    m_offset = m_idx * base_m_;
                    n_offset = n_idx * base_n_;
                    y_offset = m_offset * n_ + n_offset;

                    m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ / rank_size - m_idx * base_m_) : base_m_;
                    // FIX: the ternary was missing its ": base_n_" else branch (did not compile).
                    n_actual = (cur_aiv_loop2 - 1) * base_n_ + ((n_idx +cur_aiv_loop2 -1 == (n_loop_ -1)) ? (n_ - (n_loop_ - 1) * base_n_) : base_n_);
                    AscendC::printf("comm_idx: %d, block_idx: %d, in_loop_idx: %d, m_actual: %d, n_actual: %d\n", comm_idx, block_idx_, in_loop_idx_block, m_actual, n_actual);
                    dst_stride = n_;
                    
                    ub_to_gm_params.blockLen = n_actual * sizeof(YType);
                    ub_to_gm_params.blockCount = m_actual;
                    ub_to_gm_params.dstStride = (dst_stride - n_actual) * sizeof(YType);
                    ub_to_gm_params.srcStride = (cur_aiv_n - n_actual) * sizeof(YType)/32;

                    // FIX: cur_aiv_local -> cur_aiv_loop1 (undeclared identifier; the UB offset
                    // skips the first half's width, base_n_ * cur_aiv_loop1 elements).
                    DataCopyPad(y_gm_[y_offset], src_local[base_n_ * cur_aiv_loop1], ub_to_gm_params);


                }
                PipeBarrier<PIPE_ALL>();
                SetAtomicNone();
                vec_que_mv_.FreeTensor(src_local);
            }
        }

        // Re-arm the V2C flags so the cube cores can start the next round.
        for(int64_t p = 0; p < p_value; ++p){
            NotifyEventImpl<2, PIPE_MTE3>(V2C_FLAG);
        }
    }
}




//int loop_idx = comm_idx * p_value * used_core_num_ + p * used_core_num_ + block_idx_;
uint32_t dst_stride;
uint64_t y_offset;
GlobalTensor<YType> gm_out;
if(rank_idx == rank){
    // Tile belongs to this rank: write the dequantized result straight into the output tensor.
    dst_stride = n_;
    y_offset = offset_.offset_c + m_ub_loop_idx * ub_calc_m_ * n_;
    gm_out = y_gm_;
    DequantBmm::set_ub_to_gm_params<YType>(ub_to_gm_params, cur_aiv_m, cur_aiv_n, dst_stride);
    wait_flag(PIPE_V, PIPE_MTE3, static_cast<event_t>(EVENT_ID2));
    // SetAtomicAdd<YType>();
    DequantBmm::copy_ub_to_gm<YType>(y_offset, ub_to_gm_params, dst_local, gm_out);
    // SetAtomicNone();
}
else{
    // TODO: hard-coded communication double buffer (2)
    int64_t actual_comm_loop_num = (actual_loop_num / rank_size + p_value_comm - 1) / p_value_comm;
    // FIX: p_valye_comm -> p_value_comm (typo; undeclared identifier).
    int64_t comm_loop_num = (loop_num_per_comm_ / rank_size + p_value_comm - 1)/ p_value_comm;
    int64_t in_comm_idx = in_rank_idx % (loop_num_per_comm_ / rank_size);
    int64_t rank_offset_c = rank_idx * actual_comm_loop_num * p_value_comm * base_m_ * base_n_;
    
    // FIX: m_ub_loop_id -> m_ub_loop_idx (matches the identifier used in the local-rank branch).
    y_offset = (comm_idx % 2) * base_m_ * base_n_ * comm_loop_num * p_value_comm *rank_size
    + rank_offset_c
    + (in_comm_idx / p_value_comm) * p_value_comm * base_m_ * base_n_
    + (in_comm_idx % p_value_comm) * base_n_
    + m_ub_loop_idx * ub_calc_m_ * base_n_ * p_value_comm;

    // FIX: dfs_stride -> dst_stride (undeclared identifier; dst_stride would be read uninitialized below).
    dst_stride = base_n_ * p_value_comm;
    gm_out = buff_gm_[rank];
    DequantBmm::set_ub_to_gm_params<YType>(ub_to_gm_params, cur_aiv_m, cur_aiv_n, dst_stride);
    wait_flag(PIPE_V, PIPE_MTE3, static_cast<event_t>(EVENT_ID2));
    DequantBmm::copy_ub_to_gm<YType>(y_offset, ub_to_gm_params, dst_local, gm_out);
}
vec_que_out_.FreeTensor(dst_local);
>>>>>>> origin/master
