int32_t aiv_num = GetSubBlockNum();
int32_t ratio = GetTaskRation();
if(block_idx_ >= used_core_num_ )
{
    return;
}

unint64_t ping_offset_c = offset_workspace_c_;
unint32_t m_actual;
unint32_t n_actual;
unint64_t m_idx;
unint64_t n_idx;
uint64_t m_offset; //实际读取数据时m的偏移
uint64_t n_offset; //实际读取数据时n的偏移


int32_t tail_m = (m_)/(rank_size) % base_m_;
m_loop_ = rank_size * ((m_ / rank_size + base_m_ -1) / base_m_);
n_loop_ = n_ / base_n_;
core_loop_ = m_loop_ * n_loop_ ;
loop_num_per_comm_ = p_vlaue * used_core_num;

int32_t comm_num = (core_loop_  + loop_num_per_comm_ - 1) / loop_num_per_comm_;
int32_t m_loop_per_rank = m_loop_ / rank_size;


if ASCEND_IS_AIV{
    // This rank's vector core 1 clears the cross-rank ready flag before the
    // pipeline starts, so CheckBuffFlag counts from zero this invocation.
    if((block_idx_ == rank) && (aiv_id == 1)){
        DequantBmm::SetBuffFlag<YType>(ctrl_flags_UB, FLAG_OFFSET + FLAG_ZERO_IDX, buff_gm_[rank], 0);
        PipeBarrier<PIPE_ALL>();
    }
    // Pre-post V->C events for the first two (double-buffered) communication
    // rounds so the cube cores can start computing immediately.
    // fixed typo: loop condition referenced undeclared `coom_idx`
    for(int64_t comm_idx = 0; comm_idx < 2; ++comm_idx){
        for(int64_t p=0; p<p_value; ++p){
            NotifyEventImpl<2, PIPE_MTE3>(V2C_FLAG);
        }
    }
}



int32_t comm_start = 0;
// int32_t comm_start = p_value * rank;
for(int32_t comm_times = 0; comm_times < comm_num ; comm_times++){
    int32_t comm_idx = (comm_start + comm_times)% comm_num;
    int32_t actual_loop_num = loop_num_per_comm_;  // fixed typo: loop_num_pre_comm_
    if (comm_idx == comm_num -1){
        // Last round may carry fewer tiles than a full communication batch.
        actual_loop_num = core_loop_ - comm_idx * loop_num_per_comm_;
    }

    // Double-buffered workspace: even/odd rounds use alternating halves.
    uint64_t flag_id = comm_idx % 2;

    if (aiv_id < 1){

    // aiv 0 (and the cube cores) walk this round's tiles: matmul + dequant.
    for (int32_t p = 0; p < p_value; p++){
        int32_t loop_idx = comm_idx * p_value * used_core_num_ + p * used_core_num_ +block_idx_;
        int32_t rank_idx = loop_idx % rank_size;
        int32_t in_rank_idx = loop_idx / rank_size;
        if(loop_idx >= core_loop_){
            // Out-of-range tile: still perform the event handshake so AIC/AIV
            // pipelines stay in lock-step, then skip the compute.
            if ASCEND_IS_AIC{
                WaitEvent(V2C_FLAG);
                NotifyEventImpl<2, PIPE_FIX>(C2V_FLAG);
            }

            if ASCEND_IS_AIV{
                WaitEvent(C2V_FLAG);
                NotifyEventImpl<2,PIPE_MTE3>(V2C_FLAG);
            }
            continue;
        }

        m_idx = in_rank_idx / n_loop_;
        n_idx = in_rank_idx % n_loop_;

        // https://www.hiascend.com/document/detail/zh/canncommercial/800/apiref/ascendtbapi/ascendtb_01_0083.html
        // nd2nz: for int8 input, a [m, n] tensor is laid out as
        // [1, n/n0, (m/m0)*m0, n0] with m0 = 16, n0 = 32 (256/32 = 8).
        // TODO: add nz/swizzle ordering of per-rank tiles to improve L2 cache hit rate.
        m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ / rank_size - m_idx * base_m_) : base_m_;
        m_offset = m_idx * base_m_;  // fixed: m_offset was read below while uninitialized
        offset_.offset_a = m_offset * ka_;
        n_offset = n_idx * base_n_;
        offset_.offset_b = n_offset * ka_;
        offset_.offset_c = (m_idx * base_m_) * n_ + n_offset; // == m_offset * n_ + n_offset
        offset_.offset_scale = n_offset;
        offset_.offset_bias = n_offset;
        offset_.offset_pertoken = m_offset;

        // fixed typos: offset_workspcae_c_, p_vlaue
        offset_workspace_c_ = ping_offset_c + (flag_id * p_value + p) * base_m_ * base_n_;

        if ASCEND_IS_AIC{
            WaitEvent(V2C_FLAG);
            basicMMCompute(m_actual,n_actual);
            NotifyEventImpl<2, PIPE_FIX>(C2V_FLAG);
        }

        if ASCEND_IS_AIV{
            WaitEvent(C2V_FLAG);
            // TODO: when the size to move is not a multiple of 2, the two AIVs
            // move different amounts.
            basicDequantCompute(mm_out_gm_, m_actual, n_actual, comm_idx, p, actual_loop_num);
            NotifyEventImpl<2,PIPE_MTE3>(V2C_FLAG);
        }
    }

    if ASCEND_IS_AIV{
        NotifyEventImpl<0, PIPE_MTE3>(V2V_D2C_FLAG);
        WaitEvent(V2V_D2C_FLAG);
    }
    }

    else{
        // aiv 1: communication path. Wait for this round's compute to finish.
        for(int64_t p = 0;p < p_value; ++p){
            WaitEvent(C2V_FLAG);
        }

        NotifyEventImpl<0, PIPE_MTE3>(V2V_D2C_FLAG);
        WaitEvent(V2V_D2C_FLAG);

        int32_t other_rank = block_idx_ % rank_size;

        // In-card matmul results for this round are ready.
        if (block_idx_ == rank){
            // fixed typo: DequantDmm -> DequantBmm (cf. SetBuffFlag/CheckBuffFlag)
            DequantBmm::SetBuffFlagByAdd<YType>(ctrl_flags_UB, FLAG_OFFSET + FLAG_ZERO_IDX, buff_gm_[rank], 1);// FLAG_VALUE 1
        }
        int32_t comm_aivs = 1;
        if((other_rank != rank) && (block_idx_ < comm_aivs * rank_size)){
            // Block until the peer rank has published `comm_times + 1` rounds.
            DequantBmm::CheckBuffFlag<YType>(ctrl_flags_UB, FLAG_OFFSET + FLAG_ZERO_IDX, buff_gm_[other_rank], comm_times + 1);
            // int32_t other_comm_idx = (p_value * other_rank + comm_times) % comm_num;
            int32_t other_comm_idx = (comm_times) % comm_num;
            int32_t comm_loop_num = (loop_num_per_comm_ / rank_size + p_value_comm - 1) / p_value_comm;
            int32_t actual_comm_loop_num = (actual_loop_num / rank_size + p_value_comm -1) / p_value_comm;

            // Offset of this rank's slice inside the peer's workspace, plus the
            // double-buffer half selected by flag_id.
            int32_t rank_offset = rank * p_value_comm * actual_comm_loop_num * base_m_ * base_n_ + flag_id * base_m_ * base_n_*comm_loop_num * p_value_comm * rank_size;

            int32_t actual_comm_loop_num_block = (actual_comm_loop_num + comm_aivs - 1) / comm_aivs;
            for(int in_loop_idx_block = 0; in_loop_idx_block < actual_comm_loop_num_block; in_loop_idx_block++){
                // fixed typo: block_idx -> block_idx_
                int32_t in_loop_idx = (in_loop_idx_block * comm_aivs + block_idx_/rank_size);
                int32_t rank_buff_offset = p_value_comm * in_loop_idx * base_m_ * base_n_ +rank_offset;

                int32_t loop_idx = other_comm_idx * p_value * used_core_num_ / rank_size + in_loop_idx * p_value_comm;
                uint32_t cur_aiv_m = base_m_;
                // Last batch of this round may carry fewer than p_value_comm tiles.
                uint32_t cur_aiv_loop = (in_loop_idx == actual_comm_loop_num -1 ) ? (actual_loop_num / rank_size) - in_loop_idx * p_value_comm : p_value_comm;
                uint32_t cur_aiv_n = base_n_ * cur_aiv_loop;


                DataCopyParams gm_to_ub_params{ 1, 0, 0, 0};
                DataCopyExtParams ub_to_gm_params{1, 0, 0, 0, 0};
                DataCopyPadParams pad_params;
                DequantParams dequant_params;

                // Pull the peer's tile batch from its GM workspace into UB.
                LocalTensor<YType> src_local = vec_que_mv_.AllocTensor<YType>();
                gm_to_ub_params.blockLen = cur_aiv_n * sizeof(YType);
                gm_to_ub_params.blockCount = cur_aiv_m;  // fixed typo: blcokCount
                gm_to_ub_params.srcStride = (base_n_ * p_value_comm - cur_aiv_n) * sizeof(YType);
                DataCopyPad(src_local, buff_gm_[other_rank][rank_buff_offset], gm_to_ub_params, pad_params);
                vec_que_mv_.EnQue<YType>(src_local);
                src_local = vec_que_mv_.DeQue<YType>();
                set_flag(PIPE_MTE2, PIPE_MTE3, static_cast<event_t>(EVENT_ID0));
                wait_flag(PIPE_MTE2, PIPE_MTE3, static_cast<event_t>(EVENT_ID0));

                uint64_t y_offset;
                uint32_t dst_stride;

                m_idx = loop_idx / n_loop_;
                n_idx = loop_idx % n_loop_;
                if(n_idx + cur_aiv_loop - 1 < n_loop_){
                    // Whole batch stays inside one tile row: single copy-out.
                    m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ / rank_size - m_idx * base_m_) : base_m_;
                    // fixed typos: n_loop -> n_loop_, n -> n_
                    n_actual = (cur_aiv_loop -1) * base_n_ + ((n_idx + cur_aiv_loop -1 == (n_loop_ -1 )) ? (n_ - (n_loop_ - 1) * base_n_ ) : base_n_);
                    AscendC::printf("comm_idx: %d, block_idx: %d, in_loop_idx: %d, m_actual: %d, n_actual: %d\n", comm_idx, block_idx_, in_loop_idx_block, m_actual, n_actual);
                    m_offset = m_idx * base_m_;
                    n_offset = n_idx * base_n_;
                    y_offset = m_offset * n_ + n_offset;
                    dst_stride = n_;

                    ub_to_gm_params.blockLen = n_actual * sizeof(YType);  // fixed typo: sizeif
                    ub_to_gm_params.blockCount = m_actual;  // fixed typo: blcokCount
                    ub_to_gm_params.dstStride = (dst_stride - n_actual) * sizeof(YType);
                    SetAtomicAdd<YType>();  // fixed typo: SetAtomocAdd

                    // fixed typo: y_gm -> y_gm_ (cf. other copy-outs below)
                    DequantBmm::copy_ub_to_gm<YType>(y_offset, ub_to_gm_params, src_local, y_gm_);

                } else {
                    // Batch straddles a tile-row boundary: split into two copy-outs.
                    uint32_t cur_aiv_loop1 = n_loop_ - n_idx;  // fixed typo: uiny32_t
                    m_idx = loop_idx / n_loop_;
                    n_idx = loop_idx % n_loop_;
                    m_offset = m_idx * base_m_;
                    n_offset = n_idx * base_n_;

                    y_offset = m_offset * n_ + n_offset;
                    dst_stride = n_;

                    // fixed typo: midx -> m_idx
                    m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ /rank_size - m_idx * base_m_) : base_m_;
                    // fixed typo: n_loop -> n_loop_
                    n_actual = (cur_aiv_loop1 - 1) * base_n_ + ((n_idx + cur_aiv_loop1 -1 == (n_loop_ - 1)) ? (n_ - (n_loop_ - 1) * base_n_) : base_n_);

                    ub_to_gm_params.blockLen = n_actual * sizeof(YType);
                    ub_to_gm_params.blockCount = m_actual;
                    ub_to_gm_params.dstStride = (dst_stride - n_actual) * sizeof(YType);
                    ub_to_gm_params.srcStride = (cur_aiv_n - n_actual) * sizeof(YType) / 32;

                    SetAtomicAdd<YType>();
                    DataCopyPad(y_gm_[y_offset], src_local, ub_to_gm_params);

                    // Second part: the remaining tiles, starting on the next tile row.
                    uint32_t cur_aiv_loop2 = cur_aiv_loop - cur_aiv_loop1;
                    loop_idx = loop_idx + cur_aiv_loop1;
                    m_idx = loop_idx / n_loop_;
                    n_idx = loop_idx % n_loop_;
                    m_offset = m_idx * base_m_;
                    n_offset = n_idx * base_n_;
                    y_offset = m_offset * n_ + n_offset;

                    m_actual = (m_idx == (m_loop_per_rank - 1)) ? (m_ / rank_size - m_idx * base_m_) : base_m_;
                    // fixed: ternary was missing its ':' branch (cf. the two identical
                    // n_actual expressions above)
                    n_actual = (cur_aiv_loop2 - 1) * base_n_ + ((n_idx +cur_aiv_loop2 -1 == (n_loop_ -1)) ? (n_ - (n_loop_ - 1) * base_n_) : base_n_);
                    AscendC::printf("comm_idx: %d, block_idx: %d, in_loop_idx: %d, m_actual: %d, n_actual: %d\n", comm_idx, block_idx_, in_loop_idx_block, m_actual, n_actual);
                    dst_stride = n_;

                    ub_to_gm_params.blockLen = n_actual * sizeof(YType);
                    ub_to_gm_params.blockCount = m_actual;
                    ub_to_gm_params.dstStride = (dst_stride - n_actual) * sizeof(YType);
                    ub_to_gm_params.srcStride = (cur_aiv_n - n_actual) * sizeof(YType)/32;

                    // fixed typo: cur_aiv_local (undeclared) -> cur_aiv_loop1; the
                    // second copy starts after the first cur_aiv_loop1 tiles in UB.
                    DataCopyPad(y_gm_[y_offset], src_local[base_n_ * cur_aiv_loop1], ub_to_gm_params);


                }
                PipeBarrier<PIPE_ALL>();
                SetAtomicNone();
                vec_que_mv_.FreeTensor(src_local);
            }
        }

        // Re-arm the compute cores for the next round.
        for(int64_t p = 0; p < p_value; ++p){
            NotifyEventImpl<2, PIPE_MTE3>(V2C_FLAG);
        }
    }
}




//int loop_idx = comm_idx * p_value * used_core_num_ + p * used_core_num_ + block_idx_;
uint32_t dst_stride;
uint64_t y_offset;
GlobalTensor<YType> gm_out;
if(rank_idx == rank){
    // Tile belongs to this rank: write straight into the output tensor.
    dst_stride = n_;
    y_offset = offset_.offset_c + m_ub_loop_idx * ub_calc_m_ * n_;
    gm_out = y_gm_;
    DequantBmm::set_ub_to_gm_params<YType>(ub_to_gm_params, cur_aiv_m, cur_aiv_n, dst_stride);
    wait_flag(PIPE_V, PIPE_MTE3, static_cast<event_t>(EVENT_ID2));
    // SetAtomicAdd<YType>();
    DequantBmm::copy_ub_to_gm<YType>(y_offset, ub_to_gm_params, dst_local, gm_out);
    // SetAtomicNone();
}
else{
    // TODO: communication double-buffer factor (2) is hard-coded below.
    int64_t actual_comm_loop_num = (actual_loop_num / rank_size + p_value_comm - 1) / p_value_comm;
    // fixed typo: p_valye_comm
    int64_t comm_loop_num = (loop_num_per_comm_ / rank_size + p_value_comm - 1)/ p_value_comm;
    int64_t in_comm_idx = in_rank_idx % (loop_num_per_comm_ / rank_size);
    int64_t rank_offset_c = rank_idx * actual_comm_loop_num * p_value_comm * base_m_ * base_n_;

    // Destination offset inside the peer's workspace:
    // double-buffer half + per-rank slice + batch + tile-in-batch + UB sub-loop.
    y_offset = (comm_idx % 2) * base_m_ * base_n_ * comm_loop_num * p_value_comm *rank_size
    + rank_offset_c
    + (in_comm_idx / p_value_comm) * p_value_comm * base_m_ * base_n_
    + (in_comm_idx % p_value_comm) * base_n_
    + m_ub_loop_idx * ub_calc_m_ * base_n_ * p_value_comm;  // fixed typo: m_ub_loop_id

    dst_stride = base_n_ * p_value_comm;  // fixed typo: dfs_stride (dst_stride is what's used below)
    gm_out = buff_gm_[rank];
    DequantBmm::set_ub_to_gm_params<YType>(ub_to_gm_params, cur_aiv_m, cur_aiv_n, dst_stride);
    wait_flag(PIPE_V, PIPE_MTE3, static_cast<event_t>(EVENT_ID2));
    DequantBmm::copy_ub_to_gm<YType>(y_offset, ub_to_gm_params, dst_local, gm_out);
}
vec_que_out_.FreeTensor(dst_local);