# p2p_communication_3 is special for the vpp test only
import contextlib

from . import p2p_communication, p2p_communication_2, p2p_communication_3, p2p_communication_4
from ..megatron import mpu
from torch.nn import CrossEntropyLoss

import torch

'''
Functionality: Executes the 1f1b pipeline
Author: Chuhongjie
Date: 2024-06-13
'''


# NOTE(review, LYH): this looks wrong — forward_step_func is passed in
# (llama2_pp.py line 191) but forward_step below still calls `model` directly,
# so the callable is ignored.
# TODO: `device` parameter — apparently only needed for the sgp path? Verify.
def forward_step(forward_step_func, data_iterator, model, input_tensor, device):
    """Run one forward pass for this pipeline stage.

    If ``input_tensor`` is None (first stage), the batch is pulled from
    ``data_iterator`` and fed as token ids; otherwise the received activations
    are fed as input embeddings. Only the last pipeline stage computes a loss.

    NOTE(review): ``forward_step_func`` is accepted but never called — the
    model is invoked directly. Kept for signature compatibility.

    Returns:
        (output_tensor, loss) — ``loss`` is None on every stage except the last.
    """
    on_last_stage = mpu.is_pipeline_last_stage()

    if input_tensor is None:
        # First stage: read tokens (and labels) from the dataset. The first
        # stage's model partition owns the embedding layer, so it takes ids.
        batch = next(data_iterator)
        input_tensor = batch['input_ids'].to(device)
        labels = batch['label'].to(device)
        if on_last_stage:
            # Degenerate single-stage pipeline: forward with labels for loss.
            output_tensor = model(input_ids=input_tensor, labels=labels)
        else:
            output_tensor = model(input_ids=input_tensor)
    else:
        # Intermediate/last stage: activations arrived from the previous stage.
        input_tensor = input_tensor.to(device)
        if on_last_stage:
            # Labels come from this stage's own iterator copy of the batch.
            labels = next(data_iterator)['label'].to(device)
            output_tensor = model(inputs_embeds=input_tensor, labels=labels)
        else:
            output_tensor = model(inputs_embeds=input_tensor)

    # Only the true last stage has a loss on its model output.
    loss = output_tensor.loss if on_last_stage else None
    return output_tensor, loss


def backward_step(input_tensor, output_tensor, output_tensor_grad, loss=None):
    """Backward step through passed-in output tensor.

    If this is the last stage, ``output_tensor_grad`` is None and gradients
    are produced by back-propagating ``loss``; otherwise
    ``output_tensor_grad`` is the gradient of the loss w.r.t. this stage's
    output tensor, received from the next stage.

    Args:
        input_tensor: activations received from the previous stage (None on
            the first stage).
        output_tensor: this stage's forward output. When ``output_tensor_grad``
            is None it is expected to have a ``.logits`` attribute
            (a HF-style model output).
        output_tensor_grad: upstream gradient, or None on the last stage.
        loss: loss tensor; required when ``output_tensor_grad`` is None.

    Returns:
        (input_tensor_grad, output_tensor_grad) — gradient of the loss
        w.r.t. the input tensor (None on the first stage), and the upstream
        gradient passed through unchanged.
    """
    if input_tensor is not None:
        # input_tensor is usually a non-leaf tensor; retain_grad() makes
        # .grad available after backward so it can be sent to the previous stage.
        input_tensor.retain_grad()

    if output_tensor_grad is None:
        # Last stage: no upstream gradient exists, so derive it from the loss.
        if output_tensor.logits.requires_grad:
            output_tensor.logits.retain_grad()
        loss.backward(retain_graph=False)
    else:
        # Intermediate stage: continue the chain with the received gradient.
        torch.autograd.backward(output_tensor, grad_tensors=output_tensor_grad)

    input_tensor_grad = None
    if input_tensor is not None:
        input_tensor_grad = input_tensor.grad
    return input_tensor_grad, output_tensor_grad


def forward_step_func(input_tensor, model):
    """Default forward callable: apply ``model`` to ``input_tensor`` and return the result."""
    output = model(input_tensor)
    return output


def get_loss(lm_logits, labels):
    """Next-token cross-entropy loss (adapted from transformers *LMHeadModel).

    Shifts logits/labels by one position so tokens < n predict token n,
    then averages cross-entropy over all positions.
    """
    labels = labels.to(lm_logits.device)
    # Shift: position i predicts token i+1.
    shift_logits = lm_logits[..., :-1, :].contiguous()
    shift_labels = labels[..., 1:].contiguous()
    flat_logits = shift_logits.view(-1, shift_logits.size(-1))
    flat_labels = shift_labels.view(-1)
    criterion = CrossEntropyLoss()
    return criterion(flat_logits, flat_labels)


def get_loss2(input, logits, labels, model):
    """Sequence-classification loss from the last token's logits.

    Pools the logits at the final sequence position of each example (mirrors
    transformers' *ForSequenceClassification pooling, without pad-token
    handling) and computes cross-entropy against ``labels``.

    Args:
        input: input batch tensor; only ``input.shape[0]`` (batch size) is used.
        logits: per-token logits of shape (batch, seq, num_labels) —
            assumed; TODO confirm against caller.
        labels: class labels, one per example.
        model: model exposing ``num_labels``.

    Returns:
        Scalar cross-entropy loss tensor.
    """
    batch_size = input.shape[0]
    # Take the logits of the last position (-1) for every example in the batch.
    pooled_logits = logits[torch.arange(batch_size, device=logits.device), -1]

    loss_fct = CrossEntropyLoss()
    loss = loss_fct(pooled_logits.view(-1, model.num_labels),
                    labels.view(-1).to(pooled_logits.device))
    return loss


# gees: this variant uses the old p2p_communication (v1) API; the old-model
# path it pairs with was written for sgp use (the current model is not it).
def forward_backward_pipelining_without_interleaving2(forward_step_func, data_iterator,
                                                      model,
                                                      forward_only, micro_batch, gpus_list, recv_tensor_shape):
    """Run the non-interleaved 1F1B schedule, with communication between
    pipeline stages.

    Args:
        forward_step_func: forward callable, handed to forward_step (which
            currently ignores it and calls ``model`` directly).
        data_iterator: iterator over batches with 'input_ids'/'label' keys.
        model: the model partition owned by this pipeline stage.
        forward_only: if True, skip the cool-down backward passes.
            NOTE(review): the steady 1F1B loop below still performs backward
            steps and backward communication even when forward_only is True —
            confirm this scheduler is never called with forward_only=True.
        micro_batch: number of microbatches in the global batch.
        gpus_list: device list; gpus_list[0] is overwritten with the current
            CUDA device and used for all compute/communication here.
        recv_tensor_shape: shape of activation/gradient tensors exchanged
            between adjacent stages.

    Returns:
        Average microbatch loss on the last pipeline stage, otherwise None
        (unlike Megatron, which returns the per-microbatch loss list).
    """

    micro_batch = micro_batch
    # Warm-up size: earlier stages run more forward-only microbatches before
    # the pipeline reaches the steady 1F1B state; the last stage runs zero.
    num_warmup_microbatches = \
        (mpu.get_pipeline_model_parallel_world_size() -
         mpu.get_pipeline_model_parallel_rank() - 1)
    num_warmup_microbatches = min(
        num_warmup_microbatches,
        micro_batch)
    num_microbatches_remaining = \
        micro_batch - num_warmup_microbatches
    # FIFO queues of in-flight activations awaiting their backward pass.
    input_tensors = []
    output_tensors = []
    loss_list = []
    recv_fwd_tensor_shape = recv_tensor_shape
    recv_bwd_tensor_shape = recv_tensor_shape
    # add for tp: force communication onto the current CUDA device
    gpus_list[0] = torch.cuda.current_device()
    # Warm-up phase: forward passes only.
    for i in range(num_warmup_microbatches):
        print(f'{mpu.get_pipeline_model_parallel_rank()} warm up {i} / {num_warmup_microbatches}')
        # If not the first stage, receive activations from the previous stage.
        input_tensor = p2p_communication.recv_forward(recv_fwd_tensor_shape, gpus_list[0])
        print(f'{mpu.get_pipeline_model_parallel_rank()} recv fwd ,{i} / {num_warmup_microbatches} ')

        # Run the forward step.
        output_tensor, loss = forward_step(forward_step_func, data_iterator, model,
                                           input_tensor, gpus_list[0])
        print(f'{mpu.get_pipeline_model_parallel_rank()}  fwd {i} / {num_warmup_microbatches}')

        # Send the output activations to the next stage.
        p2p_communication.send_forward(output_tensor)
        print(f'{mpu.get_pipeline_model_parallel_rank()}  send fwd {i} / {num_warmup_microbatches}')

        # Save input/output so the matching backward pass can run later.
        input_tensors.append(input_tensor)
        output_tensors.append(output_tensor)
    # Before entering steady state, receive the input for the first 1F1B
    # forward (on the first stage this yields None and data comes from the
    # dataset inside forward_step).
    if num_microbatches_remaining > 0:
        print(f'{mpu.get_pipeline_model_parallel_rank()} get before 1f1b ')

        input_tensor = p2p_communication.recv_forward(recv_fwd_tensor_shape, gpus_list[0])
    # Steady 1F1B phase; per iteration:
    # 1 run fwd, send fwd
    # 2 recv bwd, run bwd
    # 3 send bwd, recv fwd

    for i in range(num_microbatches_remaining):
        print(f'{mpu.get_pipeline_model_parallel_rank()}  1f1b {i} / {num_microbatches_remaining}')

        last_iteration = (i == (num_microbatches_remaining - 1))
        output_tensor, loss = forward_step(forward_step_func, data_iterator, model, input_tensor, gpus_list[0])
        if loss is not None:
            loss_list.append(loss.item())  # gees: only the last stage produces a loss
        p2p_communication.send_forward(output_tensor)
        output_tensor_grad = p2p_communication.recv_backward(recv_bwd_tensor_shape, gpus_list[0])

        input_tensors.append(input_tensor)
        output_tensors.append(output_tensor)

        # Backward runs for the OLDEST in-flight microbatch, not the one just
        # forwarded. NOTE(review): ``loss`` belongs to the just-forwarded
        # microbatch; the pairing is only consistent because the last stage
        # (the only stage with a non-None loss) has zero warm-up microbatches.
        input_tensor, output_tensor = input_tensors.pop(0), output_tensors.pop(0)
        input_tensor_grad, output_tensor_grad = \
            backward_step(input_tensor, output_tensor,
                          output_tensor_grad, loss)  # gees nk
        if last_iteration:
            input_tensor = None
            p2p_communication.send_backward(input_tensor_grad)
        else:
            input_tensor = p2p_communication.recv_forward(recv_fwd_tensor_shape, gpus_list[0])
            p2p_communication.send_backward(input_tensor_grad)
    # Cool-down phase: drain the remaining warm-up microbatches, backward only.
    if not forward_only:
        for i in range(num_warmup_microbatches):
            print(f'{mpu.get_pipeline_model_parallel_rank()} cool done {i} / {num_warmup_microbatches} ')

            input_tensor = input_tensors.pop(0)
            output_tensor = output_tensors.pop(0)
            output_tensor_grad = p2p_communication.recv_backward(recv_bwd_tensor_shape, gpus_list[0])
            input_tensor_grad, output_tensor_grad = \
                backward_step(input_tensor, output_tensor,
                              output_tensor_grad)
            p2p_communication.send_backward(input_tensor_grad)
    loss = None
    if len(loss_list) > 0:
        loss = sum(loss_list) / len(loss_list)  # LYH: Megatron returns loss_list; here it is reduced to one averaged loss
    return loss


# gees: this variant uses the new p2p_communication_2 API and the new model
# path (sgp support removed).
def forward_backward_pipelining_without_interleaving2_remove_sgp(forward_step_func, data_iterator,
                                                                 model,
                                                                 forward_only, micro_batch, gpus_list, recv_tensor_shape):
    """Run the non-interleaved 1F1B schedule, with communication between
    pipeline stages.

    Args:
        forward_step_func: forward callable, handed to forward_step (which
            currently ignores it and calls ``model`` directly).
        data_iterator: iterator over batches with 'input_ids'/'label' keys.
        model: the model partition owned by this pipeline stage.
        forward_only: if True, run forward passes only (no backward steps or
            backward communication).
        micro_batch: number of microbatches in the global batch.
        gpus_list: device list (unused here; compute runs on the current
            CUDA device).
        recv_tensor_shape: shape of activation tensors received from the
            previous stage.

    Returns:
        Average microbatch loss on the last pipeline stage, otherwise None.
    """
    # Warm-up size: earlier stages run more forward-only microbatches before
    # the pipeline reaches the steady 1F1B state; the last stage runs zero.
    num_warmup_microbatches = \
        (mpu.get_pipeline_model_parallel_world_size() -
         mpu.get_pipeline_model_parallel_rank() - 1)
    num_warmup_microbatches = min(
        num_warmup_microbatches,
        micro_batch)
    num_microbatches_remaining = \
        micro_batch - num_warmup_microbatches
    # FIFO queues of in-flight activations awaiting their backward pass.
    input_tensors = []
    output_tensors = []
    loss_list = []
    # Fix: initialize loss so micro_batch == 0 returns None instead of
    # raising NameError at the final `return loss`.
    loss = None

    # Warm-up phase: forward passes only.
    for i in range(num_warmup_microbatches):
        # If not the first stage, receive activations from the previous stage.
        input_tensor = p2p_communication_2.recv_forward(tensor_shape=recv_tensor_shape)
        output_tensor, loss = forward_step(forward_step_func, data_iterator, model,
                                           input_tensor, device=torch.cuda.current_device())
        # Send the output activations to the next stage.
        p2p_communication_2.send_forward(output_tensor)
        # Save input/output so the matching backward pass can run later.
        input_tensors.append(input_tensor)
        output_tensors.append(output_tensor)

    # Before entering steady state, receive the input for the first 1F1B
    # forward (on the first stage data comes from the dataset instead).
    if num_microbatches_remaining > 0:
        input_tensor = p2p_communication_2.recv_forward(tensor_shape=recv_tensor_shape)

    # Steady 1F1B phase; per iteration:
    # 1 run fwd, send fwd
    # 2 recv bwd, run bwd
    # 3 send bwd, recv fwd
    for i in range(num_microbatches_remaining):
        last_iteration = (i == (num_microbatches_remaining - 1))
        output_tensor, loss = forward_step(forward_step_func, data_iterator, model,
                                           input_tensor, device=torch.cuda.current_device())
        if loss is not None:
            loss_list.append(loss.item())
        if forward_only:
            # No gradients flow back in forward-only mode: plain send.
            p2p_communication_2.send_forward(output_tensor)
        else:
            output_tensor_grad = \
                p2p_communication_2.send_forward_recv_backward(output_tensor)
        input_tensors.append(input_tensor)
        output_tensors.append(output_tensor)

        if forward_only:
            if not last_iteration:
                input_tensor = p2p_communication_2.recv_forward(tensor_shape=recv_tensor_shape)
        else:
            # Backward runs for the OLDEST in-flight microbatch, not the one
            # just forwarded.
            input_tensor, output_tensor = input_tensors.pop(0), output_tensors.pop(0)
            input_tensor_grad, output_tensor_grad = \
                backward_step(input_tensor, output_tensor,
                              output_tensor_grad, loss)
            if last_iteration:
                input_tensor = None
                p2p_communication_2.send_backward(input_tensor_grad)
            else:
                input_tensor = p2p_communication_2.send_backward_recv_forward(input_tensor_grad)

    # Cool-down phase: drain the remaining warm-up microbatches, backward only.
    if not forward_only:
        for i in range(num_warmup_microbatches):
            input_tensor = input_tensors.pop(0)
            output_tensor = output_tensors.pop(0)
            output_tensor_grad = p2p_communication_2.recv_backward()
            input_tensor_grad, output_tensor_grad = \
                backward_step(input_tensor, output_tensor,
                              output_tensor_grad)
            p2p_communication_2.send_backward(input_tensor_grad)

    if len(loss_list) > 0:
        loss = sum(loss_list) / len(loss_list)
    return loss


# TODO(gn): fix the device that forward_step_helper passes to forward_step(...)
def forward_backward_pipelining_with_interleaving(forward_step_func,
                                                  data_iterator,
                                                  model,
                                                  forward_only,
                                                  micro_batch,
                                                  gpus_list,
                                                  recv_tensor_shape):
    micro_batch = micro_batch
    num_microbatches = micro_batch  # 换个名,好记一点.

    input_tensors = [[] for _ in range(len(model))]
    output_tensors = [[] for _ in range(len(model))]

    # attention! new add vs normal 1F1B
    if not forward_only:
        output_tensor_grads = [[] for _ in range(len(model))]

    loss_list = []

    recv_fwd_tensor_shape = recv_tensor_shape
    recv_bwd_tensor_shape = recv_tensor_shape
    tensor_shape = recv_tensor_shape  # 统一使用tensor_shape

    # vs1 megatron的tensor_shape
    # tensor_shape = [seq_length, micro_batch_size, config.hidden_size]

    # vs2 gees传入的recv_tensor_shape
    # recv_tensor_shape = (micro_batch_size, tensor_length, cfg.hidden_size)  # gees nk 为什么tensor_shape不一样呢?---->GPTmode不同,output_tensor的shape不同

    pipeline_parallel_size = mpu.get_pipeline_model_parallel_world_size()  # gees 获得PP_group的size，被分配了多少个GPU,pp进程组内有多少个进程/GPU---这是without_interleaving没有的
    pipeline_parallel_rank = mpu.get_pipeline_model_parallel_rank()  # gees without_interleaving也有，得到当前进程/GPU/stage的rank

    if micro_batch % pipeline_parallel_size != 0:
        msg = f'number of microbatches ({num_microbatches}) is not divisible by '
        msg += f'pipeline-model-parallel-size ({pipeline_parallel_size}) '
        msg += 'when using interleaved schedule'
        raise RuntimeError(msg)

    # Compute number of warmup and remaining microbatches.
    num_model_chunks = len(model)

    # vpp 实际上增加了要执行的microbatches的个数
    total_num_microbatches = num_microbatches * num_model_chunks  # gees total_num_microbatches = num_microbatches * vpp_size
    all_warmup_microbatches = False

    if forward_only:
        num_warmup_microbatches = total_num_microbatches
    else:
        # Run all forward passes and then all backward passes if number of
        # microbatches is just the number of pipeline stages.
        # Otherwise, perform (num_model_chunks-1)*pipeline_parallel_size on
        # all workers, followed by more microbatches after depending on
        # stage ID (more forward passes for earlier stages, later stages can
        # immediately start with 1F1B).
        if num_microbatches == pipeline_parallel_size:  # gees 如果num_microbatches等于pipeline_parallel_size,并行方式跟GPipe中的思路一样，先全forward再走backward，在warmup阶段把所有的microbatch都训练完。
            num_warmup_microbatches = total_num_microbatches
            all_warmup_microbatches = True
        else:
            num_warmup_microbatches = (pipeline_parallel_size - pipeline_parallel_rank - 1) * 2
            num_warmup_microbatches += (num_model_chunks - 1) * pipeline_parallel_size
            num_warmup_microbatches = min(num_warmup_microbatches, total_num_microbatches)
    num_microbatches_remaining = total_num_microbatches - num_warmup_microbatches

    # print(f"当前进程rank为：{pipeline_parallel_rank},total_num_microbatches={total_num_microbatches},num_microbatches={num_microbatches},num_model_chunks={num_model_chunks}")
    # print(f"当前进程rank为：{pipeline_parallel_rank},num_warmup_microbatches={num_warmup_microbatches},num_microbatches_remaining={num_microbatches_remaining}")


    # print()

    # Add two important helper functions.
    def get_model_chunk_id(microbatch_id, forward):
        """Helper method to get the model chunk ID given the iteration number."""
        microbatch_id_in_group = microbatch_id % (pipeline_parallel_size * num_model_chunks)  # 计算微批次在一个完整流水线循环中的相对位置。见那张图
        model_chunk_id = microbatch_id_in_group // pipeline_parallel_size  # 确定应该处理这个微批次的模型块 ID。
        if not forward:  # 如果是反向传播，翻转模型块的顺序：
            model_chunk_id = num_model_chunks - model_chunk_id - 1
        return model_chunk_id

    # gees   获得真实的micro-batch 获取序号（K/forward_k../iteration_id）得到真实的microbatch_id
    def get_microbatch_id_in_model_chunk(iteration_id, forward):
        """Helper method to get the microbatch_id within model chunk given the iteration number."""
        assert forward
        iteration_group_id = iteration_id // (pipeline_parallel_size * num_model_chunks)
        microbatch_id_in_model_chunk = (iteration_group_id * pipeline_parallel_size) + (
                iteration_id % pipeline_parallel_size
        )
        return microbatch_id_in_model_chunk

    def forward_step_helper(microbatch_id, current_microbatch, checkpoint_activations_microbatch):
        """
            Helper method to run forward step with model split into chunks
                (run set_virtual_pipeline_model_parallel_rank() before calling
                forward_step()).

            current_microbatch, checkpoint_activations_microbatch are not used in Geesibling now
        """

        # get_model_chunk_id(microbatch_id, forward=True) 函数用于确定给定的微批次应该由哪个模型块处理!
        model_chunk_id = get_model_chunk_id(microbatch_id, forward=True)

        mpu.set_virtual_pipeline_model_parallel_rank(model_chunk_id)  # mpu.is_pipeline_first_stage():判断需要使用

        # forward step
        # 只可能在model_chunk_id=0中input_tensors被追加null.因为当mpu.is_pipeline_first_stage()=True时,真实的model_chunk_id一定为0,即Vpp_rank=model_chunk_id=0
        if mpu.is_pipeline_first_stage():
            if len(input_tensors[model_chunk_id]) == len(output_tensors[model_chunk_id]):
                input_tensors[model_chunk_id].append(None)

        input_tensor = input_tensors[model_chunk_id][-1]

        # attention! forward_step is different with megatron.Geesibling has loss returned,no num_tokens returned yet.
        # forward_step in gees:
        # If first stage, input tensor is obtained from data_iterator, otherwise passed-in input_tensor is used.
        # Returns output tensor and loss.
        # loss is None unless this is in the true last stage:loss = output_tensor.loss
        # gn TODO 如果在多机上，device不能使用pipeline_parallel_rank，应该改用args.local_rank..在单机上刚好一样而已,所以跑通了
        output_tensor, loss = forward_step(forward_step_func,
                                           data_iterator[model_chunk_id],
                                           model[model_chunk_id],
                                           input_tensor,
                                           torch.cuda.current_device())
        output_tensors[model_chunk_id].append(output_tensor)
        # gn 修正错误 TypeError: unsupported operand type(s) for +: 'int' and 'NoneType'
        # loss_list.append(loss)  # different vs megatron
        if loss is not None:
            loss_list.append(loss.item())

        # if forward-only, no need to save tensors for a backward pass
        if forward_only:
            input_tensors[model_chunk_id].pop()
            output_tensors[model_chunk_id].pop()

        return output_tensor, loss

    def backward_step_helper(microbatch_id, loss=None):
        """Helper method to run backward step with model split into chunks
                (run set_virtual_pipeline_model_parallel_rank() before calling
                backward_step())."""
        model_chunk_id = get_model_chunk_id(microbatch_id, forward=False)
        mpu.set_virtual_pipeline_model_parallel_rank(model_chunk_id)

        # 对于真正的last_stage要做一些额外处理,因为last_stage的output_tensor_grads是None(没有其他stage给他传output_tensor_grads)
        if mpu.is_pipeline_last_stage():
            if len(output_tensor_grads[model_chunk_id]) == 0:
                output_tensor_grads[model_chunk_id].append(None)

        input_tensor = input_tensors[model_chunk_id].pop(0)
        output_tensor = output_tensors[model_chunk_id].pop(0)
        output_tensor_grad = output_tensor_grads[model_chunk_id].pop(0)

        # print(f"当前进程序号为：{pipeline_parallel_rank}input_tensor{input_tensor},output_tensor{output_tensor},output_tensor_grad为{output_tensor_grad}")
        # print()
        # print(f"当前进程序号为：{pipeline_parallel_rank},output_tensor为{output_tensor}")
        # print()
        # print(f"当前进程序号为：{pipeline_parallel_rank},output_tensor_grad为{output_tensor_grad}")
        # print()
        # if output_tensor_grad is None:
        #     print("output_tensor_grad 不应该为空")
        #     print()

        input_tensor_grad, output_tensor_grad = backward_step(input_tensor,
                                                              output_tensor,
                                                              output_tensor_grad,
                                                              loss)
        # gn 8.23 修正ValueError: too many values to unpack (expected 2)
        return input_tensor_grad, output_tensor_grad

    # Run warmup forward passes.
    mpu.set_virtual_pipeline_model_parallel_rank(0)

    # input_tensors[0].append(p2p_communication.recv_forward(recv_fwd_tensor_shape, config))
    # gn p2p_communication_3.recv_forward
    input_tensors[0].append(p2p_communication_3.recv_forward(tensor_shape=tensor_shape))

    for k in range(num_warmup_microbatches):
        # cur_model_chunk_id = get_model_chunk_id(k, forward=True)
        checkpoint_activations_microbatch = None

        current_microbatch = get_microbatch_id_in_model_chunk(k, forward=True)
        # print(f"当前进程序号为：{pipeline_parallel_rank},model_chunk_id为{get_model_chunk_id(k, forward=True)},microbath为{current_microbatch}准备开始执行forward_step_helper---------------------------------------")

        output_tensor, loss = forward_step_helper(
            k, current_microbatch, checkpoint_activations_microbatch
        )
        # print(f"当前进程序号为：{pipeline_parallel_rank},model_chunk_id为{get_model_chunk_id(k, forward=True)},microbath为{current_microbatch}的一个forward_step_helper结束---------------------------------------")
        # Determine if tensor should be received from previous stage.
        next_forward_model_chunk_id = get_model_chunk_id(k + 1, forward=True)
        recv_prev = True
        if mpu.is_pipeline_first_stage(ignore_virtual=True):
            if next_forward_model_chunk_id == 0:
                recv_prev = False
        if k == (total_num_microbatches - 1):
            recv_prev = False

        # gn 8.23发现一处错误，修正，取消ignore_virtual=True
        if mpu.is_pipeline_last_stage():
            output_tensor = None

        # ---gees 先采取不重叠通信
        if (  # gees nk 特殊情况处理最后一个warmup的mb
                k == (num_warmup_microbatches - 1)
                and not forward_only
                and not all_warmup_microbatches
        ):
            input_tensor_grad = None
            recv_next = True
            if mpu.is_pipeline_last_stage(ignore_virtual=True):
                recv_next = False
            # print(f"当前处在warm_up阶段，已经完成第{k}个warm-up的前向过程，已经来到最后一个mb，当前的进程序号为:{pipeline_parallel_rank},准备开始接受input_tensor和output_tensor_grad：")
            input_tensor, output_tensor_grad = p2p_communication_3.send_forward_backward_recv_forward_backward(
                output_tensor,
                input_tensor_grad,
                recv_prev=recv_prev,
                recv_next=recv_next,
                # tensor_shape=tensor_shape,
                # config=config,
            )
            # gees 在GPU0，这里的input_tensor=NULL。在GPU_last这里的output_tensor_grad=NULL
            # print(f"当前处在warm_up阶段，已经完成第{k}个warm-up的前向过程，已经来到最后一个mb，接受到了input_tensor和output_tensor_grad：{type(input_tensor)},{type(output_tensor_grad)}")
            output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad)
        else:
            # print(f"当前处在warm_up阶段，已经完成第{k}个warm-up的前向过程，当前的进程序号为:{pipeline_parallel_rank},准备开始接受input_tensor：")
            input_tensor = p2p_communication_3.send_forward_recv_forward(
                output_tensor,
                recv_prev=recv_prev,
                # tensor_shape=tensor_shape,
                # config=config
            )
            # if input_tensor is not None:
            #     print(f"当前处在warm_up阶段，已经完成第{k}个warm-up的前向过程，当前的进程序号为:{pipeline_parallel_rank},成功接受到了input_tensor：{input_tensor.shape}")
        input_tensors[next_forward_model_chunk_id].append(input_tensor)

        # GeeSibling now has no deallocate_output_tensor fuction while megatron has
        # deallocate_output_tensor(output_tensor, config.deallocate_pipeline_outputs)

    # warmup state is over
    # Move to another state.
    # Run 1F1B in steady state.
    for k in range(num_microbatches_remaining):
        # Forward pass.
        forward_k = k + num_warmup_microbatches  # 只是一个序号

        checkpoint_activations_microbatch = None

        cur_model_chunk_id = get_model_chunk_id(forward_k, forward=True)  # 当前forward_k应当由当前worker哪个chunk来负责。
        current_microbatch = get_microbatch_id_in_model_chunk(forward_k, forward=True)  # micro_batch的真实标号。

        # deallocate_output_tensor(output_tensor, config.deallocate_pipeline_outputs)

        output_tensor, loss = forward_step_helper(
            forward_k, current_microbatch, checkpoint_activations_microbatch
        )

        # Backward pass.
        backward_k = k
        # print(f"进程{pipeline_parallel_rank}的第{k}次反向传播开始----------------")
        input_tensor_grad, output_tensor_grad = backward_step_helper(backward_k, loss)
        # print(f"进程{pipeline_parallel_rank}的第{k}次反向传播结束-----------------------------------")

        # Send output_tensor and input_tensor_grad, receive input_tensor and output_tensor_grad.

        # Determine if current stage has anything to send in either direction,
        # otherwise set tensor to None.

        # f
        forward_model_chunk_id = get_model_chunk_id(forward_k, forward=True)  # 当前forward_k应当由当前worker哪个chunk来负责
        mpu.set_virtual_pipeline_model_parallel_rank(forward_model_chunk_id)
        # Last virtual stage no activation tensor to send
        if mpu.is_pipeline_last_stage():
            output_tensor = None

        # b
        backward_model_chunk_id = get_model_chunk_id(backward_k, forward=False)
        mpu.set_virtual_pipeline_model_parallel_rank(backward_model_chunk_id)
        # First virtual stage no activation gradient tensor to send
        if mpu.is_pipeline_first_stage():  # 考虑了virtual_stage,反向传播最后一个阶段处于first_stage
            input_tensor_grad = None

        # Determine if peers are sending, and where in data structure to put
        # received tensors.
        recv_prev = True
        if mpu.is_pipeline_first_stage(ignore_virtual=True):
            # gees here ignore_virtual must be true！For deciding next micro-bacth need recv_input or not.
            #  First stage is ahead of last stage by (pipeline_parallel_size - 1).
            next_forward_model_chunk_id = get_model_chunk_id(forward_k - (pipeline_parallel_size - 1), forward=True)
            # gees top is false next_forward_model_chunk_id.Actually false next_forward_model_chunk_id is device_last's current model_chunk_id,
            # which is equal to device_0's (real_next_forward_model_chunk_id - 1).
            # use false next forward_model_chunk_id to decide whether to recv_prev.    add by gees
            if next_forward_model_chunk_id == (num_model_chunks - 1):
                recv_prev = False
            next_forward_model_chunk_id += 1  # real next_forward_model_chunk_id
        else:
            next_forward_model_chunk_id = get_model_chunk_id(forward_k + 1, forward=True)

        # If last iteration, don't receive; we already received one extra
        # before the start of the for loop.
        if k == (num_microbatches_remaining - 1):  # 下面没有更多的前向传播了
            recv_prev = False

        # ------------------------
        # error: next is overlap
        # # Send activation tensor to the next stage and receive activation tensor from the
        # # previous stage
        # # gees from here we can see that activation tensor actually is out_tensor
        # # gees 相比于without interleaving，这里多了一个recv_forward（接受recv_prev）为当前worker下一个mb的输入input_tensor做准备
        #
        # input_tensor, fwd_wait_handles = send_forward_recv_forward(
        #     output_tensor,
        #     recv_prev=recv_prev,
        #     tensor_shape=tensor_shape,
        #     config=config,
        #     overlap_p2p_comm=True,
        # )
        #
        # # assert fwd_wait_handles is not None   gn:不清楚这里是做什么的
        # if bwd_wait_handles is not None:
        #     for req in bwd_wait_handles:
        #         req.wait()
        # ---------------------------------

        # Determine if the current virtual stage has an activation gradient tensor to receive
        recv_next = True
        if mpu.is_pipeline_last_stage(ignore_virtual=True):
            # Last stage is ahead of first stage by (pipeline_parallel_size - 1).
            # gees Same reason in 1f1b forward
            next_backward_model_chunk_id = get_model_chunk_id(
                backward_k - (pipeline_parallel_size - 1), forward=False
            )
            if next_backward_model_chunk_id == 0:
                recv_next = False
            next_backward_model_chunk_id -= 1
        else:
            next_backward_model_chunk_id = get_model_chunk_id(backward_k + 1, forward=False)

        input_tensor, output_tensor_grad = p2p_communication_3.send_forward_backward_recv_forward_backward(
            output_tensor,
            input_tensor_grad,
            recv_prev=recv_prev,
            recv_next=recv_next,
        )
        # deallocate_output_tensor(output_tensor, config.deallocate_pipeline_outputs)

        # either overlap or not---------------------------------
        # Put input_tensor and output_tensor_grad in data structures in the
        # right location.
        if recv_prev:
            input_tensors[next_forward_model_chunk_id].append(input_tensor)  # gees 正式为下一个foward做准备
        if recv_next:
            output_tensor_grads[next_backward_model_chunk_id].append(output_tensor_grad)  # gees 正式为下一个backward做准备

    # deallocate_output_tensor(output_tensor, config.deallocate_pipeline_outputs)

    # 1F1B/steady state is over
    # Move to next state
    # Run cooldown backward passes (flush out pipeline).
    if not forward_only:

        # gn not sure what this is doing
        # if config.overlap_p2p_comm and bwd_wait_handles is not None:
        #     for wait_handle in bwd_wait_handles:
        #         wait_handle.wait()

        if all_warmup_microbatches:
            # output_tensor_grads[num_model_chunks - 1].append(
            #     p2p_communication_3.recv_backward(tensor_shape)  # TODO
            # )
            output_tensor_grad = p2p_communication_3.recv_backward(tensor_shape)
            output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad)

        flag = True
        for k in range(num_microbatches_remaining, total_num_microbatches):
            if flag:
                print(f"total_num_microbatches is {total_num_microbatches}")
                print(f"num_warmup_microbatches is{num_warmup_microbatches}")
                print(f"num_microbatches_remaining is{num_microbatches_remaining}")
                print(f"当前处于{mpu.get_pipeline_model_parallel_rank()}-gpu的第{k + num_microbatches_remaining + 1}个虚拟微批次的反向传播开始")
                print(k + num_microbatches_remaining + 1)
                print(f"当前处于{mpu.get_pipeline_model_parallel_rank()}-gpu的第{k}个cooldown微批次的反向传播开始")
                print(k)
                flag = False
            # gn 8.23 修正RuntimeError: Invalid function argument. Expected parameter `tensor` to be of type torch.Tensor.
            # gn input_tensor_grad = backward_step_helper(k)是错误的。因为backward_step_helper返回的是一个元组Tuple[Tensor,Tensor]
            input_tensor_grad, output_tensor_grad = backward_step_helper(k)  # 这里不传loss,
            next_backward_model_chunk_id = get_model_chunk_id(k + 1, forward=False)
            recv_next = True
            if mpu.is_pipeline_last_stage(ignore_virtual=True):
                if next_backward_model_chunk_id == (num_model_chunks - 1):
                    recv_next = False
            if k == (total_num_microbatches - 1):
                recv_next = False
            # output_tensor_grads[next_backward_model_chunk_id].append(
            #     p2p_communication_3.send_backward_recv_backward(
            #         input_tensor_grad, recv_next=recv_next
            #     )
            # )
            output_tensor_grad = p2p_communication_3.send_backward_recv_backward(
                input_tensor_grad, recv_next=recv_next
            )
            output_tensor_grads[next_backward_model_chunk_id].append(output_tensor_grad)

    loss_finial = None
    for i, loss in enumerate(loss_list):
        print(f"loss_list[{i}] = {loss} (type: {type(loss)})")
    print("-------------------------------------------------------------------------------------")
    if len(loss_list) > 0:
        loss_finial = sum(loss_list) / len(loss_list)
    return loss_finial




def forward_backward_pipelining_with_interleaving_overlap_comm(forward_step_func,
                                                  data_iterator,
                                                  model,
                                                  forward_only,
                                                  micro_batch,
                                                  gpus_list,
                                                  recv_tensor_shape):
    """Run the interleaved (virtual-pipeline) 1F1B schedule with overlapped
    point-to-point communication.

    Each worker owns ``len(model)`` model chunks; every microbatch therefore
    visits this worker ``len(model)`` times, and the schedule interleaves
    forward and backward passes across chunks.  Communication is issued
    asynchronously (``overlap_p2p_comm=True``) and completed via wait handles
    just before the received tensor is consumed.

    Args:
        forward_step_func: forwarded verbatim to ``forward_step``.
        data_iterator: list of data iterators, one per local model chunk.
        model: list of model chunks held by this pipeline stage.
        forward_only: if True, skip all backward passes.
        micro_batch: number of microbatches per global batch; must be
            divisible by the pipeline-parallel world size.
        gpus_list: unused here (kept for signature compatibility with the
            other schedule variants).
        recv_tensor_shape: shape of every activation/gradient tensor
            exchanged between adjacent pipeline stages.

    Returns:
        Average of the collected microbatch losses (only non-None on the
        true last stage), or None if no loss was produced.

    Raises:
        RuntimeError: if ``micro_batch`` is not divisible by the
            pipeline-parallel world size.
    """
    num_microbatches = micro_batch

    # Per-chunk bookkeeping of saved activations so the matching backward
    # pass can pop them later (FIFO order).
    input_tensors = [[] for _ in range(len(model))]
    output_tensors = [[] for _ in range(len(model))]
    if not forward_only:
        output_tensor_grads = [[] for _ in range(len(model))]

    loss_list = []

    # Single shape used for all p2p transfers in both directions.
    tensor_shape = recv_tensor_shape

    pipeline_parallel_size = mpu.get_pipeline_model_parallel_world_size()
    pipeline_parallel_rank = mpu.get_pipeline_model_parallel_rank()

    if micro_batch % pipeline_parallel_size != 0:
        msg = f'number of microbatches ({num_microbatches}) is not divisible by '
        msg += f'pipeline-model-parallel-size ({pipeline_parallel_size}) '
        msg += 'when using interleaved schedule'
        raise RuntimeError(msg)

    # Compute number of warmup and remaining microbatches.
    num_model_chunks = len(model)

    # Virtual pipelining multiplies the number of schedule iterations: every
    # microbatch passes through each local model chunk once.
    total_num_microbatches = num_microbatches * num_model_chunks
    all_warmup_microbatches = False

    if forward_only:
        num_warmup_microbatches = total_num_microbatches
    else:
        if num_microbatches == pipeline_parallel_size:
            # Degenerate GPipe-like case: run all forward passes first, then
            # all backward passes (everything happens in warmup/cooldown).
            num_warmup_microbatches = total_num_microbatches
            all_warmup_microbatches = True
        else:
            # Perform (num_model_chunks - 1) * pipeline_parallel_size passes
            # on all workers, plus extra forward passes for earlier stages so
            # later stages can start 1F1B immediately.
            num_warmup_microbatches = (pipeline_parallel_size - pipeline_parallel_rank - 1) * 2
            num_warmup_microbatches += (num_model_chunks - 1) * pipeline_parallel_size
            num_warmup_microbatches = min(num_warmup_microbatches, total_num_microbatches)
    num_microbatches_remaining = total_num_microbatches - num_warmup_microbatches

    def get_model_chunk_id(microbatch_id, forward):
        """Map a schedule iteration number to the local model chunk that runs it."""
        # Position of this iteration within one full pipeline cycle.
        microbatch_id_in_group = microbatch_id % (pipeline_parallel_size * num_model_chunks)
        model_chunk_id = microbatch_id_in_group // pipeline_parallel_size
        if not forward:
            # Backward passes traverse the model chunks in reverse order.
            model_chunk_id = num_model_chunks - model_chunk_id - 1
        return model_chunk_id

    def get_microbatch_id_in_model_chunk(iteration_id, forward):
        """Map a schedule iteration number to the real microbatch index within its chunk."""
        assert forward
        iteration_group_id = iteration_id // (pipeline_parallel_size * num_model_chunks)
        microbatch_id_in_model_chunk = (iteration_group_id * pipeline_parallel_size) + (
                iteration_id % pipeline_parallel_size
        )
        return microbatch_id_in_model_chunk

    def forward_step_helper(microbatch_id, current_microbatch, checkpoint_activations_microbatch):
        """Run a forward step on the chunk owning ``microbatch_id``.

        Sets the virtual pipeline rank before calling ``forward_step``.
        ``current_microbatch`` and ``checkpoint_activations_microbatch`` are
        currently unused by this implementation.
        """
        model_chunk_id = get_model_chunk_id(microbatch_id, forward=True)
        mpu.set_virtual_pipeline_model_parallel_rank(model_chunk_id)

        # Only the true first stage (virtual rank 0 on the first device) feeds
        # from the data iterator; it gets a None placeholder so the
        # input/output bookkeeping lists stay aligned.
        if mpu.is_pipeline_first_stage():
            if len(input_tensors[model_chunk_id]) == len(output_tensors[model_chunk_id]):
                input_tensors[model_chunk_id].append(None)

        input_tensor = input_tensors[model_chunk_id][-1]

        # NOTE(review): on multi-node setups the device should come from the
        # local rank; torch.cuda.current_device() matches the single-node
        # behaviour this code was validated on — confirm before scaling out.
        # forward_step returns (output_tensor, loss); loss is None except on
        # the true last stage.
        output_tensor, loss = forward_step(forward_step_func,
                                           data_iterator[model_chunk_id],
                                           model[model_chunk_id],
                                           input_tensor,
                                           torch.cuda.current_device())
        output_tensors[model_chunk_id].append(output_tensor)

        # Collect only real losses; appending None would break the final
        # averaging with a TypeError.
        if loss is not None:
            loss_list.append(loss.item())

        # If forward-only, no backward pass will consume these activations.
        if forward_only:
            input_tensors[model_chunk_id].pop()
            output_tensors[model_chunk_id].pop()

        return output_tensor, loss

    def backward_step_helper(microbatch_id, loss=None):
        """Run a backward step on the chunk owning ``microbatch_id``.

        Sets the virtual pipeline rank before calling ``backward_step``.
        Returns the (input_tensor_grad, output_tensor_grad) pair.
        """
        model_chunk_id = get_model_chunk_id(microbatch_id, forward=False)
        mpu.set_virtual_pipeline_model_parallel_rank(model_chunk_id)

        # The true last stage never receives an upstream gradient, so give it
        # an explicit None placeholder.
        if mpu.is_pipeline_last_stage():
            if len(output_tensor_grads[model_chunk_id]) == 0:
                output_tensor_grads[model_chunk_id].append(None)

        input_tensor = input_tensors[model_chunk_id].pop(0)
        output_tensor = output_tensors[model_chunk_id].pop(0)
        output_tensor_grad = output_tensor_grads[model_chunk_id].pop(0)

        input_tensor_grad, output_tensor_grad = backward_step(input_tensor,
                                                              output_tensor,
                                                              output_tensor_grad,
                                                              loss)
        return input_tensor_grad, output_tensor_grad

    # ------------------------------------------------------------------
    # Phase 1: warmup forward passes.
    # ------------------------------------------------------------------
    mpu.set_virtual_pipeline_model_parallel_rank(0)
    input_tensors[0].append(p2p_communication_4.recv_forward(tensor_shape=tensor_shape))

    # Outstanding async communication handles.
    fwd_wait_handles = None
    bwd_wait_handles = None

    for k in range(num_warmup_microbatches):

        # Complete the overlapped forward recv launched last iteration before
        # consuming its tensor.
        if fwd_wait_handles is not None:
            for req in fwd_wait_handles:
                req.wait()

        checkpoint_activations_microbatch = None
        current_microbatch = get_microbatch_id_in_model_chunk(k, forward=True)

        output_tensor, loss = forward_step_helper(
            k, current_microbatch, checkpoint_activations_microbatch
        )

        # Determine if an activation tensor should be received from the
        # previous stage for the next iteration.
        next_forward_model_chunk_id = get_model_chunk_id(k + 1, forward=True)
        recv_prev = True
        if mpu.is_pipeline_first_stage(ignore_virtual=True):
            if next_forward_model_chunk_id == 0:
                recv_prev = False
        if k == (total_num_microbatches - 1):
            recv_prev = False

        # The last virtual stage has no activation tensor to send downstream
        # (note: virtual check, NOT ignore_virtual).
        if mpu.is_pipeline_last_stage():
            output_tensor = None

        # Overlapped communication: launch send/recv and keep the handles.
        input_tensor, fwd_wait_handles = p2p_communication_4.send_forward_recv_forward(
            output_tensor,
            recv_prev=recv_prev,
            overlap_p2p_comm=True,
        )

        if (
                k == (num_warmup_microbatches - 1)
                and not forward_only
                and not all_warmup_microbatches
        ):
            # Last warmup microbatch: prime the first backward pass of the
            # steady state by receiving an upstream gradient.
            input_tensor_grad = None
            recv_next = True
            if mpu.is_pipeline_last_stage(ignore_virtual=True):
                recv_next = False

            (
                output_tensor_grad,
                bwd_wait_handles,
            ) = p2p_communication_4.send_backward_recv_backward(
                input_tensor_grad,
                recv_next=recv_next,
                overlap_p2p_comm=True,
            )

            output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad)

        input_tensors[next_forward_model_chunk_id].append(input_tensor)

    # ------------------------------------------------------------------
    # Phase 2: steady state — run 1F1B.
    # ------------------------------------------------------------------
    for k in range(num_microbatches_remaining):
        # Forward pass.
        forward_k = k + num_warmup_microbatches  # schedule iteration number

        checkpoint_activations_microbatch = None
        current_microbatch = get_microbatch_id_in_model_chunk(forward_k, forward=True)

        # Wait for the overlapped forward recv launched in the previous
        # iteration (or in warmup).
        if fwd_wait_handles is not None:
            for req in fwd_wait_handles:
                req.wait()

        output_tensor, loss = forward_step_helper(
            forward_k, current_microbatch, checkpoint_activations_microbatch
        )

        # Forward direction: decide what to send and what to receive.
        forward_model_chunk_id = get_model_chunk_id(forward_k, forward=True)
        mpu.set_virtual_pipeline_model_parallel_rank(forward_model_chunk_id)
        # Last virtual stage has no activation tensor to send.
        if mpu.is_pipeline_last_stage():
            output_tensor = None

        recv_prev = True
        if mpu.is_pipeline_first_stage(ignore_virtual=True):
            # The first stage runs ahead of the last stage by
            # (pipeline_parallel_size - 1) iterations.  Probe that shifted
            # iteration to decide whether to receive, then advance by one to
            # obtain the real next chunk id.
            next_forward_model_chunk_id = get_model_chunk_id(forward_k - (pipeline_parallel_size - 1), forward=True)
            if next_forward_model_chunk_id == (num_model_chunks - 1):
                recv_prev = False
            next_forward_model_chunk_id += 1  # real next_forward_model_chunk_id
        else:
            next_forward_model_chunk_id = get_model_chunk_id(forward_k + 1, forward=True)

        # If last iteration, don't receive; one extra activation was already
        # received before the loop started.
        if k == (num_microbatches_remaining - 1):
            recv_prev = False

        # Launch the overlapped forward send/recv.
        input_tensor, fwd_wait_handles = p2p_communication_4.send_forward_recv_forward(
            output_tensor,
            recv_prev=recv_prev,
            overlap_p2p_comm=True,
        )

        # Make sure the previous backward communication completed before the
        # backward pass that consumes its result.
        if bwd_wait_handles is not None:
            for req in bwd_wait_handles:
                req.wait()

        # Backward pass.  With overlapping, it deliberately runs AFTER the
        # forward send/recv has been launched (ordering differs from the
        # non-overlapping schedule).
        backward_k = k
        input_tensor_grad, output_tensor_grad = backward_step_helper(backward_k, loss)

        backward_model_chunk_id = get_model_chunk_id(backward_k, forward=False)
        mpu.set_virtual_pipeline_model_parallel_rank(backward_model_chunk_id)
        # First virtual stage has no activation gradient to send upstream.
        if mpu.is_pipeline_first_stage():
            input_tensor_grad = None

        # Backward direction: mirror of the forward-direction probing above.
        recv_next = True
        if mpu.is_pipeline_last_stage(ignore_virtual=True):
            # Last stage is ahead of first stage by (pipeline_parallel_size - 1).
            next_backward_model_chunk_id = get_model_chunk_id(
                backward_k - (pipeline_parallel_size - 1), forward=False
            )
            if next_backward_model_chunk_id == 0:
                recv_next = False
            next_backward_model_chunk_id -= 1
        else:
            next_backward_model_chunk_id = get_model_chunk_id(backward_k + 1, forward=False)

        output_tensor_grad, bwd_wait_handles = p2p_communication_4.send_backward_recv_backward(
            input_tensor_grad,
            recv_next=recv_next,
            overlap_p2p_comm=True,
        )

        # Stash the received tensors where the consuming passes expect them.
        if recv_prev:
            input_tensors[next_forward_model_chunk_id].append(input_tensor)
        if recv_next:
            output_tensor_grads[next_backward_model_chunk_id].append(output_tensor_grad)

    # ------------------------------------------------------------------
    # Phase 3: cooldown backward passes (flush out the pipeline).
    # ------------------------------------------------------------------
    if not forward_only:

        # Drain outstanding backward communication from the steady state.
        # FIX: guard against None — when all_warmup_microbatches is True the
        # steady-state loop never ran and no backward handles were created,
        # so iterating unconditionally raised a TypeError.
        if bwd_wait_handles is not None:
            for wait_handle in bwd_wait_handles:
                wait_handle.wait()

        if all_warmup_microbatches:
            # Nothing primed the first backward pass; receive its upstream
            # gradient now (blocking).
            output_tensor_grad = p2p_communication_4.recv_backward(tensor_shape)
            output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad)

        for k in range(num_microbatches_remaining, total_num_microbatches):
            # backward_step_helper returns a (input_grad, output_grad) tuple;
            # unpack it rather than passing the tuple on as a tensor.
            input_tensor_grad, output_tensor_grad = backward_step_helper(k)
            next_backward_model_chunk_id = get_model_chunk_id(k + 1, forward=False)
            recv_next = True
            if mpu.is_pipeline_last_stage(ignore_virtual=True):
                if next_backward_model_chunk_id == (num_model_chunks - 1):
                    recv_next = False
            if k == (total_num_microbatches - 1):
                recv_next = False
            # Non-overlapping call here: returns the tensor directly.
            output_tensor_grad = p2p_communication_4.send_backward_recv_backward(
                input_tensor_grad, recv_next=recv_next
            )
            output_tensor_grads[next_backward_model_chunk_id].append(output_tensor_grad)

    # Average the collected losses (non-empty only on the true last stage).
    loss_finial = None
    for i, loss in enumerate(loss_list):
        print(f"loss_list[{i}] = {loss} (type: {type(loss)})")
    print("-------------------------------------------------------------------------------------")
    if len(loss_list) > 0:
        loss_finial = sum(loss_list) / len(loss_list)
    return loss_finial




def forward_backward_pipelining_without_interleaving2_remove_sgp_fix_dp(forward_step_func, data_iterator,
                                                                 model,
                                                                 forward_only, micro_batch, gpus_list, recv_tensor_shape):
    """Run a non-interleaved 1F1B pipeline schedule with manual data-parallel
    gradient synchronization.

    The whole schedule executes inside the model's ``no_sync`` context (when
    ``model`` is a ``torch.nn.parallel.DistributedDataParallel``) so DDP's
    automatic reducers stay silent; gradients are then all-reduced by hand over
    the data-parallel group after the cool-down phase.

    Args:
        forward_step_func: passed through to ``forward_step``.
        data_iterator: iterator over microbatches; only the last pipeline stage
            reads labels from it (see ``forward_step``).
        model: this pipeline stage's module, possibly wrapped in DDP.
        forward_only: if True, run forward passes only (no backward).
        micro_batch: number of microbatches in the global batch.
        gpus_list: unused here; kept for signature compatibility with the
            sibling schedule variants.
        recv_tensor_shape: shape of the activation tensor received from the
            previous stage.

    Returns:
        Average loss over this stage's microbatches if any loss was produced
        (true last stage only), otherwise ``None``.
    """
    # gees add to fix dp sync: suppress DDP's automatic grad reduction for the
    # whole schedule; a plain module gets a no-op context instead.
    if isinstance(model, torch.nn.parallel.distributed.DistributedDataParallel):
        no_sync_func = model.no_sync
    else:
        no_sync_func = contextlib.nullcontext
    no_sync_context = None

    def disable_grad_sync():
        """Disable asynchronous grad reductions"""
        nonlocal no_sync_context
        if no_sync_context is None:
            no_sync_context = no_sync_func()
            no_sync_context.__enter__()

    def enable_grad_sync():
        """Enable asynchronous grad reductions"""
        nonlocal no_sync_context
        if no_sync_context is not None:
            no_sync_context.__exit__(None, None, None)
            no_sync_context = None

    # Warm-up size for this stage: earlier stages run more forwards before the
    # steady 1F1B phase starts; never more than the total microbatch count.
    num_warmup_microbatches = \
        (mpu.get_pipeline_model_parallel_world_size() -
         mpu.get_pipeline_model_parallel_rank() - 1)
    num_warmup_microbatches = min(
        num_warmup_microbatches,
        micro_batch)
    num_microbatches_remaining = \
        micro_batch - num_warmup_microbatches

    input_tensors = []
    output_tensors = []
    loss_list = []

    with no_sync_func():
        # Warm-up phase: forward passes only.
        for i in range(num_warmup_microbatches):
            # If not the first stage, receive the activation from the previous stage.
            input_tensor = p2p_communication_2.recv_forward(tensor_shape=recv_tensor_shape)
            output_tensor, loss = forward_step(forward_step_func, data_iterator, model,
                                               input_tensor, device=torch.cuda.current_device())
            # Send the output to the next stage.
            p2p_communication_2.send_forward(output_tensor)

            # Save input and output for the matching backward pass later.
            input_tensors.append(input_tensor)
            output_tensors.append(output_tensor)

        # If not the first stage, fetch the input for the first 1F1B microbatch.
        if num_microbatches_remaining > 0:
            input_tensor = p2p_communication_2.recv_forward(tensor_shape=recv_tensor_shape)

        # Steady 1F1B phase; per microbatch:
        #   1. run forward, send activation forward
        #   2. receive output grad, run backward
        #   3. send input grad backward, receive next activation
        for i in range(num_microbatches_remaining):
            last_iteration = (i == (num_microbatches_remaining - 1))

            output_tensor, loss = forward_step(forward_step_func, data_iterator, model, input_tensor, device=torch.cuda.current_device())

            if loss is not None:
                loss_list.append(loss.item())
            if forward_only:
                p2p_communication_2.send_forward(output_tensor)
            else:
                output_tensor_grad = \
                    p2p_communication_2.send_forward_recv_backward(output_tensor)
            input_tensors.append(input_tensor)
            output_tensors.append(output_tensor)

            if forward_only:
                if not last_iteration:
                    input_tensor = p2p_communication_2.recv_forward(tensor_shape=recv_tensor_shape)
            else:
                input_tensor, output_tensor = input_tensors.pop(0), output_tensors.pop(0)
                input_tensor_grad, output_tensor_grad = \
                    backward_step(input_tensor, output_tensor,
                                  output_tensor_grad, loss)
                if last_iteration:
                    input_tensor = None
                    p2p_communication_2.send_backward(input_tensor_grad)
                else:
                    input_tensor = p2p_communication_2.send_backward_recv_forward(input_tensor_grad)

        # Cool-down phase: remaining backward passes only.
        if not forward_only:
            for i in range(num_warmup_microbatches):
                # 2025 add to fix dp: re-enable grad sync right before the
                # batch's final backward pass.
                if i == num_warmup_microbatches - 1:
                    enable_grad_sync()

                input_tensor = input_tensors.pop(0)
                output_tensor = output_tensors.pop(0)
                output_tensor_grad = p2p_communication_2.recv_backward()
                # Sanity check: the cool-down phase must always receive a grad.
                assert output_tensor_grad is not None, f"Rank {mpu.get_pipeline_model_parallel_rank()}: 冷却阶段收到空梯度！"
                input_tensor_grad, output_tensor_grad = \
                    backward_step(input_tensor, output_tensor,
                                  output_tensor_grad,loss=None)
                p2p_communication_2.send_backward(input_tensor_grad)

    # Manual data-parallel sync: all-reduce (average) every gradient across the
    # data-parallel group, since DDP's own reduction was suppressed above.
    if isinstance(model, torch.nn.parallel.distributed.DistributedDataParallel):
        dp_group = mpu.get_data_parallel_group()
        for param in model.parameters():
            if param.grad is not None:
                torch.distributed.all_reduce(
                    param.grad,
                    op=torch.distributed.ReduceOp.AVG,
                    group=dp_group
                )

    for i, loss in enumerate(loss_list):
        print(f"loss_list[{i}] = {loss} (type: {type(loss)})")
    print("-------------------------------------------------------------------------------------")
    # Fix: initialize explicitly so an empty loss_list never raises
    # UnboundLocalError (e.g. micro_batch == 0) and non-last stages
    # consistently return None instead of a leftover loop variable.
    loss = None
    if len(loss_list) > 0:
        loss = sum(loss_list) / len(loss_list)
    return loss


from geesibling.adapters.pytorch.pipeline.megatron.distributed import DistributedDataParallel as LocalDDP

def forward_backward_pipelining_without_interleaving2_remove_sgp_use_megatronDDP(forward_step_func, data_iterator,
                                                                 model,
                                                                 forward_only, micro_batch, gpus_list, recv_tensor_shape):
    """Run a non-interleaved 1F1B pipeline schedule relying on Megatron's
    LocalDDP wrapper for data-parallel gradient reduction.

    Unlike the ``..._fix_dp`` variant, gradients are not all-reduced by hand:
    asynchronous reduction is disabled for all but the last backward pass of
    the batch, and ``model.finish_grad_sync()`` flushes any outstanding
    reductions at the end.

    Args:
        forward_step_func: passed through to ``forward_step``.
        data_iterator: iterator over microbatches; only the last pipeline stage
            reads labels from it (see ``forward_step``).
        model: this pipeline stage's module, possibly wrapped in LocalDDP.
        forward_only: if True, run forward passes only (no backward).
        micro_batch: number of microbatches in the global batch.
        gpus_list: unused here; kept for signature compatibility with the
            sibling schedule variants.
        recv_tensor_shape: shape of the activation tensor received from the
            previous stage.

    Returns:
        Average loss over this stage's microbatches if any loss was produced
        (true last stage only), otherwise ``None``.
    """
    # Use LocalDDP's no_sync context when available so grad reductions only
    # fire for the final microbatch; plain modules get a no-op context.
    Dp_flag = False
    if isinstance(model, LocalDDP):
        Dp_flag = True
        no_sync_func = model.no_sync
    else:
        no_sync_func = contextlib.nullcontext
    no_sync_context = None

    def disable_grad_sync():
        """Disable asynchronous grad reductions"""
        nonlocal no_sync_context
        if no_sync_context is None:
            no_sync_context = no_sync_func()
            no_sync_context.__enter__()

    def enable_grad_sync():
        """Enable asynchronous grad reductions"""
        nonlocal no_sync_context
        if no_sync_context is not None:
            no_sync_context.__exit__(None, None, None)
            no_sync_context = None

    disable_grad_sync()

    # Warm-up size for this stage: earlier stages run more forwards before the
    # steady 1F1B phase starts; never more than the total microbatch count.
    num_warmup_microbatches = \
        (mpu.get_pipeline_model_parallel_world_size() -
         mpu.get_pipeline_model_parallel_rank() - 1)
    num_warmup_microbatches = min(
        num_warmup_microbatches,
        micro_batch)
    num_microbatches_remaining = \
        micro_batch - num_warmup_microbatches

    input_tensors = []
    output_tensors = []
    loss_list = []

    # Warm-up phase: forward passes only.
    for i in range(num_warmup_microbatches):
        # If not the first stage, receive the activation from the previous stage.
        input_tensor = p2p_communication_2.recv_forward(tensor_shape=recv_tensor_shape)
        output_tensor, loss = forward_step(forward_step_func, data_iterator, model,
                                           input_tensor, device=torch.cuda.current_device())
        # Send the output to the next stage.
        p2p_communication_2.send_forward(output_tensor)

        # Save input and output for the matching backward pass later.
        input_tensors.append(input_tensor)
        output_tensors.append(output_tensor)

    # If not the first stage, fetch the input for the first 1F1B microbatch.
    if num_microbatches_remaining > 0:
        input_tensor = p2p_communication_2.recv_forward(tensor_shape=recv_tensor_shape)

    # Steady 1F1B phase; per microbatch:
    #   1. run forward, send activation forward
    #   2. receive output grad, run backward
    #   3. send input grad backward, receive next activation
    for i in range(num_microbatches_remaining):
        last_iteration = (i == (num_microbatches_remaining - 1))

        output_tensor, loss = forward_step(forward_step_func, data_iterator, model, input_tensor, device=torch.cuda.current_device())

        if loss is not None:
            loss_list.append(loss.item())
        if forward_only:
            p2p_communication_2.send_forward(output_tensor)
        else:
            output_tensor_grad = \
                p2p_communication_2.send_forward_recv_backward(output_tensor)
        input_tensors.append(input_tensor)
        output_tensors.append(output_tensor)

        if forward_only:
            if not last_iteration:
                input_tensor = p2p_communication_2.recv_forward(tensor_shape=recv_tensor_shape)
        else:
            input_tensor, output_tensor = input_tensors.pop(0), output_tensors.pop(0)

            # Enable grad sync for the last microbatch in the batch if the full
            # backward pass completes in the 1F1B stage.
            if num_warmup_microbatches == 0 and last_iteration:
                enable_grad_sync()

            input_tensor_grad, output_tensor_grad = \
                backward_step(input_tensor, output_tensor,
                              output_tensor_grad, loss)
            if last_iteration:
                input_tensor = None
                p2p_communication_2.send_backward(input_tensor_grad)
            else:
                input_tensor = p2p_communication_2.send_backward_recv_forward(input_tensor_grad)

    # Cool-down phase: remaining backward passes only.
    if not forward_only:
        for i in range(num_warmup_microbatches):
            # 2025 add to fix dp: re-enable grad sync right before the batch's
            # final backward pass.
            if i == num_warmup_microbatches - 1:
                enable_grad_sync()
                if (mpu.get_pipeline_model_parallel_rank() == 3) and mpu.get_data_parallel_rank() == 0:
                    print("正确开启梯度同步2--------------------------------------------")

            input_tensor = input_tensors.pop(0)
            output_tensor = output_tensors.pop(0)
            output_tensor_grad = p2p_communication_2.recv_backward()
            # Sanity check: the cool-down phase must always receive a grad.
            assert output_tensor_grad is not None, f"Rank {mpu.get_pipeline_model_parallel_rank()}: 冷却阶段收到空梯度！"
            input_tensor_grad, output_tensor_grad = \
                backward_step(input_tensor, output_tensor,
                              output_tensor_grad,loss=None)
            p2p_communication_2.send_backward(input_tensor_grad)

    # Make sure DDP's all-reduce has finished before the optimizer step.
    # gees Launch any remaining grad reductions.
    if not forward_only and Dp_flag:
        model.finish_grad_sync()

    # Fix: initialize explicitly so an empty loss_list never raises
    # UnboundLocalError (e.g. micro_batch == 0) and non-last stages
    # consistently return None instead of a leftover loop variable.
    loss = None
    if len(loss_list) > 0:
        loss = sum(loss_list) / len(loss_list)
    return loss










# TODO gn 修改forward_step_helper中传给forward_step(...)函数的的device--
def forward_backward_pipelining_with_interleaving_fix_dp(forward_step_func,
                                                  data_iterator,
                                                  model,
                                                  forward_only,
                                                  micro_batch,
                                                  gpus_list,
                                                  recv_tensor_shape):

    # Disable async grad reductions
    no_sync_func_list = [model_chunk.no_sync for model_chunk in model]
    assert isinstance(
        no_sync_func_list, list
    ), "no_sync_func_list must be a list"

    if isinstance(no_sync_func_list, list):
        def multi_no_sync():
            stack = contextlib.ExitStack()
            for model_chunk_no_sync_func in no_sync_func_list:
                stack.enter_context(model_chunk_no_sync_func())
            return stack
        no_sync_func = multi_no_sync
    if no_sync_func is None:
        no_sync_func = contextlib.nullcontext
    no_sync_context = None

    # TODO Currently, grad_sync_func is None,so as param_sync_func(which is servered for distrubuted optimizer/Zero)
    # if config.grad_sync_func is not None and not isinstance(config.grad_sync_func, list):
    #     config.grad_sync_func = [config.grad_sync_func for _ in model]
    #
    # if config.param_sync_func is not None and not isinstance(config.param_sync_func, list):
    #     config.param_sync_func = [config.param_sync_func for _ in model]

    def disable_grad_sync():
        """Disable asynchronous grad reductions"""
        nonlocal no_sync_context
        if no_sync_context is None:
            no_sync_context = no_sync_func()
            no_sync_context.__enter__()

    def enable_grad_sync():
        """Enable asynchronous grad reductions"""
        nonlocal no_sync_context
        if no_sync_context is not None:
            no_sync_context.__exit__(None, None, None)
            no_sync_context = None

    disable_grad_sync()

    # TODO Unknown
    # Model chunk IDs with synchronized grads
    synchronized_model_chunks = set()

    micro_batch = micro_batch
    num_microbatches = micro_batch  # 换个名,好记一点.

    input_tensors = [[] for _ in range(len(model))]
    output_tensors = [[] for _ in range(len(model))]

    # attention! new add vs normal 1F1B
    if not forward_only:
        output_tensor_grads = [[] for _ in range(len(model))]

    loss_list = []

    recv_fwd_tensor_shape = recv_tensor_shape
    recv_bwd_tensor_shape = recv_tensor_shape
    tensor_shape = recv_tensor_shape  # 统一使用tensor_shape

    # vs1 megatron的tensor_shape
    # tensor_shape = [seq_length, micro_batch_size, config.hidden_size]

    # vs2 gees传入的recv_tensor_shape
    # recv_tensor_shape = (micro_batch_size, tensor_length, cfg.hidden_size)  # gees nk 为什么tensor_shape不一样呢?---->GPTmode不同,output_tensor的shape不同

    pipeline_parallel_size = mpu.get_pipeline_model_parallel_world_size()  # gees 获得PP_group的size，被分配了多少个GPU,pp进程组内有多少个进程/GPU---这是without_interleaving没有的
    pipeline_parallel_rank = mpu.get_pipeline_model_parallel_rank()  # gees without_interleaving也有，得到当前进程/GPU/stage的rank

    if micro_batch % pipeline_parallel_size != 0:
        msg = f'number of microbatches ({num_microbatches}) is not divisible by '
        msg += f'pipeline-model-parallel-size ({pipeline_parallel_size}) '
        msg += 'when using interleaved schedule'
        raise RuntimeError(msg)

    # Compute number of warmup and remaining microbatches.
    num_model_chunks = len(model)

    # vpp 实际上增加了要执行的microbatches的个数
    total_num_microbatches = num_microbatches * num_model_chunks  # gees total_num_microbatches = num_microbatches * vpp_size
    all_warmup_microbatches = False

    if forward_only:
        num_warmup_microbatches = total_num_microbatches
    else:
        # Run all forward passes and then all backward passes if number of
        # microbatches is just the number of pipeline stages.
        # Otherwise, perform (num_model_chunks-1)*pipeline_parallel_size on
        # all workers, followed by more microbatches after depending on
        # stage ID (more forward passes for earlier stages, later stages can
        # immediately start with 1F1B).
        if num_microbatches == pipeline_parallel_size:  # gees 如果num_microbatches等于pipeline_parallel_size,并行方式跟GPipe中的思路一样，先全forward再走backward，在warmup阶段把所有的microbatch都训练完。
            num_warmup_microbatches = total_num_microbatches
            all_warmup_microbatches = True
        else:
            num_warmup_microbatches = (pipeline_parallel_size - pipeline_parallel_rank - 1) * 2
            num_warmup_microbatches += (num_model_chunks - 1) * pipeline_parallel_size
            num_warmup_microbatches = min(num_warmup_microbatches, total_num_microbatches)
    num_microbatches_remaining = total_num_microbatches - num_warmup_microbatches

    # print(f"当前进程rank为：{pipeline_parallel_rank},total_num_microbatches={total_num_microbatches},num_microbatches={num_microbatches},num_model_chunks={num_model_chunks}")
    # print(f"当前进程rank为：{pipeline_parallel_rank},num_warmup_microbatches={num_warmup_microbatches},num_microbatches_remaining={num_microbatches_remaining}")


    # print()

    # Add two important helper functions.
    def get_model_chunk_id(microbatch_id, forward):
        """Return the local model-chunk ID that owns the given iteration.

        Position within one full pipeline round determines the chunk; for the
        backward direction the chunk order is reversed.
        """
        round_size = pipeline_parallel_size * num_model_chunks
        chunk = (microbatch_id % round_size) // pipeline_parallel_size
        return chunk if forward else num_model_chunks - 1 - chunk

    # gees   获得真实的micro-batch 获取序号（K/forward_k../iteration_id）得到真实的microbatch_id
    def get_microbatch_id_in_model_chunk(iteration_id, forward):
        """Translate a forward iteration index into the real microbatch ID
        within its model chunk."""
        assert forward
        round_index = iteration_id // (pipeline_parallel_size * num_model_chunks)
        offset_in_round = iteration_id % pipeline_parallel_size
        return round_index * pipeline_parallel_size + offset_in_round

    def is_last_microbatch_for_model_chunk(microbatch_id: int) -> bool:
        """Return True iff this iteration is a model chunk's final microbatch.

        Only iterations in the last microbatch group can qualify; within that
        group, the last slot of each pipeline-sized run is the chunk's final
        microbatch.
        """
        group_size = pipeline_parallel_size * num_model_chunks
        last_group = (total_num_microbatches // group_size) - 1
        if microbatch_id // group_size != last_group:
            return False
        in_group = microbatch_id % group_size
        return in_group % pipeline_parallel_size == pipeline_parallel_size - 1

    def forward_step_helper(microbatch_id, current_microbatch, checkpoint_activations_microbatch):
        """Run one forward step with the model split into virtual chunks.

        Sets the virtual pipeline-parallel rank to the owning chunk before
        calling forward_step(). Returns (output_tensor, loss); loss is None
        except on the true last stage.

        current_microbatch and checkpoint_activations_microbatch are accepted
        for schedule compatibility but are not used in Geesibling yet.
        """

        # Determine which of this worker's model chunks owns this microbatch.
        model_chunk_id = get_model_chunk_id(microbatch_id, forward=True)

        mpu.set_virtual_pipeline_model_parallel_rank(model_chunk_id)  # so mpu.is_pipeline_first_stage() reflects the virtual stage

        # forward step
        # Only chunk 0 can have None appended here: is_pipeline_first_stage()
        # is only True when the virtual rank (== model_chunk_id) is 0.
        if mpu.is_pipeline_first_stage():
            if len(input_tensors[model_chunk_id]) == len(output_tensors[model_chunk_id]):
                input_tensors[model_chunk_id].append(None)

        input_tensor = input_tensors[model_chunk_id][-1]

        # NOTE: forward_step differs from Megatron's here — it returns
        # (output_tensor, loss) and no token count yet. On the first stage the
        # input comes from data_iterator; otherwise the passed-in tensor is used.
        # loss is None unless this is the true last stage (loss = output_tensor.loss).
        # TODO: on multi-node setups the device should come from args.local_rank
        # rather than the pipeline rank; they only coincide on a single node.
        output_tensor, loss = forward_step(forward_step_func,
                                           data_iterator[model_chunk_id],
                                           model[model_chunk_id],
                                           input_tensor,
                                           torch.cuda.current_device())
        output_tensors[model_chunk_id].append(output_tensor)
        # Skip None losses (fixes: unsupported operand type(s) for +: 'int' and 'NoneType').
        if loss is not None:
            loss_list.append(loss.item())

        # if forward-only, no need to save tensors for a backward pass
        if forward_only:
            input_tensors[model_chunk_id].pop()
            output_tensors[model_chunk_id].pop()

        return output_tensor, loss

    def backward_step_helper(microbatch_id, loss=None):
        """Run one backward step with the model split into virtual chunks.

        Sets the virtual pipeline-parallel rank to the owning chunk before
        calling backward_step(). Returns (input_tensor_grad, output_tensor_grad).
        """
        model_chunk_id = get_model_chunk_id(microbatch_id, forward=False)
        mpu.set_virtual_pipeline_model_parallel_rank(model_chunk_id)



        # 2025: launch grad synchronization on the chunk's last microbatch
        # (default path; a configurable grad_sync_func is not wired up here).
        if  is_last_microbatch_for_model_chunk(microbatch_id):
            enable_grad_sync()
            synchronized_model_chunks.add(model_chunk_id)



        # The true last stage needs special handling: no downstream stage sends
        # it an output grad, so its output_tensor_grads entry is None.
        if mpu.is_pipeline_last_stage():
            if len(output_tensor_grads[model_chunk_id]) == 0:
                output_tensor_grads[model_chunk_id].append(None)

        input_tensor = input_tensors[model_chunk_id].pop(0)
        output_tensor = output_tensors[model_chunk_id].pop(0)
        output_tensor_grad = output_tensor_grads[model_chunk_id].pop(0)

        input_tensor_grad, output_tensor_grad = backward_step(input_tensor,
                                                              output_tensor,
                                                              output_tensor_grad,
                                                              loss)

        # new add 2025: re-enter the no-sync context so subsequent backward
        # passes do not trigger grad reductions.
        disable_grad_sync()


        # Return a 2-tuple (fixes: ValueError: too many values to unpack (expected 2)).
        return input_tensor_grad, output_tensor_grad

    # Run warmup forward passes.
    mpu.set_virtual_pipeline_model_parallel_rank(0)

    # input_tensors[0].append(p2p_communication.recv_forward(recv_fwd_tensor_shape, config))
    # gn p2p_communication_3.recv_forward
    input_tensors[0].append(p2p_communication_3.recv_forward(tensor_shape=tensor_shape))

    for k in range(num_warmup_microbatches):
        # cur_model_chunk_id = get_model_chunk_id(k, forward=True)
        checkpoint_activations_microbatch = None

        current_microbatch = get_microbatch_id_in_model_chunk(k, forward=True)
        # print(f"当前进程序号为：{pipeline_parallel_rank},model_chunk_id为{get_model_chunk_id(k, forward=True)},microbath为{current_microbatch}准备开始执行forward_step_helper---------------------------------------")

        output_tensor, loss = forward_step_helper(
            k, current_microbatch, checkpoint_activations_microbatch
        )
        # print(f"当前进程序号为：{pipeline_parallel_rank},model_chunk_id为{get_model_chunk_id(k, forward=True)},microbath为{current_microbatch}的一个forward_step_helper结束---------------------------------------")
        # Determine if tensor should be received from previous stage.
        next_forward_model_chunk_id = get_model_chunk_id(k + 1, forward=True)
        recv_prev = True
        if mpu.is_pipeline_first_stage(ignore_virtual=True):
            if next_forward_model_chunk_id == 0:
                recv_prev = False
        if k == (total_num_microbatches - 1):
            recv_prev = False

        # gn 8.23发现一处错误，修正，取消ignore_virtual=True
        if mpu.is_pipeline_last_stage():
            output_tensor = None

        # ---gees 先采取不重叠通信
        if (  # gees nk 特殊情况处理最后一个warmup的mb
                k == (num_warmup_microbatches - 1)
                and not forward_only
                and not all_warmup_microbatches
        ):
            input_tensor_grad = None
            recv_next = True
            if mpu.is_pipeline_last_stage(ignore_virtual=True):
                recv_next = False
            # print(f"当前处在warm_up阶段，已经完成第{k}个warm-up的前向过程，已经来到最后一个mb，当前的进程序号为:{pipeline_parallel_rank},准备开始接受input_tensor和output_tensor_grad：")
            input_tensor, output_tensor_grad = p2p_communication_3.send_forward_backward_recv_forward_backward(
                output_tensor,
                input_tensor_grad,
                recv_prev=recv_prev,
                recv_next=recv_next,
                # tensor_shape=tensor_shape,
                # config=config,
            )
            # gees 在GPU0，这里的input_tensor=NULL。在GPU_last这里的output_tensor_grad=NULL
            # print(f"当前处在warm_up阶段，已经完成第{k}个warm-up的前向过程，已经来到最后一个mb，接受到了input_tensor和output_tensor_grad：{type(input_tensor)},{type(output_tensor_grad)}")
            output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad)
        else:
            # print(f"当前处在warm_up阶段，已经完成第{k}个warm-up的前向过程，当前的进程序号为:{pipeline_parallel_rank},准备开始接受input_tensor：")
            input_tensor = p2p_communication_3.send_forward_recv_forward(
                output_tensor,
                recv_prev=recv_prev,
                # tensor_shape=tensor_shape,
                # config=config
            )
            # if input_tensor is not None:
            #     print(f"当前处在warm_up阶段，已经完成第{k}个warm-up的前向过程，当前的进程序号为:{pipeline_parallel_rank},成功接受到了input_tensor：{input_tensor.shape}")
        input_tensors[next_forward_model_chunk_id].append(input_tensor)

        # GeeSibling now has no deallocate_output_tensor fuction while megatron has
        # deallocate_output_tensor(output_tensor, config.deallocate_pipeline_outputs)

    # warmup state is over
    # Move to another state.
    # Run 1F1B in steady state.
    for k in range(num_microbatches_remaining):
        # Forward pass.
        forward_k = k + num_warmup_microbatches  # 只是一个序号

        checkpoint_activations_microbatch = None

        cur_model_chunk_id = get_model_chunk_id(forward_k, forward=True)  # 当前forward_k应当由当前worker哪个chunk来负责。
        current_microbatch = get_microbatch_id_in_model_chunk(forward_k, forward=True)  # micro_batch的真实标号。

        # deallocate_output_tensor(output_tensor, config.deallocate_pipeline_outputs)

        output_tensor, loss = forward_step_helper(
            forward_k, current_microbatch, checkpoint_activations_microbatch
        )

        # Backward pass.
        backward_k = k
        # print(f"进程{pipeline_parallel_rank}的第{k}次反向传播开始----------------")
        input_tensor_grad, output_tensor_grad = backward_step_helper(backward_k, loss)
        # print(f"进程{pipeline_parallel_rank}的第{k}次反向传播结束-----------------------------------")

        # Send output_tensor and input_tensor_grad, receive input_tensor and output_tensor_grad.

        # Determine if current stage has anything to send in either direction,
        # otherwise set tensor to None.

        # f
        forward_model_chunk_id = get_model_chunk_id(forward_k, forward=True)  # 当前forward_k应当由当前worker哪个chunk来负责
        mpu.set_virtual_pipeline_model_parallel_rank(forward_model_chunk_id)
        # Last virtual stage no activation tensor to send
        if mpu.is_pipeline_last_stage():
            output_tensor = None

        # b
        backward_model_chunk_id = get_model_chunk_id(backward_k, forward=False)
        mpu.set_virtual_pipeline_model_parallel_rank(backward_model_chunk_id)
        # First virtual stage no activation gradient tensor to send
        if mpu.is_pipeline_first_stage():  # 考虑了virtual_stage,反向传播最后一个阶段处于first_stage
            input_tensor_grad = None

        # Determine if peers are sending, and where in data structure to put
        # received tensors.
        recv_prev = True
        if mpu.is_pipeline_first_stage(ignore_virtual=True):
            # gees here ignore_virtual must be true！For deciding next micro-bacth need recv_input or not.
            #  First stage is ahead of last stage by (pipeline_parallel_size - 1).
            next_forward_model_chunk_id = get_model_chunk_id(forward_k - (pipeline_parallel_size - 1), forward=True)
            # gees top is false next_forward_model_chunk_id.Actually false next_forward_model_chunk_id is device_last's current model_chunk_id,
            # which is equal to device_0's (real_next_forward_model_chunk_id - 1).
            # use false next forward_model_chunk_id to decide whether to recv_prev.    add by gees
            if next_forward_model_chunk_id == (num_model_chunks - 1):
                recv_prev = False
            next_forward_model_chunk_id += 1  # real next_forward_model_chunk_id
        else:
            next_forward_model_chunk_id = get_model_chunk_id(forward_k + 1, forward=True)

        # If last iteration, don't receive; we already received one extra
        # before the start of the for loop.
        if k == (num_microbatches_remaining - 1):  # 下面没有更多的前向传播了
            recv_prev = False

        # ------------------------
        # error: next is overlap
        # # Send activation tensor to the next stage and receive activation tensor from the
        # # previous stage
        # # gees from here we can see that activation tensor actually is out_tensor
        # # gees 相比于without interleaving，这里多了一个recv_forward（接受recv_prev）为当前worker下一个mb的输入input_tensor做准备
        #
        # input_tensor, fwd_wait_handles = send_forward_recv_forward(
        #     output_tensor,
        #     recv_prev=recv_prev,
        #     tensor_shape=tensor_shape,
        #     config=config,
        #     overlap_p2p_comm=True,
        # )
        #
        # # assert fwd_wait_handles is not None   gn:不清楚这里是做什么的
        # if bwd_wait_handles is not None:
        #     for req in bwd_wait_handles:
        #         req.wait()
        # ---------------------------------

        # Determine if the current virtual stage has an activation gradient tensor to receive
        recv_next = True
        if mpu.is_pipeline_last_stage(ignore_virtual=True):
            # Last stage is ahead of first stage by (pipeline_parallel_size - 1).
            # gees Same reason in 1f1b forward
            next_backward_model_chunk_id = get_model_chunk_id(
                backward_k - (pipeline_parallel_size - 1), forward=False
            )
            if next_backward_model_chunk_id == 0:
                recv_next = False
            next_backward_model_chunk_id -= 1
        else:
            next_backward_model_chunk_id = get_model_chunk_id(backward_k + 1, forward=False)

        input_tensor, output_tensor_grad = p2p_communication_3.send_forward_backward_recv_forward_backward(
            output_tensor,
            input_tensor_grad,
            recv_prev=recv_prev,
            recv_next=recv_next,
        )
        # deallocate_output_tensor(output_tensor, config.deallocate_pipeline_outputs)

        # either overlap or not---------------------------------
        # Put input_tensor and output_tensor_grad in data structures in the
        # right location.
        if recv_prev:
            input_tensors[next_forward_model_chunk_id].append(input_tensor)  # gees 正式为下一个foward做准备
        if recv_next:
            output_tensor_grads[next_backward_model_chunk_id].append(output_tensor_grad)  # gees 正式为下一个backward做准备

    # deallocate_output_tensor(output_tensor, config.deallocate_pipeline_outputs)

    # 1F1B/steady state is over
    # Move to next state
    # Run cooldown backward passes (flush out pipeline).
    if not forward_only:

        # gn not sure what this is doing
        # if config.overlap_p2p_comm and bwd_wait_handles is not None:
        #     for wait_handle in bwd_wait_handles:
        #         wait_handle.wait()

        if all_warmup_microbatches:
            # output_tensor_grads[num_model_chunks - 1].append(
            #     p2p_communication_3.recv_backward(tensor_shape)  # TODO
            # )
            output_tensor_grad = p2p_communication_3.recv_backward(tensor_shape)
            output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad)

        flag = True
        for k in range(num_microbatches_remaining, total_num_microbatches):
            if flag:
                print(f"total_num_microbatches is {total_num_microbatches}")
                print(f"num_warmup_microbatches is{num_warmup_microbatches}")
                print(f"num_microbatches_remaining is{num_microbatches_remaining}")
                print(f"当前处于{mpu.get_pipeline_model_parallel_rank()}-gpu的第{k + num_microbatches_remaining + 1}个虚拟微批次的反向传播开始")
                print(k + num_microbatches_remaining + 1)
                print(f"当前处于{mpu.get_pipeline_model_parallel_rank()}-gpu的第{k}个cooldown微批次的反向传播开始")
                print(k)
                flag = False
            # gn 8.23 修正RuntimeError: Invalid function argument. Expected parameter `tensor` to be of type torch.Tensor.
            # gn input_tensor_grad = backward_step_helper(k)是错误的。因为backward_step_helper返回的是一个元组Tuple[Tensor,Tensor]
            input_tensor_grad, output_tensor_grad = backward_step_helper(k)  # 这里不传loss,
            next_backward_model_chunk_id = get_model_chunk_id(k + 1, forward=False)
            recv_next = True
            if mpu.is_pipeline_last_stage(ignore_virtual=True):
                if next_backward_model_chunk_id == (num_model_chunks - 1):
                    recv_next = False
            if k == (total_num_microbatches - 1):
                recv_next = False
            # output_tensor_grads[next_backward_model_chunk_id].append(
            #     p2p_communication_3.send_backward_recv_backward(
            #         input_tensor_grad, recv_next=recv_next
            #     )
            # )
            output_tensor_grad = p2p_communication_3.send_backward_recv_backward(
                input_tensor_grad, recv_next=recv_next
            )
            output_tensor_grads[next_backward_model_chunk_id].append(output_tensor_grad)
        # Launch any remaining grad reductions.

        # 2025 好像不需要添加这里
        # enable_grad_sync()

        # if config.grad_sync_func is not None:
        #     for model_chunk_id in range(num_model_chunks):
        #         if model_chunk_id not in synchronized_model_chunks:
        #             config.grad_sync_func[model_chunk_id](model[model_chunk_id].parameters())
        #             synchronized_model_chunks.add(model_chunk_id)



        # if config.finalize_model_grads_func is not None and not forward_only:
        #     # Finalize model grads (perform full grad all-reduce / reduce-scatter for
        #     # data parallelism, layernorm all-reduce for sequence parallelism, and
        #     # embedding all-reduce for pipeline parallelism).
        #     config.finalize_model_grads_func(model)
        # Finalize model grads (perform full grad all-reduce / reduce-scatter fordata parallelism
        # TODO layernorm all-reduce for sequence parallelism, and
        #   embedding all-reduce for pipeline parallelism).
        for model_chunk in model:
            model_chunk.finish_grad_sync()

    loss_finial = None
    for i, loss in enumerate(loss_list):
        print(f"loss_list[{i}] = {loss} (type: {type(loss)})")
    print("-------------------------------------------------------------------------------------")
    if len(loss_list) > 0:
        loss_finial = sum(loss_list) / len(loss_list)
    return loss_finial



def forward_backward_no_pipelining(forward_step_func, data_iterator, model,
                                   forward_only, num_microbatches, gpus_list,
                                   recv_tensor_shape):
    """Run forward (and optionally backward) passes with no pipeline parallelism.

    The first ``num_microbatches - 1`` microbatches execute inside the DDP
    no-sync context so the gradient all-reduce is deferred; the final
    microbatch runs outside that context to allow gradient synchronization.

    Args:
        forward_step_func: forward-step callable, passed through to
            ``forward_step``.
        data_iterator: iterator yielding microbatch data.
        model: the model, optionally wrapped in ``LocalDDP``.
        forward_only: if True, skip all backward passes and grad sync.
        num_microbatches: number of microbatches to run.
        gpus_list: unused here; kept for signature parity with the
            pipelined schedulers.
        recv_tensor_shape: unused here; kept for signature parity with the
            pipelined schedulers.

    Returns:
        Mean loss over all microbatches (consistent with the pipelined
        schedules, which average ``loss_list``), or None if no loss was
        collected.
    """
    is_ddp = isinstance(model, LocalDDP)
    # Defer the DDP gradient all-reduce for all but the last microbatch.
    no_sync_func = model.no_sync if is_ddp else contextlib.nullcontext

    loss_list = []
    # Without pipeline stages there is no incoming activation or incoming
    # activation-gradient tensor.
    input_tensor, output_tensor_grad = None, None
    device = torch.cuda.current_device()  # loop-invariant: look up once

    with no_sync_func():
        for _ in range(num_microbatches - 1):
            output_tensor, loss = forward_step(
                forward_step_func,
                data_iterator,
                model,
                input_tensor,
                device=device,
            )
            loss_list.append(loss)
            if not forward_only:
                backward_step(input_tensor, output_tensor, output_tensor_grad, loss)

    # Run the last microbatch outside the no-sync context so the deferred
    # DDP gradient all-reduce can be launched during its backward pass.
    output_tensor, loss = forward_step(
        forward_step_func,
        data_iterator,
        model,
        input_tensor,
        device=device,
    )
    loss_list.append(loss)
    if not forward_only:
        backward_step(input_tensor, output_tensor, output_tensor_grad, loss)

    # Ensure any remaining DDP grad reductions have completed.
    if not forward_only and is_ddp:
        model.finish_grad_sync()

    if not loss_list:
        return None
    return sum(loss_list) / len(loss_list)

