from functools import reduce
import operator
import torch
import torch.distributed as dist
from .set_args import get_args

from ..megatron import mpu
from ..megatron.mpu import (
    get_pipeline_model_parallel_group,
    get_pipeline_model_parallel_next_rank,
    get_pipeline_model_parallel_prev_rank,
    get_pipeline_model_parallel_rank,
    get_pipeline_model_parallel_world_size,
)

from typing import Callable, List, Optional, Tuple, Union
'''
Functionality: exec the send and recv tensor at 1f1b
Author: Chuhongjie
Date: 2024-06-13
'''

import logging
import os


# add for tp
def split_tensor_into_1d_equal_chunks(tensor):
    """Return this tensor-parallel rank's contiguous 1D slice of *tensor*.

    The tensor is flattened and divided into world_size equal chunks;
    the chunk belonging to the local tensor-parallel rank is returned.
    """
    flat = tensor.view(-1)
    chunk_numel = torch.numel(flat) // mpu.get_tensor_model_parallel_world_size()
    begin = chunk_numel * mpu.get_tensor_model_parallel_rank()
    return flat[begin:begin + chunk_numel]


def gather_split_1d_tensor(tensor):
    """Inverse of split_tensor_into_1d_equal_chunks.

    All-gathers the per-rank 1D chunks across the tensor-model-parallel
    group into a single flat tensor and returns it.
    """
    tp_world_size = mpu.get_tensor_model_parallel_world_size()
    chunk_numel = torch.numel(tensor)
    gathered = torch.empty(tp_world_size * chunk_numel,
                           dtype=tensor.dtype,
                           device=torch.cuda.current_device(),
                           requires_grad=False)
    # Views into `gathered`, one per rank, so all_gather writes in place.
    chunk_views = [
        gathered[r * chunk_numel:(r + 1) * chunk_numel]
        for r in range(tp_world_size)
    ]
    torch.distributed.all_gather(chunk_views, tensor,
                                 group=mpu.get_tensor_model_parallel_group())
    return gathered


# Read the log directory from the LOG_DIR environment variable
# (defaulting to ./logs) and make sure it exists.
log_dir = os.environ.get('LOG_DIR', './logs')
os.makedirs(log_dir, exist_ok=True)

# Log both to a file inside log_dir and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[
        logging.FileHandler(os.path.join(log_dir, "pipeline_log.log")),
        logging.StreamHandler()
    ]
)

# Module-level logger for this file.
logger = logging.getLogger(__name__)


def _batched_p2p_ops(
    *,
    tensor_send_prev: Optional[torch.Tensor],
    tensor_recv_prev: Optional[torch.Tensor],
    tensor_send_next: Optional[torch.Tensor],
    tensor_recv_next: Optional[torch.Tensor],
    group: torch.distributed.ProcessGroup
):
    """Post up to four p2p operations as a single batch_isend_irecv call.

    Any tensor argument that is None is skipped.  Returns the list of
    request handles (empty when no operation was posted).
    """
    prev_rank = get_pipeline_model_parallel_prev_rank()
    next_rank = get_pipeline_model_parallel_next_rank()
    # (primitive, tensor, peer) in the same posting order as before:
    # send-prev, recv-prev, send-next, recv-next.
    candidates = [
        (torch.distributed.isend, tensor_send_prev, prev_rank),
        (torch.distributed.irecv, tensor_recv_prev, prev_rank),
        (torch.distributed.isend, tensor_send_next, next_rank),
        (torch.distributed.irecv, tensor_recv_next, next_rank),
    ]
    ops = [
        torch.distributed.P2POp(primitive, tensor, peer, group)
        for primitive, tensor, peer in candidates
        if tensor is not None
    ]
    if not ops:
        return []
    return torch.distributed.batch_isend_irecv(ops)


def _p2p_ops(
    *,
    tensor_send_prev: Optional[torch.Tensor],
    tensor_recv_prev: Optional[torch.Tensor],
    tensor_send_next: Optional[torch.Tensor],
    tensor_recv_next: Optional[torch.Tensor],
    group: torch.distributed.ProcessGroup
):
    """Post individual isend/irecv operations for up to four tensors.

    Even and odd pipeline ranks issue their sends and receives in
    mirrored orders so that every isend posted on one rank is matched by
    a concurrently posted irecv on its peer, which avoids deadlock.
    Returns the list of outstanding request handles; callers must wait
    on them.
    """
    reqs = []
    rank = get_pipeline_model_parallel_rank()  # NOTE(review): currently unused
    even_send_odd_recv_group = group
    if get_pipeline_model_parallel_world_size() == 2:
        # Use the global process group for one of the two p2p communications
        # to allow the overlap of the independent communications.
        # Using the global process group is compatible because the pipeline-parallel
        # communications set the source and destination by global rank.
        even_recv_odd_send_group = torch.distributed.group.WORLD
    else:
        even_recv_odd_send_group = group
    if get_pipeline_model_parallel_rank() % 2 == 0:
        # Even ranks: send-next, recv-prev, send-prev, recv-next.
        # This order mirrors the odd-rank branch below; do not reorder.
        if tensor_send_next is not None:
            send_next_req = torch.distributed.isend(
                tensor=tensor_send_next,
                dst=get_pipeline_model_parallel_next_rank(),
                group=even_send_odd_recv_group,
            )
            reqs.append(send_next_req)

        if tensor_recv_prev is not None:
            recv_prev_req = torch.distributed.irecv(
                tensor=tensor_recv_prev,
                src=get_pipeline_model_parallel_prev_rank(),
                group=even_recv_odd_send_group,
            )
            reqs.append(recv_prev_req)

        if tensor_send_prev is not None:
            send_prev_req = torch.distributed.isend(
                tensor=tensor_send_prev,
                dst=get_pipeline_model_parallel_prev_rank(),
                group=even_send_odd_recv_group,
            )
            reqs.append(send_prev_req)

        if tensor_recv_next is not None:
            recv_next_req = torch.distributed.irecv(
                tensor=tensor_recv_next,
                src=get_pipeline_model_parallel_next_rank(),
                group=even_recv_odd_send_group,
            )
            reqs.append(recv_next_req)

    else:
        # Odd ranks: recv-prev, send-next, recv-next, send-prev --
        # the complement of the even-rank order above; do not reorder.
        if tensor_recv_prev is not None:
            recv_prev_req = torch.distributed.irecv(
                tensor=tensor_recv_prev,
                src=get_pipeline_model_parallel_prev_rank(),
                group=even_send_odd_recv_group,
            )
            reqs.append(recv_prev_req)

        if tensor_send_next is not None:
            send_next_req = torch.distributed.isend(
                tensor=tensor_send_next,
                dst=get_pipeline_model_parallel_next_rank(),
                group=even_recv_odd_send_group,
            )
            reqs.append(send_next_req)

        if tensor_recv_next is not None:
            recv_next_req = torch.distributed.irecv(
                tensor=tensor_recv_next,
                src=get_pipeline_model_parallel_next_rank(),
                group=even_send_odd_recv_group,
            )
            reqs.append(recv_next_req)

        if tensor_send_prev is not None:
            send_prev_req = torch.distributed.isend(
                tensor=tensor_send_prev,
                dst=get_pipeline_model_parallel_prev_rank(),
                group=even_recv_odd_send_group,
            )
            reqs.append(send_prev_req)
    return reqs




def _communicate(
    *,
    tensor_send_next: Optional[torch.Tensor],
    tensor_send_prev: Optional[torch.Tensor],
    recv_prev: bool,
    recv_next: bool,
    use_ring_exchange=False,
    tensor_shape=None,
    override_scatter_gather_tensors_in_pipeline=False,
    dtype_=None,
    wait_on_reqs: bool = True
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[List]]:
    """Communicate tensors between stages. Used as helper method in other
    communication methods that are used in megatron/schedules.py.

    Args:
        tensor_send_next (torch.Tensor, optional):
            Tensor to send to next rank (no tensor sent if None)

        tensor_send_prev (torch.Tensor, optional):
            Tensor to send to prev rank (no tensor sent if None)

        recv_prev (boolean, required):
            whether tensor should be received from previous rank.

        recv_next (boolean, required):
            whether tensor should be received from next rank.

        use_ring_exchange (boolean, optional, default=False):
            whether torch.distributed.ring_exchange should be used
            instead of individual isend/irecv calls.

        tensor_shape (List[int] or torch.Size, optional):
            shape of tensor to receive; defaults to
            (micro_batch_size, tensor_length, hidden_size) from args.
            This method assumes that all tensors sent and received in a
            single call are the same shape.

        override_scatter_gather_tensors_in_pipeline (boolean, optional):
            when True, the full tensor shape is used for receives even
            if args.scatter_gather_tensors_in_pipeline is set.

        dtype_ (torch.dtype, optional):
            dtype of the receive buffers; defaults to torch.float.

        wait_on_reqs (boolean, optional, default=True):
            For non-batched p2p communication, wait on each request
            before returning.

    Returns:
        tuple containing

        - tensor_recv_prev: torch.Tensor if recv_prev is True, None otherwise.
        - tensor_recv_next: torch.Tensor if recv_next is True, None otherwise.
        - reqs: list of outstanding request handles when wait_on_reqs is
          False; None (or an empty list when nothing was posted) otherwise.
    """
    args = get_args()

    # Placeholder receive buffers; allocated below only when requested.
    tensor_recv_prev = None
    tensor_recv_next = None

    if tensor_shape is None:
        tensor_shape = (args.micro_batch_size, args.tensor_length, args.hidden_size)

    # With the scatter-gather optimization enabled, each rank exchanges only
    # its 1/tp_world_size chunk of the flattened tensor.
    # NOTE(review): unlike _communicate_old, this path neither splits the send
    # tensors nor gathers the received chunks -- confirm callers handle that
    # before enabling scatter_gather_tensors_in_pipeline with this function.
    if not override_scatter_gather_tensors_in_pipeline and \
            args.scatter_gather_tensors_in_pipeline:
        tensor_chunk_shape = reduce(operator.mul, tensor_shape, 1) // \
                             mpu.get_tensor_model_parallel_world_size()
    else:
        tensor_chunk_shape = tensor_shape

    # Honor an explicitly requested receive dtype (this parameter was
    # previously accepted but ignored); default stays float32.
    dtype = dtype_ if dtype_ is not None else torch.float

    recv_prev_shape = tensor_chunk_shape
    recv_next_shape = tensor_chunk_shape

    if recv_prev:
        tensor_recv_prev = torch.empty(
            recv_prev_shape,
            requires_grad=True,
            device=torch.cuda.current_device(),
            dtype=dtype,
        )
    if recv_next:
        tensor_recv_next = torch.empty(
            recv_next_shape,
            requires_grad=True,
            device=torch.cuda.current_device(),
            dtype=dtype,
        )

    # Select the communication primitive: a thin ring-exchange wrapper that
    # returns no handles, or the individual isend/irecv implementation.
    if use_ring_exchange:
        def _ring_exchange_wrapper(**kwargs):
            torch.distributed.ring_exchange(**kwargs)
            return []

        p2p_func = _ring_exchange_wrapper
    else:
        p2p_func = _p2p_ops

    reqs = p2p_func(
        tensor_send_prev=tensor_send_prev,
        tensor_recv_prev=tensor_recv_prev,
        tensor_send_next=tensor_send_next,
        tensor_recv_next=tensor_recv_next,
        group=get_pipeline_model_parallel_group(),
    )

    if wait_on_reqs and len(reqs) > 0:
        for req in reqs:
            req.wait()
        reqs = None

    return tensor_recv_prev, tensor_recv_next, reqs

def _communicate_old(tensor_send_next, tensor_send_prev, recv_prev, recv_next,
                 use_ring_exchange=False, tensor_shape=None,
                 override_scatter_gather_tensors_in_pipeline=False,
                 dtype_=None):
    """Communicate tensors between stages. Used as helper method in other
    communication methods that are used in megatron/schedules.py.

    Takes the following arguments:
        tensor_send_next: tensor to send to next rank (no tensor sent if
                          set to None).
        tensor_send_prev: tensor to send to prev rank (no tensor sent if
                          set to None).
        recv_prev: boolean for whether tensor should be received from
                   previous rank.
        recv_next: boolean for whether tensor should be received from
                   next rank.
        use_ring_exchange: boolean for whether torch.distributed.ring_exchange()
                           API should be used.
        tensor_shape: optional, use when the input sequence contains less
                      tokens than the default sequence length
        override_scatter_gather_tensors_in_pipeline: optional, this is used
                                                     when tensor_shape is
                                                     provided to override
                                                     scatter gather tensors
        dtype_: optional, this is used when tensor_shape is provided and what
                is the type of tensor_shape
    Returns:
        (tensor_recv_prev, tensor_recv_next, reqs)
    """
    args = get_args()

    # Create placeholder tensors for receive in forward and backward directions
    # if needed.
    tensor_recv_prev = None
    tensor_recv_next = None
    # BUGFIX: always define reqs so the return statement cannot raise
    # UnboundLocalError when use_ring_exchange is True or no ops are posted.
    reqs = []

    # Default to the full activation shape from the global args.
    if tensor_shape is None:
        tensor_shape = (args.micro_batch_size, args.tensor_length, args.hidden_size)
    # With scatter-gather enabled, only a 1/tp_world_size chunk of the
    # flattened tensor is exchanged per rank.
    if not override_scatter_gather_tensors_in_pipeline and \
            args.scatter_gather_tensors_in_pipeline:
        tensor_chunk_shape = reduce(operator.mul, tensor_shape, 1) // \
                             mpu.get_tensor_model_parallel_world_size()
    else:
        tensor_chunk_shape = tensor_shape
    dtype = torch.float

    requires_grad = True
    # BUGFIX: the original tested `args.fp16 is not None`, which is true even
    # when the flag is False; test its truthiness instead.
    if args.fp16:
        dtype = torch.half

    if recv_prev:
        tensor_recv_prev = torch.empty(tensor_chunk_shape,
                                       requires_grad=requires_grad,
                                       device=torch.cuda.current_device(),
                                       dtype=dtype)
    if recv_next:
        tensor_recv_next = torch.empty(tensor_chunk_shape,
                                       requires_grad=requires_grad,
                                       device=torch.cuda.current_device(),
                                       dtype=dtype)

    # Split tensor into smaller chunks if using scatter-gather optimization.
    if not override_scatter_gather_tensors_in_pipeline and \
            args.scatter_gather_tensors_in_pipeline:
        if tensor_send_next is not None:
            tensor_send_next = split_tensor_into_1d_equal_chunks(tensor_send_next)

        if tensor_send_prev is not None:
            tensor_send_prev = split_tensor_into_1d_equal_chunks(tensor_send_prev)

    # Send tensors in both the forward and backward directions as appropriate.
    if use_ring_exchange:
        torch.distributed.ring_exchange(tensor_send_prev=tensor_send_prev,
                                        tensor_recv_prev=tensor_recv_prev,
                                        tensor_send_next=tensor_send_next,
                                        tensor_recv_next=tensor_recv_next,
                                        group=mpu.get_pipeline_model_parallel_group())
    else:
        ops = []
        if tensor_send_prev is not None:
            send_prev_op = torch.distributed.P2POp(
                torch.distributed.isend, tensor_send_prev,
                mpu.get_pipeline_model_parallel_prev_rank())
            ops.append(send_prev_op)
        if tensor_recv_prev is not None:
            recv_prev_op = torch.distributed.P2POp(
                torch.distributed.irecv, tensor_recv_prev,
                mpu.get_pipeline_model_parallel_prev_rank())
            ops.append(recv_prev_op)
        if tensor_send_next is not None:
            send_next_op = torch.distributed.P2POp(
                torch.distributed.isend, tensor_send_next,
                mpu.get_pipeline_model_parallel_next_rank())
            ops.append(send_next_op)
        if tensor_recv_next is not None:
            recv_next_op = torch.distributed.P2POp(
                torch.distributed.irecv, tensor_recv_next,
                mpu.get_pipeline_model_parallel_next_rank())
            ops.append(recv_next_op)
        if len(ops) > 0:
            reqs = torch.distributed.batch_isend_irecv(ops)
            for req in reqs:
                req.wait()
    # To protect against race condition when using batch_isend_irecv().
    torch.cuda.synchronize()

    # If using scatter-gather optimization, gather smaller chunks.
    if not override_scatter_gather_tensors_in_pipeline and \
            args.scatter_gather_tensors_in_pipeline:
        if recv_prev:
            tensor_recv_prev = gather_split_1d_tensor(
                tensor_recv_prev).view(tensor_shape).requires_grad_()

        if recv_next:
            tensor_recv_next = gather_split_1d_tensor(
                tensor_recv_next).view(tensor_shape).requires_grad_()

    return tensor_recv_prev, tensor_recv_next, reqs


def recv_forward(tensor_shape=None,
                 override_scatter_gather_tensors_in_pipeline=False,
                 dtype_=None, timers=None):
    """Receive the forward-pass activation from the previous pipeline stage.

    Returns None on the first pipeline stage (there is nothing upstream).
    """
    if mpu.is_pipeline_first_stage():
        return None

    if timers is not None:
        timers('forward-recv').start()
    input_tensor, _, _ = _communicate(
        tensor_send_next=None,
        tensor_send_prev=None,
        recv_prev=True,
        recv_next=False,
        tensor_shape=tensor_shape,
        override_scatter_gather_tensors_in_pipeline=(
            override_scatter_gather_tensors_in_pipeline),
        dtype_=dtype_)
    if timers is not None:
        timers('forward-recv').stop()
    return input_tensor


# Unlike the p2p_communication_2 variant, this signature also exposes
# tensor_shape so callers can describe the incoming gradient tensor.
def recv_backward(tensor_shape=None, timers=None):
    """Receive the output gradient from the next pipeline stage.

    Returns None on the last pipeline stage (there is nothing downstream).
    """
    if mpu.is_pipeline_last_stage():
        return None

    if timers is not None:
        timers('backward-recv').start()
    _, output_tensor_grad, _ = _communicate(
        tensor_shape=tensor_shape,
        tensor_send_next=None,
        tensor_send_prev=None,
        recv_prev=False,
        recv_next=True)
    if timers is not None:
        timers('backward-recv').stop()
    return output_tensor_grad


def send_forward(output_tensor, timers=None,
                 override_scatter_gather_tensors_in_pipeline=False,
                 dtype_=None):
    """Send the forward-pass output to the next pipeline stage.

    No-op on the last pipeline stage.
    """
    if mpu.is_pipeline_last_stage():
        return

    if timers is not None:
        timers('forward-send').start()
    _communicate(
        tensor_send_next=output_tensor,
        tensor_send_prev=None,
        recv_prev=False,
        recv_next=False,
        override_scatter_gather_tensors_in_pipeline=(
            override_scatter_gather_tensors_in_pipeline),
        dtype_=dtype_)
    if timers is not None:
        timers('forward-send').stop()


def send_backward(input_tensor_grad, timers=None):
    """Send the input gradient to the previous pipeline stage.

    No-op on the first pipeline stage.
    """
    if mpu.is_pipeline_first_stage():
        return

    if timers is not None:
        timers('backward-send').start()
    _communicate(
        tensor_send_next=None,
        tensor_send_prev=input_tensor_grad,
        recv_prev=False,
        recv_next=False)
    if timers is not None:
        timers('backward-send').stop()


# NOTE: differs from newer Megatron p2p communication -- the tensor_shape
# argument was dropped from this signature.
def send_forward_recv_backward(output_tensor, timers=None):
    """Send activations forward and receive the gradient back from the next stage.

    Returns None on the last pipeline stage.
    """
    if mpu.is_pipeline_last_stage():
        return None

    if timers is not None:
        timers('forward-send-backward-recv').start()
    _, output_tensor_grad, _ = _communicate(
        tensor_send_next=output_tensor,
        tensor_send_prev=None,
        recv_prev=False,
        recv_next=True)
    if timers is not None:
        timers('forward-send-backward-recv').stop()
    return output_tensor_grad


def send_backward_recv_forward(input_tensor_grad, timers=None):
    """Send the gradient backward and receive activations from the previous stage.

    Returns None on the first pipeline stage.
    """
    if mpu.is_pipeline_first_stage():
        return None

    if timers is not None:
        timers('backward-send-forward-recv').start()
    input_tensor, _, _ = _communicate(
        tensor_send_next=None,
        tensor_send_prev=input_tensor_grad,
        recv_prev=True,
        recv_next=False)
    if timers is not None:
        timers('backward-send-forward-recv').stop()
    return input_tensor


def send_forward_recv_forward(output_tensor, recv_prev,
                              overlap_p2p_comm: bool = False,
                              timers=None):
    """Send activations to the next stage while receiving from the previous one.

    When overlap_p2p_comm is True the p2p requests are not waited on and the
    outstanding handles are returned alongside the received tensor.
    """
    if timers is not None:
        timers('forward-send-forward-recv').start()
    input_tensor, _, wait_handles = _communicate(
        tensor_send_next=output_tensor,
        tensor_send_prev=None,
        recv_prev=recv_prev,
        recv_next=False,
        wait_on_reqs=not overlap_p2p_comm)
    if timers is not None:
        timers('forward-send-forward-recv').stop()
    return (input_tensor, wait_handles) if overlap_p2p_comm else input_tensor


def send_backward_recv_backward(input_tensor_grad, recv_next,
                                overlap_p2p_comm: bool = False,
                                timers=None):
    """Send gradients to the previous stage while receiving from the next one.

    When overlap_p2p_comm is True the p2p requests are not waited on and the
    outstanding handles are returned alongside the received tensor.
    """
    if timers is not None:
        timers('backward-send-backward-recv').start()
    _, output_tensor_grad, wait_handles = _communicate(
        tensor_send_next=None,
        tensor_send_prev=input_tensor_grad,
        recv_prev=False,
        recv_next=recv_next,
        wait_on_reqs=not overlap_p2p_comm)
    if timers is not None:
        timers('backward-send-backward-recv').stop()
    return (output_tensor_grad, wait_handles) if overlap_p2p_comm else output_tensor_grad


def send_forward_backward_recv_forward_backward(
        output_tensor: torch.Tensor,
        input_tensor_grad: torch.Tensor,
        recv_prev: bool,
        recv_next: bool,
        timers=None
):
    """Exchange tensors with both pipeline neighbors in one batched call.

    Sends output_tensor to the next stage and input_tensor_grad to the
    previous stage; receives from either direction per recv_prev/recv_next.
    See _communicate for argument details.
    """
    timer_key = 'forward-backward-send-forward-backward-recv'
    if timers is not None:
        timers(timer_key).start()
    input_tensor, output_tensor_grad, _ = _communicate(
        tensor_send_next=output_tensor,
        tensor_send_prev=input_tensor_grad,
        recv_prev=recv_prev,
        recv_next=recv_next,
    )
    if timers is not None:
        timers(timer_key).stop()
    return input_tensor, output_tensor_grad


# NOTE(review): the triple-quoted string below is an older, logging-heavy
# implementation of these send/recv helpers kept as inert reference text
# (a module-level string literal with no runtime effect). Consider removing
# it once the active implementations above are trusted.
'''
def send_forward(output_tensor):
    next_rank = mpu.get_pipeline_model_parallel_next_rank()
    if mpu.is_pipeline_last_stage():
        logger.info(f"阶段 {mpu.get_pipeline_model_parallel_rank()} 是最后一个阶段；不进行发送。")
        return None
    output_tensor = split_tensor_into_1d_equal_chunks(output_tensor)
    logger.info(f"从 {mpu.get_pipeline_model_parallel_rank()} 发送到 {next_rank},shape:{output_tensor.shape}")
    req = dist.send(tensor=output_tensor, dst=next_rank)
    logger.info(f"从 {mpu.get_pipeline_model_parallel_rank()} 发送到 {next_rank},等待中............")
    # # req.wait()
    logger.info(f"数据已发送到 {next_rank}")

def recv_forward(origin_shape,recv_shape,fp16):
    prev_rank = mpu.get_pipeline_model_parallel_prev_rank()
    if mpu.is_pipeline_first_stage():
        logger.info(f"阶段 {mpu.get_pipeline_model_parallel_rank()} 是第一个阶段；无数据接收。")
        return None
    if fp16:
        recv_tensor = torch.empty(recv_shape,requires_grad=True,dtype=torch.float16,device=torch.cuda.current_device())
    else:
        recv_tensor = torch.empty(recv_shape,requires_grad=True,device=torch.cuda.current_device())
    logger.info(f"在 {mpu.get_pipeline_model_parallel_rank()} 初始化空tensor shape:{recv_shape}")
    logger.info(f"在 {mpu.get_pipeline_model_parallel_rank()} 从 {prev_rank} 接收数据")
    req = dist.recv(tensor=recv_tensor, src=prev_rank)
    logger.info(f"数据从 {prev_rank} 接收中..............")
    # req.wait()
    logger.info(f"数据已从 {prev_rank} 接收,shape:{recv_tensor.shape}")
    recv_tensor = gather_split_1d_tensor(recv_tensor).view(origin_shape).requires_grad_()
    return recv_tensor


def recv_backward(origin_shape,recv_bwd_tensor_shape,fp16):
    if mpu.is_pipeline_last_stage():
        logger.info(f"阶段 {mpu.get_pipeline_model_parallel_rank()} 是最后一个阶段；无数据接收。")

        return None
    logger.info(f"在 {mpu.get_pipeline_model_parallel_rank()} 初始化空tensor shape:{recv_bwd_tensor_shape}")
    if fp16:
        recv_tensor = torch.empty(recv_bwd_tensor_shape,requires_grad=True,dtype=torch.float16,device=torch.cuda.current_device())
    else:
        recv_tensor = torch.empty(recv_bwd_tensor_shape,requires_grad=True,device=torch.cuda.current_device())
    logger.info(f"在 {mpu.get_pipeline_model_parallel_rank()} 从 {mpu.get_pipeline_model_parallel_next_rank()} 接收数据")
    req = dist.irecv(tensor=recv_tensor, src=mpu.get_pipeline_model_parallel_next_rank())
    logger.info(f"数据从 {mpu.get_pipeline_model_parallel_next_rank()} 接收中..............")
    # req.wait()  # wait the req finished
    logger.info(f"数据已从 {mpu.get_pipeline_model_parallel_next_rank()} 接收,shape:{recv_tensor.shape}")
    recv_tensor = gather_split_1d_tensor(recv_tensor).view(origin_shape).requires_grad_()
    # recv_tensor.retain_grad()  # 确保梯度被保留
    return recv_tensor

def send_backward(input_tensor_grad):
    if mpu.is_pipeline_first_stage():
        logger.info(f"阶段 {mpu.get_pipeline_model_parallel_rank()} 是第一个阶段；不进行发送。")
        return None
    input_tensor_grad = split_tensor_into_1d_equal_chunks(input_tensor_grad)
    logger.info(f"从 {mpu.get_pipeline_model_parallel_rank()} 发送到 {mpu.get_pipeline_model_parallel_prev_rank()},shape:{input_tensor_grad.shape}")
    req = dist.isend(tensor=input_tensor_grad, dst=mpu.get_pipeline_model_parallel_prev_rank())
    logger.info(f"从 {mpu.get_pipeline_model_parallel_rank()} 发送到 {mpu.get_pipeline_model_parallel_prev_rank()},等待中............")
    # req.wait()  # wait the req finished
    logger.info(f"数据已发送到 {mpu.get_pipeline_model_parallel_prev_rank()}")


def send_forward_recv_backward(origin_shape,tensor_chunk_shape,output_tensor, fp16=False):
    """Batched send and recv with next rank in pipeline."""
    if mpu.is_pipeline_last_stage():
        tensor_recv_next = None
    else:
        if fp16:
            tensor_recv_next = torch.empty(tensor_chunk_shape,
                                        requires_grad=True,
                                        device=torch.cuda.current_device(),
                                        dtype=torch.float16)
        else:
            tensor_recv_next = torch.empty(tensor_chunk_shape,
                                        requires_grad=True,
                                        device=torch.cuda.current_device(),
                                        dtype=torch.float32) 
        # send output
        tensor_send_next = split_tensor_into_1d_equal_chunks(output_tensor)
        ops = []

        if tensor_send_next is not None:
            send_next_op = torch.distributed.P2POp(
                torch.distributed.isend, tensor_send_next,
                mpu.get_pipeline_model_parallel_next_rank())
            ops.append(send_next_op)
        if tensor_recv_next is not None:
            recv_next_op = torch.distributed.P2POp(
                torch.distributed.irecv, tensor_recv_next,
                mpu.get_pipeline_model_parallel_next_rank())
            ops.append(recv_next_op)
        if len(ops) > 0:
            reqs = torch.distributed.batch_isend_irecv(ops)
            for req in reqs:
                req.wait()
        tensor_recv_next = gather_split_1d_tensor(
                tensor_recv_next).view(origin_shape).requires_grad_()
    return tensor_recv_next


def send_backward_recv_forward(origin_shape,tensor_chunk_shape,input_tensor_grad,fp16):
    """Batched send and recv with previous rank in pipeline."""
    if mpu.is_pipeline_first_stage():
        tensor_recv_prev = None
    else:
        tensor_recv_prev = None
        if fp16:
            tensor_recv_prev = torch.empty(tensor_chunk_shape,
                                        requires_grad=True,
                                        device=torch.cuda.current_device(),
                                        dtype=torch.float16)
        else:
            tensor_recv_prev = torch.empty(tensor_chunk_shape,
                                       requires_grad=True,
                                       device=torch.cuda.current_device(),
                                       dtype=torch.float32)
        tensor_send_prev = split_tensor_into_1d_equal_chunks(input_tensor_grad)
        ops = []
        if tensor_recv_prev is not None:
            recv_prev_op = torch.distributed.P2POp(
                torch.distributed.irecv, tensor_recv_prev,
                mpu.get_pipeline_model_parallel_prev_rank())
            ops.append(recv_prev_op)
        if tensor_send_prev is not None:
            send_prev_op = torch.distributed.P2POp(
                torch.distributed.isend, tensor_send_prev,
                mpu.get_pipeline_model_parallel_prev_rank())
            ops.append(send_prev_op)
        if len(ops) > 0:
            reqs = torch.distributed.batch_isend_irecv(ops)
            for req in reqs:
                req.wait()
        torch.cuda.synchronize()

        tensor_recv_prev = gather_split_1d_tensor(
            tensor_recv_prev).view(origin_shape).requires_grad_()
    return tensor_recv_prev
    '''
