from functools import reduce
import operator
import torch
import torch.distributed as dist


from ..megatron import mpu

'''
Functionality: execute the tensor send/recv operations for the 1F1B pipeline schedule
Author: Chuhongjie
Date: 2024-06-13
'''

import logging
import os

# Read the log directory from the LOG_DIR environment variable (default: ./logs).
log_dir = os.environ.get('LOG_DIR', './logs')
os.makedirs(log_dir, exist_ok=True)

# Log INFO and above both to a file under log_dir and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[
        logging.FileHandler(os.path.join(log_dir, "pipeline_log.log")),
        logging.StreamHandler()
    ]
)

# Module-level logger used by all send/recv helpers below.
logger = logging.getLogger(__name__)
def send_forward(output_tensor):
    """Send this stage's forward-pass activation to the next pipeline stage.

    Blocks until the send completes (isend + wait), matching the
    communication style of send_backward(). No-op on the last stage.

    Args:
        output_tensor: activation tensor produced by this stage's forward pass.

    Returns:
        None.
    """
    rank = mpu.get_pipeline_model_parallel_rank()
    if mpu.is_pipeline_last_stage():
        logger.info(f"阶段 {rank} 是最后一个阶段；不进行发送。")
        return None
    next_rank = mpu.get_pipeline_model_parallel_next_rank()
    logger.info(f"从 {rank} 发送到 {next_rank},shape:{output_tensor.shape},date:{output_tensor[0,1,:5]}")
    # isend + wait is equivalent to the blocking dist.send the original used,
    # but is consistent with send_backward() and yields a real request handle
    # (dist.send returns None, so the original `req` was always dead).
    req = dist.isend(tensor=output_tensor, dst=next_rank)
    logger.info(f"从 {rank} 发送到 {next_rank},等待中............")
    req.wait()
    logger.info(f"数据已发送到 {next_rank}")


# TODO: When receiving a tensor we must pre-allocate a buffer, and that buffer must track
# gradients — otherwise the backward pass cannot obtain gradients for it. The received tensor
# is placed in the input[] queue and used during backpropagation, so gradient info must be kept.
# TODO: Why must this tensor also be moved onto the GPU, and why onto gpus_list[0] rather than
# simply the current local_rank (isn't that a global variable)? ---> gpus_list[0] belongs to SGP.
def recv_forward(recv_shape, device):
    """Receive the forward-pass activation from the previous pipeline stage.

    Allocates a gradient-tracking buffer on `device`, blocks until the data
    arrives (irecv + wait, consistent with recv_backward()), and retains the
    buffer's grad so the backward pass can read .grad and send it upstream.

    Args:
        recv_shape: expected shape of the incoming activation tensor.
        device: device on which the receive buffer must live.

    Returns:
        The received tensor, or None on the first pipeline stage.
    """
    rank = mpu.get_pipeline_model_parallel_rank()
    if mpu.is_pipeline_first_stage():
        logger.info(f"阶段 {rank} 是第一个阶段；无数据接收。")
        return None
    prev_rank = mpu.get_pipeline_model_parallel_prev_rank()
    logger.info(f"在 {rank} 初始化空tensor shape:{recv_shape}")
    # torch.empty allocates on CPU; .to(device) moves it to this stage's device.
    # The .to() also makes the tensor non-leaf, hence retain_grad() below.
    recv_tensor = torch.empty(recv_shape, requires_grad=True).to(device)
    logger.info(f"在 {rank} 从 {prev_rank} 接收数据")
    # irecv + wait is equivalent to the blocking dist.recv the original used,
    # but is consistent with recv_backward() (dist.recv returns no handle,
    # so the original `req` and commented-out wait were dead code).
    req = dist.irecv(tensor=recv_tensor, src=prev_rank)
    logger.info(f"数据从 {prev_rank} 接收中..............")
    req.wait()
    logger.info(f"数据已从 {prev_rank} 接收,shape:{recv_tensor.shape},date:{recv_tensor[0,1,:5]}")
    # Keep .grad populated on this non-leaf tensor; mirrors recv_backward()
    # and is required so the backward pass can forward the gradient upstream.
    recv_tensor.retain_grad()
    return recv_tensor


def recv_backward(recv_bwd_tensor_shape, device):
    """Receive the gradient of this stage's output from the next pipeline
    stage during the backward pass.

    Args:
        recv_bwd_tensor_shape: expected shape of the incoming gradient tensor.
        device: device on which the receive buffer must live.

    Returns:
        The received gradient tensor, or None on the last stage (nothing
        downstream to receive from).
    """
    stage = mpu.get_pipeline_model_parallel_rank()
    if mpu.is_pipeline_last_stage():
        logger.info(f"阶段 {stage} 是最后一个阶段；无数据接收。")
        
        return None
    src = mpu.get_pipeline_model_parallel_next_rank()
    logger.info(f"在 {stage} 初始化空tensor shape:{recv_bwd_tensor_shape}")
    # Allocate a grad-tracking buffer, then move it onto this stage's device.
    grad_buf = torch.empty(recv_bwd_tensor_shape, requires_grad=True).to(device)
    logger.info(f"在 {stage} 从 {src} 接收数据")
    handle = dist.irecv(tensor=grad_buf, src=src)
    logger.info(f"数据从 {src} 接收中..............")
    handle.wait()  # block until the receive has completed
    logger.info(f"数据已从 {src} 接收,shape:{grad_buf.shape},date:{grad_buf[0,1,:5]}")
    grad_buf.retain_grad()  # keep .grad available on this non-leaf tensor
    return grad_buf

def send_backward(input_tensor_grad):
    """Send the gradient of this stage's input to the previous pipeline
    stage during the backward pass.

    Blocks until the send completes. No-op on the first stage (nothing
    upstream to send to).

    Args:
        input_tensor_grad: gradient tensor w.r.t. this stage's input.

    Returns:
        None.
    """
    stage = mpu.get_pipeline_model_parallel_rank()
    if mpu.is_pipeline_first_stage():
        logger.info(f"阶段 {stage} 是第一个阶段；不进行发送。")
        return None
    dst = mpu.get_pipeline_model_parallel_prev_rank()
    logger.info(f"从 {stage} 发送到 {dst},shape:{input_tensor_grad.shape},date:{input_tensor_grad[0,1,:5]}")
    handle = dist.isend(tensor=input_tensor_grad, dst=dst)
    logger.info(f"从 {stage} 发送到 {dst},等待中............")
    handle.wait()  # block until the send has completed
    logger.info(f"数据已发送到 {dst}")
