from . import p2p_communication
from ..megatron import mpu
from torch.nn import CrossEntropyLoss



'''
Functionality: executes the non-interleaved 1F1B (one-forward-one-backward)
pipeline-parallel schedule.
Author: Chuhongjie
Date: 2024-06-13
'''

# TODO(review): clarify why `device` is passed explicitly — is it for SGP?
def forward_step(forward_step_func, data_iterator, model, input_tensor, device):
    """Forward step for the passed-in model shard.

    If this is the first pipeline stage, the input is read from
    ``data_iterator``; otherwise the ``input_tensor`` received from the
    previous stage is used.

    Args:
        forward_step_func: kept for interface compatibility; currently
            unused (the model is called directly below).
        data_iterator: iterator yielding dicts with an ``'input_ids'``
            tensor (consumed on the first stage) and a ``'label'`` tensor
            (consumed on the last stage).
        model: this stage's model shard. Non-first stages are called with
            ``inputs_embeds`` because the embedding layer lives on stage 0.
        input_tensor: activation received from the previous stage, or
            ``None`` on the first stage.
        device: torch device the inputs are moved to.

    Returns:
        Tuple ``(output_tensor, loss)``; ``loss`` is ``None`` on every
        stage except the last.
    """
    if input_tensor is not None:
        # Not the first stage: run on the activations from the previous stage.
        input_tensor = input_tensor.to(device)
        output_tensor = model(inputs_embeds=input_tensor)
    else:
        # First stage: pull token ids from the dataset. The first stage's
        # shard contains the embedding layer, hence `input_ids`.
        input_tensor = next(data_iterator)['input_ids'].to(device)
        output_tensor = model(input_ids=input_tensor)
    loss = None
    if mpu.is_pipeline_last_stage():
        # Last stage: compute the loss for this microbatch.
        # NOTE(review): assumes the iterator on the last stage yields labels
        # aligned with the activations just received — confirm with caller.
        loss = get_loss(output_tensor, next(data_iterator)['label'])
    # Retain the non-leaf output's gradient so backward_step can read
    # output_tensor.grad later. (The original redundantly called
    # retain_grad() twice on the last stage; once is sufficient.)
    output_tensor.retain_grad()

    return output_tensor, loss



# TODO(review): document why both input_tensor and output_tensor are needed here.
def backward_step(input_tensor, output_tensor, output_tensor_grad, loss=None):
    """Backward step through the passed-in output tensor.

    On the last stage ``output_tensor_grad`` is ``None`` and backprop is
    started from ``loss``; on every other stage the gradient received from
    the next stage is propagated with
    ``output_tensor.backward(output_tensor_grad)``.

    Args:
        input_tensor: activation received from the previous stage, or
            ``None`` on the first stage.
        output_tensor: this stage's forward output.
        output_tensor_grad: gradient of the loss w.r.t. ``output_tensor``,
            or ``None`` on the last stage.
        loss: scalar loss tensor; required when ``output_tensor_grad`` is
            ``None``.

    Returns:
        Tuple ``(input_tensor_grad, output_tensor_grad)``;
        ``input_tensor_grad`` is ``None`` on the first stage.

    Raises:
        ValueError: if ``output_tensor_grad`` is ``None`` and no ``loss``
            was provided.
    """
    if input_tensor is not None:
        # Keep the gradient on the (possibly non-leaf) received activation
        # so it can be sent to the previous stage afterwards.
        input_tensor.retain_grad()

    if output_tensor_grad is None:
        # Last stage: no incoming gradient; start backprop from the loss.
        # (Originally a missing loss surfaced as an opaque AttributeError
        # on loss.backward(); fail fast with a clear message instead.)
        if loss is None:
            raise ValueError(
                "backward_step: loss must be provided when "
                "output_tensor_grad is None (last pipeline stage)")
        output_tensor.retain_grad()
        loss.backward()
        output_tensor_grad = output_tensor.grad.clone()
    else:
        # Intermediate/first stage: propagate the received gradient.
        output_tensor.backward(output_tensor_grad)

    input_tensor_grad = None
    if input_tensor is not None:
        input_tensor_grad = input_tensor.grad
    return input_tensor_grad, output_tensor_grad

def forward_step_func(input_tensor, model):
    """Apply ``model`` to ``input_tensor`` and return the model's output."""
    result = model(input_tensor)
    return result

def get_loss(lm_logits, labels):
    """Next-token cross-entropy loss (same scheme as transformers *LMHeadModel).

    The logits at position t predict the token at position t+1, so the last
    logit and the first label are dropped before flattening for the loss.
    """
    labels = labels.to(lm_logits.device)
    # Align predictions with their next-token targets.
    logits_shifted = lm_logits[..., :-1, :].contiguous()
    labels_shifted = labels[..., 1:].contiguous()
    vocab_size = logits_shifted.size(-1)
    criterion = CrossEntropyLoss()
    return criterion(logits_shifted.view(-1, vocab_size),
                     labels_shifted.view(-1))


# TODO(review): clarify why the full gpus_list is passed in — only gpus_list[0] is used.
def forward_backward_pipelining_without_interleaving(forward_step_func, data_iterator,
                                                     model,
                                                     forward_only, micro_batch, gpus_list,
                                                     recv_tensor_shape):
    """Run the non-interleaved 1F1B schedule with p2p communication between
    pipeline stages.

    Args:
        forward_step_func: forwarded to ``forward_step`` (currently unused
            there; kept for interface compatibility).
        data_iterator: per-stage data iterator (see ``forward_step``).
        model: this stage's model shard.
        forward_only: if True, skip the cool-down backward passes.
        micro_batch: number of microbatches in the global batch.
        gpus_list: ``gpus_list[0]`` is used both as the compute device and
            as the peer argument for p2p communication.
        recv_tensor_shape: shape of the activation/gradient tensors
            exchanged between adjacent stages.

    Returns:
        Mean loss over the steady-state microbatches on the last stage,
        ``None`` on every other stage.
    """
    # Warm-up: each stage runs (world_size - rank - 1) forward-only steps
    # so the pipeline fills before the steady 1F1B phase starts.
    # (Removed the original no-op `micro_batch = micro_batch`.)
    num_warmup_microbatches = \
        (mpu.get_pipeline_model_parallel_world_size() -
         mpu.get_pipeline_model_parallel_rank() - 1)
    num_warmup_microbatches = min(
        num_warmup_microbatches,
        micro_batch)
    num_microbatches_remaining = \
        micro_batch - num_warmup_microbatches
    # Activations are stashed here between a microbatch's forward pass and
    # its (later) backward pass.
    input_tensors = []
    output_tensors = []
    loss_list = []
    recv_fwd_tensor_shape = recv_tensor_shape
    recv_bwd_tensor_shape = recv_tensor_shape

    # Warm-up phase: forward passes only.
    for i in range(num_warmup_microbatches):
        # Receive activations from the previous stage; None on stage 0, in
        # which case forward_step reads a microbatch from the data iterator.
        input_tensor = p2p_communication.recv_forward(recv_fwd_tensor_shape, gpus_list[0])
        output_tensor, loss = forward_step(forward_step_func, data_iterator, model,
                                           input_tensor, gpus_list[0])
        p2p_communication.send_forward(output_tensor)
        # Stash the pair for the matching backward pass later.
        input_tensors.append(input_tensor)
        output_tensors.append(output_tensor)

    # Receive the input for the first steady-state microbatch.
    if num_microbatches_remaining > 0:
        input_tensor = p2p_communication.recv_forward(recv_fwd_tensor_shape, gpus_list[0])

    # Steady 1F1B phase. Each iteration:
    #   1. forward, send activations forward
    #   2. receive gradient, backward
    #   3. send gradient backward, receive next activations
    for i in range(num_microbatches_remaining):
        last_iteration = (i == (num_microbatches_remaining - 1))
        output_tensor, loss = forward_step(forward_step_func, data_iterator, model,
                                           input_tensor, gpus_list[0])
        if loss is not None:
            loss_list.append(loss.item())
        p2p_communication.send_forward(output_tensor)
        output_tensor_grad = p2p_communication.recv_backward(recv_bwd_tensor_shape, gpus_list[0])

        input_tensors.append(input_tensor)
        output_tensors.append(output_tensor)

        # Backward for the OLDEST stashed microbatch (FIFO order).
        # NOTE(review): `loss` belongs to the CURRENT microbatch; it is only
        # consumed on the last stage, where num_warmup_microbatches == 0 and
        # the popped microbatch is the current one — confirm this invariant.
        input_tensor, output_tensor = input_tensors.pop(0), output_tensors.pop(0)
        input_tensor_grad, output_tensor_grad = \
            backward_step(input_tensor, output_tensor,
                          output_tensor_grad, loss)
        if last_iteration:
            input_tensor = None
            p2p_communication.send_backward(input_tensor_grad)
        else:
            input_tensor = p2p_communication.recv_forward(recv_fwd_tensor_shape, gpus_list[0])
            p2p_communication.send_backward(input_tensor_grad)

    # Cool-down phase: drain the backward passes left over from warm-up.
    if not forward_only:
        for i in range(num_warmup_microbatches):
            input_tensor = input_tensors.pop(0)
            output_tensor = output_tensors.pop(0)
            output_tensor_grad = p2p_communication.recv_backward(recv_bwd_tensor_shape, gpus_list[0])
            input_tensor_grad, output_tensor_grad = \
                backward_step(input_tensor, output_tensor,
                              output_tensor_grad)
            p2p_communication.send_backward(input_tensor_grad)

    loss = None
    if loss_list:
        loss = sum(loss_list) / len(loss_list)
    return loss
