# coding=utf-8
# copyright (c) 2024 tencent inc. all rights reserved.
# xiaotaoliu@tencent.com, guanyouhe@tencent.com, nrwu@tencent.com

from abc import ABC, abstractclassmethod
from copy import deepcopy
from typing import Literal, Optional, Tuple, Union, Dict, Callable, List
from enum import Enum
from datetime import datetime
import logging
import random
import math
import sys

from einops import rearrange
from torch import Tensor
from unittest.mock import patch
import torch
import torch.nn.functional as F

from megatron.core import mpu, parallel_state, tensor_parallel, InferenceParams
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.packed_seq_params import PackedSeqParams
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
from megatron.core.tensor_parallel.layers import RowParallelLinear
from megatron.core.transformer.enums import AttnMaskType, ModelType
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.spec_utils import ModuleSpec
from megatron.core.transformer.transformer_block import TransformerBlock
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.utils import divide
from megatron.core.utils import make_tp_sharded_tensor_for_checkpoint

from gpatch.core.aligner_helper import get_iterator_k_split
from gpatch.core.aligner_helper import get_ltor_masks_and_position_ids
from gpatch.core.aligner_interface import SupervisedInterface, Inferrable, ResetArgsMixin
from gpatch.core.device_type import is_wxacc1
from gpatch.core.model_parallel_config import ForcedConfig


class StateDictState(Enum):
    """Tag distinguishing a critic-style from a reward-style state dict.

    NOTE(review): not referenced anywhere in this chunk — presumably used by
    checkpoint load/save code elsewhere in the file/repo; verify before
    changing the values.
    """
    CRITIC = 0
    REWARD = 1


class RewardModelHead(RowParallelLinear):
    """
    Reward model head to convert from output_size to scalar reward.

    Implemented as a `RowParallelLinear` whose config is forced to
    `sequence_parallel=False` and `params_dtype=dtype`, so the head can run
    in a different precision (typically FP32) than the transformer trunk.
    """

    def __init__(
        self,
        input_size,
        output_size,
        *,
        config: TransformerConfig,
        init_method: Callable,
        bias: bool,
        input_is_parallel: bool,
        skip_bias_add: bool,
        stride: int = 1,
        # RM args
        output_sequence: bool = False,
        output_scalar: bool = True,
        use_avg_pool: bool = False,
        dtype: torch.dtype = torch.float32,
        merge_attributes: bool = False,
        attributes_weights: Optional[List[Union[float, int]]] = None,
    ):
        assert output_size > 0, "Output size of reward model head should be greater than zero"
        assert not input_is_parallel  # Don't know why Nv do so, remain unanswered.
        # Keep the caller's original (un-forced) config around for reference.
        self.yet_forced_config = config
        forced_config = ForcedConfig(config, params_dtype=dtype, sequence_parallel=False)

        super().__init__(
            input_size,
            output_size,
            config=forced_config,
            init_method=init_method,
            bias=bias,
            input_is_parallel=input_is_parallel,
            skip_bias_add=skip_bias_add,
            stride=stride,
        )

        # This logic is a bit special; it is split into several cases:
        #
        # 1. There is no configuration where both flags are False;
        # 2. critic currently only uses output_sequence (i.e. per-token reward);
        # 3. reward, following the PPO paper, defaults to sequence-level
        #    reward, i.e. output_scalar;
        # 4. both flags being True is also meant to be supported.
        #
        # Mnemonic worth memorizing:
        #  output_sequence emits a sequence -> token    -level reward,
        #  output_scalar   emits a scalar   -> sequence -level reward.
        self.output_sequence = output_sequence
        self.output_scalar = output_scalar

        self.use_avg_pool = use_avg_pool
        self.dtype = dtype
        self.merge_attributes = merge_attributes

        # Per-attribute mixing weights; default is a uniform average over
        # the `output_size` attributes.
        if attributes_weights is None:
            self.attributes_weights = torch.full((self.output_size, ), 1.0 / self.output_size)
        else:
            self.attributes_weights = torch.tensor(attributes_weights, dtype=torch.float)

        assert self.attributes_weights.size(0) == self.output_size

    @torch.no_grad()
    def debug_values(self, hidden_states, lengths):
        """Debug helper: occasionally print min/max per-token rewards.

        Only prints from global rank 7, and only on roughly 5% of calls
        (random sampling), to keep log volume down.
        """
        seqlen = hidden_states.size(0)
        # [S x 1] < [B] broadcasts to an [S x B] validity mask.
        mask = torch.arange(seqlen, device=lengths.device).unsqueeze(-1) < lengths

        per_token_rewards = super().forward(hidden_states.to(
            self.weight.dtype))[0]  # [S x B x self.output_size]
        per_token_rewards = (per_token_rewards.squeeze(-1) * mask.float()).permute(1, 0)

        if torch.distributed.get_rank() == 7:
            if random.randint(0, 1000) < 50:
                print(
                    f'per_token_rewards {torch.min(per_token_rewards)} {torch.max(per_token_rewards)}'
                )

    def _compute_attributes(self, hidden_states, lengths, prompt_lens):
        """
        for critic, return a tensor with shape [B x S x self.output_size]
        for reward, return a tensor with shape [B x self.output_size]

        Returns (seq_out, scal_out); each is None when the corresponding
        output flag is disabled.
        """

        # we sometimes want to run our RM head in FP32, this allows it
        autocast_context = torch.autocast(device_type=hidden_states.device.type, dtype=self.dtype)

        # hidden_size is S x B x D
        if not self.output_sequence:
            seq_out = None
        else:
            with autocast_context:
                output = super().forward(hidden_states.to(
                    self.weight.dtype))[0]  # [S x B x self.output_size]

            # Making it contiguous is required at least by `torch.distributed.gather()`.
            seq_out = output.permute(1, 0, 2).contiguous()  # [B x S x self.output_size]

        if not self.output_scalar:
            scal_out = None
        else:
            if self.use_avg_pool:
                # lengths is shape B and arange is shape S, broadcast it to S x B
                # S x 1 op with B -> mask for S x B
                if prompt_lens is None:
                    mask = torch.arange(hidden_states.size(0),
                                        device=lengths.device).unsqueeze(-1) < lengths
                else:
                    # Mask covers only response tokens: prompt_lens <= pos < lengths.
                    mask = torch.arange(hidden_states.size(0), device=lengths.device).unsqueeze(-1)
                    mask = torch.logical_and(mask < lengths, mask >= prompt_lens)

                # S x B x D * S x B x 1
                last_state = (hidden_states * mask.unsqueeze(-1)).sum(0)

                # divide by mean post hoc
                # sum implicitly casts back to fp32, but the autocast will handle it below if needed
                # NOTE(review): when prompt_lens is set, the sum covers only
                # response tokens but is still divided by the full `lengths` —
                # confirm whether (lengths - prompt_lens) was intended.
                last_state = last_state / lengths.unsqueeze(-1)
            else:
                # Pick the hidden state of the last valid token of each sequence.
                last_state = hidden_states[
                    lengths - 1,
                    torch.arange(lengths.shape[0], device=hidden_states.device), :]

            # B x D -> 1 x B x D b/c RowParallel wants S x B x D
            last_state = last_state.unsqueeze(0)

            # squeeze out the S term on dim 0, we always add bias
            with autocast_context:
                scal_out = super().forward(last_state.to(self.weight.dtype))[0].squeeze(0)

        return seq_out, scal_out

    def forward(self, hidden_states, lengths, prompt_lens, return_attributes=False):
        """Compute rewards from trunk hidden states.

        Returns a (scores_seq, scores_scal) pair; entries are None when the
        corresponding output flag is off.  With `return_attributes=True` the
        raw per-attribute tensors are returned (cast to float) instead of the
        weighted scores.
        """
        assert self.output_sequence or self.output_scalar

        seq_out, scal_out = self._compute_attributes(
            hidden_states, lengths,
            prompt_lens)  # [B x S x self.output_size] or [B x self.output_size]

        if return_attributes:
            if seq_out is not None:
                seq_out = seq_out.float()
            if scal_out is not None:
                scal_out = scal_out.float()
            return seq_out, scal_out

        attributes = seq_out if seq_out is not None else scal_out
        self.attributes_weights = self.attributes_weights.to(attributes.device)
        assert self.attributes_weights.dtype == torch.float32

        if seq_out is None:
            scores_seq = None
        else:
            # a sequence of multi-attribute rewards, used for critic model, returning tensor with shape [B x S]
            attributes = seq_out
            assert attributes.dim(
            ) == 3, "for critic, attributes should have shape [B x S x self.output_size]"
            scores_seq = (attributes @ self.attributes_weights.to(attributes.dtype)).float()

        if scal_out is None:
            scores_scal = None
        else:
            attributes = scal_out
            assert attributes.dim(
            ) == 2, "for reward, attributes should have shape [B x self.output_size]"
            if not self.merge_attributes:
                # do not merge attributes during regression rm training ([B, output_size])
                scores_scal = attributes.float()
            else:
                # during ppo, returning tensor with shape [B, 1]
                scores_scal = (attributes @ self.attributes_weights.to(
                    attributes.dtype)).unsqueeze(-1).float()

        return scores_seq, scores_scal


class MultiLayerRMHead(MegatronModule):
    """Two-layer MLP reward head (Linear -> ReLU -> Linear).

    A quick implementation that only targets the 2-layer RM head of
    Qwen 2.5 RM; it does not support multiple reward attributes.
    """

    def __init__(
        self,
        input_size,
        output_size,
        *,
        config: TransformerConfig,
        init_method: Callable,
        stride: int = 1,
        # RM args
        output_sequence: bool = False,
        output_scalar: bool = True,
        use_avg_pool: bool = False,
        dtype: torch.dtype = torch.float32,
    ):
        super().__init__(config=config)
        self.input_size = input_size
        self.output_size = output_size

        self.stride = stride
        # output_sequence -> per-token rewards; output_scalar -> per-sequence reward.
        self.output_sequence = output_sequence
        self.output_scalar = output_scalar
        self.use_avg_pool = use_avg_pool
        self.dtype = dtype
        self.config = config
        self.init_method = init_method

        self.score = torch.nn.Sequential(torch.nn.Linear(self.input_size, self.config.hidden_size),
                                         torch.nn.ReLU(),
                                         torch.nn.Linear(self.config.hidden_size, self.output_size))
        for module in self.score:
            if isinstance(module, torch.nn.Linear):
                self.init_method(module.weight)

    def forward(self, hidden_states, lengths, prompt_lens, return_attributes=False):
        """Compute rewards from trunk hidden states.

        hidden_states is [S x B x D].  Returns (seq_out, scal_out); each is
        None when the corresponding output flag is off.  `prompt_lens` is
        accepted for interface parity but unused (no avg-pool support here).
        """
        assert not return_attributes
        # we sometimes want to run our RM head in FP32, this allows it
        autocast_context = torch.autocast(device_type=hidden_states.device.type, dtype=self.dtype)

        if not self.output_sequence:
            seq_out = None
        else:
            with autocast_context:
                output = self.score(hidden_states.to(self.score[0].weight.dtype))

            # Making it contiguous is required at least by `torch.distributed.gather()`.
            seq_out = output.permute(1, 0, 2).contiguous()  # [B x S x self.output_size]
            # Single-attribute head: drop the trailing size-1 dim -> [B x S].
            seq_out = seq_out.squeeze(-1).float()
        if not self.output_scalar:
            scal_out = None
        else:
            # avg-pool is disabled for the multi-layer RM head.
            # BUGFIX(cleanup): the avg-pool branch that used to follow this
            # assert was unreachable dead code (duplicated from
            # RewardModelHead) — removed.
            assert not self.use_avg_pool, "qwen-rm-72B multi-layer rm does not support avg_pool"
            # Pick the hidden state of the last valid token of each sequence.
            last_state = hidden_states[
                lengths - 1,
                torch.arange(lengths.shape[0], device=hidden_states.device), :]
            # B x D -> 1 x B x D b/c RowParallel wants S x B x D
            last_state = last_state.unsqueeze(0)

            # squeeze out the S term on dim 0, we always add bias
            with autocast_context:
                scal_out = self.score(last_state.to(self.score[0].weight.dtype)).squeeze(0).float()

        return seq_out, scal_out


# TODO(@xiaotaoliu, @guanyouhe): turn this into a proxy wrapper too (like the PPO actor)
class GptRewardModel(GPTModel):
    """GPT backbone with a reward-model head on the last pipeline stage.

    The usual LM output layer is bypassed (see `forward`) and the final
    hidden states are fed to `self.rm_head` (single- or multi-layer,
    selected by `config.rm_head_arch`), producing per-token and/or
    per-sequence rewards.  Also hosts the reward-model training losses.
    """

    def __init__(
        self,
        config: TransformerConfig,
        transformer_layer_spec: ModuleSpec,
        vocab_size: int,
        max_sequence_length: int,
        pre_process: bool = True,
        post_process: bool = True,
        fp16_lm_cross_entropy: bool = False,
        parallel_output: bool = True,
        share_embeddings_and_output_weights: bool = False,
        position_embedding_type: Literal['learned_absolute', 'rope', 'none'] = 'learned_absolute',
        rotary_percent: float = 1.0,
        rotary_base: int = 10000,
        rope_scaling: bool = False,
        rope_scaling_factor: float = 8.0,
        scatter_embedding_sequence_parallel: bool = True,
        seq_len_interpolation_factor: Optional[float] = None,
        # RM args
        output_sequence: bool = False,
        output_scalar: bool = True,
        use_avg_pool: bool = False,
        head_dtype: torch.dtype = None,
        num_attributes: int = 1,
        attribute_weights: Optional[List[Union[float, int]]] = None,
        merge_attributes: bool = False,
        mask_prompt: bool = False,
    ):
        GPTModel.__init__(
            self,
            config,
            transformer_layer_spec,
            vocab_size,
            max_sequence_length,
            pre_process=pre_process,
            post_process=post_process,
            fp16_lm_cross_entropy=fp16_lm_cross_entropy,
            parallel_output=parallel_output,
            share_embeddings_and_output_weights=share_embeddings_and_output_weights,
            position_embedding_type=position_embedding_type,
            rotary_percent=rotary_percent,
            rotary_base=rotary_base,
            rope_scaling=rope_scaling,
            rope_scaling_factor=rope_scaling_factor,
            scatter_embedding_sequence_parallel=scatter_embedding_sequence_parallel,
            seq_len_interpolation_factor=seq_len_interpolation_factor,
        )
        self.num_attributes = num_attributes
        self.mask_prompt = mask_prompt

        assert self.config.expert_model_parallel_size >= 1

        # The reward head only exists on the last pipeline stage.
        if self.post_process and self.config.rm_head_arch == "single_layer":
            self.rm_head = RewardModelHead(
                self.config.hidden_size,
                num_attributes,
                config=config,
                init_method=self.config.init_method,
                bias=False,
                input_is_parallel=False,
                skip_bias_add=False,
                output_sequence=output_sequence,
                output_scalar=output_scalar,
                use_avg_pool=use_avg_pool,
                dtype=config.params_dtype if head_dtype is None else head_dtype,
                merge_attributes=merge_attributes,
                attributes_weights=attribute_weights,
            )
        elif self.post_process and self.config.rm_head_arch == "multi_layers":
            self.rm_head = MultiLayerRMHead(
                self.config.hidden_size,
                num_attributes,
                config=config,
                init_method=self.config.init_method,
                output_sequence=output_sequence,
                output_scalar=output_scalar,
                use_avg_pool=use_avg_pool,
                dtype=config.params_dtype if head_dtype is None else head_dtype,
            )
        # NOTE(review): any other rm_head_arch leaves `rm_head` unset and
        # forward() would fail on the last stage — confirm this is intended.

    def forward(
        self,
        input_ids: Tensor,
        lengths: Tensor,
        position_ids: Tensor,
        attention_mask: Tensor,
        prompt_lens: Tensor = None,
        decoder_input: Tensor = None,
        labels: Tensor = None,
        inference_params=None,
        packed_seq_params: PackedSeqParams = None,
        extra_block_kwargs: dict = None,
        return_attributes: bool = False,
    ):
        """Run the trunk, then the reward head.

        Returns the hidden states on non-final stages; on the final stage,
        returns the sequence rewards and/or the scalar rewards (a single
        tensor when only one output flag is on, else a tuple).
        """
        # BUGFIX(idiom): use `is None` instead of `== None` — equality on a
        # Tensor would return a tensor rather than a bool.
        assert packed_seq_params is None and extra_block_kwargs is None

        # hack to get the hidden states
        # and for mcore to not call the output layer
        with patch.object(self, "post_process", False):
            hidden_states = super().forward(
                input_ids=input_ids,
                position_ids=position_ids,
                attention_mask=attention_mask,
                decoder_input=decoder_input,
                labels=labels,
                inference_params=inference_params,
            )
        if not self.post_process:
            return hidden_states

        if self.config.sequence_parallel:
            hidden_states = tensor_parallel.gather_from_sequence_parallel_region(
                hidden_states, tensor_parallel_output_grad=False)
        if not self.mask_prompt:
            prompt_lens = None
        else:
            assert prompt_lens is not None

        # train_rm backward compat
        out_seq, out_scalar = self.rm_head(hidden_states,
                                           lengths,
                                           prompt_lens,
                                           return_attributes=return_attributes)

        # Returning multiple tensors would crash inside the MLM trainer, but
        # this path is never invoked from the MLM trainer.
        rets = ()
        if self.rm_head.output_sequence:
            rets += (out_seq, )
        if self.rm_head.output_scalar:
            rets += (out_scalar, )
        if len(rets) == 1:
            return rets[0]
        else:
            return rets

    def split_output_tensor(self, output_tensor):
        """Split a batch stacked as [chosen; rejected(; golden)] along dim 0.

        Always returns a 3-tuple (out_chosen, out_rejected, out_golden);
        `out_golden` is None unless `config.rm_use_triplet_loss` is set.
        """
        out_golden = None
        if self.config.rm_use_triplet_loss:
            assert output_tensor.shape[0] % 3 == 0, "mbs must be divisible by 3"
            rbs = output_tensor.shape[0] // 3
            out_chosen, out_rejected, out_golden = torch.split(output_tensor.float(), rbs, dim=0)
        else:
            out_chosen, out_rejected = torch.split(output_tensor.float(),
                                                   output_tensor.shape[0] // 2,
                                                   dim=0)
        return out_chosen, out_rejected, out_golden

    def focal_loss(self, focal_loss_lambda, focal_loss_gamma, focal_loss_range, ranking_coef,
                   out_chosen, out_rejected):
        """Focal ranking loss with an out-of-range score penalty.

        References:
        # https://hub.baai.ac.cn/view/36639
        # https://yiyibooks.cn/arxiv/2403.17297v1/index.html
        Unclear why the paper has no (1 - lambda) factor; leaving it as-is.
        """
        if is_wxacc1():
            # The `strided_slice_grad` graph node gets wrong parameters on
            # this device; judging from results the backend rejects certain
            # input shapes, so drop the leading dim up-front.
            out_chosen = out_chosen.squeeze(0)
            out_rejected = out_rejected.squeeze(0)

        assert ranking_coef > 0
        ranking_inverse_one_minus_coef = 1. / (1 - ranking_coef)

        p_ij = torch.nn.functional.sigmoid(out_chosen - out_rejected)
        l_rank = -(
            (1. - ranking_inverse_one_minus_coef * torch.nn.functional.relu(p_ij - ranking_coef))**
            focal_loss_gamma) * torch.log(p_ij)
        # Penalize scores drifting outside [-focal_loss_range, focal_loss_range].
        l_panalty_c = -(torch.nn.functional.logsigmoid(out_chosen + focal_loss_range) +
                        torch.nn.functional.logsigmoid(focal_loss_range - out_chosen))
        l_panalty_r = -(torch.nn.functional.logsigmoid(out_rejected + focal_loss_range) +
                        torch.nn.functional.logsigmoid(focal_loss_range - out_rejected))
        l_panalty = 0.5 * (l_panalty_c + l_panalty_r)
        # NOTE(review): [0] keeps only the first row before .mean() —
        # presumably the inputs are [1 x B] here; confirm for other shapes.
        loss = (l_rank[0] + focal_loss_lambda * l_panalty[0]).mean()
        return loss

    def rm_triplet_loss(self, focal_loss_lambda, focal_loss_gamma, focal_loss_range, out_chosen,
                        out_rejected, out_golden, acc_chosen):
        """Triplet-margin loss combined with pairwise focal losses.

        Returns (loss, acc_chosen, triplet_loss, loss_cr, loss_gc, loss_gr,
        acc_gc, acc_gr).
        """
        triplet_loss = F.triplet_margin_loss(out_golden,
                                             out_chosen,
                                             out_rejected,
                                             margin=self.config.rm_golden_margin)
        # One pairwise focal loss per ordered pair: chosen>rejected,
        # golden>chosen, golden>rejected.
        ranking_coefs = self.config.rm_focal_loss_ranking_coef
        loss_cr = self.focal_loss(focal_loss_lambda, focal_loss_gamma, focal_loss_range,
                                  ranking_coefs[0], out_chosen, out_rejected)
        loss_gc = self.focal_loss(focal_loss_lambda, focal_loss_gamma, focal_loss_range,
                                  ranking_coefs[1], out_golden, out_chosen)
        loss_gr = self.focal_loss(focal_loss_lambda, focal_loss_gamma, focal_loss_range,
                                  ranking_coefs[2], out_golden, out_rejected)
        # rm_triplet_focal_coef contains [loss_cr_coef, loss_gc_coef, loss_gr_coef]
        triplet_focal_loss_coef = self.config.rm_triplet_focal_coef
        focal_loss = (triplet_focal_loss_coef[0] * loss_cr + \
                      triplet_focal_loss_coef[1] * loss_gc + \
                      triplet_focal_loss_coef[2] * loss_gr)
        loss = self.config.rm_triplet_coef * triplet_loss + (
            1 - self.config.rm_triplet_coef) * focal_loss

        # Ranking accuracies for golden vs chosen / golden vs rejected.
        acc_gc_comp = out_golden > out_chosen
        acc_gc = torch.sum(acc_gc_comp) / acc_gc_comp.shape[0]
        acc_gr_comp = out_golden > out_rejected
        acc_gr = torch.sum(acc_gr_comp) / acc_gr_comp.shape[0]

        return loss, acc_chosen, triplet_loss, loss_cr, loss_gc, loss_gr, acc_gc, acc_gr

    # adapted from `MegatronGPTRewardModel`
    def rm_loss(self, output_tensor, use_focal_loss, focal_loss_lambda, focal_loss_gamma,
                focal_loss_range):
        """Main pairwise RM loss: log-sigmoid, focal, or triplet variant."""
        out_chosen, out_rejected, out_golden = self.split_output_tensor(output_tensor)
        if not self.config.rm_use_triplet_loss:
            assert out_golden is None

        comp = out_chosen > out_rejected
        acc_chosen = torch.sum(comp) / comp.shape[0]

        if self.config.rm_use_triplet_loss:
            triplet_loss_output = self.rm_triplet_loss(focal_loss_lambda, focal_loss_gamma,
                                                       focal_loss_range, out_chosen, out_rejected,
                                                       out_golden, acc_chosen)
            return triplet_loss_output
        elif not use_focal_loss:
            loss = -torch.nn.functional.logsigmoid(out_chosen - out_rejected).mean()
        else:
            loss = self.focal_loss(focal_loss_lambda, focal_loss_gamma, focal_loss_range,
                                   self.config.rm_focal_loss_ranking_coef[0], out_chosen,
                                   out_rejected)

        return loss, acc_chosen

    # TODO: extract into a subclass
    # Sentence-level (rather than whole-sequence-level) RM from yunshengshi;
    # if this naming is inaccurate, let me know and I'll change it.
    # https://git.xxx.com/pretrainx-team/stanford_alpaca/blob/mrc_reward_yunshengshi/src/model/baichuan_7b/modeling_baichuan_process_reward_sent_label_sigmoid_bias.py
    def rm_sentence_loss(self, score, scattered_labels):
        """Per-attribute BCE-with-logits losses, unreduced."""
        score = score.view(-1, self.num_attributes)
        scattered_labels = scattered_labels.view(-1, self.num_attributes).float()

        fn = torch.nn.BCEWithLogitsLoss(reduction='none')
        losses = fn(score, scattered_labels)
        return losses

    # TODO: extract into a subclass
    def rm_loss_softmax(self, output_tensor):
        """Pairwise ranking loss via 2-way softmax over (chosen, rejected)."""
        # BUGFIX: split_output_tensor returns a 3-tuple; the old two-way
        # unpack raised ValueError at runtime.
        out_chosen, out_rejected, _ = self.split_output_tensor(output_tensor)
        comp = out_chosen > out_rejected
        acc_chosen = torch.sum(comp) / comp.shape[0]

        fn = torch.nn.LogSoftmax(dim=1)
        out = torch.concat([out_chosen, out_rejected], dim=1)
        out = fn(out)
        # Class 0 (chosen) is always the correct label.
        label = torch.zeros([out.shape[0]], device=out.device).long()
        loss = F.nll_loss(out, label)
        return loss, acc_chosen

    # TODO: extract into a subclass
    def rm_qa_dt_loss(self, score, scattered_labels):
        """BCE loss that ignores positions labeled -1."""
        score = score.view(-1)
        scattered_labels = scattered_labels.view(-1).float()
        fn = torch.nn.BCEWithLogitsLoss(reduction='none')

        # -1 labels are masked out of both the numerator and denominator.
        target_ignore = torch.where(scattered_labels != -1, 1.0, 0.0).float()
        loss = torch.sum(
            fn(score, scattered_labels) * target_ignore) / torch.sum(target_ignore + 1e-5)
        return loss

    # TODO: extract into a subclass
    def rm_loss_mo(self, output_tensor, p_labels, masks):
        """Multi-objective pairwise loss over three QA attributes."""
        out_chosen, out_rejected, _ = self.split_output_tensor(output_tensor)
        # Labels/masks are stacked [chosen; rejected]; keep the chosen half.
        p_labels = p_labels[:p_labels.shape[0] // 2]
        masks = masks[:masks.shape[0] // 2]

        def get_pair_wise_loss(out_chosen, out_rejected, p_labels, masks, idx):
            # Masked 2-way softmax ranking loss for attribute `idx`.
            fn = torch.nn.LogSoftmax(dim=1)

            out = torch.stack([out_chosen[:, idx], out_rejected[:, idx]], dim=1)

            pred = fn(out)
            label = p_labels[:, idx]
            mask = masks[:, idx]
            loss = F.nll_loss(pred, label, reduction='none')
            loss = torch.sum(loss * mask) / (torch.sum(mask) + 1e-5)
            return loss

        ## qa_use
        idx = 0
        qa_use_loss = get_pair_wise_loss(out_chosen, out_rejected, p_labels, masks, idx)

        ## qa_read
        idx = 1
        qa_read_loss = get_pair_wise_loss(out_chosen, out_rejected, p_labels, masks, idx)

        ## qa_total
        idx = 2
        qa_total_loss = get_pair_wise_loss(out_chosen, out_rejected, p_labels, masks, idx)

        loss = qa_use_loss + qa_read_loss + qa_total_loss

        # Accuracy is measured on the qa_total attribute only.
        idx = 2
        comp = out_chosen[:, idx] > out_rejected[:, idx]
        acc_chosen = torch.sum(comp) / comp.shape[0]

        return loss, acc_chosen

    # TODO: extract into a subclass
    def rm_loss_odin(self, tokens, output_tensor):
        """ODIN-style loss: ranking loss plus a length-decorrelation term."""
        # tokens is [B x S]; count non-pad (non-zero) tokens per sample.
        tokens_lens = torch.sum(tokens != 0, dim=1)
        # BUGFIX: 3-tuple unpack (was a 2-way unpack -> ValueError).
        # NOTE(review): chosen/rejected lengths are currently unused below.
        lens_chosen, lens_rejected, _ = self.split_output_tensor(tokens_lens)

        # BUGFIX: same 3-tuple unpack fix as above.
        out_chosen, out_rejected, _ = self.split_output_tensor(output_tensor)
        ranking_loss = -torch.nn.functional.logsigmoid(out_chosen - out_rejected).mean()

        # Gather lengths and scores across the data-parallel group so the
        # correlation is computed over the global batch.
        dp_group = parallel_state.get_data_parallel_group()
        tokens_lens = tokens_lens.to(device=torch.cuda.current_device(), dtype=torch.float32)
        tokens_lens_gather_list = [
            torch.empty_like(tokens_lens) for _ in range(torch.distributed.get_world_size(dp_group))
        ]
        torch.distributed.all_gather(tokens_lens_gather_list, tokens_lens, group=dp_group)
        tokens_lens_gather = torch.concat(tokens_lens_gather_list, dim=0)

        output_tensor = output_tensor.to(device=torch.cuda.current_device(), dtype=torch.float32)
        output_tensor_gather_list = [
            torch.empty_like(output_tensor)
            for _ in range(torch.distributed.get_world_size(dp_group))
        ]
        torch.distributed.all_gather(output_tensor_gather_list, output_tensor, group=dp_group)
        output_tensor_gather = torch.concat(output_tensor_gather_list, dim=0)

        # Length correlation for head 1 (encouraging correlation).
        length_corr_matrix1 = torch.stack((tokens_lens_gather, output_tensor_gather[:, 0]))
        length_corr1 = torch.corrcoef(length_corr_matrix1.float())[0, 1]
        length_loss1 = 1 - length_corr1  # Encourage correlation
        # Length correlation loss for head 2 (discouraging correlation)
        length_corr_matrix2 = torch.stack((tokens_lens_gather, output_tensor_gather[:, 1]))
        length_corr2 = torch.corrcoef(length_corr_matrix2.float())[0, 1]
        length_loss2 = torch.abs(length_corr2)  # Discourage correlation

        # NOTE(review): only length_loss2 enters the loss; length_loss1 is
        # returned for logging only — confirm that is intended.
        loss = ranking_loss + (length_loss2) / torch.distributed.get_world_size(dp_group)

        # compute accuracy
        comp = out_chosen > out_rejected
        acc_chosen = torch.sum(comp) / comp.shape[0]

        return loss, acc_chosen, ranking_loss.detach(), length_loss1.detach(), length_loss2.detach()
