# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
import torch
from torch import nn
from transformers import OPTForSequenceClassification
from dschat.utils.model.imdb_reward_model import OPTTokenLevelClassifier


## Note that the following code is modified from
## https://github.com/CarperAI/trlx/blob/main/examples/summarize_rlhf/reward_model/reward_model.py
class RewardModel(nn.Module):
    """Pairwise reward model: a transformer backbone plus a scalar value head.

    Wraps ``base_model`` and projects each token's final hidden state to a
    single scalar reward via ``self.v_head`` (a bias-free Linear).

    * ``forward`` expects inputs stacked as ``[chosen; rejected]`` along the
      batch dimension and computes the pairwise logsigmoid ranking loss used
      for reward-model training.
    * ``forward_value`` scores a single batch and extracts the reward at the
      last non-padding token of each sequence.
    * ``forward_value2`` / ``forward_value_tdrd`` are alternative/experimental
      scoring paths (the latter appears to be work-in-progress debug code).
    """

    def __init__(self,
                 base_model,
                 tokenizer,
                 num_padding_at_beginning=0,
                 compute_fp32_loss=False):
        """Build the reward model around ``base_model``.

        Args:
            base_model: HuggingFace transformer backbone; its ``config`` is
                used to size the value head.
            tokenizer: tokenizer supplying ``pad_token_id`` / ``eos_token_id``.
            num_padding_at_beginning: number of padding tokens the tokenizer
                places at the start of each sequence (OPT pads the first
                token, so this is typically 1 for OPT).
            compute_fp32_loss: if True, cast the truncated rewards to float32
                before computing the pairwise loss (numerical stability when
                training in fp16/bf16).
        """
        super().__init__()
        self.config = base_model.config
        self.num_padding_at_beginning = num_padding_at_beginning
        if hasattr(self.config, "word_embed_proj_dim"):
            # `OPT` models use word_embed_proj_dim as final output
            # https://github.com/huggingface/transformers/blob/main/src/transformers/models/opt/modeling_opt.py#L497
            self.v_head = nn.Linear(self.config.word_embed_proj_dim,
                                    1,
                                    bias=False)
        else:
            # `gpt-neo(x)` models use the `hidden_size` attribute name instead of `n_embd`
            self.config.n_embd = self.config.hidden_size if hasattr(
                self.config, "hidden_size") else self.config.n_embd
            self.v_head = nn.Linear(self.config.n_embd, 1, bias=False)
        self.rwtransformer = base_model
        if "OPTForSequenceClassification" in self.config.architectures:
            # Replace the backbone with a locally fine-tuned token-level
            # classifier (IMDB sentiment reward model); its per-token logits
            # are used by forward_value2 via `self.score_layer`.
            # NOTE(review): the model and checkpoint paths below are
            # hard-coded to a specific machine — make them configurable.
            print("||||||load local rewardmodel||||||||")
            # self.rwtransformer = OPTForSequenceClassification.from_pretrained(
            #     r"/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/tests/opt_imdb_model")
            # self.score_layer = self.rwtransformer.score
            self.rwtransformer = OPTTokenLevelClassifier(
                opt_model_name="/root/autodl-tmp/fcrlhf/DeepSpeed-Chat/models/opt-350m").to(
                "cuda")
            save_path = "/root/autodl-tmp/fcrlhf/DeepSpeed-Chat/tests/output"
            print("tokenizer.pad_token_id", tokenizer.pad_token_id)
            # Load the fine-tuned classifier weights from disk
            self.rwtransformer.load_state_dict(torch.load(f"{save_path}/pytorch_model.bin"))
            self.score_layer = self.rwtransformer.token_classifier
        else:
            self.score_layer = None
        self.PAD_ID = tokenizer.pad_token_id
        self.compute_fp32_loss = compute_fp32_loss
        self.eos_token_id = tokenizer.eos_token_id

    def gradient_checkpointing_enable(self):
        """Delegate gradient-checkpointing activation to the backbone."""
        self.rwtransformer.gradient_checkpointing_enable()

    def gradient_checkpointing_disable(self):
        """Delegate gradient-checkpointing deactivation to the backbone."""
        self.rwtransformer.gradient_checkpointing_disable()

    def forward(self,
                input_ids=None,
                past_key_values=None,
                attention_mask=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                use_cache=False):
        """Compute the pairwise ranking loss over stacked [chosen; rejected] batches.

        ``input_ids`` has shape (2*bs, seq_len): the first ``bs`` rows are the
        "chosen" responses and the last ``bs`` rows the "rejected" responses
        for the same prompts, row-aligned pairwise.

        Returns a dict with:
            loss: mean over pairs of -logsigmoid(chosen - rejected) averaged
                over the divergent (post-prompt, pre-padding) token span.
            chosen_mean_scores / rejected_mean_scores: per-pair scalar scores
                taken at each sequence's end token.

        NOTE(review): ``position_ids`` is accepted for interface compatibility
        but never forwarded to the backbone.
        """

        loss = None

        # llama's forward signature does not accept head_mask.
        if self.config.model_type == "llama":
            kwargs = dict()
        else:
            kwargs = dict(head_mask=head_mask)

        transformer_outputs = self.rwtransformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            **kwargs)

        # Per-token scalar rewards: (2*bs, seq_len)
        hidden_states = transformer_outputs[0]
        rewards = self.v_head(hidden_states).squeeze(-1)
        chosen_mean_scores = []
        rejected_mean_scores = []

        # Split the inputs and rewards into two parts, chosen and rejected
        assert len(input_ids.shape) == 2
        bs = input_ids.shape[0] // 2
        seq_len = input_ids.shape[1]

        chosen_ids = input_ids[:bs]  # bs x seq x 1
        rejected_ids = input_ids[bs:]
        chosen_rewards = rewards[:bs]
        rejected_rewards = rewards[bs:]

        # Compute pairwise loss. Only backprop on the different tokens before padding
        loss = 0.
        for i in range(bs):
            chosen_id = chosen_ids[i]
            rejected_id = rejected_ids[i]
            chosen_reward = chosen_rewards[i]
            rejected_reward = rejected_rewards[i]

            # End of the chosen sequence: first padding token after the
            # num_padding_at_beginning leading pads, else full length.
            c_inds = (chosen_id == self.PAD_ID).nonzero()
            c_ind = c_inds[self.num_padding_at_beginning].item() if len(
                c_inds
            ) > self.num_padding_at_beginning else seq_len  # OPT model pads the first token, so we need to use the second padding token as the end of the sequence
            check_divergence = (chosen_id != rejected_id).nonzero()

            if len(check_divergence) == 0:
                # Identical sequences: compare only the final token.
                end_ind = rejected_reward.size(-1)
                divergence_ind = end_ind - 1
                r_ind = c_ind
            else:
                # Check if there is any padding otherwise take length of sequence
                r_inds = (rejected_id == self.PAD_ID).nonzero()
                r_ind = r_inds[self.num_padding_at_beginning].item(
                ) if len(r_inds) > self.num_padding_at_beginning else seq_len
                end_ind = max(c_ind, r_ind)
                divergence_ind = check_divergence[0]
            assert divergence_ind > 0
            # Loss is computed only on tokens where the two sequences differ.
            c_truncated_reward = chosen_reward[divergence_ind:end_ind]
            r_truncated_reward = rejected_reward[divergence_ind:end_ind]
            chosen_mean_scores.append(
                chosen_reward[c_ind - 1])  # use the end score for reference
            rejected_mean_scores.append(rejected_reward[r_ind - 1])

            if self.compute_fp32_loss:
                c_truncated_reward = c_truncated_reward.float()
                r_truncated_reward = r_truncated_reward.float()
            loss += -torch.nn.functional.logsigmoid(c_truncated_reward -
                                                    r_truncated_reward).mean()

        loss = loss / bs
        chosen_mean_scores = torch.stack(chosen_mean_scores)
        rejected_mean_scores = torch.stack(rejected_mean_scores)
        return {
            "loss": loss,
            "chosen_mean_scores": chosen_mean_scores,
            "rejected_mean_scores": rejected_mean_scores,
        }

    def forward_value(self,
                      input_ids=None,
                      attention_mask=None,
                      past_key_values=None,
                      position_ids=None,
                      head_mask=None,
                      inputs_embeds=None,
                      return_value_only=False,
                      prompt_length=0,
                      use_cache=False):
        """Score a batch of sequences with the value head.

        If ``return_value_only`` is True, returns the per-token values
        (bs, seq_len); otherwise returns a dict with the values, the scalar
        score at each sequence's last answer token (``chosen_end_scores``),
        the end positions, and the backbone attentions.

        When the backbone is the local OPT token-level classifier, scoring is
        delegated entirely to ``self.rwtransformer.forward_value``.
        """
        # if 1:
        #     return self.forward_value_tdrd(
        #               input_ids,
        #               attention_mask,
        #               past_key_values,
        #               position_ids,
        #               head_mask,
        #               inputs_embeds,
        #               return_value_only,
        #               prompt_length,
        #               use_cache)
        if "OPTForSequenceClassification" in self.config.architectures:
            # return self.forward_value2(input_ids,
            #           attention_mask,
            #           past_key_values,
            #           position_ids,
            #           head_mask,
            #           inputs_embeds,
            #           return_value_only,
            #           prompt_length,
            #           use_cache=False)
            return self.rwtransformer.forward_value(input_ids,
                                                    attention_mask,
                                                    past_key_values,
                                                    position_ids,
                                                    head_mask,
                                                    inputs_embeds,
                                                    return_value_only,
                                                    prompt_length,
                                                    use_cache=False)
        # llama's forward signature does not accept head_mask.
        if self.config.model_type == "llama":
            kwargs = dict()
        else:
            kwargs = dict(head_mask=head_mask)

        transformer_outputs = self.rwtransformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=True,
            **kwargs)
        hidden_states = transformer_outputs[0]
        values = self.v_head(hidden_states).squeeze(-1)
        # print("values shape", values.shape) [16 : 512]
        attentions = transformer_outputs.attentions
        if return_value_only:
            return values
        else:
            # [0 0 0 0 prompt, answer, 0 0 0 0 ] for step 3, we have padding at the beginning
            # [prompt, answer, 0, 0, 0, 0] this is normal
            assert prompt_length > 1, "prompt_length must be greater than 1 to help select the end score"
            bs = values.size(0)
            seq_len = input_ids.shape[1]
            chosen_end_scores = [
            ]  # we use this name for consistency with the original forward function
            ends_pos = []
            for i in range(bs):
                input_id = input_ids[i]
                value = values[i]
                # First padding token within the answer part marks the end.
                c_inds = (input_id[prompt_length:] == self.PAD_ID).nonzero()
                # here we only use the answer part of the sequence so we do not need to care about the padding at the beginning
                c_ind = c_inds[0].item() + prompt_length if len(
                    c_inds) > 0 else seq_len
                chosen_end_scores.append(value[c_ind - 1])
                ends_pos.append(c_ind)
            return {
                "values": values,
                "chosen_end_scores": torch.stack(chosen_end_scores),
                "ends_pos": ends_pos,
                "attentions": attentions,
            }

    def forward_value2(self,
                       input_ids=None,
                       attention_mask=None,
                       past_key_values=None,
                       position_ids=None,
                       head_mask=None,
                       inputs_embeds=None,
                       return_value_only=False,
                       prompt_length=0,
                       use_cache=False):
        """Score sequences with the token-level classifier head (IMDB path).

        Uses ``self.score_layer`` on the backbone's last hidden states and
        takes the per-token value as the logit difference (class 1 minus
        class 0 — presumably positive-minus-negative sentiment; confirm
        against the classifier's label mapping). Return contract mirrors
        ``forward_value``.
        """
        print("IMDBdataset")
        # llama's forward signature does not accept head_mask.
        if self.config.model_type == "llama":
            kwargs = dict()
        else:
            kwargs = dict(head_mask=head_mask)
        transformer_outputs = self.rwtransformer.model(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=True,
            **kwargs)
        hidden_states = transformer_outputs.last_hidden_state
        outputs = self.score_layer(hidden_states)
        # Per-token value = logit(class 1) - logit(class 0)
        values = outputs[:, :, 1] - outputs[:, :, 0]
        # values = outputs[:,:, 1]
        attentions = transformer_outputs.attentions
        if return_value_only:
            return values
        else:
            # [0 0 0 0 prompt, answer, 0 0 0 0 ] for step 3, we have padding at the beginning
            # [prompt, answer, 0, 0, 0, 0] this is normal
            assert prompt_length > 1, "prompt_length must be greater than 1 to help select the end score"
            bs = values.size(0)
            seq_len = input_ids.shape[1]
            chosen_end_scores = [
            ]  # we use this name for consistency with the original forward function
            ends_pos = []
            for i in range(bs):
                input_id = input_ids[i]
                value = values[i]
                # print("value", value)
                c_inds = (input_id[prompt_length:] == self.PAD_ID).nonzero()
                # here we only use the answer part of the sequence so we do not need to care about the padding at the beginning
                c_ind = c_inds[0].item() + prompt_length if len(
                    c_inds) > 0 else seq_len
                chosen_end_scores.append(value[c_ind - 1])
                ends_pos.append(c_ind)
            return {
                "values": values,
                "chosen_end_scores": torch.stack(chosen_end_scores),
                "ends_pos": ends_pos,
                "attentions": attentions,
            }

    def forward_value_tdrd(self,
                      input_ids=None,
                      attention_mask=None,
                      past_key_values=None,
                      position_ids=None,
                      head_mask=None,
                      inputs_embeds=None,
                      return_value_only=False,
                      prompt_length=0,
                      use_cache=False):
        """Experimental scoring path (dated 2025/4/18) — appears unfinished.

        In addition to the standard value-head scores, it recomputes hidden
        states for every valid token position with that token replaced by EOS
        and subsequent positions masked, collecting them into ``repl_hidden``.
        NOTE(review): ``repl_hidden`` is computed but never used in the return
        value, and ``tmp_out`` from the probe forward below is unused — this
        looks like work-in-progress debug code. Return contract otherwise
        mirrors ``forward_value``.
        """
        # llama's forward signature does not accept head_mask.
        if self.config.model_type == "llama":
            kwargs = dict()
        else:
            kwargs = dict(head_mask=head_mask)

        logits_orig = []
        logits_eos = []
        past = None
        print("eos_token_id", self.eos_token_id)
        print("pad_token_id", self.PAD_ID)
        eos_id = self.eos_token_id
        transformer_outputs = self.rwtransformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=True,
            output_hidden_states=True,
            **kwargs)
        hidden_states = transformer_outputs[0]
        print("hidden_states shape", hidden_states.shape)
        values = self.v_head(hidden_states).squeeze(-1)
        attentions = transformer_outputs.attentions
        #new 2025/4/18
        B, L = input_ids.shape
        device = input_ids.device

        # Only need the last hidden_state; skip attentions and past_key_values caching
        self.rwtransformer.config.output_hidden_states = True
        self.rwtransformer.config.output_attentions = False
        self.rwtransformer.config.use_cache = False

        # If the model has not been converted to fp16 yet, you can:
        # model.half()
        # or wrap the forward pass in autocast
        # from torch.cuda.amp import autocast

        # Linear indices of all "valid" (non-padding) positions
        flat_mask = attention_mask.view(-1).bool()  # (B*L,)
        idxs = flat_mask.nonzero(as_tuple=False).squeeze(1)  # N = sum(mask)

        batch_idx = idxs // L  # (N,)
        pos_idx = idxs % L  # (N,)

        # 3) Run a tiny batch to obtain hidden_size and dtype
        # NOTE(review): tmp_out is never read; H/DT are taken from the full
        # hidden_states above, so this probe forward appears redundant.
        with torch.no_grad():
            tmp_out = self.rwtransformer(
                input_ids=input_ids[:1],
                attention_mask=attention_mask[:1],
            )
        H = hidden_states.size(-1)  # hidden dimension
        DT = hidden_states.dtype  # float16 or float32

        # 4) Allocate the output tensor with the same dtype
        repl_hidden = torch.zeros(B, L, H, device=device, dtype=DT)

        # Tune this to your GPU memory limit, e.g. 64, 128, 256
        chunk_size = 64

        for start in range(0, idxs.size(0), chunk_size):
            end = start + chunk_size
            bi = batch_idx[start:end]  # (C,)
            pi = pos_idx[start:end]  # (C,)
            C = bi.size(0)

            # Gather the corresponding rows from the original inputs instead
            # of expanding to B*L rows at once
            sel_ids = input_ids[bi].clone()  # (C, L)
            sel_attn = attention_mask[bi].clone()  # (C, L)

            # Replace the token at position pi in each row and mask out all
            # subsequent positions
            sel_ids[torch.arange(C, device=device), pi] = eos_id
            col_idx = torch.arange(L, device=device).unsqueeze(0).expand(C, L)
            keep = col_idx <= pi.unsqueeze(1)
            sel_attn = sel_attn * keep

            with torch.no_grad():
                # If not converted to fp16, wrap with autocast as below; if
                # the model is already fp16 the autocast can be omitted
                # with autocast():
                out = self.rwtransformer(input_ids=sel_ids, attention_mask=sel_attn)
            new_hs = out[0]  # (C, L, H)

            # Extract the vector at position pi for each variant
            new_vecs = new_hs[torch.arange(C, device=device), pi]  # (C, H)

            # Write back into the result tensor
            repl_hidden[bi, pi] = new_vecs

            # Free the CUDA cache promptly to mitigate memory fragmentation
            torch.cuda.empty_cache()
        print("repl_hidden shape", repl_hidden.shape)
        #end


        if return_value_only:
            return values
        else:
            # [0 0 0 0 prompt, answer, 0 0 0 0 ] for step 3, we have padding at the beginning
            # [prompt, answer, 0, 0, 0, 0] this is normal
            assert prompt_length > 1, "prompt_length must be greater than 1 to help select the end score"
            bs = values.size(0)
            seq_len = input_ids.shape[1]
            chosen_end_scores = [
            ]  # we use this name for consistency with the original forward function
            ends_pos = []
            for i in range(bs):
                input_id = input_ids[i]
                value = values[i]
                c_inds = (input_id[prompt_length:] == self.PAD_ID).nonzero()
                # here we only use the answer part of the sequence so we do not need to care about the padding at the beginning
                c_ind = c_inds[0].item() + prompt_length if len(
                    c_inds) > 0 else seq_len
                chosen_end_scores.append(value[c_ind - 1])
                ends_pos.append(c_ind)
            return {
                "values": values,
                "chosen_end_scores": torch.stack(chosen_end_scores),
                "ends_pos": ends_pos,
                "attentions": attentions,
            }