# encoding: utf-8
# @Time:    :2024/12/22 16:57

import os

import torch
from accelerate import Accelerator
from datasets import load_dataset
from tqdm import tqdm
from transformers import BertModel, BertConfig, BertTokenizerFast, BertTokenizer, AutoTokenizer
from transformers import default_data_collator

from constant import pretrained_model, roberta_pretrained_model, device, train_data_path

# Load the tokenizer for the reward-model backbone.
# tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
tokenizer = AutoTokenizer.from_pretrained(roberta_pretrained_model)
pad_token_id = tokenizer.pad_token_id  # NOTE(review): old comment said 151643 (a Qwen id) — this is a RoBERTa tokenizer, verify
eos_token_id = tokenizer.eos_token_id  # NOTE(review): old comment said 151645 (a Qwen id) — verify for this tokenizer

batch_size = 8
MAX_LENGTH = 256   # fixed sequence length after padding/truncation
lr = 1e-4          # NOTE(review): unused — the Adam optimizer below is built with lr=5e-5

# Preference-pair training data; each JSON record is expected to carry
# "instruction" / "input" / "output" (chosen) / "reject" (rejected) fields.
dataset = load_dataset("json", data_files=train_data_path, split="train")


def process_data(example):
    """Tokenize one preference pair into fixed-length chosen/rejected rows.

    Builds ``instruction + input + output`` (chosen) and
    ``instruction + input + reject`` (rejected), tokenizes each once,
    appends EOS, and pads/truncates to exactly ``MAX_LENGTH`` tokens.

    Fixes over the original:
    - the text was tokenized twice and both encodings concatenated,
      duplicating every sequence;
    - PAD positions carried attention_mask 1 (they must be 0);
    - the rejected pair was only truncated when the *chosen* pair was long;
    - no EOS was ever added, although the critic's forward pass searches
      for ``eos_token_id`` to find the end of the answer.

    :param example: one dataset record (dict of strings).
    :return: dict with ``chosen_input_ids`` / ``chosen_attention_mask`` /
             ``rejected_input_ids`` / ``rejected_attention_mask``, each a
             list of exactly ``MAX_LENGTH`` ints.
    """
    instruction = example.get("instruction", "")
    ipt_text = example.get("input", "")
    output = example.get("output", "")
    reject = example.get("reject", "")

    chosen = instruction + ipt_text + output
    rejected = instruction + ipt_text + reject

    def _encode(text):
        # Tokenize once, terminate with EOS, then pad/truncate to MAX_LENGTH.
        enc = tokenizer(text, add_special_tokens=False)
        input_ids = enc["input_ids"] + [eos_token_id]
        attention_mask = enc["attention_mask"] + [1]
        if len(input_ids) > MAX_LENGTH:
            input_ids = input_ids[:MAX_LENGTH]
            attention_mask = attention_mask[:MAX_LENGTH]
            # Keep an EOS visible even after truncation so the critic can
            # still locate the end of the answer span.
            input_ids[-1] = eos_token_id
            attention_mask[-1] = 1
        pad_len = MAX_LENGTH - len(input_ids)
        input_ids = input_ids + [pad_token_id] * pad_len
        attention_mask = attention_mask + [0] * pad_len  # padding is masked out
        return input_ids, attention_mask

    chosen_input_ids, chosen_attention_mask = _encode(chosen)
    rejected_input_ids, rejected_attention_mask = _encode(rejected)

    return {
        'chosen_input_ids': chosen_input_ids,
        'chosen_attention_mask': chosen_attention_mask,
        'rejected_input_ids': rejected_input_ids,
        'rejected_attention_mask': rejected_attention_mask
    }


# Tokenize/pad every record up front; set_format("torch") makes the mapped
# columns come back as torch tensors so the collate fn can stack them.
dataset = dataset.map(process_data)
dataset.set_format("torch")


def collect_fn(example):
    """Collate a batch of preference pairs into one stacked batch.

    Produces (2*B, L) tensors where rows [0, B) are the chosen sequences
    and rows [B, 2B) are the rejected ones.

    :param example: list of per-record dicts with torch-tensor fields.
    :return: dict with stacked ``input_ids`` and ``attention_mask``.
    """
    def _column(field):
        # Pull one field out of every record, preserving batch order.
        return [record[field] for record in example]

    return {
        'input_ids': torch.stack(
            _column('chosen_input_ids') + _column('rejected_input_ids'),
            dim=0),
        'attention_mask': torch.stack(
            _column('chosen_attention_mask') + _column('rejected_attention_mask'),
            dim=0),
    }


# DataLoader over the mapped dataset. shuffle + drop_last keep every batch a
# full `batch_size` of pairs, which collect_fn doubles into 2*batch_size rows.
dataloader = torch.utils.data.DataLoader(dataset,
                                         collate_fn=collect_fn,
                                         batch_size=batch_size,
                                         shuffle=True,
                                         drop_last=True)
print("len of dataloader:", len(dataloader))


class CriticModel(torch.nn.Module):
    """Pairwise reward model: BERT/RoBERTa encoder + scalar per-token value head.

    ``forward`` expects the batch layout produced by ``collect_fn``: the
    first half of the batch dimension holds chosen sequences, the second
    half the matching rejected sequences, in the same order.
    """

    def __init__(self):
        super(CriticModel, self).__init__()
        self.bert = BertModel.from_pretrained(roberta_pretrained_model)
        bert_config = BertConfig.from_pretrained(roberta_pretrained_model)
        # One scalar value per token. The original used out_features=2,
        # which made the later squeeze(-1) a no-op and left a trailing
        # dimension of size 2 on every value.
        self.linear = torch.nn.Linear(bert_config.hidden_size, 1, bias=False)

    def forward(self, input_ids, attention_mask):
        """Return (mean pairwise loss, sum of chosen means, sum of rejected means)."""
        hidden = self.bert(input_ids=input_ids,
                           attention_mask=attention_mask).last_hidden_state
        values = self.linear(hidden).squeeze(-1)  # (2B, L) per-token values

        # Split at half the *batch*, not at MAX_LENGTH. The batch is
        # [chosen...; rejected...] with 2B rows; the original sliced
        # input_ids[:MAX_LENGTH] / [MAX_LENGTH:], so with 2B < MAX_LENGTH
        # the second slice was empty, the loop never ran, and a plain
        # float 0.0 (no grad) was returned to accelerator.backward().
        half = input_ids.size(0) // 2

        # Keep the accumulator a tensor so .backward() works on the result.
        loss_sum = torch.zeros((), device=values.device, dtype=values.dtype)
        value_chosen_sum = 0.0
        value_rejected_sum = 0.0
        num_pairs = 0

        for ids_chosen, ids_rejected, value_chosen, value_rejected in zip(
                input_ids[:half], input_ids[half:],
                values[:half], values[half:]):

            # First index where the pair diverges = start of the answer span.
            diff = (ids_chosen != ids_rejected).nonzero()
            if diff.numel() == 0:
                continue  # identical pair carries no preference signal
            start = diff[0].item()

            # Score up to (and including) EOS; fall back to the full length
            # if no EOS is present (the original .index() raised instead).
            chosen_list = ids_chosen.tolist()
            rejected_list = ids_rejected.tolist()
            end_chosen = (chosen_list.index(eos_token_id) + 1
                          if eos_token_id in chosen_list else len(chosen_list))
            end_rejected = (rejected_list.index(eos_token_id) + 1
                            if eos_token_id in rejected_list else len(rejected_list))
            end = max(end_chosen, end_rejected)

            value_chosen = value_chosen[start:end]
            value_rejected = value_rejected[start:end]

            # Bradley–Terry pairwise loss: -log sigmoid(chosen - rejected).
            loss = -torch.nn.functional.logsigmoid(
                value_chosen - value_rejected).mean()

            loss_sum = loss_sum + loss
            value_chosen_sum += value_chosen.mean().item()
            value_rejected_sum += value_rejected.mean().item()
            num_pairs += 1

        # Average over the pairs actually scored (original divided by
        # MAX_LENGTH, which is unrelated to the number of pairs).
        if num_pairs > 0:
            loss_sum = loss_sum / num_pairs
        return loss_sum, value_chosen_sum, value_rejected_sum


# Instantiate the critic, move it to the configured device, enable training mode.
model_critic = CriticModel()
model_critic.to(device)
model_critic.train()

# NOTE(review): the module-level `lr = 1e-4` above is unused — Adam is
# deliberately(?) built with 5e-5 here; confirm which value is intended.
optimizer = torch.optim.Adam(model_critic.parameters(), lr=5e-5)

accelerator = Accelerator(mixed_precision='fp16')

# Per-object device placement for prepare(): place dataloader and model,
# skip the optimizer (third element).
device_placement = [True, True, False]
dataloader, model_critic, optimizer = accelerator.prepare(dataloader, model_critic, optimizer,
                                                          device_placement=device_placement
                                                          )

# Training loop: one forward/backward/step per collated (2B, L) batch.
# `total=` gives tqdm a real progress bar (enumerate alone has no length).
for i, data in tqdm(enumerate(dataloader), total=len(dataloader)):

    loss, value_chosen_sum, value_rejected_sum = model_critic(**data)
    accelerator.backward(loss)
    # Clip gradients to stabilize fp16 mixed-precision training.
    accelerator.clip_grad_norm_(model_critic.parameters(), 1.0)
    optimizer.step()
    optimizer.zero_grad()

    if (i + 1) % 100 == 0:
        print(f"i:{i}, loss:{loss.item()}, value_chosen_sum:{value_chosen_sum}, value_rejected_sum:{value_rejected_sum}")


# Make sure the output directory exists — the original save crashed with
# FileNotFoundError when ./model was missing.
os.makedirs('./model', exist_ok=True)
# NOTE(review): this pickles the whole module, which pins the class's import
# path at load time; consider torch.save(model.state_dict(), ...) instead.
torch.save(model_critic.to('cpu'), './model/critic')
