# encoding: utf-8
# @Time:    :2024/12/22 15:43

import torch
import random
from tqdm import tqdm
from datasets import load_dataset
from transformers import default_data_collator
from transformers import AutoModelForCausalLM, AutoTokenizer
from accelerate import Accelerator

from constant import pretrained_model, train_data_path, device

# Tokenizer for the base checkpoint; the pad/eos ids are cached here for use
# when building fixed-length training examples below.
tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
pad_token_id = tokenizer.pad_token_id  # 151643 — value observed for this checkpoint (Qwen-style vocab); verify if the model changes
eos_token_id = tokenizer.eos_token_id  # 151645 — same caveat as above

# Training hyperparameters.
batch_size = 8    # examples per optimizer step
MAX_LENGTH = 256  # every example is truncated/padded to this token length
lr = 1e-4         # Adam learning rate


def process_data(example):
    """Tokenize one instruction/response example into fixed-length features.

    Builds a Qwen chat-template prompt, masks the prompt portion of the
    labels with -100 (loss is computed only on the response), appends EOS so
    the model learns to end its turn, then truncates/pads to MAX_LENGTH.

    Args:
        example: dict with optional "instruction", "input", "output" strings.

    Returns:
        dict with "input_ids", "attention_mask", "labels", each a list of
        exactly MAX_LENGTH ints.
    """
    instruction_text = example.get("instruction", "")
    ipt_text = example.get("input", "")
    output = example.get("output", "")
    prompt = f"<|im_start|>system\n你是一个机器人，能回答用户的问题<|im_end|>\n<|im_start|>user\n{instruction_text + ipt_text}<|im_end|>\n<|im_start|>assistant\n"
    instruction = tokenizer(prompt, add_special_tokens=False)
    # NOTE: the previous `padding=True, max_length=MAX_LENGTH` here was a
    # no-op (single example, no truncation flag) and has been removed.
    response = tokenizer(output, add_special_tokens=False)

    # Append EOS after the response; without it the model never learns to stop.
    input_ids = instruction["input_ids"] + response["input_ids"] + [eos_token_id]
    attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]
    # Prompt tokens are masked with -100 so they contribute no loss.
    labels = [-100] * len(instruction["input_ids"]) + response["input_ids"] + [eos_token_id]

    # Truncate over-long sequences...
    input_ids = input_ids[:MAX_LENGTH]
    attention_mask = attention_mask[:MAX_LENGTH]
    labels = labels[:MAX_LENGTH]

    # ...and pad short ones. Padding must be invisible to both attention
    # (mask 0) and the loss (-100); the original used mask 1 and
    # pad_token_id, which made the model attend to and predict padding.
    pad_len = MAX_LENGTH - len(input_ids)
    input_ids += [pad_token_id] * pad_len
    attention_mask += [0] * pad_len
    labels += [-100] * pad_len

    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels,
    }


# Load the JSON training set, keep only the first 5000 examples, and map
# every record to fixed-length token features (dropping the raw columns).
dataset = load_dataset("json", data_files=train_data_path, split="train").select(range(5000))
dataset = dataset.map(process_data, remove_columns=dataset.column_names)  # noqa

dataloader = torch.utils.data.DataLoader(
    dataset,
    collate_fn=default_data_collator,
    batch_size=batch_size,
    shuffle=True,
    drop_last=True,
)

print("len of dataloader:", len(dataloader))

# Load the policy ("actor") model and put it in training mode.
model_actor = AutoModelForCausalLM.from_pretrained(pretrained_model)
model_actor.to(device)  # NOTE(review): likely redundant — prepare() below also places the model; confirm
model_actor.train()

optimizer = torch.optim.Adam(model_actor.parameters(), lr=lr)
# NOTE(review): this name shadows the `accelerate` package module; kept as-is
# because the training loop below references it.
accelerate = Accelerator(mixed_precision="fp16")

# Per-object device placement for prepare(): dataloader and model are moved
# to the accelerator device, the optimizer is not.
device_placement = [True, True, False]
dataloader, model_actor, optimizer = accelerate.prepare(dataloader,
                                                        model_actor,
                                                        optimizer,
                                                        device_placement=device_placement)

# Short fine-tuning loop: forward, scaled backward, gradient clipping,
# optimizer step. Stops after ~500 batches.
for i, data in tqdm(enumerate(dataloader)):
    out = model_actor(**data)
    # backward() through the Accelerator so the fp16 loss scaling is applied.
    accelerate.backward(out.loss)
    # clip via the Accelerator so gradients are unscaled before clipping.
    accelerate.clip_grad_norm_(model_actor.parameters(), 1.0)
    optimizer.step()
    optimizer.zero_grad()

    if (i + 1) % 50 == 0:
        print(f"i:{i+1}, loss:{out.loss.item()}")

    if i == 500:
        break

# prepare() may wrap the model (e.g. in DDP); unwrap before saving so the
# checkpoint is a plain HF model, and save only once on the main process.
# The original saved the wrapped model directly, which breaks reloading
# in distributed runs.
accelerate.wait_for_everyone()
if accelerate.is_main_process:
    accelerate.unwrap_model(model_actor).save_pretrained("./model/actor")
    tokenizer.save_pretrained("./model/actor")

