# encoding: utf-8

import os
import warnings
from loguru import logger

# Pin the process to GPU 0 before torch is imported, so CUDA only sees one device.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import torch
from transformers import TrainingArguments, AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer
from datasets import load_dataset
from model.model import Transformer
from model.LMConfig import LMConfig

# Global run setup: model hyper-parameters, warning suppression, device selection.
lm_config = LMConfig()  # architecture hyper-parameters from model/LMConfig.py
warnings.filterwarnings('ignore')
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
print("using device", device)


def init_model(model_from: int = 1):
    """Build the policy model and tokenizer for DPO training.

    Args:
        model_from: 1 (default) -> construct the local ``Transformer`` and load
            raw ``.pth`` weights; any other value -> load a HuggingFace model
            via ``AutoModelForCausalLM``. (Previously a hard-coded local flag;
            now a parameter with the same default, so callers are unaffected.)

    Returns:
        tuple: ``(model, tokenizer)`` with the model moved to the global ``device``.
    """
    my_pretrained = "/root/train_about/llm_from_zero/my_minimind/my_minimind"
    tokenizer = AutoTokenizer.from_pretrained(my_pretrained)

    def count_parameters(model):
        # Count trainable parameters only.
        return sum(p.numel() for p in model.parameters() if p.requires_grad)

    if model_from == 1:
        model = Transformer(lm_config)
        # SFT checkpoint with a raw state_dict (was a pointless f-string).
        ckp = './out/full_sft_epoch_1_loss_1.66621.pth'
        state_dict = torch.load(ckp, map_location=device)
        # Strip the prefix torch.compile prepends so keys match the plain model.
        unwanted_prefix = '_orig_mod.'
        for k in list(state_dict.keys()):
            if k.startswith(unwanted_prefix):
                state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
        # strict=False tolerates key mismatches; surface them instead of
        # silently dropping the report.
        missing, unexpected = model.load_state_dict(state_dict, strict=False)
        if missing or unexpected:
            logger.warning(f'load_state_dict mismatches - missing: {missing}, unexpected: {unexpected}')
    else:
        model = AutoModelForCausalLM.from_pretrained('./minimind-v1-small', trust_remote_code=True)

    logger.info(f'LLM总参数量：{count_parameters(model) / 1e6:.3f} 百万')
    model = model.to(device)

    return model, tokenizer


if __name__ == '__main__':
    # init method 1: local Transformer + raw .pth weights (see init_model above)
    # model, tokenizer = init_model()
    # init method 2: load everything through the HuggingFace hub-style directory
    pretrained_model = "/root/train_about/llm_from_zero/my_minimind/my_minimind"
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
    model = AutoModelForCausalLM.from_pretrained(pretrained_model, trust_remote_code=True)

    # No dedicated pad token in this tokenizer; reuse EOS so batches can pad.
    tokenizer.pad_token = tokenizer.eos_token
    training_config = DPOConfig(
        output_dir="./minimind_dpo",
        per_device_train_batch_size=4,
        remove_unused_columns=False,  # DPO needs the raw prompt/chosen/rejected columns
        report_to="none",
        save_steps=2000,
        learning_rate=4e-5
    )

    # presumably records with prompt/chosen/rejected fields — verify against the file
    dataset_path = './datas/dpo.json'
    train_dataset = load_dataset('json', data_files=dataset_path)

    # ref_model=None: the trainer clones the policy model as the frozen reference.
    # NOTE(review): passing beta/max_length/tokenizer directly is the older trl
    # API; newer trl moves these into DPOConfig and renames tokenizer to
    # processing_class — confirm against the installed trl version.
    dpo_trainer = DPOTrainer(
        model,
        ref_model=None,
        args=training_config,
        beta=0.1,  # strength of the implicit KL penalty in the DPO loss
        train_dataset=train_dataset['train'],
        tokenizer=tokenizer,
        max_length=512,
        max_prompt_length=512
    )
    dpo_trainer.train()
