# encoding: utf-8
# description: 使用transformers来做sft，模型没有适配，后面再调整


import json
from tqdm import tqdm
import transformers
from transformers import Trainer
from transformers import AutoModelForCausalLM, AutoTokenizer

from dataclasses import dataclass, field
import torch
from torch.utils import data
from loguru import logger
import warnings

from model.model import Transformer
from model.LMConfig import LMConfig

# Hyper-parameter configuration for the project-local Transformer model.
lm_config = LMConfig()

warnings.filterwarnings("ignore")

# Use the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("using device", device)

# Hard cap on tokenized sequence length (prompt + response + padding).
MAX_LENGTH = 512


@dataclass
class CustomArguments(transformers.TrainingArguments):
    """Training arguments with project-specific defaults.

    Extends ``transformers.TrainingArguments``; every field below can still
    be overridden from the command line via ``HfArgumentParser``.
    """

    output_dir: str = field(default="./tf_out")
    # LoRA rank (reserved for a future LoRA fine-tuning path; unused here).
    lora_r: int = field(default=8)
    # Number of parallel worker processes for data preprocessing.
    num_proc: int = field(default=1)
    # Maximum tokenized sequence length.
    max_seq_length: int = field(default=MAX_LENGTH)
    # Evaluation strategy; set to "no" to disable evaluation.
    eval_strategy: str = field(default="steps")
    # Run evaluation every N steps.
    eval_steps: int = field(default=2000)
    # Random seed for reproducibility.
    seed: int = field(default=0)
    # Optimizer implementation.
    optim: str = field(default="adamw_torch")
    # Number of training epochs.
    num_train_epochs: int = field(default=10)
    # Per-device training batch size.
    per_device_train_batch_size: int = field(default=16)

    # Learning rate.
    learning_rate: float = field(default=5e-5)
    # Weight decay (float literal to match the ``float`` annotation).
    weight_decay: float = field(default=0.0)
    # Linear warmup steps.
    warmup_steps: int = field(default=10)
    # Learning-rate scheduler type.
    lr_scheduler_type: str = field(default="linear")
    # Trade compute for memory via gradient checkpointing.
    gradient_checkpointing: bool = field(default=False)
    # Use bf16 mixed-precision training.
    bf16: bool = field(default=True)
    # Gradient accumulation steps.
    gradient_accumulation_steps: int = field(default=1)

    # Log every N steps.
    logging_steps: int = field(default=500)
    # Checkpoint saving strategy.
    save_strategy: str = field(default="steps")
    # Save a checkpoint every N steps.
    save_steps: int = field(default=1000)
    # Maximum number of checkpoints to keep on disk.
    save_total_limit: int = field(default=3)
    # "0" = full dataset, anything else = demo dataset (str for CLI parsing).
    is_test: str = field(default="0")


# Parse CLI flags into a single CustomArguments instance; the trailing comma
# unpacks the one-element tuple returned by parse_args_into_dataclasses().
parser = transformers.HfArgumentParser(CustomArguments)
training_args, = parser.parse_args_into_dataclasses()


def init_model(model_from: int = 1,
               ckp: str = './out/pretrain_epoch_0_loss_0.49837.pth'):
    """Build the model and tokenizer and move the model to ``device``.

    Args:
        model_from: 1 -> instantiate the project ``Transformer`` and load a
            local ``.pth`` state dict; any other value -> load a pretrained
            model via ``AutoModelForCausalLM``.
        ckp: path to the pretrained state dict (used when ``model_from == 1``).

    Returns:
        Tuple ``(model, tokenizer)``.
    """
    tokenizer = AutoTokenizer.from_pretrained('./model/minimind_tokenizer')

    def count_parameters(model):
        # Trainable parameters only.
        return sum(p.numel() for p in model.parameters() if p.requires_grad)

    if model_from == 1:
        model = Transformer(lm_config)
        state_dict = torch.load(ckp, map_location=device)
        # torch.compile saves weights under an '_orig_mod.' prefix; strip it
        # so keys line up with the un-compiled module.
        unwanted_prefix = '_orig_mod.'
        for k in list(state_dict.keys()):
            if k.startswith(unwanted_prefix):
                state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
        # NOTE(review): strict=False silently ignores missing/unexpected keys;
        # consider logging load_state_dict's return value to surface mismatches.
        model.load_state_dict(state_dict, strict=False)
    else:
        model = AutoModelForCausalLM.from_pretrained('./minimind-v1-small', trust_remote_code=True)

    logger.info(f'LLM总参数量：{count_parameters(model) / 1e6:.3f} 百万')
    model = model.to(device)

    return model, tokenizer


# Build the model/tokenizer once at import time (module-level side effect).
model, tokenizer = init_model()

# Reuse the EOS token for padding (presumably the tokenizer has no dedicated
# pad token — verify against the tokenizer config).
tokenizer.pad_token = tokenizer.eos_token

# NOTE(review): init_model() already moved the model to `device`; this call
# is redundant but harmless.
model.to(device)
# NOTE(review): this optimizer is never passed to the Trainer below, which
# builds its own optimizer from `training_args` — it appears to be unused.
optimizer = torch.optim.AdamW(model.parameters())


def process_func(example):
    """Tokenize one SFT record into fixed-length training features.

    Builds a ChatML-style prompt, masks the prompt portion of the labels with
    -100, appends EOS after the response so the model learns to terminate,
    and pads/truncates every list to exactly ``MAX_LENGTH``.

    Args:
        example: dict with optional "instruction", "input" and "output" keys.

    Returns:
        dict with "input_ids", "attention_mask" and "label" lists, each of
        length ``MAX_LENGTH``.
    """
    instruction = example.get("instruction", "")
    ipt_text = example.get("input", "")
    output = example.get("output", "")
    prompt = f"<|im_start|>system\n你现在是一个商品简称生成机器人<|im_end|>\n<|im_start|>user\n{instruction + ipt_text}<|im_end|>\n<|im_start|>assistant\n"
    prompt_enc = tokenizer(prompt, add_special_tokens=False)
    response_enc = tokenizer(f"{output}", add_special_tokens=False)

    # Prompt + response + EOS; previously no EOS was appended, so the model
    # never saw a termination signal after the response.
    input_ids = prompt_enc["input_ids"] + response_enc["input_ids"] + [tokenizer.eos_token_id]
    attention_mask = prompt_enc["attention_mask"] + response_enc["attention_mask"] + [1]
    # Only the response (and EOS) contribute to the loss; the prompt is masked
    # with -100 (the ignore index of the cross-entropy loss).
    labels = [-100] * len(prompt_enc["input_ids"]) + response_enc["input_ids"] + [tokenizer.eos_token_id]

    if len(input_ids) > MAX_LENGTH:
        # Truncate over-long samples.
        input_ids = input_ids[:MAX_LENGTH]
        attention_mask = attention_mask[:MAX_LENGTH]
        labels = labels[:MAX_LENGTH]
    else:
        # Right-pad only the shortfall. Padding must be invisible to both the
        # attention (mask 0) and the loss (-100) — the previous version set
        # the mask to 1 over padding and used pad_token_id as labels, which
        # attends to and trains on the padding tokens.
        pad_len = MAX_LENGTH - len(input_ids)
        input_ids = input_ids + [tokenizer.pad_token_id] * pad_len
        attention_mask = attention_mask + [0] * pad_len
        labels = labels + [-100] * pad_len

    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "label": labels
    }

# NOTE(review): apparent list of candidate feature-key names (tokens, targets,
# kv_cache, keyargs, label_ids, label) — presumably the keys the model forward /
# data collator recognizes; confirm against model.forward's signature.


class MyDataSet(data.Dataset):
    """In-memory SFT dataset: each JSONL line is tokenized once at load time."""

    def __init__(self, json_path: str):
        # Pre-tokenized samples (feature dicts produced by process_func).
        self.data = []
        self._load_json_data(json_path)

    def _load_json_data(self, json_path: str):
        """Read a JSONL file and tokenize every record via ``process_func``."""
        with open(json_path, "r", encoding="utf-8") as f:
            # Stream line by line instead of readlines(): avoids holding the
            # whole raw file in memory alongside the processed samples.
            for line in tqdm(f, desc="loading dataset"):
                line = line.strip()
                if not line:
                    # Tolerate blank/trailing lines instead of crashing in
                    # json.loads.
                    continue
                self.data.append(process_func(json.loads(line)))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.data[item]


# Choose the training split: "0" = full dataset, anything else = small demo.
if training_args.is_test == "0":
    train_dataset = MyDataSet("./datas/medical_sft.json")
else:
    train_dataset = MyDataSet("./datas/medical_sft_demo.json")
# train_dataset = MyDataSet("/root/train_about/gen_short_name/datas/test_short_name.json")  # test
eval_dataset = MyDataSet("./datas/medical_sft_eval.json")  # evaluation split

if __name__ == '__main__':
    # NOTE: the custom model's outputs are not yet adapted to the transformers
    # Trainer interface; revisit before relying on this script end-to-end.
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )
    trainer.train()
    # Save to the configured output dir (default ./tf_out) instead of a
    # hard-coded "./checkpoints", so --output_dir actually takes effect for
    # the final model as well as the intermediate checkpoints.
    trainer.save_model(training_args.output_dir)
