
from transformers import AutoModelForCausalLM,TrainingArguments, AutoTokenizer , Trainer,HfArgumentParser
import torch

from peft import get_peft_model
import os
import sys
sys.path.append(".")
from lora import lo_conf

from datasets import load_dataset
# Fix the global RNG seed for reproducible runs
torch.manual_seed(2618)

import torch.nn.functional as F
def data_formator(data, tokenizer, max_length, prefix_token=None, suffix_token=None, middle_token=None, eos_token=None, **kwargs):
    """Build fill-in-the-middle (FIM) training samples and tokenize them.

    Each example is assembled as
    ``{prefix_token}{prefix}{suffix_token}{suffix}{middle_token}{mid}{eos_token}``
    and encoded to a fixed length for causal-LM training.

    Args:
        data: batched dataset columns with 'prefix', 'suffix' and 'mid' lists.
        tokenizer: tokenizer providing the FIM special tokens and encoding.
        max_length: padded/truncated token sequence length.
        prefix_token, suffix_token, middle_token, eos_token: optional overrides
            for the tokenizer's corresponding special tokens.
        **kwargs: extra dataset-config fields forwarded via fn_kwargs; ignored.

    Returns:
        dict with 'input_ids', 'attention_mask' and 'labels' batch tensors.
    """
    prefix_token = prefix_token or tokenizer.prefix_token
    suffix_token = suffix_token or tokenizer.suffix_token
    middle_token = middle_token or tokenizer.middle_token
    eos_token = eos_token or tokenizer.eos_token

    # Character-level truncation: keep the tail of the prefix (the context
    # closest to the insertion point) and the head of suffix/middle.
    data_ = [
        f"{prefix_token}{prefix[-512:]}{suffix_token}{suffix[:512]}{middle_token}{mid[:512]}{eos_token}"
        for prefix, suffix, mid in zip(data['prefix'], data['suffix'], data['mid'])
    ]

    result = tokenizer.batch_encode_plus(data_, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt")
    input_ids = result["input_ids"]
    attention_mask = result["attention_mask"]
    # Zero attention on positions whose token id is 0 (assumed to be the pad
    # id — TODO confirm against the tokenizer). The previous
    # `F.relu(input_ids) * attention_mask` was wrong: it yielded NON-binary
    # mask values equal to the token ids instead of 0/1.
    attention_mask = torch.where(input_ids == 0, torch.zeros_like(attention_mask), attention_mask)
    labels = input_ids.clone()
    # Exclude masked/padded positions from the loss (HF ignore index).
    labels[attention_mask == 0] = -100
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels
    }

def md(model_cfg):
    """Load the causal-LM backbone named by *model_cfg*.

    ``trust_remote_code=True`` allows checkpoints that ship custom model code.
    """
    return AutoModelForCausalLM.from_pretrained(
        model_cfg.pretrained_model_name_or_path,
        trust_remote_code=True,
    )

def tr(model_cfg):
    """Load the tokenizer matching the model checkpoint.

    ``trust_remote_code=True`` keeps tokenizer loading consistent with md(),
    which loads the model with remote code enabled; without it, loading fails
    for checkpoints whose tokenizer ships custom code.
    The UNK token is reused as pad token (base tokenizer defines no pad).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_cfg.pretrained_model_name_or_path, trust_remote_code=True)
    tokenizer.pad_token_id = tokenizer.unk_token_id
    return tokenizer

# Construct the Trainer
def initialize_trainer(model, training_args: TrainingArguments, train_dataset, eval_dataset, tokenizer) -> Trainer:
    """Build a HF Trainer over pre-tokenized datasets.

    The collator just stacks the already fixed-length per-example fields into
    batch tensors; no dynamic padding happens here.
    """
    def _collate(features):
        return {
            key: torch.tensor([example[key] for example in features])
            for key in ("input_ids", "attention_mask", "labels")
        }

    return Trainer(
        model=model,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        args=training_args,
        tokenizer=tokenizer,
        data_collator=_collate,
    )

# Run Weights & Biases in offline mode so training needs no network access.
# NOTE: `os` is already imported at the top of the file; the duplicate
# `import os` that used to sit here was redundant.
os.environ['WANDB_MODE'] = 'offline'


from train_parser import load_args
from parser_make import dump_parser_conf_new
def main():
    """End-to-end LoRA finetuning: load configs, model and data, then train."""
    # Regenerate the JSON config consumed by load_args, then parse it into
    # model / data / training argument groups.
    dump_parser_conf_new()
    model_args, data_args, training_args = load_args("./parser_conf_new.json")

    # Load base model + tokenizer, then wrap the model with LoRA adapters.
    model = md(model_args)
    tokenizer = tr(model_args)
    model = get_peft_model(model, lo_conf)

    # Forward every data-config field to data_formator; it accepts **kwargs,
    # so extra keys beyond max_length/special tokens pass through harmlessly.
    # (Removed leftover debug prints, one of which printed the literal string
    # "type(data_args)" instead of the expression.)
    data_cfg_dict = vars(data_args)
    # train_dataset_path may be a comma-separated list of parquet files.
    train_dataset = load_dataset('parquet', data_files={'train': data_args.train_dataset_path.split(',')})['train']
    eval_dataset = load_dataset('parquet', data_files={'train': data_args.eval_dataset_path})['train']
    train_dataset = train_dataset.map(data_formator, batched=True, num_proc=data_args.num_data_proc, fn_kwargs={
        "tokenizer": tokenizer,
        **data_cfg_dict
    })
    eval_dataset = eval_dataset.map(data_formator, batched=True, num_proc=data_args.num_data_proc, fn_kwargs={
        "tokenizer": tokenizer,
        **data_cfg_dict
    })

    # Build the Trainer and run the training loop.
    trainer = initialize_trainer(model, training_args, train_dataset, eval_dataset, tokenizer)
    trainer.train()

if __name__ == '__main__':
    main()
