from types import MethodType
from dataset import MyDataset,DataCollator
import json
import torch
from modeling import fuseModelWithAppendToBegin,custom_forward
from transformers import AutoModel,AutoTokenizer,AutoModelForCausalLM
def _load_split(path):
    """Read one dataset split from a JSON file and return its 'data' list.

    Each split file is expected to be a JSON object with a top-level
    'data' key holding the list of examples.
    """
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)['data']

# Raw example lists for each split.
train_data = _load_split('/hy-tmp/data/train.json')
eval_data = _load_split('/hy-tmp/data/dev.json')
test_data = _load_split('/hy-tmp/data/test.json')

# Wrap the raw lists in the project Dataset type used by the Trainer.
train_dataset = MyDataset(train_data)
eval_dataset = MyDataset(eval_data)
test_dataset = MyDataset(test_data)

# Tokenizers: one for the sentence-embedding encoder, one for the LLM.
sent2vec_tokenizer = AutoTokenizer.from_pretrained(
    '/hy-tmp/thenlper/gte-large-zh', model_max_length=512
)
llm_tokenizer = AutoTokenizer.from_pretrained(
    '/hy-tmp/multi-r2/Fuser/add2begin/baichuan7bchat', trust_remote_code=True
)

# Load the fused model; freeze both the LLM backbone and the sentence
# encoder so only the remaining (fusion) parameters are trained.
model = fuseModelWithAppendToBegin.from_pretrained('/hy-tmp/damnModel')
for frozen_module in (model.model, model.sent2vec):
    for frozen_param in frozen_module.parameters():
        frozen_param.requires_grad = False

from transformers import Seq2SeqTrainingArguments, Seq2SeqTrainer

# Replace the backbone's forward with the project-specific implementation,
# bound as an instance method on model.model.
model.model.forward = MethodType(custom_forward, model.model)



# Training configuration: evaluate/save each epoch, keep the best
# checkpoint (by eval loss), train in bf16 with gradient checkpointing.
trainingArguments = Seq2SeqTrainingArguments(
    output_dir='.',
    predict_with_generate=True,
    evaluation_strategy='epoch',
    save_strategy='epoch',
    load_best_model_at_end=True,
    metric_for_best_model='loss',
    save_total_limit=3,
    bf16=True,
    gradient_checkpointing=True,
    gradient_accumulation_steps=32,
    auto_find_batch_size=False,
    # This batch size is required: any other value triggers padding.
    per_device_train_batch_size=2,
    dataloader_drop_last=True,
    # Keep custom columns so the collator still sees them.
    remove_unused_columns=False,
)

# Build the trainer and launch training.
# BUG FIX: the raw JSON lists (train_data / eval_data) were passed here,
# leaving the MyDataset wrappers constructed above unused; the Trainer
# must receive the Dataset objects the collator expects.
trainer = Seq2SeqTrainer(
    model=model,
    args=trainingArguments,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    data_collator=DataCollator(
        llm_tokenizer=llm_tokenizer,
        sent2vec_tokenizer=sent2vec_tokenizer,
        max_length=4096,
        padding=False,
    ),
)

trainer.train()