from dataset import MyDataset,DataCollator
import json
import torch
from modeling import fuseModelWithAppendToBegin
from transformers import AutoModel,AutoTokenizer,AutoModelForCausalLM
def _load_split(path):
    # Read one split file; each JSON file stores its examples under the
    # top-level "data" key.
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)['data']

# Raw example lists for each split.
train_data = _load_split('/home/lxy/multiR/Fuser/data/train.json')
eval_data = _load_split('/home/lxy/multiR/Fuser/data/dev.json')
test_data = _load_split('/home/lxy/multiR/Fuser/data/test.json')

# Wrap the raw lists in the project's Dataset class for the Trainer.
train_dataset = MyDataset(train_data)
eval_dataset = MyDataset(eval_data)
test_dataset = MyDataset(test_data)

# Sentence-embedding encoder (BGE large Chinese) used to turn retrieved
# passages into vectors; inputs are capped at the model's 512-token limit.
sent2vec_tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh-v1.5',model_max_length=512)
sent2vec = AutoModel.from_pretrained('BAAI/bge-large-zh-v1.5',torch_dtype=torch.float16)
# Baichuan2-7B chat model loaded from a local checkpoint; trust_remote_code
# is required because Baichuan2 ships custom modeling code.
llm_tokenizer = AutoTokenizer.from_pretrained('/data/lxy/baichuan2-chat-7b',trust_remote_code=True)
# NOTE(review): the encoder is loaded in float16 while the LLM is bfloat16 —
# confirm fuseModelWithAppendToBegin handles the mixed dtypes.
llm = AutoModelForCausalLM.from_pretrained('/data/lxy/baichuan2-chat-7b',trust_remote_code=True,torch_dtype=torch.bfloat16)

# Fuse the two models: presumably the encoder's sentence vectors are
# prepended to the LLM input (per the class name) — verify in modeling.py.
model=fuseModelWithAppendToBegin(sent2vec=sent2vec,llm=llm)

from transformers import Seq2SeqTrainingArguments, Seq2SeqTrainer

# Training configuration: evaluate and checkpoint once per epoch, and restore
# the checkpoint with the lowest eval loss when training finishes.
trainingArguments = Seq2SeqTrainingArguments(
    predict_with_generate=True,
    output_dir='.',
    load_best_model_at_end=True,
    metric_for_best_model='loss',
    evaluation_strategy='epoch',
    save_strategy='epoch',
    dataloader_drop_last=True,
    # NOTE(review): fp16 mixed precision is enabled here while the LLM is
    # loaded in bfloat16 — confirm this combination is intended.
    fp16=True,
    gradient_accumulation_steps=32,
    auto_find_batch_size=False,
    # Must stay at 1: a larger batch would force padding across examples.
    per_device_train_batch_size=1,
    gradient_checkpointing=True,
    # Keep custom columns so the data collator still receives them.
    remove_unused_columns=False,
)

# Build the trainer and run training.
# Bug fix: the original passed the raw example lists (train_data/eval_data)
# instead of the MyDataset wrappers built earlier in this script, leaving
# train_dataset and eval_dataset unused.
trainer = Seq2SeqTrainer(
    model=model,
    args=trainingArguments,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    # padding=False matches per_device_train_batch_size=1, so the collator
    # never needs to pad across examples.
    data_collator=DataCollator(
        llm_tokenizer=llm_tokenizer,
        sent2vec_tokenizer=sent2vec_tokenizer,
        max_length=4096,
        padding=False,
    ),
)

trainer.train()