import pathlib

import evaluate
import numpy as np
import pandas as pd
import torch
from datasets import get_dataset_split_names, load_dataset
from rich import print
from torch.optim import Adam
from torch.utils.data import DataLoader, Dataset, random_split
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainingArguments,
)


# All paths are resolved relative to this script's own directory.
base_folder = pathlib.Path(__file__).parent
data_file = base_folder.joinpath('train_pair_1w.json')

# Checkpoints written by the Trainer land here.
save_model_checkpoint_path = base_folder.joinpath('models')
save_model_checkpoint_path.mkdir(parents=True, exist_ok=True)

# Local copies of the `evaluate` metric scripts live in a sibling `metrics/` folder.
_metrics_dir = base_folder.parent.joinpath('metrics')
metric_f1_file_path = str(_metrics_dir.joinpath('f1.py').resolve())
metric_recall_file_path = str(_metrics_dir.joinpath('recall.py').resolve())
metric_accuracy_file_path = str(_metrics_dir.joinpath('accuracy.py').resolve())

# Pretrained Chinese MacBERT checkpoint on local disk.
model_floder = r'D:\models\chinese-macbert-base'

# Load the JSON sentence-pair data, then drop rows where either sentence is empty.
d = load_dataset('json', data_files=str(data_file.resolve()), split='train')
data_init = d.filter(lambda x: x['sentence1'] and x['sentence2'])

# 80/20 train/test split.
data_init = data_init.train_test_split(test_size=0.2)
tokenizer = AutoTokenizer.from_pretrained(model_floder)
tokenizer = AutoTokenizer.from_pretrained(model_floder)
def collate_fn_batch(batch):
    """Tokenize one sentence pair and attach its label as a float.

    Applied via ``Dataset.map`` one example at a time, so ``batch`` is a
    single record with ``sentence1``/``sentence2``/``label`` fields.
    The float label matches the single-logit (num_labels=1) head used below.
    """
    encoded = tokenizer(
        batch['sentence1'],
        batch['sentence2'],
        max_length=128,
        truncation=True,
    )
    encoded['labels'] = float(batch['label'])
    return encoded
# Tokenize both splits; dropping the original columns leaves only the
# tokenizer outputs plus the float `labels` field for the model.
data_init_all = data_init.map(collate_fn_batch, remove_columns=d.column_names)

print(data_init_all)


train_args = TrainingArguments(
    output_dir=str(save_model_checkpoint_path.resolve()),
    per_device_train_batch_size=12,
    per_device_eval_batch_size=12,
    logging_steps=10,
    learning_rate=2e-5,
    weight_decay=0.01,
    # Evaluate and save on the SAME step schedule: load_best_model_at_end
    # requires matching eval/save strategies, and metric_for_best_model is
    # inert unless load_best_model_at_end is enabled.
    eval_strategy='steps',
    eval_steps=300,
    save_strategy='steps',
    save_steps=300,
    save_total_limit=3,          # keep at most 3 checkpoints on disk
    load_best_model_at_end=True,
    metric_for_best_model='f1',  # key produced by compute_metrics below
)




# Single-logit head: with num_labels=1 the model emits one score per pair
# (Transformers treats this as a regression head by default — verify that
# this matches the intended loss).
model = AutoModelForSequenceClassification.from_pretrained(model_floder, num_labels=1)

# Metrics are loaded from local script copies, so no hub/network access is needed.
metric_f1 = evaluate.load(metric_f1_file_path)
metric_accuracy = evaluate.load(metric_accuracy_file_path)



def eval_metric(eval_predict):
    """Compute accuracy and F1 for the Trainer's evaluation loop.

    The model uses a single-logit head (num_labels=1), so ``predictions``
    arrives with shape (N, 1); scores above 0.5 map to the positive class.

    Args:
        eval_predict: ``(predictions, labels)`` pair supplied by the Trainer.

    Returns:
        dict merging the accuracy and f1 metric results.
    """
    predictions, labels = eval_predict
    # Flatten the (N, 1) array before thresholding: iterating it row-wise
    # and calling int() on each length-1 sub-array is deprecated and raises
    # on NumPy >= 1.25.
    preds = (np.asarray(predictions).reshape(-1) > 0.5).astype(int).tolist()
    refs = [int(l) for l in np.asarray(labels).reshape(-1)]
    result = metric_accuracy.compute(predictions=preds, references=refs)
    result.update(metric_f1.compute(predictions=preds, references=refs))
    return result

# Wire everything into the Trainer. DataCollatorWithPadding pads each batch
# dynamically to its longest sequence, so no fixed padding was done in map().
trainer = Trainer(
    model=model,
    args=train_args,
    train_dataset=data_init_all['train'],
    eval_dataset=data_init_all['test'],
    data_collator=DataCollatorWithPadding(tokenizer=tokenizer),
    compute_metrics=eval_metric,
)

trainer.train()
