from transformers import TrainingArguments, Trainer  # 导入trainer相关的包
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import DataCollatorWithPadding
from datasets import load_dataset
import evaluate


def process_function(examples):
    """Tokenize a batch of sentence pairs and attach float labels.

    Labels are cast to float because the model is configured with
    ``num_labels=1`` (regression / MSE loss) in ``__main__``.
    Relies on the module-level ``tokenizer`` created in ``__main__``.
    """
    encoded = tokenizer(
        examples['sentence1'],
        examples['sentence2'],
        truncation=True,
        max_length=128,
    )
    encoded["labels"] = list(map(float, examples['label']))
    return encoded


# Combined evaluation function merging several metrics into one dict.
def eval_metrics(eval_predict):
    """Compute accuracy, F1, precision and recall for binarized predictions.

    The model is a single-logit regressor (``num_labels=1``), so raw
    prediction scores are binarized at the 0.5 threshold and the float
    labels are cast back to int before scoring.

    Relies on the module-level metric objects (``acc_metric``,
    ``f1_metric``, ``precision_metric``, ``recall_metric``) created in
    ``__main__``.
    """
    predictions, labels = eval_predict
    predictions = [int(p > 0.5) for p in predictions]
    labels = [int(l) for l in labels]

    combined = acc_metric.compute(predictions=predictions, references=labels)
    # precision_metric and recall_metric were loaded in __main__ but never
    # used before; merge all four metrics so evaluation logs the full set.
    combined.update(f1_metric.compute(predictions=predictions, references=labels))
    combined.update(precision_metric.compute(predictions=predictions, references=labels))
    combined.update(recall_metric.compute(predictions=predictions, references=labels))
    return combined


if __name__ == "__main__":
    # Fine-tune a Chinese MacBERT cross-encoder on SimCLUE sentence pairs,
    # scoring pair similarity as a single-logit regression task.
    dataset_dir = "/data/datasets/SimCLUE/datasets/train_pair_4w.json"
    # dataset_dir = "/data/datasets/SimCLUE/simclue_public/train_pair.json"
    pre_train_model = "/data/models/huggingface/chinese-macbert-large"
    model_save_dir = '/data/logs/qq_search_robot/cross_model'

    dataset = load_dataset('json', data_files=dataset_dir, split='train')
    # Hold out 5% for evaluation; shuffle=False keeps the split deterministic.
    datasets = dataset.train_test_split(test_size=0.05, shuffle=False)
    tokenizer = AutoTokenizer.from_pretrained(pre_train_model)
    # Drop the raw text columns so the collator only sees tokenizer output.
    tokenizer_datasets = datasets.map(process_function, batched=True, remove_columns=datasets['train'].column_names)
    # NOTE: with num_labels=1 the model computes mean-squared-error (a
    # regression task) instead of cross-entropy, so labels must be floats
    # (see process_function).
    model = AutoModelForSequenceClassification.from_pretrained(pre_train_model,
                                                               num_labels=1)
    # Load all four metrics from a local checkout of the `evaluate` repo.
    acc_metric = evaluate.load('/evaluate-main/metrics/accuracy')
    f1_metric = evaluate.load('/evaluate-main/metrics/f1')
    precision_metric = evaluate.load('/evaluate-main/metrics/precision')
    recall_metric = evaluate.load('/evaluate-main/metrics/recall')
    train_args = TrainingArguments(
        output_dir=model_save_dir,  # where checkpoints are written
        per_device_train_batch_size=8,  # training batch size per device
        per_device_eval_batch_size=8,  # evaluation batch size per device
        num_train_epochs=12,  # number of training epochs
        logging_steps=250,  # log every 250 steps
        # save_steps=100, # step-based saving is mutually exclusive with epoch-based save_strategy
        save_strategy='epoch',  # save a checkpoint once per epoch
        save_total_limit=3,  # keep at most 3 checkpoints on disk
        evaluation_strategy='epoch',  # run evaluation once per epoch
        learning_rate=1e-5,  # learning rate
        weight_decay=0.01,  # weight decay
        metric_for_best_model='f1',  # NOTE(review): only takes effect with load_best_model_at_end=True
        # load_best_model_at_end=True, # reload the best checkpoint when training ends
        # report_to='tensorboard', # write training logs to tensorboard
    )

    trainer = Trainer(
        model=model,  # model to fine-tune
        args=train_args,  # training arguments configured above
        train_dataset=tokenizer_datasets['train'],  # tokenized training split
        eval_dataset=tokenizer_datasets['test'],  # tokenized evaluation split
        data_collator=DataCollatorWithPadding(tokenizer=tokenizer),  # dynamically pad each batch
        compute_metrics=eval_metrics,  # combined metric function defined above
    )

    trainer.train()
