from transformers import TrainingArguments, Trainer  # import Trainer-related classes
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from datasets import load_dataset
import os

# Path to the SimCLUE 10k sentence-pair training file (despite the name,
# this points at a single JSON file, not a directory).
dataset_dir = "/data/datasets/SimCLUE/datasets/train_pair_1w.json"
# Load the whole file as one 'train' split, then hold out 20% for evaluation.
dataset = load_dataset('json', data_files=dataset_dir, split='train')
datasets = dataset.train_test_split(test_size=0.2)
# NOTE(review): presumably a local checkpoint of hfl/chinese-macbert-base — confirm path.
tokenizer = AutoTokenizer.from_pretrained('/data/models/huggingface/chinese-macbert-base')


def process_function(examples):
    """Tokenize a batch of sentence pairs and attach +/-1 similarity labels.

    Each example contributes two consecutive rows (sentence1, sentence2) to a
    single flat tokenizer call; the tokenizer outputs are then regrouped so
    every feature value is a list of the two per-sentence encodings for one
    pair. Labels are mapped to 1 (label == 1) or -1 (anything else), the
    convention used by cosine-similarity style losses.

    Args:
        examples: batched dataset columns with keys 'sentence1', 'sentence2',
            and 'label' (label is int-convertible).

    Returns:
        dict with the tokenizer's feature keys (each value a list of
        [encoding_sent1, encoding_sent2] pairs) plus a 'labels' list of +/-1.
    """
    flat_sentences = []
    pair_labels = []
    for first, second, raw_label in zip(examples['sentence1'], examples['sentence2'], examples['label']):
        flat_sentences.extend((first, second))
        pair_labels.append(1 if int(raw_label) == 1 else -1)

    encoded = tokenizer(flat_sentences, max_length=128, truncation=True, padding="max_length")
    # Rows 2i and 2i+1 belong to the same example: chunk every feature by twos.
    encoded = {key: [values[i:i + 2] for i in range(0, len(values), 2)] for key, values in encoded.items()}
    encoded["labels"] = pair_labels
    return encoded


# Tokenize both splits in batches; dropping the original columns leaves only
# the tokenizer features plus the 'labels' list added by process_function.
tokenizer_datasets = datasets.map(process_function, batched=True, remove_columns=datasets['train'].column_names)
# Sanity check: show the first two processed training examples.
print(tokenizer_datasets['train'][0:2])
