import warnings
from datasets import load_metric
import numpy as np
import torch
from datasets import load_dataset
from transformers import AutoTokenizer
from transformers import DataCollatorWithPadding
from transformers import TrainingArguments
from transformers import AutoModelForSequenceClassification
from transformers import Trainer

# NOTE(review): blanket suppression hides *all* warnings, including
# deprecation warnings from transformers/datasets — consider narrowing
# to specific categories (e.g. FutureWarning) instead.
warnings.filterwarnings("ignore")

import os


def test_load_dataset():
    """Load and print the GLUE MRPC dataset from the HuggingFace hub.

    GLUE (General Language Understanding Evaluation) / MRPC (Microsoft
    Research Paraphrase Corpus): each example is a pair of sentences
    labeled as semantically equivalent or not.
    """
    # Paraphrase-detection task: are the two sentences equivalent?
    mrpc = load_dataset('glue', 'mrpc')
    print(mrpc)


def test_handle_data():
    """Tokenize the GLUE/MRPC sentence pairs and build a dynamic-padding collator.

    Padding is deferred to ``DataCollatorWithPadding`` (per-batch dynamic
    padding) instead of padding every example to one global max length.

    Returns:
        tuple: ``(tokenized_datasets, data_collator, tokenizer)`` ready to be
        handed to a ``Trainer``.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

    # Passing both sentences to the tokenizer joins them with the model's
    # separator token; truncation=True keeps pairs within the model limit.
    def tokenize_function(example):
        return tokenizer(example["sentence1"], example["sentence2"], truncation=True)

    raw_datasets = load_dataset("glue", "mrpc")
    tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)

    # Sanity-check dynamic padding on the first 8 training rows: drop the
    # raw-string/index columns the collator cannot pad, then collate.
    samples = tokenized_datasets["train"][:8]
    samples = {k: v for k, v in samples.items() if k not in {"idx", "sentence1", "sentence2"}}
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    batch = data_collator(samples)  # every sequence in the batch now shares one length
    return tokenized_datasets, data_collator, tokenizer


def test_train():
    """Fine-tune/evaluate BERT on GLUE MRPC with the HuggingFace Trainer API.

    First runs an (untrained) prediction pass and scores it manually, then
    rebuilds the trainer with a ``compute_metrics`` hook so evaluation runs
    automatically at the end of each epoch.
    """
    # Fix: the original referenced an undefined name `checkpoint` below
    # (NameError at runtime); define it once and use it consistently.
    checkpoint = "bert-base-cased"

    # num_train_epochs is set directly in the constructor; the original also
    # called set_training(num_epochs=2), which only re-applied this value and
    # the library defaults, so it is dropped as redundant.
    training_args = TrainingArguments("test-trainer", num_train_epochs=2)
    print(training_args)
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
    tokenized_datasets, data_collator, tokenizer = test_handle_data()
    trainer = Trainer(
        model,
        training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["validation"],
        data_collator=data_collator,
        tokenizer=tokenizer,
    )
    # trainer.train()
    # Prediction on the untrained model: logits of shape (n_samples, 2).
    predictions = trainer.predict(tokenized_datasets["validation"])
    print(predictions.predictions.shape, predictions.label_ids.shape)
    preds = np.argmax(predictions.predictions, axis=-1)
    # NOTE(review): datasets.load_metric is deprecated in newer versions in
    # favor of the `evaluate` library — left as-is to avoid a new dependency.
    metric = load_metric("glue", "mrpc")
    print(metric.compute(predictions=preds, references=predictions.label_ids))

    # The evaluation method can also be wired into the training loop itself.
    def compute_metrics(eval_preds):
        metric = load_metric("glue", "mrpc")
        logits, labels = eval_preds
        predictions = np.argmax(logits, axis=-1)
        return metric.compute(predictions=predictions, references=labels)

    training_args = TrainingArguments("test-trainer", evaluation_strategy="epoch")
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

    trainer = Trainer(
        model,
        training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["validation"],
        data_collator=data_collator,
        tokenizer=tokenizer,
        compute_metrics=compute_metrics,
    )
    trainer.train()  # reports the metrics for every epoch


if __name__ == "__main__":
    # test_load_dataset()
    # test_handle_data()
    os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"  # 只使用GPU 1和2
    test_train()
