import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    AdamW,
    Trainer,
    DataCollatorWithPadding,
    TrainingArguments)
from datasets import load_dataset


# Local model/dataset paths so nothing is fetched from the Hub at runtime.
model_path = '/home/will/huggingface-models/distilbert-base-uncased-finetuned-sst-2-english'
# If you hit "ValueError: Invalid pattern: '**' can only be an entire path component",
# upgrade datasets:
# pip install -U datasets
# HF_HOME can be set to customize the cache directory.
data_path = '/home/will/glue/mrpc'
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"

# NOTE(review): `from_pretrained` has no `checkpoint` keyword -- the stray
# kwarg was only silently stored on the tokenizer, so it is dropped here.
tokenizer = AutoTokenizer.from_pretrained(model_path)
# MRPC is binary classification (paraphrase / not paraphrase).
model = AutoModelForSequenceClassification.from_pretrained(model_path, num_labels=2)
# transformers' AdamW is deprecated in favor of torch.optim.AdamW, but the
# imported symbol is kept here so the file's import block stays untouched.
optimizer = AdamW(model.parameters())

# BUG FIX: the original call passed the second string as a `text_pair`, which
# produces a SINGLE paired example, while the labels below assume TWO separate
# sequences. Tokenize the two strings as a batch instead, and request PyTorch
# tensors so the manual training step (commented below) can actually run.
batch = tokenizer(
    ["i've been waiting for a huggingface course for too long",
     "This course is amazing!"],
    padding=True,
    truncation=True,
    return_tensors="pt",
)

print(f'type(batch) = {type(batch)}')

# One label per sequence in the batch (batch size is 2).
batch['labels'] = torch.tensor([1, 1])
print(f'batch = {batch}')
print(f"batch.input_ids = {batch['input_ids']}")

# input_ids is now a (2, seq_len) tensor; show the tokens of the first sequence.
tokenized_sentences = tokenizer.convert_ids_to_tokens(batch['input_ids'][0])
print(tokenized_sentences)

# A single manual training step (now consistent with the batch above):
# loss = model(**batch).loss
# loss.backward()
# optimizer.step()

# Load MRPC from the local directory.
raw_datasets = load_dataset(path=data_path)
print(f'raw_datasets = {raw_datasets}')

# Naive approach: tokenize the whole train split eagerly. Every example is
# padded to the global maximum length and held in memory at once.
train_split = raw_datasets["train"]
tokenized_dataset = tokenizer(
    train_split["sentence1"],
    train_split["sentence2"],
    padding=True,
    truncation=True,
)
# print(tokenized_dataset)


def tokenize_function(example):
    """Tokenize a batch of (sentence1, sentence2) pairs; padding is left to the collator."""
    first, second = example["sentence1"], example["sentence2"]
    return tokenizer(first, second, truncation=True)


# Preferred approach: tokenize in batches via Dataset.map (no global padding).
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
print(tokenized_datasets)

# Dynamic padding: each batch is padded only to its own longest sequence.
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

# Demonstrate dynamic padding on the first 32 training examples.
samples = tokenized_datasets["train"][:32]
excluded = {"idx", "sentence1", "sentence2"}
samples = {key: val for key, val in samples.items() if key not in excluded}
seq_lengths = [len(ids) for ids in samples["input_ids"]]
print(seq_lengths)

batch = data_collator(samples)
print(f'batch after DataCollatorWithPadding: {batch}')
shapes = {key: tensor.shape for key, tensor in batch.items()}
print(shapes)

# Only the output directory is set; every other hyper-parameter is a default.
training_args = TrainingArguments('test-trainer')

trainer = Trainer(
    model,
    training_args,
    train_dataset=tokenized_datasets['train'],
    eval_dataset=tokenized_datasets['validation'],
    data_collator=data_collator,  # may be omitted: Trainer uses this collator by default
    tokenizer=tokenizer,
)
trainer.train()
