# DKK-nli / conto.py (Seongsu Park)
# A naive NLI baseline: fine-tune klue/roberta-small on the DKK-nli dataset.
from datasets import load_dataset, DatasetDict
from evaluate import load
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
import torch
import numpy as np
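
# Class ids for the 3-way NLI task, kept for reference; the DKK-nli dataset
# is assumed to encode its labels with this same mapping.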
labels = {
'contradiction': 0,
'neutral': 1,
'entailment': 2,
}
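
# Load the dataset and keep a 1/100 shard of each split for a quick smoke test.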
datasets = load_dataset('seongs1024/DKK-nli')
datasets = DatasetDict({
'train': datasets['train'].shard(num_shards=100, index=0),
'validation': datasets['validation'].shard(num_shards=100, index=0),
})
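
# The GLUE MNLI metric reports classification accuracy.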
metric = load('glue', 'mnli')
def compute_metrics(eval_pred):
    predictions, references = eval_pred
    # Convert logits to predicted class ids before scoring.
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=references)
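
# Korean RoBERTa encoder with a freshly initialized 3-class classification head.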
checkpoint = 'klue/roberta-small'
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=3)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
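
# Tokenize each premise/hypothesis pair as a single sequence-pair input; padding
# is left to the Trainer's default data collator, which pads per batch.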
def preprocess_function(examples):
return tokenizer(
examples['premise'],
examples['hypothesis'],
truncation=True,
return_token_type_ids=False,
)
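
# Tokenize both splits up front; batched=True maps over chunks of examples.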
encoded_datasets = datasets.map(preprocess_function, batched=True)
batch_size = 8
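
# Evaluate and checkpoint every epoch, then reload the checkpoint with the best
# validation accuracy at the end of training.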
args = TrainingArguments(
    output_dir='test-nli',
    evaluation_strategy='epoch',
    save_strategy='epoch',
    learning_rate=2e-5,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=5,
    weight_decay=0.01,
    load_best_model_at_end=True,
    metric_for_best_model='accuracy',
)
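
# Wire together the model, tokenized data, tokenizer (for batch padding), and
# the metric callback.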
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=encoded_datasets['train'],
    eval_dataset=encoded_datasets['validation'],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
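
# Train, run a final evaluation, and save the resulting model to ./model.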
trainer.train()
trainer.evaluate()
trainer.save_model('./model')
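
# A sketch for reloading the saved model in a separate session (assumes
# save_model() also wrote the tokenizer files, which it does when a tokenizer
# is passed to the Trainer):
# model = AutoModelForSequenceClassification.from_pretrained('./model')
# tokenizer = AutoTokenizer.from_pretrained('./model')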
# Optional sanity check: score a few premise/hypothesis pairs (English and
# Korean) with the fine-tuned model. Uncomment to run after training.
# features = tokenizer(
#     [
#         'A man is eating pizza',
#         'A black race car starts up in front of a crowd of people.',
#         # "I ate tasty food today, but it's a bit hard to explain how it's tasty."
#         '였늘 λ§›μžˆλŠ” λ°₯을 λ¨Ήμ—ˆμ–΄ 근데 이게 μ–΄λ–»κ²Œ λ§›μžˆλ‹€ μ„€λͺ…ν•˜κΈ°λŠ” μ’€ 애맀해.',
#         # "I'm on my way home."
#         'λ‚˜ 집에 κ°€λŠ” 쀑.',
#     ],
#     [
#         'A man eats something',
#         'A man is driving down a lonely road.',
#         # "I ate bad-tasting food today, but it's a bit hard to explain how it's tasty."
#         '였늘 λ§›μ—†λŠ” λ°₯을 λ¨Ήμ—ˆμ–΄ 근데 이게 μ–΄λ–»κ²Œ λ§›μžˆλ‹€ μ„€λͺ…ν•˜κΈ°λŠ” μ’€ 애맀해.',
#         # "I arrived home."
#         'λ‚˜ 집에 λ„μ°©ν–ˆμ–΄.',
#     ],
#     padding=True,
#     truncation=True,
#     return_tensors='pt',
# )
# print(features)
# model.eval()
# with torch.no_grad():
#     scores = model(**features).logits
# print(scores)
# # The index order must match the `labels` mapping defined at the top of the
# # file (contradiction=0, neutral=1, entailment=2).
# label_mapping = ['contradiction', 'neutral', 'entailment']
# predicted = [label_mapping[score_max] for score_max in scores.argmax(dim=1)]
# print(predicted)