|
from datasets import load_dataset,concatenate_datasets |
|
from setfit import SetFitModel, SetFitTrainer |
|
from sentence_transformers.losses import CosineSimilarityLoss |
|
|
|
|
|
|
|
# Load the AG News topic-classification corpus (train/test splits, 4 classes).
dataset = load_dataset("ag_news")

seed = 20
labels = 4
samples_per_label = 8

# Few-shot training set: for every class id, take `samples_per_label`
# examples from a seeded shuffle of that class's subset, then merge all
# per-class samples into a single dataset.
# NOTE: `label=class_id` binds the current id as a default argument so the
# lambda is self-contained (filter evaluates eagerly either way).
train_dataset = concatenate_datasets(
    [
        dataset["train"]
        .filter(lambda x, label=class_id: x["label"] == label)
        .shuffle(seed=seed)
        .select(range(samples_per_label))
        for class_id in range(labels)
    ]
)
|
|
|
|
|
# Evaluate on the full held-out AG News test split (no subsampling).
test_dataset = dataset["test"]


# Sentence-transformer backbone that SetFit fine-tunes; the classification
# head is added on top of its pooled embeddings.
model_id = "sentence-transformers/all-mpnet-base-v2"

model = SetFitModel.from_pretrained(model_id)
|
|
|
|
|
# Contrastive fine-tuning configuration. `num_iterations=20` controls how
# many positive/negative sentence pairs are generated per training sample;
# accuracy on `eval_dataset` is the reported metric.
trainer_config = dict(
    model=model,
    train_dataset=train_dataset,
    eval_dataset=test_dataset,
    loss_class=CosineSimilarityLoss,
    metric="accuracy",
    batch_size=64,
    num_iterations=20,
    num_epochs=1,
)
trainer = SetFitTrainer(**trainer_config)
|
|
|
|
|
# Run contrastive fine-tuning, then score on the held-out test split.
trainer.train()
metrics = trainer.evaluate()

# Report the run summary (same stdout as three separate print calls).
summary_lines = [
    f"model used: {model_id}",
    f"train dataset: {len(train_dataset)} samples",
    f"accuracy: {metrics['accuracy']}",
]
print("\n".join(summary_lines))