""" MODIFIED: (efv) Use STSb-multi-mt Spanish source: https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/sts/training_stsbenchmark.py --- This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity. Usage: python training_nli.py OR python training_nli.py pretrained_transformer_model_name """ from torch.utils.data import DataLoader import math from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator from sentence_transformers.readers import InputExample import logging from datetime import datetime import sys import os import gzip import csv from datasets import load_dataset #### Just some code to print debug information to stdout logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO, handlers=[LoggingHandler()]) #### /print debug information to stdout #You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base model_name = sys.argv[1] if len(sys.argv) > 1 else 'distilbert-base-uncased' # Read the dataset train_batch_size = 16 num_epochs = 4 model_save_path = 'output/training_stsbenchmark_'+model_name.replace("/", "-")+'-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S") # Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings word_embedding_model = models.Transformer(model_name) # Apply mean pooling to get one fixed sized sentence vector pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode_mean_tokens=True, pooling_mode_cls_token=False, pooling_mode_max_tokens=False) model = SentenceTransformer(modules=[word_embedding_model, pooling_model]) # Convert the dataset to a DataLoader ready for training logging.info("Read stsb-multi-mt train dataset de mis documentos") train_samples = [] dev_samples = [] test_samples = [] def samples_from_dataset(dataset): samples = [InputExample(texts=[e['sentence1'], e['sentence2']], label=e['similarity_score'] / 5) \ for e in dataset] return samples print("vamos a cargar") train_samples = load_dataset("csv", name="Bases_dades\Catala\stsb-ca-train.csv",split="train", column_names = ['sentence1', 'sentence2', 'similarity_score'] ) print("cargada dataset") train_samples = samples_from_dataset(train_samples) print("Samples del train creades") print("Cargar dev samples") dev_samples = samples_from_dataset(load_dataset("csv", name="Bases_dades\Catala\stsb-ca-dev.csv", split="validation", column_names = ['sentence1', 'sentence2', 'similarity_score'])) print("dev samples creades") print("Cargar test samples") test_samples = samples_from_dataset(load_dataset("csv", name="Bases_dades\Catala\stsb-ca-test.csv", split="test", column_names = ['sentence1', 'sentence2', 'similarity_score'])) print("Test samples creades") train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size) train_loss = losses.CosineSimilarityLoss(model=model) logging.info("Read stsb-multi-mt dev dataset") evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev') # Configure the training. 
We skip evaluation in this example warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) #10% of train data for warm-up logging.info("Warmup-steps: {}".format(warmup_steps)) ## Train the model model.fit(train_objectives=[(train_dataloader, train_loss)], evaluator=evaluator, epochs=num_epochs, evaluation_steps=1000, warmup_steps=warmup_steps, output_path=model_save_path) ############################################################################## # # Load the stored model and evaluate its performance on STS benchmark dataset # ############################################################################## #model = SentenceTransformer(model_save_path) test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='stsb-multi-mt-test') test_evaluator(model, output_path=model_save_path)
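
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original script): once training
# has finished, the saved model can be reloaded from model_save_path and used
# to embed sentence pairs whose similarity is then measured with cosine
# similarity via sentence_transformers.util (already imported above).
# The two sentences below are placeholder examples.
# ---------------------------------------------------------------------------
trained_model = SentenceTransformer(model_save_path)
example_embeddings = trained_model.encode(["A sample sentence.", "Another sample sentence."],
                                          convert_to_tensor=True)
cosine_score = util.cos_sim(example_embeddings[0], example_embeddings[1])
print("Example cosine similarity: {:.4f}".format(cosine_score.item()))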