eduardofv committed on
Commit
92a62c5
1 Parent(s): 632aa4d

Added training script and updated README

Files changed (2)
  1. README.md +7 -0
  2. training_stsb_m_mt.py +104 -0
README.md CHANGED
@@ -14,3 +14,10 @@ While the fine-tuned version with the defaults of the training script and the Sp
  Cosine-Similarity : Pearson: 0.7451 Spearman: 0.7364
  ```

+ ## Resources
+
+ Check the modified training script [training_stsb_m_mt.py]
+
+ Check [sts_eval](https://github.com/eduardofv/sts_eval) for a comparison with TensorFlow and Sentence-Transformers models
+
+ Check the [development environment](https://github.com/eduardofv/ai-denv)
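
The Pearson and Spearman figures quoted in the README come from the standard Sentence-Transformers `EmbeddingSimilarityEvaluator`. A minimal sketch of reproducing that evaluation on the Spanish test split of `stsb_multi_mt` (the model identifier below is a placeholder, not something defined in this commit):

```python
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.readers import InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator

# Placeholder: substitute the model produced by the training script in this commit
model = SentenceTransformer("YOUR-FINE-TUNED-MODEL")

# Build evaluation samples from the Spanish test split, scores normalized from [0, 5] to [0, 1]
test = load_dataset("stsb_multi_mt", name="es", split="test")
samples = [InputExample(texts=[e["sentence1"], e["sentence2"]], label=e["similarity_score"] / 5)
           for e in test]

evaluator = EmbeddingSimilarityEvaluator.from_input_examples(samples, name="stsb-multi-mt-test")
evaluator(model)  # logs Pearson and Spearman correlations for cosine similarity
```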
training_stsb_m_mt.py ADDED
@@ -0,0 +1,104 @@
+ """
+ MODIFIED: (efv) Use STSb-multi-mt Spanish
+ source: https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/sts/training_stsbenchmark.py
+
+ ---
+
+ This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) on the Spanish
+ portion of the stsb_multi_mt dataset from scratch. It generates sentence embeddings that can be compared
+ with cosine similarity to measure semantic similarity.
+
+ Usage:
+ python training_stsb_m_mt.py
+
+ OR
+ python training_stsb_m_mt.py pretrained_transformer_model_name
+ """
+ import logging
+ import math
+ import sys
+ from datetime import datetime
+
+ from torch.utils.data import DataLoader
+ from datasets import load_dataset
+ from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models
+ from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
+ from sentence_transformers.readers import InputExample
+
+ #### Just some code to print debug information to stdout
+ logging.basicConfig(format='%(asctime)s - %(message)s',
+                     datefmt='%Y-%m-%d %H:%M:%S',
+                     level=logging.INFO,
+                     handlers=[LoggingHandler()])
+ #### /print debug information to stdout
+
+
+ # You can specify any huggingface/transformers pre-trained model here,
+ # for example: bert-base-uncased, roberta-base, xlm-roberta-base
+ model_name = sys.argv[1] if len(sys.argv) > 1 else 'distilbert-base-uncased'
+
+ # Training configuration
+ train_batch_size = 16
+ num_epochs = 4
+ model_save_path = 'output/training_stsbenchmark_' + model_name.replace("/", "-") + '-' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+
+ # Use a Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
+ word_embedding_model = models.Transformer(model_name)
+
+ # Apply mean pooling to get one fixed-size sentence vector
+ pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
+                                pooling_mode_mean_tokens=True,
+                                pooling_mode_cls_token=False,
+                                pooling_mode_max_tokens=False)
+
+ model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
+
+
+ def samples_from_dataset(dataset):
+     """Convert a stsb_multi_mt split into InputExamples, normalizing scores from [0, 5] to [0, 1]."""
+     return [InputExample(texts=[e['sentence1'], e['sentence2']], label=e['similarity_score'] / 5)
+             for e in dataset]
+
+
+ # Load the Spanish splits and convert them to lists of InputExamples
+ logging.info("Read stsb-multi-mt train dataset")
+ train_samples = samples_from_dataset(load_dataset("stsb_multi_mt", name="es", split="train"))
+ dev_samples = samples_from_dataset(load_dataset("stsb_multi_mt", name="es", split="dev"))
+ test_samples = samples_from_dataset(load_dataset("stsb_multi_mt", name="es", split="test"))
+
+ # Wrap the training samples in a DataLoader ready for training
+ train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
+ train_loss = losses.CosineSimilarityLoss(model=model)
+
+ logging.info("Read stsb-multi-mt dev dataset")
+ evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
+
+ # Configure the training
+ warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)  # 10% of train data for warm-up
+ logging.info("Warmup-steps: {}".format(warmup_steps))
+
+
+ ## Train the model, evaluating on the dev split every 1000 steps
+ model.fit(train_objectives=[(train_dataloader, train_loss)],
+           evaluator=evaluator,
+           epochs=num_epochs,
+           evaluation_steps=1000,
+           warmup_steps=warmup_steps,
+           output_path=model_save_path)
+
+
+ ##############################################################################
+ #
+ # Load the stored model and evaluate its performance on the STS benchmark dataset
+ #
+ ##############################################################################
+
+ # model = SentenceTransformer(model_save_path)
+ test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='stsb-multi-mt-test')
+ test_evaluator(model, output_path=model_save_path)
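
Once training finishes, the model saved under `model_save_path` can be loaded and used to compare Spanish sentences by cosine similarity. A minimal usage sketch, assuming the output directory produced by the script above (the path and example sentences are placeholders):

```python
from sentence_transformers import SentenceTransformer, util

# Placeholder path: use whatever `model_save_path` the training run actually wrote under output/
model = SentenceTransformer("output/training_stsbenchmark_distilbert-base-uncased-<timestamp>")

sentences = ["El gato duerme en el sofá.", "Un gato está durmiendo sobre el sillón."]
embeddings = model.encode(sentences, convert_to_tensor=True)

# Cosine similarity between the two sentence embeddings
score = util.pytorch_cos_sim(embeddings[0], embeddings[1]).item()
print(f"cosine similarity: {score:.4f}")
```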