nreimers commited on
Commit
83658e1
·
1 Parent(s): 0ae72b8
1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "word_embedding_dimension": 768,
3
+ "pooling_mode_cls_token": false,
4
+ "pooling_mode_mean_tokens": true,
5
+ "pooling_mode_max_tokens": false,
6
+ "pooling_mode_mean_sqrt_len_tokens": false
7
+ }
README.md ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pipeline_tag: sentence-similarity
3
+ tags:
4
+ - sentence-transformers
5
+ - feature-extraction
6
+ - sentence-similarity
7
+ - transformers
8
+ ---
9
+
10
+ # {MODEL_NAME}
11
+
12
+ This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.
13
+
14
+ <!--- Describe your model here -->
15
+
16
+ ## Usage (Sentence-Transformers)
17
+
18
+ Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
19
+
20
+ ```
21
+ pip install -U sentence-transformers
22
+ ```
23
+
24
+ Then you can use the model like this:
25
+
26
+ ```python
27
+ from sentence_transformers import SentenceTransformer
28
+ sentences = ["This is an example sentence", "Each sentence is converted"]
29
+
30
+ model = SentenceTransformer('{MODEL_NAME}')
31
+ embeddings = model.encode(sentences)
32
+ print(embeddings)
33
+ ```
34
+
35
+
36
+
37
+ ## Usage (HuggingFace Transformers)
38
+ Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings.
39
+
40
+ ```python
41
+ from transformers import AutoTokenizer, AutoModel
42
+ import torch
43
+
44
+
45
# Mean pooling: average the token embeddings, weighting by the attention
# mask so that padding positions contribute nothing to the sentence vector.
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element of model_output holds all token embeddings
    # Broadcast the mask to the embedding shape: (batch, seq, dim).
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = torch.sum(token_embeddings * mask, 1)
    # Clamp avoids division by zero for an all-padding (empty) sequence.
    counts = torch.clamp(mask.sum(1), min=1e-9)
    return summed / counts
50
+
51
+
52
+ # Sentences we want sentence embeddings for
53
+ sentences = ['This is an example sentence', 'Each sentence is converted']
54
+
55
+ # Load model from HuggingFace Hub
56
+ tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
57
+ model = AutoModel.from_pretrained('{MODEL_NAME}')
58
+
59
+ # Tokenize sentences
60
+ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
61
+
62
+ # Compute token embeddings
63
+ with torch.no_grad():
64
+ model_output = model(**encoded_input)
65
+
66
+ # Perform pooling. In this case, mean pooling.
67
+ sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
68
+
69
+ print("Sentence embeddings:")
70
+ print(sentence_embeddings)
71
+ ```
72
+
73
+
74
+
75
+ ## Evaluation Results
76
+
77
+ <!--- Describe how your model was evaluated -->
78
+
79
+ For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
80
+
81
+
82
+ ## Training
83
+ The model was trained with the parameters:
84
+
85
+ **DataLoader**:
86
+
87
+ `torch.utils.data.dataloader.DataLoader` of length 7858 with parameters:
88
+ ```
89
+ {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
90
+ ```
91
+
92
+ **Loss**:
93
+
94
+ `sentence_transformers.losses.MarginMSELoss.MarginMSELoss`
95
+
96
+ Parameters of the fit()-Method:
97
+ ```
98
+ {
99
+ "epochs": 30,
100
+ "evaluation_steps": 0,
101
+ "evaluator": "NoneType",
102
+ "max_grad_norm": 1,
103
+ "optimizer_class": "<class 'transformers.optimization.AdamW'>",
104
+ "optimizer_params": {
105
+ "lr": 2e-05
106
+ },
107
+ "scheduler": "WarmupLinear",
108
+ "steps_per_epoch": null,
109
+ "warmup_steps": 1000,
110
+ "weight_decay": 0.01
111
+ }
112
+ ```
113
+
114
+
115
+ ## Full Model Architecture
116
+ ```
117
+ SentenceTransformer(
118
+ (0): Transformer({'max_seq_length': 250, 'do_lower_case': False}) with Transformer model: DistilBertModel
119
+ (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
120
+ )
121
+ ```
122
+
123
+ ## Citing & Authors
124
+
125
+ <!--- Describe where people can find more information -->
config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "distilbert-base-uncased",
3
+ "activation": "gelu",
4
+ "architectures": [
5
+ "DistilBertModel"
6
+ ],
7
+ "attention_dropout": 0.1,
8
+ "dim": 768,
9
+ "dropout": 0.1,
10
+ "hidden_dim": 3072,
11
+ "initializer_range": 0.02,
12
+ "max_position_embeddings": 512,
13
+ "model_type": "distilbert",
14
+ "n_heads": 12,
15
+ "n_layers": 6,
16
+ "pad_token_id": 0,
17
+ "qa_dropout": 0.1,
18
+ "seq_classif_dropout": 0.2,
19
+ "sinusoidal_pos_embds": false,
20
+ "tie_weights_": true,
21
+ "torch_dtype": "float32",
22
+ "transformers_version": "4.17.0",
23
+ "vocab_size": 30522
24
+ }
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "__version__": {
3
+ "sentence_transformers": "2.2.0",
4
+ "transformers": "4.17.0",
5
+ "pytorch": "1.11.0"
6
+ }
7
+ }
modules.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "idx": 0,
4
+ "name": "0",
5
+ "path": "",
6
+ "type": "sentence_transformers.models.Transformer"
7
+ },
8
+ {
9
+ "idx": 1,
10
+ "name": "1",
11
+ "path": "1_Pooling",
12
+ "type": "sentence_transformers.models.Pooling"
13
+ }
14
+ ]
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:26ee5ca26c85a6d6fd27c5e37be2b28d1684f3731fd2e379fd29f89a1c9cf707
3
+ size 265488505
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "max_seq_length": 250,
3
+ "do_lower_case": false
4
+ }
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilbert-base-uncased", "tokenizer_class": "DistilBertTokenizer"}
train_script.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import json
3
+ from torch.utils.data import DataLoader
4
+ from sentence_transformers import SentenceTransformer, LoggingHandler, util, models, evaluation, losses, InputExample
5
+ import logging
6
+ from datetime import datetime
7
+ import gzip
8
+ import os
9
+ import tarfile
10
+ import tqdm
11
+ from torch.utils.data import Dataset
12
+ import random
13
+ from shutil import copyfile
14
+ import pickle
15
+ import argparse
16
+
17
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])
#### /print debug information to stdout


# Command-line configuration for the training run.
parser = argparse.ArgumentParser()
parser.add_argument("--train_batch_size", default=64, type=int)
parser.add_argument("--max_seq_length", default=250, type=int)
parser.add_argument("--model_name", default="distilbert-base-uncased")
parser.add_argument("--max_passages", default=0, type=int)
parser.add_argument("--epochs", default=30, type=int)
parser.add_argument("--pooling", default="mean")
# Fixed typo in user-facing help text: "seperated" -> "separated".
parser.add_argument("--negs_to_use", default=None, help="From which systems should negatives be used? Multiple systems separated by comma. None = all")
parser.add_argument("--warmup_steps", default=1000, type=int)
parser.add_argument("--lr", default=2e-5, type=float)
parser.add_argument("--num_negs_per_system", default=5, type=int)
parser.add_argument("--use_all_queries", default=False, action="store_true")
parser.add_argument("--no_freeze_embeddings", default=False, action="store_true")
parser.add_argument("--name", default="")
args = parser.parse_args()

logging.info(str(args))
42
+
43
+
44
+
45
# The model we want to fine-tune
train_batch_size = args.train_batch_size #Increasing the train batch size improves the model performance, but requires more GPU memory
model_name = args.model_name
max_passages = args.max_passages
max_seq_length = args.max_seq_length #Max length for passages. Increasing it, requires more GPU memory

num_negs_per_system = args.num_negs_per_system # We used different systems to mine hard negatives. Number of hard negatives to add from each system
num_epochs = args.epochs # Number of epochs we want to train

# Load our embedding model: a Transformer encoder followed by a pooling
# layer (pooling mode taken from --pooling, "mean" by default).

logging.info("Create new SBERT model")
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), args.pooling)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])

# Freeze the (large) token-embedding matrix so it is not updated during
# fine-tuning; only transformer/pooling weights train. Opt out with
# --no_freeze_embeddings.
if not args.no_freeze_embeddings:
    print("Freeze embedding layer")
    word_embedding_model.auto_model.embeddings.word_embeddings.requires_grad_(False)

# Output directory encodes model name, batch size, run name and timestamp.
model_save_path = f'output-dense/{model_name.replace("/", "-")}-batch_size_{train_batch_size}-{args.name}-{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}'


# Write self to path: copy this script into the output dir for
# reproducibility, and append the exact command line it was invoked with.
os.makedirs(model_save_path, exist_ok=True)

train_script_path = os.path.join(model_save_path, 'train_script.py')
copyfile(__file__, train_script_path)
with open(train_script_path, 'a') as fOut:
    fOut.write("\n\n# Script was called via:\n#python " + " ".join(sys.argv))
76
+
77
+
78
### Now we read the MS Marco dataset
data_folder = 'msmarco-data'

#### Read the corpus files, that contain all the passages. Store them in the corpus dict
corpus = {} #dict in the format: passage_id -> passage. Stores all existent passages
collection_filepath = os.path.join(data_folder, 'collection.tsv')
if not os.path.exists(collection_filepath):
    tar_filepath = os.path.join(data_folder, 'collection.tar.gz')
    if not os.path.exists(tar_filepath):
        logging.info("Download collection.tar.gz")
        util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz', tar_filepath)

    # NOTE(review): extractall() trusts member paths inside the archive;
    # acceptable for this well-known MS MARCO tarball, but unsafe for
    # untrusted archives (path traversal).
    with tarfile.open(tar_filepath, "r:gz") as tar:
        tar.extractall(path=data_folder)

logging.info("Read corpus: collection.tsv")
# collection.tsv: one "<pid>\t<passage>" pair per line.
with open(collection_filepath, 'r', encoding='utf8') as fIn:
    for line in fIn:
        pid, passage = line.strip().split("\t")
        pid = int(pid)
        corpus[pid] = passage


### Read the train queries, store in queries dict
queries = {} #dict in the format: query_id -> query. Stores all training queries
queries_filepath = os.path.join(data_folder, 'queries.train.tsv')
if not os.path.exists(queries_filepath):
    tar_filepath = os.path.join(data_folder, 'queries.tar.gz')
    if not os.path.exists(tar_filepath):
        logging.info("Download queries.tar.gz")
        util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/queries.tar.gz', tar_filepath)

    # NOTE(review): same extractall() caveat as for collection.tar.gz above.
    with tarfile.open(tar_filepath, "r:gz") as tar:
        tar.extractall(path=data_folder)


# queries.train.tsv: one "<qid>\t<query>" pair per line.
with open(queries_filepath, 'r', encoding='utf8') as fIn:
    for line in fIn:
        qid, query = line.strip().split("\t")
        qid = int(qid)
        queries[qid] = query
119
+
120
+
121
# Load a dict (qid, pid) -> ce_score that maps query-ids (qid) and paragraph-ids (pid)
# to the CrossEncoder score computed by the cross-encoder/ms-marco-MiniLM-L-6-v2 model
ce_scores_file = os.path.join(data_folder, 'cross-encoder-ms-marco-MiniLM-L-6-v2-scores.pkl.gz')
if not os.path.exists(ce_scores_file):
    logging.info("Download cross-encoder scores file")
    util.http_get('https://huggingface.co/datasets/sentence-transformers/msmarco-hard-negatives/resolve/main/cross-encoder-ms-marco-MiniLM-L-6-v2-scores.pkl.gz', ce_scores_file)

logging.info("Load CrossEncoder scores dict")
# SECURITY NOTE: pickle.load() executes arbitrary code from the file; only
# acceptable here because the file comes from the trusted
# sentence-transformers dataset repository.
with gzip.open(ce_scores_file, 'rb') as fIn:
    ce_scores = pickle.load(fIn)

# As training data we use hard-negatives that have been mined using various systems
hard_negatives_filepath = os.path.join(data_folder, 'msmarco-hard-negatives.jsonl.gz')
if not os.path.exists(hard_negatives_filepath):
    # Fixed copy-paste bug: this previously logged "Download cross-encoder
    # scores file" although it downloads the hard-negatives file.
    logging.info("Download hard-negatives file")
    util.http_get('https://huggingface.co/datasets/sentence-transformers/msmarco-hard-negatives/resolve/main/msmarco-hard-negatives.jsonl.gz', hard_negatives_filepath)


logging.info("Read hard negatives train file")
train_queries = {}
negs_to_use = None
with gzip.open(hard_negatives_filepath, 'rt') as fIn:
    for line in tqdm.tqdm(fIn):
        # NOTE(review): despite its name, --max_passages caps the number of
        # *queries* read here, not passages.
        if max_passages > 0 and len(train_queries) >= max_passages:
            break
        data = json.loads(line)

        #Get the positive passage ids
        pos_pids = data['pos']

        #Get the hard negatives: up to num_negs_per_system ids from each
        #selected mining system, de-duplicated across systems.
        neg_pids = set()
        if negs_to_use is None:
            if args.negs_to_use is not None:    #Use specific system for negatives
                negs_to_use = args.negs_to_use.split(",")
            else:   #Use all systems
                negs_to_use = list(data['neg'].keys())
            logging.info("Using negatives from the following systems: {}".format(", ".join(negs_to_use)))

        for system_name in negs_to_use:
            if system_name not in data['neg']:
                continue

            system_negs = data['neg'][system_name]
            negs_added = 0
            for pid in system_negs:
                if pid not in neg_pids:
                    neg_pids.add(pid)
                    negs_added += 1
                    if negs_added >= num_negs_per_system:
                        break

        # Keep a query only if it has at least one positive and one negative
        # (or unconditionally with --use_all_queries).
        if args.use_all_queries or (len(pos_pids) > 0 and len(neg_pids) > 0):
            train_queries[data['qid']] = {'qid': data['qid'], 'query': queries[data['qid']], 'pos': pos_pids, 'neg': neg_pids}

logging.info("Train queries: {}".format(len(train_queries)))
177
+
178
+ # We create a custom MSMARCO dataset that returns triplets (query, positive, negative)
179
+ # on-the-fly based on the information from the mined-hard-negatives jsonl file.
180
+ class MSMARCODataset(Dataset):
181
+ def __init__(self, queries, corpus, ce_scores):
182
+ self.queries = queries
183
+ self.queries_ids = list(queries.keys())
184
+ self.corpus = corpus
185
+ self.ce_scores = ce_scores
186
+
187
+ for qid in self.queries:
188
+ self.queries[qid]['pos'] = list(self.queries[qid]['pos'])
189
+ self.queries[qid]['neg'] = list(self.queries[qid]['neg'])
190
+ random.shuffle(self.queries[qid]['neg'])
191
+
192
+ def __getitem__(self, item):
193
+ query = self.queries[self.queries_ids[item]]
194
+ query_text = query['query']
195
+ qid = query['qid']
196
+
197
+ if len(query['pos']) > 0:
198
+ pos_id = query['pos'].pop(0) #Pop positive and add at end
199
+ pos_text = self.corpus[pos_id]
200
+ query['pos'].append(pos_id)
201
+ else: #We only have negatives, use two negs
202
+ pos_id = query['neg'].pop(0) #Pop negative and add at end
203
+ pos_text = self.corpus[pos_id]
204
+ query['neg'].append(pos_id)
205
+
206
+ #Get a negative passage
207
+ neg_id = query['neg'].pop(0) #Pop negative and add at end
208
+ neg_text = self.corpus[neg_id]
209
+ query['neg'].append(neg_id)
210
+
211
+ pos_score = self.ce_scores[qid][pos_id]
212
+ neg_score = self.ce_scores[qid][neg_id]
213
+
214
+ return InputExample(texts=[query_text, pos_text, neg_text], label=pos_score-neg_score)
215
+
216
+ def __len__(self):
217
+ return len(self.queries)
218
+
219
+ # For training the SentenceTransformer model, we need a dataset, a dataloader, and a loss used for training.
220
+ train_dataset = MSMARCODataset(queries=train_queries, corpus=corpus, ce_scores=ce_scores)
221
+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size, drop_last=True)
222
+ train_loss = losses.MarginMSELoss(model=model)
223
+
224
+ # Train the model
225
+ model.fit(train_objectives=[(train_dataloader, train_loss)],
226
+ epochs=num_epochs,
227
+ warmup_steps=args.warmup_steps,
228
+ use_amp=True,
229
+ checkpoint_path=model_save_path,
230
+ checkpoint_save_steps=10000,
231
+ optimizer_params = {'lr': args.lr},
232
+ )
233
+
234
+ # Train latest model
235
+ model.save(model_save_path)
236
+
237
+ # Script was called via:
238
+ #python train_dense.py
vocab.txt ADDED
The diff for this file is too large to render. See raw diff