nreimers committed on
Commit 153dc40
1 Parent(s): cbb0a3b
1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": true,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false
+ }
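The pooling config above selects mean pooling over the 768-dimensional token embeddings. As a minimal sketch (not part of this commit), the same module can be built with sentence-transformers' `models.Pooling`; the `pooling_mode="mean"` shortcut should yield exactly the flags in the JSON above:

```python
# Sketch: building the pooling module described by 1_Pooling/config.json.
# Assumes sentence-transformers is installed; pooling_mode="mean" is expected to set
# pooling_mode_mean_tokens=True and the other modes to False.
from sentence_transformers import models

pooling = models.Pooling(word_embedding_dimension=768, pooling_mode="mean")
print(pooling.get_config_dict())
```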
README.md ADDED
@@ -0,0 +1,133 @@
+ ---
+ pipeline_tag: sentence-similarity
+ tags:
+ - sentence-transformers
+ - feature-extraction
+ - sentence-similarity
+ - transformers
+ ---
+
+ # dense_encoder-msmarco-distilbert-word2vec256k
+
+ This model is based on [msmarco-word2vec256000-distilbert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased), which uses a 256k-token vocabulary initialized with word2vec.
+
+ It has been trained on MS MARCO using [MarginMSELoss](https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/ms_marco/train_bi-encoder_margin-mse.py). See train_script.py in this repository.
+ Performance:
+ - MS MARCO dev: - (MRR@10)
+ - TREC-DL 2019: 65.53 (nDCG@10)
+ - TREC-DL 2020: 67.42 (nDCG@10)
+ - Avg. on 4 BEIR datasets: 38.97
+
+
+ The word embedding matrix was frozen during training.
+
+ ## Usage (Sentence-Transformers)
+
+ This model is easy to use once you have [sentence-transformers](https://www.SBERT.net) installed:
+
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+ sentences = ["This is an example sentence", "Each sentence is converted"]
+
+ model = SentenceTransformer('{MODEL_NAME}')
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
+
+
+
+ ## Usage (HuggingFace Transformers)
+ Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.
+
+ ```python
+ from transformers import AutoTokenizer, AutoModel
+ import torch
+
+
+ # Mean Pooling - take the attention mask into account for correct averaging
+ def mean_pooling(model_output, attention_mask):
+     token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
+     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
+
+
+ # Sentences we want sentence embeddings for
+ sentences = ['This is an example sentence', 'Each sentence is converted']
+
+ # Load model from HuggingFace Hub
+ tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
+ model = AutoModel.from_pretrained('{MODEL_NAME}')
+
+ # Tokenize sentences
+ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
+
+ # Compute token embeddings
+ with torch.no_grad():
+     model_output = model(**encoded_input)
+
+ # Perform pooling. In this case, mean pooling.
+ sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
+
+ print("Sentence embeddings:")
+ print(sentence_embeddings)
+ ```
+
+
+
+ ## Evaluation Results
+
+ <!--- Describe how your model was evaluated -->
+
+ For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
+
+
+ ## Training
+ The model was trained with the parameters:
+
+ **DataLoader**:
+
+ `torch.utils.data.dataloader.DataLoader` of length 7858 with parameters:
+ ```
+ {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
+ ```
+
+ **Loss**:
+
+ `sentence_transformers.losses.MarginMSELoss.MarginMSELoss`
+
+ Parameters of the fit() method:
+ ```
+ {
+     "epochs": 30,
+     "evaluation_steps": 0,
+     "evaluator": "NoneType",
+     "max_grad_norm": 1,
+     "optimizer_class": "<class 'transformers.optimization.AdamW'>",
+     "optimizer_params": {
+         "lr": 2e-05
+     },
+     "scheduler": "WarmupLinear",
+     "steps_per_epoch": null,
+     "warmup_steps": 1000,
+     "weight_decay": 0.01
+ }
+ ```
+
+
+ ## Full Model Architecture
+ ```
+ SentenceTransformer(
+   (0): Transformer({'max_seq_length': 250, 'do_lower_case': False}) with Transformer model: DistilBertModel
+   (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
+ )
+ ```
+
+ ## Citing & Authors
+
+ <!--- Describe where people can find more information -->
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "_name_or_path": "nicoladecao/msmarco-word2vec256000-distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertModel"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "vocab_size": 256000
+ }
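A quick consistency check (my arithmetic, not from the commit): with `vocab_size` 256000 and `dim` 768, the frozen word-embedding matrix alone holds about 197M parameters, which at float32 (the declared `torch_dtype`) accounts for roughly 786 MB of the ~958 MB pytorch_model.bin listed below.

```python
# Back-of-the-envelope size of the 256k word2vec-initialized embedding matrix (assuming float32).
vocab_size, dim = 256_000, 768
params = vocab_size * dim      # 196,608,000 embedding parameters
bytes_fp32 = params * 4        # ~786 MB at float32
print(f"{params:,} params, {bytes_fp32 / 1e6:.0f} MB")
```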
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "__version__": {
+     "sentence_transformers": "2.2.0",
+     "transformers": "4.16.2",
+     "pytorch": "1.10.2"
+   }
+ }
modules.json ADDED
@@ -0,0 +1,14 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   }
+ ]
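modules.json wires the pipeline together: module 0 is the Transformer (repository root) and module 1 is the mean-pooling head in `1_Pooling`. A minimal sketch of assembling the same pipeline by hand, mirroring the construction in train_script.py further down (base checkpoint from config.json above, max_seq_length 250 from sentence_bert_config.json below):

```python
# Sketch: the two-module pipeline that modules.json describes.
from sentence_transformers import SentenceTransformer, models

word_embedding_model = models.Transformer(
    "nicoladecao/msmarco-word2vec256000-distilbert-base-uncased", max_seq_length=250
)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "mean")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
```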
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ab74f1d9ca75b60a82a718c7f60e9dd840656ad718e8748c03ae8f8c8d8e80c
+ size 958156601
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 250,
+   "do_lower_case": false
+ }
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"model_max_length": 512, "unk_token": "[UNK]", "cls_token": "[CLS]", "sep_token": "[SEP]", "pad_token": "[PAD]", "mask_token": "[MASK]", "model_input_names": ["input_ids", "attention_mask"], "special_tokens_map_file": "/root/.cache/huggingface/transformers/fe09c361189d8238b9e387f10a088e93f70620bfe74b82036baff1fed512a153.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d", "name_or_path": "nicoladecao/msmarco-word2vec256000-distilbert-base-uncased", "tokenizer_class": "PreTrainedTokenizerFast"}
train_script.py ADDED
@@ -0,0 +1,235 @@
+ import sys
+ import json
+ from torch.utils.data import DataLoader
+ from sentence_transformers import SentenceTransformer, LoggingHandler, util, models, evaluation, losses, InputExample
+ import logging
+ from datetime import datetime
+ import gzip
+ import os
+ import tarfile
+ import tqdm
+ from torch.utils.data import Dataset
+ import random
+ from shutil import copyfile
+ import pickle
+ import argparse
+
+ #### Just some code to print debug information to stdout
+ logging.basicConfig(format='%(asctime)s - %(message)s',
+                     datefmt='%Y-%m-%d %H:%M:%S',
+                     level=logging.INFO,
+                     handlers=[LoggingHandler()])
+ #### /print debug information to stdout
+
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--train_batch_size", default=64, type=int)
+ parser.add_argument("--max_seq_length", default=250, type=int)
+ parser.add_argument("--model_name", default="nicoladecao/msmarco-word2vec256000-distilbert-base-uncased")
+ parser.add_argument("--max_passages", default=0, type=int)
+ parser.add_argument("--epochs", default=30, type=int)
+ parser.add_argument("--pooling", default="mean")
+ parser.add_argument("--negs_to_use", default=None, help="From which systems should negatives be used? Multiple systems separated by comma. None = all")
+ parser.add_argument("--warmup_steps", default=1000, type=int)
+ parser.add_argument("--lr", default=2e-5, type=float)
+ parser.add_argument("--num_negs_per_system", default=5, type=int)
+ parser.add_argument("--use_all_queries", default=False, action="store_true")
+ args = parser.parse_args()
+
+ logging.info(str(args))
+
+
+
+ # The model we want to fine-tune
+ train_batch_size = args.train_batch_size  # Increasing the train batch size improves the model performance, but requires more GPU memory
+ model_name = args.model_name
+ max_passages = args.max_passages
+ max_seq_length = args.max_seq_length  # Max length for passages. Increasing it requires more GPU memory
+
+ num_negs_per_system = args.num_negs_per_system  # We used different systems to mine hard negatives. Number of hard negatives to add from each system
+ num_epochs = args.epochs  # Number of epochs we want to train
+
+ # Load our embedding model
+
+ logging.info("Create new SBERT model")
+ word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
+ pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), args.pooling)
+ model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
+
+ # Freeze the embedding layer (the word2vec-initialized word embeddings stay fixed)
+ # word_embedding_model.auto_model.embeddings.requires_grad = False
+ word_embedding_model.auto_model.embeddings.requires_grad_(False)
+
+ model_save_path = f'output-dense/{model_name.replace("/", "-")}-batch_size_{train_batch_size}-{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}'
+
+
+ # Write a copy of this script to the output path
+ os.makedirs(model_save_path, exist_ok=True)
+
+ train_script_path = os.path.join(model_save_path, 'train_script.py')
+ copyfile(__file__, train_script_path)
+ with open(train_script_path, 'a') as fOut:
+     fOut.write("\n\n# Script was called via:\n#python " + " ".join(sys.argv))
+
+
+ ### Now we read the MS MARCO dataset
+ data_folder = 'msmarco-data'
+
+ #### Read the corpus file that contains all the passages. Store them in the corpus dict
+ corpus = {}  # dict in the format: passage_id -> passage. Stores all existing passages
+ collection_filepath = os.path.join(data_folder, 'collection.tsv')
+ if not os.path.exists(collection_filepath):
+     tar_filepath = os.path.join(data_folder, 'collection.tar.gz')
+     if not os.path.exists(tar_filepath):
+         logging.info("Download collection.tar.gz")
+         util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz', tar_filepath)
+
+     with tarfile.open(tar_filepath, "r:gz") as tar:
+         tar.extractall(path=data_folder)
+
+ logging.info("Read corpus: collection.tsv")
+ with open(collection_filepath, 'r', encoding='utf8') as fIn:
+     for line in fIn:
+         pid, passage = line.strip().split("\t")
+         pid = int(pid)
+         corpus[pid] = passage
+
+
+ ### Read the train queries, store in queries dict
+ queries = {}  # dict in the format: query_id -> query. Stores all training queries
+ queries_filepath = os.path.join(data_folder, 'queries.train.tsv')
+ if not os.path.exists(queries_filepath):
+     tar_filepath = os.path.join(data_folder, 'queries.tar.gz')
+     if not os.path.exists(tar_filepath):
+         logging.info("Download queries.tar.gz")
+         util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/queries.tar.gz', tar_filepath)
+
+     with tarfile.open(tar_filepath, "r:gz") as tar:
+         tar.extractall(path=data_folder)
+
+
+ with open(queries_filepath, 'r', encoding='utf8') as fIn:
+     for line in fIn:
+         qid, query = line.strip().split("\t")
+         qid = int(qid)
+         queries[qid] = query
+
+
+ # Load a dict (qid, pid) -> ce_score that maps query-ids (qid) and paragraph-ids (pid)
+ # to the CrossEncoder score computed by the cross-encoder/ms-marco-MiniLM-L-6-v2 model
+ ce_scores_file = os.path.join(data_folder, 'cross-encoder-ms-marco-MiniLM-L-6-v2-scores.pkl.gz')
+ if not os.path.exists(ce_scores_file):
+     logging.info("Download cross-encoder scores file")
+     util.http_get('https://huggingface.co/datasets/sentence-transformers/msmarco-hard-negatives/resolve/main/cross-encoder-ms-marco-MiniLM-L-6-v2-scores.pkl.gz', ce_scores_file)
+
+ logging.info("Load CrossEncoder scores dict")
+ with gzip.open(ce_scores_file, 'rb') as fIn:
+     ce_scores = pickle.load(fIn)
+
+ # As training data we use hard negatives that have been mined with various systems
+ hard_negatives_filepath = os.path.join(data_folder, 'msmarco-hard-negatives.jsonl.gz')
+ if not os.path.exists(hard_negatives_filepath):
+     logging.info("Download hard-negatives file")
+     util.http_get('https://huggingface.co/datasets/sentence-transformers/msmarco-hard-negatives/resolve/main/msmarco-hard-negatives.jsonl.gz', hard_negatives_filepath)
+
+
+ logging.info("Read hard negatives train file")
+ train_queries = {}
+ negs_to_use = None
+ with gzip.open(hard_negatives_filepath, 'rt') as fIn:
+     for line in tqdm.tqdm(fIn):
+         if max_passages > 0 and len(train_queries) >= max_passages:
+             break
+         data = json.loads(line)
+
+         # Get the positive passage ids
+         pos_pids = data['pos']
+
+         # Get the hard negatives
+         neg_pids = set()
+         if negs_to_use is None:
+             if args.negs_to_use is not None:  # Use specific systems for negatives
+                 negs_to_use = args.negs_to_use.split(",")
+             else:  # Use all systems
+                 negs_to_use = list(data['neg'].keys())
+             logging.info("Using negatives from the following systems: {}".format(", ".join(negs_to_use)))
+
+         for system_name in negs_to_use:
+             if system_name not in data['neg']:
+                 continue
+
+             system_negs = data['neg'][system_name]
+             negs_added = 0
+             for pid in system_negs:
+                 if pid not in neg_pids:
+                     neg_pids.add(pid)
+                     negs_added += 1
+                     if negs_added >= num_negs_per_system:
+                         break
+
+         if args.use_all_queries or (len(pos_pids) > 0 and len(neg_pids) > 0):
+             train_queries[data['qid']] = {'qid': data['qid'], 'query': queries[data['qid']], 'pos': pos_pids, 'neg': neg_pids}
+
+ logging.info("Train queries: {}".format(len(train_queries)))
+
+ # We create a custom MS MARCO dataset that returns triplets (query, positive, negative)
+ # on-the-fly based on the information from the mined hard-negatives jsonl file.
+ class MSMARCODataset(Dataset):
+     def __init__(self, queries, corpus, ce_scores):
+         self.queries = queries
+         self.queries_ids = list(queries.keys())
+         self.corpus = corpus
+         self.ce_scores = ce_scores
+
+         for qid in self.queries:
+             self.queries[qid]['pos'] = list(self.queries[qid]['pos'])
+             self.queries[qid]['neg'] = list(self.queries[qid]['neg'])
+             random.shuffle(self.queries[qid]['neg'])
+
+     def __getitem__(self, item):
+         query = self.queries[self.queries_ids[item]]
+         query_text = query['query']
+         qid = query['qid']
+
+         if len(query['pos']) > 0:
+             pos_id = query['pos'].pop(0)  # Pop positive and add at end
+             pos_text = self.corpus[pos_id]
+             query['pos'].append(pos_id)
+         else:  # We only have negatives: use a negative as the "positive"
+             pos_id = query['neg'].pop(0)  # Pop negative and add at end
+             pos_text = self.corpus[pos_id]
+             query['neg'].append(pos_id)
+
+         # Get a negative passage
+         neg_id = query['neg'].pop(0)  # Pop negative and add at end
+         neg_text = self.corpus[neg_id]
+         query['neg'].append(neg_id)
+
+         pos_score = self.ce_scores[qid][pos_id]
+         neg_score = self.ce_scores[qid][neg_id]
+
+         return InputExample(texts=[query_text, pos_text, neg_text], label=pos_score - neg_score)
+
+     def __len__(self):
+         return len(self.queries)
+
+ # For training the SentenceTransformer model, we need a dataset, a dataloader, and a loss.
+ train_dataset = MSMARCODataset(queries=train_queries, corpus=corpus, ce_scores=ce_scores)
+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size, drop_last=True)
+ train_loss = losses.MarginMSELoss(model=model)
+
+ # Train the model
+ model.fit(train_objectives=[(train_dataloader, train_loss)],
+           epochs=num_epochs,
+           warmup_steps=args.warmup_steps,
+           use_amp=True,
+           checkpoint_path=model_save_path,
+           checkpoint_save_steps=10000,
+           optimizer_params={'lr': args.lr},
+           )
+
+ # Save the latest model
+ model.save(model_save_path)
+
+ # Script was called via:
+ #python train_dense.py
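For reference (a sketch of the objective, not part of the commit): each `InputExample` above carries the cross-encoder score margin `ce(q, pos) - ce(q, neg)` as its label, and MarginMSELoss regresses the bi-encoder's own score margin onto that target, roughly as follows.

```python
# Rough sketch of what MarginMSELoss computes for one (query, pos, neg) triple,
# assuming dot-product similarity (the library's usual choice for this loss).
import torch
import torch.nn.functional as F

def margin_mse(q_emb, pos_emb, neg_emb, ce_margin):
    # Bi-encoder margin: dot(q, pos) - dot(q, neg), regressed onto the cross-encoder margin.
    pred_margin = (q_emb * pos_emb).sum(-1) - (q_emb * neg_emb).sum(-1)
    return F.mse_loss(pred_margin, ce_margin)

# Example with random embeddings and a target margin of 3.0
q, p, n = torch.randn(768), torch.randn(768), torch.randn(768)
print(margin_mse(q, p, n, torch.tensor(3.0)))
```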