lrodrigues committed
Commit 1fc781f
1 Parent(s): a73bf00
.gitattributes CHANGED
@@ -17,7 +17,6 @@
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
@@ -25,3 +24,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,49 @@
+ ---
+ language: en
+ license: mit
+ pipeline_tag: text-classification
+ tags:
+ - sentence-transformers
+ ---
+
+ # Cross-Encoder for MS Marco
+
+ The model can be used for Information Retrieval: given a query, encode the query with all candidate passages (e.g. retrieved with ElasticSearch), then sort the passages in decreasing order of score. See [SBERT.net Retrieve & Re-rank](https://www.sbert.net/examples/applications/retrieve_rerank/README.html) for more details. The training code is available here: [SBERT.net Training MS Marco](https://github.com/UKPLab/sentence-transformers/tree/master/examples/training/ms_marco)
+
+ ## Training Data
+ This model was trained on the [MS Marco Passage Ranking](https://github.com/microsoft/MSMARCO-Passage-Ranking) task.
+
+ ## Usage
+ Usage is easiest when you have [SentenceTransformers](https://www.sbert.net/) installed. Then you can use the pre-trained model like this:
+ ```python
+ from sentence_transformers import CrossEncoder
+ model = CrossEncoder('model_name', max_length=512)
+ scores = model.predict([('Query', 'Paragraph1'), ('Query', 'Paragraph2')])
+ ```
+
+ ## Performance
+ The following table lists various pre-trained Cross-Encoders together with their performance on the [TREC Deep Learning 2019](https://microsoft.github.io/TREC-2019-Deep-Learning/) and [MS Marco Passage Reranking](https://github.com/microsoft/MSMARCO-Passage-Ranking/) datasets.
+
+
+ | Model-Name | NDCG@10 (TREC DL 19) | MRR@10 (MS Marco Dev) | Docs / Sec |
+ | ------------- | :------------- | ----- | --- |
+ | **Version 2 models** | | | |
+ | cross-encoder/ms-marco-TinyBERT-L-2-v2 | 69.84 | 32.56 | 9000 |
+ | cross-encoder/ms-marco-MiniLM-L-2-v2 | 71.01 | 34.85 | 4100 |
+ | cross-encoder/ms-marco-MiniLM-L-4-v2 | 73.04 | 37.70 | 2500 |
+ | cross-encoder/ms-marco-MiniLM-L-6-v2 | 74.30 | 39.01 | 1800 |
+ | cross-encoder/ms-marco-MiniLM-L-12-v2 | 74.31 | 39.02 | 960 |
+ | **Version 1 models** | | | |
+ | cross-encoder/ms-marco-TinyBERT-L-2 | 67.43 | 30.15 | 9000 |
+ | cross-encoder/ms-marco-TinyBERT-L-4 | 68.09 | 34.50 | 2900 |
+ | cross-encoder/ms-marco-TinyBERT-L-6 | 69.57 | 36.13 | 680 |
+ | cross-encoder/ms-marco-electra-base | 71.99 | 36.41 | 340 |
+ | **Other models** | | | |
+ | nboost/pt-tinybert-msmarco | 63.63 | 28.80 | 2900 |
+ | nboost/pt-bert-base-uncased-msmarco | 70.94 | 34.75 | 340 |
+ | nboost/pt-bert-large-msmarco | 73.36 | 36.48 | 100 |
+ | Capreolus/electra-base-msmarco | 71.23 | 36.89 | 340 |
+ | amberoad/bert-multilingual-passage-reranking-msmarco | 68.40 | 35.54 | 330 |
+ | sebastian-hofstaetter/distilbert-cat-margin_mse-T2-msmarco | 72.82 | 37.88 | 720 |
+
+ Note: Runtime was computed on a V100 GPU.
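To make the README's re-ranking description concrete, here is a brief sketch (not part of the committed card) of scoring and sorting retrieved passages with the CrossEncoder API; the query, the passages, and the 'model_name' placeholder are illustrative only:

```python
from sentence_transformers import CrossEncoder

# Illustrative data; in practice the passages come from a first-stage
# retriever such as ElasticSearch.
query = "How many people live in Berlin?"
passages = [
    "Berlin had a population of 3.6 million registered inhabitants in 2019.",
    "Berlin is well known for its museums.",
]

# 'model_name' is a placeholder, as in the README snippet above.
model = CrossEncoder("model_name", max_length=512)
scores = model.predict([(query, passage) for passage in passages])

# Re-rank: a higher score means a more relevant passage.
for score, passage in sorted(zip(scores, passages), reverse=True):
    print(f"{score:.4f}  {passage}")
```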
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "cross-encoder/ms-marco-MiniLM-L-12-v2",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 384,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "sbert_ce_default_activation_function": "torch.nn.modules.linear.Identity",
+   "transformers_version": "4.4.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
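The config declares a BertForSequenceClassification head with a single label ("LABEL_0") and an Identity default activation, so the model emits one raw relevance logit per query-passage pair. A minimal sketch with plain transformers, assuming this repository's files and a placeholder 'model_name' id:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# 'model_name' is a placeholder for this repository's id.
model = AutoModelForSequenceClassification.from_pretrained("model_name")
tokenizer = AutoTokenizer.from_pretrained("model_name")

features = tokenizer(
    ["How many people live in Berlin?"],
    ["Berlin had a population of 3.6 million registered inhabitants in 2019."],
    padding=True,
    truncation=True,
    return_tensors="pt",
)

model.eval()
with torch.no_grad():
    # config.json declares a single label and an Identity activation,
    # so each pair yields one raw (unnormalized) relevance logit.
    print(model(**features).logits)
```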
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc6033077e458cf98490d51883382806373325919e7b5dd12f4dd694dcd667cd
+ size 90858210
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ae17b87eda3d184502a821fddff43d82feb7c206f665a851c491ec715b497ed
+ size 90903017
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "name_or_path": "microsoft/MiniLM-L12-H384-uncased",
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "unk_token": "[UNK]"
+ }
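For reference, the tokenizer files above imply lowercased WordPiece input capped at 512 tokens, with each query-passage pair packed into one sequence. A small illustrative sketch (again with a placeholder 'model_name' id) showing the packed layout a cross-encoder consumes:

```python
from transformers import AutoTokenizer

# 'model_name' is a placeholder for this repository's id.
tokenizer = AutoTokenizer.from_pretrained("model_name")

# The pair is encoded as one sequence:
# [CLS] query tokens [SEP] passage tokens [SEP]
enc = tokenizer("How many people live in Berlin?",
                "Berlin is the capital of Germany.")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# token_type_ids mark the query segment (0) vs. the passage segment (1).
print(enc["token_type_ids"])
```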
vocab.txt ADDED
The diff for this file is too large to render. See raw diff