Pristinenlp committed
Commit d7b760d
1 Parent(s): 5785133
README.md CHANGED
@@ -1,3 +1,86 @@
 ---
-license: mit
+tags:
+- mteb
+model-index:
+- name: alime-reranker-large-zh
+  results:
+  - task:
+      type: Reranking
+    dataset:
+      type: C-MTEB/CMedQAv1-reranking
+      name: MTEB CMedQAv1
+      config: default
+      split: test
+      revision: None
+    metrics:
+    - type: map
+      value: 82.32176162633382
+    - type: mrr
+      value: 84.91440476190478
+  - task:
+      type: Reranking
+    dataset:
+      type: C-MTEB/CMedQAv2-reranking
+      name: MTEB CMedQAv2
+      config: default
+      split: test
+      revision: None
+    metrics:
+    - type: map
+      value: 84.08586457179406
+    - type: mrr
+      value: 86.9011507936508
+  - task:
+      type: Reranking
+    dataset:
+      type: C-MTEB/Mmarco-reranking
+      name: MTEB MMarcoReranking
+      config: default
+      split: dev
+      revision: None
+    metrics:
+    - type: map
+      value: 35.497382125464284
+    - type: mrr
+      value: 35.29206349206349
+  - task:
+      type: Reranking
+    dataset:
+      type: C-MTEB/T2Reranking
+      name: MTEB T2Reranking
+      config: default
+      split: dev
+      revision: None
+    metrics:
+    - type: map
+      value: 68.25849742148222
+    - type: mrr
+      value: 78.64202157956387
 ---
+
+# alime-reranker-large-zh
+
+The alime reranker model, a large Chinese reranker that scores query-passage pairs for relevance.
+
+## Usage
+```python
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+import torch
+
+# Query-passage pairs to score. The Chinese examples mean, roughly:
+# ("Where is West Lake?", "The West Lake scenic area is in Hangzhou, Zhejiang Province")
+# and ("The weather is nice today", "You scared me").
+pairs = [["西湖在哪?", "西湖风景名胜区位于浙江省杭州市"], ["今天天气不错", "你吓死我了"]]
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+tokenizer = AutoTokenizer.from_pretrained("Pristinenlp/alime-reranker-large-zh")
+model = AutoModelForSequenceClassification.from_pretrained("Pristinenlp/alime-reranker-large-zh").to(device)
+model.eval()
+
+# The model emits one logit per pair; a higher score means a better match.
+inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors="pt", max_length=512).to(device)
+with torch.no_grad():
+    scores = model(**inputs, return_dict=True).logits.view(-1).float()
+print(scores.tolist())
+```
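The snippet above prints raw scores for a fixed list of pairs. As a minimal sketch of how such scores are typically used downstream, assuming the same model and tokenizer, one query can be scored against several candidate passages and the candidates sorted best-first; the `rerank` helper here is illustrative, not part of the repository:

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Hypothetical helper: score (query, passage) pairs and return the passages
# sorted best-first by the model's relevance logit.
def rerank(query, passages, tokenizer, model, device):
    pairs = [[query, p] for p in passages]
    inputs = tokenizer(pairs, padding=True, truncation=True,
                       return_tensors="pt", max_length=512).to(device)
    with torch.no_grad():
        scores = model(**inputs, return_dict=True).logits.view(-1).float()
    return sorted(zip(passages, scores.tolist()), key=lambda x: x[1], reverse=True)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained("Pristinenlp/alime-reranker-large-zh")
model = AutoModelForSequenceClassification.from_pretrained(
    "Pristinenlp/alime-reranker-large-zh").to(device).eval()

# Candidates for "Where is West Lake?"; the passage that actually answers
# the question should come out on top.
query = "西湖在哪?"
passages = ["西湖风景名胜区位于浙江省杭州市", "今天天气不错", "你吓死我了"]
for passage, score in rerank(query, passages, tokenizer, model, device):
    print(f"{score:.4f}\t{passage}")
```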
config.json ADDED
@@ -0,0 +1,33 @@
+{
+  "architectures": [
+    "XLMRobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.31.0",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 250002
+}
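Note the single-entry `id2label`/`label2id` maps: the sequence-classification head has exactly one output, which is why the README snippet flattens `logits` into one relevance score per pair. A quick check of that assumption, loading only the config from the Hub:

```python
from transformers import AutoConfig

# The config above declares one label, i.e. a single-logit scoring head.
config = AutoConfig.from_pretrained("Pristinenlp/alime-reranker-large-zh")
print(config.model_type)  # xlm-roberta
print(config.num_labels)  # 1 -> one relevance score per (query, passage) pair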
gitattributes ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
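The `*.bin` and `*.model` patterns above route large binaries through Git LFS, which is why the two model files below appear as three-line pointer stubs (spec version, SHA-256 oid, byte size) rather than raw content. The ~2.24 GB weight pointer is consistent with an XLM-RoBERTa-large checkpoint (roughly 560 M parameters) serialized in float32.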
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9124850b3ed51219c665675e32cd7104efab748d039b0a93a93c390689717505
+size 2239701425
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
+}
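`model_max_length` is 512 here, matching the `max_length=512` truncation in the README snippet, and `tokenizer_class` points at the SentencePiece-based XLM-R tokenizer whose model file was uploaded above. A minimal sanity check, assuming the files are in place:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Pristinenlp/alime-reranker-large-zh")
print(type(tokenizer).__name__)    # XLMRobertaTokenizer, or its fast variant
print(tokenizer.model_max_length)  # 512
```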