d0rj committed on
Commit 7fcf5d4
1 Parent(s): 4704415

feat: update model to sentence-transformers version

1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "word_embedding_dimension": 1024,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": true,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false
+ }
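For context (not part of the committed file): this pooling config enables plain mean pooling over token embeddings, with CLS, max, and sqrt-length pooling disabled, and records the encoder's 1024-dimensional word embeddings. A minimal sketch of what that pooling computes, assuming the usual `last_hidden_state`/`attention_mask` tensors from a `transformers` forward pass:

```python
from torch import Tensor


def mean_pool(last_hidden_state: Tensor, attention_mask: Tensor) -> Tensor:
    # Mask out padding positions, then average over the sequence dimension,
    # matching pooling_mode_mean_tokens=true above.
    mask = attention_mask.unsqueeze(-1).type_as(last_hidden_state)
    summed = (last_hidden_state * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1e-9)
    return summed / counts  # shape: (batch_size, 1024)
```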
README.md CHANGED
@@ -15,6 +15,9 @@ tags:
  - retriever
  - pruned
  - e5
+ - sentence-transformers
+ - feature-extraction
+ - sentence-similarity
  ---

  # E5-large-en-ru
@@ -33,6 +36,10 @@ Uses only russian and english tokens.

  ## Usage

+ ### transformers
+
+ #### Direct usage
+
  ```python
  import torch.nn.functional as F
  from torch import Tensor
@@ -63,4 +70,33 @@ embeddings = F.normalize(embeddings, p=2, dim=1)
  scores = (embeddings[:2] @ embeddings[2:].T) * 100
  print(scores.tolist())
  # [[68.59542846679688, 81.75910949707031], [80.36100769042969, 64.77748107910156]]
+ ```
+
+ #### Pipeline
+
+ ```python
+ from transformers import pipeline
+
+
+ pipe = pipeline('feature-extraction', model='d0rj/e5-large-en-ru')
+ embeddings = pipe(input_texts, return_tensors=True)
+ embeddings[0].size()
+ # torch.Size([1, 17, 1024])
+ ```
+
+ ### sentence-transformers
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+
+
+ sentences = [
+     'query: Что такое круглые тензоры?',
+     'passage: Abstract: we introduce a novel method for compressing round tensors based on their inherent radial symmetry. We start by generalising PCA and eigen decomposition on round tensors...',
+ ]
+
+ model = SentenceTransformer('d0rj/e5-large-en-ru')
+ embeddings = model.encode(sentences, convert_to_tensor=True)
+ embeddings.size()
+ # torch.Size([2, 1024])
  ```
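As a follow-up to the new sentence-transformers example (a hedged sketch, not part of the committed README): because the exported pipeline ends with a Normalize module, embeddings are L2-normalized, so the dot product between a query and a passage equals their cosine similarity. `sentence_transformers.util.cos_sim` returns the same score directly:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('d0rj/e5-large-en-ru')
query_emb = model.encode('query: Что такое круглые тензоры?', convert_to_tensor=True)
passage_emb = model.encode('passage: Abstract: we introduce a novel method ...', convert_to_tensor=True)
print(util.cos_sim(query_emb, passage_emb))  # 1x1 tensor with the cosine score
```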
config.json CHANGED
@@ -21,7 +21,7 @@
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
- "transformers_version": "4.25.1",
+ "transformers_version": "4.30.1",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 60302
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "__version__": {
+     "sentence_transformers": "2.2.2",
+     "transformers": "4.30.1",
+     "pytorch": "1.12.1"
+   }
+ }
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   },
+   {
+     "idx": 2,
+     "name": "2",
+     "path": "2_Normalize",
+     "type": "sentence_transformers.models.Normalize"
+   }
+ ]
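modules.json declares the three-stage sentence-transformers pipeline this commit exports: Transformer encoder, mean Pooling (from 1_Pooling), and Normalize. Loading the repo id with `SentenceTransformer('d0rj/e5-large-en-ru')` builds exactly this stack; the snippet below is only a hedged sketch of the equivalent manual construction:

```python
from sentence_transformers import SentenceTransformer, models

# Stage 0: the XLM-R based encoder itself.
word_model = models.Transformer('d0rj/e5-large-en-ru')
# Stage 1: mean pooling over token embeddings (1024-dim, per 1_Pooling/config.json).
pooling = models.Pooling(
    word_model.get_word_embedding_dimension(),
    pooling_mode_mean_tokens=True,
)
# Stage 2: L2-normalization of the pooled sentence embedding.
normalize = models.Normalize()

model = SentenceTransformer(modules=[word_model, pooling, normalize])
```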
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b057df4cbbcccfe1bec23d948afcebee3727b4b88a8ae17a0cc992467e5498a1
- size 1462712045
+ oid sha256:a900d8829b407aaadc83b6315504ba1acdfde420b5e2288c706a0215c6b11ddb
+ size 1462678449
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 514,
+   "do_lower_case": false
+ }
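The `max_seq_length` of 514 mirrors the underlying XLM-RoBERTa position-embedding limit; longer inputs are truncated by the sentence-transformers wrapper. It can be tightened at runtime if desired (a sketch under that assumption, not part of the committed files):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('d0rj/e5-large-en-ru')
print(model.max_seq_length)  # 514, taken from sentence_bert_config.json
model.max_seq_length = 512   # optionally truncate inputs earlier
```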
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
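The added tokenizer files pin an XLMRobertaTokenizer with RoBERTa-style special tokens; `model_max_length` is left at the transformers 'unset' sentinel, so any truncation limit has to come from the caller (or from the sentence-transformers config above). A short sanity-check sketch, not part of the committed files:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('d0rj/e5-large-en-ru')
print(tok.cls_token, tok.sep_token, tok.pad_token, tok.mask_token)  # <s> </s> <pad> <mask>
enc = tok('query: how does pooling work?', truncation=True, max_length=512, return_tensors='pt')
print(enc['input_ids'].shape)  # (1, sequence_length)
```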