innocent-charles committed

Commit ea11c0a
1 Parent(s): 97e84b4

Upload 17 files
1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_cls_token": true,
+   "pooling_mode_mean_tokens": false,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false
+ }
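
Note: the pooling config above enables only CLS-token pooling, i.e. the sentence embedding is taken from the first token's hidden state, with the mean, max and mean-sqrt-length modes switched off. A minimal sketch of that step in plain PyTorch (the tensor below is random stand-in data, not the model's actual output):

```python
import torch

# Stand-in for a transformer's token embeddings: (batch, seq_len, hidden) with hidden = 768
token_embeddings = torch.randn(2, 12, 768)

# pooling_mode_cls_token = true: keep only the first ([CLS]) token's vector per sentence;
# mean, max and mean-sqrt-len pooling are all disabled in this config.
sentence_embeddings = token_embeddings[:, 0, :]

print(sentence_embeddings.shape)  # torch.Size([2, 768])
```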
2_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Dense/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a548639f4e10e8b96be6a4846f0932ca2d011d491b37489a6b4751a3c096e49d
+ size 132
2_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4af843f3f778124777c11604c9e22c6afdca8c27764a44961099e981cf6355d
+ size 132
README.md ADDED
@@ -0,0 +1,168 @@
+ ---
+ language:
+ - multilingual
+ - af
+ - sq
+ - am
+ - ar
+ - hy
+ - as
+ - az
+ - eu
+ - be
+ - bn
+ - bs
+ - bg
+ - my
+ - ca
+ - ceb
+ - zh
+ - co
+ - hr
+ - cs
+ - da
+ - nl
+ - en
+ - eo
+ - et
+ - fi
+ - fr
+ - fy
+ - gl
+ - ka
+ - de
+ - el
+ - gu
+ - ht
+ - ha
+ - haw
+ - he
+ - hi
+ - hmn
+ - hu
+ - is
+ - ig
+ - id
+ - ga
+ - it
+ - ja
+ - jv
+ - kn
+ - kk
+ - km
+ - rw
+ - ko
+ - ku
+ - ky
+ - lo
+ - la
+ - lv
+ - lt
+ - lb
+ - mk
+ - mg
+ - ms
+ - ml
+ - mt
+ - mi
+ - mr
+ - mn
+ - ne
+ - no
+ - ny
+ - or
+ - fa
+ - pl
+ - pt
+ - pa
+ - ro
+ - ru
+ - sm
+ - gd
+ - sr
+ - st
+ - sn
+ - si
+ - sk
+ - sl
+ - so
+ - es
+ - su
+ - sw
+ - sv
+ - tl
+ - tg
+ - ta
+ - tt
+ - te
+ - th
+ - bo
+ - tr
+ - tk
+ - ug
+ - uk
+ - ur
+ - uz
+ - vi
+ - cy
+ - wo
+ - xh
+ - yi
+ - yo
+ - zu
+ pipeline_tag: sentence-similarity
+ tags:
+ - sentence-transformers
+ - feature-extraction
+ - sentence-similarity
+ library_name: sentence-transformers
+ license: apache-2.0
+ ---
+
+ # LaBSE
+ This is a port of the [LaBSE](https://tfhub.dev/google/LaBSE/1) model to PyTorch. It can be used to map 109 languages to a shared vector space.
+
+
+ ## Usage (Sentence-Transformers)
+
+ Using this model is straightforward once you have [sentence-transformers](https://www.SBERT.net) installed:
+
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+ sentences = ["This is an example sentence", "Each sentence is converted"]
+
+ model = SentenceTransformer('sentence-transformers/LaBSE')
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
+
+
+ ## Evaluation Results
+
+ For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/LaBSE)
+
+
+ ## Full Model Architecture
+ ```
+ SentenceTransformer(
+   (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel
+   (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
+   (2): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+   (3): Normalize()
+ )
+ ```
+
+ ## Citing & Authors
+
+ See [LaBSE](https://tfhub.dev/google/LaBSE/1) for the publication that describes the model.
+
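
Note: the README's usage snippet stops at printing raw embeddings. Since the card tags the model for sentence similarity, a natural follow-up (not part of the uploaded files; the sentence pair is invented for the example) is to score pairs with `sentence_transformers.util.cos_sim`:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('sentence-transformers/LaBSE')

# The final Normalize module L2-normalizes the embeddings,
# so cosine similarity reduces to a dot product.
sentences = ["This is an example sentence", "Dies ist ein Beispielsatz"]
embeddings = model.encode(sentences, convert_to_tensor=True)

score = util.cos_sim(embeddings[0], embeddings[1])
print(score)  # high similarity: the two sentences are translations of each other
```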
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "old_models/LaBSE/0_Transformer",
+   "architectures": [
+     "BertModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "directionality": "bidi",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.7.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 501153
+ }
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "__version__": {
+     "sentence_transformers": "2.0.0",
+     "transformers": "4.7.0",
+     "pytorch": "1.9.0+cu102"
+   }
+ }
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:211fcbb6fed2aedfa31e7da2ecd7ac485ac8010de6e18afbd8c00f722b18c8cc
+ size 135
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6566b39f08255d6abd32052675a6534fc20f5c262ce06e2ab5862dbd01cf7b7d
+ size 135
modules.json ADDED
@@ -0,0 +1,26 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   },
+   {
+     "idx": 2,
+     "name": "2",
+     "path": "2_Dense",
+     "type": "sentence_transformers.models.Dense"
+   },
+   {
+     "idx": 3,
+     "name": "3",
+     "path": "3_Normalize",
+     "type": "sentence_transformers.models.Normalize"
+   }
+ ]
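
Note: modules.json wires the four modules in order: Transformer → Pooling (1_Pooling) → Dense (2_Dense) → Normalize. For illustration only, the same stack can be assembled from sentence-transformers building blocks; the Dense module built this way starts from random weights rather than the trained ones uploaded in 2_Dense, so this is a sketch of the architecture, not a way to load the model:

```python
from torch import nn
from sentence_transformers import SentenceTransformer, models

# Recreate the module order from modules.json (architecture sketch only;
# the Dense layer here is freshly initialized, not the uploaded 2_Dense weights).
word_embedding = models.Transformer('sentence-transformers/LaBSE', max_seq_length=256)
pooling = models.Pooling(word_embedding.get_word_embedding_dimension(),
                         pooling_mode_cls_token=True,
                         pooling_mode_mean_tokens=False)
dense = models.Dense(in_features=768, out_features=768, bias=True,
                     activation_function=nn.Tanh())
normalize = models.Normalize()

model = SentenceTransformer(modules=[word_embedding, pooling, dense, normalize])
print(model)  # lists Transformer, Pooling, Dense, Normalize in that order
```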
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e6ff92c0dc1e0e18a7abf27a78921b7cd1a8c51373b44970e718efc81aada64
+ size 135
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 256,
+   "do_lower_case": false
+ }
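
Note: sentence_bert_config.json caps inputs at 256 word pieces and keeps casing (`do_lower_case: false`). Once the model is loaded, the limit is exposed through the standard `max_seq_length` property, e.g.:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('sentence-transformers/LaBSE')

# Inputs longer than this many word pieces are truncated before encoding.
print(model.max_seq_length)  # 256, per sentence_bert_config.json

# The limit can be lowered for faster encoding of short texts:
model.max_seq_length = 128
```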
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e0ebd757dea4709bb5d69d66ed94e6e46f5275d67ac5f04d791b7854106a885
+ size 135
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": "special_tokens_map.json", "full_tokenizer_file": null, "name_or_path": "old_models/LaBSE/0_Transformer", "do_basic_tokenize": true, "never_split": null}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff