Jyiyiyiyi committed on
Commit
23f713b
1 Parent(s): 67d14b7

Upload 23 files

Browse files

Pretrained model on LaTeX, Presentation MathML and Content MathML representations of formulas. It was introduced in https://link.springer.com/chapter/10.1007/978-981-99-7254-8_8

embedding_latex = model.encode([{'latex': latex}])
embedding_pmml = model.encode([{'mathml': pmml}])
embedding_cmml = model.encode([{'mathml': cmml}])


@inproceedings{wang2023math,
title={Math Information Retrieval with Contrastive Learning of Formula Embeddings},
author={Wang, Jingyi and Tian, Xuedong},
booktitle={International Conference on Web Information Systems Engineering},
pages={97--107},
year={2023},
organization={Springer}
}

0_Asym/140000857040976_MarkuplmTransformerForConMATH/added_tokens.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "<end-of-node>": 50266,
3
+ "[empty-title]": 50265
4
+ }
0_Asym/140000857040976_MarkuplmTransformerForConMATH/config.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "output/training_markuplm_simcsemicrosoft-markuplm-base-2023-03-16_18-40-10",
3
+ "architectures": [
4
+ "MarkupLMModel"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 0,
8
+ "classifier_dropout": null,
9
+ "eos_token_id": 2,
10
+ "gradient_checkpointing": false,
11
+ "hidden_act": "gelu",
12
+ "hidden_dropout_prob": 0.1,
13
+ "hidden_size": 768,
14
+ "initializer_range": 0.02,
15
+ "intermediate_size": 3072,
16
+ "layer_norm_eps": 1e-05,
17
+ "max_depth": 50,
18
+ "max_position_embeddings": 514,
19
+ "max_xpath_subs_unit_embeddings": 1024,
20
+ "max_xpath_tag_unit_embeddings": 256,
21
+ "model_type": "markuplm",
22
+ "num_attention_heads": 12,
23
+ "num_hidden_layers": 12,
24
+ "pad_token_id": 1,
25
+ "position_embedding_type": "absolute",
26
+ "subs_pad_id": 1001,
27
+ "tag_pad_id": 216,
28
+ "torch_dtype": "float32",
29
+ "transformers_version": "4.26.1",
30
+ "type_vocab_size": 1,
31
+ "use_cache": true,
32
+ "vocab_size": 50267,
33
+ "xpath_unit_hidden_size": 32
34
+ }
0_Asym/140000857040976_MarkuplmTransformerForConMATH/generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 0,
4
+ "eos_token_id": 2,
5
+ "pad_token_id": 1,
6
+ "transformers_version": "4.26.1"
7
+ }
0_Asym/140000857040976_MarkuplmTransformerForConMATH/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
0_Asym/140000857040976_MarkuplmTransformerForConMATH/preprocessor_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "feature_extractor_type": "MarkupLMFeatureExtractor",
3
+ "processor_class": "MarkupLMProcessor"
4
+ }
0_Asym/140000857040976_MarkuplmTransformerForConMATH/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:644beecc9dcbf64a22d4a6058e21e564e47333f7384d16d7f85a1c43be7a4b0e
3
+ size 540919261
0_Asym/140000857040976_MarkuplmTransformerForConMATH/sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "max_seq_length": 512,
3
+ "do_lower_case": false
4
+ }
0_Asym/140000857040976_MarkuplmTransformerForConMATH/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
0_Asym/140000857040976_MarkuplmTransformerForConMATH/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
0_Asym/140000857040976_MarkuplmTransformerForConMATH/tokenizer_config.json ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "bos_token": {
4
+ "__type": "AddedToken",
5
+ "content": "<s>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false
10
+ },
11
+ "cls_token": {
12
+ "__type": "AddedToken",
13
+ "content": "<s>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false
18
+ },
19
+ "eos_token": {
20
+ "__type": "AddedToken",
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false
26
+ },
27
+ "errors": "replace",
28
+ "mask_token": {
29
+ "__type": "AddedToken",
30
+ "content": "<mask>",
31
+ "lstrip": true,
32
+ "normalized": true,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ },
36
+ "max_depth": 50,
37
+ "max_width": 1000,
38
+ "model_max_length": 512,
39
+ "name_or_path": "output/training_markuplm_simcsemicrosoft-markuplm-base-2023-03-16_18-40-10",
40
+ "only_label_first_subword": true,
41
+ "pad_token": {
42
+ "__type": "AddedToken",
43
+ "content": "<pad>",
44
+ "lstrip": false,
45
+ "normalized": true,
46
+ "rstrip": false,
47
+ "single_word": false
48
+ },
49
+ "pad_token_label": -100,
50
+ "pad_width": 1001,
51
+ "processor_class": "MarkupLMProcessor",
52
+ "sep_token": {
53
+ "__type": "AddedToken",
54
+ "content": "</s>",
55
+ "lstrip": false,
56
+ "normalized": true,
57
+ "rstrip": false,
58
+ "single_word": false
59
+ },
60
+ "special_tokens_map_file": null,
61
+ "tags_dict": {
62
+ "abs": 0,
63
+ "and": 1,
64
+ "annotation": 2,
65
+ "annotation-xml": 3,
66
+ "apply": 4,
67
+ "approx": 5,
68
+ "arccos": 6,
69
+ "arccosh": 7,
70
+ "arccot": 8,
71
+ "arccoth": 9,
72
+ "arccsc": 10,
73
+ "arccsch": 11,
74
+ "arcsec": 12,
75
+ "arcsech": 13,
76
+ "arcsin": 14,
77
+ "arcsinh": 15,
78
+ "arctan": 16,
79
+ "arctanh": 17,
80
+ "arg": 18,
81
+ "bind": 19,
82
+ "bvar": 20,
83
+ "card": 21,
84
+ "cartesianproduct": 22,
85
+ "cbytes": 23,
86
+ "ceiling": 24,
87
+ "cerror": 25,
88
+ "ci": 26,
89
+ "cn": 27,
90
+ "codomain": 28,
91
+ "complexes": 29,
92
+ "compose": 30,
93
+ "condition": 31,
94
+ "conjugate": 32,
95
+ "cos": 33,
96
+ "cosh": 34,
97
+ "cot": 35,
98
+ "coth": 36,
99
+ "cs": 37,
100
+ "csc": 38,
101
+ "csch": 39,
102
+ "csymbol": 40,
103
+ "curl": 41,
104
+ "declare": 42,
105
+ "degree": 43,
106
+ "determinant": 44,
107
+ "diff": 45,
108
+ "divergence": 46,
109
+ "divide": 47,
110
+ "domain": 48,
111
+ "domainofapplication": 49,
112
+ "emptyset": 50,
113
+ "eq": 51,
114
+ "equivalent": 52,
115
+ "eulergamma": 53,
116
+ "exists": 54,
117
+ "exp": 55,
118
+ "exponentiale": 56,
119
+ "factorial": 57,
120
+ "factorof": 58,
121
+ "false": 59,
122
+ "floor": 60,
123
+ "fn": 61,
124
+ "forall": 62,
125
+ "gcd": 63,
126
+ "geq": 64,
127
+ "grad": 65,
128
+ "gt": 66,
129
+ "ident": 67,
130
+ "image": 68,
131
+ "imaginary": 69,
132
+ "imaginaryi": 70,
133
+ "implies": 71,
134
+ "in": 72,
135
+ "infinity": 73,
136
+ "int": 74,
137
+ "integers": 75,
138
+ "intersect": 76,
139
+ "interval": 77,
140
+ "inverse": 78,
141
+ "lambda": 79,
142
+ "laplacian": 80,
143
+ "lcm": 81,
144
+ "leq": 82,
145
+ "limit": 83,
146
+ "list": 84,
147
+ "ln": 85,
148
+ "log": 86,
149
+ "logbase": 87,
150
+ "lowlimit": 88,
151
+ "lt": 89,
152
+ "maction": 90,
153
+ "maligngroup": 91,
154
+ "malignmark": 92,
155
+ "math": 93,
156
+ "matrix": 94,
157
+ "matrixrow": 95,
158
+ "max": 96,
159
+ "mean": 97,
160
+ "median": 98,
161
+ "menclose": 99,
162
+ "merror": 100,
163
+ "mfenced": 101,
164
+ "mfrac": 102,
165
+ "mglyph": 103,
166
+ "mi": 104,
167
+ "mi\"": 105,
168
+ "min": 106,
169
+ "minus": 107,
170
+ "mlabeledtr": 108,
171
+ "mlongdiv": 109,
172
+ "mmultiscripts": 110,
173
+ "mn": 111,
174
+ "mo": 112,
175
+ "mode": 113,
176
+ "moment": 114,
177
+ "momentabout": 115,
178
+ "mover": 116,
179
+ "mpadded": 117,
180
+ "mphantom": 118,
181
+ "mprescripts": 119,
182
+ "mroot": 120,
183
+ "mrow": 121,
184
+ "ms": 122,
185
+ "mscarries": 123,
186
+ "mscarry": 124,
187
+ "msgroup": 125,
188
+ "msline": 126,
189
+ "mspace": 127,
190
+ "msqrt": 128,
191
+ "msrow": 129,
192
+ "mstack": 130,
193
+ "mstyle": 131,
194
+ "msub": 132,
195
+ "msubsup": 133,
196
+ "msup": 134,
197
+ "mtable": 135,
198
+ "mtd": 136,
199
+ "mtext": 137,
200
+ "mtr": 138,
201
+ "munder": 139,
202
+ "munderover": 140,
203
+ "naturalnumbers": 141,
204
+ "neq": 142,
205
+ "none": 143,
206
+ "not": 144,
207
+ "notanumber": 145,
208
+ "notin": 146,
209
+ "notprsubset": 147,
210
+ "notsubset": 148,
211
+ "or": 149,
212
+ "otherwise": 150,
213
+ "outerproduct": 151,
214
+ "partialdiff": 152,
215
+ "pi": 153,
216
+ "piece": 154,
217
+ "piecewise": 155,
218
+ "plus": 156,
219
+ "power": 157,
220
+ "primes": 158,
221
+ "product": 159,
222
+ "prsubset": 160,
223
+ "quotient": 161,
224
+ "rationals": 162,
225
+ "real": 163,
226
+ "reals": 164,
227
+ "reln": 165,
228
+ "rem": 166,
229
+ "root": 167,
230
+ "scalarproduct": 168,
231
+ "sdev": 169,
232
+ "sec": 170,
233
+ "sech": 171,
234
+ "selector": 172,
235
+ "semantics": 173,
236
+ "sep": 174,
237
+ "set": 175,
238
+ "setdiff": 176,
239
+ "share": 177,
240
+ "sin": 178,
241
+ "sinh": 179,
242
+ "span": 180,
243
+ "subset": 181,
244
+ "sum": 182,
245
+ "tan": 183,
246
+ "tanh": 184,
247
+ "tendsto": 185,
248
+ "times": 186,
249
+ "transpose": 187,
250
+ "true": 188,
251
+ "union": 189,
252
+ "uplimit": 190,
253
+ "variance": 191,
254
+ "vector": 192,
255
+ "vectorproduct": 193,
256
+ "xor": 194
257
+ },
258
+ "tokenizer_class": "MarkupLMTokenizer",
259
+ "trim_offsets": false,
260
+ "unk_token": {
261
+ "__type": "AddedToken",
262
+ "content": "<unk>",
263
+ "lstrip": false,
264
+ "normalized": true,
265
+ "rstrip": false,
266
+ "single_word": false
267
+ }
268
+ }
0_Asym/140000857040976_MarkuplmTransformerForConMATH/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
0_Asym/140000860578048_Transformer/config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "sentence-transformers/all-mpnet-base-v2",
3
+ "architectures": [
4
+ "MPNetModel"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 0,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "gelu",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 768,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 3072,
14
+ "layer_norm_eps": 1e-05,
15
+ "max_position_embeddings": 514,
16
+ "model_type": "mpnet",
17
+ "num_attention_heads": 12,
18
+ "num_hidden_layers": 12,
19
+ "pad_token_id": 1,
20
+ "relative_attention_num_buckets": 32,
21
+ "torch_dtype": "float32",
22
+ "transformers_version": "4.26.1",
23
+ "vocab_size": 30527
24
+ }
0_Asym/140000860578048_Transformer/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15d7a9813b0a24c11d46812f8bda297891490b64c6e7f47c228bc8dee06079f2
3
+ size 438014769
0_Asym/140000860578048_Transformer/sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "max_seq_length": 512,
3
+ "do_lower_case": false
4
+ }
0_Asym/140000860578048_Transformer/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "[UNK]"
15
+ }
0_Asym/140000860578048_Transformer/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
0_Asym/140000860578048_Transformer/tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "do_lower_case": true,
5
+ "eos_token": "</s>",
6
+ "mask_token": "<mask>",
7
+ "model_max_length": 512,
8
+ "name_or_path": "sentence-transformers/all-mpnet-base-v2",
9
+ "pad_token": "<pad>",
10
+ "sep_token": "</s>",
11
+ "special_tokens_map_file": null,
12
+ "strip_accents": null,
13
+ "tokenize_chinese_chars": true,
14
+ "tokenizer_class": "MPNetTokenizer",
15
+ "unk_token": "[UNK]"
16
+ }
0_Asym/140000860578048_Transformer/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
0_Asym/config.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "types": {
3
+ "140000860578048_Transformer": "sentence_transformers.models.Transformer",
4
+ "140000857040976_MarkuplmTransformerForConMATH": "sentence_transformers.models.MarkuplmTransformerForConMATH"
5
+ },
6
+ "structure": {
7
+ "latex": [
8
+ "140000860578048_Transformer"
9
+ ],
10
+ "mathml": [
11
+ "140000857040976_MarkuplmTransformerForConMATH"
12
+ ]
13
+ },
14
+ "parameters": {
15
+ "allow_empty_key": true
16
+ }
17
+ }
1_Pooling/config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "word_embedding_dimension": 768,
3
+ "pooling_mode_cls_token": false,
4
+ "pooling_mode_mean_tokens": true,
5
+ "pooling_mode_max_tokens": false,
6
+ "pooling_mode_mean_sqrt_len_tokens": false,
7
+ "pooling_mode_weightedmean_tokens": false,
8
+ "pooling_mode_lasttoken": false
9
+ }
README.md CHANGED
@@ -1,3 +1,93 @@
1
  ---
2
- license: unknown
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ pipeline_tag: sentence-similarity
3
+ tags:
4
+ - sentence-transformers
5
+ - feature-extraction
6
+ - sentence-similarity
7
+
8
  ---
9
+
10
+ # {MODEL_NAME}
11
+
12
+ This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
13
+
14
+ <!--- Describe your model here -->
15
+
16
+ ## Usage (Sentence-Transformers)
17
+
18
+ Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
19
+
20
+ ```
21
+ pip install -U sentence-transformers
22
+ ```
23
+
24
+ Then you can use the model like this:
25
+
26
+ ```python
27
+ from sentence_transformers import SentenceTransformer
28
+ sentences = ["This is an example sentence", "Each sentence is converted"]
29
+
30
+ model = SentenceTransformer('{MODEL_NAME}')
31
+ embeddings = model.encode(sentences)
32
+ print(embeddings)
33
+ ```
34
+
35
+
36
+
37
+ ## Evaluation Results
38
+
39
+ <!--- Describe how your model was evaluated -->
40
+
41
+ For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
42
+
43
+
44
+ ## Training
45
+ The model was trained with the parameters:
46
+
47
+ **DataLoader**:
48
+
49
+ `torch.utils.data.dataloader.DataLoader` of length 45861 with parameters:
50
+ ```
51
+ {'batch_size': 4, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
52
+ ```
53
+
54
+ **Loss**:
55
+
56
+ `sentence_transformers.losses.MultipleNegativesSymmetricRankingLoss.MultipleNegativesSymmetricRankingLoss` with parameters:
57
+ ```
58
+ {'scale': 20.0, 'similarity_fct': 'cos_sim'}
59
+ ```
60
+
61
+ Parameters of the fit()-Method:
62
+ ```
63
+ {
64
+ "epochs": 1,
65
+ "evaluation_steps": 4586,
66
+ "evaluator": "sentence_transformers.evaluation.AlignmentandUniformityEvaluator.AlignmentandUniformityEvaluator",
67
+ "max_grad_norm": 1,
68
+ "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
69
+ "optimizer_params": {
70
+ "lr": 5e-05
71
+ },
72
+ "scheduler": "WarmupLinear",
73
+ "steps_per_epoch": null,
74
+ "warmup_steps": 4587,
75
+ "weight_decay": 0.01
76
+ }
77
+ ```
78
+
79
+
80
+ ## Full Model Architecture
81
+ ```
82
+ SentenceTransformer(
83
+ (0): Asym(
84
+ (latex-0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel
85
+ (mathml-0): MarkuplmTransformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MarkupLMModel
86
+ )
87
+ (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False})
88
+ )
89
+ ```
90
+
91
+ ## Citing & Authors
92
+
93
+ <!--- Describe where people can find more information -->
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "__version__": {
3
+ "sentence_transformers": "2.2.2",
4
+ "transformers": "4.26.1",
5
+ "pytorch": "1.12.1+cu113"
6
+ }
7
+ }
modules.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "idx": 0,
4
+ "name": "0",
5
+ "path": "0_Asym",
6
+ "type": "sentence_transformers.models.Asym"
7
+ },
8
+ {
9
+ "idx": 1,
10
+ "name": "1",
11
+ "path": "1_Pooling",
12
+ "type": "sentence_transformers.models.Pooling"
13
+ }
14
+ ]