jfarray committed
Commit 1762390
1 Parent(s): b56f231

Add new SentenceTransformer model.

.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": true,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false
+ }
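This config selects mean pooling: token embeddings are averaged, with padding masked out. A minimal sketch of that computation, assuming Hugging Face-style `token_embeddings` and `attention_mask` tensors (the function name is illustrative, not the library's own):

```python
import torch

def mean_pooling(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # token_embeddings: (batch, seq_len, 768); attention_mask: (batch, seq_len)
    # Expand the mask so padding positions contribute nothing to the sum.
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = (token_embeddings * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1e-9)  # guard against division by zero
    return summed / counts
```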
2_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 768, "out_features": 256, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c255448751e7dbde4338e76d6c6c659b8e90b734506ca32e1962d9df8c0c0a3
+ size 788519
README.md ADDED
@@ -0,0 +1,87 @@
+ ---
+ pipeline_tag: sentence-similarity
+ tags:
+ - sentence-transformers
+ - feature-extraction
+ - sentence-similarity
+ ---
+
+ # {MODEL_NAME}
+
+ This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 256-dimensional dense vector space and can be used for tasks like clustering or semantic search.
+
+ <!--- Describe your model here -->
+
+ ## Usage (Sentence-Transformers)
+
+ Using this model is easy once you have [sentence-transformers](https://www.SBERT.net) installed:
+
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+ sentences = ["This is an example sentence", "Each sentence is converted"]
+
+ model = SentenceTransformer('{MODEL_NAME}')
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
+
+ ## Evaluation Results
+
+ <!--- Describe how your model was evaluated -->
+
+ For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
+
+ ## Training
+ The model was trained with the following parameters:
+
+ **DataLoader**:
+
+ `torch.utils.data.dataloader.DataLoader` of length 11 with parameters:
+ ```
+ {'batch_size': 15, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
+ ```
+
+ **Loss**:
+
+ `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`
+
+ Parameters of the `fit()` method:
+ ```
+ {
+     "epochs": 1,
+     "evaluation_steps": 1,
+     "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator",
+     "max_grad_norm": 1,
+     "optimizer_class": "<class 'transformers.optimization.AdamW'>",
+     "optimizer_params": {
+         "lr": 2e-05
+     },
+     "scheduler": "WarmupLinear",
+     "steps_per_epoch": null,
+     "warmup_steps": 2,
+     "weight_decay": 0.01
+ }
+ ```
+
+ ## Full Model Architecture
+ ```
+ SentenceTransformer(
+   (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel
+   (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
+   (2): Dense({'in_features': 768, 'out_features': 256, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+ )
+ ```
+
+ ## Citing & Authors
+
+ <!--- Describe where people can find more information -->
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "dccuchile/bert-base-spanish-wwm-uncased",
+   "architectures": [
+     "BertModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 31002
+ }
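This is a standard BERT-base config wrapping the BETO checkpoint `dccuchile/bert-base-spanish-wwm-uncased`. If you only need raw token embeddings rather than sentence vectors, the base model loads directly with `transformers` (a sketch, independent of this commit):

```python
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
bert = AutoModel.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")

inputs = tokenizer("una frase de ejemplo", return_tensors="pt", truncation=True, max_length=512)
outputs = bert(**inputs)
print(outputs.last_hidden_state.shape)  # (1, seq_len, 768), per hidden_size above
```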
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "__version__": {
+     "sentence_transformers": "2.2.0",
+     "transformers": "4.16.2",
+     "pytorch": "1.10.0+cu111"
+   }
+ }
eval/similarity_evaluation_results.csv ADDED
@@ -0,0 +1,13 @@
+ epoch,steps,cosine_pearson,cosine_spearman,euclidean_pearson,euclidean_spearman,manhattan_pearson,manhattan_spearman,dot_pearson,dot_spearman
+ 0,1,-0.06845528660656129,0.0,-0.1037180996893938,-0.10680697223796014,-0.08489845746771399,-0.05848953241602578,0.09080651903103532,0.2250575486442731
+ 0,2,-0.04093050272744914,0.00381453472278429,-0.08960881302600357,-0.12333662270335873,-0.06891766870147537,-0.034330812505058615,0.09472502908877314,0.23141510651558028
+ 0,3,-0.021069275637259337,0.022887208336705742,-0.08329324460160221,-0.1309656921489273,-0.06289007753730332,-0.05848953241602578,0.09520335252804764,0.23395812966410312
+ 0,4,-0.039098877275739685,-0.03941685880210433,-0.09777089428670038,-0.2352296412383646,-0.08290093860954469,-0.16148196993120162,0.08981496690115205,0.2403156875354103
+ 0,5,-0.0020685477875039627,-0.010172092594091442,-0.09175676796943902,-0.2225145254957503,-0.07723822832435108,-0.1309656921489273,0.1023607440560786,0.2657459190206389
+ 0,6,0.10756473753156832,0.21615696762444314,-0.055994234731942205,-0.11189301853500584,-0.03849863149781329,-0.07883371760420867,0.13108543741356604,0.2911761505058675
+ 0,7,0.20354603848811686,0.3674668449615533,0.006580894831589592,-0.0839197639012544,0.029944790594284596,-0.01907267361392145,0.15779398873363035,0.28863312735734464
+ 0,8,0.21776114798676288,0.3471226597733704,0.021844909688378305,-0.01907267361392145,0.04455138809897711,0.020344185188182883,0.16378130107978045,0.29244766208012896
+ 0,9,0.21576310271923516,0.36492382181303046,0.016853402094537905,-0.02924476620801289,0.03593206046035723,0.020344185188182883,0.16742583654896556,0.31533487041683467
+ 0,10,0.21473750448856857,0.36492382181303046,0.015925986457911892,-0.02924476620801289,0.03285971363581102,-0.010172092594091442,0.16894357250972658,0.319149405139619
+ 0,11,0.21201235935822244,0.354751729218939,0.01326319593794517,-0.02924476620801289,0.029071428657869915,-0.010172092594091442,0.1684931745610912,0.319149405139619
+ 0,-1,0.21201235935822244,0.354751729218939,0.01326319593794517,-0.02924476620801289,0.029071428657869915,-0.010172092594091442,0.1684931745610912,0.319149405139619
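Each row reports Pearson and Spearman correlations between gold similarity labels and four embedding-based scores (cosine, Euclidean, Manhattan, dot product), logged every evaluation step; `steps = -1` is the end-of-epoch checkpoint. A sketch of how the cosine columns could be reproduced, assuming you already have gold labels and paired embeddings (all names illustrative):

```python
import numpy as np
from scipy.stats import pearsonr, spearmanr

def cosine_scores(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # a, b: (n, dim) embeddings of the two sentences in each pair
    a_n = a / np.linalg.norm(a, axis=1, keepdims=True)
    b_n = b / np.linalg.norm(b, axis=1, keepdims=True)
    return (a_n * b_n).sum(axis=1)

def cosine_correlations(gold: np.ndarray, emb1: np.ndarray, emb2: np.ndarray):
    # Returns (cosine_pearson, cosine_spearman), as in the CSV header.
    scores = cosine_scores(emb1, emb2)
    return pearsonr(gold, scores)[0], spearmanr(gold, scores)[0]
```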
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   },
+   {
+     "idx": 2,
+     "name": "2",
+     "path": "2_Dense",
+     "type": "sentence_transformers.models.Dense"
+   }
+ ]
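modules.json is what lets `SentenceTransformer('{MODEL_NAME}')` reassemble the three-stage pipeline (Transformer → Pooling → Dense) from the per-module folders. The same architecture can also be built by hand with the `models` API; a sketch matching the configs in this commit:

```python
import torch
from sentence_transformers import SentenceTransformer, models

# Module 0: BETO encoder, truncating inputs at 256 tokens (sentence_bert_config.json).
word = models.Transformer("dccuchile/bert-base-spanish-wwm-uncased", max_seq_length=256)
# Module 1: mean pooling over the 768-dim token embeddings (1_Pooling/config.json).
pooling = models.Pooling(word.get_word_embedding_dimension(), pooling_mode="mean")
# Module 2: 768 -> 256 projection with Tanh (2_Dense/config.json).
dense = models.Dense(in_features=768, out_features=256,
                     activation_function=torch.nn.Tanh())

model = SentenceTransformer(modules=[word, pooling, dense])
```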
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a61e5a65ab35fb16fc90dc494b8a11c3282bd5b0191b8c14b9719baf4b29cd54
+ size 439484849
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 256,
+   "do_lower_case": false
+ }
similarity_evaluation_sts-test_results.csv ADDED
@@ -0,0 +1,2 @@
+ epoch,steps,cosine_pearson,cosine_spearman,euclidean_pearson,euclidean_spearman,manhattan_pearson,manhattan_spearman,dot_pearson,dot_spearman
+ -1,-1,0.8023057841506823,0.522337287251783,0.8224652364924893,0.5040880642923905,0.8221832402518131,0.5116282597092722,0.5812810863347823,0.3174194764610981
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": false, "do_basic_tokenize": true, "never_split": null, "model_max_length": 512, "special_tokens_map_file": "/root/.cache/huggingface/transformers/78141ed1e8dcc5ff370950397ca0d1c5c9da478f54ec14544187d8a93eff1a26.f982506b52498d4adb4bd491f593dc92b2ef6be61bfdbe9d30f53f963f9f5b66", "name_or_path": "dccuchile/bert-base-spanish-wwm-uncased", "tokenizer_class": "BertTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff