Muennighoff committed
Commit 27102fa
1 Parent(s): c8c91f6

Add SGPT-125M-mean-nli-linearthenpool5

1_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.GELU", "key_name": "token_embeddings"}
1_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fd71f2bd92bb24f04273c39208cd28e861ba6676b2c1a293c5ca24021687441
+ size 2363431
2_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.GELU", "key_name": "token_embeddings"}
2_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b14aa0b3055788567d4d26f9f4de7a148f1e00373fc445669dec6a790b72fcf9
+ size 2363431
3_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.GELU", "key_name": "token_embeddings"}
3_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e2da52a7b57f8f038c3c193c89ef8af894149c08b0c42f423af94ec568026a1
+ size 2363431
4_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.GELU", "key_name": "token_embeddings"}
4_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e17920e173fd7bfac6c0d15a6d1639aee39c6cd9fa53dba126872e67c0afd71
+ size 2363431
5_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 768, "out_features": 768, "bias": true, "activation_function": "torch.nn.modules.activation.GELU", "key_name": "token_embeddings"}
5_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:758f793d7c904b3ebd8fe8c0bf95de8c8ee7fded523b8aa2f3b3b6caced4623e
+ size 2363431
6_Pooling/config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": true,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false,
+   "pooling_mode_weightedmean_tokens": false,
+   "pooling_mode_lasttoken": false
+ }
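Only `pooling_mode_mean_tokens` is enabled, i.e. plain mean pooling over token embeddings. A self-contained sketch of what that computes, using the attention mask so padding tokens are excluded (illustrative; the library's Pooling module does the equivalent internally):

```python
# Sketch: attention-mask-aware mean pooling, as selected by 6_Pooling/config.json.
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # token_embeddings: (batch, seq_len, 768); attention_mask: (batch, seq_len) of 0/1
    mask = attention_mask.unsqueeze(-1).to(token_embeddings.dtype)  # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)  # zero out padding, then sum
    counts = mask.sum(dim=1).clamp(min=1e-9)       # number of real tokens per sentence
    return summed / counts                         # (batch, 768) sentence embeddings

emb = torch.randn(2, 75, 768)
mask = torch.ones(2, 75, dtype=torch.long)
print(mean_pool(emb, mask).shape)  # torch.Size([2, 768])
```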
README.md ADDED
@@ -0,0 +1,94 @@
+ ---
+ pipeline_tag: sentence-similarity
+ tags:
+ - sentence-transformers
+ - feature-extraction
+ - sentence-similarity
+ ---
+
+ # {MODEL_NAME}
+
+ This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.
+
+ <!--- Describe your model here -->
+
+ ## Usage (Sentence-Transformers)
+
+ Using this model is easy once you have [sentence-transformers](https://www.SBERT.net) installed:
+
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+ sentences = ["This is an example sentence", "Each sentence is converted"]
+
+ model = SentenceTransformer('{MODEL_NAME}')
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
+
+
+ ## Evaluation Results
+
+ <!--- Describe how your model was evaluated -->
+
+ For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
+
+
+ ## Training
+ The model was trained with the following parameters:
+
+ **DataLoader**:
+
+ `sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 8807 with parameters:
+ ```
+ {'batch_size': 64}
+ ```
+
+ **Loss**:
+
+ `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
+ ```
+ {'scale': 20.0, 'similarity_fct': 'cos_sim'}
+ ```
+
+ Parameters of the fit() method:
+ ```
+ {
+     "epochs": 1,
+     "evaluation_steps": 880,
+     "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator",
+     "max_grad_norm": 1,
+     "optimizer_class": "<class 'transformers.optimization.AdamW'>",
+     "optimizer_params": {
+         "lr": 2e-05
+     },
+     "scheduler": "WarmupLinear",
+     "steps_per_epoch": null,
+     "warmup_steps": 881,
+     "weight_decay": 0.01
+ }
+ ```
+
+
+ ## Full Model Architecture
+ ```
+ SentenceTransformer(
+   (0): Transformer({'max_seq_length': 75, 'do_lower_case': False}) with Transformer model: GPTNeoModel
+   (1): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.GELU', 'key_name': 'token_embeddings'})
+   (2): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.GELU', 'key_name': 'token_embeddings'})
+   (3): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.GELU', 'key_name': 'token_embeddings'})
+   (4): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.GELU', 'key_name': 'token_embeddings'})
+   (5): Dense({'in_features': 768, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.GELU', 'key_name': 'token_embeddings'})
+   (6): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False})
+ )
+ ```
+
+ ## Citing & Authors
+
+ <!--- Describe where people can find more information -->
config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "_name_or_path": "EleutherAI/gpt-neo-125M",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPTNeoModel"
+   ],
+   "attention_dropout": 0,
+   "attention_layers": [
+     "global",
+     "local",
+     "global",
+     "local",
+     "global",
+     "local",
+     "global",
+     "local",
+     "global",
+     "local",
+     "global",
+     "local"
+   ],
+   "attention_types": [
+     [
+       [
+         "global",
+         "local"
+       ],
+       6
+     ]
+   ],
+   "bos_token_id": 50256,
+   "embed_dropout": 0,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": null,
+   "layer_norm_epsilon": 1e-05,
+   "max_position_embeddings": 2048,
+   "model_type": "gpt_neo",
+   "num_heads": 12,
+   "num_layers": 12,
+   "resid_dropout": 0,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.11.3",
+   "use_cache": true,
+   "vocab_size": 50257,
+   "window_size": 256
+ }
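This is the unmodified `EleutherAI/gpt-neo-125M` backbone configuration: 12 layers of alternating global/local attention, hidden size 768, and 2048 max positions. A sketch of loading the same backbone directly with transformers (assumes access to the Hugging Face Hub):

```python
# Sketch: loading the GPT-Neo backbone that this checkpoint fine-tunes.
from transformers import AutoTokenizer, GPTNeoModel

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
model = GPTNeoModel.from_pretrained("EleutherAI/gpt-neo-125M")

inputs = tokenizer("An example sentence", return_tensors="pt")
hidden = model(**inputs).last_hidden_state  # (1, seq_len, 768) token embeddings
print(hidden.shape)
```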
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "__version__": {
+     "sentence_transformers": "2.1.0",
+     "transformers": "4.11.3",
+     "pytorch": "1.10.1"
+   }
+ }
eval/similarity_evaluation_sts-dev_results.csv ADDED
@@ -0,0 +1,12 @@
+ epoch,steps,cosine_pearson,cosine_spearman,euclidean_pearson,euclidean_spearman,manhattan_pearson,manhattan_spearman,dot_pearson,dot_spearman
+ 0,880,0.695697767418695,0.6946315185185322,0.6566675432392686,0.6649657453768263,0.6561515431946573,0.6649154731001401,0.4053754975335031,0.48304897036792616
+ 0,1760,0.7281897946527534,0.7270505142008357,0.686362655304324,0.6932464125804493,0.6863509784633364,0.6932349588358903,0.44368362120688054,0.5233329981683638
+ 0,2640,0.7409358159115291,0.7405094042718775,0.6912688329360896,0.69842007985192,0.691085534996914,0.6986855853732264,0.4430951462895188,0.529006701372103
+ 0,3520,0.7468269240616145,0.7462893602179821,0.6908051906059811,0.6986931352005008,0.6908816573096065,0.698702649441942,0.444691950277587,0.5251307965620684
+ 0,4400,0.7535440193116522,0.7532937031300458,0.6914680580225847,0.699171749409801,0.6912367126125329,0.6989475766572323,0.44123483422331394,0.5340187279626287
+ 0,5280,0.7576531559904383,0.7580565836411597,0.6913830015315365,0.6986255906699018,0.6916925802443095,0.6991484041979608,0.4560692755242858,0.5363828078859204
+ 0,6160,0.7611782339131163,0.7584775305508767,0.6897809024340384,0.6974802126552649,0.6899038015194063,0.6976009903186859,0.4634874548267457,0.5466089822028989
+ 0,7040,0.7579760917824654,0.7598389583224086,0.688414163328087,0.6952389228522992,0.6883256888786746,0.6951853979043289,0.4479040960953756,0.5342083722216181
+ 0,7920,0.7627901353787523,0.762340476277235,0.6888356954042394,0.6962232295814017,0.6887710933575818,0.6962806210800829,0.4610979723868065,0.5520326798960286
+ 0,8800,0.7606244379243029,0.7613140260676582,0.6881744330531037,0.6958299695235318,0.688084630669583,0.6957845993854599,0.45281938448119274,0.5458636222579131
+ 0,-1,0.7606247315575784,0.7613215663214055,0.6881741798346358,0.6958293218362521,0.6880835662936146,0.6957892648691067,0.4528238571653357,0.5458631293750107
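Each row reports Pearson and Spearman correlations between the model's similarity scores and gold STS-dev labels at a given training step (the `0,-1` row is the final state of epoch 0). A sketch of how the `cosine_spearman` column is computed from paired sentence embeddings and gold scores; `scipy.stats.spearmanr` stands in here for the evaluator's internals:

```python
# Sketch: reproducing the cosine_spearman metric from sentence embeddings.
import numpy as np
from scipy.stats import spearmanr

def cosine_spearman(emb1: np.ndarray, emb2: np.ndarray, gold: np.ndarray) -> float:
    # emb1, emb2: (n, dim) embeddings of the two sides of each STS pair; gold: (n,) human scores.
    e1 = emb1 / np.linalg.norm(emb1, axis=1, keepdims=True)
    e2 = emb2 / np.linalg.norm(emb2, axis=1, keepdims=True)
    cosine = (e1 * e2).sum(axis=1)  # per-pair cosine similarity
    return spearmanr(cosine, gold).correlation

rng = np.random.default_rng(0)
print(cosine_spearman(rng.normal(size=(8, 768)), rng.normal(size=(8, 768)), rng.normal(size=8)))
```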
merges.txt ADDED
The diff for this file is too large to render. See raw diff
modules.json ADDED
@@ -0,0 +1,44 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Dense",
+     "type": "sentence_transformers.models.Dense"
+   },
+   {
+     "idx": 2,
+     "name": "2",
+     "path": "2_Dense",
+     "type": "sentence_transformers.models.Dense"
+   },
+   {
+     "idx": 3,
+     "name": "3",
+     "path": "3_Dense",
+     "type": "sentence_transformers.models.Dense"
+   },
+   {
+     "idx": 4,
+     "name": "4",
+     "path": "4_Dense",
+     "type": "sentence_transformers.models.Dense"
+   },
+   {
+     "idx": 5,
+     "name": "5",
+     "path": "5_Dense",
+     "type": "sentence_transformers.models.Dense"
+   },
+   {
+     "idx": 6,
+     "name": "6",
+     "path": "6_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   }
+ ]
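`modules.json` fixes the load order of the pipeline: transformer, five Dense layers, then pooling. A sketch of assembling a structurally similar pipeline by hand with the `SentenceTransformer(modules=...)` constructor. Caveat: the stock `models.Dense` transforms the pooled sentence embedding, whereas this model's custom `key_name: token_embeddings` variant runs before pooling, so the sketch below (pool first, then Dense) is a structural analogy rather than an exact reconstruction:

```python
# Sketch: a hand-assembled pipeline mirroring modules.json's components.
# NOTE: stock sentence-transformers Dense acts after pooling; the released
# model applies its Dense stack to token embeddings before pooling.
from torch import nn
from sentence_transformers import SentenceTransformer, models

transformer = models.Transformer("EleutherAI/gpt-neo-125M", max_seq_length=75)
pooling = models.Pooling(768, pooling_mode_mean_tokens=True)
dense_stack = [models.Dense(768, 768, bias=True, activation_function=nn.GELU())
               for _ in range(5)]

model = SentenceTransformer(modules=[transformer, pooling, *dense_stack])
print(model)
```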
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e4119de5997c833c7566dc4766cfd1ce0fcafab88f327adf0dbd85e41f03e3c
+ size 551190545
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 75,
+   "do_lower_case": false
+ }
similarity_evaluation_sts-test_results.csv ADDED
@@ -0,0 +1,2 @@
+ epoch,steps,cosine_pearson,cosine_spearman,euclidean_pearson,euclidean_spearman,manhattan_pearson,manhattan_spearman,dot_pearson,dot_spearman
+ -1,-1,0.7157778926074485,0.6859945985094986,0.6435634476151055,0.6275229981931764,0.6425847092278667,0.6266504348935201,0.33569692797255907,0.41022138537628117
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "model_max_length": 2048, "special_tokens_map_file": null, "name_or_path": "EleutherAI/gpt-neo-125M", "tokenizer_class": "GPT2Tokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff