krunchykat committed on
Commit 07ed9e0
1 Parent(s): cf0f99a

Upload folder using huggingface_hub

1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_cls_token": true,
+   "pooling_mode_mean_tokens": false,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false
+ }
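The pooling configuration above selects CLS-token pooling: the sentence embedding is the final-layer vector of the `[CLS]` token (768 dimensions), with mean, max, and sqrt-length pooling all disabled. `SentenceTransformer` builds this module from the JSON automatically; as a minimal sketch, the equivalent manual construction would look roughly like this:

```python
# Minimal sketch: build the Pooling module described by 1_Pooling/config.json
# by hand. Normally SentenceTransformer(...) constructs it for you on load.
from sentence_transformers import models

pooling = models.Pooling(
    word_embedding_dimension=768,
    pooling_mode_cls_token=True,            # use the [CLS] token embedding
    pooling_mode_mean_tokens=False,         # no mean pooling
    pooling_mode_max_tokens=False,          # no max pooling
    pooling_mode_mean_sqrt_len_tokens=False,
)
print(pooling)  # reports the configured pooling mode
```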
README.md ADDED
@@ -0,0 +1,88 @@
+ ---
+ pipeline_tag: sentence-similarity
+ tags:
+ - sentence-transformers
+ - feature-extraction
+ - sentence-similarity
+
+ ---
+
+ # {MODEL_NAME}
+
+ This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.
+
+ <!--- Describe your model here -->
+
+ ## Usage (Sentence-Transformers)
+
+ Using this model is easy once you have [sentence-transformers](https://www.SBERT.net) installed:
+
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+ sentences = ["This is an example sentence", "Each sentence is converted"]
+
+ model = SentenceTransformer('{MODEL_NAME}')
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
+
+
+
+ ## Evaluation Results
+
+ <!--- Describe how your model was evaluated -->
+
+ For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
+
+
+ ## Training
+ The model was trained with the following parameters:
+
+ **DataLoader**:
+
+ `torch.utils.data.dataloader.DataLoader` of length 11371 with parameters:
+ ```
+ {'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
+ ```
+
+ **Loss**:
+
+ `sentence_transformers.losses.OnlineContrastiveLoss.OnlineContrastiveLoss`
+
+ Parameters of the `fit()` method:
+ ```
+ {
+   "epochs": 8,
+   "evaluation_steps": 0,
+   "evaluator": "sentence_transformers.evaluation.BinaryClassificationEvaluator.BinaryClassificationEvaluator",
+   "max_grad_norm": 1,
+   "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
+   "optimizer_params": {
+     "lr": 2e-05
+   },
+   "scheduler": "warmuplinear",
+   "steps_per_epoch": null,
+   "warmup_steps": 10000,
+   "weight_decay": 0.01
+ }
+ ```
+
+
+ ## Full Model Architecture
+ ```
+ SentenceTransformer(
+   (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel
+   (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
+   (2): Normalize()
+ )
+ ```
+
+ ## Citing & Authors
+
+ <!--- Describe where people can find more information -->
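Reading the training section of this model card back into code: the run used a `DataLoader` of batch size 32, `OnlineContrastiveLoss`, a `BinaryClassificationEvaluator`, and the listed `fit()` arguments. A rough reconstruction is sketched below. It rests on assumptions: the base checkpoint is inferred from `_name_or_path` in `config.json` (`BAAI/bge-base-en-v1.5`), and the labeled sentence pairs are invented placeholders, since the actual training data is not part of this upload.

```python
# Sketch of a fine-tuning run matching the fit() parameters in the README.
# Placeholder data; the real pairs/labels are not included in this commit.
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses
from sentence_transformers.evaluation import BinaryClassificationEvaluator

model = SentenceTransformer("BAAI/bge-base-en-v1.5")  # base checkpoint per config.json

train_examples = [
    InputExample(texts=["how to reset my password", "password reset steps"], label=1),
    InputExample(texts=["how to reset my password", "store opening hours"], label=0),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=32)
train_loss = losses.OnlineContrastiveLoss(model)

evaluator = BinaryClassificationEvaluator(
    sentences1=["how to reset my password"],
    sentences2=["password reset steps"],
    labels=[1],
)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    evaluator=evaluator,
    epochs=8,
    evaluation_steps=0,
    warmup_steps=10000,
    scheduler="warmuplinear",
    optimizer_params={"lr": 2e-5},
    weight_decay=0.01,
    max_grad_norm=1,
)
```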
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "/root/.cache/torch/sentence_transformers/BAAI_bge-base-en-v1.5/",
+   "architectures": [
+     "BertModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
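As a quick sanity check, the architecture described by this `config.json` (12 layers, hidden size 768, vocabulary of 30,522) is a standard BERT-base encoder of roughly 109M parameters, which lines up with the `model.safetensors` size recorded further down (437,951,328 bytes of float32 weights plus a small header). A sketch, assuming `config.json` is available locally:

```python
# Instantiate a BertModel from this config with random weights (nothing is
# downloaded) and count parameters; ~109M is expected for a BERT-base encoder.
from transformers import BertConfig, BertModel

config = BertConfig.from_json_file("config.json")  # assumed local path
model = BertModel(config)
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params:,} parameters")  # roughly 437951328 / 4 bytes per float32
```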
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "__version__": {
+     "sentence_transformers": "2.2.2",
+     "transformers": "4.28.1",
+     "pytorch": "1.13.0+cu117"
+   }
+ }
eval/binary_classification_evaluation_results.csv ADDED
@@ -0,0 +1,9 @@
+ epoch,steps,cossim_accuracy,cossim_accuracy_threshold,cossim_f1,cossim_precision,cossim_recall,cossim_f1_threshold,cossim_ap,manhattan_accuracy,manhattan_accuracy_threshold,manhattan_f1,manhattan_precision,manhattan_recall,manhattan_f1_threshold,manhattan_ap,euclidean_accuracy,euclidean_accuracy_threshold,euclidean_f1,euclidean_precision,euclidean_recall,euclidean_f1_threshold,euclidean_ap,dot_accuracy,dot_accuracy_threshold,dot_f1,dot_precision,dot_recall,dot_f1_threshold,dot_ap
+ 0,-1,0.8894110663137846,0.8726729154586792,0.853081485301702,0.814739564093092,0.8952103910161007,0.863213300704956,0.8974880874953977,0.8889905760716318,11.074914932250977,0.8533814014871085,0.8140006140620203,0.8967663374374238,11.570099830627441,0.8974635036866925,0.8894110663137846,0.5046326518058777,0.853081485301702,0.814739564093092,0.8952103910161007,0.5230423212051392,0.8975743958448185,0.8894110663137846,0.8726729154586792,0.853081485301702,0.814739564093092,0.8952103910161007,0.8632134199142456,0.8974631750450622
+ 1,-1,0.9000717306883672,0.8647794127464294,0.8678657230049998,0.8343320848938827,0.9042078203220133,0.8561683893203735,0.913514950622972,0.899750179326721,11.650308609008789,0.8675159648610976,0.8328250451235452,0.9052225679880936,11.875024795532227,0.9135494898711622,0.9000717306883672,0.5200395584106445,0.8678657230049998,0.8343320848938827,0.9042078203220133,0.5363423824310303,0.9136270979496859,0.9000717306883672,0.8647793531417847,0.8678657230049998,0.8343320848938827,0.9042078203220133,0.8561684489250183,0.9135615930066004
+ 2,-1,0.9045981844715427,0.8679709434509277,0.8726106310552499,0.8452758402029169,0.9017724259234203,0.8619228005409241,0.9182664631857849,0.9045734497514161,11.385364532470703,0.8726089785296032,0.840456197518486,0.9073197131646598,11.72685432434082,0.9182858121348876,0.9045981844715427,0.5138657689094543,0.8726106310552499,0.8452758402029169,0.9017724259234203,0.5255039930343628,0.9183749712496869,0.9045981844715427,0.8679710626602173,0.8726106310552499,0.8452758402029169,0.9017724259234203,0.8619227409362793,0.9183340377592901
+ 3,-1,0.9063296148804076,0.8731502890586853,0.8760233536645031,0.8458147005101719,0.9084697605195508,0.8660376071929932,0.9215888359128617,0.9064285537609142,11.016427040100098,0.8761563517915311,0.8448925744440257,0.909822757407658,11.455008506774902,0.9215934348778176,0.9063296148804076,0.5036858320236206,0.8760233536645031,0.8458147005101719,0.9084697605195508,0.517614483833313,0.9216842776366039,0.9063296148804076,0.8731503486633301,0.8760233536645031,0.8458147005101719,0.9084697605195508,0.8660376071929932,0.9216146854005058
+ 4,-1,0.9071953300848401,0.8642836213111877,0.875092960843276,0.8381542273149581,0.9154376944933027,0.843239426612854,0.9219608863490714,0.9069727176037004,11.583162307739258,0.8754039970978168,0.8540540540540541,0.8978487349479096,11.946100234985352,0.9219844364277952,0.9071953300848401,0.5209921598434448,0.875092960843276,0.8381542273149581,0.9154376944933027,0.559929609298706,0.9220771892342658,0.9071953300848401,0.8642836809158325,0.875092960843276,0.8381542273149581,0.9154376944933027,0.843239426612854,0.9219703976581184
+ 5,-1,0.9090256993742116,0.8728982210159302,0.8785083543971374,0.8533256807601556,0.9052225679880936,0.8636312484741211,0.9243418101254697,0.908926760493705,11.300579071044922,0.8784308563154084,0.8552935144834658,0.9028548234339061,11.466693878173828,0.9243873681520582,0.9090256993742116,0.5041859745979309,0.8785083543971374,0.8533256807601556,0.9052225679880936,0.5222426652908325,0.9244457747214353,0.9090256993742116,0.8728982210159302,0.8785083543971374,0.8533256807601556,0.9052225679880936,0.8636312484741211,0.924384609603758
+ 6,-1,0.9097677409780108,0.8619492053985596,0.8794529469228264,0.8478151682571572,0.9135434988499527,0.8502360582351685,0.9240219599928648,0.9095203937767444,11.501441955566406,0.8793131483950697,0.8466837853071961,0.914558246516033,12.112016677856445,0.9240483764870904,0.9097677409780108,0.5254536271095276,0.8794529469228264,0.8478151682571572,0.9135434988499527,0.5472913980484009,0.9241222123353483,0.9097677409780108,0.8619492053985596,0.8794529469228264,0.8478151682571572,0.9135434988499527,0.850236177444458,0.9240385030261205
+ 7,-1,0.9099903534591506,0.8681730031967163,0.8798801732278336,0.8482013936844749,0.9140170477607902,0.852232813835144,0.9248234406964589,0.9101387617799105,11.39983081817627,0.8795641740709514,0.8446686596910812,0.9174671898254634,12.09014892578125,0.9248780629159912,0.9099903534591506,0.5134725570678711,0.8798801732278336,0.8482013936844749,0.9140170477607902,0.5436307787895203,0.9249197879998878,0.9099903534591506,0.8681729435920715,0.8798801732278336,0.8482013936844749,0.9140170477607902,0.852232813835144,0.9247845722254051
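Each row of this CSV comes from the `BinaryClassificationEvaluator` run at the end of an epoch (`steps = -1`). The `cossim_*` columns report accuracy, F1, precision, recall, the thresholds that achieved them, and average precision over cosine-similarity scores; `manhattan_*`, `euclidean_*`, and `dot_*` repeat the same metrics for the other distance functions. The snippet below only illustrates what the cosine columns measure, not the evaluator's actual implementation; the checkpoint, pairs, labels, and threshold are placeholders.

```python
# Illustration of the cossim_* metrics: cosine similarity between paired
# embeddings, thresholded for accuracy, plus average precision over the scores.
import numpy as np
from sklearn.metrics import average_precision_score
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("BAAI/bge-base-en-v1.5")  # stand-in checkpoint
pairs = [("reset my password", "password reset steps"),
         ("reset my password", "store opening hours")]
labels = np.array([1, 0])  # placeholder labels

emb1 = model.encode([a for a, _ in pairs], normalize_embeddings=True)
emb2 = model.encode([b for _, b in pairs], normalize_embeddings=True)
scores = np.sum(emb1 * emb2, axis=1)   # cosine similarity of unit vectors

threshold = 0.87                        # the evaluator searches for the best threshold
accuracy = np.mean((scores > threshold) == labels)
ap = average_precision_score(labels, scores)
print(accuracy, ap)
```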
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:242bffd6baa3e536452adf44b5e8832f8f3e7dcff910479608ff6b924f9f93f9
+ size 437951328
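The entry above is a Git LFS pointer rather than the weights themselves; the real `model.safetensors` is fetched separately. To verify a downloaded copy, its SHA-256 should match the `oid` recorded in the pointer (the local path below is an assumption):

```python
# Verify a downloaded model.safetensors against the LFS pointer's oid.
import hashlib

EXPECTED = "242bffd6baa3e536452adf44b5e8832f8f3e7dcff910479608ff6b924f9f93f9"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:          # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch"
print("checksum ok; file size should be 437951328 bytes")
```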
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   },
+   {
+     "idx": 2,
+     "name": "2",
+     "path": "2_Normalize",
+     "type": "sentence_transformers.models.Normalize"
+   }
+ ]
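`modules.json` wires the three stages together: the `Transformer` backbone at the repository root, the CLS `Pooling` module in `1_Pooling`, and a final `Normalize` step in `2_Normalize`. The same pipeline can be reproduced with plain `transformers`, which is useful outside of sentence-transformers; the model path below is a placeholder for this repository:

```python
# Reproduce the Transformer -> CLS Pooling -> Normalize pipeline with plain
# transformers. "path/to/this/model" is a placeholder for this repository.
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel

path = "path/to/this/model"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModel.from_pretrained(path)

sentences = ["This is an example sentence", "Each sentence is converted"]
inputs = tokenizer(sentences, padding=True, truncation=True,
                   max_length=512, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)                       # (0) Transformer backbone

cls_embeddings = outputs.last_hidden_state[:, 0]    # (1) Pooling: take [CLS]
embeddings = F.normalize(cls_embeddings, p=2, dim=1)  # (2) Normalize: L2
print(embeddings.shape)                             # (2, 768)
```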
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 512,
+   "do_lower_case": true
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
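The tokenizer is a standard lower-casing `BertTokenizer` with a 512-token limit and the usual `[PAD]`/`[UNK]`/`[CLS]`/`[SEP]`/`[MASK]` special tokens. A small check of these settings, with the model path again a placeholder:

```python
# Confirm lowercasing, truncation to 512 tokens, and the [CLS]/[SEP] wrapping
# configured above. "path/to/this/model" is a placeholder for this repository.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/model")  # placeholder

encoded = tokenizer("Hello WORLD", truncation=True, max_length=512)
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
# expected: ['[CLS]', 'hello', 'world', '[SEP]']  because do_lower_case is true
```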
vocab.txt ADDED
The diff for this file is too large to render. See raw diff