omarelsayeed committed
Commit aaa5b96 · 1 Parent(s): c31b889

Upload folder using huggingface_hub

1_Pooling/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "word_embedding_dimension": 384,
+  "word_embedding_dimension": 768,
   "pooling_mode_cls_token": false,
   "pooling_mode_mean_tokens": true,
   "pooling_mode_max_tokens": false,
README.md CHANGED
@@ -10,7 +10,7 @@ tags:
 
 # {MODEL_NAME}
 
-This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search.
+This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
 
 <!--- Describe your model here -->
 
@@ -97,7 +97,7 @@ The model was trained with the parameters:
 Parameters of the fit()-Method:
 ```
 {
-    "epochs": 5,
+    "epochs": 3,
     "evaluation_steps": 0,
     "evaluator": "NoneType",
     "max_grad_norm": 1,
@@ -116,8 +116,8 @@ Parameters of the fit()-Method:
 ## Full Model Architecture
 ```
 SentenceTransformer(
-  (0): Transformer({'max_seq_length': 50, 'do_lower_case': False}) with Transformer model: BertModel
-  (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
+  (0): Transformer({'max_seq_length': 80, 'do_lower_case': False}) with Transformer model: XLMRobertaModel
+  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
 )
 ```
 
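A hedged usage sketch matching the updated README: the repo id below is a placeholder (the README itself still reads {MODEL_NAME}), and the expected output width follows from the diff above.

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("omarelsayeed/MODEL_NAME")  # placeholder repo id
embeddings = model.encode(["First sentence.", "Second sentence."])
print(embeddings.shape)  # expected (2, 768) after this commit
```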
config.json CHANGED
@@ -1,26 +1,29 @@
 {
-  "_name_or_path": "/root/.cache/torch/sentence_transformers/sentence-transformers_paraphrase-multilingual-MiniLM-L12-v2/",
+  "_name_or_path": "/root/.cache/torch/sentence_transformers/sentence-transformers_paraphrase-multilingual-mpnet-base-v2/",
   "architectures": [
-    "BertModel"
+    "XLMRobertaModel"
   ],
   "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
   "classifier_dropout": null,
+  "eos_token_id": 2,
   "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 384,
+  "hidden_size": 768,
   "initializer_range": 0.02,
-  "intermediate_size": 1536,
-  "layer_norm_eps": 1e-12,
-  "max_position_embeddings": 512,
-  "model_type": "bert",
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "pad_token_id": 0,
+  "output_past": true,
+  "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
   "transformers_version": "4.30.2",
-  "type_vocab_size": 2,
+  "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size": 250037
+  "vocab_size": 250002
 }
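The backbone swap above (a 384-wide BERT MiniLM replaced by XLM-RoBERTa base) can be verified from the saved config. A small sketch; the local path is a placeholder:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/this/repo")  # placeholder path
print(config.model_type)   # "xlm-roberta" after this commit
print(config.hidden_size)  # 768
print(config.vocab_size)   # 250002
```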
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:06774f97bfe0fa5f87a3f94468d4068403b994cceca36251a580ca9f4f131e50
-size 470686253
+oid sha256:3582c57c542caf1c222073f27a89102a0df6557dc0b458e555c4c087fe140799
+size 1112245805
sentence_bert_config.json CHANGED
@@ -1,4 +1,4 @@
 {
-  "max_seq_length": 50,
+  "max_seq_length": 80,
   "do_lower_case": false
 }
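max_seq_length caps how many tokens are encoded per input; longer texts are truncated. A quick check, assuming the repo is loaded locally (path is a placeholder):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("path/to/this/repo")  # placeholder path
print(model.max_seq_length)  # 80 after this commit; longer inputs are truncated
```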
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1f0e547aaa62c434b38638d309fd726cafaabff159e25b10afdd819a8595c3dd
3
  size 17082912
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e54a06c2a8b8041b979a9167588cf3adec464fb634ad3b32fbc412c8a4398303
3
  size 17082912
tokenizer_config.json CHANGED
@@ -2,7 +2,6 @@
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
-  "do_lower_case": true,
   "eos_token": "</s>",
   "mask_token": {
     "__type": "AddedToken",
@@ -15,8 +14,6 @@
   "model_max_length": 512,
   "pad_token": "<pad>",
   "sep_token": "</s>",
-  "strip_accents": null,
-  "tokenize_chinese_chars": true,
-  "tokenizer_class": "BertTokenizer",
+  "tokenizer_class": "XLMRobertaTokenizer",
   "unk_token": "<unk>"
 }
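With tokenizer_class now set to XLMRobertaTokenizer (and the BERT-specific keys removed), AutoTokenizer resolves to the SentencePiece-based XLM-R tokenizer backed by the newly added sentencepiece.bpe.model. A small sketch; the path is again a placeholder:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path
print(type(tokenizer).__name__)  # XLMRobertaTokenizer (or its fast variant)
print(tokenizer.pad_token_id)    # 1, matching "pad_token_id" in config.json
```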