Upload folder using huggingface_hub

- README.md +1 -1
- config.json +2 -1
- pytorch_model.bin +1 -1
- sentence_bert_config.json +1 -1
- tokenizer_config.json +2 -1
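The commit message "Upload folder using huggingface_hub" is the default produced by `huggingface_hub`'s `upload_folder`. A minimal sketch of the kind of call that creates such a commit; the folder path and repo id below are placeholders, since the commit does not show them:

```python
# Sketch: push a local model folder, producing a commit like this one.
# folder_path and repo_id are placeholders, not values from this commit.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="./my-finetuned-model",  # local dir with README.md, config.json, ...
    repo_id="your-username/your-model",  # placeholder repo id
    repo_type="model",
)
```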
README.md CHANGED

@@ -119,7 +119,7 @@ Parameters of the fit()-Method:
 
 ## Full Model Architecture
 ```
 SentenceTransformer(
-  (0): Transformer({'max_seq_length':
+  (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: RobertaModel
   (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
 )
 ```
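The updated architecture string can be reproduced by loading the model with `sentence-transformers`; a sketch, using a placeholder repo id because this commit does not name the repository:

```python
# Sketch: load the model and confirm the architecture shown in the README diff.
# "your-username/your-model" is a placeholder for this repo's id.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("your-username/your-model")
print(model)                 # prints the SentenceTransformer(...) block above
print(model.max_seq_length)  # 256, matching sentence_bert_config.json

# Mean pooling over RobertaModel token embeddings yields 768-dim sentence vectors.
embeddings = model.encode(["Xin chào", "Hello"])
print(embeddings.shape)      # (2, 768)
```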
config.json CHANGED

@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/root/.cache/torch/sentence_transformers/
+  "_name_or_path": "/root/.cache/torch/sentence_transformers/VoVanPhuc_sup-SimCSE-VietNamese-phobert-base",
   "architectures": [
     "RobertaModel"
   ],
@@ -7,6 +7,7 @@
   "bos_token_id": 0,
   "classifier_dropout": null,
   "eos_token_id": 2,
+  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
pytorch_model.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:fbade96fe7eb6c557683bb90a606d9ad80cb59b0f65929a2569e5ddf8df71c87
 size 540059817
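This entry is a Git LFS pointer rather than the weights themselves: the `oid` line records the SHA-256 of the real 540 MB file. A standard-library sketch for checking a downloaded `pytorch_model.bin` against the pointer:

```python
# Verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks to avoid loading 540 MB into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of("pytorch_model.bin")
assert digest == "fbade96fe7eb6c557683bb90a606d9ad80cb59b0f65929a2569e5ddf8df71c87"
```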
sentence_bert_config.json CHANGED

@@ -1,4 +1,4 @@
 {
-  "max_seq_length":
+  "max_seq_length": 256,
   "do_lower_case": false
 }
tokenizer_config.json CHANGED

@@ -4,9 +4,10 @@
   "cls_token": "<s>",
   "eos_token": "</s>",
   "mask_token": "<mask>",
-  "model_max_length":
+  "model_max_length": 256,
   "pad_token": "<pad>",
   "sep_token": "</s>",
   "tokenizer_class": "PhobertTokenizer",
+  "tokenizer_file": null,
   "unk_token": "<unk>"
 }
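The tokenizer change keeps `model_max_length` in step with the model's `max_seq_length` of 256. A sketch for sanity-checking it after download, again with a placeholder repo id:

```python
# Sketch: confirm the tokenizer settings from tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-username/your-model")
print(type(tokenizer).__name__)    # PhobertTokenizer
print(tokenizer.model_max_length)  # 256
print(tokenizer.mask_token)        # <mask>
```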