Commit e473745
Parent(s):
Duplicate from Lajavaness/sentence-camembert-large
Co-authored-by: Van Tuan DANG <dangvantuan@users.noreply.huggingface.co>
- .DS_Store +0 -0
- .gitattributes +35 -0
- 1_Pooling/config.json +7 -0
- README.md +139 -0
- added_tokens.json +9 -0
- config.json +28 -0
- config_sentence_transformers.json +7 -0
- model.safetensors +3 -0
- modules.json +14 -0
- pytorch_model.bin +3 -0
- sentence_bert_config.json +4 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +13 -0
- tokenizer.json +0 -0
- tokenizer_config.json +75 -0
.DS_Store
ADDED
Binary file (6.15 kB)
.gitattributes
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json
ADDED
@@ -0,0 +1,7 @@
{
    "word_embedding_dimension": 1024,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false
}
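This pooling configuration selects mean pooling: a sentence embedding is the average of the token embeddings, ignoring padding positions. A minimal sketch of that computation with plain transformers (sentence-transformers runs this step automatically at inference time):

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("Lajavaness/sentence-camembert-large")
encoder = AutoModel.from_pretrained("Lajavaness/sentence-camembert-large")

batch = tokenizer(["Un avion est en train de décoller."], return_tensors="pt")
with torch.no_grad():
    token_embeddings = encoder(**batch).last_hidden_state  # (1, seq_len, 1024)

# Mean pooling: average the token vectors, masking out padding.
mask = batch["attention_mask"].unsqueeze(-1).float()        # (1, seq_len, 1)
sentence_embedding = (token_embeddings * mask).sum(1) / mask.sum(1).clamp(min=1e-9)
print(sentence_embedding.shape)  # torch.Size([1, 1024])
```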
README.md
ADDED
@@ -0,0 +1,139 @@
---
pipeline_tag: sentence-similarity
language: fr
datasets:
- stsb_multi_mt
tags:
- Text
- Sentence Similarity
- Sentence-Embedding
- camembert-large
license: apache-2.0
model-index:
- name: sentence-camembert-large by Van Tuan DANG
  results:
  - task:
      name: Sentence-Embedding
      type: Text Similarity
    dataset:
      name: Text Similarity fr
      type: stsb_multi_mt
      args: fr
    metrics:
    - name: Test Pearson correlation coefficient
      type: Pearson_correlation_coefficient
      value: 88.63
---

## Description

[**Sentence-CamemBERT-Large**](https://huggingface.co/Lajavaness/sentence-camembert-large) is a French sentence-embedding model developed by [La Javaness](https://www.lajavaness.com/). It represents the content and semantics of a French sentence as a mathematical vector, capturing meaning beyond the individual words of queries and documents, which enables powerful semantic search.
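To make the semantic-search use concrete, here is a minimal sketch that ranks documents by cosine similarity to a query; the corpus and query strings are invented for illustration:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("Lajavaness/sentence-camembert-large")

# Hypothetical corpus and query, for illustration only.
documents = [
    "La tour Eiffel se trouve à Paris.",
    "Le chat dort sur le canapé.",
    "Paris est la capitale de la France.",
]
query = "Où se situe la tour Eiffel ?"

doc_embeddings = model.encode(documents, convert_to_tensor=True)
query_embedding = model.encode(query, convert_to_tensor=True)

# Rank documents by cosine similarity to the query.
scores = util.cos_sim(query_embedding, doc_embeddings)[0]
for score, doc in sorted(zip(scores.tolist(), documents), reverse=True):
    print(f"{score:.3f}  {doc}")
```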
## A state-of-the-art pre-trained sentence-embedding model for French

The [Lajavaness/sentence-camembert-large](https://huggingface.co/Lajavaness/sentence-camembert-large) model improves on [dangvantuan/sentence-camembert-large](https://huggingface.co/dangvantuan/sentence-camembert-large), offering greater robustness and better performance on all STS benchmark datasets. It was fine-tuned from the pre-trained [camembert/camembert-large](https://huggingface.co/camembert/camembert-large) using [Siamese BERT-Networks with 'sentence-transformers'](https://www.sbert.net/) on the [stsb](https://huggingface.co/datasets/stsb_multi_mt/viewer/fr/train) dataset, then combined with [Augmented SBERT](https://aclanthology.org/2021.naacl-main.28.pdf) on the same dataset, with pair-sampling strategies driven by two models: [CrossEncoder-camembert-large](https://huggingface.co/dangvantuan/CrossEncoder-camembert-large) and [dangvantuan/sentence-camembert-large](https://huggingface.co/dangvantuan/sentence-camembert-large).

## Usage

The model can be used directly with the sentence-transformers library as follows:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("Lajavaness/sentence-camembert-large")

sentences = [
    "Un avion est en train de décoller.",
    "Un homme joue d'une grande flûte.",
    "Un homme étale du fromage râpé sur une pizza.",
    "Une personne jette un chat au plafond.",
    "Une personne est en train de plier un morceau de papier.",
]

embeddings = model.encode(sentences)
```
## Evaluation

The model can be evaluated as follows on the French dev and test data of stsb:

```python
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample

model = SentenceTransformer("Lajavaness/sentence-camembert-large")

def convert_dataset(dataset):
    dataset_samples = []
    for df in dataset:
        score = float(df['similarity_score']) / 5.0  # Normalize score to range 0 ... 1
        inp_example = InputExample(texts=[df['sentence1'], df['sentence2']], label=score)
        dataset_samples.append(inp_example)
    return dataset_samples

# Loading the dataset for evaluation
df_dev = load_dataset("stsb_multi_mt", name="fr", split="dev")
df_test = load_dataset("stsb_multi_mt", name="fr", split="test")

# Convert the dataset for evaluation

# For the dev set:
dev_samples = convert_dataset(df_dev)
val_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
val_evaluator(model, output_path="./")

# For the test set:
test_samples = convert_dataset(df_test)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
test_evaluator(model, output_path="./")
```
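The evaluator reports the Pearson and Spearman correlation between the gold scores and the cosine similarities of the paired embeddings. Continuing from the snippet above, a minimal sketch that reproduces those two numbers by hand, assuming scipy is installed:

```python
from scipy.stats import pearsonr, spearmanr
from sentence_transformers import util

def sts_correlations(model, samples):
    # samples: InputExample pairs with gold scores scaled to [0, 1], as built above.
    emb1 = model.encode([s.texts[0] for s in samples], convert_to_tensor=True)
    emb2 = model.encode([s.texts[1] for s in samples], convert_to_tensor=True)
    cosine = util.cos_sim(emb1, emb2).diagonal().tolist()  # similarity per aligned pair
    gold = [s.label for s in samples]
    return pearsonr(gold, cosine)[0], spearmanr(gold, cosine)[0]

pearson, spearman = sts_correlations(model, test_samples)
print(f"Pearson: {pearson:.4f}  Spearman: {spearman:.4f}")
```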
**Test results**:
The performance is measured using Pearson and Spearman correlation:
- On dev

| Model | Pearson correlation | Spearman correlation | #params |
| ------------- | ------------- | ------------- | ------------- |
| [Lajavaness/sentence-camembert-large](https://huggingface.co/Lajavaness/sentence-camembert-large) | **88.63** | **88.46** | 336M |
| [dangvantuan/sentence-camembert-large](https://huggingface.co/dangvantuan/sentence-camembert-large) | 88.20 | 88.02 | 336M |
| [Sahajtomar/french_semantic](https://huggingface.co/Sahajtomar/french_semantic) | 87.44 | 87.30 | 336M |
| [Lajavaness/sentence-flaubert-base](https://huggingface.co/Lajavaness/sentence-flaubert-base) | 87.14 | 87.10 | 137M |
| [GPT-3 (text-davinci-003)](https://platform.openai.com/docs/models) | 85 | - | 175B |
| [GPT-3 (text-embedding-ada-002)](https://platform.openai.com/docs/models) | 79.75 | 80.44 | - |

- On test, Pearson and Spearman correlation are evaluated on several benchmark datasets:

**Pearson score**

| Model | [STS-B](https://huggingface.co/datasets/stsb_multi_mt/viewer/fr/train) | [STS12-fr](https://huggingface.co/datasets/Lajavaness/STS12-fr) | [STS13-fr](https://huggingface.co/datasets/Lajavaness/STS13-fr) | [STS14-fr](https://huggingface.co/datasets/Lajavaness/STS14-fr) | [STS15-fr](https://huggingface.co/datasets/Lajavaness/STS15-fr) | [STS16-fr](https://huggingface.co/datasets/Lajavaness/STS16-fr) | [SICK-fr](https://huggingface.co/datasets/Lajavaness/SICK-fr) | #params |
|------------------------------------------|-------|----------|----------|----------|----------|----------|---------|--------|
| [Lajavaness/sentence-camembert-large](https://huggingface.co/Lajavaness/sentence-camembert-large) | **86.26** | **87.42** | **89.34** | **88.05** | **88.91** | 77.15 | 83.13 | 336M |
| [dangvantuan/sentence-camembert-large](https://huggingface.co/dangvantuan/sentence-camembert-large) | 85.88 | 87.28 | 89.25 | 87.91 | 88.54 | 76.90 | 83.26 | 336M |
| [Sahajtomar/french_semantic](https://huggingface.co/Sahajtomar/french_semantic) | 85.80 | 86.05 | 88.50 | 86.57 | 87.49 | 77.85 | 83.27 | 336M |
| [Lajavaness/sentence-flaubert-base](https://huggingface.co/Lajavaness/sentence-flaubert-base) | 85.39 | 86.64 | 87.24 | 85.68 | 87.99 | 75.78 | 82.84 | 137M |
| [GPT-3 (text-embedding-ada-002)](https://platform.openai.com/docs/models) | 79.03 | 66.16 | 75.48 | 70.69 | 77.88 | 65.18 | - | - |

**Spearman score**

| Model | [STS-B](https://huggingface.co/datasets/stsb_multi_mt/viewer/fr/train) | [STS12-fr](https://huggingface.co/datasets/Lajavaness/STS12-fr) | [STS13-fr](https://huggingface.co/datasets/Lajavaness/STS13-fr) | [STS14-fr](https://huggingface.co/datasets/Lajavaness/STS14-fr) | [STS15-fr](https://huggingface.co/datasets/Lajavaness/STS15-fr) | [STS16-fr](https://huggingface.co/datasets/Lajavaness/STS16-fr) | [SICK-fr](https://huggingface.co/datasets/Lajavaness/SICK-fr) | #params |
|:-------------------------------------|-------:|---------:|---------:|---------:|---------:|---------:|--------:|:-------|
| [Lajavaness/sentence-camembert-large](https://huggingface.co/Lajavaness/sentence-camembert-large) | **86.14** | **81.22** | 88.61 | **86.28** | **89.01** | 78.65 | **77.71** | 336M |
| [dangvantuan/sentence-camembert-large](https://huggingface.co/dangvantuan/sentence-camembert-large) | 85.78 | 81.09 | 88.68 | 85.81 | 88.56 | 78.49 | 77.70 | 336M |
| [Sahajtomar/french_semantic](https://huggingface.co/Sahajtomar/french_semantic) | 85.55 | 77.92 | 87.85 | 83.96 | 87.63 | 79.07 | 77.14 | 336M |
| [Lajavaness/sentence-flaubert-base](https://huggingface.co/Lajavaness/sentence-flaubert-base) | 85.67 | 79.97 | 86.91 | 84.57 | 88.10 | 77.84 | 77.55 | 137M |
| [GPT-3 (text-embedding-ada-002)](https://platform.openai.com/docs/models) | 77.53 | 64.27 | 76.41 | 69.63 | 78.65 | 75.30 | - | - |

## Citation

@article{reimers2019sentence,
   title={Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks},
   author={Reimers, Nils and Gurevych, Iryna},
   journal={https://arxiv.org/abs/1908.10084},
   year={2019}
}

@article{martin2020camembert,
   title={CamemBERT: a Tasty French Language Model},
   author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t},
   journal={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
   year={2020}
}
added_tokens.json
ADDED
@@ -0,0 +1,9 @@
{
    "</s>": 6,
    "</s>NOTUSED": 2,
    "<mask>": 32004,
    "<pad>": 1,
    "<s>": 5,
    "<s>NOTUSED": 0,
    "<unk>": 3
}
config.json
ADDED
@@ -0,0 +1,28 @@
{
    "_name_or_path": "Lajavaness/sentence-camembert-large",
    "architectures": [
        "CamembertModel"
    ],
    "attention_probs_dropout_prob": 0.1,
    "bos_token_id": 0,
    "classifier_dropout": null,
    "eos_token_id": 2,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.1,
    "hidden_size": 1024,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "layer_norm_eps": 1e-05,
    "max_position_embeddings": 514,
    "model_type": "camembert",
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "output_past": true,
    "pad_token_id": 1,
    "position_embedding_type": "absolute",
    "torch_dtype": "float16",
    "transformers_version": "4.38.2",
    "type_vocab_size": 1,
    "use_cache": true,
    "vocab_size": 32005
}
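These are the standard CamemBERT-large hyperparameters (24 layers, hidden size 1024, SentencePiece vocabulary of 32005 entries). If needed, they can be sanity-checked from Python; a small sketch using transformers:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Lajavaness/sentence-camembert-large")
# Mirrors the config.json above.
print(config.model_type, config.num_hidden_layers, config.hidden_size, config.vocab_size)
# camembert 24 1024 32005
```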
config_sentence_transformers.json
ADDED
@@ -0,0 +1,7 @@
{
    "__version__": {
        "sentence_transformers": "2.2.2",
        "transformers": "4.34.0",
        "pytorch": "2.0.1"
    }
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:402e561f8da2b00dac77e4880062f02ac3057faf41ba51b93a2ac094086e5c23
size 1346690896
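This file is a Git LFS pointer; the ~1.3 GB checkpoint itself lives in LFS storage. Once downloaded, its tensors can be listed without loading the whole model; a minimal sketch, assuming the safetensors package and a local copy of the file:

```python
from safetensors import safe_open

# Hypothetical local path to the downloaded checkpoint.
with safe_open("model.safetensors", framework="pt") as f:
    for name in list(f.keys())[:5]:
        tensor = f.get_tensor(name)
        print(name, tuple(tensor.shape), tensor.dtype)
```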
modules.json
ADDED
@@ -0,0 +1,14 @@
[
    {
        "idx": 0,
        "name": "0",
        "path": "",
        "type": "sentence_transformers.models.Transformer"
    },
    {
        "idx": 1,
        "name": "1",
        "path": "1_Pooling",
        "type": "sentence_transformers.models.Pooling"
    }
]
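modules.json chains two modules: the Transformer encoder at the repository root and the mean-pooling layer under 1_Pooling. The SentenceTransformer constructor assembles this pipeline automatically from this file; a rough sketch of the equivalent manual construction:

```python
from sentence_transformers import SentenceTransformer, models

# Module 0: the CamemBERT encoder producing per-token embeddings.
word_embedding = models.Transformer("Lajavaness/sentence-camembert-large")
# Module 1: mean pooling, mirroring 1_Pooling/config.json.
pooling = models.Pooling(
    word_embedding.get_word_embedding_dimension(),  # 1024
    pooling_mode_mean_tokens=True,
)
model = SentenceTransformer(modules=[word_embedding, pooling])
print(model.encode("Bonjour tout le monde.").shape)  # (1024,)
```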
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:21e2cf7956d639d06096d34da9c8b521969ba9ab822bc99393f0ced4314aec48
size 1346777833
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
    "max_seq_length": 514,
    "do_lower_case": false
}
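max_seq_length caps how many tokens are encoded per input; longer texts are truncated. A small sketch of inspecting it, and optionally lowering it, at runtime:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("Lajavaness/sentence-camembert-large")
print(model.max_seq_length)  # 514, from sentence_bert_config.json

# Optionally truncate earlier to trade long-text coverage for speed.
model.max_seq_length = 256
```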
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f98f266fdc548c94216aaadc13ffaaafacf0c8793303e2195322d954549ea261
size 808767
special_tokens_map.json
ADDED
@@ -0,0 +1,13 @@
{
    "additional_special_tokens": [
        "<s>NOTUSED",
        "</s>NOTUSED"
    ],
    "bos_token": "<s>",
    "cls_token": "<s>",
    "eos_token": "</s>",
    "mask_token": "<mask>",
    "pad_token": "<pad>",
    "sep_token": "</s>",
    "unk_token": "<unk>"
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,75 @@
{
    "added_tokens_decoder": {
        "0": {
            "content": "<s>NOTUSED",
            "lstrip": true,
            "normalized": false,
            "rstrip": true,
            "single_word": false,
            "special": true
        },
        "1": {
            "content": "<pad>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "2": {
            "content": "</s>NOTUSED",
            "lstrip": true,
            "normalized": false,
            "rstrip": true,
            "single_word": false,
            "special": true
        },
        "3": {
            "content": "<unk>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "5": {
            "content": "<s>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "6": {
            "content": "</s>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "32004": {
            "content": "<mask>",
            "lstrip": true,
            "normalized": true,
            "rstrip": false,
            "single_word": false,
            "special": true
        }
    },
    "additional_special_tokens": [
        "<s>NOTUSED",
        "</s>NOTUSED"
    ],
    "bos_token": "<s>",
    "clean_up_tokenization_spaces": true,
    "cls_token": "<s>",
    "eos_token": "</s>",
    "mask_token": "<mask>",
    "model_max_length": 1000000000000000019884624838656,
    "pad_token": "<pad>",
    "sep_token": "</s>",
    "sp_model_kwargs": {},
    "tokenizer_class": "CamembertTokenizer",
    "unk_token": "<unk>"
}
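Together with sentencepiece.bpe.model, added_tokens.json, and special_tokens_map.json, this file configures the CamembertTokenizer. A quick sketch of loading the tokenizer and checking how a sentence is wrapped in the special tokens defined above:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Lajavaness/sentence-camembert-large")

encoded = tokenizer("Un avion est en train de décoller.")
tokens = tokenizer.convert_ids_to_tokens(encoded["input_ids"])
print(tokens[0], tokens[-1])                           # <s> </s>
print(tokenizer.cls_token_id, tokenizer.sep_token_id)  # 5 6, per added_tokens.json
print(tokenizer.mask_token_id)                         # 32004
```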