Add SetFit ABSA model
- .gitattributes +1 -0
- 1_Pooling/config.json +10 -0
- README.md +196 -0
- config.json +28 -0
- config_sentence_transformers.json +10 -0
- config_setfit.json +11 -0
- model.safetensors +3 -0
- model_head.pkl +3 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +51 -0
- tokenizer.json +3 -0
- tokenizer_config.json +62 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
+{
+  "word_embedding_dimension": 1024,
+  "pooling_mode_cls_token": true,
+  "pooling_mode_mean_tokens": false,
+  "pooling_mode_max_tokens": false,
+  "pooling_mode_mean_sqrt_len_tokens": false,
+  "pooling_mode_weightedmean_tokens": false,
+  "pooling_mode_lasttoken": false,
+  "include_prompt": true
+}
README.md
ADDED
@@ -0,0 +1,196 @@
+---
+library_name: setfit
+tags:
+- setfit
+- absa
+- sentence-transformers
+- text-classification
+- generated_from_setfit_trainer
+metrics:
+- accuracy
+widget:
+- text: yang bersih. Pelayanan sangat Ramah dan:Tempat nya yang bersih. Pelayanan
+    sangat Ramah dan makanan ny yg sangat lezat
+- text: Restoran dengan pelayanan yang baik di:Restoran dengan pelayanan yang baik
+    di kota bandung, makanan yang disajikan sesuai dengan harga dan sangat enak. …
+- text: dan higienis dengan pelayanan sangat maksimal dan:Saya Makanan disini sangat
+    enak dan higienis dengan pelayanan sangat maksimal dan ditunjang dengan fasilitas
+    yang oke. Parkiran luas, tempat bersih dan nyaman. Good
+- text: ke sini, tempat ini makanan cepat:Saya pernah ke sini, tempat ini makanan
+    cepat saji yang enak bersama kalian untuk makan siang cepat saji, kamarnya bersih,
+    sirkulasi udaranya sempurna dan tentu saja memiliki internet berkecepatan tinggi,
+    sangat direkomendasikan
+- text: 'Ini tempat yang bagus untuk:Ini tempat yang bagus untuk keluarga, sahabat..
+
+    Dan juga baik untuk tamu kita..
+
+    Tapi pelayanannya terlambat..'
+pipeline_tag: text-classification
+inference: false
+---
+
+# SetFit Polarity Model
+
+This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Aspect Based Sentiment Analysis (ABSA). A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. In particular, this model is in charge of classifying aspect polarities.
+
+The model has been trained using an efficient few-shot learning technique that involves:
+
+1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
+2. Training a classification head with features from the fine-tuned Sentence Transformer.
+
+This model was trained within the context of a larger system for ABSA, which looks like so:
+
+1. Use a spaCy model to select possible aspect span candidates.
+2. Use a SetFit model to filter these possible aspect span candidates.
+3. **Use this SetFit model to classify the filtered aspect span candidates.**
+
+## Model Details
+
+### Model Description
+- **Model Type:** SetFit
+<!-- - **Sentence Transformer:** [Unknown](https://huggingface.co/unknown) -->
+- **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance
+- **spaCy Model:** id_core_news_trf
+- **SetFitABSA Aspect Model:** [pupugu02/absa-setfit-resto-aspect](https://huggingface.co/pupugu02/absa-setfit-resto-aspect)
+- **SetFitABSA Polarity Model:** [pupugu02/absa-setfit-resto-polarity](https://huggingface.co/pupugu02/absa-setfit-resto-polarity)
+- **Maximum Sequence Length:** 8192 tokens
+- **Number of Classes:** 3 classes
+<!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) -->
+<!-- - **Language:** Unknown -->
+<!-- - **License:** Unknown -->
+
+### Model Sources
+
+- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)
+- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)
+- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)
+
+### Model Labels
+| Label | Examples |
+|:--------|:---------|
+| positif | <ul><li>'time, selain tempat yang Sangat bersih:Mcd selalu jadi tempat ternyaman untuk me time, selain tempat yang Sangat bersih dan nyaman, mcdonals juga selalu menjaga kualitas makanan. Bagi saya mcd sangat affdorable dan worth it, selain itu paling digemari oleh kalangan anak muda dan anak anak sangat menyukai ayam nya.'</li><li>'bagus dan ada tempat buat bermain anak2:Makanan nya enak n harga nya juga murah.. View nya bagus dan ada tempat buat bermain anak2.. Naek beca ato perahu angsa..'</li><li>'produknya, memberikan pelayanan yang memuaskan,:McDonald’s adalah menjadi restoran cepat saji dengan pelayanan terbaik di dunia. Untuk mencapai visi ini, McDonald’s selalu menjamin mutu produk-produknya, memberikan pelayanan yang memuaskan, menawarkan kebersihan dan keamanan produk …'</li></ul> |
+| negatif | <ul><li>"\nKopi jelly terasa agak 'cawerang:Gang drive-thru sangat sempit sehingga Anda harus ekstra hati-hati.\nLayanan yang pasti cepat dan berbagai pilihan pembayaran.\nKopi jelly terasa agak 'cawerang' tapi okelah. Mereka juga menambahkan segel plastik untuk mencegah tumpah. Pemikiran yang bagus."</li><li>'maap banget yaaaa pelayanan nya lama bgt:aduh maap banget yaaaa pelayanan nya lama bgt ga kaya gacoan2 yg lainn, dr smua gacoan yg pernah dine in cuma ini paling lama ,'</li><li>'suka terlamat dan pelayanannya kurang bagus..:Tempatnya bersih sejuk cocok buat makan sambil bersantai... Tetapi kalo kondisi ramai pesanan suka terlamat dan pelayanannya kurang bagus.. untuk kondisi sepi masih aman pelayanan bagus ramah..'</li></ul> |
+| netral | <ul><li>'Banyak lalat. Rasanya biasa. Yg:Banyak lalat. Rasanya biasa. Yg lumayan sop iga bakar madu'</li><li>'Ada harga, ada rasa:Ada harga, ada rasa'</li><li>'D ini, tempat nya menjorok kedalam:Tiba di Bandung, kita mampir di restoran Mc D ini, tempat nya menjorok kedalam, tatanan design nya Mc D semua standard sesuai dengan kapasitas lahan nya. Disini memiliki tempat bermain anak-anak, dan untuk order mereka menyediakan mesin …'</li></ul> |
+
+## Uses
+
+### Direct Use for Inference
+
+First install the SetFit library:
+
+```bash
+pip install setfit
+```
+
+Then you can load this model and run inference.
+
+```python
+from setfit import AbsaModel
+
+# Download from the 🤗 Hub
+model = AbsaModel.from_pretrained(
+    "pupugu02/absa-setfit-resto-aspect",
+    "pupugu02/absa-setfit-resto-polarity",
+)
+# Run inference
+preds = model("The food was great, but the venue is just way too busy.")
+```
+
+<!--
+### Downstream Use
+
+*List how someone could finetune this model on their own dataset.*
+-->
+
+<!--
+### Out-of-Scope Use
+
+*List how the model may foreseeably be misused and address what users ought not to do with the model.*
+-->
+
+<!--
+## Bias, Risks and Limitations
+
+*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
+-->
+
+<!--
+### Recommendations
+
+*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
+-->
+
+## Training Details
+
+### Training Set Metrics
+| Training set | Min | Median | Max |
+|:-------------|:----|:--------|:----|
+| Word count | 3 | 28.0911 | 62 |
+
+| Label | Training Sample Count |
+|:--------|:----------------------|
+| konflik | 0 |
+| negatif | 15 |
+| netral | 28 |
+| positif | 363 |
+
+### Training Hyperparameters
+- batch_size: (128, 128)
+- num_epochs: (1, 1)
+- max_steps: -1
+- sampling_strategy: oversampling
+- body_learning_rate: (2e-05, 1e-05)
+- head_learning_rate: 0.01
+- loss: CosineSimilarityLoss
+- distance_metric: cosine_distance
+- margin: 0.25
+- end_to_end: False
+- use_amp: True
+- warmup_proportion: 0.1
+- seed: 42
+- eval_max_steps: -1
+- load_best_model_at_end: False
+
+### Framework Versions
+- Python: 3.10.12
+- SetFit: 1.0.3
+- Sentence Transformers: 3.0.0
+- spaCy: 3.7.4
+- Transformers: 4.36.2
+- PyTorch: 2.3.0+cu121
+- Datasets: 2.19.2
+- Tokenizers: 0.15.2
+
+## Citation
+
+### BibTeX
+```bibtex
+@article{https://doi.org/10.48550/arxiv.2209.11055,
+    doi = {10.48550/ARXIV.2209.11055},
+    url = {https://arxiv.org/abs/2209.11055},
+    author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
+    keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
+    title = {Efficient Few-Shot Learning Without Prompts},
+    publisher = {arXiv},
+    year = {2022},
+    copyright = {Creative Commons Attribution 4.0 International}
+}
+```
+
+<!--
+## Glossary
+
+*Clearly define terms in order to be accessible across audiences.*
+-->
+
+<!--
+## Model Card Authors
+
+*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
+-->
+
+<!--
+## Model Card Contact
+
+*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
+-->
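The values under "Training Hyperparameters" in the card above correspond to SetFit's `TrainingArguments`. Below is a hedged sketch of how a comparable training run could be set up; the single-row dataset and the choice of base checkpoint (taken from `config.json`'s `_name_or_path`) are illustrative placeholders, not the actual training data or procedure published in this repository.

```python
# Hedged sketch of an ABSA training setup matching the hyperparameters in the card.
from datasets import Dataset
from setfit import AbsaModel, AbsaTrainer, TrainingArguments

# Placeholder training data in SetFit's ABSA format (text, span, label, ordinal);
# the real training set for this model is not published here.
train_dataset = Dataset.from_dict({
    "text": ["Tempat nya yang bersih. Pelayanan sangat Ramah dan makanan ny yg sangat lezat"],
    "span": ["Pelayanan"],   # aspect span inside the text
    "label": ["positif"],    # polarity label for that span
    "ordinal": [0],          # which occurrence of the span is meant
})

# Base checkpoint name taken from config.json; the spaCy pipeline from config_setfit.json.
model = AbsaModel.from_pretrained(
    "firqaaa/indo-setfit-absa-bert-base-restaurants-polarity",
    spacy_model="id_core_news_trf",
)

# Tuples give (aspect model, polarity model) values, mirroring the card.
args = TrainingArguments(
    batch_size=(128, 128),
    num_epochs=(1, 1),
    sampling_strategy="oversampling",
    body_learning_rate=(2e-05, 1e-05),
    head_learning_rate=0.01,
    use_amp=True,
    warmup_proportion=0.1,
    seed=42,
)

trainer = AbsaTrainer(model, args=args, train_dataset=train_dataset)
trainer.train()
```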
config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "_name_or_path": "firqaaa/indo-setfit-absa-bert-base-restaurants-polarity",
+  "architectures": [
+    "XLMRobertaModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 8194,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.2",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 250002
+}
config_sentence_transformers.json
ADDED
@@ -0,0 +1,10 @@
+{
+  "__version__": {
+    "sentence_transformers": "2.2.2",
+    "transformers": "4.33.0",
+    "pytorch": "2.1.2+cu121"
+  },
+  "prompts": {},
+  "default_prompt_name": null,
+  "similarity_fn_name": null
+}
config_setfit.json
ADDED
@@ -0,0 +1,11 @@
+{
+  "labels": [
+    "konflik",
+    "negatif",
+    "netral",
+    "positif"
+  ],
+  "normalize_embeddings": false,
+  "span_context": 3,
+  "spacy_model": "id_core_news_trf"
+}
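This file is read automatically when the aspect/polarity pair is loaded; it records the label names, span context, and the spaCy pipeline the system expects. As a small illustration (the `spacy_model` argument below is optional and simply restates the value stored here; the named spaCy pipeline has to be installed in the environment):

```python
from setfit import AbsaModel

# config_setfit.json supplies the labels and the default spaCy pipeline;
# spacy_model can be passed explicitly to override or restate it.
model = AbsaModel.from_pretrained(
    "pupugu02/absa-setfit-resto-aspect",
    "pupugu02/absa-setfit-resto-polarity",
    spacy_model="id_core_news_trf",
)
```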
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f63dee23a0bf95fda64e8d449f3b986d248f47902b6bd425fe8c3c8a990cf1a
+size 2271064456
model_head.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b969a8393f074faca1b3632bbec9d7cc8f87bdee2a40bd4058c693cbcf86b58c
+size 33735
modules.json
ADDED
@@ -0,0 +1,20 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": "",
+    "type": "sentence_transformers.models.Transformer"
+  },
+  {
+    "idx": 1,
+    "name": "1",
+    "path": "1_Pooling",
+    "type": "sentence_transformers.models.Pooling"
+  },
+  {
+    "idx": 2,
+    "name": "2",
+    "path": "2_Normalize",
+    "type": "sentence_transformers.models.Normalize"
+  }
+]
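For orientation, `modules.json` describes the Sentence Transformer body as a three-stage stack: a Transformer encoder, CLS pooling (configured in `1_Pooling/config.json` above), and L2 normalization. A minimal sketch of assembling an equivalent stack by hand, assuming the base checkpoint named in `config.json`; in practice, loading the repository id with `SentenceTransformer(...)` builds this stack automatically.

```python
# Minimal sketch of the module stack described by modules.json (illustrative only).
from sentence_transformers import SentenceTransformer, models

transformer = models.Transformer(
    "firqaaa/indo-setfit-absa-bert-base-restaurants-polarity",  # base named in config.json
    max_seq_length=8192,
)
pooling = models.Pooling(
    transformer.get_word_embedding_dimension(),  # 1024 for this body
    pooling_mode_cls_token=True,                 # CLS pooling, as in 1_Pooling/config.json
    pooling_mode_mean_tokens=False,
)
normalize = models.Normalize()                   # the 2_Normalize stage

encoder = SentenceTransformer(modules=[transformer, pooling, normalize])
embeddings = encoder.encode(["Pelayanan sangat ramah dan makanan nya lezat"])
print(embeddings.shape)  # (1, 1024)
```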
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
+{
+  "max_seq_length": 8192,
+  "do_lower_case": false
+}
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1af481bd08ed9347cf9d3d07c24e5de75a10983819de076436400609e6705686
+size 17083075
tokenizer_config.json
ADDED
@@ -0,0 +1,62 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250001": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "max_length": 8192,
+  "model_max_length": 8192,
+  "pad_to_multiple_of": null,
+  "pad_token": "<pad>",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "stride": 0,
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "<unk>"
+}
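The tokenizer files added above (`sentencepiece.bpe.model`, `tokenizer.json`, `special_tokens_map.json`, `tokenizer_config.json`) describe a standard XLM-R tokenizer with an 8192-token limit. A minimal sketch of loading and inspecting it; the repository id below assumes this commit is the polarity model referenced in the model card.

```python
from transformers import AutoTokenizer

# Loads the XLMRobertaTokenizer configured by tokenizer_config.json.
tokenizer = AutoTokenizer.from_pretrained("pupugu02/absa-setfit-resto-polarity")
print(tokenizer.model_max_length)               # 8192
print(tokenizer.tokenize("Pelayanan sangat ramah"))
```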