multilingual upgrade upload of language-detector

- .gitattributes +4 -11
- README.md +186 -0
- config.json +73 -0
- model.safetensors +3 -0
- pytorch_model.bin +3 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +1 -0
- tf_model.h5 +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
.gitattributes
CHANGED
@@ -1,35 +1,28 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+model.safetensors filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,186 @@
---
language:
- multilingual
- ar
- bg
- de
- el
- en
- es
- fr
- hi
- it
- ja
- nl
- pl
- pt
- ru
- sw
- th
- tr
- ur
- vi
- zh
license: mit
tags:
- generated_from_trainer
datasets: papluca/language-identification
metrics:
- accuracy
- f1
base_model: xlm-roberta-base
model-index:
- name: xlm-roberta-base-language-detection
  results: []
---

# xlm-roberta-base-language-detection

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the [Language Identification](https://huggingface.co/datasets/papluca/language-identification#additional-information) dataset.

## Model description

This model is an XLM-RoBERTa transformer model with a classification head on top (i.e. a linear layer on top of the pooled output).
For additional information please refer to the [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) model card or to the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Conneau et al.

## Intended uses & limitations

You can directly use this model as a language detector, i.e. for sequence classification tasks. Currently, it supports the following 20 languages:

`arabic (ar), bulgarian (bg), german (de), modern greek (el), english (en), spanish (es), french (fr), hindi (hi), italian (it), japanese (ja), dutch (nl), polish (pl), portuguese (pt), russian (ru), swahili (sw), thai (th), turkish (tr), urdu (ur), vietnamese (vi), and chinese (zh)`

## Training and evaluation data

The model was fine-tuned on the [Language Identification](https://huggingface.co/datasets/papluca/language-identification#additional-information) dataset, which consists of text sequences in 20 languages. The training set contains 70k samples, while the validation and test sets contain 10k samples each. The average accuracy on the test set is **99.6%** (this matches the average macro/weighted F1-score, since the test set is perfectly balanced). A more detailed evaluation is provided in the following table.

| Language | Precision | Recall | F1-score | Support |
|:--------:|:---------:|:------:|:--------:|:-------:|
| ar       | 0.998     | 0.996  | 0.997    | 500     |
| bg       | 0.998     | 0.964  | 0.981    | 500     |
| de       | 0.998     | 0.996  | 0.997    | 500     |
| el       | 0.996     | 1.000  | 0.998    | 500     |
| en       | 1.000     | 1.000  | 1.000    | 500     |
| es       | 0.967     | 1.000  | 0.983    | 500     |
| fr       | 1.000     | 1.000  | 1.000    | 500     |
| hi       | 0.994     | 0.992  | 0.993    | 500     |
| it       | 1.000     | 0.992  | 0.996    | 500     |
| ja       | 0.996     | 0.996  | 0.996    | 500     |
| nl       | 1.000     | 1.000  | 1.000    | 500     |
| pl       | 1.000     | 1.000  | 1.000    | 500     |
| pt       | 0.988     | 1.000  | 0.994    | 500     |
| ru       | 1.000     | 0.994  | 0.997    | 500     |
| sw       | 1.000     | 1.000  | 1.000    | 500     |
| th       | 1.000     | 0.998  | 0.999    | 500     |
| tr       | 0.994     | 0.992  | 0.993    | 500     |
| ur       | 1.000     | 1.000  | 1.000    | 500     |
| vi       | 0.992     | 1.000  | 0.996    | 500     |
| zh       | 1.000     | 1.000  | 1.000    | 500     |

### Benchmarks

As a baseline to compare `xlm-roberta-base-language-detection` against, we have used the Python [langid](https://github.com/saffsd/langid.py) library. Since it comes pre-trained on 97 languages, we have used its `.set_languages()` method to constrain the language set to our 20 languages. The average accuracy of langid on the test set is **98.5%**. More details are provided in the table below.

| Language | Precision | Recall | F1-score | Support |
|:--------:|:---------:|:------:|:--------:|:-------:|
| ar       | 0.990     | 0.970  | 0.980    | 500     |
| bg       | 0.998     | 0.964  | 0.981    | 500     |
| de       | 0.992     | 0.944  | 0.967    | 500     |
| el       | 1.000     | 0.998  | 0.999    | 500     |
| en       | 1.000     | 1.000  | 1.000    | 500     |
| es       | 1.000     | 0.968  | 0.984    | 500     |
| fr       | 0.996     | 1.000  | 0.998    | 500     |
| hi       | 0.949     | 0.976  | 0.963    | 500     |
| it       | 0.990     | 0.980  | 0.985    | 500     |
| ja       | 0.927     | 0.988  | 0.956    | 500     |
| nl       | 0.980     | 1.000  | 0.990    | 500     |
| pl       | 0.986     | 0.996  | 0.991    | 500     |
| pt       | 0.950     | 0.996  | 0.973    | 500     |
| ru       | 0.996     | 0.974  | 0.985    | 500     |
| sw       | 1.000     | 1.000  | 1.000    | 500     |
| th       | 1.000     | 0.996  | 0.998    | 500     |
| tr       | 0.990     | 0.968  | 0.979    | 500     |
| ur       | 0.998     | 0.996  | 0.997    | 500     |
| vi       | 0.971     | 0.990  | 0.980    | 500     |
| zh       | 1.000     | 1.000  | 1.000    | 500     |

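For reference, here is a minimal sketch of how such a constrained langid baseline can be reproduced (assuming the `langid` package is installed; the example sentence is illustrative):

```python
import langid

# Restrict langid's 97-language model to the 20 languages of this dataset.
LANGS = ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "it", "ja",
         "nl", "pl", "pt", "ru", "sw", "th", "tr", "ur", "vi", "zh"]
langid.set_languages(LANGS)

# classify() returns a (language, score) tuple for a single string.
lang, score = langid.classify("Brevity is the soul of wit.")
print(lang)  # expected: "en"
```
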
## How to get started with the model

The easiest way to use the model is via the high-level `pipeline` API:

```python
from transformers import pipeline

text = [
    "Brevity is the soul of wit.",
    "Amor, ch'a nullo amato amar perdona.",
]

model_ckpt = "papluca/xlm-roberta-base-language-detection"
pipe = pipeline("text-classification", model=model_ckpt)
pipe(text, top_k=1, truncation=True)
```
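For these two inputs the pipeline should return the single best label per sentence, e.g. `en` and `it` with scores close to 1.0 (the exact output nesting with `top_k=1` varies slightly across `transformers` versions).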

Or one can proceed with the tokenizer and model separately:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

text = [
    "Brevity is the soul of wit.",
    "Amor, ch'a nullo amato amar perdona.",
]

model_ckpt = "papluca/xlm-roberta-base-language-detection"
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
model = AutoModelForSequenceClassification.from_pretrained(model_ckpt)

inputs = tokenizer(text, padding=True, truncation=True, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

preds = torch.softmax(logits, dim=-1)

# Map raw predictions to languages
id2lang = model.config.id2label
vals, idxs = torch.max(preds, dim=1)
{id2lang[k.item()]: v.item() for k, v in zip(idxs, vals)}
```
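The final dict comprehension maps each input's best label to its probability, yielding something like `{'en': 0.99, 'it': 0.98}` (scores illustrative).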

## Training procedure

Fine-tuning was done via the `Trainer` API. Here is the [Colab notebook](https://colab.research.google.com/drive/15LJTckS6gU3RQOmjLqxVNBmbsBdnUEvl?usp=sharing) with the training code.

### Training hyperparameters

The following hyperparameters were used during training (a rough `TrainingArguments` mapping is sketched after the list):
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 128
- seed: 42
- optimizer: Adam with betas=(0.9, 0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
- mixed_precision_training: Native AMP
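
As a rough, unofficial sketch, these hyperparameters map onto `TrainingArguments` roughly as follows (the output directory name is a placeholder, not the one actually used):

```python
from transformers import TrainingArguments

# Approximate mapping of the hyperparameters listed above; the Adam betas and
# epsilon match the TrainingArguments defaults, so no explicit flags are needed.
training_args = TrainingArguments(
    output_dir="xlm-roberta-base-language-detection",  # placeholder
    learning_rate=2e-5,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=128,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=2,
    fp16=True,  # "Native AMP" mixed precision
)
```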

### Training results

The validation results on the `valid` split of the Language Identification dataset are summarised below.

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1     |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.2492        | 1.0   | 1094 | 0.0149          | 0.9969   | 0.9969 |
| 0.0101        | 2.0   | 2188 | 0.0103          | 0.9977   | 0.9977 |

In short, it achieves the following results on the validation set:
- Loss: 0.0101
- Accuracy: 0.9977
- F1: 0.9977

### Framework versions

- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
config.json
ADDED
@@ -0,0 +1,73 @@
{
  "_name_or_path": "papluca/xlm-roberta-base-language-detection",
  "architectures": [
    "XLMRobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "ja",
    "1": "nl",
    "2": "ar",
    "3": "pl",
    "4": "de",
    "5": "it",
    "6": "pt",
    "7": "tr",
    "8": "es",
    "9": "hi",
    "10": "el",
    "11": "ur",
    "12": "bg",
    "13": "en",
    "14": "fr",
    "15": "zh",
    "16": "ru",
    "17": "th",
    "18": "sw",
    "19": "vi"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "ar": 2,
    "bg": 12,
    "de": 4,
    "el": 10,
    "en": 13,
    "es": 8,
    "fr": 14,
    "hi": 9,
    "it": 5,
    "ja": 0,
    "nl": 1,
    "pl": 3,
    "pt": 6,
    "ru": 16,
    "sw": 18,
    "th": 17,
    "tr": 7,
    "ur": 11,
    "vi": 19,
    "zh": 15
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "xlm-roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "torch_dtype": "float32",
  "transformers_version": "4.12.5",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 250002
}
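The `id2label` and `label2id` maps above are what turn class indices into language codes at inference time. A quick way to inspect them without downloading the weights (a sketch using the standard `AutoConfig` API):

```python
from transformers import AutoConfig

# Loads only config.json, not the model weights.
config = AutoConfig.from_pretrained("papluca/xlm-roberta-base-language-detection")
print(config.id2label[13])    # "en"
print(config.label2id["en"])  # 13
```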
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a835d6e8ed50ef6b3c180db87446a83ee5ac437e981c932c8e1e239aacbe08b7
size 1112264584
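These three lines are a Git LFS pointer, not the weights themselves: the repository tracks only the SHA-256 hash and byte size (about 1.1 GB here), while the actual file lives in LFS storage.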
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eb6bded160fdd712245e1bd19c4de417e1508094a9f69d92ae287f32a8732888
size 1112318701
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
size 5069051
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tf_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d6417044a1451c9a5fd302579ee5d39bae3831b0cd57bd008b61e79d33156f6e
size 1112525696
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "drive/MyDrive/Colab Notebooks/HuggingFace_course/HF_course_community_event/xlm-roberta-base-finetuned-language-detection", "tokenizer_class": "XLMRobertaTokenizer"}
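Note the `model_max_length` of 512 in the tokenizer config: longer inputs must be truncated, which is why the usage examples in the README pass `truncation=True`.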