Go Inoue committed
Commit f02a425
1 Parent(s): ace44e7

Add model files

- README.md +45 -0
- config.json +78 -0
- eval_results_test.txt +4 -0
- optimizer.pt +3 -0
- pytorch_model.bin +3 -0
- scheduler.pt +3 -0
- special_tokens_map.json +1 -0
- tf_model.h5 +3 -0
- tokenizer_config.json +1 -0
- training_args.bin +3 -0
- vocab.txt +0 -0
README.md
ADDED
@@ -0,0 +1,45 @@
---
language:
- ar
license: apache-2.0
widget:
- text: "عامل ايه ؟"
---

# CAMeLBERT-Mix DID Corpus26 Model

## Model description
**CAMeLBERT-Mix DID Corpus26 Model** is a dialect identification (DID) model that was built by fine-tuning the [CAMeLBERT-Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model.
For the fine-tuning, we used the [MADAR Corpus 26](https://camel.abudhabi.nyu.edu/madar-shared-task-2019/) dataset, which includes 26 labels.
Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)."* Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT).

## Intended uses
You can use the CAMeLBERT-Mix DID Corpus26 model as part of the transformers pipeline.
This model will also be available in [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) soon.

#### How to use
To use the model with a transformers pipeline:
```python
>>> from transformers import pipeline
>>> did = pipeline('text-classification', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar26')
>>> sentences = ['عامل ايه ؟', 'شلونك ؟ شخبارك ؟']
>>> did(sentences)
[{'label': 'CAI', 'score': 0.8751305937767029},
 {'label': 'DOH', 'score': 0.9867215156555176}]
```
*Note*: to download our models, you would need `transformers>=3.5.0`. Otherwise, you could download the models manually.
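As an alternative to the pipeline, the checkpoint can also be loaded with the generic `Auto*` classes. The snippet below is a minimal, illustrative sketch (not part of the original card); it only assumes the same model name used above, and the label string comes from the model's own `id2label` mapping.

```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> model_name = 'CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar26'
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)

>>> # Score one sentence and map the argmax class index to its dialect label
>>> inputs = tokenizer('عامل ايه ؟', return_tensors='pt')
>>> with torch.no_grad():
...     logits = model(**inputs).logits
>>> model.config.id2label[logits.argmax(dim=-1).item()]
'CAI'
```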
## Citation
```bibtex
@inproceedings{inoue-etal-2021-interplay,
    title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models",
    author = "Inoue, Go and
      Alhafni, Bashar and
      Baimukan, Nurpeiis and
      Bouamor, Houda and
      Habash, Nizar",
    booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop",
    month = apr,
    year = "2021",
    address = "Kyiv, Ukraine (Online)",
    publisher = "Association for Computational Linguistics",
    abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.",
}
```
config.json
ADDED
@@ -0,0 +1,78 @@
{
  "_name_or_path": "bert-base-arabic-camelbert-mix-did-madar26/",
  "architectures": [
    "BertForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "finetuning_task": "arabic_did",
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "KHA",
    "1": "TUN",
    "10": "TRI",
    "11": "ALG",
    "12": "MSA",
    "13": "FES",
    "14": "BEN",
    "15": "SAL",
    "16": "JER",
    "17": "BEI",
    "18": "SFX",
    "19": "MUS",
    "2": "MOS",
    "20": "JED",
    "21": "RIY",
    "22": "RAB",
    "23": "DAM",
    "24": "ASW",
    "25": "AMM",
    "3": "CAI",
    "4": "BAG",
    "5": "ALE",
    "6": "DOH",
    "7": "ALX",
    "8": "SAN",
    "9": "BAS"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "ALE": 5,
    "ALG": 11,
    "ALX": 7,
    "AMM": 25,
    "ASW": 24,
    "BAG": 4,
    "BAS": 9,
    "BEI": 17,
    "BEN": 14,
    "CAI": 3,
    "DAM": 23,
    "DOH": 6,
    "FES": 13,
    "JED": 20,
    "JER": 16,
    "KHA": 0,
    "MOS": 2,
    "MSA": 12,
    "MUS": 19,
    "RAB": 22,
    "RIY": 21,
    "SAL": 15,
    "SAN": 8,
    "SFX": 18,
    "TRI": 10,
    "TUN": 1
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "type_vocab_size": 2,
  "vocab_size": 30000
}
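The `id2label` / `label2id` maps above are what the classification head and the pipeline use to convert between class indices and the 26 MADAR city codes. As an illustrative check (not part of this commit), the mapping can be inspected without downloading the weights by loading only the config:

```python
>>> from transformers import AutoConfig
>>> config = AutoConfig.from_pretrained('CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar26')
>>> config.num_labels
26
>>> config.id2label[3], config.id2label[6]
('CAI', 'DOH')
```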
eval_results_test.txt
ADDED
@@ -0,0 +1,4 @@
acc = 62.86538461538461
f1 = 62.908251684423114
precision = 63.30140739141851
recall = 62.86538461538461
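These figures read as percentage scores on the MADAR Corpus 26 test set; recall matching accuracy exactly is what weighted averaging produces, so the scikit-learn sketch below (an assumption about the evaluation setup, not the authors' code) shows how scores of this form can be computed:

```python
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def did_scores(y_true, y_pred):
    """Accuracy and weighted F1/precision/recall as percentages, mirroring the file's fields."""
    acc = accuracy_score(y_true, y_pred)
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average='weighted', zero_division=0)
    return {'acc': 100 * acc, 'f1': 100 * f1,
            'precision': 100 * precision, 'recall': 100 * recall}
```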
optimizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:664ab443c4f4bbfdf8199cc31e9efc3f3774401ebe8553e8db9b1c0ba77dabe5
size 872870730
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:13993e45dfaa873d0e5d7bae7a92fded87bf6539c9ae8c10e4575097df07985e
size 436459917
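`optimizer.pt`, `pytorch_model.bin`, and the other large binaries are stored as Git LFS pointers (version, sha256 oid, size) rather than inline. If you want the raw weight file instead of going through `from_pretrained`, one possible approach (not part of this commit) is to fetch it with `huggingface_hub` and verify it against the oid recorded above:

```python
import hashlib
from huggingface_hub import hf_hub_download

# Download the LFS-backed weights from the model repo (repo id taken from the README)
path = hf_hub_download(repo_id='CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar26',
                       filename='pytorch_model.bin')

# Hash the file in chunks and compare with the sha256 oid in the pointer above
h = hashlib.sha256()
with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):
        h.update(chunk)
print(h.hexdigest() == '13993e45dfaa873d0e5d7bae7a92fded87bf6539c9ae8c10e4575097df07985e')
```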
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:89a7b2895340bad6dc702f94e4d47001ebab74ee2203746d157540df4e87a0f1
size 326
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tf_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c8a020beaad2573febc55431d8d74fa42d6fb1bb61b3f0399d1afe3ca1611883
size 436592640
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"do_lower_case": false, "special_tokens_map_file": null, "full_tokenizer_file": null}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:939f5f588e71b04f86d857acccd25cc371f2b4a9903387e4b81c4e199f8f59d8
size 1397
vocab.txt
ADDED
The diff for this file is too large to render.