mofawzy committed
Commit c0b8a42
Parent: 77c477f

Add BERT model fine-tuned on the HARD dataset

config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "asafaya/bert-large-arabic",
+ "_num_labels": 2,
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.16.2",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
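config.json records the backbone (asafaya/bert-large-arabic: 24 layers, 16 attention heads, hidden size 1024) with a two-label BertForSequenceClassification head. A minimal inference sketch follows; the local directory name is an assumption, so point it at wherever these checkpoint files actually live.

```python
# Minimal inference sketch for this checkpoint. The directory name below is
# an assumption; use whichever local clone (or Hub repo id) holds these files.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_dir = "./BERT-HARD-balanced"  # hypothetical local path

tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
model.eval()

# "_num_labels" is 2, so the logits have shape (batch_size, 2).
inputs = tokenizer("الفندق نظيف والخدمة ممتازة", return_tensors="pt")  # "the hotel is clean and the service is excellent"
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # predicted label id, 0 or 1
```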
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47e33277c95e6cbe30a488f7d174fc4983a0c3df1805e46371673ca51e478fea
+ size 2693488029
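optimizer.pt and the other large binaries below are stored through Git LFS, so the diff shows only a three-line pointer: the spec version, the SHA-256 of the real payload, and its size in bytes. A sketch for checking a downloaded payload against this pointer (the local file name is an assumption about your clone layout):

```python
# Verify a downloaded LFS payload against the pointer shown above.
# The file name is an assumption; adjust it to your local clone.
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-GB payloads never need to fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

payload = Path("optimizer.pt")
assert payload.stat().st_size == 2693488029, "size mismatch"
assert sha256_of(payload) == (
    "47e33277c95e6cbe30a488f7d174fc4983a0c3df1805e46371673ca51e478fea"
), "oid mismatch"
print("pointer and payload agree")
```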
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89bbbbd6f722c9a0de62650c24f46ea8ed1b71ee078058f7c2ca629dbe84ee9e
+ size 1346791341
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76a963db23e1ffd2f6f4f4df078cdc6ee636c7665596bfab5d059dc3baa67dca
+ size 14503
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19cfba3471e0fe55718aff3c8ffb4d09ef0ba480c0e672e291ea757073d3e490
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "full_tokenizer_file": null, "name_or_path": "asafaya/bert-large-arabic", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
trainer_state.json ADDED
@@ -0,0 +1,88 @@
+ {
+ "best_metric": 0.11405563354492188,
+ "best_model_checkpoint": "BERT-HARD-balanced/checkpoint-1500",
+ "epoch": 1.680672268907563,
+ "global_step": 2000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.2997198879551826e-05,
+ "loss": 0.1682,
+ "step": 500
+ },
+ {
+ "epoch": 0.42,
+ "eval_accuracy": 0.950212866603595,
+ "eval_f1": 0.9498032669607726,
+ "eval_loss": 0.13225148618221283,
+ "eval_precision": 0.9576821351286366,
+ "eval_recall": 0.9420529801324503,
+ "eval_runtime": 6.9677,
+ "eval_samples_per_second": 1213.595,
+ "eval_steps_per_second": 19.088,
+ "step": 500
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 3.5994397759103643e-05,
+ "loss": 0.1337,
+ "step": 1000
+ },
+ {
+ "epoch": 0.84,
+ "eval_accuracy": 0.9571901608325449,
+ "eval_f1": 0.9578481602235678,
+ "eval_loss": 0.12283609807491302,
+ "eval_precision": 0.943348623853211,
+ "eval_recall": 0.9728003784295175,
+ "eval_runtime": 6.9706,
+ "eval_samples_per_second": 1213.091,
+ "eval_steps_per_second": 19.08,
+ "step": 1000
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 2.8991596638655467e-05,
+ "loss": 0.1132,
+ "step": 1500
+ },
+ {
+ "epoch": 1.26,
+ "eval_accuracy": 0.9605014191106906,
+ "eval_f1": 0.9605294256676908,
+ "eval_loss": 0.11405563354492188,
+ "eval_precision": 0.9598488427019367,
+ "eval_recall": 0.9612109744560076,
+ "eval_runtime": 7.0035,
+ "eval_samples_per_second": 1207.4,
+ "eval_steps_per_second": 18.991,
+ "step": 1500
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 2.1988795518207285e-05,
+ "loss": 0.0999,
+ "step": 2000
+ },
+ {
+ "epoch": 1.68,
+ "eval_accuracy": 0.9609744560075686,
+ "eval_f1": 0.9610940815845319,
+ "eval_loss": 0.1178417056798935,
+ "eval_precision": 0.9581570286788904,
+ "eval_recall": 0.9640491958372753,
+ "eval_runtime": 6.9707,
+ "eval_samples_per_second": 1213.078,
+ "eval_steps_per_second": 19.08,
+ "step": 2000
+ }
+ ],
+ "max_steps": 3570,
+ "num_train_epochs": 3,
+ "total_flos": 1.4904145310863872e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
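trainer_state.json captures a run stopped at step 2000 of 3570 (epoch ~1.68 of 3), with evaluation every 500 steps and the best eval_loss (0.1141) at checkpoint-1500. The logged learning rates are consistent with a linear decay from 5e-5 with no warmup, e.g. 5e-5 × (1 − 500/3570) ≈ 4.2997e-05 at step 500. The training script itself is not part of this commit, so the following compute_metrics is a hedged reconstruction inferred from the logged eval_* keys:

```python
# Hedged reconstruction of the metric function behind the eval_accuracy /
# eval_f1 / eval_precision / eval_recall fields above; the actual training
# script is not in this commit, so treat this as an assumption.
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="binary"
    )
    # Trainer prefixes these keys with "eval_" when logging.
    return {
        "accuracy": accuracy_score(labels, preds),
        "f1": f1,
        "precision": precision,
        "recall": recall,
    }
```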
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a8a5638fed315463a869a97c283cb7ba57b1e142b1bbb12e94d66133ac47bd2
+ size 2991
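training_args.bin is a pickled transformers TrainingArguments object (2991 bytes here), and it can be inspected directly. A sketch, with the usual caveat that unpickling executes code and should only be done on files you trust:

```python
# Inspect the pickled TrainingArguments. Unpickling runs arbitrary code, so
# only load files you trust. weights_only=False is needed on recent torch
# versions, where the default changed to tensors-only loading.
import torch

args = torch.load("training_args.bin", weights_only=False)  # assumed local path
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```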
vocab.txt ADDED
The diff for this file is too large to render.