mofawzy committed on
Commit 80ff749
1 Parent(s): 5080b3e

add fine-tuned model on ASTD dataset

config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "/home/jupyter/hub_models/BERT-ASTD",
+ "_num_labels": 2,
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.2,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.2,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.16.2",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
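
config.json describes a 24-layer, 1024-hidden BERT encoder with a single-label, two-class sequence-classification head. A minimal inference sketch in Python, assuming the checkpoint is reachable under a hub id or local path; the id "mofawzy/BERT-ASTD" used here is an assumption, not something this commit confirms:

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# "mofawzy/BERT-ASTD" is a hypothetical repo id; substitute the real hub id
# or the local checkpoint directory containing this config.json.
model_id = "mofawzy/BERT-ASTD"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)  # 2 labels
model.eval()

# Tokenize an example Arabic tweet, capped at the 512-token position limit.
inputs = tokenizer("نص التغريدة هنا", return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 2)
print(logits.argmax(dim=-1).item())  # predicted class index (0 or 1)
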
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e0a9d2d59706c382fcb8ceaa0560dab95cae1da9b94d0146ca925ffaf9ab139
+ size 2693487581
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:679663e58a1f5bcbfe9f136ef4d9ac5e837c926e86c6a971f459a7eb1eebb4b2
+ size 1346791341
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9caadf401dbc9622048b88b02752277bf73c4e79c6823cdc5ade347a01c25ad5
+ size 14503
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59c533a0c7aff754054f92578b8374262499dfed0785fe7cf84de2d22ab19937
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bb0c7218e463fd413fe75ba1cc94868c35eac2f44e8f5bfcb401262bdbc5951
+ size 1347181680
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "full_tokenizer_file": null, "name_or_path": "asafaya/bert-large-arabic", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
trainer_state.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "best_metric": 0.4706691801548004,
+ "best_model_checkpoint": "BERT-ASTD-b/checkpoint-100",
+ "epoch": 7.142857142857143,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 7.14,
+ "learning_rate": 3.2142857142857144e-05,
+ "loss": 0.1503,
+ "step": 100
+ },
+ {
+ "epoch": 7.14,
+ "eval_accuracy": 0.9061032863849765,
+ "eval_f1": 0.9,
+ "eval_loss": 0.4706691801548004,
+ "eval_precision": 0.9574468085106383,
+ "eval_recall": 0.8490566037735849,
+ "eval_runtime": 1.5526,
+ "eval_samples_per_second": 137.187,
+ "eval_steps_per_second": 2.576,
+ "step": 100
+ }
+ ],
+ "max_steps": 280,
+ "num_train_epochs": 20,
+ "total_flos": 463144405075800.0,
+ "trial_name": null,
+ "trial_params": null
+ }
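
trainer_state.json records the best checkpoint so far (step 100 of a planned 280, roughly epoch 7.1 of 20) together with its evaluation metrics. As a quick sanity check, the logged eval_f1 of 0.9 follows from the logged precision and recall via F1 = 2PR / (P + R):

# Consistency check on the logged metrics: F1 = 2PR / (P + R).
precision = 0.9574468085106383
recall = 0.8490566037735849
f1 = 2 * precision * recall / (precision + recall)
print(round(f1, 4))  # 0.9, matching the logged eval_f1
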
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cfa7a831db13af49f0a0ceea4c936d844e4491dd820d27b0d3ee922e7c92d08
+ size 2991
vocab.txt ADDED
The diff for this file is too large to render. See raw diff