Alireza1044 committed
Commit a12ece8
1 Parent(s): 9ca6722
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ language:
+ - en
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - glue
+ metrics:
+ - accuracy
+ - f1
+ model_index:
+ - name: mrpc
+   results:
+   - task:
+       name: Text Classification
+       type: text-classification
+     dataset:
+       name: GLUE MRPC
+       type: glue
+       args: mrpc
+     metric:
+       name: F1
+       type: f1
+       value: 0.901060070671378
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # mrpc
+
+ This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on the GLUE MRPC dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4171
+ - Accuracy: 0.8627
+ - F1: 0.9011
+ - Combined Score: 0.8819
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 32
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 4.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.9.0
+ - Pytorch 1.9.0+cu102
+ - Datasets 1.10.2
+ - Tokenizers 0.10.3
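
The model card above lists metrics and hyperparameters but no usage snippet. Below is a minimal inference sketch; the repository id `Alireza1044/albert-base-v2-mrpc` is an assumption inferred from the committer name and task, so substitute this repo's actual id if it differs.

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

# Repo id is an assumption based on the committer and task; replace with the actual id if different.
model_id = "Alireza1044/albert-base-v2-mrpc"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# MRPC is a sentence-pair paraphrase task, so the tokenizer receives two sentences.
sentence1 = "The company shares rose 3% after the announcement."
sentence2 = "Shares of the company gained three percent following the news."
inputs = tokenizer(sentence1, sentence2, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# For GLUE MRPC, label 1 conventionally means "paraphrase / equivalent".
pred = logits.argmax(dim=-1).item()
print("paraphrase" if pred == 1 else "not paraphrase")
```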
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.8627450980392157,
+ "eval_combined_score": 0.8819025843552969,
+ "eval_f1": 0.901060070671378,
+ "eval_loss": 0.41709813475608826,
+ "eval_runtime": 3.8487,
+ "eval_samples": 408,
+ "eval_samples_per_second": 106.011,
+ "eval_steps_per_second": 13.251,
+ "train_loss": 0.2594421552575153,
+ "train_runtime": 339.0573,
+ "train_samples": 3668,
+ "train_samples_per_second": 43.273,
+ "train_steps_per_second": 1.357
+ }
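
For reference, the `eval_combined_score` above appears to be the arithmetic mean of accuracy and F1, as computed by the GLUE fine-tuning script: (0.8627450980392157 + 0.901060070671378) / 2 = 0.8819025843552969, i.e. the 0.8819 quoted in the model card.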
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+ "_name_or_path": "albert-base-v2",
+ "architectures": [
+ "AlbertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0,
+ "bos_token_id": 2,
+ "classifier_dropout_prob": 0.1,
+ "down_scale_factor": 1,
+ "embedding_size": 128,
+ "eos_token_id": 3,
+ "finetuning_task": "mrpc",
+ "gap_size": 0,
+ "hidden_act": "gelu_new",
+ "hidden_dropout_prob": 0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "inner_group_num": 1,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "albert",
+ "net_structure_type": 0,
+ "num_attention_heads": 12,
+ "num_hidden_groups": 1,
+ "num_hidden_layers": 12,
+ "num_memory_blocks": 0,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.9.0",
+ "type_vocab_size": 2,
+ "vocab_size": 30000
+ }
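
The config above reflects ALBERT's two distinguishing design choices: factorized embeddings (embedding_size 128 projected up to hidden_size 768) and cross-layer parameter sharing (12 hidden layers in a single hidden group). A small sketch to inspect this from code, again assuming the repository id used earlier:

```python
from transformers import AutoConfig

# Repo id assumed as above; a local path to this checkpoint also works.
config = AutoConfig.from_pretrained("Alireza1044/albert-base-v2-mrpc")

print(config.model_type)                                    # "albert"
print(config.embedding_size, config.hidden_size)            # 128, 768 -> factorized embedding parameterization
print(config.num_hidden_layers, config.num_hidden_groups)   # 12, 1 -> all layers share one parameter group
```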
eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.8627450980392157,
+ "eval_combined_score": 0.8819025843552969,
+ "eval_f1": 0.901060070671378,
+ "eval_loss": 0.41709813475608826,
+ "eval_runtime": 3.8487,
+ "eval_samples": 408,
+ "eval_samples_per_second": 106.011,
+ "eval_steps_per_second": 13.251
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd6c004c9dadcfe3c11463044cd796f040aa658d1270a6112c6e7591a4ee9701
+ size 46755537
runs/Jul26_11-40-40_578c7831d7ba/1627299664.304798/events.out.tfevents.1627299664.578c7831d7ba.479.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88aec1602fbfad5be6d446ddb16a93d6482ce42e6e2af2496bfef3cb6086011d
+ size 4167
runs/Jul26_11-40-40_578c7831d7ba/events.out.tfevents.1627299664.578c7831d7ba.479.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b0b683bf9efd9fa6aeede1420b866e763cb422b42eb347e7e776ad0a2f55eb9
+ size 3470
runs/Jul26_11-40-40_578c7831d7ba/events.out.tfevents.1627300007.578c7831d7ba.479.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76a9a6b73144bbd8e36f47724a09490baaa6e593d76673d9d4ece1b6693bad56
+ size 467
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "remove_space": true, "keep_accents": false, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "albert-base-v2", "tokenizer_class": "AlbertTokenizer"}
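
The two tokenizer files above define how a sentence pair is wrapped for ALBERT: [CLS] at the start, [SEP] between and after the sentences, lower-cased input, and a 512-token limit. A quick way to see this, with the same assumed repo id:

```python
from transformers import AutoTokenizer

# Repo id assumed as in the earlier sketches.
tokenizer = AutoTokenizer.from_pretrained("Alireza1044/albert-base-v2-mrpc")

encoded = tokenizer("He bought a car.", "He purchased a car.")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
# Expected layout: ['[CLS]', ..., '[SEP]', ..., '[SEP]'] with lower-cased SentencePiece pieces
print(tokenizer.model_max_length)  # 512
```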
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 4.0,
+ "train_loss": 0.2594421552575153,
+ "train_runtime": 339.0573,
+ "train_samples": 3668,
+ "train_samples_per_second": 43.273,
+ "train_steps_per_second": 1.357
+ }
trainer_state.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.0,
+ "global_step": 460,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 4.0,
+ "step": 460,
+ "total_flos": 87658237255680.0,
+ "train_loss": 0.2594421552575153,
+ "train_runtime": 339.0573,
+ "train_samples_per_second": 43.273,
+ "train_steps_per_second": 1.357
+ }
+ ],
+ "max_steps": 460,
+ "num_train_epochs": 4,
+ "total_flos": 87658237255680.0,
+ "trial_name": null,
+ "trial_params": null
+ }
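
As a sanity check, the `global_step` of 460 is consistent with the card's hyperparameters: ceil(3668 train samples / batch size 32) = 115 optimizer steps per epoch, and 115 × 4 epochs = 460, assuming a single device and no gradient accumulation (neither of which the files here state explicitly).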
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12c3c0263264d6de9df02ed48cb951575b338159f63ee57de0c067eded15facf
+ size 2607
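
training_args.bin stores the serialized TrainingArguments for this run. Below is a hypothetical reconstruction built only from the values listed in the model card; the output_dir and any unlisted options are assumptions, and the committed file may contain additional non-default settings.

```python
from transformers import TrainingArguments

# Hypothetical reconstruction from the model card; not read from training_args.bin itself.
# output_dir is an assumption; the Adam betas/epsilon in the card match the TrainingArguments defaults.
training_args = TrainingArguments(
    output_dir="mrpc",
    learning_rate=2e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=4.0,
)

# The committed file itself is a pickled TrainingArguments object and can be inspected with:
# import torch; print(torch.load("training_args.bin"))
```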