ShengdingHu committed
Commit 577295d · 1 parent: a089475

Training in progress, epoch 1
all_results.json CHANGED
@@ -1,14 +1,14 @@
 {
     "epoch": 10.0,
-    "eval_accuracy": 0.7220216606498195,
-    "eval_loss": 0.5765948295593262,
-    "eval_runtime": 0.2028,
+    "eval_accuracy": 0.5270758122743683,
+    "eval_loss": 0.6930166482925415,
+    "eval_runtime": 0.2259,
     "eval_samples": 277,
-    "eval_samples_per_second": 1365.807,
-    "eval_steps_per_second": 14.792,
-    "train_loss": 0.5880965404021434,
-    "train_runtime": 74.518,
+    "eval_samples_per_second": 1226.394,
+    "eval_steps_per_second": 13.282,
+    "train_loss": 0.6997218401004106,
+    "train_runtime": 107.0767,
     "train_samples": 2490,
-    "train_samples_per_second": 334.147,
-    "train_steps_per_second": 10.467
+    "train_samples_per_second": 232.544,
+    "train_steps_per_second": 7.284
 }
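For reference, a minimal sketch (not part of this commit) of reading the metrics file back; the values in the comments are the post-commit numbers from the diff above.

import json

# all_results.json combines the train_* and eval_* metrics shown in the diff above.
with open("all_results.json") as f:
    results = json.load(f)

print(f"eval_accuracy: {results['eval_accuracy']:.4f}")   # 0.5271 after this commit
print(f"train_runtime: {results['train_runtime']:.1f} s")  # 107.1 s after this commit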
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "../../../../plm_cache/roberta-base",
+  "_name_or_path": "../../../../plm_cache/roberta-large",
   "architectures": [
     "RobertaForSequenceClassification"
   ],
@@ -10,13 +10,13 @@
   "finetuning_task": "rte",
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
+  "hidden_size": 1024,
   "id2label": {
     "0": "entailment",
     "1": "not_entailment"
   },
   "initializer_range": 0.02,
-  "intermediate_size": 3072,
+  "intermediate_size": 4096,
   "label2id": {
     "entailment": 0,
     "not_entailment": 1
@@ -24,8 +24,8 @@
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
   "model_type": "roberta",
-  "num_attention_heads": 12,
-  "num_hidden_layers": 12,
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
eval_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 10.0,
-    "eval_accuracy": 0.7220216606498195,
-    "eval_loss": 0.5765948295593262,
-    "eval_runtime": 0.2028,
+    "eval_accuracy": 0.5270758122743683,
+    "eval_loss": 0.6930166482925415,
+    "eval_runtime": 0.2259,
     "eval_samples": 277,
-    "eval_samples_per_second": 1365.807,
-    "eval_steps_per_second": 14.792
+    "eval_samples_per_second": 1226.394,
+    "eval_steps_per_second": 13.282
 }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a89c652f2d16b97921600702b2a9695bef86f5720d7a2384eaf6268f5196a9b2
-size 2704633
+oid sha256:bc154e75055ed4ffe4f6fee8eb3b2c83fd827575bcbab6fd3b65b47693d12588
+size 17106681
runs/Jan29_15-48-48_node4/events.out.tfevents.1643442552.node4 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4a9718edfa67d6d7154739ab7868ff2be8fa78bf35e6f7ba4849484bd95bde41
-size 6573
+oid sha256:61d693dfa0eb2f0e4882ba26a780134f70dae796a68b3cdf5bc8eaf2a81b26cc
+size 7250
runs/Jan29_15-48-48_node4/events.out.tfevents.1643442660.node4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a37a54639e8f663b9e48afc468172b55940869c6f34d503fa8f0e51752ff238d
+size 363
runs/Jan29_16-31-17_node4/1643445112.230297/events.out.tfevents.1643445112.node4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82d225b734cb18ede058b02137259711497603f297b73c1984d9fc069f368086
+size 4585
runs/Jan29_16-31-17_node4/events.out.tfevents.1643445112.node4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:583a5a78f47360afe9e409cd945eeec5395fbe3831f7bd1c7cec649c5bd82552
+size 3517
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "trim_offsets": true, "special_tokens_map_file": null, "name_or_path": "../../../../plm_cache/roberta-base", "tokenizer_class": "RobertaTokenizer"}
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "trim_offsets": true, "special_tokens_map_file": null, "name_or_path": "../../../../plm_cache/roberta-large", "tokenizer_class": "RobertaTokenizer"}
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 10.0,
-    "train_loss": 0.5880965404021434,
-    "train_runtime": 74.518,
+    "train_loss": 0.6997218401004106,
+    "train_runtime": 107.0767,
     "train_samples": 2490,
-    "train_samples_per_second": 334.147,
-    "train_steps_per_second": 10.467
+    "train_samples_per_second": 232.544,
+    "train_steps_per_second": 7.284
 }
trainer_state.json CHANGED
@@ -1,6 +1,6 @@
 {
-  "best_metric": 0.7220216606498195,
-  "best_model_checkpoint": "outputs/lora/roberta-base/v2/rte/checkpoint-780",
+  "best_metric": 0.5270758122743683,
+  "best_model_checkpoint": "outputs/lora/roberta-base/v2/rte/checkpoint-234",
   "epoch": 10.0,
   "global_step": 780,
   "is_hyper_param_search": false,
@@ -10,112 +10,112 @@
     {
       "epoch": 1.0,
       "eval_accuracy": 0.4729241877256318,
-      "eval_loss": 0.7042641639709473,
-      "eval_runtime": 0.2314,
-      "eval_samples_per_second": 1197.187,
-      "eval_steps_per_second": 12.966,
+      "eval_loss": 0.7130928039550781,
+      "eval_runtime": 0.2073,
+      "eval_samples_per_second": 1336.057,
+      "eval_steps_per_second": 14.47,
       "step": 78
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.5776173285198556,
-      "eval_loss": 0.6811862587928772,
-      "eval_runtime": 0.2009,
-      "eval_samples_per_second": 1378.756,
-      "eval_steps_per_second": 14.932,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.6993798613548279,
+      "eval_runtime": 0.2002,
+      "eval_samples_per_second": 1383.767,
+      "eval_steps_per_second": 14.987,
       "step": 156
     },
     {
       "epoch": 3.0,
-      "eval_accuracy": 0.6028880866425993,
-      "eval_loss": 0.6594843864440918,
-      "eval_runtime": 0.2361,
-      "eval_samples_per_second": 1173.317,
-      "eval_steps_per_second": 12.707,
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.6930166482925415,
+      "eval_runtime": 0.2199,
+      "eval_samples_per_second": 1259.694,
+      "eval_steps_per_second": 13.643,
       "step": 234
     },
     {
       "epoch": 4.0,
-      "eval_accuracy": 0.6714801444043321,
-      "eval_loss": 0.6216505765914917,
-      "eval_runtime": 0.1961,
-      "eval_samples_per_second": 1412.672,
-      "eval_steps_per_second": 15.3,
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.6923182606697083,
+      "eval_runtime": 0.201,
+      "eval_samples_per_second": 1378.119,
+      "eval_steps_per_second": 14.925,
       "step": 312
     },
     {
       "epoch": 5.0,
-      "eval_accuracy": 0.6787003610108303,
-      "eval_loss": 0.6077037453651428,
-      "eval_runtime": 0.2048,
-      "eval_samples_per_second": 1352.477,
-      "eval_steps_per_second": 14.648,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.696506917476654,
+      "eval_runtime": 0.2195,
+      "eval_samples_per_second": 1262.144,
+      "eval_steps_per_second": 13.669,
       "step": 390
     },
     {
       "epoch": 6.0,
-      "eval_accuracy": 0.6642599277978339,
-      "eval_loss": 0.6047545075416565,
-      "eval_runtime": 0.1975,
-      "eval_samples_per_second": 1402.222,
-      "eval_steps_per_second": 15.187,
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.6918743252754211,
+      "eval_runtime": 0.2123,
+      "eval_samples_per_second": 1304.889,
+      "eval_steps_per_second": 14.132,
       "step": 468
     },
     {
       "epoch": 6.41,
-      "learning_rate": 0.00019099590723055935,
-      "loss": 0.6366,
+      "learning_rate": 0.001145975443383356,
+      "loss": 0.7031,
       "step": 500
     },
     {
       "epoch": 7.0,
-      "eval_accuracy": 0.7111913357400722,
-      "eval_loss": 0.5592861771583557,
-      "eval_runtime": 0.1967,
-      "eval_samples_per_second": 1408.382,
-      "eval_steps_per_second": 15.253,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.6935069561004639,
+      "eval_runtime": 0.2336,
+      "eval_samples_per_second": 1185.748,
+      "eval_steps_per_second": 12.842,
       "step": 546
     },
     {
       "epoch": 8.0,
-      "eval_accuracy": 0.703971119133574,
-      "eval_loss": 0.5585260987281799,
-      "eval_runtime": 0.2088,
-      "eval_samples_per_second": 1326.752,
-      "eval_steps_per_second": 14.369,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.6938801407814026,
+      "eval_runtime": 0.2086,
+      "eval_samples_per_second": 1327.87,
+      "eval_steps_per_second": 14.381,
       "step": 624
     },
     {
       "epoch": 9.0,
-      "eval_accuracy": 0.7111913357400722,
-      "eval_loss": 0.571499764919281,
-      "eval_runtime": 0.2055,
-      "eval_samples_per_second": 1347.626,
-      "eval_steps_per_second": 14.595,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.6936783194541931,
+      "eval_runtime": 0.2108,
+      "eval_samples_per_second": 1313.785,
+      "eval_steps_per_second": 14.229,
       "step": 702
     },
     {
       "epoch": 10.0,
-      "eval_accuracy": 0.7220216606498195,
-      "eval_loss": 0.5765948295593262,
-      "eval_runtime": 0.1974,
-      "eval_samples_per_second": 1403.091,
-      "eval_steps_per_second": 15.196,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.6938663125038147,
+      "eval_runtime": 0.2046,
+      "eval_samples_per_second": 1353.687,
+      "eval_steps_per_second": 14.661,
       "step": 780
     },
     {
       "epoch": 10.0,
       "step": 780,
-      "total_flos": 1638956800972800.0,
-      "train_loss": 0.5880965404021434,
-      "train_runtime": 74.518,
-      "train_samples_per_second": 334.147,
-      "train_steps_per_second": 10.467
+      "total_flos": 1639048592332800.0,
+      "train_loss": 0.6997218401004106,
+      "train_runtime": 107.0767,
+      "train_samples_per_second": 232.544,
+      "train_steps_per_second": 7.284
     }
   ],
   "max_steps": 780,
   "num_train_epochs": 10,
-  "total_flos": 1638956800972800.0,
+  "total_flos": 1639048592332800.0,
   "trial_name": null,
   "trial_params": null
 }
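A minimal sketch, not part of this commit, of pulling the per-epoch evaluation numbers back out of the updated trainer_state.json (keys exactly as shown in the diff above):

import json

# log_history holds one dict per logging or evaluation event.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:>5}: eval_accuracy {entry['eval_accuracy']:.4f}")

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])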
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d942a12835e627c5832563e175e275a68f55113feef1e6c7484e8dc0409bd4fa
+oid sha256:d3459cc4e350849119f9397e1e235b5df88dabf546417d4c780d5756a1941640
 size 2991