Blaise-g committed on
Commit
2da61fe
1 Parent(s): e4771bb

Upload 5 files

Files changed (5) (see the download sketch after this list)
  1. optimizer.pt +3 -0
  2. rng_state.pth +3 -0
  3. scaler.pt +3 -0
  4. scheduler.pt +3 -0
  5. trainer_state.json +144 -0
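The five files above are the parts of a `transformers` Trainer checkpoint that live outside the model weights: optimizer and learning-rate-scheduler state, the mixed-precision gradient scaler, the RNG states, and the trainer's bookkeeping and metric log. Below is a minimal sketch of pulling them from the Hub with `huggingface_hub`; the `repo_id` is a placeholder, since the commit page does not name the repository.

```python
# A minimal sketch, assuming these files sit at the top level of a Hub repo.
# The repo_id below is a hypothetical placeholder, not taken from this commit.
from huggingface_hub import hf_hub_download

checkpoint_files = [
    "optimizer.pt",        # optimizer state (e.g. AdamW moment estimates)
    "rng_state.pth",       # Python/NumPy/PyTorch RNG states for exact resumption
    "scaler.pt",           # gradient-scaler state (mixed-precision training)
    "scheduler.pt",        # learning-rate scheduler state
    "trainer_state.json",  # step/epoch counters plus the metric log shown below
]

local_paths = {
    name: hf_hub_download(repo_id="Blaise-g/placeholder-repo", filename=name)
    for name in checkpoint_files
}
print(local_paths)
```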
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab790d3e5298ae9db09e71b1872e1b3b9875e3c39a778816613a20f3276a1903
+ size 5885590
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7db9feae642274caf5f2085e4241f4b451df45c81a3c5c9b1aa3c8dd8e5ca155
+ size 14503
scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04c72eb02261b9afad4d2943f2907af0c5e11074ebf8fdf590c584dbcaf959d1
+ size 559
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2de9f97e4e42495ec8d46f258233cea6f7a38b9c611fdd15b8cc05ae6c8e1dc7
+ size 623
trainer_state.json ADDED
@@ -0,0 +1,144 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.2248995983935744,
+ "global_step": 69,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.13,
+ "learning_rate": 0.0008709677419354839,
+ "loss": 2.7331,
+ "step": 4
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0007419354838709678,
+ "loss": 2.6157,
+ "step": 8
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.0006129032258064516,
+ "loss": 2.566,
+ "step": 12
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 0.0004838709677419355,
+ "loss": 2.48,
+ "step": 16
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 0.0003548387096774194,
+ "loss": 2.3692,
+ "step": 20
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 0.00022580645161290321,
+ "loss": 2.2791,
+ "step": 24
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.0006989247311827958,
+ "loss": 2.2783,
+ "step": 28
+ },
+ {
+ "epoch": 1.0,
+ "eval_gen_len": 20.55,
+ "eval_loss": 2.021012306213379,
+ "eval_rouge1": 32.3793,
+ "eval_rouge2": 12.6427,
+ "eval_rougeL": 27.4027,
+ "eval_rougeLsum": 27.3,
+ "eval_runtime": 3629.2928,
+ "eval_samples_per_second": 0.028,
+ "eval_steps_per_second": 0.009,
+ "step": 31
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 0.0006559139784946236,
+ "loss": 2.2677,
+ "step": 32
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.0006129032258064516,
+ "loss": 1.8913,
+ "step": 36
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 0.0005698924731182796,
+ "loss": 1.9234,
+ "step": 40
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 0.0005268817204301075,
+ "loss": 1.8524,
+ "step": 44
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 0.0004838709677419355,
+ "loss": 1.8549,
+ "step": 48
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 0.00044086021505376343,
+ "loss": 1.9571,
+ "step": 52
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 0.0003978494623655914,
+ "loss": 1.8606,
+ "step": 56
+ },
+ {
+ "epoch": 1.93,
+ "learning_rate": 0.0003548387096774194,
+ "loss": 1.791,
+ "step": 60
+ },
+ {
+ "epoch": 2.0,
+ "eval_gen_len": 21.6,
+ "eval_loss": 1.9703446626663208,
+ "eval_rouge1": 34.5495,
+ "eval_rouge2": 12.6789,
+ "eval_rougeL": 28.3741,
+ "eval_rougeLsum": 28.4386,
+ "eval_runtime": 3780.972,
+ "eval_samples_per_second": 0.026,
+ "eval_steps_per_second": 0.009,
+ "step": 62
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 0.0003118279569892473,
+ "loss": 1.7687,
+ "step": 64
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 0.00026881720430107527,
+ "loss": 1.5525,
+ "step": 68
+ }
+ ],
+ "max_steps": 93,
+ "num_train_epochs": 3,
+ "total_flos": 1.813836532618199e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
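trainer_state.json is plain JSON, so the run's progress and the evaluation rounds logged above can be inspected without loading `transformers` at all. A minimal sketch, assuming the file has been downloaded into the current directory:

```python
# A minimal sketch: read the Trainer state shown in the diff above and summarise it.
# Assumes trainer_state.json is in the current working directory.
import json
from pathlib import Path

state = json.loads(Path("trainer_state.json").read_text())

print(f"step {state['global_step']}/{state['max_steps']}, "
      f"epoch {state['epoch']:.2f}/{state['num_train_epochs']}")

# Evaluation records in log_history are the entries carrying eval_* keys.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"  epoch {entry['epoch']:.0f}: "
              f"eval_loss={entry['eval_loss']:.3f}  "
              f"rouge1={entry['eval_rouge1']:.2f}  "
              f"rouge2={entry['eval_rouge2']:.2f}  "
              f"rougeL={entry['eval_rougeL']:.2f}")
```

With the state in this commit, the script reports step 69/93 at epoch 2.22/3 and the two evaluations logged so far (eval_loss 2.021 → 1.970, ROUGE-1 32.38 → 34.55).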