{
  "best_metric": 0.4671708345413208,
  "best_model_checkpoint": "./Vit-GPT2-COCO2017Flickr-80k-08/checkpoint-3000",
  "epoch": 0.6,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "grad_norm": 0.9981909990310669,
      "learning_rate": 4.8333333333333334e-05,
      "loss": 0.3691,
      "step": 500
    },
    {
      "epoch": 0.1,
      "eval_gen_len": 11.77575,
      "eval_loss": 0.4729623794555664,
      "eval_rouge1": 39.8086,
      "eval_rouge2": 14.7674,
      "eval_rougeL": 36.1546,
      "eval_rougeLsum": 36.1739,
      "eval_runtime": 459.5392,
      "eval_samples_per_second": 8.704,
      "eval_steps_per_second": 2.176,
      "step": 500
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.8220647573471069,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.3706,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "eval_gen_len": 11.59775,
      "eval_loss": 0.47387805581092834,
      "eval_rouge1": 39.8972,
      "eval_rouge2": 14.9064,
      "eval_rougeL": 36.1193,
      "eval_rougeLsum": 36.138,
      "eval_runtime": 446.0956,
      "eval_samples_per_second": 8.967,
      "eval_steps_per_second": 2.242,
      "step": 1000
    },
    {
      "epoch": 0.3,
      "grad_norm": 1.052058219909668,
      "learning_rate": 4.5e-05,
      "loss": 0.3709,
      "step": 1500
    },
    {
      "epoch": 0.3,
      "eval_gen_len": 11.71025,
      "eval_loss": 0.47592678666114807,
      "eval_rouge1": 39.9874,
      "eval_rouge2": 14.8528,
      "eval_rougeL": 36.3155,
      "eval_rougeLsum": 36.3317,
      "eval_runtime": 466.5502,
      "eval_samples_per_second": 8.574,
      "eval_steps_per_second": 2.143,
      "step": 1500
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.8642559051513672,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.3721,
      "step": 2000
    },
    {
      "epoch": 0.4,
      "eval_gen_len": 12.175,
      "eval_loss": 0.4677760601043701,
      "eval_rouge1": 39.7192,
      "eval_rouge2": 14.5844,
      "eval_rougeL": 35.8447,
      "eval_rougeLsum": 35.8728,
      "eval_runtime": 455.6895,
      "eval_samples_per_second": 8.778,
      "eval_steps_per_second": 2.194,
      "step": 2000
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.8769400715827942,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.3655,
      "step": 2500
    },
    {
      "epoch": 0.5,
      "eval_gen_len": 11.90025,
      "eval_loss": 0.4683643877506256,
      "eval_rouge1": 40.3132,
      "eval_rouge2": 15.1157,
      "eval_rougeL": 36.5749,
      "eval_rougeLsum": 36.5823,
      "eval_runtime": 449.7304,
      "eval_samples_per_second": 8.894,
      "eval_steps_per_second": 2.224,
      "step": 2500
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.8613843321800232,
      "learning_rate": 4e-05,
      "loss": 0.3623,
      "step": 3000
    },
    {
      "epoch": 0.6,
      "eval_gen_len": 12.025,
      "eval_loss": 0.4671708345413208,
      "eval_rouge1": 40.1643,
      "eval_rouge2": 14.978,
      "eval_rougeL": 36.3002,
      "eval_rougeLsum": 36.3232,
      "eval_runtime": 451.1814,
      "eval_samples_per_second": 8.866,
      "eval_steps_per_second": 2.216,
      "step": 3000
    }
  ],
  "logging_steps": 500,
  "max_steps": 15000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 8.662266773766144e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}