dimasik87 committed (verified)
Commit f6fcd99 · 1 Parent(s): 8de074d

Training in progress, step 12, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7490742c318434f985399c767a1c602f55fb6a0dfe00dcfd110170199aad847e
+oid sha256:6240728159d513faa05abf6b5d7ed2406f17cba8a6ca15979a18ce62688bde0f
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ae79ea085872aedd87bc5a62cb5c44bd8029427fd8eebd823d147f0676a7f312
+oid sha256:e3647ec73c1e57bf25af24da7e11057a0a6dc36496110229d4e9f1d78ece690b
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d73744cc9da6cf4515a44034b122856d46a3fed397efa1e527493ee573696292
+oid sha256:a9273107ed22f2b0460ae677b61e5921af160c5c9d230441f93d948a850671aa
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26a93b2a1f4b5368650119fe6e0d6eec6d19cda6badeba4d21943ab48964fa00
+oid sha256:68888158764ed5e658b457a541f86335ea31432325308674d2962aa98e037fa4
 size 1064
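Each of the four checkpoint files above is stored as a Git LFS pointer: the pointer records only the spec version, the SHA-256 oid of the real payload, and its byte size, so a new oid with an unchanged size means the binary content was rewritten in place. A minimal sketch for checking a local copy against its pointer, assuming the actual payloads have already been fetched (e.g. with `git lfs pull`):

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Stream a file in chunks and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid taken from the updated pointer for adapter_model.safetensors.
expected = "6240728159d513faa05abf6b5d7ed2406f17cba8a6ca15979a18ce62688bde0f"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("match" if actual == expected else "mismatch", actual)
```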
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.002395388876412905,
+  "epoch": 0.003593083314619358,
   "eval_steps": 4,
-  "global_step": 8,
+  "global_step": 12,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -87,6 +87,42 @@
       "eval_samples_per_second": 8.267,
       "eval_steps_per_second": 8.267,
       "step": 8
+    },
+    {
+      "epoch": 0.002694812485964518,
+      "grad_norm": 12.036263465881348,
+      "learning_rate": 0.00018,
+      "loss": 7.8039,
+      "step": 9
+    },
+    {
+      "epoch": 0.0029942360955161314,
+      "grad_norm": 13.046463012695312,
+      "learning_rate": 0.0002,
+      "loss": 7.8042,
+      "step": 10
+    },
+    {
+      "epoch": 0.0032936597050677446,
+      "grad_norm": 24.538808822631836,
+      "learning_rate": 0.0001996917333733128,
+      "loss": 8.7757,
+      "step": 11
+    },
+    {
+      "epoch": 0.003593083314619358,
+      "grad_norm": 26.114961624145508,
+      "learning_rate": 0.00019876883405951377,
+      "loss": 5.6577,
+      "step": 12
+    },
+    {
+      "epoch": 0.003593083314619358,
+      "eval_loss": 5.31056022644043,
+      "eval_runtime": 85.1933,
+      "eval_samples_per_second": 8.264,
+      "eval_steps_per_second": 8.264,
+      "step": 12
     }
   ],
   "logging_steps": 1,
@@ -106,7 +142,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2967549134241792.0,
+  "total_flos": 4451323701362688.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null