Hanzalwi committed
Commit defeea5
1 parent: d21e148

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eccdbedcfd02db37fab2c321abc8799e0c738813d53cd749fc58e302c427de5f
+oid sha256:f6a65a2cec0b98abb941a47c278d4fd01a73be7e17a33fd19e7ef329bf934624
 size 6298560
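
The adapter_model.safetensors pointer above is only about 6.3 MB, which matches a PEFT/LoRA adapter rather than full model weights. A minimal sketch of loading it, assuming a causal-LM base model (the base model name below is a placeholder; it is not recorded in this commit):

# Minimal sketch: load the adapter saved in last-checkpoint/ on top of its base model.
# Assumes this is a PEFT/LoRA adapter and that an adapter_config.json sits next to
# adapter_model.safetensors (not shown in this diff). BASE_MODEL is hypothetical.
from transformers import AutoModelForCausalLM
from peft import PeftModel

BASE_MODEL = "your-base-model"        # placeholder; not recorded in this commit
ADAPTER_DIR = "last-checkpoint"       # directory holding adapter_model.safetensors

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
model = PeftModel.from_pretrained(base, ADAPTER_DIR)  # attaches the ~6.3 MB adapter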
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0c7856033e6995c0f48f6c31356e3e3ec2f58212d0e3a1afffe6ecfbd088cf5
+oid sha256:79ea1756f97a900deefc42d09a070222a7264e5f683f195ebe22e4d55cb14af1
 size 12611145
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab796cb22d1f99e1a64175a4124c719d63af93d54a8325b07b8b479b7426620f
+oid sha256:3ac0a8b470048cdb4c96752220e831580920123f93d34998d2eff45f95da23c8
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5efc416a6883409dd7ab6f5c779e107c7c2baa7af6e12ed9fbd9dd73b8b20784
+oid sha256:9c1a2ac1c11599601eeac95feb1dbfd49ec5c625e61dcce18b3f094491f9cf2d
 size 627
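
The four diffs above touch Git LFS pointer files only: each pointer records a version line, the SHA-256 oid of the stored object, and its byte size, and this commit just swaps the oid. A small sketch (paths illustrative) of checking a downloaded checkpoint file against its pointer:

# Minimal sketch: verify a fetched file against the oid/size in its LFS pointer.
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """True if the file's SHA-256 digest and byte size match the pointer lines."""
    data = Path(path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Values taken from the new scheduler.pt pointer in this commit.
print(verify_lfs_object(
    "last-checkpoint/scheduler.pt",
    "9c1a2ac1c11599601eeac95feb1dbfd49ec5c625e61dcce18b3f094491f9cf2d",
    627,
))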
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.273476243019104,
-  "best_model_checkpoint": "./outputs/checkpoint-500",
-  "epoch": 0.6666666666666666,
+  "best_metric": 1.2602167129516602,
+  "best_model_checkpoint": "./outputs/checkpoint-600",
+  "epoch": 0.8,
   "eval_steps": 100,
-  "global_step": 500,
+  "global_step": 600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -77,6 +77,20 @@
       "eval_samples_per_second": 4.18,
       "eval_steps_per_second": 0.524,
       "step": 500
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 0.0002,
+      "loss": 1.0495,
+      "step": 600
+    },
+    {
+      "epoch": 0.8,
+      "eval_loss": 1.2602167129516602,
+      "eval_runtime": 455.1997,
+      "eval_samples_per_second": 4.24,
+      "eval_steps_per_second": 0.532,
+      "step": 600
     }
   ],
   "logging_steps": 100,
@@ -84,7 +98,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 1.6636259895410688e+16,
+  "total_flos": 1.996145225446195e+16,
   "trial_name": null,
   "trial_params": null
 }
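
The trainer_state.json diff records the new best checkpoint (checkpoint-600, eval_loss 1.2602) and appends the step-600 train and eval entries to log_history. A minimal sketch of reading those values back from the checkpoint (path illustrative; all keys used below appear in the diff above):

# Minimal sketch: inspect the updated trainer_state.json from this checkpoint.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])            # 600
print(state["best_metric"])            # 1.2602167129516602
print(state["best_model_checkpoint"])  # ./outputs/checkpoint-600

# Last eval record appended by this commit (step 600).
latest_eval = [e for e in state["log_history"] if "eval_loss" in e][-1]
print(latest_eval["eval_loss"], latest_eval["eval_runtime"])  # 1.2602... 455.1997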