ngocquangt2k46 committed
Commit 1ed8096 (verified)
1 Parent(s): 20d6b43

Training in progress, step 10, checkpoint
last-checkpoint/adapter_config.json CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "o_proj",
+    "down_proj",
     "k_proj",
     "v_proj",
     "q_proj",
-    "gate_proj",
-    "down_proj",
-    "up_proj"
+    "up_proj",
+    "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48d68a71dfef1c4c6dd33816bf2215605926ea753f15cdad07096d20c6e6b79b
+oid sha256:6aee6bb013fc8ea429d729acf7270e8468011967bb2e9b5e6a1e18618de0f56a
 size 50624
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8ad6c348e291e9de26f5c84511dba04c1d91aa94ab6d76842533644e0f233f9f
+oid sha256:1bbb243eab50ad3f978d2bd6df3bbac3ca9ba945a743cfe83c832345de43231c
 size 111142
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6c8ae2cb1e582e9310963ad3ed8b41f0acd771a18e5c2e6e9ee8cbbe52fdc3cd
+oid sha256:359d0ddb346e1a051d153b9361e52227f5a93bfd11d7d59f8d34fe81bcaff9e5
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2d63217b923cb177f669d6bc2174b89abdc6a56d968d279b505491b37976d9bb
+oid sha256:b4849a6ac0a1d895740f1ab4eba9d346b8d898008d0cfe93dd108cd928d7c63e
 size 1064
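
The four binary files above (adapter_model.safetensors, optimizer.pt, rng_state.pth, scheduler.pt) are stored as Git LFS pointers, so each diff only swaps the oid sha256 line while the recorded size stays the same. A minimal sketch of checking a downloaded artifact against its pointer, assuming both files exist locally at the hypothetical paths shown:

import hashlib

def matches_lfs_pointer(pointer_path, object_path):
    # Parse the "key value" lines of the LFS pointer (version, oid, size).
    fields = dict(
        line.strip().split(" ", 1) for line in open(pointer_path) if line.strip()
    )
    expected = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    actual = hashlib.sha256(open(object_path, "rb").read()).hexdigest()
    return actual == expected

# Hypothetical usage once the real binary has been fetched:
# matches_lfs_pointer("last-checkpoint/optimizer.pt", "/tmp/optimizer.pt")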
last-checkpoint/trainer_state.json CHANGED
@@ -1,16 +1,16 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0016256197675363732,
+  "epoch": 0.0032512395350727465,
   "eval_steps": 5,
-  "global_step": 5,
+  "global_step": 10,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.0003251239535072746,
-      "grad_norm": 0.024652473628520966,
+      "grad_norm": 0.021640103310346603,
       "learning_rate": 1e-05,
       "loss": 11.7645,
       "step": 1
@@ -18,46 +18,89 @@
     {
       "epoch": 0.0003251239535072746,
       "eval_loss": 11.76447582244873,
-      "eval_runtime": 276.626,
-      "eval_samples_per_second": 37.455,
-      "eval_steps_per_second": 18.729,
+      "eval_runtime": 249.3031,
+      "eval_samples_per_second": 41.56,
+      "eval_steps_per_second": 20.782,
       "step": 1
     },
     {
       "epoch": 0.0006502479070145493,
-      "grad_norm": 0.027672940865159035,
+      "grad_norm": 0.024706723168492317,
       "learning_rate": 2e-05,
       "loss": 11.7658,
       "step": 2
     },
     {
       "epoch": 0.000975371860521824,
-      "grad_norm": 0.025643622502684593,
+      "grad_norm": 0.022929754108190536,
       "learning_rate": 3e-05,
       "loss": 11.7649,
       "step": 3
     },
     {
       "epoch": 0.0013004958140290985,
-      "grad_norm": 0.025678519159555435,
+      "grad_norm": 0.022171195596456528,
       "learning_rate": 4e-05,
       "loss": 11.7648,
       "step": 4
     },
     {
       "epoch": 0.0016256197675363732,
-      "grad_norm": 0.02572811394929886,
+      "grad_norm": 0.02208411693572998,
       "learning_rate": 5e-05,
       "loss": 11.7649,
       "step": 5
     },
     {
       "epoch": 0.0016256197675363732,
-      "eval_loss": 11.764395713806152,
-      "eval_runtime": 276.5749,
-      "eval_samples_per_second": 37.462,
-      "eval_steps_per_second": 18.733,
+      "eval_loss": 11.764402389526367,
+      "eval_runtime": 249.1579,
+      "eval_samples_per_second": 41.584,
+      "eval_steps_per_second": 20.794,
       "step": 5
+    },
+    {
+      "epoch": 0.001950743721043648,
+      "grad_norm": 0.02443164400756359,
+      "learning_rate": 6e-05,
+      "loss": 11.7634,
+      "step": 6
+    },
+    {
+      "epoch": 0.0022758676745509225,
+      "grad_norm": 0.023470092564821243,
+      "learning_rate": 7e-05,
+      "loss": 11.7654,
+      "step": 7
+    },
+    {
+      "epoch": 0.002600991628058197,
+      "grad_norm": 0.023716744035482407,
+      "learning_rate": 8e-05,
+      "loss": 11.7644,
+      "step": 8
+    },
+    {
+      "epoch": 0.002926115581565472,
+      "grad_norm": 0.022970277816057205,
+      "learning_rate": 9e-05,
+      "loss": 11.7658,
+      "step": 9
+    },
+    {
+      "epoch": 0.0032512395350727465,
+      "grad_norm": 0.022244542837142944,
+      "learning_rate": 0.0001,
+      "loss": 11.7641,
+      "step": 10
+    },
+    {
+      "epoch": 0.0032512395350727465,
+      "eval_loss": 11.764139175415039,
+      "eval_runtime": 248.9949,
+      "eval_samples_per_second": 41.611,
+      "eval_steps_per_second": 20.808,
+      "step": 10
     }
   ],
   "logging_steps": 1,
@@ -77,7 +120,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 16296003502080.0,
+  "total_flos": 32592007004160.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e17a6c83dec956fb9efedd2c17093957afde82b766a4186ec8d108b6cbea110d
+oid sha256:9a413dd30c82bf1458b4c9aa8cd9fc6df506b56de5b15f10d2e2a254ff755ea0
 size 6776
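
training_args.bin is a torch-serialized TrainingArguments object, and the directory as a whole is the kind of checkpoint the Hugging Face Trainer can resume from. A hedged sketch, assuming the checkpoint has been downloaded and a Trainer has been rebuilt elsewhere with the same model and data:

import torch

# Recover the hyperparameters recorded for this run; weights_only=False is
# needed on recent torch versions to unpickle a non-tensor object.
args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.per_device_train_batch_size)

# Resume training from step 10 with an already-constructed Trainer
# (hypothetical `trainer` object, not defined in this repository):
# trainer.train(resume_from_checkpoint="last-checkpoint")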