immortalPi committed (verified)
Commit: a78bcb6 · Parent: ea661ae

Upload folder using huggingface_hub

adapter_config.json CHANGED
@@ -29,13 +29,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
-    "q_proj",
-    "up_proj",
     "o_proj",
+    "q_proj",
+    "down_proj",
     "v_proj",
     "gate_proj",
-    "down_proj"
+    "up_proj",
+    "k_proj"
   ],
   "target_parameters": null,
   "task_type": "SEQ_CLS",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:edd631c5036ee04697cc781e0080e4ceeeec4dca3c8f5b8dbc4e037e50058c05
-size 664593800
+oid sha256:64dd9b803586bb8a80e10f6d915bc9074d37b5545bbc9be6a6e0941ec5982ca0
+size 664603016
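This and the remaining binary files are Git LFS pointer files: the repository stores only the spec version, the blob's sha256 oid, and its byte size, while the weights themselves live in LFS storage. A small sketch of verifying a downloaded blob against such a pointer; the file paths are hypothetical:

    # Sketch: check a downloaded blob against the oid/size recorded in a
    # Git LFS pointer file (format: "version ...", "oid sha256:<hex>", "size <n>").
    # File paths below are hypothetical.
    import hashlib

    def parse_lfs_pointer(pointer_path):
        fields = {}
        with open(pointer_path) as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    def verify_blob(pointer_path, blob_path):
        fields = parse_lfs_pointer(pointer_path)
        expected_sha = fields["oid"].removeprefix("sha256:")
        expected_size = int(fields["size"])
        data = open(blob_path, "rb").read()
        return (hashlib.sha256(data).hexdigest() == expected_sha
                and len(data) == expected_size)

    print(verify_blob("adapter_model.safetensors.pointer", "adapter_model.safetensors"))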
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:71a59bebe794f1a808bca018d70f99f505319403c07240cd87156b7425e16bee
-size 1329396842
+oid sha256:790d597629cb916df27a771e70599d10a2ce72255c8ee695b47e80b8d160d7d1
+size 1329415274
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2adb2436d8231356dbef633a9b00f9c997d3d088cde9229bafc340f774707d36
+oid sha256:d43788e4fdd49a910adde6e956129a90a4b56e5ced8ed62f8b6e1c0cb844b3e7
 size 14645
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c70c8af470d16cae8adab12055155c2988464b9af430aed08af74c6c7f742ad5
+oid sha256:f1bea375e784db877b5f714bc6aaefba3577522e040195e1831e996a0014e30d
 size 1465
trainer_state.json CHANGED
@@ -1,64 +1,81 @@
 {
-  "best_global_step": 60,
-  "best_metric": 0.7314814814814815,
-  "best_model_checkpoint": "/content/gemma_lora_imb/checkpoint-60",
-  "epoch": 1.1764705882352942,
+  "best_global_step": 80,
+  "best_metric": 0.6859903381642513,
+  "best_model_checkpoint": "/content/gemma_lora_imb/checkpoint-80",
+  "epoch": 1.5686274509803921,
   "eval_steps": 20,
-  "global_step": 60,
+  "global_step": 80,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.39215686274509803,
-      "grad_norm": 33.80354309082031,
+      "grad_norm": 24.230987548828125,
       "learning_rate": 8.137254901960784e-06,
-      "loss": 1.2953,
+      "loss": 1.1665,
       "step": 20
     },
     {
       "epoch": 0.39215686274509803,
-      "eval_accuracy": 0.645320197044335,
-      "eval_f1": 0.6326530612244898,
-      "eval_loss": 0.9012272357940674,
-      "eval_runtime": 1.3299,
-      "eval_samples_per_second": 152.646,
-      "eval_steps_per_second": 5.264,
+      "eval_accuracy": 0.6206896551724138,
+      "eval_f1": 0.631578947368421,
+      "eval_loss": 1.0547616481781006,
+      "eval_runtime": 34.5129,
+      "eval_samples_per_second": 5.882,
+      "eval_steps_per_second": 0.203,
       "step": 20
     },
     {
       "epoch": 0.7843137254901961,
-      "grad_norm": 21.962677001953125,
+      "grad_norm": 26.34092140197754,
       "learning_rate": 6.176470588235295e-06,
-      "loss": 0.7861,
+      "loss": 0.8846,
       "step": 40
     },
     {
       "epoch": 0.7843137254901961,
-      "eval_accuracy": 0.6847290640394089,
-      "eval_f1": 0.6923076923076923,
-      "eval_loss": 0.764927089214325,
-      "eval_runtime": 1.3089,
-      "eval_samples_per_second": 155.088,
-      "eval_steps_per_second": 5.348,
+      "eval_accuracy": 0.6551724137931034,
+      "eval_f1": 0.6634615384615384,
+      "eval_loss": 0.8848929405212402,
+      "eval_runtime": 34.4309,
+      "eval_samples_per_second": 5.896,
+      "eval_steps_per_second": 0.203,
       "step": 40
     },
     {
       "epoch": 1.1764705882352942,
-      "grad_norm": 39.092864990234375,
+      "grad_norm": 28.944477081298828,
       "learning_rate": 4.215686274509805e-06,
-      "loss": 0.4942,
+      "loss": 0.5412,
       "step": 60
     },
     {
       "epoch": 1.1764705882352942,
-      "eval_accuracy": 0.7142857142857143,
-      "eval_f1": 0.7314814814814815,
-      "eval_loss": 0.7433407306671143,
-      "eval_runtime": 1.3004,
-      "eval_samples_per_second": 156.101,
-      "eval_steps_per_second": 5.383,
+      "eval_accuracy": 0.6699507389162561,
+      "eval_f1": 0.6824644549763034,
+      "eval_loss": 0.8630681037902832,
+      "eval_runtime": 34.3671,
+      "eval_samples_per_second": 5.907,
+      "eval_steps_per_second": 0.204,
       "step": 60
+    },
+    {
+      "epoch": 1.5686274509803921,
+      "grad_norm": 14.226760864257812,
+      "learning_rate": 2.254901960784314e-06,
+      "loss": 0.3088,
+      "step": 80
+    },
+    {
+      "epoch": 1.5686274509803921,
+      "eval_accuracy": 0.6798029556650246,
+      "eval_f1": 0.6859903381642513,
+      "eval_loss": 0.8230202794075012,
+      "eval_runtime": 34.5725,
+      "eval_samples_per_second": 5.872,
+      "eval_steps_per_second": 0.202,
+      "step": 80
     }
   ],
   "logging_steps": 20,
@@ -78,7 +95,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3346006641030144.0,
+  "total_flos": 4553569010414592.0,
   "train_batch_size": 32,
   "trial_name": null,
   "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c7796473a527b84a6f77258264c10212e0e9d41d86534b20e066486b8bf6013
+oid sha256:0ea7b0406c4d7142b34f920ec82ea3a1f32915f9c273c789d19ddd7b5d0cd3c4
 size 5905