WangXFng committed (verified)
Commit 7d35b8f · 1 Parent(s): 44e1ab3

Model save

adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
-    "o_proj",
-    "up_proj",
     "down_proj",
-    "gate_proj",
     "v_proj",
-    "q_proj"
+    "o_proj",
+    "q_proj",
+    "gate_proj",
+    "k_proj",
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1c562fea497beb9107f5413d5934bf3864db9d61d7d39787737cff1c10e37401
+oid sha256:12029ef30407a3d83ed399e75ec5d72a1fdb571a7ce6c5cf5d1dcdb9c3eac9f8
 size 1781853744
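Only the Git LFS pointer changes here: it records the object's SHA-256 and byte size, not the weights themselves. A minimal sketch of checking a locally downloaded copy of the file against the new pointer (the local path is an assumption):

```python
import hashlib

EXPECTED_OID = "12029ef30407a3d83ed399e75ec5d72a1fdb571a7ce6c5cf5d1dcdb9c3eac9f8"
EXPECTED_SIZE = 1781853744

def matches_lfs_pointer(path: str) -> bool:
    """Hash the file in chunks and compare against the pointer's oid and size."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

print(matches_lfs_pointer("adapter_model.safetensors"))  # assumed local path
```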
trainer_state.json CHANGED
@@ -10,67 +10,67 @@
   "log_history": [
     {
       "epoch": 0.4854368932038835,
-      "grad_norm": 0.42759230732917786,
+      "grad_norm": 0.429694265127182,
       "learning_rate": 8.786407766990292e-05,
-      "loss": 0.7922,
+      "loss": 0.7853,
       "step": 250
     },
     {
       "epoch": 0.970873786407767,
-      "grad_norm": 0.34185582399368286,
+      "grad_norm": 0.3274327218532562,
       "learning_rate": 7.572815533980583e-05,
-      "loss": 0.3971,
+      "loss": 0.395,
       "step": 500
     },
     {
       "epoch": 1.4563106796116505,
-      "grad_norm": 0.3420446217060089,
+      "grad_norm": 0.3228892683982849,
       "learning_rate": 6.359223300970875e-05,
-      "loss": 0.3661,
+      "loss": 0.3651,
       "step": 750
     },
     {
       "epoch": 1.941747572815534,
-      "grad_norm": 0.3248656094074249,
+      "grad_norm": 0.3434309661388397,
       "learning_rate": 5.145631067961165e-05,
-      "loss": 0.3525,
+      "loss": 0.3521,
       "step": 1000
     },
     {
       "epoch": 2.4271844660194173,
-      "grad_norm": 0.34264662861824036,
+      "grad_norm": 0.34895673394203186,
       "learning_rate": 3.9320388349514564e-05,
-      "loss": 0.3395,
+      "loss": 0.3388,
       "step": 1250
     },
     {
       "epoch": 2.912621359223301,
-      "grad_norm": 0.3852052390575409,
+      "grad_norm": 0.39222878217697144,
       "learning_rate": 2.7184466019417475e-05,
-      "loss": 0.3277,
+      "loss": 0.3261,
       "step": 1500
     },
     {
       "epoch": 3.3980582524271843,
-      "grad_norm": 0.402174174785614,
+      "grad_norm": 0.4256580173969269,
       "learning_rate": 1.5048543689320387e-05,
-      "loss": 0.3094,
+      "loss": 0.306,
       "step": 1750
     },
     {
       "epoch": 3.883495145631068,
-      "grad_norm": 0.3894464671611786,
+      "grad_norm": 0.41629472374916077,
       "learning_rate": 2.912621359223301e-06,
-      "loss": 0.2984,
+      "loss": 0.2938,
       "step": 2000
     },
     {
       "epoch": 4.0,
       "step": 2060,
       "total_flos": 1.4631911156603474e+18,
-      "train_loss": 0.3948872992135946,
-      "train_runtime": 18740.9732,
-      "train_samples_per_second": 28.139,
+      "train_loss": 0.39223564203503064,
+      "train_runtime": 18749.6172,
+      "train_samples_per_second": 28.126,
       "train_steps_per_second": 0.11
     }
   ],
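Every change in this hunk is a logged metric (per-step `grad_norm` and `loss`, plus the final training summary). A minimal sketch of reading those values back out of `trainer_state.json`, using only the keys visible in the diff:

```python
import json

with open("trainer_state.json") as fh:
    state = json.load(fh)

for entry in state["log_history"]:
    if "loss" in entry:  # the final summary entry reports train_loss instead
        print(f"step {entry['step']:>5}  epoch {entry['epoch']:.2f}  "
              f"loss {entry['loss']:.4f}  grad_norm {entry['grad_norm']:.4f}")
```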
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0d5f479cf1eadef4b27f3ea98dc36656b6a76cff3987cd2b0431c9bf903ec49a
+oid sha256:6c95733d4a4ee5cb7ce3d773e7d419b3140ff452f3f548a33474577b01303c51
 size 5240
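training_args.bin is, by convention, the `transformers.TrainingArguments` object serialized with `torch.save`, which is why only its LFS pointer (not a readable diff) appears here. A minimal sketch of inspecting it under that assumption:

```python
import torch

# Assumes the file is a pickled transformers.TrainingArguments object.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```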